]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-2.6.32.57-201202161839.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.32.57-201202161839.patch
CommitLineData
b86e1712
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..47f0daf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9+*.cis
10 *.cpio
11 *.csp
12+*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18+*.gcno
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *_MODULES
32+*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36@@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40+GPATH
41+GRTAGS
42+GSYMS
43+GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49+PERF*
50 SCCS
51 System.map*
52 TAGS
53@@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57+capability_names.h
58+capflags.c
59 classlist.h*
60+clut_vga16.c
61+common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65@@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69+config.c
70+config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74@@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78+gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90+initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103+mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110+mkpiggy
111 mkprep
112+mkregtable
113 mktables
114 mktree
115 modpost
116@@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120+piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124@@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128+regdb.c
129 relocs
130+rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152+vmlinux.bin.all
153+vmlinux.bin.bz2
154 vmlinux.lds
155+vmlinux.relocs
156+voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zoffset.h
169diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170index c840e7d..f4c451c 100644
171--- a/Documentation/kernel-parameters.txt
172+++ b/Documentation/kernel-parameters.txt
173@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178+ virtualization environments that don't cope well with the
179+ expand down segment used by UDEREF on X86-32 or the frequent
180+ page table updates on X86-64.
181+
182+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183+
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187diff --git a/Makefile b/Makefile
188index 3377650..76aacb3 100644
189--- a/Makefile
190+++ b/Makefile
191@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196-HOSTCXXFLAGS = -O2
197+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207-PHONY += scripts_basic
208-scripts_basic:
209+PHONY += scripts_basic gcc-plugins
210+scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214@@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218- cscope TAGS tags help %docs check% \
219+ cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223@@ -526,6 +527,46 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227+ifndef DISABLE_PAX_PLUGINS
228+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231+endif
232+ifdef CONFIG_PAX_MEMORY_STACKLEAK
233+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235+endif
236+ifdef CONFIG_KALLOCSTAT_PLUGIN
237+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238+endif
239+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
242+endif
243+ifdef CONFIG_CHECKER_PLUGIN
244+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246+endif
247+endif
248+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250+ifeq ($(KBUILD_EXTMOD),)
251+gcc-plugins:
252+ $(Q)$(MAKE) $(build)=tools/gcc
253+else
254+gcc-plugins: ;
255+endif
256+else
257+gcc-plugins:
258+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
259+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
260+else
261+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
262+endif
263+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
264+endif
265+endif
266+
267 include $(srctree)/arch/$(SRCARCH)/Makefile
268
269 ifneq ($(CONFIG_FRAME_WARN),0)
270@@ -647,7 +688,7 @@ export mod_strip_cmd
271
272
273 ifeq ($(KBUILD_EXTMOD),)
274-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
275+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
276
277 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
278 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
279@@ -868,6 +909,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
280
281 # The actual objects are generated when descending,
282 # make sure no implicit rule kicks in
283+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
284 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
285
286 # Handle descending into subdirectories listed in $(vmlinux-dirs)
287@@ -877,7 +919,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288 # Error messages still appears in the original language
289
290 PHONY += $(vmlinux-dirs)
291-$(vmlinux-dirs): prepare scripts
292+$(vmlinux-dirs): gcc-plugins prepare scripts
293 $(Q)$(MAKE) $(build)=$@
294
295 # Build the kernel release string
296@@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
297 $(Q)$(MAKE) $(build)=. missing-syscalls
298
299 # All the preparing..
300+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
301 prepare: prepare0
302
303 # The asm symlink changes when $(ARCH) changes.
304@@ -1127,6 +1170,7 @@ all: modules
305 # using awk while concatenating to the final file.
306
307 PHONY += modules
308+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
309 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
310 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
311 @$(kecho) ' Building modules, stage 2.';
312@@ -1136,7 +1180,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
313
314 # Target to prepare building external modules
315 PHONY += modules_prepare
316-modules_prepare: prepare scripts
317+modules_prepare: gcc-plugins prepare scripts
318
319 # Target to install modules
320 PHONY += modules_install
321@@ -1201,7 +1245,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
322 include/linux/autoconf.h include/linux/version.h \
323 include/linux/utsrelease.h \
324 include/linux/bounds.h include/asm*/asm-offsets.h \
325- Module.symvers Module.markers tags TAGS cscope*
326+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
327
328 # clean - Delete most, but leave enough to build external modules
329 #
330@@ -1245,7 +1289,7 @@ distclean: mrproper
331 @find $(srctree) $(RCS_FIND_IGNORE) \
332 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
333 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
334- -o -name '.*.rej' -o -size 0 \
335+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
336 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
337 -type f -print | xargs rm -f
338
339@@ -1292,6 +1336,7 @@ help:
340 @echo ' modules_prepare - Set up for building external modules'
341 @echo ' tags/TAGS - Generate tags file for editors'
342 @echo ' cscope - Generate cscope index'
343+ @echo ' gtags - Generate GNU GLOBAL index'
344 @echo ' kernelrelease - Output the release version string'
345 @echo ' kernelversion - Output the version stored in Makefile'
346 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
347@@ -1393,6 +1438,7 @@ PHONY += $(module-dirs) modules
348 $(module-dirs): crmodverdir $(objtree)/Module.symvers
349 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
350
351+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
352 modules: $(module-dirs)
353 @$(kecho) ' Building modules, stage 2.';
354 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
355@@ -1448,7 +1494,7 @@ endif # KBUILD_EXTMOD
356 quiet_cmd_tags = GEN $@
357 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
358
359-tags TAGS cscope: FORCE
360+tags TAGS cscope gtags: FORCE
361 $(call cmd,tags)
362
363 # Scripts to check various things for consistency
364@@ -1513,17 +1559,19 @@ else
365 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
366 endif
367
368-%.s: %.c prepare scripts FORCE
369+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
370+%.s: %.c gcc-plugins prepare scripts FORCE
371 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
372 %.i: %.c prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374-%.o: %.c prepare scripts FORCE
375+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
376+%.o: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.lst: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380-%.s: %.S prepare scripts FORCE
381+%.s: %.S gcc-plugins prepare scripts FORCE
382 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
383-%.o: %.S prepare scripts FORCE
384+%.o: %.S gcc-plugins prepare scripts FORCE
385 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
386 %.symtypes: %.c prepare scripts FORCE
387 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
388@@ -1533,11 +1581,13 @@ endif
389 $(cmd_crmodverdir)
390 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
391 $(build)=$(build-dir)
392-%/: prepare scripts FORCE
393+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
394+%/: gcc-plugins prepare scripts FORCE
395 $(cmd_crmodverdir)
396 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
397 $(build)=$(build-dir)
398-%.ko: prepare scripts FORCE
399+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
400+%.ko: gcc-plugins prepare scripts FORCE
401 $(cmd_crmodverdir)
402 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
403 $(build)=$(build-dir) $(@:.ko=.o)
404diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
405index 5c75c1b..c82f878 100644
406--- a/arch/alpha/include/asm/elf.h
407+++ b/arch/alpha/include/asm/elf.h
408@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
409
410 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
411
412+#ifdef CONFIG_PAX_ASLR
413+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
414+
415+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
416+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
417+#endif
418+
419 /* $0 is set by ld.so to a pointer to a function which might be
420 registered using atexit. This provides a mean for the dynamic
421 linker to call DT_FINI functions for shared libraries that have
422diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
423index 3f0c59f..cf1e100 100644
424--- a/arch/alpha/include/asm/pgtable.h
425+++ b/arch/alpha/include/asm/pgtable.h
426@@ -101,6 +101,17 @@ struct vm_area_struct;
427 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
428 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
429 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
430+
431+#ifdef CONFIG_PAX_PAGEEXEC
432+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
433+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
434+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
435+#else
436+# define PAGE_SHARED_NOEXEC PAGE_SHARED
437+# define PAGE_COPY_NOEXEC PAGE_COPY
438+# define PAGE_READONLY_NOEXEC PAGE_READONLY
439+#endif
440+
441 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
442
443 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
444diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
445index ebc3c89..20cfa63 100644
446--- a/arch/alpha/kernel/module.c
447+++ b/arch/alpha/kernel/module.c
448@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
449
450 /* The small sections were sorted to the end of the segment.
451 The following should definitely cover them. */
452- gp = (u64)me->module_core + me->core_size - 0x8000;
453+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
454 got = sechdrs[me->arch.gotsecindex].sh_addr;
455
456 for (i = 0; i < n; i++) {
457diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
458index a94e49c..d71dd44 100644
459--- a/arch/alpha/kernel/osf_sys.c
460+++ b/arch/alpha/kernel/osf_sys.c
461@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
462 /* At this point: (!vma || addr < vma->vm_end). */
463 if (limit - len < addr)
464 return -ENOMEM;
465- if (!vma || addr + len <= vma->vm_start)
466+ if (check_heap_stack_gap(vma, addr, len))
467 return addr;
468 addr = vma->vm_end;
469 vma = vma->vm_next;
470@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
471 merely specific addresses, but regions of memory -- perhaps
472 this feature should be incorporated into all ports? */
473
474+#ifdef CONFIG_PAX_RANDMMAP
475+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
476+#endif
477+
478 if (addr) {
479 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
480 if (addr != (unsigned long) -ENOMEM)
481@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
482 }
483
484 /* Next, try allocating at TASK_UNMAPPED_BASE. */
485- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
486- len, limit);
487+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
488+
489 if (addr != (unsigned long) -ENOMEM)
490 return addr;
491
492diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
493index 00a31de..2ded0f2 100644
494--- a/arch/alpha/mm/fault.c
495+++ b/arch/alpha/mm/fault.c
496@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
497 __reload_thread(pcb);
498 }
499
500+#ifdef CONFIG_PAX_PAGEEXEC
501+/*
502+ * PaX: decide what to do with offenders (regs->pc = fault address)
503+ *
504+ * returns 1 when task should be killed
505+ * 2 when patched PLT trampoline was detected
506+ * 3 when unpatched PLT trampoline was detected
507+ */
508+static int pax_handle_fetch_fault(struct pt_regs *regs)
509+{
510+
511+#ifdef CONFIG_PAX_EMUPLT
512+ int err;
513+
514+ do { /* PaX: patched PLT emulation #1 */
515+ unsigned int ldah, ldq, jmp;
516+
517+ err = get_user(ldah, (unsigned int *)regs->pc);
518+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
519+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
520+
521+ if (err)
522+ break;
523+
524+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
525+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
526+ jmp == 0x6BFB0000U)
527+ {
528+ unsigned long r27, addr;
529+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
530+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
531+
532+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
533+ err = get_user(r27, (unsigned long *)addr);
534+ if (err)
535+ break;
536+
537+ regs->r27 = r27;
538+ regs->pc = r27;
539+ return 2;
540+ }
541+ } while (0);
542+
543+ do { /* PaX: patched PLT emulation #2 */
544+ unsigned int ldah, lda, br;
545+
546+ err = get_user(ldah, (unsigned int *)regs->pc);
547+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
548+ err |= get_user(br, (unsigned int *)(regs->pc+8));
549+
550+ if (err)
551+ break;
552+
553+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
554+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
555+ (br & 0xFFE00000U) == 0xC3E00000U)
556+ {
557+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
558+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
559+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
560+
561+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
562+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
563+ return 2;
564+ }
565+ } while (0);
566+
567+ do { /* PaX: unpatched PLT emulation */
568+ unsigned int br;
569+
570+ err = get_user(br, (unsigned int *)regs->pc);
571+
572+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
573+ unsigned int br2, ldq, nop, jmp;
574+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
575+
576+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
577+ err = get_user(br2, (unsigned int *)addr);
578+ err |= get_user(ldq, (unsigned int *)(addr+4));
579+ err |= get_user(nop, (unsigned int *)(addr+8));
580+ err |= get_user(jmp, (unsigned int *)(addr+12));
581+ err |= get_user(resolver, (unsigned long *)(addr+16));
582+
583+ if (err)
584+ break;
585+
586+ if (br2 == 0xC3600000U &&
587+ ldq == 0xA77B000CU &&
588+ nop == 0x47FF041FU &&
589+ jmp == 0x6B7B0000U)
590+ {
591+ regs->r28 = regs->pc+4;
592+ regs->r27 = addr+16;
593+ regs->pc = resolver;
594+ return 3;
595+ }
596+ }
597+ } while (0);
598+#endif
599+
600+ return 1;
601+}
602+
603+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
604+{
605+ unsigned long i;
606+
607+ printk(KERN_ERR "PAX: bytes at PC: ");
608+ for (i = 0; i < 5; i++) {
609+ unsigned int c;
610+ if (get_user(c, (unsigned int *)pc+i))
611+ printk(KERN_CONT "???????? ");
612+ else
613+ printk(KERN_CONT "%08x ", c);
614+ }
615+ printk("\n");
616+}
617+#endif
618
619 /*
620 * This routine handles page faults. It determines the address,
621@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
622 good_area:
623 si_code = SEGV_ACCERR;
624 if (cause < 0) {
625- if (!(vma->vm_flags & VM_EXEC))
626+ if (!(vma->vm_flags & VM_EXEC)) {
627+
628+#ifdef CONFIG_PAX_PAGEEXEC
629+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
630+ goto bad_area;
631+
632+ up_read(&mm->mmap_sem);
633+ switch (pax_handle_fetch_fault(regs)) {
634+
635+#ifdef CONFIG_PAX_EMUPLT
636+ case 2:
637+ case 3:
638+ return;
639+#endif
640+
641+ }
642+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
643+ do_group_exit(SIGKILL);
644+#else
645 goto bad_area;
646+#endif
647+
648+ }
649 } else if (!cause) {
650 /* Allow reads even for write-only mappings */
651 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
652diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
653index 6aac3f5..265536b 100644
654--- a/arch/arm/include/asm/elf.h
655+++ b/arch/arm/include/asm/elf.h
656@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 the loader. We need to make sure that it is out of the way of the program
658 that it will "exec", and that there is sufficient room for the brk. */
659
660-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
661+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
662+
663+#ifdef CONFIG_PAX_ASLR
664+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
665+
666+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
667+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
668+#endif
669
670 /* When the program starts, a1 contains a pointer to a function to be
671 registered with atexit, as per the SVR4 ABI. A value of 0 means we
672diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
673index c019949..388fdd1 100644
674--- a/arch/arm/include/asm/kmap_types.h
675+++ b/arch/arm/include/asm/kmap_types.h
676@@ -19,6 +19,7 @@ enum km_type {
677 KM_SOFTIRQ0,
678 KM_SOFTIRQ1,
679 KM_L2_CACHE,
680+ KM_CLEARPAGE,
681 KM_TYPE_NR
682 };
683
684diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
685index 1d6bd40..fba0cb9 100644
686--- a/arch/arm/include/asm/uaccess.h
687+++ b/arch/arm/include/asm/uaccess.h
688@@ -22,6 +22,8 @@
689 #define VERIFY_READ 0
690 #define VERIFY_WRITE 1
691
692+extern void check_object_size(const void *ptr, unsigned long n, bool to);
693+
694 /*
695 * The exception table consists of pairs of addresses: the first is the
696 * address of an instruction that is allowed to fault, and the second is
697@@ -387,8 +389,23 @@ do { \
698
699
700 #ifdef CONFIG_MMU
701-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
702-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
703+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
704+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
705+
706+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
707+{
708+ if (!__builtin_constant_p(n))
709+ check_object_size(to, n, false);
710+ return ___copy_from_user(to, from, n);
711+}
712+
713+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
714+{
715+ if (!__builtin_constant_p(n))
716+ check_object_size(from, n, true);
717+ return ___copy_to_user(to, from, n);
718+}
719+
720 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
721 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
722 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
723@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
724
725 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
726 {
727+ if ((long)n < 0)
728+ return n;
729+
730 if (access_ok(VERIFY_READ, from, n))
731 n = __copy_from_user(to, from, n);
732 else /* security hole - plug it */
733@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
734
735 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
736 {
737+ if ((long)n < 0)
738+ return n;
739+
740 if (access_ok(VERIFY_WRITE, to, n))
741 n = __copy_to_user(to, from, n);
742 return n;
743diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
744index 0e62770..e2c2cd6 100644
745--- a/arch/arm/kernel/armksyms.c
746+++ b/arch/arm/kernel/armksyms.c
747@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
748 #ifdef CONFIG_MMU
749 EXPORT_SYMBOL(copy_page);
750
751-EXPORT_SYMBOL(__copy_from_user);
752-EXPORT_SYMBOL(__copy_to_user);
753+EXPORT_SYMBOL(___copy_from_user);
754+EXPORT_SYMBOL(___copy_to_user);
755 EXPORT_SYMBOL(__clear_user);
756
757 EXPORT_SYMBOL(__get_user_1);
758diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
759index ba8ccfe..2dc34dc 100644
760--- a/arch/arm/kernel/kgdb.c
761+++ b/arch/arm/kernel/kgdb.c
762@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
763 * and we handle the normal undef case within the do_undefinstr
764 * handler.
765 */
766-struct kgdb_arch arch_kgdb_ops = {
767+const struct kgdb_arch arch_kgdb_ops = {
768 #ifndef __ARMEB__
769 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
770 #else /* ! __ARMEB__ */
771diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
772index 3f361a7..6e806e1 100644
773--- a/arch/arm/kernel/traps.c
774+++ b/arch/arm/kernel/traps.c
775@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
776
777 DEFINE_SPINLOCK(die_lock);
778
779+extern void gr_handle_kernel_exploit(void);
780+
781 /*
782 * This function is protected against re-entrancy.
783 */
784@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
785 if (panic_on_oops)
786 panic("Fatal exception");
787
788+ gr_handle_kernel_exploit();
789+
790 do_exit(SIGSEGV);
791 }
792
793diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
794index e4fe124..0fc246b 100644
795--- a/arch/arm/lib/copy_from_user.S
796+++ b/arch/arm/lib/copy_from_user.S
797@@ -16,7 +16,7 @@
798 /*
799 * Prototype:
800 *
801- * size_t __copy_from_user(void *to, const void *from, size_t n)
802+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
803 *
804 * Purpose:
805 *
806@@ -84,11 +84,11 @@
807
808 .text
809
810-ENTRY(__copy_from_user)
811+ENTRY(___copy_from_user)
812
813 #include "copy_template.S"
814
815-ENDPROC(__copy_from_user)
816+ENDPROC(___copy_from_user)
817
818 .section .fixup,"ax"
819 .align 0
820diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
821index 1a71e15..ac7b258 100644
822--- a/arch/arm/lib/copy_to_user.S
823+++ b/arch/arm/lib/copy_to_user.S
824@@ -16,7 +16,7 @@
825 /*
826 * Prototype:
827 *
828- * size_t __copy_to_user(void *to, const void *from, size_t n)
829+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
830 *
831 * Purpose:
832 *
833@@ -88,11 +88,11 @@
834 .text
835
836 ENTRY(__copy_to_user_std)
837-WEAK(__copy_to_user)
838+WEAK(___copy_to_user)
839
840 #include "copy_template.S"
841
842-ENDPROC(__copy_to_user)
843+ENDPROC(___copy_to_user)
844
845 .section .fixup,"ax"
846 .align 0
847diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
848index ffdd274..91017b6 100644
849--- a/arch/arm/lib/uaccess.S
850+++ b/arch/arm/lib/uaccess.S
851@@ -19,7 +19,7 @@
852
853 #define PAGE_SHIFT 12
854
855-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
856+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
857 * Purpose : copy a block to user memory from kernel memory
858 * Params : to - user memory
859 * : from - kernel memory
860@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
861 sub r2, r2, ip
862 b .Lc2u_dest_aligned
863
864-ENTRY(__copy_to_user)
865+ENTRY(___copy_to_user)
866 stmfd sp!, {r2, r4 - r7, lr}
867 cmp r2, #4
868 blt .Lc2u_not_enough
869@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
870 ldrgtb r3, [r1], #0
871 USER( strgtbt r3, [r0], #1) @ May fault
872 b .Lc2u_finished
873-ENDPROC(__copy_to_user)
874+ENDPROC(___copy_to_user)
875
876 .section .fixup,"ax"
877 .align 0
878 9001: ldmfd sp!, {r0, r4 - r7, pc}
879 .previous
880
881-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
882+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
883 * Purpose : copy a block from user memory to kernel memory
884 * Params : to - kernel memory
885 * : from - user memory
886@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
887 sub r2, r2, ip
888 b .Lcfu_dest_aligned
889
890-ENTRY(__copy_from_user)
891+ENTRY(___copy_from_user)
892 stmfd sp!, {r0, r2, r4 - r7, lr}
893 cmp r2, #4
894 blt .Lcfu_not_enough
895@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
896 USER( ldrgtbt r3, [r1], #1) @ May fault
897 strgtb r3, [r0], #1
898 b .Lcfu_finished
899-ENDPROC(__copy_from_user)
900+ENDPROC(___copy_from_user)
901
902 .section .fixup,"ax"
903 .align 0
904diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
905index 6b967ff..67d5b2b 100644
906--- a/arch/arm/lib/uaccess_with_memcpy.c
907+++ b/arch/arm/lib/uaccess_with_memcpy.c
908@@ -97,7 +97,7 @@ out:
909 }
910
911 unsigned long
912-__copy_to_user(void __user *to, const void *from, unsigned long n)
913+___copy_to_user(void __user *to, const void *from, unsigned long n)
914 {
915 /*
916 * This test is stubbed out of the main function above to keep
917diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
918index 4028724..beec230 100644
919--- a/arch/arm/mach-at91/pm.c
920+++ b/arch/arm/mach-at91/pm.c
921@@ -348,7 +348,7 @@ static void at91_pm_end(void)
922 }
923
924
925-static struct platform_suspend_ops at91_pm_ops ={
926+static const struct platform_suspend_ops at91_pm_ops ={
927 .valid = at91_pm_valid_state,
928 .begin = at91_pm_begin,
929 .enter = at91_pm_enter,
930diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
931index 5218943..0a34552 100644
932--- a/arch/arm/mach-omap1/pm.c
933+++ b/arch/arm/mach-omap1/pm.c
934@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
935
936
937
938-static struct platform_suspend_ops omap_pm_ops ={
939+static const struct platform_suspend_ops omap_pm_ops ={
940 .prepare = omap_pm_prepare,
941 .enter = omap_pm_enter,
942 .finish = omap_pm_finish,
943diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
944index bff5c4e..d4c649b 100644
945--- a/arch/arm/mach-omap2/pm24xx.c
946+++ b/arch/arm/mach-omap2/pm24xx.c
947@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
948 enable_hlt();
949 }
950
951-static struct platform_suspend_ops omap_pm_ops = {
952+static const struct platform_suspend_ops omap_pm_ops = {
953 .prepare = omap2_pm_prepare,
954 .enter = omap2_pm_enter,
955 .finish = omap2_pm_finish,
956diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
957index 8946319..7d3e661 100644
958--- a/arch/arm/mach-omap2/pm34xx.c
959+++ b/arch/arm/mach-omap2/pm34xx.c
960@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
961 return;
962 }
963
964-static struct platform_suspend_ops omap_pm_ops = {
965+static const struct platform_suspend_ops omap_pm_ops = {
966 .begin = omap3_pm_begin,
967 .end = omap3_pm_end,
968 .prepare = omap3_pm_prepare,
969diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
970index b3d8d53..6e68ebc 100644
971--- a/arch/arm/mach-pnx4008/pm.c
972+++ b/arch/arm/mach-pnx4008/pm.c
973@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
974 (state == PM_SUSPEND_MEM);
975 }
976
977-static struct platform_suspend_ops pnx4008_pm_ops = {
978+static const struct platform_suspend_ops pnx4008_pm_ops = {
979 .enter = pnx4008_pm_enter,
980 .valid = pnx4008_pm_valid,
981 };
982diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
983index 7693355..9beb00a 100644
984--- a/arch/arm/mach-pxa/pm.c
985+++ b/arch/arm/mach-pxa/pm.c
986@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
987 pxa_cpu_pm_fns->finish();
988 }
989
990-static struct platform_suspend_ops pxa_pm_ops = {
991+static const struct platform_suspend_ops pxa_pm_ops = {
992 .valid = pxa_pm_valid,
993 .enter = pxa_pm_enter,
994 .prepare = pxa_pm_prepare,
995diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
996index 629e05d..06be589 100644
997--- a/arch/arm/mach-pxa/sharpsl_pm.c
998+++ b/arch/arm/mach-pxa/sharpsl_pm.c
999@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1000 }
1001
1002 #ifdef CONFIG_PM
1003-static struct platform_suspend_ops sharpsl_pm_ops = {
1004+static const struct platform_suspend_ops sharpsl_pm_ops = {
1005 .prepare = pxa_pm_prepare,
1006 .finish = pxa_pm_finish,
1007 .enter = corgi_pxa_pm_enter,
1008diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1009index c83fdc8..ab9fc44 100644
1010--- a/arch/arm/mach-sa1100/pm.c
1011+++ b/arch/arm/mach-sa1100/pm.c
1012@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1013 return virt_to_phys(sp);
1014 }
1015
1016-static struct platform_suspend_ops sa11x0_pm_ops = {
1017+static const struct platform_suspend_ops sa11x0_pm_ops = {
1018 .enter = sa11x0_pm_enter,
1019 .valid = suspend_valid_only_mem,
1020 };
1021diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1022index 3191cd6..c0739db 100644
1023--- a/arch/arm/mm/fault.c
1024+++ b/arch/arm/mm/fault.c
1025@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1026 }
1027 #endif
1028
1029+#ifdef CONFIG_PAX_PAGEEXEC
1030+ if (fsr & FSR_LNX_PF) {
1031+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1032+ do_group_exit(SIGKILL);
1033+ }
1034+#endif
1035+
1036 tsk->thread.address = addr;
1037 tsk->thread.error_code = fsr;
1038 tsk->thread.trap_no = 14;
1039@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1040 }
1041 #endif /* CONFIG_MMU */
1042
1043+#ifdef CONFIG_PAX_PAGEEXEC
1044+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1045+{
1046+ long i;
1047+
1048+ printk(KERN_ERR "PAX: bytes at PC: ");
1049+ for (i = 0; i < 20; i++) {
1050+ unsigned char c;
1051+ if (get_user(c, (__force unsigned char __user *)pc+i))
1052+ printk(KERN_CONT "?? ");
1053+ else
1054+ printk(KERN_CONT "%02x ", c);
1055+ }
1056+ printk("\n");
1057+
1058+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1059+ for (i = -1; i < 20; i++) {
1060+ unsigned long c;
1061+ if (get_user(c, (__force unsigned long __user *)sp+i))
1062+ printk(KERN_CONT "???????? ");
1063+ else
1064+ printk(KERN_CONT "%08lx ", c);
1065+ }
1066+ printk("\n");
1067+}
1068+#endif
1069+
1070 /*
1071 * First Level Translation Fault Handler
1072 *
1073diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1074index f5abc51..7ec524c 100644
1075--- a/arch/arm/mm/mmap.c
1076+++ b/arch/arm/mm/mmap.c
1077@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1078 if (len > TASK_SIZE)
1079 return -ENOMEM;
1080
1081+#ifdef CONFIG_PAX_RANDMMAP
1082+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1083+#endif
1084+
1085 if (addr) {
1086 if (do_align)
1087 addr = COLOUR_ALIGN(addr, pgoff);
1088@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1089 addr = PAGE_ALIGN(addr);
1090
1091 vma = find_vma(mm, addr);
1092- if (TASK_SIZE - len >= addr &&
1093- (!vma || addr + len <= vma->vm_start))
1094+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1095 return addr;
1096 }
1097 if (len > mm->cached_hole_size) {
1098- start_addr = addr = mm->free_area_cache;
1099+ start_addr = addr = mm->free_area_cache;
1100 } else {
1101- start_addr = addr = TASK_UNMAPPED_BASE;
1102- mm->cached_hole_size = 0;
1103+ start_addr = addr = mm->mmap_base;
1104+ mm->cached_hole_size = 0;
1105 }
1106
1107 full_search:
1108@@ -94,14 +97,14 @@ full_search:
1109 * Start a new search - just in case we missed
1110 * some holes.
1111 */
1112- if (start_addr != TASK_UNMAPPED_BASE) {
1113- start_addr = addr = TASK_UNMAPPED_BASE;
1114+ if (start_addr != mm->mmap_base) {
1115+ start_addr = addr = mm->mmap_base;
1116 mm->cached_hole_size = 0;
1117 goto full_search;
1118 }
1119 return -ENOMEM;
1120 }
1121- if (!vma || addr + len <= vma->vm_start) {
1122+ if (check_heap_stack_gap(vma, addr, len)) {
1123 /*
1124 * Remember the place where we stopped the search:
1125 */
1126diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1127index 8d97db2..b66cfa5 100644
1128--- a/arch/arm/plat-s3c/pm.c
1129+++ b/arch/arm/plat-s3c/pm.c
1130@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1131 s3c_pm_check_cleanup();
1132 }
1133
1134-static struct platform_suspend_ops s3c_pm_ops = {
1135+static const struct platform_suspend_ops s3c_pm_ops = {
1136 .enter = s3c_pm_enter,
1137 .prepare = s3c_pm_prepare,
1138 .finish = s3c_pm_finish,
1139diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1140index d5d1d41..856e2ed 100644
1141--- a/arch/avr32/include/asm/elf.h
1142+++ b/arch/avr32/include/asm/elf.h
1143@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1144 the loader. We need to make sure that it is out of the way of the program
1145 that it will "exec", and that there is sufficient room for the brk. */
1146
1147-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1148+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1149
1150+#ifdef CONFIG_PAX_ASLR
1151+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1152+
1153+#define PAX_DELTA_MMAP_LEN 15
1154+#define PAX_DELTA_STACK_LEN 15
1155+#endif
1156
1157 /* This yields a mask that user programs can use to figure out what
1158 instruction set this CPU supports. This could be done in user space,
1159diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1160index b7f5c68..556135c 100644
1161--- a/arch/avr32/include/asm/kmap_types.h
1162+++ b/arch/avr32/include/asm/kmap_types.h
1163@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1164 D(11) KM_IRQ1,
1165 D(12) KM_SOFTIRQ0,
1166 D(13) KM_SOFTIRQ1,
1167-D(14) KM_TYPE_NR
1168+D(14) KM_CLEARPAGE,
1169+D(15) KM_TYPE_NR
1170 };
1171
1172 #undef D
1173diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1174index f021edf..32d680e 100644
1175--- a/arch/avr32/mach-at32ap/pm.c
1176+++ b/arch/avr32/mach-at32ap/pm.c
1177@@ -176,7 +176,7 @@ out:
1178 return 0;
1179 }
1180
1181-static struct platform_suspend_ops avr32_pm_ops = {
1182+static const struct platform_suspend_ops avr32_pm_ops = {
1183 .valid = avr32_pm_valid_state,
1184 .enter = avr32_pm_enter,
1185 };
1186diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1187index b61d86d..e292c7f 100644
1188--- a/arch/avr32/mm/fault.c
1189+++ b/arch/avr32/mm/fault.c
1190@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1191
1192 int exception_trace = 1;
1193
1194+#ifdef CONFIG_PAX_PAGEEXEC
1195+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1196+{
1197+ unsigned long i;
1198+
1199+ printk(KERN_ERR "PAX: bytes at PC: ");
1200+ for (i = 0; i < 20; i++) {
1201+ unsigned char c;
1202+ if (get_user(c, (unsigned char *)pc+i))
1203+ printk(KERN_CONT "???????? ");
1204+ else
1205+ printk(KERN_CONT "%02x ", c);
1206+ }
1207+ printk("\n");
1208+}
1209+#endif
1210+
1211 /*
1212 * This routine handles page faults. It determines the address and the
1213 * problem, and then passes it off to one of the appropriate routines.
1214@@ -157,6 +174,16 @@ bad_area:
1215 up_read(&mm->mmap_sem);
1216
1217 if (user_mode(regs)) {
1218+
1219+#ifdef CONFIG_PAX_PAGEEXEC
1220+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1221+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1222+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1223+ do_group_exit(SIGKILL);
1224+ }
1225+ }
1226+#endif
1227+
1228 if (exception_trace && printk_ratelimit())
1229 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1230 "sp %08lx ecr %lu\n",
1231diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1232index cce79d0..c406c85 100644
1233--- a/arch/blackfin/kernel/kgdb.c
1234+++ b/arch/blackfin/kernel/kgdb.c
1235@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1236 return -1; /* this means that we do not want to exit from the handler */
1237 }
1238
1239-struct kgdb_arch arch_kgdb_ops = {
1240+const struct kgdb_arch arch_kgdb_ops = {
1241 .gdb_bpt_instr = {0xa1},
1242 #ifdef CONFIG_SMP
1243 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1244diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1245index 8837be4..b2fb413 100644
1246--- a/arch/blackfin/mach-common/pm.c
1247+++ b/arch/blackfin/mach-common/pm.c
1248@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1249 return 0;
1250 }
1251
1252-struct platform_suspend_ops bfin_pm_ops = {
1253+const struct platform_suspend_ops bfin_pm_ops = {
1254 .enter = bfin_pm_enter,
1255 .valid = bfin_pm_valid,
1256 };
1257diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1258index f8e16b2..c73ff79 100644
1259--- a/arch/frv/include/asm/kmap_types.h
1260+++ b/arch/frv/include/asm/kmap_types.h
1261@@ -23,6 +23,7 @@ enum km_type {
1262 KM_IRQ1,
1263 KM_SOFTIRQ0,
1264 KM_SOFTIRQ1,
1265+ KM_CLEARPAGE,
1266 KM_TYPE_NR
1267 };
1268
1269diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1270index 385fd30..6c3d97e 100644
1271--- a/arch/frv/mm/elf-fdpic.c
1272+++ b/arch/frv/mm/elf-fdpic.c
1273@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1274 if (addr) {
1275 addr = PAGE_ALIGN(addr);
1276 vma = find_vma(current->mm, addr);
1277- if (TASK_SIZE - len >= addr &&
1278- (!vma || addr + len <= vma->vm_start))
1279+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1280 goto success;
1281 }
1282
1283@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1284 for (; vma; vma = vma->vm_next) {
1285 if (addr > limit)
1286 break;
1287- if (addr + len <= vma->vm_start)
1288+ if (check_heap_stack_gap(vma, addr, len))
1289 goto success;
1290 addr = vma->vm_end;
1291 }
1292@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1293 for (; vma; vma = vma->vm_next) {
1294 if (addr > limit)
1295 break;
1296- if (addr + len <= vma->vm_start)
1297+ if (check_heap_stack_gap(vma, addr, len))
1298 goto success;
1299 addr = vma->vm_end;
1300 }
1301diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1302index e4a80d8..11a7ea1 100644
1303--- a/arch/ia64/hp/common/hwsw_iommu.c
1304+++ b/arch/ia64/hp/common/hwsw_iommu.c
1305@@ -17,7 +17,7 @@
1306 #include <linux/swiotlb.h>
1307 #include <asm/machvec.h>
1308
1309-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1310+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1311
1312 /* swiotlb declarations & definitions: */
1313 extern int swiotlb_late_init_with_default_size (size_t size);
1314@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1315 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1316 }
1317
1318-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1319+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1320 {
1321 if (use_swiotlb(dev))
1322 return &swiotlb_dma_ops;
1323diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1324index 01ae69b..35752fd 100644
1325--- a/arch/ia64/hp/common/sba_iommu.c
1326+++ b/arch/ia64/hp/common/sba_iommu.c
1327@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1328 },
1329 };
1330
1331-extern struct dma_map_ops swiotlb_dma_ops;
1332+extern const struct dma_map_ops swiotlb_dma_ops;
1333
1334 static int __init
1335 sba_init(void)
1336@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1337
1338 __setup("sbapagesize=",sba_page_override);
1339
1340-struct dma_map_ops sba_dma_ops = {
1341+const struct dma_map_ops sba_dma_ops = {
1342 .alloc_coherent = sba_alloc_coherent,
1343 .free_coherent = sba_free_coherent,
1344 .map_page = sba_map_page,
1345diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1346index c69552b..c7122f4 100644
1347--- a/arch/ia64/ia32/binfmt_elf32.c
1348+++ b/arch/ia64/ia32/binfmt_elf32.c
1349@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1350
1351 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1352
1353+#ifdef CONFIG_PAX_ASLR
1354+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1355+
1356+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1357+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1358+#endif
1359+
1360 /* Ugly but avoids duplication */
1361 #include "../../../fs/binfmt_elf.c"
1362
1363diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1364index 0f15349..26b3429 100644
1365--- a/arch/ia64/ia32/ia32priv.h
1366+++ b/arch/ia64/ia32/ia32priv.h
1367@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1368 #define ELF_DATA ELFDATA2LSB
1369 #define ELF_ARCH EM_386
1370
1371-#define IA32_STACK_TOP IA32_PAGE_OFFSET
1372+#ifdef CONFIG_PAX_RANDUSTACK
1373+#define __IA32_DELTA_STACK (current->mm->delta_stack)
1374+#else
1375+#define __IA32_DELTA_STACK 0UL
1376+#endif
1377+
1378+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1379+
1380 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1381 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1382
1383diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1384index 8d3c79c..71b3af6 100644
1385--- a/arch/ia64/include/asm/dma-mapping.h
1386+++ b/arch/ia64/include/asm/dma-mapping.h
1387@@ -12,7 +12,7 @@
1388
1389 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1390
1391-extern struct dma_map_ops *dma_ops;
1392+extern const struct dma_map_ops *dma_ops;
1393 extern struct ia64_machine_vector ia64_mv;
1394 extern void set_iommu_machvec(void);
1395
1396@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1397 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1398 dma_addr_t *daddr, gfp_t gfp)
1399 {
1400- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1401+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1402 void *caddr;
1403
1404 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1405@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1406 static inline void dma_free_coherent(struct device *dev, size_t size,
1407 void *caddr, dma_addr_t daddr)
1408 {
1409- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1410+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1411 debug_dma_free_coherent(dev, size, caddr, daddr);
1412 ops->free_coherent(dev, size, caddr, daddr);
1413 }
1414@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1415
1416 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1417 {
1418- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1419+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1420 return ops->mapping_error(dev, daddr);
1421 }
1422
1423 static inline int dma_supported(struct device *dev, u64 mask)
1424 {
1425- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1426+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1427 return ops->dma_supported(dev, mask);
1428 }
1429
1430diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1431index 86eddee..b116bb4 100644
1432--- a/arch/ia64/include/asm/elf.h
1433+++ b/arch/ia64/include/asm/elf.h
1434@@ -43,6 +43,13 @@
1435 */
1436 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1437
1438+#ifdef CONFIG_PAX_ASLR
1439+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1440+
1441+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1442+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1443+#endif
1444+
1445 #define PT_IA_64_UNWIND 0x70000001
1446
1447 /* IA-64 relocations: */
1448diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1449index 367d299..9ad4279 100644
1450--- a/arch/ia64/include/asm/machvec.h
1451+++ b/arch/ia64/include/asm/machvec.h
1452@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1453 /* DMA-mapping interface: */
1454 typedef void ia64_mv_dma_init (void);
1455 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1456-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1457+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1458
1459 /*
1460 * WARNING: The legacy I/O space is _architected_. Platforms are
1461@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1462 # endif /* CONFIG_IA64_GENERIC */
1463
1464 extern void swiotlb_dma_init(void);
1465-extern struct dma_map_ops *dma_get_ops(struct device *);
1466+extern const struct dma_map_ops *dma_get_ops(struct device *);
1467
1468 /*
1469 * Define default versions so we can extend machvec for new platforms without having
1470diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1471index 8840a69..cdb63d9 100644
1472--- a/arch/ia64/include/asm/pgtable.h
1473+++ b/arch/ia64/include/asm/pgtable.h
1474@@ -12,7 +12,7 @@
1475 * David Mosberger-Tang <davidm@hpl.hp.com>
1476 */
1477
1478-
1479+#include <linux/const.h>
1480 #include <asm/mman.h>
1481 #include <asm/page.h>
1482 #include <asm/processor.h>
1483@@ -143,6 +143,17 @@
1484 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1485 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1486 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1487+
1488+#ifdef CONFIG_PAX_PAGEEXEC
1489+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1490+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1491+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1492+#else
1493+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1494+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1495+# define PAGE_COPY_NOEXEC PAGE_COPY
1496+#endif
1497+
1498 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1499 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1500 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1501diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1502index 239ecdc..f94170e 100644
1503--- a/arch/ia64/include/asm/spinlock.h
1504+++ b/arch/ia64/include/asm/spinlock.h
1505@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1506 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1507
1508 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1509- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1510+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1511 }
1512
1513 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1514diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1515index 449c8c0..432a3d2 100644
1516--- a/arch/ia64/include/asm/uaccess.h
1517+++ b/arch/ia64/include/asm/uaccess.h
1518@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1519 const void *__cu_from = (from); \
1520 long __cu_len = (n); \
1521 \
1522- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1523+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1524 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1525 __cu_len; \
1526 })
1527@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1528 long __cu_len = (n); \
1529 \
1530 __chk_user_ptr(__cu_from); \
1531- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1532+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1533 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1534 __cu_len; \
1535 })
1536diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1537index f2c1600..969398a 100644
1538--- a/arch/ia64/kernel/dma-mapping.c
1539+++ b/arch/ia64/kernel/dma-mapping.c
1540@@ -3,7 +3,7 @@
1541 /* Set this to 1 if there is a HW IOMMU in the system */
1542 int iommu_detected __read_mostly;
1543
1544-struct dma_map_ops *dma_ops;
1545+const struct dma_map_ops *dma_ops;
1546 EXPORT_SYMBOL(dma_ops);
1547
1548 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1549@@ -16,7 +16,7 @@ static int __init dma_init(void)
1550 }
1551 fs_initcall(dma_init);
1552
1553-struct dma_map_ops *dma_get_ops(struct device *dev)
1554+const struct dma_map_ops *dma_get_ops(struct device *dev)
1555 {
1556 return dma_ops;
1557 }
1558diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1559index 1481b0a..e7d38ff 100644
1560--- a/arch/ia64/kernel/module.c
1561+++ b/arch/ia64/kernel/module.c
1562@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1563 void
1564 module_free (struct module *mod, void *module_region)
1565 {
1566- if (mod && mod->arch.init_unw_table &&
1567- module_region == mod->module_init) {
1568+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1569 unw_remove_unwind_table(mod->arch.init_unw_table);
1570 mod->arch.init_unw_table = NULL;
1571 }
1572@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1573 }
1574
1575 static inline int
1576+in_init_rx (const struct module *mod, uint64_t addr)
1577+{
1578+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1579+}
1580+
1581+static inline int
1582+in_init_rw (const struct module *mod, uint64_t addr)
1583+{
1584+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1585+}
1586+
1587+static inline int
1588 in_init (const struct module *mod, uint64_t addr)
1589 {
1590- return addr - (uint64_t) mod->module_init < mod->init_size;
1591+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1592+}
1593+
1594+static inline int
1595+in_core_rx (const struct module *mod, uint64_t addr)
1596+{
1597+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1598+}
1599+
1600+static inline int
1601+in_core_rw (const struct module *mod, uint64_t addr)
1602+{
1603+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1604 }
1605
1606 static inline int
1607 in_core (const struct module *mod, uint64_t addr)
1608 {
1609- return addr - (uint64_t) mod->module_core < mod->core_size;
1610+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1611 }
1612
1613 static inline int
1614@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1615 break;
1616
1617 case RV_BDREL:
1618- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1619+ if (in_init_rx(mod, val))
1620+ val -= (uint64_t) mod->module_init_rx;
1621+ else if (in_init_rw(mod, val))
1622+ val -= (uint64_t) mod->module_init_rw;
1623+ else if (in_core_rx(mod, val))
1624+ val -= (uint64_t) mod->module_core_rx;
1625+ else if (in_core_rw(mod, val))
1626+ val -= (uint64_t) mod->module_core_rw;
1627 break;
1628
1629 case RV_LTV:
1630@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1631 * addresses have been selected...
1632 */
1633 uint64_t gp;
1634- if (mod->core_size > MAX_LTOFF)
1635+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1636 /*
1637 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1638 * at the end of the module.
1639 */
1640- gp = mod->core_size - MAX_LTOFF / 2;
1641+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1642 else
1643- gp = mod->core_size / 2;
1644- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1645+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1646+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1647 mod->arch.gp = gp;
1648 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1649 }
1650diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1651index f6b1ff0..de773fb 100644
1652--- a/arch/ia64/kernel/pci-dma.c
1653+++ b/arch/ia64/kernel/pci-dma.c
1654@@ -43,7 +43,7 @@ struct device fallback_dev = {
1655 .dma_mask = &fallback_dev.coherent_dma_mask,
1656 };
1657
1658-extern struct dma_map_ops intel_dma_ops;
1659+extern const struct dma_map_ops intel_dma_ops;
1660
1661 static int __init pci_iommu_init(void)
1662 {
1663@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1664 }
1665 EXPORT_SYMBOL(iommu_dma_supported);
1666
1667+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1668+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1669+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1670+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1671+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1672+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1673+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1674+
1675+static const struct dma_map_ops intel_iommu_dma_ops = {
1676+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1677+ .alloc_coherent = intel_alloc_coherent,
1678+ .free_coherent = intel_free_coherent,
1679+ .map_sg = intel_map_sg,
1680+ .unmap_sg = intel_unmap_sg,
1681+ .map_page = intel_map_page,
1682+ .unmap_page = intel_unmap_page,
1683+ .mapping_error = intel_mapping_error,
1684+
1685+ .sync_single_for_cpu = machvec_dma_sync_single,
1686+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1687+ .sync_single_for_device = machvec_dma_sync_single,
1688+ .sync_sg_for_device = machvec_dma_sync_sg,
1689+ .dma_supported = iommu_dma_supported,
1690+};
1691+
1692 void __init pci_iommu_alloc(void)
1693 {
1694- dma_ops = &intel_dma_ops;
1695-
1696- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1697- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1698- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1699- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1700- dma_ops->dma_supported = iommu_dma_supported;
1701+ dma_ops = &intel_iommu_dma_ops;
1702
1703 /*
1704 * The order of these functions is important for
1705diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1706index 285aae8..61dbab6 100644
1707--- a/arch/ia64/kernel/pci-swiotlb.c
1708+++ b/arch/ia64/kernel/pci-swiotlb.c
1709@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1710 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1711 }
1712
1713-struct dma_map_ops swiotlb_dma_ops = {
1714+const struct dma_map_ops swiotlb_dma_ops = {
1715 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1716 .free_coherent = swiotlb_free_coherent,
1717 .map_page = swiotlb_map_page,
1718diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1719index 609d500..7dde2a8 100644
1720--- a/arch/ia64/kernel/sys_ia64.c
1721+++ b/arch/ia64/kernel/sys_ia64.c
1722@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1723 if (REGION_NUMBER(addr) == RGN_HPAGE)
1724 addr = 0;
1725 #endif
1726+
1727+#ifdef CONFIG_PAX_RANDMMAP
1728+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1729+ addr = mm->free_area_cache;
1730+ else
1731+#endif
1732+
1733 if (!addr)
1734 addr = mm->free_area_cache;
1735
1736@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1737 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1738 /* At this point: (!vma || addr < vma->vm_end). */
1739 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1740- if (start_addr != TASK_UNMAPPED_BASE) {
1741+ if (start_addr != mm->mmap_base) {
1742 /* Start a new search --- just in case we missed some holes. */
1743- addr = TASK_UNMAPPED_BASE;
1744+ addr = mm->mmap_base;
1745 goto full_search;
1746 }
1747 return -ENOMEM;
1748 }
1749- if (!vma || addr + len <= vma->vm_start) {
1750+ if (check_heap_stack_gap(vma, addr, len)) {
1751 /* Remember the address where we stopped this search: */
1752 mm->free_area_cache = addr + len;
1753 return addr;
1754diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1755index 8f06035..b3a5818 100644
1756--- a/arch/ia64/kernel/topology.c
1757+++ b/arch/ia64/kernel/topology.c
1758@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1759 return ret;
1760 }
1761
1762-static struct sysfs_ops cache_sysfs_ops = {
1763+static const struct sysfs_ops cache_sysfs_ops = {
1764 .show = cache_show
1765 };
1766
1767diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1768index 0a0c77b..8e55a81 100644
1769--- a/arch/ia64/kernel/vmlinux.lds.S
1770+++ b/arch/ia64/kernel/vmlinux.lds.S
1771@@ -190,7 +190,7 @@ SECTIONS
1772 /* Per-cpu data: */
1773 . = ALIGN(PERCPU_PAGE_SIZE);
1774 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1775- __phys_per_cpu_start = __per_cpu_load;
1776+ __phys_per_cpu_start = per_cpu_load;
1777 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1778 * into percpu page size
1779 */
1780diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1781index 19261a9..1611b7a 100644
1782--- a/arch/ia64/mm/fault.c
1783+++ b/arch/ia64/mm/fault.c
1784@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1785 return pte_present(pte);
1786 }
1787
1788+#ifdef CONFIG_PAX_PAGEEXEC
1789+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1790+{
1791+ unsigned long i;
1792+
1793+ printk(KERN_ERR "PAX: bytes at PC: ");
1794+ for (i = 0; i < 8; i++) {
1795+ unsigned int c;
1796+ if (get_user(c, (unsigned int *)pc+i))
1797+ printk(KERN_CONT "???????? ");
1798+ else
1799+ printk(KERN_CONT "%08x ", c);
1800+ }
1801+ printk("\n");
1802+}
1803+#endif
1804+
1805 void __kprobes
1806 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1807 {
1808@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1809 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1810 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1811
1812- if ((vma->vm_flags & mask) != mask)
1813+ if ((vma->vm_flags & mask) != mask) {
1814+
1815+#ifdef CONFIG_PAX_PAGEEXEC
1816+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1817+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1818+ goto bad_area;
1819+
1820+ up_read(&mm->mmap_sem);
1821+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1822+ do_group_exit(SIGKILL);
1823+ }
1824+#endif
1825+
1826 goto bad_area;
1827
1828+ }
1829+
1830 survive:
1831 /*
1832 * If for any reason at all we couldn't handle the fault, make
1833diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1834index b0f6157..a082bbc 100644
1835--- a/arch/ia64/mm/hugetlbpage.c
1836+++ b/arch/ia64/mm/hugetlbpage.c
1837@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1838 /* At this point: (!vmm || addr < vmm->vm_end). */
1839 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1840 return -ENOMEM;
1841- if (!vmm || (addr + len) <= vmm->vm_start)
1842+ if (check_heap_stack_gap(vmm, addr, len))
1843 return addr;
1844 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1845 }
1846diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1847index 1857766..05cc6a3 100644
1848--- a/arch/ia64/mm/init.c
1849+++ b/arch/ia64/mm/init.c
1850@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1851 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1852 vma->vm_end = vma->vm_start + PAGE_SIZE;
1853 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1854+
1855+#ifdef CONFIG_PAX_PAGEEXEC
1856+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1857+ vma->vm_flags &= ~VM_EXEC;
1858+
1859+#ifdef CONFIG_PAX_MPROTECT
1860+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1861+ vma->vm_flags &= ~VM_MAYEXEC;
1862+#endif
1863+
1864+ }
1865+#endif
1866+
1867 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1868 down_write(&current->mm->mmap_sem);
1869 if (insert_vm_struct(current->mm, vma)) {
1870diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1871index 98b6849..8046766 100644
1872--- a/arch/ia64/sn/pci/pci_dma.c
1873+++ b/arch/ia64/sn/pci/pci_dma.c
1874@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1875 return ret;
1876 }
1877
1878-static struct dma_map_ops sn_dma_ops = {
1879+static const struct dma_map_ops sn_dma_ops = {
1880 .alloc_coherent = sn_dma_alloc_coherent,
1881 .free_coherent = sn_dma_free_coherent,
1882 .map_page = sn_dma_map_page,
1883diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1884index 82abd15..d95ae5d 100644
1885--- a/arch/m32r/lib/usercopy.c
1886+++ b/arch/m32r/lib/usercopy.c
1887@@ -14,6 +14,9 @@
1888 unsigned long
1889 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1890 {
1891+ if ((long)n < 0)
1892+ return n;
1893+
1894 prefetch(from);
1895 if (access_ok(VERIFY_WRITE, to, n))
1896 __copy_user(to,from,n);
1897@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1898 unsigned long
1899 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1900 {
1901+ if ((long)n < 0)
1902+ return n;
1903+
1904 prefetchw(to);
1905 if (access_ok(VERIFY_READ, from, n))
1906 __copy_user_zeroing(to,from,n);
1907diff --git a/arch/mips/Makefile b/arch/mips/Makefile
1908index 77f5021..2b1db8a 100644
1909--- a/arch/mips/Makefile
1910+++ b/arch/mips/Makefile
1911@@ -51,6 +51,8 @@ endif
1912 cflags-y := -ffunction-sections
1913 cflags-y += $(call cc-option, -mno-check-zero-division)
1914
1915+cflags-y += -Wno-sign-compare -Wno-extra
1916+
1917 ifdef CONFIG_32BIT
1918 ld-emul = $(32bit-emul)
1919 vmlinux-32 = vmlinux
1920diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
1921index 632f986..fd0378d 100644
1922--- a/arch/mips/alchemy/devboards/pm.c
1923+++ b/arch/mips/alchemy/devboards/pm.c
1924@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1925
1926 }
1927
1928-static struct platform_suspend_ops db1x_pm_ops = {
1929+static const struct platform_suspend_ops db1x_pm_ops = {
1930 .valid = suspend_valid_only_mem,
1931 .begin = db1x_pm_begin,
1932 .enter = db1x_pm_enter,
1933diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1934index 7990694..4e93acf 100644
1935--- a/arch/mips/include/asm/elf.h
1936+++ b/arch/mips/include/asm/elf.h
1937@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
1938 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1939 #endif
1940
1941+#ifdef CONFIG_PAX_ASLR
1942+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1943+
1944+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1945+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1946+#endif
1947+
1948 #endif /* _ASM_ELF_H */
1949diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1950index f266295..627cfff 100644
1951--- a/arch/mips/include/asm/page.h
1952+++ b/arch/mips/include/asm/page.h
1953@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1954 #ifdef CONFIG_CPU_MIPS32
1955 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1956 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1957- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1958+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1959 #else
1960 typedef struct { unsigned long long pte; } pte_t;
1961 #define pte_val(x) ((x).pte)
1962diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
1963index e48c0bf..f3acf65 100644
1964--- a/arch/mips/include/asm/reboot.h
1965+++ b/arch/mips/include/asm/reboot.h
1966@@ -9,7 +9,7 @@
1967 #ifndef _ASM_REBOOT_H
1968 #define _ASM_REBOOT_H
1969
1970-extern void (*_machine_restart)(char *command);
1971-extern void (*_machine_halt)(void);
1972+extern void (*__noreturn _machine_restart)(char *command);
1973+extern void (*__noreturn _machine_halt)(void);
1974
1975 #endif /* _ASM_REBOOT_H */
1976diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1977index 83b5509..9fa24a23 100644
1978--- a/arch/mips/include/asm/system.h
1979+++ b/arch/mips/include/asm/system.h
1980@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1981 */
1982 #define __ARCH_WANT_UNLOCKED_CTXSW
1983
1984-extern unsigned long arch_align_stack(unsigned long sp);
1985+#define arch_align_stack(x) ((x) & ~0xfUL)
1986
1987 #endif /* _ASM_SYSTEM_H */
1988diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1989index 9fdd8bc..fcf9d68 100644
1990--- a/arch/mips/kernel/binfmt_elfn32.c
1991+++ b/arch/mips/kernel/binfmt_elfn32.c
1992@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1993 #undef ELF_ET_DYN_BASE
1994 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1995
1996+#ifdef CONFIG_PAX_ASLR
1997+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1998+
1999+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2000+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2001+#endif
2002+
2003 #include <asm/processor.h>
2004 #include <linux/module.h>
2005 #include <linux/elfcore.h>
2006diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2007index ff44823..cf0b48a 100644
2008--- a/arch/mips/kernel/binfmt_elfo32.c
2009+++ b/arch/mips/kernel/binfmt_elfo32.c
2010@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2011 #undef ELF_ET_DYN_BASE
2012 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2013
2014+#ifdef CONFIG_PAX_ASLR
2015+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2016+
2017+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2018+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2019+#endif
2020+
2021 #include <asm/processor.h>
2022
2023 /*
2024diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2025index 50c9bb8..efdd5f8 100644
2026--- a/arch/mips/kernel/kgdb.c
2027+++ b/arch/mips/kernel/kgdb.c
2028@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2029 return -1;
2030 }
2031
2032+/* cannot be const */
2033 struct kgdb_arch arch_kgdb_ops;
2034
2035 /*
2036diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2037index f3d73e1..bb3f57a 100644
2038--- a/arch/mips/kernel/process.c
2039+++ b/arch/mips/kernel/process.c
2040@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2041 out:
2042 return pc;
2043 }
2044-
2045-/*
2046- * Don't forget that the stack pointer must be aligned on a 8 bytes
2047- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2048- */
2049-unsigned long arch_align_stack(unsigned long sp)
2050-{
2051- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2052- sp -= get_random_int() & ~PAGE_MASK;
2053-
2054- return sp & ALMASK;
2055-}
2056diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2057index 060563a..7fbf310 100644
2058--- a/arch/mips/kernel/reset.c
2059+++ b/arch/mips/kernel/reset.c
2060@@ -19,8 +19,8 @@
2061 * So handle all using function pointers to machine specific
2062 * functions.
2063 */
2064-void (*_machine_restart)(char *command);
2065-void (*_machine_halt)(void);
2066+void (*__noreturn _machine_restart)(char *command);
2067+void (*__noreturn _machine_halt)(void);
2068 void (*pm_power_off)(void);
2069
2070 EXPORT_SYMBOL(pm_power_off);
2071@@ -29,16 +29,19 @@ void machine_restart(char *command)
2072 {
2073 if (_machine_restart)
2074 _machine_restart(command);
2075+ BUG();
2076 }
2077
2078 void machine_halt(void)
2079 {
2080 if (_machine_halt)
2081 _machine_halt();
2082+ BUG();
2083 }
2084
2085 void machine_power_off(void)
2086 {
2087 if (pm_power_off)
2088 pm_power_off();
2089+ BUG();
2090 }
2091diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2092index 3f7f466..3abe0b5 100644
2093--- a/arch/mips/kernel/syscall.c
2094+++ b/arch/mips/kernel/syscall.c
2095@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2096 do_color_align = 0;
2097 if (filp || (flags & MAP_SHARED))
2098 do_color_align = 1;
2099+
2100+#ifdef CONFIG_PAX_RANDMMAP
2101+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2102+#endif
2103+
2104 if (addr) {
2105 if (do_color_align)
2106 addr = COLOUR_ALIGN(addr, pgoff);
2107 else
2108 addr = PAGE_ALIGN(addr);
2109 vmm = find_vma(current->mm, addr);
2110- if (task_size - len >= addr &&
2111- (!vmm || addr + len <= vmm->vm_start))
2112+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2113 return addr;
2114 }
2115- addr = TASK_UNMAPPED_BASE;
2116+ addr = current->mm->mmap_base;
2117 if (do_color_align)
2118 addr = COLOUR_ALIGN(addr, pgoff);
2119 else
2120@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2121 /* At this point: (!vmm || addr < vmm->vm_end). */
2122 if (task_size - len < addr)
2123 return -ENOMEM;
2124- if (!vmm || addr + len <= vmm->vm_start)
2125+ if (check_heap_stack_gap(vmm, addr, len))
2126 return addr;
2127 addr = vmm->vm_end;
2128 if (do_color_align)
2129diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2130index e97a7a2..f18f5b0 100644
2131--- a/arch/mips/mm/fault.c
2132+++ b/arch/mips/mm/fault.c
2133@@ -26,6 +26,23 @@
2134 #include <asm/ptrace.h>
2135 #include <asm/highmem.h> /* For VMALLOC_END */
2136
2137+#ifdef CONFIG_PAX_PAGEEXEC
2138+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2139+{
2140+ unsigned long i;
2141+
2142+ printk(KERN_ERR "PAX: bytes at PC: ");
2143+ for (i = 0; i < 5; i++) {
2144+ unsigned int c;
2145+ if (get_user(c, (unsigned int *)pc+i))
2146+ printk(KERN_CONT "???????? ");
2147+ else
2148+ printk(KERN_CONT "%08x ", c);
2149+ }
2150+ printk("\n");
2151+}
2152+#endif
2153+
2154 /*
2155 * This routine handles page faults. It determines the address,
2156 * and the problem, and then passes it off to one of the appropriate
2157diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2158index 9c802eb..0592e41 100644
2159--- a/arch/parisc/include/asm/elf.h
2160+++ b/arch/parisc/include/asm/elf.h
2161@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2162
2163 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2164
2165+#ifdef CONFIG_PAX_ASLR
2166+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2167+
2168+#define PAX_DELTA_MMAP_LEN 16
2169+#define PAX_DELTA_STACK_LEN 16
2170+#endif
2171+
2172 /* This yields a mask that user programs can use to figure out what
2173 instruction set this CPU supports. This could be done in user space,
2174 but it's not easy, and we've already done it here. */
2175diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2176index a27d2e2..18fd845 100644
2177--- a/arch/parisc/include/asm/pgtable.h
2178+++ b/arch/parisc/include/asm/pgtable.h
2179@@ -207,6 +207,17 @@
2180 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2181 #define PAGE_COPY PAGE_EXECREAD
2182 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2183+
2184+#ifdef CONFIG_PAX_PAGEEXEC
2185+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2186+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2187+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2188+#else
2189+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2190+# define PAGE_COPY_NOEXEC PAGE_COPY
2191+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2192+#endif
2193+
2194 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2195 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2196 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2197diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2198index 2120746..8d70a5e 100644
2199--- a/arch/parisc/kernel/module.c
2200+++ b/arch/parisc/kernel/module.c
2201@@ -95,16 +95,38 @@
2202
2203 /* three functions to determine where in the module core
2204 * or init pieces the location is */
2205+static inline int in_init_rx(struct module *me, void *loc)
2206+{
2207+ return (loc >= me->module_init_rx &&
2208+ loc < (me->module_init_rx + me->init_size_rx));
2209+}
2210+
2211+static inline int in_init_rw(struct module *me, void *loc)
2212+{
2213+ return (loc >= me->module_init_rw &&
2214+ loc < (me->module_init_rw + me->init_size_rw));
2215+}
2216+
2217 static inline int in_init(struct module *me, void *loc)
2218 {
2219- return (loc >= me->module_init &&
2220- loc <= (me->module_init + me->init_size));
2221+ return in_init_rx(me, loc) || in_init_rw(me, loc);
2222+}
2223+
2224+static inline int in_core_rx(struct module *me, void *loc)
2225+{
2226+ return (loc >= me->module_core_rx &&
2227+ loc < (me->module_core_rx + me->core_size_rx));
2228+}
2229+
2230+static inline int in_core_rw(struct module *me, void *loc)
2231+{
2232+ return (loc >= me->module_core_rw &&
2233+ loc < (me->module_core_rw + me->core_size_rw));
2234 }
2235
2236 static inline int in_core(struct module *me, void *loc)
2237 {
2238- return (loc >= me->module_core &&
2239- loc <= (me->module_core + me->core_size));
2240+ return in_core_rx(me, loc) || in_core_rw(me, loc);
2241 }
2242
2243 static inline int in_local(struct module *me, void *loc)
2244@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2245 }
2246
2247 /* align things a bit */
2248- me->core_size = ALIGN(me->core_size, 16);
2249- me->arch.got_offset = me->core_size;
2250- me->core_size += gots * sizeof(struct got_entry);
2251+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2252+ me->arch.got_offset = me->core_size_rw;
2253+ me->core_size_rw += gots * sizeof(struct got_entry);
2254
2255- me->core_size = ALIGN(me->core_size, 16);
2256- me->arch.fdesc_offset = me->core_size;
2257- me->core_size += fdescs * sizeof(Elf_Fdesc);
2258+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2259+ me->arch.fdesc_offset = me->core_size_rw;
2260+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2261
2262 me->arch.got_max = gots;
2263 me->arch.fdesc_max = fdescs;
2264@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2265
2266 BUG_ON(value == 0);
2267
2268- got = me->module_core + me->arch.got_offset;
2269+ got = me->module_core_rw + me->arch.got_offset;
2270 for (i = 0; got[i].addr; i++)
2271 if (got[i].addr == value)
2272 goto out;
2273@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2274 #ifdef CONFIG_64BIT
2275 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2276 {
2277- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2278+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2279
2280 if (!value) {
2281 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2282@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2283
2284 /* Create new one */
2285 fdesc->addr = value;
2286- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2287+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2288 return (Elf_Addr)fdesc;
2289 }
2290 #endif /* CONFIG_64BIT */
2291@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2292
2293 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2294 end = table + sechdrs[me->arch.unwind_section].sh_size;
2295- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2296+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2297
2298 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2299 me->arch.unwind_section, table, end, gp);
2300diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2301index 9147391..f3d949a 100644
2302--- a/arch/parisc/kernel/sys_parisc.c
2303+++ b/arch/parisc/kernel/sys_parisc.c
2304@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2305 /* At this point: (!vma || addr < vma->vm_end). */
2306 if (TASK_SIZE - len < addr)
2307 return -ENOMEM;
2308- if (!vma || addr + len <= vma->vm_start)
2309+ if (check_heap_stack_gap(vma, addr, len))
2310 return addr;
2311 addr = vma->vm_end;
2312 }
2313@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2314 /* At this point: (!vma || addr < vma->vm_end). */
2315 if (TASK_SIZE - len < addr)
2316 return -ENOMEM;
2317- if (!vma || addr + len <= vma->vm_start)
2318+ if (check_heap_stack_gap(vma, addr, len))
2319 return addr;
2320 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2321 if (addr < vma->vm_end) /* handle wraparound */
2322@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2323 if (flags & MAP_FIXED)
2324 return addr;
2325 if (!addr)
2326- addr = TASK_UNMAPPED_BASE;
2327+ addr = current->mm->mmap_base;
2328
2329 if (filp) {
2330 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2331diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2332index 8b58bf0..7afff03 100644
2333--- a/arch/parisc/kernel/traps.c
2334+++ b/arch/parisc/kernel/traps.c
2335@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2336
2337 down_read(&current->mm->mmap_sem);
2338 vma = find_vma(current->mm,regs->iaoq[0]);
2339- if (vma && (regs->iaoq[0] >= vma->vm_start)
2340- && (vma->vm_flags & VM_EXEC)) {
2341-
2342+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2343 fault_address = regs->iaoq[0];
2344 fault_space = regs->iasq[0];
2345
2346diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2347index c6afbfc..c5839f6 100644
2348--- a/arch/parisc/mm/fault.c
2349+++ b/arch/parisc/mm/fault.c
2350@@ -15,6 +15,7 @@
2351 #include <linux/sched.h>
2352 #include <linux/interrupt.h>
2353 #include <linux/module.h>
2354+#include <linux/unistd.h>
2355
2356 #include <asm/uaccess.h>
2357 #include <asm/traps.h>
2358@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2359 static unsigned long
2360 parisc_acctyp(unsigned long code, unsigned int inst)
2361 {
2362- if (code == 6 || code == 16)
2363+ if (code == 6 || code == 7 || code == 16)
2364 return VM_EXEC;
2365
2366 switch (inst & 0xf0000000) {
2367@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2368 }
2369 #endif
2370
2371+#ifdef CONFIG_PAX_PAGEEXEC
2372+/*
2373+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2374+ *
2375+ * returns 1 when task should be killed
2376+ * 2 when rt_sigreturn trampoline was detected
2377+ * 3 when unpatched PLT trampoline was detected
2378+ */
2379+static int pax_handle_fetch_fault(struct pt_regs *regs)
2380+{
2381+
2382+#ifdef CONFIG_PAX_EMUPLT
2383+ int err;
2384+
2385+ do { /* PaX: unpatched PLT emulation */
2386+ unsigned int bl, depwi;
2387+
2388+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2389+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2390+
2391+ if (err)
2392+ break;
2393+
2394+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2395+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2396+
2397+ err = get_user(ldw, (unsigned int *)addr);
2398+ err |= get_user(bv, (unsigned int *)(addr+4));
2399+ err |= get_user(ldw2, (unsigned int *)(addr+8));
2400+
2401+ if (err)
2402+ break;
2403+
2404+ if (ldw == 0x0E801096U &&
2405+ bv == 0xEAC0C000U &&
2406+ ldw2 == 0x0E881095U)
2407+ {
2408+ unsigned int resolver, map;
2409+
2410+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2411+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2412+ if (err)
2413+ break;
2414+
2415+ regs->gr[20] = instruction_pointer(regs)+8;
2416+ regs->gr[21] = map;
2417+ regs->gr[22] = resolver;
2418+ regs->iaoq[0] = resolver | 3UL;
2419+ regs->iaoq[1] = regs->iaoq[0] + 4;
2420+ return 3;
2421+ }
2422+ }
2423+ } while (0);
2424+#endif
2425+
2426+#ifdef CONFIG_PAX_EMUTRAMP
2427+
2428+#ifndef CONFIG_PAX_EMUSIGRT
2429+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2430+ return 1;
2431+#endif
2432+
2433+ do { /* PaX: rt_sigreturn emulation */
2434+ unsigned int ldi1, ldi2, bel, nop;
2435+
2436+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2437+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2438+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2439+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2440+
2441+ if (err)
2442+ break;
2443+
2444+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2445+ ldi2 == 0x3414015AU &&
2446+ bel == 0xE4008200U &&
2447+ nop == 0x08000240U)
2448+ {
2449+ regs->gr[25] = (ldi1 & 2) >> 1;
2450+ regs->gr[20] = __NR_rt_sigreturn;
2451+ regs->gr[31] = regs->iaoq[1] + 16;
2452+ regs->sr[0] = regs->iasq[1];
2453+ regs->iaoq[0] = 0x100UL;
2454+ regs->iaoq[1] = regs->iaoq[0] + 4;
2455+ regs->iasq[0] = regs->sr[2];
2456+ regs->iasq[1] = regs->sr[2];
2457+ return 2;
2458+ }
2459+ } while (0);
2460+#endif
2461+
2462+ return 1;
2463+}
2464+
2465+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2466+{
2467+ unsigned long i;
2468+
2469+ printk(KERN_ERR "PAX: bytes at PC: ");
2470+ for (i = 0; i < 5; i++) {
2471+ unsigned int c;
2472+ if (get_user(c, (unsigned int *)pc+i))
2473+ printk(KERN_CONT "???????? ");
2474+ else
2475+ printk(KERN_CONT "%08x ", c);
2476+ }
2477+ printk("\n");
2478+}
2479+#endif
2480+
2481 int fixup_exception(struct pt_regs *regs)
2482 {
2483 const struct exception_table_entry *fix;
2484@@ -192,8 +303,33 @@ good_area:
2485
2486 acc_type = parisc_acctyp(code,regs->iir);
2487
2488- if ((vma->vm_flags & acc_type) != acc_type)
2489+ if ((vma->vm_flags & acc_type) != acc_type) {
2490+
2491+#ifdef CONFIG_PAX_PAGEEXEC
2492+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2493+ (address & ~3UL) == instruction_pointer(regs))
2494+ {
2495+ up_read(&mm->mmap_sem);
2496+ switch (pax_handle_fetch_fault(regs)) {
2497+
2498+#ifdef CONFIG_PAX_EMUPLT
2499+ case 3:
2500+ return;
2501+#endif
2502+
2503+#ifdef CONFIG_PAX_EMUTRAMP
2504+ case 2:
2505+ return;
2506+#endif
2507+
2508+ }
2509+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2510+ do_group_exit(SIGKILL);
2511+ }
2512+#endif
2513+
2514 goto bad_area;
2515+ }
2516
2517 /*
2518 * If for any reason at all we couldn't handle the fault, make
2519diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2520index c107b74..409dc0f 100644
2521--- a/arch/powerpc/Makefile
2522+++ b/arch/powerpc/Makefile
2523@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2524 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2525 CPP = $(CC) -E $(KBUILD_CFLAGS)
2526
2527+cflags-y += -Wno-sign-compare -Wno-extra
2528+
2529 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2530
2531 ifeq ($(CONFIG_PPC64),y)
2532diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2533index 6d94d27..50d4cad 100644
2534--- a/arch/powerpc/include/asm/device.h
2535+++ b/arch/powerpc/include/asm/device.h
2536@@ -14,7 +14,7 @@ struct dev_archdata {
2537 struct device_node *of_node;
2538
2539 /* DMA operations on that device */
2540- struct dma_map_ops *dma_ops;
2541+ const struct dma_map_ops *dma_ops;
2542
2543 /*
2544 * When an iommu is in use, dma_data is used as a ptr to the base of the
2545diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2546index e281dae..2b8a784 100644
2547--- a/arch/powerpc/include/asm/dma-mapping.h
2548+++ b/arch/powerpc/include/asm/dma-mapping.h
2549@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2550 #ifdef CONFIG_PPC64
2551 extern struct dma_map_ops dma_iommu_ops;
2552 #endif
2553-extern struct dma_map_ops dma_direct_ops;
2554+extern const struct dma_map_ops dma_direct_ops;
2555
2556-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2557+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2558 {
2559 /* We don't handle the NULL dev case for ISA for now. We could
2560 * do it via an out of line call but it is not needed for now. The
2561@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2562 return dev->archdata.dma_ops;
2563 }
2564
2565-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2566+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2567 {
2568 dev->archdata.dma_ops = ops;
2569 }
2570@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2571
2572 static inline int dma_supported(struct device *dev, u64 mask)
2573 {
2574- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2575+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2576
2577 if (unlikely(dma_ops == NULL))
2578 return 0;
2579@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2580
2581 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2582 {
2583- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2584+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2585
2586 if (unlikely(dma_ops == NULL))
2587 return -EIO;
2588@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2589 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2590 dma_addr_t *dma_handle, gfp_t flag)
2591 {
2592- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2593+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2594 void *cpu_addr;
2595
2596 BUG_ON(!dma_ops);
2597@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2598 static inline void dma_free_coherent(struct device *dev, size_t size,
2599 void *cpu_addr, dma_addr_t dma_handle)
2600 {
2601- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2602+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2603
2604 BUG_ON(!dma_ops);
2605
2606@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2607
2608 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2609 {
2610- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2611+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2612
2613 if (dma_ops->mapping_error)
2614 return dma_ops->mapping_error(dev, dma_addr);
2615diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2616index 5698502..5db093c 100644
2617--- a/arch/powerpc/include/asm/elf.h
2618+++ b/arch/powerpc/include/asm/elf.h
2619@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2620 the loader. We need to make sure that it is out of the way of the program
2621 that it will "exec", and that there is sufficient room for the brk. */
2622
2623-extern unsigned long randomize_et_dyn(unsigned long base);
2624-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2625+#define ELF_ET_DYN_BASE (0x20000000)
2626+
2627+#ifdef CONFIG_PAX_ASLR
2628+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2629+
2630+#ifdef __powerpc64__
2631+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2632+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2633+#else
2634+#define PAX_DELTA_MMAP_LEN 15
2635+#define PAX_DELTA_STACK_LEN 15
2636+#endif
2637+#endif
2638
2639 /*
2640 * Our registers are always unsigned longs, whether we're a 32 bit
2641@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2642 (0x7ff >> (PAGE_SHIFT - 12)) : \
2643 (0x3ffff >> (PAGE_SHIFT - 12)))
2644
2645-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2646-#define arch_randomize_brk arch_randomize_brk
2647-
2648 #endif /* __KERNEL__ */
2649
2650 /*
2651diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2652index edfc980..1766f59 100644
2653--- a/arch/powerpc/include/asm/iommu.h
2654+++ b/arch/powerpc/include/asm/iommu.h
2655@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2656 extern void iommu_init_early_dart(void);
2657 extern void iommu_init_early_pasemi(void);
2658
2659+/* dma-iommu.c */
2660+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2661+
2662 #ifdef CONFIG_PCI
2663 extern void pci_iommu_init(void);
2664 extern void pci_direct_iommu_init(void);
2665diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2666index 9163695..5a00112 100644
2667--- a/arch/powerpc/include/asm/kmap_types.h
2668+++ b/arch/powerpc/include/asm/kmap_types.h
2669@@ -26,6 +26,7 @@ enum km_type {
2670 KM_SOFTIRQ1,
2671 KM_PPC_SYNC_PAGE,
2672 KM_PPC_SYNC_ICACHE,
2673+ KM_CLEARPAGE,
2674 KM_TYPE_NR
2675 };
2676
2677diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2678index ff24254..fe45b21 100644
2679--- a/arch/powerpc/include/asm/page.h
2680+++ b/arch/powerpc/include/asm/page.h
2681@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2682 * and needs to be executable. This means the whole heap ends
2683 * up being executable.
2684 */
2685-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2686- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2687+#define VM_DATA_DEFAULT_FLAGS32 \
2688+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2689+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2690
2691 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2692 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2693@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2694 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2695 #endif
2696
2697+#define ktla_ktva(addr) (addr)
2698+#define ktva_ktla(addr) (addr)
2699+
2700 #ifndef __ASSEMBLY__
2701
2702 #undef STRICT_MM_TYPECHECKS
2703diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2704index 3f17b83..1f9e766 100644
2705--- a/arch/powerpc/include/asm/page_64.h
2706+++ b/arch/powerpc/include/asm/page_64.h
2707@@ -180,15 +180,18 @@ do { \
2708 * stack by default, so in the absense of a PT_GNU_STACK program header
2709 * we turn execute permission off.
2710 */
2711-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2712- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2713+#define VM_STACK_DEFAULT_FLAGS32 \
2714+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2715+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2716
2717 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2718 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2719
2720+#ifndef CONFIG_PAX_PAGEEXEC
2721 #define VM_STACK_DEFAULT_FLAGS \
2722 (test_thread_flag(TIF_32BIT) ? \
2723 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2724+#endif
2725
2726 #include <asm-generic/getorder.h>
2727
2728diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2729index b5ea626..4030822 100644
2730--- a/arch/powerpc/include/asm/pci.h
2731+++ b/arch/powerpc/include/asm/pci.h
2732@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2733 }
2734
2735 #ifdef CONFIG_PCI
2736-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2737-extern struct dma_map_ops *get_pci_dma_ops(void);
2738+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2739+extern const struct dma_map_ops *get_pci_dma_ops(void);
2740 #else /* CONFIG_PCI */
2741 #define set_pci_dma_ops(d)
2742 #define get_pci_dma_ops() NULL
2743diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2744index 2a5da06..d65bea2 100644
2745--- a/arch/powerpc/include/asm/pgtable.h
2746+++ b/arch/powerpc/include/asm/pgtable.h
2747@@ -2,6 +2,7 @@
2748 #define _ASM_POWERPC_PGTABLE_H
2749 #ifdef __KERNEL__
2750
2751+#include <linux/const.h>
2752 #ifndef __ASSEMBLY__
2753 #include <asm/processor.h> /* For TASK_SIZE */
2754 #include <asm/mmu.h>
2755diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2756index 4aad413..85d86bf 100644
2757--- a/arch/powerpc/include/asm/pte-hash32.h
2758+++ b/arch/powerpc/include/asm/pte-hash32.h
2759@@ -21,6 +21,7 @@
2760 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2761 #define _PAGE_USER 0x004 /* usermode access allowed */
2762 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2763+#define _PAGE_EXEC _PAGE_GUARDED
2764 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2765 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2766 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2767diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2768index 8c34149..78f425a 100644
2769--- a/arch/powerpc/include/asm/ptrace.h
2770+++ b/arch/powerpc/include/asm/ptrace.h
2771@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2772 } while(0)
2773
2774 struct task_struct;
2775-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2776+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2777 extern int ptrace_put_reg(struct task_struct *task, int regno,
2778 unsigned long data);
2779
2780diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2781index 32a7c30..be3a8bb 100644
2782--- a/arch/powerpc/include/asm/reg.h
2783+++ b/arch/powerpc/include/asm/reg.h
2784@@ -191,6 +191,7 @@
2785 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2786 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2787 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2788+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2789 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2790 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2791 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2792diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2793index 8979d4c..d2fd0d3 100644
2794--- a/arch/powerpc/include/asm/swiotlb.h
2795+++ b/arch/powerpc/include/asm/swiotlb.h
2796@@ -13,7 +13,7 @@
2797
2798 #include <linux/swiotlb.h>
2799
2800-extern struct dma_map_ops swiotlb_dma_ops;
2801+extern const struct dma_map_ops swiotlb_dma_ops;
2802
2803 static inline void dma_mark_clean(void *addr, size_t size) {}
2804
2805diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2806index 094a12a..877a60a 100644
2807--- a/arch/powerpc/include/asm/system.h
2808+++ b/arch/powerpc/include/asm/system.h
2809@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2810 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2811 #endif
2812
2813-extern unsigned long arch_align_stack(unsigned long sp);
2814+#define arch_align_stack(x) ((x) & ~0xfUL)
2815
2816 /* Used in very early kernel initialization. */
2817 extern unsigned long reloc_offset(void);
2818diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2819index bd0fb84..a42a14b 100644
2820--- a/arch/powerpc/include/asm/uaccess.h
2821+++ b/arch/powerpc/include/asm/uaccess.h
2822@@ -13,6 +13,8 @@
2823 #define VERIFY_READ 0
2824 #define VERIFY_WRITE 1
2825
2826+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2827+
2828 /*
2829 * The fs value determines whether argument validity checking should be
2830 * performed or not. If get_fs() == USER_DS, checking is performed, with
2831@@ -327,52 +329,6 @@ do { \
2832 extern unsigned long __copy_tofrom_user(void __user *to,
2833 const void __user *from, unsigned long size);
2834
2835-#ifndef __powerpc64__
2836-
2837-static inline unsigned long copy_from_user(void *to,
2838- const void __user *from, unsigned long n)
2839-{
2840- unsigned long over;
2841-
2842- if (access_ok(VERIFY_READ, from, n))
2843- return __copy_tofrom_user((__force void __user *)to, from, n);
2844- if ((unsigned long)from < TASK_SIZE) {
2845- over = (unsigned long)from + n - TASK_SIZE;
2846- return __copy_tofrom_user((__force void __user *)to, from,
2847- n - over) + over;
2848- }
2849- return n;
2850-}
2851-
2852-static inline unsigned long copy_to_user(void __user *to,
2853- const void *from, unsigned long n)
2854-{
2855- unsigned long over;
2856-
2857- if (access_ok(VERIFY_WRITE, to, n))
2858- return __copy_tofrom_user(to, (__force void __user *)from, n);
2859- if ((unsigned long)to < TASK_SIZE) {
2860- over = (unsigned long)to + n - TASK_SIZE;
2861- return __copy_tofrom_user(to, (__force void __user *)from,
2862- n - over) + over;
2863- }
2864- return n;
2865-}
2866-
2867-#else /* __powerpc64__ */
2868-
2869-#define __copy_in_user(to, from, size) \
2870- __copy_tofrom_user((to), (from), (size))
2871-
2872-extern unsigned long copy_from_user(void *to, const void __user *from,
2873- unsigned long n);
2874-extern unsigned long copy_to_user(void __user *to, const void *from,
2875- unsigned long n);
2876-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2877- unsigned long n);
2878-
2879-#endif /* __powerpc64__ */
2880-
2881 static inline unsigned long __copy_from_user_inatomic(void *to,
2882 const void __user *from, unsigned long n)
2883 {
2884@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2885 if (ret == 0)
2886 return 0;
2887 }
2888+
2889+ if (!__builtin_constant_p(n))
2890+ check_object_size(to, n, false);
2891+
2892 return __copy_tofrom_user((__force void __user *)to, from, n);
2893 }
2894
2895@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2896 if (ret == 0)
2897 return 0;
2898 }
2899+
2900+ if (!__builtin_constant_p(n))
2901+ check_object_size(from, n, true);
2902+
2903 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2904 }
2905
2906@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2907 return __copy_to_user_inatomic(to, from, size);
2908 }
2909
2910+#ifndef __powerpc64__
2911+
2912+static inline unsigned long __must_check copy_from_user(void *to,
2913+ const void __user *from, unsigned long n)
2914+{
2915+ unsigned long over;
2916+
2917+ if ((long)n < 0)
2918+ return n;
2919+
2920+ if (access_ok(VERIFY_READ, from, n)) {
2921+ if (!__builtin_constant_p(n))
2922+ check_object_size(to, n, false);
2923+ return __copy_tofrom_user((__force void __user *)to, from, n);
2924+ }
2925+ if ((unsigned long)from < TASK_SIZE) {
2926+ over = (unsigned long)from + n - TASK_SIZE;
2927+ if (!__builtin_constant_p(n - over))
2928+ check_object_size(to, n - over, false);
2929+ return __copy_tofrom_user((__force void __user *)to, from,
2930+ n - over) + over;
2931+ }
2932+ return n;
2933+}
2934+
2935+static inline unsigned long __must_check copy_to_user(void __user *to,
2936+ const void *from, unsigned long n)
2937+{
2938+ unsigned long over;
2939+
2940+ if ((long)n < 0)
2941+ return n;
2942+
2943+ if (access_ok(VERIFY_WRITE, to, n)) {
2944+ if (!__builtin_constant_p(n))
2945+ check_object_size(from, n, true);
2946+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2947+ }
2948+ if ((unsigned long)to < TASK_SIZE) {
2949+ over = (unsigned long)to + n - TASK_SIZE;
2950+ if (!__builtin_constant_p(n))
2951+ check_object_size(from, n - over, true);
2952+ return __copy_tofrom_user(to, (__force void __user *)from,
2953+ n - over) + over;
2954+ }
2955+ return n;
2956+}
2957+
2958+#else /* __powerpc64__ */
2959+
2960+#define __copy_in_user(to, from, size) \
2961+ __copy_tofrom_user((to), (from), (size))
2962+
2963+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2964+{
2965+ if ((long)n < 0 || n > INT_MAX)
2966+ return n;
2967+
2968+ if (!__builtin_constant_p(n))
2969+ check_object_size(to, n, false);
2970+
2971+ if (likely(access_ok(VERIFY_READ, from, n)))
2972+ n = __copy_from_user(to, from, n);
2973+ else
2974+ memset(to, 0, n);
2975+ return n;
2976+}
2977+
2978+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2979+{
2980+ if ((long)n < 0 || n > INT_MAX)
2981+ return n;
2982+
2983+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2984+ if (!__builtin_constant_p(n))
2985+ check_object_size(from, n, true);
2986+ n = __copy_to_user(to, from, n);
2987+ }
2988+ return n;
2989+}
2990+
2991+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2992+ unsigned long n);
2993+
2994+#endif /* __powerpc64__ */
2995+
2996 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2997
2998 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2999diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3000index bb37b1d..01fe9ce 100644
3001--- a/arch/powerpc/kernel/cacheinfo.c
3002+++ b/arch/powerpc/kernel/cacheinfo.c
3003@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3004 &cache_assoc_attr,
3005 };
3006
3007-static struct sysfs_ops cache_index_ops = {
3008+static const struct sysfs_ops cache_index_ops = {
3009 .show = cache_index_show,
3010 };
3011
3012diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3013index 37771a5..648530c 100644
3014--- a/arch/powerpc/kernel/dma-iommu.c
3015+++ b/arch/powerpc/kernel/dma-iommu.c
3016@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3017 }
3018
3019 /* We support DMA to/from any memory page via the iommu */
3020-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3021+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3022 {
3023 struct iommu_table *tbl = get_iommu_table_base(dev);
3024
3025diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3026index e96cbbd..bdd6d41 100644
3027--- a/arch/powerpc/kernel/dma-swiotlb.c
3028+++ b/arch/powerpc/kernel/dma-swiotlb.c
3029@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3030 * map_page, and unmap_page on highmem, use normal dma_ops
3031 * for everything else.
3032 */
3033-struct dma_map_ops swiotlb_dma_ops = {
3034+const struct dma_map_ops swiotlb_dma_ops = {
3035 .alloc_coherent = dma_direct_alloc_coherent,
3036 .free_coherent = dma_direct_free_coherent,
3037 .map_sg = swiotlb_map_sg_attrs,
3038diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3039index 6215062..ebea59c 100644
3040--- a/arch/powerpc/kernel/dma.c
3041+++ b/arch/powerpc/kernel/dma.c
3042@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3043 }
3044 #endif
3045
3046-struct dma_map_ops dma_direct_ops = {
3047+const struct dma_map_ops dma_direct_ops = {
3048 .alloc_coherent = dma_direct_alloc_coherent,
3049 .free_coherent = dma_direct_free_coherent,
3050 .map_sg = dma_direct_map_sg,
3051diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3052index 24dcc0e..a300455 100644
3053--- a/arch/powerpc/kernel/exceptions-64e.S
3054+++ b/arch/powerpc/kernel/exceptions-64e.S
3055@@ -455,6 +455,7 @@ storage_fault_common:
3056 std r14,_DAR(r1)
3057 std r15,_DSISR(r1)
3058 addi r3,r1,STACK_FRAME_OVERHEAD
3059+ bl .save_nvgprs
3060 mr r4,r14
3061 mr r5,r15
3062 ld r14,PACA_EXGEN+EX_R14(r13)
3063@@ -464,8 +465,7 @@ storage_fault_common:
3064 cmpdi r3,0
3065 bne- 1f
3066 b .ret_from_except_lite
3067-1: bl .save_nvgprs
3068- mr r5,r3
3069+1: mr r5,r3
3070 addi r3,r1,STACK_FRAME_OVERHEAD
3071 ld r4,_DAR(r1)
3072 bl .bad_page_fault
3073diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3074index 1808876..9fd206a 100644
3075--- a/arch/powerpc/kernel/exceptions-64s.S
3076+++ b/arch/powerpc/kernel/exceptions-64s.S
3077@@ -818,10 +818,10 @@ handle_page_fault:
3078 11: ld r4,_DAR(r1)
3079 ld r5,_DSISR(r1)
3080 addi r3,r1,STACK_FRAME_OVERHEAD
3081+ bl .save_nvgprs
3082 bl .do_page_fault
3083 cmpdi r3,0
3084 beq+ 13f
3085- bl .save_nvgprs
3086 mr r5,r3
3087 addi r3,r1,STACK_FRAME_OVERHEAD
3088 lwz r4,_DAR(r1)
3089diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3090index a4c8b38..1b09ad9 100644
3091--- a/arch/powerpc/kernel/ibmebus.c
3092+++ b/arch/powerpc/kernel/ibmebus.c
3093@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3094 return 1;
3095 }
3096
3097-static struct dma_map_ops ibmebus_dma_ops = {
3098+static const struct dma_map_ops ibmebus_dma_ops = {
3099 .alloc_coherent = ibmebus_alloc_coherent,
3100 .free_coherent = ibmebus_free_coherent,
3101 .map_sg = ibmebus_map_sg,
3102diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3103index 641c74b..8339ad7 100644
3104--- a/arch/powerpc/kernel/kgdb.c
3105+++ b/arch/powerpc/kernel/kgdb.c
3106@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3107 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3108 return 0;
3109
3110- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3111+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3112 regs->nip += 4;
3113
3114 return 1;
3115@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3116 /*
3117 * Global data
3118 */
3119-struct kgdb_arch arch_kgdb_ops = {
3120+const struct kgdb_arch arch_kgdb_ops = {
3121 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3122 };
3123
3124diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3125index 477c663..4f50234 100644
3126--- a/arch/powerpc/kernel/module.c
3127+++ b/arch/powerpc/kernel/module.c
3128@@ -31,11 +31,24 @@
3129
3130 LIST_HEAD(module_bug_list);
3131
3132+#ifdef CONFIG_PAX_KERNEXEC
3133 void *module_alloc(unsigned long size)
3134 {
3135 if (size == 0)
3136 return NULL;
3137
3138+ return vmalloc(size);
3139+}
3140+
3141+void *module_alloc_exec(unsigned long size)
3142+#else
3143+void *module_alloc(unsigned long size)
3144+#endif
3145+
3146+{
3147+ if (size == 0)
3148+ return NULL;
3149+
3150 return vmalloc_exec(size);
3151 }
3152
3153@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3154 vfree(module_region);
3155 }
3156
3157+#ifdef CONFIG_PAX_KERNEXEC
3158+void module_free_exec(struct module *mod, void *module_region)
3159+{
3160+ module_free(mod, module_region);
3161+}
3162+#endif
3163+
3164 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3165 const Elf_Shdr *sechdrs,
3166 const char *name)
3167diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3168index f832773..0507238 100644
3169--- a/arch/powerpc/kernel/module_32.c
3170+++ b/arch/powerpc/kernel/module_32.c
3171@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3172 me->arch.core_plt_section = i;
3173 }
3174 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3175- printk("Module doesn't contain .plt or .init.plt sections.\n");
3176+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3177 return -ENOEXEC;
3178 }
3179
3180@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3181
3182 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3183 /* Init, or core PLT? */
3184- if (location >= mod->module_core
3185- && location < mod->module_core + mod->core_size)
3186+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3187+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3188 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3189- else
3190+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3191+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3192 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3193+ else {
3194+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3195+ return ~0UL;
3196+ }
3197
3198 /* Find this entry, or if that fails, the next avail. entry */
3199 while (entry->jump[0]) {
3200diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3201index cadbed6..b9bbb00 100644
3202--- a/arch/powerpc/kernel/pci-common.c
3203+++ b/arch/powerpc/kernel/pci-common.c
3204@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3205 unsigned int ppc_pci_flags = 0;
3206
3207
3208-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3209+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3210
3211-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3212+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3213 {
3214 pci_dma_ops = dma_ops;
3215 }
3216
3217-struct dma_map_ops *get_pci_dma_ops(void)
3218+const struct dma_map_ops *get_pci_dma_ops(void)
3219 {
3220 return pci_dma_ops;
3221 }
3222diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3223index 7b816da..8d5c277 100644
3224--- a/arch/powerpc/kernel/process.c
3225+++ b/arch/powerpc/kernel/process.c
3226@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3227 * Lookup NIP late so we have the best change of getting the
3228 * above info out without failing
3229 */
3230- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3231- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3232+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3233+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3234 #endif
3235 show_stack(current, (unsigned long *) regs->gpr[1]);
3236 if (!user_mode(regs))
3237@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3238 newsp = stack[0];
3239 ip = stack[STACK_FRAME_LR_SAVE];
3240 if (!firstframe || ip != lr) {
3241- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3242+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3243 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3244 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3245- printk(" (%pS)",
3246+ printk(" (%pA)",
3247 (void *)current->ret_stack[curr_frame].ret);
3248 curr_frame--;
3249 }
3250@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3251 struct pt_regs *regs = (struct pt_regs *)
3252 (sp + STACK_FRAME_OVERHEAD);
3253 lr = regs->link;
3254- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3255+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3256 regs->trap, (void *)regs->nip, (void *)lr);
3257 firstframe = 1;
3258 }
3259@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3260 }
3261
3262 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3263-
3264-unsigned long arch_align_stack(unsigned long sp)
3265-{
3266- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3267- sp -= get_random_int() & ~PAGE_MASK;
3268- return sp & ~0xf;
3269-}
3270-
3271-static inline unsigned long brk_rnd(void)
3272-{
3273- unsigned long rnd = 0;
3274-
3275- /* 8MB for 32bit, 1GB for 64bit */
3276- if (is_32bit_task())
3277- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3278- else
3279- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3280-
3281- return rnd << PAGE_SHIFT;
3282-}
3283-
3284-unsigned long arch_randomize_brk(struct mm_struct *mm)
3285-{
3286- unsigned long base = mm->brk;
3287- unsigned long ret;
3288-
3289-#ifdef CONFIG_PPC_STD_MMU_64
3290- /*
3291- * If we are using 1TB segments and we are allowed to randomise
3292- * the heap, we can put it above 1TB so it is backed by a 1TB
3293- * segment. Otherwise the heap will be in the bottom 1TB
3294- * which always uses 256MB segments and this may result in a
3295- * performance penalty.
3296- */
3297- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3298- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3299-#endif
3300-
3301- ret = PAGE_ALIGN(base + brk_rnd());
3302-
3303- if (ret < mm->brk)
3304- return mm->brk;
3305-
3306- return ret;
3307-}
3308-
3309-unsigned long randomize_et_dyn(unsigned long base)
3310-{
3311- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3312-
3313- if (ret < base)
3314- return base;
3315-
3316- return ret;
3317-}
3318diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3319index ef14988..856c4bc 100644
3320--- a/arch/powerpc/kernel/ptrace.c
3321+++ b/arch/powerpc/kernel/ptrace.c
3322@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3323 /*
3324 * Get contents of register REGNO in task TASK.
3325 */
3326-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3327+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3328 {
3329 if (task->thread.regs == NULL)
3330 return -EIO;
3331@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3332
3333 CHECK_FULL_REGS(child->thread.regs);
3334 if (index < PT_FPR0) {
3335- tmp = ptrace_get_reg(child, (int) index);
3336+ tmp = ptrace_get_reg(child, index);
3337 } else {
3338 flush_fp_to_thread(child);
3339 tmp = ((unsigned long *)child->thread.fpr)
3340diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3341index d670429..2bc59b2 100644
3342--- a/arch/powerpc/kernel/signal_32.c
3343+++ b/arch/powerpc/kernel/signal_32.c
3344@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3345 /* Save user registers on the stack */
3346 frame = &rt_sf->uc.uc_mcontext;
3347 addr = frame;
3348- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3349+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3350 if (save_user_regs(regs, frame, 0, 1))
3351 goto badframe;
3352 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3353diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3354index 2fe6fc6..ada0d96 100644
3355--- a/arch/powerpc/kernel/signal_64.c
3356+++ b/arch/powerpc/kernel/signal_64.c
3357@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3358 current->thread.fpscr.val = 0;
3359
3360 /* Set up to return from userspace. */
3361- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3362+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3363 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3364 } else {
3365 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3366diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3367index b97c2d6..dd01a6a 100644
3368--- a/arch/powerpc/kernel/sys_ppc32.c
3369+++ b/arch/powerpc/kernel/sys_ppc32.c
3370@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3371 if (oldlenp) {
3372 if (!error) {
3373 if (get_user(oldlen, oldlenp) ||
3374- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3375+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3376+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3377 error = -EFAULT;
3378 }
3379- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3380 }
3381 return error;
3382 }
3383diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3384index 6f0ae1a..e4b6a56 100644
3385--- a/arch/powerpc/kernel/traps.c
3386+++ b/arch/powerpc/kernel/traps.c
3387@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3388 static inline void pmac_backlight_unblank(void) { }
3389 #endif
3390
3391+extern void gr_handle_kernel_exploit(void);
3392+
3393 int die(const char *str, struct pt_regs *regs, long err)
3394 {
3395 static struct {
3396@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3397 if (panic_on_oops)
3398 panic("Fatal exception");
3399
3400+ gr_handle_kernel_exploit();
3401+
3402 oops_exit();
3403 do_exit(err);
3404
3405diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3406index 137dc22..fe57a79 100644
3407--- a/arch/powerpc/kernel/vdso.c
3408+++ b/arch/powerpc/kernel/vdso.c
3409@@ -36,6 +36,7 @@
3410 #include <asm/firmware.h>
3411 #include <asm/vdso.h>
3412 #include <asm/vdso_datapage.h>
3413+#include <asm/mman.h>
3414
3415 #include "setup.h"
3416
3417@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3418 vdso_base = VDSO32_MBASE;
3419 #endif
3420
3421- current->mm->context.vdso_base = 0;
3422+ current->mm->context.vdso_base = ~0UL;
3423
3424 /* vDSO has a problem and was disabled, just don't "enable" it for the
3425 * process
3426@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3427 vdso_base = get_unmapped_area(NULL, vdso_base,
3428 (vdso_pages << PAGE_SHIFT) +
3429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3430- 0, 0);
3431+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3432 if (IS_ERR_VALUE(vdso_base)) {
3433 rc = vdso_base;
3434 goto fail_mmapsem;
3435diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3436index 77f6421..829564a 100644
3437--- a/arch/powerpc/kernel/vio.c
3438+++ b/arch/powerpc/kernel/vio.c
3439@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3440 vio_cmo_dealloc(viodev, alloc_size);
3441 }
3442
3443-struct dma_map_ops vio_dma_mapping_ops = {
3444+static const struct dma_map_ops vio_dma_mapping_ops = {
3445 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3446 .free_coherent = vio_dma_iommu_free_coherent,
3447 .map_sg = vio_dma_iommu_map_sg,
3448 .unmap_sg = vio_dma_iommu_unmap_sg,
3449+ .dma_supported = dma_iommu_dma_supported,
3450 .map_page = vio_dma_iommu_map_page,
3451 .unmap_page = vio_dma_iommu_unmap_page,
3452
3453@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3454
3455 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3456 {
3457- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3458 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3459 }
3460
3461diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3462index 5eea6f3..5d10396 100644
3463--- a/arch/powerpc/lib/usercopy_64.c
3464+++ b/arch/powerpc/lib/usercopy_64.c
3465@@ -9,22 +9,6 @@
3466 #include <linux/module.h>
3467 #include <asm/uaccess.h>
3468
3469-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3470-{
3471- if (likely(access_ok(VERIFY_READ, from, n)))
3472- n = __copy_from_user(to, from, n);
3473- else
3474- memset(to, 0, n);
3475- return n;
3476-}
3477-
3478-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3479-{
3480- if (likely(access_ok(VERIFY_WRITE, to, n)))
3481- n = __copy_to_user(to, from, n);
3482- return n;
3483-}
3484-
3485 unsigned long copy_in_user(void __user *to, const void __user *from,
3486 unsigned long n)
3487 {
3488@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3489 return n;
3490 }
3491
3492-EXPORT_SYMBOL(copy_from_user);
3493-EXPORT_SYMBOL(copy_to_user);
3494 EXPORT_SYMBOL(copy_in_user);
3495
3496diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3497index e7dae82..877ce0d 100644
3498--- a/arch/powerpc/mm/fault.c
3499+++ b/arch/powerpc/mm/fault.c
3500@@ -30,6 +30,10 @@
3501 #include <linux/kprobes.h>
3502 #include <linux/kdebug.h>
3503 #include <linux/perf_event.h>
3504+#include <linux/slab.h>
3505+#include <linux/pagemap.h>
3506+#include <linux/compiler.h>
3507+#include <linux/unistd.h>
3508
3509 #include <asm/firmware.h>
3510 #include <asm/page.h>
3511@@ -40,6 +44,7 @@
3512 #include <asm/uaccess.h>
3513 #include <asm/tlbflush.h>
3514 #include <asm/siginfo.h>
3515+#include <asm/ptrace.h>
3516
3517
3518 #ifdef CONFIG_KPROBES
3519@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3520 }
3521 #endif
3522
3523+#ifdef CONFIG_PAX_PAGEEXEC
3524+/*
3525+ * PaX: decide what to do with offenders (regs->nip = fault address)
3526+ *
3527+ * returns 1 when task should be killed
3528+ */
3529+static int pax_handle_fetch_fault(struct pt_regs *regs)
3530+{
3531+ return 1;
3532+}
3533+
3534+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3535+{
3536+ unsigned long i;
3537+
3538+ printk(KERN_ERR "PAX: bytes at PC: ");
3539+ for (i = 0; i < 5; i++) {
3540+ unsigned int c;
3541+ if (get_user(c, (unsigned int __user *)pc+i))
3542+ printk(KERN_CONT "???????? ");
3543+ else
3544+ printk(KERN_CONT "%08x ", c);
3545+ }
3546+ printk("\n");
3547+}
3548+#endif
3549+
3550 /*
3551 * Check whether the instruction at regs->nip is a store using
3552 * an update addressing form which will update r1.
3553@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3554 * indicate errors in DSISR but can validly be set in SRR1.
3555 */
3556 if (trap == 0x400)
3557- error_code &= 0x48200000;
3558+ error_code &= 0x58200000;
3559 else
3560 is_write = error_code & DSISR_ISSTORE;
3561 #else
3562@@ -250,7 +282,7 @@ good_area:
3563 * "undefined". Of those that can be set, this is the only
3564 * one which seems bad.
3565 */
3566- if (error_code & 0x10000000)
3567+ if (error_code & DSISR_GUARDED)
3568 /* Guarded storage error. */
3569 goto bad_area;
3570 #endif /* CONFIG_8xx */
3571@@ -265,7 +297,7 @@ good_area:
3572 * processors use the same I/D cache coherency mechanism
3573 * as embedded.
3574 */
3575- if (error_code & DSISR_PROTFAULT)
3576+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3577 goto bad_area;
3578 #endif /* CONFIG_PPC_STD_MMU */
3579
3580@@ -335,6 +367,23 @@ bad_area:
3581 bad_area_nosemaphore:
3582 /* User mode accesses cause a SIGSEGV */
3583 if (user_mode(regs)) {
3584+
3585+#ifdef CONFIG_PAX_PAGEEXEC
3586+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3587+#ifdef CONFIG_PPC_STD_MMU
3588+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3589+#else
3590+ if (is_exec && regs->nip == address) {
3591+#endif
3592+ switch (pax_handle_fetch_fault(regs)) {
3593+ }
3594+
3595+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3596+ do_group_exit(SIGKILL);
3597+ }
3598+ }
3599+#endif
3600+
3601 _exception(SIGSEGV, regs, code, address);
3602 return 0;
3603 }
3604diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3605index 5973631..ad617af 100644
3606--- a/arch/powerpc/mm/mem.c
3607+++ b/arch/powerpc/mm/mem.c
3608@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3609 {
3610 unsigned long lmb_next_region_start_pfn,
3611 lmb_region_max_pfn;
3612- int i;
3613+ unsigned int i;
3614
3615 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3616 lmb_region_max_pfn =
3617diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3618index 0d957a4..26d968f 100644
3619--- a/arch/powerpc/mm/mmap_64.c
3620+++ b/arch/powerpc/mm/mmap_64.c
3621@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3622 */
3623 if (mmap_is_legacy()) {
3624 mm->mmap_base = TASK_UNMAPPED_BASE;
3625+
3626+#ifdef CONFIG_PAX_RANDMMAP
3627+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3628+ mm->mmap_base += mm->delta_mmap;
3629+#endif
3630+
3631 mm->get_unmapped_area = arch_get_unmapped_area;
3632 mm->unmap_area = arch_unmap_area;
3633 } else {
3634 mm->mmap_base = mmap_base();
3635+
3636+#ifdef CONFIG_PAX_RANDMMAP
3637+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3638+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3639+#endif
3640+
3641 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3642 mm->unmap_area = arch_unmap_area_topdown;
3643 }
3644diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3645index ba51948..23009d9 100644
3646--- a/arch/powerpc/mm/slice.c
3647+++ b/arch/powerpc/mm/slice.c
3648@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3649 if ((mm->task_size - len) < addr)
3650 return 0;
3651 vma = find_vma(mm, addr);
3652- return (!vma || (addr + len) <= vma->vm_start);
3653+ return check_heap_stack_gap(vma, addr, len);
3654 }
3655
3656 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3657@@ -256,7 +256,7 @@ full_search:
3658 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3659 continue;
3660 }
3661- if (!vma || addr + len <= vma->vm_start) {
3662+ if (check_heap_stack_gap(vma, addr, len)) {
3663 /*
3664 * Remember the place where we stopped the search:
3665 */
3666@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3667 }
3668 }
3669
3670- addr = mm->mmap_base;
3671- while (addr > len) {
3672+ if (mm->mmap_base < len)
3673+ addr = -ENOMEM;
3674+ else
3675+ addr = mm->mmap_base - len;
3676+
3677+ while (!IS_ERR_VALUE(addr)) {
3678 /* Go down by chunk size */
3679- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3680+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3681
3682 /* Check for hit with different page size */
3683 mask = slice_range_to_mask(addr, len);
3684@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3685 * return with success:
3686 */
3687 vma = find_vma(mm, addr);
3688- if (!vma || (addr + len) <= vma->vm_start) {
3689+ if (check_heap_stack_gap(vma, addr, len)) {
3690 /* remember the address as a hint for next time */
3691 if (use_cache)
3692 mm->free_area_cache = addr;
3693@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3694 mm->cached_hole_size = vma->vm_start - addr;
3695
3696 /* try just below the current vma->vm_start */
3697- addr = vma->vm_start;
3698+ addr = skip_heap_stack_gap(vma, len);
3699 }
3700
3701 /*
3702@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3703 if (fixed && addr > (mm->task_size - len))
3704 return -EINVAL;
3705
3706+#ifdef CONFIG_PAX_RANDMMAP
3707+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3708+ addr = 0;
3709+#endif
3710+
3711 /* If hint, make sure it matches our alignment restrictions */
3712 if (!fixed && addr) {
3713 addr = _ALIGN_UP(addr, 1ul << pshift);
3714diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3715index b5c753d..8f01abe 100644
3716--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3717+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3718@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3719 lite5200_pm_target_state = PM_SUSPEND_ON;
3720 }
3721
3722-static struct platform_suspend_ops lite5200_pm_ops = {
3723+static const struct platform_suspend_ops lite5200_pm_ops = {
3724 .valid = lite5200_pm_valid,
3725 .begin = lite5200_pm_begin,
3726 .prepare = lite5200_pm_prepare,
3727diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3728index a55b0b6..478c18e 100644
3729--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3730+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3731@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3732 iounmap(mbar);
3733 }
3734
3735-static struct platform_suspend_ops mpc52xx_pm_ops = {
3736+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3737 .valid = mpc52xx_pm_valid,
3738 .prepare = mpc52xx_pm_prepare,
3739 .enter = mpc52xx_pm_enter,
3740diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3741index 08e65fc..643d3ac 100644
3742--- a/arch/powerpc/platforms/83xx/suspend.c
3743+++ b/arch/powerpc/platforms/83xx/suspend.c
3744@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3745 return ret;
3746 }
3747
3748-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3749+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3750 .valid = mpc83xx_suspend_valid,
3751 .begin = mpc83xx_suspend_begin,
3752 .enter = mpc83xx_suspend_enter,
3753diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3754index ca5bfdf..1602e09 100644
3755--- a/arch/powerpc/platforms/cell/iommu.c
3756+++ b/arch/powerpc/platforms/cell/iommu.c
3757@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3758
3759 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3760
3761-struct dma_map_ops dma_iommu_fixed_ops = {
3762+const struct dma_map_ops dma_iommu_fixed_ops = {
3763 .alloc_coherent = dma_fixed_alloc_coherent,
3764 .free_coherent = dma_fixed_free_coherent,
3765 .map_sg = dma_fixed_map_sg,
3766diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3767index e34b305..20e48ec 100644
3768--- a/arch/powerpc/platforms/ps3/system-bus.c
3769+++ b/arch/powerpc/platforms/ps3/system-bus.c
3770@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3771 return mask >= DMA_BIT_MASK(32);
3772 }
3773
3774-static struct dma_map_ops ps3_sb_dma_ops = {
3775+static const struct dma_map_ops ps3_sb_dma_ops = {
3776 .alloc_coherent = ps3_alloc_coherent,
3777 .free_coherent = ps3_free_coherent,
3778 .map_sg = ps3_sb_map_sg,
3779@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3780 .unmap_page = ps3_unmap_page,
3781 };
3782
3783-static struct dma_map_ops ps3_ioc0_dma_ops = {
3784+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3785 .alloc_coherent = ps3_alloc_coherent,
3786 .free_coherent = ps3_free_coherent,
3787 .map_sg = ps3_ioc0_map_sg,
3788diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3789index f0e6f28..60d53ed 100644
3790--- a/arch/powerpc/platforms/pseries/Kconfig
3791+++ b/arch/powerpc/platforms/pseries/Kconfig
3792@@ -2,6 +2,8 @@ config PPC_PSERIES
3793 depends on PPC64 && PPC_BOOK3S
3794 bool "IBM pSeries & new (POWER5-based) iSeries"
3795 select MPIC
3796+ select PCI_MSI
3797+ select XICS
3798 select PPC_I8259
3799 select PPC_RTAS
3800 select RTAS_ERROR_LOGGING
3801diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3802index 43c0aca..42c045b 100644
3803--- a/arch/s390/Kconfig
3804+++ b/arch/s390/Kconfig
3805@@ -194,28 +194,26 @@ config AUDIT_ARCH
3806
3807 config S390_SWITCH_AMODE
3808 bool "Switch kernel/user addressing modes"
3809+ default y
3810 help
3811 This option allows to switch the addressing modes of kernel and user
3812- space. The kernel parameter switch_amode=on will enable this feature,
3813- default is disabled. Enabling this (via kernel parameter) on machines
3814- earlier than IBM System z9-109 EC/BC will reduce system performance.
3815+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3816+ will reduce system performance.
3817
3818 Note that this option will also be selected by selecting the execute
3819- protection option below. Enabling the execute protection via the
3820- noexec kernel parameter will also switch the addressing modes,
3821- independent of the switch_amode kernel parameter.
3822+ protection option below. Enabling the execute protection will also
3823+ switch the addressing modes, independent of this option.
3824
3825
3826 config S390_EXEC_PROTECT
3827 bool "Data execute protection"
3828+ default y
3829 select S390_SWITCH_AMODE
3830 help
3831 This option allows to enable a buffer overflow protection for user
3832 space programs and it also selects the addressing mode option above.
3833- The kernel parameter noexec=on will enable this feature and also
3834- switch the addressing modes, default is disabled. Enabling this (via
3835- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3836- will reduce system performance.
3837+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3838+ reduce system performance.
3839
3840 comment "Code generation options"
3841
3842diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3843index e885442..e3a2817 100644
3844--- a/arch/s390/include/asm/elf.h
3845+++ b/arch/s390/include/asm/elf.h
3846@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3847 that it will "exec", and that there is sufficient room for the brk. */
3848 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3849
3850+#ifdef CONFIG_PAX_ASLR
3851+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3852+
3853+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3854+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
3855+#endif
3856+
3857 /* This yields a mask that user programs can use to figure out what
3858 instruction set this CPU supports. */
3859
3860diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
3861index e37478e..9ce0e9f 100644
3862--- a/arch/s390/include/asm/setup.h
3863+++ b/arch/s390/include/asm/setup.h
3864@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3865 void detect_memory_layout(struct mem_chunk chunk[]);
3866
3867 #ifdef CONFIG_S390_SWITCH_AMODE
3868-extern unsigned int switch_amode;
3869+#define switch_amode (1)
3870 #else
3871 #define switch_amode (0)
3872 #endif
3873
3874 #ifdef CONFIG_S390_EXEC_PROTECT
3875-extern unsigned int s390_noexec;
3876+#define s390_noexec (1)
3877 #else
3878 #define s390_noexec (0)
3879 #endif
3880diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3881index 8377e91..e28e6f1 100644
3882--- a/arch/s390/include/asm/uaccess.h
3883+++ b/arch/s390/include/asm/uaccess.h
3884@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3885 copy_to_user(void __user *to, const void *from, unsigned long n)
3886 {
3887 might_fault();
3888+
3889+ if ((long)n < 0)
3890+ return n;
3891+
3892 if (access_ok(VERIFY_WRITE, to, n))
3893 n = __copy_to_user(to, from, n);
3894 return n;
3895@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3896 static inline unsigned long __must_check
3897 __copy_from_user(void *to, const void __user *from, unsigned long n)
3898 {
3899+ if ((long)n < 0)
3900+ return n;
3901+
3902 if (__builtin_constant_p(n) && (n <= 256))
3903 return uaccess.copy_from_user_small(n, from, to);
3904 else
3905@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3906 copy_from_user(void *to, const void __user *from, unsigned long n)
3907 {
3908 might_fault();
3909+
3910+ if ((long)n < 0)
3911+ return n;
3912+
3913 if (access_ok(VERIFY_READ, from, n))
3914 n = __copy_from_user(to, from, n);
3915 else
3916diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3917index 639380a..72e3c02 100644
3918--- a/arch/s390/kernel/module.c
3919+++ b/arch/s390/kernel/module.c
3920@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3921
3922 /* Increase core size by size of got & plt and set start
3923 offsets for got and plt. */
3924- me->core_size = ALIGN(me->core_size, 4);
3925- me->arch.got_offset = me->core_size;
3926- me->core_size += me->arch.got_size;
3927- me->arch.plt_offset = me->core_size;
3928- me->core_size += me->arch.plt_size;
3929+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3930+ me->arch.got_offset = me->core_size_rw;
3931+ me->core_size_rw += me->arch.got_size;
3932+ me->arch.plt_offset = me->core_size_rx;
3933+ me->core_size_rx += me->arch.plt_size;
3934 return 0;
3935 }
3936
3937@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3938 if (info->got_initialized == 0) {
3939 Elf_Addr *gotent;
3940
3941- gotent = me->module_core + me->arch.got_offset +
3942+ gotent = me->module_core_rw + me->arch.got_offset +
3943 info->got_offset;
3944 *gotent = val;
3945 info->got_initialized = 1;
3946@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3947 else if (r_type == R_390_GOTENT ||
3948 r_type == R_390_GOTPLTENT)
3949 *(unsigned int *) loc =
3950- (val + (Elf_Addr) me->module_core - loc) >> 1;
3951+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3952 else if (r_type == R_390_GOT64 ||
3953 r_type == R_390_GOTPLT64)
3954 *(unsigned long *) loc = val;
3955@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3956 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3957 if (info->plt_initialized == 0) {
3958 unsigned int *ip;
3959- ip = me->module_core + me->arch.plt_offset +
3960+ ip = me->module_core_rx + me->arch.plt_offset +
3961 info->plt_offset;
3962 #ifndef CONFIG_64BIT
3963 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3964@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3965 val - loc + 0xffffUL < 0x1ffffeUL) ||
3966 (r_type == R_390_PLT32DBL &&
3967 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3968- val = (Elf_Addr) me->module_core +
3969+ val = (Elf_Addr) me->module_core_rx +
3970 me->arch.plt_offset +
3971 info->plt_offset;
3972 val += rela->r_addend - loc;
3973@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3974 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3975 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3976 val = val + rela->r_addend -
3977- ((Elf_Addr) me->module_core + me->arch.got_offset);
3978+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3979 if (r_type == R_390_GOTOFF16)
3980 *(unsigned short *) loc = val;
3981 else if (r_type == R_390_GOTOFF32)
3982@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3983 break;
3984 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3985 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3986- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3987+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3988 rela->r_addend - loc;
3989 if (r_type == R_390_GOTPC)
3990 *(unsigned int *) loc = val;
3991diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3992index 061479f..dbfb08c 100644
3993--- a/arch/s390/kernel/setup.c
3994+++ b/arch/s390/kernel/setup.c
3995@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
3996 early_param("mem", early_parse_mem);
3997
3998 #ifdef CONFIG_S390_SWITCH_AMODE
3999-unsigned int switch_amode = 0;
4000-EXPORT_SYMBOL_GPL(switch_amode);
4001-
4002 static int set_amode_and_uaccess(unsigned long user_amode,
4003 unsigned long user32_amode)
4004 {
4005@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4006 return 0;
4007 }
4008 }
4009-
4010-/*
4011- * Switch kernel/user addressing modes?
4012- */
4013-static int __init early_parse_switch_amode(char *p)
4014-{
4015- switch_amode = 1;
4016- return 0;
4017-}
4018-early_param("switch_amode", early_parse_switch_amode);
4019-
4020 #else /* CONFIG_S390_SWITCH_AMODE */
4021 static inline int set_amode_and_uaccess(unsigned long user_amode,
4022 unsigned long user32_amode)
4023@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4024 }
4025 #endif /* CONFIG_S390_SWITCH_AMODE */
4026
4027-#ifdef CONFIG_S390_EXEC_PROTECT
4028-unsigned int s390_noexec = 0;
4029-EXPORT_SYMBOL_GPL(s390_noexec);
4030-
4031-/*
4032- * Enable execute protection?
4033- */
4034-static int __init early_parse_noexec(char *p)
4035-{
4036- if (!strncmp(p, "off", 3))
4037- return 0;
4038- switch_amode = 1;
4039- s390_noexec = 1;
4040- return 0;
4041-}
4042-early_param("noexec", early_parse_noexec);
4043-#endif /* CONFIG_S390_EXEC_PROTECT */
4044-
4045 static void setup_addressing_mode(void)
4046 {
4047 if (s390_noexec) {
4048diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4049index f4558cc..e461f37 100644
4050--- a/arch/s390/mm/mmap.c
4051+++ b/arch/s390/mm/mmap.c
4052@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 */
4054 if (mmap_is_legacy()) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE;
4056+
4057+#ifdef CONFIG_PAX_RANDMMAP
4058+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4059+ mm->mmap_base += mm->delta_mmap;
4060+#endif
4061+
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065 mm->mmap_base = mmap_base();
4066+
4067+#ifdef CONFIG_PAX_RANDMMAP
4068+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4069+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4070+#endif
4071+
4072 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4073 mm->unmap_area = arch_unmap_area_topdown;
4074 }
4075@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4076 */
4077 if (mmap_is_legacy()) {
4078 mm->mmap_base = TASK_UNMAPPED_BASE;
4079+
4080+#ifdef CONFIG_PAX_RANDMMAP
4081+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4082+ mm->mmap_base += mm->delta_mmap;
4083+#endif
4084+
4085 mm->get_unmapped_area = s390_get_unmapped_area;
4086 mm->unmap_area = arch_unmap_area;
4087 } else {
4088 mm->mmap_base = mmap_base();
4089+
4090+#ifdef CONFIG_PAX_RANDMMAP
4091+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4092+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4093+#endif
4094+
4095 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4096 mm->unmap_area = arch_unmap_area_topdown;
4097 }
4098diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4099index 589d5c7..669e274 100644
4100--- a/arch/score/include/asm/system.h
4101+++ b/arch/score/include/asm/system.h
4102@@ -17,7 +17,7 @@ do { \
4103 #define finish_arch_switch(prev) do {} while (0)
4104
4105 typedef void (*vi_handler_t)(void);
4106-extern unsigned long arch_align_stack(unsigned long sp);
4107+#define arch_align_stack(x) (x)
4108
4109 #define mb() barrier()
4110 #define rmb() barrier()
4111diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4112index 25d0803..d6c8e36 100644
4113--- a/arch/score/kernel/process.c
4114+++ b/arch/score/kernel/process.c
4115@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4116
4117 return task_pt_regs(task)->cp0_epc;
4118 }
4119-
4120-unsigned long arch_align_stack(unsigned long sp)
4121-{
4122- return sp;
4123-}
4124diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4125index d936c1a..304a252 100644
4126--- a/arch/sh/boards/mach-hp6xx/pm.c
4127+++ b/arch/sh/boards/mach-hp6xx/pm.c
4128@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4129 return 0;
4130 }
4131
4132-static struct platform_suspend_ops hp6x0_pm_ops = {
4133+static const struct platform_suspend_ops hp6x0_pm_ops = {
4134 .enter = hp6x0_pm_enter,
4135 .valid = suspend_valid_only_mem,
4136 };
4137diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4138index 8a8a993..7b3079b 100644
4139--- a/arch/sh/kernel/cpu/sh4/sq.c
4140+++ b/arch/sh/kernel/cpu/sh4/sq.c
4141@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4142 NULL,
4143 };
4144
4145-static struct sysfs_ops sq_sysfs_ops = {
4146+static const struct sysfs_ops sq_sysfs_ops = {
4147 .show = sq_sysfs_show,
4148 .store = sq_sysfs_store,
4149 };
4150diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4151index ee3c2aa..c49cee6 100644
4152--- a/arch/sh/kernel/cpu/shmobile/pm.c
4153+++ b/arch/sh/kernel/cpu/shmobile/pm.c
4154@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4155 return 0;
4156 }
4157
4158-static struct platform_suspend_ops sh_pm_ops = {
4159+static const struct platform_suspend_ops sh_pm_ops = {
4160 .enter = sh_pm_enter,
4161 .valid = suspend_valid_only_mem,
4162 };
4163diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4164index 3e532d0..9faa306 100644
4165--- a/arch/sh/kernel/kgdb.c
4166+++ b/arch/sh/kernel/kgdb.c
4167@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4168 {
4169 }
4170
4171-struct kgdb_arch arch_kgdb_ops = {
4172+const struct kgdb_arch arch_kgdb_ops = {
4173 /* Breakpoint instruction: trapa #0x3c */
4174 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4175 .gdb_bpt_instr = { 0x3c, 0xc3 },
4176diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4177index afeb710..d1d1289 100644
4178--- a/arch/sh/mm/mmap.c
4179+++ b/arch/sh/mm/mmap.c
4180@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4181 addr = PAGE_ALIGN(addr);
4182
4183 vma = find_vma(mm, addr);
4184- if (TASK_SIZE - len >= addr &&
4185- (!vma || addr + len <= vma->vm_start))
4186+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4187 return addr;
4188 }
4189
4190@@ -106,7 +105,7 @@ full_search:
4191 }
4192 return -ENOMEM;
4193 }
4194- if (likely(!vma || addr + len <= vma->vm_start)) {
4195+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4196 /*
4197 * Remember the place where we stopped the search:
4198 */
4199@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4200 addr = PAGE_ALIGN(addr);
4201
4202 vma = find_vma(mm, addr);
4203- if (TASK_SIZE - len >= addr &&
4204- (!vma || addr + len <= vma->vm_start))
4205+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4206 return addr;
4207 }
4208
4209@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4210 /* make sure it can fit in the remaining address space */
4211 if (likely(addr > len)) {
4212 vma = find_vma(mm, addr-len);
4213- if (!vma || addr <= vma->vm_start) {
4214+ if (check_heap_stack_gap(vma, addr - len, len)) {
4215 /* remember the address as a hint for next time */
4216 return (mm->free_area_cache = addr-len);
4217 }
4218@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4219 if (unlikely(mm->mmap_base < len))
4220 goto bottomup;
4221
4222- addr = mm->mmap_base-len;
4223- if (do_colour_align)
4224- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4225+ addr = mm->mmap_base - len;
4226
4227 do {
4228+ if (do_colour_align)
4229+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4230 /*
4231 * Lookup failure means no vma is above this address,
4232 * else if new region fits below vma->vm_start,
4233 * return with success:
4234 */
4235 vma = find_vma(mm, addr);
4236- if (likely(!vma || addr+len <= vma->vm_start)) {
4237+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4238 /* remember the address as a hint for next time */
4239 return (mm->free_area_cache = addr);
4240 }
4241@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4242 mm->cached_hole_size = vma->vm_start - addr;
4243
4244 /* try just below the current vma->vm_start */
4245- addr = vma->vm_start-len;
4246- if (do_colour_align)
4247- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4248- } while (likely(len < vma->vm_start));
4249+ addr = skip_heap_stack_gap(vma, len);
4250+ } while (!IS_ERR_VALUE(addr));
4251
4252 bottomup:
4253 /*
4254diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4255index 113225b..7fd04e7 100644
4256--- a/arch/sparc/Makefile
4257+++ b/arch/sparc/Makefile
4258@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4259 # Export what is needed by arch/sparc/boot/Makefile
4260 export VMLINUX_INIT VMLINUX_MAIN
4261 VMLINUX_INIT := $(head-y) $(init-y)
4262-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4263+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4264 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4265 VMLINUX_MAIN += $(drivers-y) $(net-y)
4266
4267diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4268index f5cc06f..f858d47 100644
4269--- a/arch/sparc/include/asm/atomic_64.h
4270+++ b/arch/sparc/include/asm/atomic_64.h
4271@@ -14,18 +14,40 @@
4272 #define ATOMIC64_INIT(i) { (i) }
4273
4274 #define atomic_read(v) ((v)->counter)
4275+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4276+{
4277+ return v->counter;
4278+}
4279 #define atomic64_read(v) ((v)->counter)
4280+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4281+{
4282+ return v->counter;
4283+}
4284
4285 #define atomic_set(v, i) (((v)->counter) = i)
4286+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4287+{
4288+ v->counter = i;
4289+}
4290 #define atomic64_set(v, i) (((v)->counter) = i)
4291+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4292+{
4293+ v->counter = i;
4294+}
4295
4296 extern void atomic_add(int, atomic_t *);
4297+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4298 extern void atomic64_add(long, atomic64_t *);
4299+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4300 extern void atomic_sub(int, atomic_t *);
4301+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4302 extern void atomic64_sub(long, atomic64_t *);
4303+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4304
4305 extern int atomic_add_ret(int, atomic_t *);
4306+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4307 extern long atomic64_add_ret(long, atomic64_t *);
4308+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4309 extern int atomic_sub_ret(int, atomic_t *);
4310 extern long atomic64_sub_ret(long, atomic64_t *);
4311
4312@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4313 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4314
4315 #define atomic_inc_return(v) atomic_add_ret(1, v)
4316+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4317+{
4318+ return atomic_add_ret_unchecked(1, v);
4319+}
4320 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4321+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4322+{
4323+ return atomic64_add_ret_unchecked(1, v);
4324+}
4325
4326 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4327 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4328
4329 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4330+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4331+{
4332+ return atomic_add_ret_unchecked(i, v);
4333+}
4334 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4335+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4336+{
4337+ return atomic64_add_ret_unchecked(i, v);
4338+}
4339
4340 /*
4341 * atomic_inc_and_test - increment and test
4342@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4343 * other cases.
4344 */
4345 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4346+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4347+{
4348+ return atomic_inc_return_unchecked(v) == 0;
4349+}
4350 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4351
4352 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4353@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4354 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4355
4356 #define atomic_inc(v) atomic_add(1, v)
4357+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4358+{
4359+ atomic_add_unchecked(1, v);
4360+}
4361 #define atomic64_inc(v) atomic64_add(1, v)
4362+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4363+{
4364+ atomic64_add_unchecked(1, v);
4365+}
4366
4367 #define atomic_dec(v) atomic_sub(1, v)
4368+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4369+{
4370+ atomic_sub_unchecked(1, v);
4371+}
4372 #define atomic64_dec(v) atomic64_sub(1, v)
4373+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4374+{
4375+ atomic64_sub_unchecked(1, v);
4376+}
4377
4378 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4379 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4380
4381 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4382+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4383+{
4384+ return cmpxchg(&v->counter, old, new);
4385+}
4386 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4387+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4388+{
4389+ return xchg(&v->counter, new);
4390+}
4391
4392 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4393 {
4394- int c, old;
4395+ int c, old, new;
4396 c = atomic_read(v);
4397 for (;;) {
4398- if (unlikely(c == (u)))
4399+ if (unlikely(c == u))
4400 break;
4401- old = atomic_cmpxchg((v), c, c + (a));
4402+
4403+ asm volatile("addcc %2, %0, %0\n"
4404+
4405+#ifdef CONFIG_PAX_REFCOUNT
4406+ "tvs %%icc, 6\n"
4407+#endif
4408+
4409+ : "=r" (new)
4410+ : "0" (c), "ir" (a)
4411+ : "cc");
4412+
4413+ old = atomic_cmpxchg(v, c, new);
4414 if (likely(old == c))
4415 break;
4416 c = old;
4417 }
4418- return c != (u);
4419+ return c != u;
4420 }
4421
4422 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4423@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4424 #define atomic64_cmpxchg(v, o, n) \
4425 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4426 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4427+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4428+{
4429+ return xchg(&v->counter, new);
4430+}
4431
4432 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4433 {
4434- long c, old;
4435+ long c, old, new;
4436 c = atomic64_read(v);
4437 for (;;) {
4438- if (unlikely(c == (u)))
4439+ if (unlikely(c == u))
4440 break;
4441- old = atomic64_cmpxchg((v), c, c + (a));
4442+
4443+ asm volatile("addcc %2, %0, %0\n"
4444+
4445+#ifdef CONFIG_PAX_REFCOUNT
4446+ "tvs %%xcc, 6\n"
4447+#endif
4448+
4449+ : "=r" (new)
4450+ : "0" (c), "ir" (a)
4451+ : "cc");
4452+
4453+ old = atomic64_cmpxchg(v, c, new);
4454 if (likely(old == c))
4455 break;
4456 c = old;
4457 }
4458- return c != (u);
4459+ return c != u;
4460 }
4461
4462 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4463diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4464index 41f85ae..fb54d5e 100644
4465--- a/arch/sparc/include/asm/cache.h
4466+++ b/arch/sparc/include/asm/cache.h
4467@@ -8,7 +8,7 @@
4468 #define _SPARC_CACHE_H
4469
4470 #define L1_CACHE_SHIFT 5
4471-#define L1_CACHE_BYTES 32
4472+#define L1_CACHE_BYTES 32UL
4473 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4474
4475 #ifdef CONFIG_SPARC32
4476diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4477index 5a8c308..38def92 100644
4478--- a/arch/sparc/include/asm/dma-mapping.h
4479+++ b/arch/sparc/include/asm/dma-mapping.h
4480@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4481 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4482 #define dma_is_consistent(d, h) (1)
4483
4484-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4485+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4486 extern struct bus_type pci_bus_type;
4487
4488-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4489+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4490 {
4491 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4492 if (dev->bus == &pci_bus_type)
4493@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4494 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4495 dma_addr_t *dma_handle, gfp_t flag)
4496 {
4497- struct dma_map_ops *ops = get_dma_ops(dev);
4498+ const struct dma_map_ops *ops = get_dma_ops(dev);
4499 void *cpu_addr;
4500
4501 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4502@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4503 static inline void dma_free_coherent(struct device *dev, size_t size,
4504 void *cpu_addr, dma_addr_t dma_handle)
4505 {
4506- struct dma_map_ops *ops = get_dma_ops(dev);
4507+ const struct dma_map_ops *ops = get_dma_ops(dev);
4508
4509 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4510 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4511diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4512index 381a1b5..b97e3ff 100644
4513--- a/arch/sparc/include/asm/elf_32.h
4514+++ b/arch/sparc/include/asm/elf_32.h
4515@@ -116,6 +116,13 @@ typedef struct {
4516
4517 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4518
4519+#ifdef CONFIG_PAX_ASLR
4520+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4521+
4522+#define PAX_DELTA_MMAP_LEN 16
4523+#define PAX_DELTA_STACK_LEN 16
4524+#endif
4525+
4526 /* This yields a mask that user programs can use to figure out what
4527 instruction set this cpu supports. This can NOT be done in userspace
4528 on Sparc. */
4529diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4530index 9968085..c2106ef 100644
4531--- a/arch/sparc/include/asm/elf_64.h
4532+++ b/arch/sparc/include/asm/elf_64.h
4533@@ -163,6 +163,12 @@ typedef struct {
4534 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4535 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4536
4537+#ifdef CONFIG_PAX_ASLR
4538+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4539+
4540+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4541+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4542+#endif
4543
4544 /* This yields a mask that user programs can use to figure out what
4545 instruction set this cpu supports. */
4546diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4547index e0cabe7..efd60f1 100644
4548--- a/arch/sparc/include/asm/pgtable_32.h
4549+++ b/arch/sparc/include/asm/pgtable_32.h
4550@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4551 BTFIXUPDEF_INT(page_none)
4552 BTFIXUPDEF_INT(page_copy)
4553 BTFIXUPDEF_INT(page_readonly)
4554+
4555+#ifdef CONFIG_PAX_PAGEEXEC
4556+BTFIXUPDEF_INT(page_shared_noexec)
4557+BTFIXUPDEF_INT(page_copy_noexec)
4558+BTFIXUPDEF_INT(page_readonly_noexec)
4559+#endif
4560+
4561 BTFIXUPDEF_INT(page_kernel)
4562
4563 #define PMD_SHIFT SUN4C_PMD_SHIFT
4564@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4565 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4566 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4567
4568+#ifdef CONFIG_PAX_PAGEEXEC
4569+extern pgprot_t PAGE_SHARED_NOEXEC;
4570+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4571+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4572+#else
4573+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4574+# define PAGE_COPY_NOEXEC PAGE_COPY
4575+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4576+#endif
4577+
4578 extern unsigned long page_kernel;
4579
4580 #ifdef MODULE
4581diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4582index 1407c07..7e10231 100644
4583--- a/arch/sparc/include/asm/pgtsrmmu.h
4584+++ b/arch/sparc/include/asm/pgtsrmmu.h
4585@@ -115,6 +115,13 @@
4586 SRMMU_EXEC | SRMMU_REF)
4587 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4588 SRMMU_EXEC | SRMMU_REF)
4589+
4590+#ifdef CONFIG_PAX_PAGEEXEC
4591+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4592+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4593+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4594+#endif
4595+
4596 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4597 SRMMU_DIRTY | SRMMU_REF)
4598
4599diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4600index 43e5147..47622a1 100644
4601--- a/arch/sparc/include/asm/spinlock_64.h
4602+++ b/arch/sparc/include/asm/spinlock_64.h
4603@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4604
4605 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4606
4607-static void inline arch_read_lock(raw_rwlock_t *lock)
4608+static inline void arch_read_lock(raw_rwlock_t *lock)
4609 {
4610 unsigned long tmp1, tmp2;
4611
4612 __asm__ __volatile__ (
4613 "1: ldsw [%2], %0\n"
4614 " brlz,pn %0, 2f\n"
4615-"4: add %0, 1, %1\n"
4616+"4: addcc %0, 1, %1\n"
4617+
4618+#ifdef CONFIG_PAX_REFCOUNT
4619+" tvs %%icc, 6\n"
4620+#endif
4621+
4622 " cas [%2], %0, %1\n"
4623 " cmp %0, %1\n"
4624 " bne,pn %%icc, 1b\n"
4625@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4626 " .previous"
4627 : "=&r" (tmp1), "=&r" (tmp2)
4628 : "r" (lock)
4629- : "memory");
4630+ : "memory", "cc");
4631 }
4632
4633-static int inline arch_read_trylock(raw_rwlock_t *lock)
4634+static inline int arch_read_trylock(raw_rwlock_t *lock)
4635 {
4636 int tmp1, tmp2;
4637
4638@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4639 "1: ldsw [%2], %0\n"
4640 " brlz,a,pn %0, 2f\n"
4641 " mov 0, %0\n"
4642-" add %0, 1, %1\n"
4643+" addcc %0, 1, %1\n"
4644+
4645+#ifdef CONFIG_PAX_REFCOUNT
4646+" tvs %%icc, 6\n"
4647+#endif
4648+
4649 " cas [%2], %0, %1\n"
4650 " cmp %0, %1\n"
4651 " bne,pn %%icc, 1b\n"
4652@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4653 return tmp1;
4654 }
4655
4656-static void inline arch_read_unlock(raw_rwlock_t *lock)
4657+static inline void arch_read_unlock(raw_rwlock_t *lock)
4658 {
4659 unsigned long tmp1, tmp2;
4660
4661 __asm__ __volatile__(
4662 "1: lduw [%2], %0\n"
4663-" sub %0, 1, %1\n"
4664+" subcc %0, 1, %1\n"
4665+
4666+#ifdef CONFIG_PAX_REFCOUNT
4667+" tvs %%icc, 6\n"
4668+#endif
4669+
4670 " cas [%2], %0, %1\n"
4671 " cmp %0, %1\n"
4672 " bne,pn %%xcc, 1b\n"
4673@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4674 : "memory");
4675 }
4676
4677-static void inline arch_write_lock(raw_rwlock_t *lock)
4678+static inline void arch_write_lock(raw_rwlock_t *lock)
4679 {
4680 unsigned long mask, tmp1, tmp2;
4681
4682@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4683 : "memory");
4684 }
4685
4686-static void inline arch_write_unlock(raw_rwlock_t *lock)
4687+static inline void arch_write_unlock(raw_rwlock_t *lock)
4688 {
4689 __asm__ __volatile__(
4690 " stw %%g0, [%0]"
4691@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4692 : "memory");
4693 }
4694
4695-static int inline arch_write_trylock(raw_rwlock_t *lock)
4696+static inline int arch_write_trylock(raw_rwlock_t *lock)
4697 {
4698 unsigned long mask, tmp1, tmp2, result;
4699
4700diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4701index 844d73a..f787fb9 100644
4702--- a/arch/sparc/include/asm/thread_info_32.h
4703+++ b/arch/sparc/include/asm/thread_info_32.h
4704@@ -50,6 +50,8 @@ struct thread_info {
4705 unsigned long w_saved;
4706
4707 struct restart_block restart_block;
4708+
4709+ unsigned long lowest_stack;
4710 };
4711
4712 /*
4713diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4714index f78ad9a..9f55fc7 100644
4715--- a/arch/sparc/include/asm/thread_info_64.h
4716+++ b/arch/sparc/include/asm/thread_info_64.h
4717@@ -68,6 +68,8 @@ struct thread_info {
4718 struct pt_regs *kern_una_regs;
4719 unsigned int kern_una_insn;
4720
4721+ unsigned long lowest_stack;
4722+
4723 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4724 };
4725
4726diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4727index e88fbe5..96b0ce5 100644
4728--- a/arch/sparc/include/asm/uaccess.h
4729+++ b/arch/sparc/include/asm/uaccess.h
4730@@ -1,5 +1,13 @@
4731 #ifndef ___ASM_SPARC_UACCESS_H
4732 #define ___ASM_SPARC_UACCESS_H
4733+
4734+#ifdef __KERNEL__
4735+#ifndef __ASSEMBLY__
4736+#include <linux/types.h>
4737+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4738+#endif
4739+#endif
4740+
4741 #if defined(__sparc__) && defined(__arch64__)
4742 #include <asm/uaccess_64.h>
4743 #else
4744diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4745index 8303ac4..07f333d 100644
4746--- a/arch/sparc/include/asm/uaccess_32.h
4747+++ b/arch/sparc/include/asm/uaccess_32.h
4748@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4749
4750 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752- if (n && __access_ok((unsigned long) to, n))
4753+ if ((long)n < 0)
4754+ return n;
4755+
4756+ if (n && __access_ok((unsigned long) to, n)) {
4757+ if (!__builtin_constant_p(n))
4758+ check_object_size(from, n, true);
4759 return __copy_user(to, (__force void __user *) from, n);
4760- else
4761+ } else
4762 return n;
4763 }
4764
4765 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4766 {
4767+ if ((long)n < 0)
4768+ return n;
4769+
4770+ if (!__builtin_constant_p(n))
4771+ check_object_size(from, n, true);
4772+
4773 return __copy_user(to, (__force void __user *) from, n);
4774 }
4775
4776 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4777 {
4778- if (n && __access_ok((unsigned long) from, n))
4779+ if ((long)n < 0)
4780+ return n;
4781+
4782+ if (n && __access_ok((unsigned long) from, n)) {
4783+ if (!__builtin_constant_p(n))
4784+ check_object_size(to, n, false);
4785 return __copy_user((__force void __user *) to, from, n);
4786- else
4787+ } else
4788 return n;
4789 }
4790
4791 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4792 {
4793+ if ((long)n < 0)
4794+ return n;
4795+
4796 return __copy_user((__force void __user *) to, from, n);
4797 }
4798
4799diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4800index 9ea271e..7b8a271 100644
4801--- a/arch/sparc/include/asm/uaccess_64.h
4802+++ b/arch/sparc/include/asm/uaccess_64.h
4803@@ -9,6 +9,7 @@
4804 #include <linux/compiler.h>
4805 #include <linux/string.h>
4806 #include <linux/thread_info.h>
4807+#include <linux/kernel.h>
4808 #include <asm/asi.h>
4809 #include <asm/system.h>
4810 #include <asm/spitfire.h>
4811@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4812 static inline unsigned long __must_check
4813 copy_from_user(void *to, const void __user *from, unsigned long size)
4814 {
4815- unsigned long ret = ___copy_from_user(to, from, size);
4816+ unsigned long ret;
4817
4818+ if ((long)size < 0 || size > INT_MAX)
4819+ return size;
4820+
4821+ if (!__builtin_constant_p(size))
4822+ check_object_size(to, size, false);
4823+
4824+ ret = ___copy_from_user(to, from, size);
4825 if (unlikely(ret))
4826 ret = copy_from_user_fixup(to, from, size);
4827 return ret;
4828@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4829 static inline unsigned long __must_check
4830 copy_to_user(void __user *to, const void *from, unsigned long size)
4831 {
4832- unsigned long ret = ___copy_to_user(to, from, size);
4833+ unsigned long ret;
4834
4835+ if ((long)size < 0 || size > INT_MAX)
4836+ return size;
4837+
4838+ if (!__builtin_constant_p(size))
4839+ check_object_size(from, size, true);
4840+
4841+ ret = ___copy_to_user(to, from, size);
4842 if (unlikely(ret))
4843 ret = copy_to_user_fixup(to, from, size);
4844 return ret;
4845diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4846index 2782681..77ded84 100644
4847--- a/arch/sparc/kernel/Makefile
4848+++ b/arch/sparc/kernel/Makefile
4849@@ -3,7 +3,7 @@
4850 #
4851
4852 asflags-y := -ansi
4853-ccflags-y := -Werror
4854+#ccflags-y := -Werror
4855
4856 extra-y := head_$(BITS).o
4857 extra-y += init_task.o
4858diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
4859index 7690cc2..ece64c9 100644
4860--- a/arch/sparc/kernel/iommu.c
4861+++ b/arch/sparc/kernel/iommu.c
4862@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
4863 spin_unlock_irqrestore(&iommu->lock, flags);
4864 }
4865
4866-static struct dma_map_ops sun4u_dma_ops = {
4867+static const struct dma_map_ops sun4u_dma_ops = {
4868 .alloc_coherent = dma_4u_alloc_coherent,
4869 .free_coherent = dma_4u_free_coherent,
4870 .map_page = dma_4u_map_page,
4871@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
4872 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4873 };
4874
4875-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4876+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4877 EXPORT_SYMBOL(dma_ops);
4878
4879 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4880diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
4881index 9f61fd8..bd048db 100644
4882--- a/arch/sparc/kernel/ioport.c
4883+++ b/arch/sparc/kernel/ioport.c
4884@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
4885 BUG();
4886 }
4887
4888-struct dma_map_ops sbus_dma_ops = {
4889+const struct dma_map_ops sbus_dma_ops = {
4890 .alloc_coherent = sbus_alloc_coherent,
4891 .free_coherent = sbus_free_coherent,
4892 .map_page = sbus_map_page,
4893@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4894 .sync_sg_for_device = sbus_sync_sg_for_device,
4895 };
4896
4897-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4898+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4899 EXPORT_SYMBOL(dma_ops);
4900
4901 static int __init sparc_register_ioport(void)
4902@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
4903 }
4904 }
4905
4906-struct dma_map_ops pci32_dma_ops = {
4907+const struct dma_map_ops pci32_dma_ops = {
4908 .alloc_coherent = pci32_alloc_coherent,
4909 .free_coherent = pci32_free_coherent,
4910 .map_page = pci32_map_page,
4911diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
4912index 04df4ed..55c4b6e 100644
4913--- a/arch/sparc/kernel/kgdb_32.c
4914+++ b/arch/sparc/kernel/kgdb_32.c
4915@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4916 {
4917 }
4918
4919-struct kgdb_arch arch_kgdb_ops = {
4920+const struct kgdb_arch arch_kgdb_ops = {
4921 /* Breakpoint instruction: ta 0x7d */
4922 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4923 };
4924diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
4925index f5a0fd4..d886f71 100644
4926--- a/arch/sparc/kernel/kgdb_64.c
4927+++ b/arch/sparc/kernel/kgdb_64.c
4928@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4929 {
4930 }
4931
4932-struct kgdb_arch arch_kgdb_ops = {
4933+const struct kgdb_arch arch_kgdb_ops = {
4934 /* Breakpoint instruction: ta 0x72 */
4935 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4936 };
4937diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
4938index 23c33ff..d137fbd 100644
4939--- a/arch/sparc/kernel/pci_sun4v.c
4940+++ b/arch/sparc/kernel/pci_sun4v.c
4941@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
4942 spin_unlock_irqrestore(&iommu->lock, flags);
4943 }
4944
4945-static struct dma_map_ops sun4v_dma_ops = {
4946+static const struct dma_map_ops sun4v_dma_ops = {
4947 .alloc_coherent = dma_4v_alloc_coherent,
4948 .free_coherent = dma_4v_free_coherent,
4949 .map_page = dma_4v_map_page,
4950diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4951index c49865b..b41a81b 100644
4952--- a/arch/sparc/kernel/process_32.c
4953+++ b/arch/sparc/kernel/process_32.c
4954@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4955 rw->ins[4], rw->ins[5],
4956 rw->ins[6],
4957 rw->ins[7]);
4958- printk("%pS\n", (void *) rw->ins[7]);
4959+ printk("%pA\n", (void *) rw->ins[7]);
4960 rw = (struct reg_window32 *) rw->ins[6];
4961 }
4962 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4963@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4964
4965 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4966 r->psr, r->pc, r->npc, r->y, print_tainted());
4967- printk("PC: <%pS>\n", (void *) r->pc);
4968+ printk("PC: <%pA>\n", (void *) r->pc);
4969 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4970 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4971 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4972 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4973 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4974 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4975- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4976+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4977
4978 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4979 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4980@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4981 rw = (struct reg_window32 *) fp;
4982 pc = rw->ins[7];
4983 printk("[%08lx : ", pc);
4984- printk("%pS ] ", (void *) pc);
4985+ printk("%pA ] ", (void *) pc);
4986 fp = rw->ins[6];
4987 } while (++count < 16);
4988 printk("\n");
4989diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4990index cb70476..3d0c191 100644
4991--- a/arch/sparc/kernel/process_64.c
4992+++ b/arch/sparc/kernel/process_64.c
4993@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4994 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4995 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4996 if (regs->tstate & TSTATE_PRIV)
4997- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4998+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4999 }
5000
5001 void show_regs(struct pt_regs *regs)
5002 {
5003 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5004 regs->tpc, regs->tnpc, regs->y, print_tainted());
5005- printk("TPC: <%pS>\n", (void *) regs->tpc);
5006+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5007 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5008 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5009 regs->u_regs[3]);
5010@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5011 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5012 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5013 regs->u_regs[15]);
5014- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5015+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5016 show_regwindow(regs);
5017 }
5018
5019@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5020 ((tp && tp->task) ? tp->task->pid : -1));
5021
5022 if (gp->tstate & TSTATE_PRIV) {
5023- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5024+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5025 (void *) gp->tpc,
5026 (void *) gp->o7,
5027 (void *) gp->i7,
5028diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5029index 6edc4e5..06a69b4 100644
5030--- a/arch/sparc/kernel/sigutil_64.c
5031+++ b/arch/sparc/kernel/sigutil_64.c
5032@@ -2,6 +2,7 @@
5033 #include <linux/types.h>
5034 #include <linux/thread_info.h>
5035 #include <linux/uaccess.h>
5036+#include <linux/errno.h>
5037
5038 #include <asm/sigcontext.h>
5039 #include <asm/fpumacro.h>
5040diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5041index 3a82e65..ce0a53a 100644
5042--- a/arch/sparc/kernel/sys_sparc_32.c
5043+++ b/arch/sparc/kernel/sys_sparc_32.c
5044@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5045 if (ARCH_SUN4C && len > 0x20000000)
5046 return -ENOMEM;
5047 if (!addr)
5048- addr = TASK_UNMAPPED_BASE;
5049+ addr = current->mm->mmap_base;
5050
5051 if (flags & MAP_SHARED)
5052 addr = COLOUR_ALIGN(addr);
5053@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5054 }
5055 if (TASK_SIZE - PAGE_SIZE - len < addr)
5056 return -ENOMEM;
5057- if (!vmm || addr + len <= vmm->vm_start)
5058+ if (check_heap_stack_gap(vmm, addr, len))
5059 return addr;
5060 addr = vmm->vm_end;
5061 if (flags & MAP_SHARED)
5062diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5063index cfa0e19..98972ac 100644
5064--- a/arch/sparc/kernel/sys_sparc_64.c
5065+++ b/arch/sparc/kernel/sys_sparc_64.c
5066@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5067 /* We do not accept a shared mapping if it would violate
5068 * cache aliasing constraints.
5069 */
5070- if ((flags & MAP_SHARED) &&
5071+ if ((filp || (flags & MAP_SHARED)) &&
5072 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5073 return -EINVAL;
5074 return addr;
5075@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5076 if (filp || (flags & MAP_SHARED))
5077 do_color_align = 1;
5078
5079+#ifdef CONFIG_PAX_RANDMMAP
5080+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5081+#endif
5082+
5083 if (addr) {
5084 if (do_color_align)
5085 addr = COLOUR_ALIGN(addr, pgoff);
5086@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5087 addr = PAGE_ALIGN(addr);
5088
5089 vma = find_vma(mm, addr);
5090- if (task_size - len >= addr &&
5091- (!vma || addr + len <= vma->vm_start))
5092+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5093 return addr;
5094 }
5095
5096 if (len > mm->cached_hole_size) {
5097- start_addr = addr = mm->free_area_cache;
5098+ start_addr = addr = mm->free_area_cache;
5099 } else {
5100- start_addr = addr = TASK_UNMAPPED_BASE;
5101+ start_addr = addr = mm->mmap_base;
5102 mm->cached_hole_size = 0;
5103 }
5104
5105@@ -175,14 +178,14 @@ full_search:
5106 vma = find_vma(mm, VA_EXCLUDE_END);
5107 }
5108 if (unlikely(task_size < addr)) {
5109- if (start_addr != TASK_UNMAPPED_BASE) {
5110- start_addr = addr = TASK_UNMAPPED_BASE;
5111+ if (start_addr != mm->mmap_base) {
5112+ start_addr = addr = mm->mmap_base;
5113 mm->cached_hole_size = 0;
5114 goto full_search;
5115 }
5116 return -ENOMEM;
5117 }
5118- if (likely(!vma || addr + len <= vma->vm_start)) {
5119+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5120 /*
5121 * Remember the place where we stopped the search:
5122 */
5123@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5124 /* We do not accept a shared mapping if it would violate
5125 * cache aliasing constraints.
5126 */
5127- if ((flags & MAP_SHARED) &&
5128+ if ((filp || (flags & MAP_SHARED)) &&
5129 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5130 return -EINVAL;
5131 return addr;
5132@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5133 addr = PAGE_ALIGN(addr);
5134
5135 vma = find_vma(mm, addr);
5136- if (task_size - len >= addr &&
5137- (!vma || addr + len <= vma->vm_start))
5138+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5139 return addr;
5140 }
5141
5142@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5143 /* make sure it can fit in the remaining address space */
5144 if (likely(addr > len)) {
5145 vma = find_vma(mm, addr-len);
5146- if (!vma || addr <= vma->vm_start) {
5147+ if (check_heap_stack_gap(vma, addr - len, len)) {
5148 /* remember the address as a hint for next time */
5149 return (mm->free_area_cache = addr-len);
5150 }
5151@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5152 if (unlikely(mm->mmap_base < len))
5153 goto bottomup;
5154
5155- addr = mm->mmap_base-len;
5156- if (do_color_align)
5157- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5158+ addr = mm->mmap_base - len;
5159
5160 do {
5161+ if (do_color_align)
5162+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5163 /*
5164 * Lookup failure means no vma is above this address,
5165 * else if new region fits below vma->vm_start,
5166 * return with success:
5167 */
5168 vma = find_vma(mm, addr);
5169- if (likely(!vma || addr+len <= vma->vm_start)) {
5170+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5171 /* remember the address as a hint for next time */
5172 return (mm->free_area_cache = addr);
5173 }
5174@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5175 mm->cached_hole_size = vma->vm_start - addr;
5176
5177 /* try just below the current vma->vm_start */
5178- addr = vma->vm_start-len;
5179- if (do_color_align)
5180- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5181- } while (likely(len < vma->vm_start));
5182+ addr = skip_heap_stack_gap(vma, len);
5183+ } while (!IS_ERR_VALUE(addr));
5184
5185 bottomup:
5186 /*
5187@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5188 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5189 sysctl_legacy_va_layout) {
5190 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5191+
5192+#ifdef CONFIG_PAX_RANDMMAP
5193+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5194+ mm->mmap_base += mm->delta_mmap;
5195+#endif
5196+
5197 mm->get_unmapped_area = arch_get_unmapped_area;
5198 mm->unmap_area = arch_unmap_area;
5199 } else {
5200@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5201 gap = (task_size / 6 * 5);
5202
5203 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5204+
5205+#ifdef CONFIG_PAX_RANDMMAP
5206+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5207+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5208+#endif
5209+
5210 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5211 mm->unmap_area = arch_unmap_area_topdown;
5212 }
5213diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5214index c0490c7..84959d1 100644
5215--- a/arch/sparc/kernel/traps_32.c
5216+++ b/arch/sparc/kernel/traps_32.c
5217@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5218 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5219 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5220
5221+extern void gr_handle_kernel_exploit(void);
5222+
5223 void die_if_kernel(char *str, struct pt_regs *regs)
5224 {
5225 static int die_counter;
5226@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5227 count++ < 30 &&
5228 (((unsigned long) rw) >= PAGE_OFFSET) &&
5229 !(((unsigned long) rw) & 0x7)) {
5230- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5231+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5232 (void *) rw->ins[7]);
5233 rw = (struct reg_window32 *)rw->ins[6];
5234 }
5235 }
5236 printk("Instruction DUMP:");
5237 instruction_dump ((unsigned long *) regs->pc);
5238- if(regs->psr & PSR_PS)
5239+ if(regs->psr & PSR_PS) {
5240+ gr_handle_kernel_exploit();
5241 do_exit(SIGKILL);
5242+ }
5243 do_exit(SIGSEGV);
5244 }
5245
5246diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5247index 10f7bb9..cdb6793 100644
5248--- a/arch/sparc/kernel/traps_64.c
5249+++ b/arch/sparc/kernel/traps_64.c
5250@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5251 i + 1,
5252 p->trapstack[i].tstate, p->trapstack[i].tpc,
5253 p->trapstack[i].tnpc, p->trapstack[i].tt);
5254- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5255+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5256 }
5257 }
5258
5259@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5260
5261 lvl -= 0x100;
5262 if (regs->tstate & TSTATE_PRIV) {
5263+
5264+#ifdef CONFIG_PAX_REFCOUNT
5265+ if (lvl == 6)
5266+ pax_report_refcount_overflow(regs);
5267+#endif
5268+
5269 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5270 die_if_kernel(buffer, regs);
5271 }
5272@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5273 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5274 {
5275 char buffer[32];
5276-
5277+
5278 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5279 0, lvl, SIGTRAP) == NOTIFY_STOP)
5280 return;
5281
5282+#ifdef CONFIG_PAX_REFCOUNT
5283+ if (lvl == 6)
5284+ pax_report_refcount_overflow(regs);
5285+#endif
5286+
5287 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5288
5289 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5290@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5291 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5292 printk("%s" "ERROR(%d): ",
5293 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5294- printk("TPC<%pS>\n", (void *) regs->tpc);
5295+ printk("TPC<%pA>\n", (void *) regs->tpc);
5296 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5297 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5298 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5299@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5300 smp_processor_id(),
5301 (type & 0x1) ? 'I' : 'D',
5302 regs->tpc);
5303- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5304+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5305 panic("Irrecoverable Cheetah+ parity error.");
5306 }
5307
5308@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5309 smp_processor_id(),
5310 (type & 0x1) ? 'I' : 'D',
5311 regs->tpc);
5312- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5313+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5314 }
5315
5316 struct sun4v_error_entry {
5317@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5318
5319 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5320 regs->tpc, tl);
5321- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5322+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5323 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5324- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5325+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5326 (void *) regs->u_regs[UREG_I7]);
5327 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5328 "pte[%lx] error[%lx]\n",
5329@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5330
5331 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5332 regs->tpc, tl);
5333- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5334+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5335 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5336- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5337+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5338 (void *) regs->u_regs[UREG_I7]);
5339 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5340 "pte[%lx] error[%lx]\n",
5341@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5342 fp = (unsigned long)sf->fp + STACK_BIAS;
5343 }
5344
5345- printk(" [%016lx] %pS\n", pc, (void *) pc);
5346+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5347 } while (++count < 16);
5348 }
5349
5350@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5351 return (struct reg_window *) (fp + STACK_BIAS);
5352 }
5353
5354+extern void gr_handle_kernel_exploit(void);
5355+
5356 void die_if_kernel(char *str, struct pt_regs *regs)
5357 {
5358 static int die_counter;
5359@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5360 while (rw &&
5361 count++ < 30&&
5362 is_kernel_stack(current, rw)) {
5363- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5364+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5365 (void *) rw->ins[7]);
5366
5367 rw = kernel_stack_up(rw);
5368@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5369 }
5370 user_instruction_dump ((unsigned int __user *) regs->tpc);
5371 }
5372- if (regs->tstate & TSTATE_PRIV)
5373+ if (regs->tstate & TSTATE_PRIV) {
5374+ gr_handle_kernel_exploit();
5375 do_exit(SIGKILL);
5376+ }
5377+
5378 do_exit(SIGSEGV);
5379 }
5380 EXPORT_SYMBOL(die_if_kernel);
5381diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5382index be183fe..1c8d332 100644
5383--- a/arch/sparc/kernel/una_asm_64.S
5384+++ b/arch/sparc/kernel/una_asm_64.S
5385@@ -127,7 +127,7 @@ do_int_load:
5386 wr %o5, 0x0, %asi
5387 retl
5388 mov 0, %o0
5389- .size __do_int_load, .-__do_int_load
5390+ .size do_int_load, .-do_int_load
5391
5392 .section __ex_table,"a"
5393 .word 4b, __retl_efault
5394diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5395index 3792099..2af17d8 100644
5396--- a/arch/sparc/kernel/unaligned_64.c
5397+++ b/arch/sparc/kernel/unaligned_64.c
5398@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5399 if (count < 5) {
5400 last_time = jiffies;
5401 count++;
5402- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5403+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5404 regs->tpc, (void *) regs->tpc);
5405 }
5406 }
5407diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5408index e75faf0..24f12f9 100644
5409--- a/arch/sparc/lib/Makefile
5410+++ b/arch/sparc/lib/Makefile
5411@@ -2,7 +2,7 @@
5412 #
5413
5414 asflags-y := -ansi -DST_DIV0=0x02
5415-ccflags-y := -Werror
5416+#ccflags-y := -Werror
5417
5418 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5419 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5420diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5421index 0268210..f0291ca 100644
5422--- a/arch/sparc/lib/atomic_64.S
5423+++ b/arch/sparc/lib/atomic_64.S
5424@@ -18,7 +18,12 @@
5425 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5426 BACKOFF_SETUP(%o2)
5427 1: lduw [%o1], %g1
5428- add %g1, %o0, %g7
5429+ addcc %g1, %o0, %g7
5430+
5431+#ifdef CONFIG_PAX_REFCOUNT
5432+ tvs %icc, 6
5433+#endif
5434+
5435 cas [%o1], %g1, %g7
5436 cmp %g1, %g7
5437 bne,pn %icc, 2f
5438@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5439 2: BACKOFF_SPIN(%o2, %o3, 1b)
5440 .size atomic_add, .-atomic_add
5441
5442+ .globl atomic_add_unchecked
5443+ .type atomic_add_unchecked,#function
5444+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5445+ BACKOFF_SETUP(%o2)
5446+1: lduw [%o1], %g1
5447+ add %g1, %o0, %g7
5448+ cas [%o1], %g1, %g7
5449+ cmp %g1, %g7
5450+ bne,pn %icc, 2f
5451+ nop
5452+ retl
5453+ nop
5454+2: BACKOFF_SPIN(%o2, %o3, 1b)
5455+ .size atomic_add_unchecked, .-atomic_add_unchecked
5456+
5457 .globl atomic_sub
5458 .type atomic_sub,#function
5459 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5460 BACKOFF_SETUP(%o2)
5461 1: lduw [%o1], %g1
5462- sub %g1, %o0, %g7
5463+ subcc %g1, %o0, %g7
5464+
5465+#ifdef CONFIG_PAX_REFCOUNT
5466+ tvs %icc, 6
5467+#endif
5468+
5469 cas [%o1], %g1, %g7
5470 cmp %g1, %g7
5471 bne,pn %icc, 2f
5472@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5473 2: BACKOFF_SPIN(%o2, %o3, 1b)
5474 .size atomic_sub, .-atomic_sub
5475
5476+ .globl atomic_sub_unchecked
5477+ .type atomic_sub_unchecked,#function
5478+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5479+ BACKOFF_SETUP(%o2)
5480+1: lduw [%o1], %g1
5481+ sub %g1, %o0, %g7
5482+ cas [%o1], %g1, %g7
5483+ cmp %g1, %g7
5484+ bne,pn %icc, 2f
5485+ nop
5486+ retl
5487+ nop
5488+2: BACKOFF_SPIN(%o2, %o3, 1b)
5489+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5490+
5491 .globl atomic_add_ret
5492 .type atomic_add_ret,#function
5493 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5494 BACKOFF_SETUP(%o2)
5495 1: lduw [%o1], %g1
5496- add %g1, %o0, %g7
5497+ addcc %g1, %o0, %g7
5498+
5499+#ifdef CONFIG_PAX_REFCOUNT
5500+ tvs %icc, 6
5501+#endif
5502+
5503 cas [%o1], %g1, %g7
5504 cmp %g1, %g7
5505 bne,pn %icc, 2f
5506@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5507 2: BACKOFF_SPIN(%o2, %o3, 1b)
5508 .size atomic_add_ret, .-atomic_add_ret
5509
5510+ .globl atomic_add_ret_unchecked
5511+ .type atomic_add_ret_unchecked,#function
5512+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5513+ BACKOFF_SETUP(%o2)
5514+1: lduw [%o1], %g1
5515+ addcc %g1, %o0, %g7
5516+ cas [%o1], %g1, %g7
5517+ cmp %g1, %g7
5518+ bne,pn %icc, 2f
5519+ add %g7, %o0, %g7
5520+ sra %g7, 0, %o0
5521+ retl
5522+ nop
5523+2: BACKOFF_SPIN(%o2, %o3, 1b)
5524+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5525+
5526 .globl atomic_sub_ret
5527 .type atomic_sub_ret,#function
5528 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5529 BACKOFF_SETUP(%o2)
5530 1: lduw [%o1], %g1
5531- sub %g1, %o0, %g7
5532+ subcc %g1, %o0, %g7
5533+
5534+#ifdef CONFIG_PAX_REFCOUNT
5535+ tvs %icc, 6
5536+#endif
5537+
5538 cas [%o1], %g1, %g7
5539 cmp %g1, %g7
5540 bne,pn %icc, 2f
5541@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5542 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5543 BACKOFF_SETUP(%o2)
5544 1: ldx [%o1], %g1
5545- add %g1, %o0, %g7
5546+ addcc %g1, %o0, %g7
5547+
5548+#ifdef CONFIG_PAX_REFCOUNT
5549+ tvs %xcc, 6
5550+#endif
5551+
5552 casx [%o1], %g1, %g7
5553 cmp %g1, %g7
5554 bne,pn %xcc, 2f
5555@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5556 2: BACKOFF_SPIN(%o2, %o3, 1b)
5557 .size atomic64_add, .-atomic64_add
5558
5559+ .globl atomic64_add_unchecked
5560+ .type atomic64_add_unchecked,#function
5561+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5562+ BACKOFF_SETUP(%o2)
5563+1: ldx [%o1], %g1
5564+ addcc %g1, %o0, %g7
5565+ casx [%o1], %g1, %g7
5566+ cmp %g1, %g7
5567+ bne,pn %xcc, 2f
5568+ nop
5569+ retl
5570+ nop
5571+2: BACKOFF_SPIN(%o2, %o3, 1b)
5572+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5573+
5574 .globl atomic64_sub
5575 .type atomic64_sub,#function
5576 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5577 BACKOFF_SETUP(%o2)
5578 1: ldx [%o1], %g1
5579- sub %g1, %o0, %g7
5580+ subcc %g1, %o0, %g7
5581+
5582+#ifdef CONFIG_PAX_REFCOUNT
5583+ tvs %xcc, 6
5584+#endif
5585+
5586 casx [%o1], %g1, %g7
5587 cmp %g1, %g7
5588 bne,pn %xcc, 2f
5589@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5590 2: BACKOFF_SPIN(%o2, %o3, 1b)
5591 .size atomic64_sub, .-atomic64_sub
5592
5593+ .globl atomic64_sub_unchecked
5594+ .type atomic64_sub_unchecked,#function
5595+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5596+ BACKOFF_SETUP(%o2)
5597+1: ldx [%o1], %g1
5598+ subcc %g1, %o0, %g7
5599+ casx [%o1], %g1, %g7
5600+ cmp %g1, %g7
5601+ bne,pn %xcc, 2f
5602+ nop
5603+ retl
5604+ nop
5605+2: BACKOFF_SPIN(%o2, %o3, 1b)
5606+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5607+
5608 .globl atomic64_add_ret
5609 .type atomic64_add_ret,#function
5610 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5611 BACKOFF_SETUP(%o2)
5612 1: ldx [%o1], %g1
5613- add %g1, %o0, %g7
5614+ addcc %g1, %o0, %g7
5615+
5616+#ifdef CONFIG_PAX_REFCOUNT
5617+ tvs %xcc, 6
5618+#endif
5619+
5620 casx [%o1], %g1, %g7
5621 cmp %g1, %g7
5622 bne,pn %xcc, 2f
5623@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5624 2: BACKOFF_SPIN(%o2, %o3, 1b)
5625 .size atomic64_add_ret, .-atomic64_add_ret
5626
5627+ .globl atomic64_add_ret_unchecked
5628+ .type atomic64_add_ret_unchecked,#function
5629+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5630+ BACKOFF_SETUP(%o2)
5631+1: ldx [%o1], %g1
5632+ addcc %g1, %o0, %g7
5633+ casx [%o1], %g1, %g7
5634+ cmp %g1, %g7
5635+ bne,pn %xcc, 2f
5636+ add %g7, %o0, %g7
5637+ mov %g7, %o0
5638+ retl
5639+ nop
5640+2: BACKOFF_SPIN(%o2, %o3, 1b)
5641+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5642+
5643 .globl atomic64_sub_ret
5644 .type atomic64_sub_ret,#function
5645 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5646 BACKOFF_SETUP(%o2)
5647 1: ldx [%o1], %g1
5648- sub %g1, %o0, %g7
5649+ subcc %g1, %o0, %g7
5650+
5651+#ifdef CONFIG_PAX_REFCOUNT
5652+ tvs %xcc, 6
5653+#endif
5654+
5655 casx [%o1], %g1, %g7
5656 cmp %g1, %g7
5657 bne,pn %xcc, 2f
5658diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5659index 704b126..2e79d76 100644
5660--- a/arch/sparc/lib/ksyms.c
5661+++ b/arch/sparc/lib/ksyms.c
5662@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5663
5664 /* Atomic counter implementation. */
5665 EXPORT_SYMBOL(atomic_add);
5666+EXPORT_SYMBOL(atomic_add_unchecked);
5667 EXPORT_SYMBOL(atomic_add_ret);
5668+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5669 EXPORT_SYMBOL(atomic_sub);
5670+EXPORT_SYMBOL(atomic_sub_unchecked);
5671 EXPORT_SYMBOL(atomic_sub_ret);
5672 EXPORT_SYMBOL(atomic64_add);
5673+EXPORT_SYMBOL(atomic64_add_unchecked);
5674 EXPORT_SYMBOL(atomic64_add_ret);
5675+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5676 EXPORT_SYMBOL(atomic64_sub);
5677+EXPORT_SYMBOL(atomic64_sub_unchecked);
5678 EXPORT_SYMBOL(atomic64_sub_ret);
5679
5680 /* Atomic bit operations. */
5681diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5682index 91a7d29..ce75c29 100644
5683--- a/arch/sparc/lib/rwsem_64.S
5684+++ b/arch/sparc/lib/rwsem_64.S
5685@@ -11,7 +11,12 @@
5686 .globl __down_read
5687 __down_read:
5688 1: lduw [%o0], %g1
5689- add %g1, 1, %g7
5690+ addcc %g1, 1, %g7
5691+
5692+#ifdef CONFIG_PAX_REFCOUNT
5693+ tvs %icc, 6
5694+#endif
5695+
5696 cas [%o0], %g1, %g7
5697 cmp %g1, %g7
5698 bne,pn %icc, 1b
5699@@ -33,7 +38,12 @@ __down_read:
5700 .globl __down_read_trylock
5701 __down_read_trylock:
5702 1: lduw [%o0], %g1
5703- add %g1, 1, %g7
5704+ addcc %g1, 1, %g7
5705+
5706+#ifdef CONFIG_PAX_REFCOUNT
5707+ tvs %icc, 6
5708+#endif
5709+
5710 cmp %g7, 0
5711 bl,pn %icc, 2f
5712 mov 0, %o1
5713@@ -51,7 +61,12 @@ __down_write:
5714 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5715 1:
5716 lduw [%o0], %g3
5717- add %g3, %g1, %g7
5718+ addcc %g3, %g1, %g7
5719+
5720+#ifdef CONFIG_PAX_REFCOUNT
5721+ tvs %icc, 6
5722+#endif
5723+
5724 cas [%o0], %g3, %g7
5725 cmp %g3, %g7
5726 bne,pn %icc, 1b
5727@@ -77,7 +92,12 @@ __down_write_trylock:
5728 cmp %g3, 0
5729 bne,pn %icc, 2f
5730 mov 0, %o1
5731- add %g3, %g1, %g7
5732+ addcc %g3, %g1, %g7
5733+
5734+#ifdef CONFIG_PAX_REFCOUNT
5735+ tvs %icc, 6
5736+#endif
5737+
5738 cas [%o0], %g3, %g7
5739 cmp %g3, %g7
5740 bne,pn %icc, 1b
5741@@ -90,7 +110,12 @@ __down_write_trylock:
5742 __up_read:
5743 1:
5744 lduw [%o0], %g1
5745- sub %g1, 1, %g7
5746+ subcc %g1, 1, %g7
5747+
5748+#ifdef CONFIG_PAX_REFCOUNT
5749+ tvs %icc, 6
5750+#endif
5751+
5752 cas [%o0], %g1, %g7
5753 cmp %g1, %g7
5754 bne,pn %icc, 1b
5755@@ -118,7 +143,12 @@ __up_write:
5756 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5757 1:
5758 lduw [%o0], %g3
5759- sub %g3, %g1, %g7
5760+ subcc %g3, %g1, %g7
5761+
5762+#ifdef CONFIG_PAX_REFCOUNT
5763+ tvs %icc, 6
5764+#endif
5765+
5766 cas [%o0], %g3, %g7
5767 cmp %g3, %g7
5768 bne,pn %icc, 1b
5769@@ -143,7 +173,12 @@ __downgrade_write:
5770 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5771 1:
5772 lduw [%o0], %g3
5773- sub %g3, %g1, %g7
5774+ subcc %g3, %g1, %g7
5775+
5776+#ifdef CONFIG_PAX_REFCOUNT
5777+ tvs %icc, 6
5778+#endif
5779+
5780 cas [%o0], %g3, %g7
5781 cmp %g3, %g7
5782 bne,pn %icc, 1b
5783diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5784index 79836a7..62f47a2 100644
5785--- a/arch/sparc/mm/Makefile
5786+++ b/arch/sparc/mm/Makefile
5787@@ -2,7 +2,7 @@
5788 #
5789
5790 asflags-y := -ansi
5791-ccflags-y := -Werror
5792+#ccflags-y := -Werror
5793
5794 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5795 obj-y += fault_$(BITS).o
5796diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5797index b99f81c..3453e93 100644
5798--- a/arch/sparc/mm/fault_32.c
5799+++ b/arch/sparc/mm/fault_32.c
5800@@ -21,6 +21,9 @@
5801 #include <linux/interrupt.h>
5802 #include <linux/module.h>
5803 #include <linux/kdebug.h>
5804+#include <linux/slab.h>
5805+#include <linux/pagemap.h>
5806+#include <linux/compiler.h>
5807
5808 #include <asm/system.h>
5809 #include <asm/page.h>
5810@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5811 return safe_compute_effective_address(regs, insn);
5812 }
5813
5814+#ifdef CONFIG_PAX_PAGEEXEC
5815+#ifdef CONFIG_PAX_DLRESOLVE
5816+static void pax_emuplt_close(struct vm_area_struct *vma)
5817+{
5818+ vma->vm_mm->call_dl_resolve = 0UL;
5819+}
5820+
5821+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5822+{
5823+ unsigned int *kaddr;
5824+
5825+ vmf->page = alloc_page(GFP_HIGHUSER);
5826+ if (!vmf->page)
5827+ return VM_FAULT_OOM;
5828+
5829+ kaddr = kmap(vmf->page);
5830+ memset(kaddr, 0, PAGE_SIZE);
5831+ kaddr[0] = 0x9DE3BFA8U; /* save */
5832+ flush_dcache_page(vmf->page);
5833+ kunmap(vmf->page);
5834+ return VM_FAULT_MAJOR;
5835+}
5836+
5837+static const struct vm_operations_struct pax_vm_ops = {
5838+ .close = pax_emuplt_close,
5839+ .fault = pax_emuplt_fault
5840+};
5841+
5842+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5843+{
5844+ int ret;
5845+
5846+ vma->vm_mm = current->mm;
5847+ vma->vm_start = addr;
5848+ vma->vm_end = addr + PAGE_SIZE;
5849+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5850+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5851+ vma->vm_ops = &pax_vm_ops;
5852+
5853+ ret = insert_vm_struct(current->mm, vma);
5854+ if (ret)
5855+ return ret;
5856+
5857+ ++current->mm->total_vm;
5858+ return 0;
5859+}
5860+#endif
5861+
5862+/*
5863+ * PaX: decide what to do with offenders (regs->pc = fault address)
5864+ *
5865+ * returns 1 when task should be killed
5866+ * 2 when patched PLT trampoline was detected
5867+ * 3 when unpatched PLT trampoline was detected
5868+ */
5869+static int pax_handle_fetch_fault(struct pt_regs *regs)
5870+{
5871+
5872+#ifdef CONFIG_PAX_EMUPLT
5873+ int err;
5874+
5875+ do { /* PaX: patched PLT emulation #1 */
5876+ unsigned int sethi1, sethi2, jmpl;
5877+
5878+ err = get_user(sethi1, (unsigned int *)regs->pc);
5879+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5880+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5881+
5882+ if (err)
5883+ break;
5884+
5885+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5886+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5887+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5888+ {
5889+ unsigned int addr;
5890+
5891+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5892+ addr = regs->u_regs[UREG_G1];
5893+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5894+ regs->pc = addr;
5895+ regs->npc = addr+4;
5896+ return 2;
5897+ }
5898+ } while (0);
5899+
5900+ { /* PaX: patched PLT emulation #2 */
5901+ unsigned int ba;
5902+
5903+ err = get_user(ba, (unsigned int *)regs->pc);
5904+
5905+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5906+ unsigned int addr;
5907+
5908+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5909+ regs->pc = addr;
5910+ regs->npc = addr+4;
5911+ return 2;
5912+ }
5913+ }
5914+
5915+ do { /* PaX: patched PLT emulation #3 */
5916+ unsigned int sethi, jmpl, nop;
5917+
5918+ err = get_user(sethi, (unsigned int *)regs->pc);
5919+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5920+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5921+
5922+ if (err)
5923+ break;
5924+
5925+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5926+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5927+ nop == 0x01000000U)
5928+ {
5929+ unsigned int addr;
5930+
5931+ addr = (sethi & 0x003FFFFFU) << 10;
5932+ regs->u_regs[UREG_G1] = addr;
5933+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5934+ regs->pc = addr;
5935+ regs->npc = addr+4;
5936+ return 2;
5937+ }
5938+ } while (0);
5939+
5940+ do { /* PaX: unpatched PLT emulation step 1 */
5941+ unsigned int sethi, ba, nop;
5942+
5943+ err = get_user(sethi, (unsigned int *)regs->pc);
5944+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5945+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5946+
5947+ if (err)
5948+ break;
5949+
5950+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5951+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5952+ nop == 0x01000000U)
5953+ {
5954+ unsigned int addr, save, call;
5955+
5956+ if ((ba & 0xFFC00000U) == 0x30800000U)
5957+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5958+ else
5959+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5960+
5961+ err = get_user(save, (unsigned int *)addr);
5962+ err |= get_user(call, (unsigned int *)(addr+4));
5963+ err |= get_user(nop, (unsigned int *)(addr+8));
5964+ if (err)
5965+ break;
5966+
5967+#ifdef CONFIG_PAX_DLRESOLVE
5968+ if (save == 0x9DE3BFA8U &&
5969+ (call & 0xC0000000U) == 0x40000000U &&
5970+ nop == 0x01000000U)
5971+ {
5972+ struct vm_area_struct *vma;
5973+ unsigned long call_dl_resolve;
5974+
5975+ down_read(&current->mm->mmap_sem);
5976+ call_dl_resolve = current->mm->call_dl_resolve;
5977+ up_read(&current->mm->mmap_sem);
5978+ if (likely(call_dl_resolve))
5979+ goto emulate;
5980+
5981+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5982+
5983+ down_write(&current->mm->mmap_sem);
5984+ if (current->mm->call_dl_resolve) {
5985+ call_dl_resolve = current->mm->call_dl_resolve;
5986+ up_write(&current->mm->mmap_sem);
5987+ if (vma)
5988+ kmem_cache_free(vm_area_cachep, vma);
5989+ goto emulate;
5990+ }
5991+
5992+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5993+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5994+ up_write(&current->mm->mmap_sem);
5995+ if (vma)
5996+ kmem_cache_free(vm_area_cachep, vma);
5997+ return 1;
5998+ }
5999+
6000+ if (pax_insert_vma(vma, call_dl_resolve)) {
6001+ up_write(&current->mm->mmap_sem);
6002+ kmem_cache_free(vm_area_cachep, vma);
6003+ return 1;
6004+ }
6005+
6006+ current->mm->call_dl_resolve = call_dl_resolve;
6007+ up_write(&current->mm->mmap_sem);
6008+
6009+emulate:
6010+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6011+ regs->pc = call_dl_resolve;
6012+ regs->npc = addr+4;
6013+ return 3;
6014+ }
6015+#endif
6016+
6017+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6018+ if ((save & 0xFFC00000U) == 0x05000000U &&
6019+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6020+ nop == 0x01000000U)
6021+ {
6022+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6023+ regs->u_regs[UREG_G2] = addr + 4;
6024+ addr = (save & 0x003FFFFFU) << 10;
6025+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6026+ regs->pc = addr;
6027+ regs->npc = addr+4;
6028+ return 3;
6029+ }
6030+ }
6031+ } while (0);
6032+
6033+ do { /* PaX: unpatched PLT emulation step 2 */
6034+ unsigned int save, call, nop;
6035+
6036+ err = get_user(save, (unsigned int *)(regs->pc-4));
6037+ err |= get_user(call, (unsigned int *)regs->pc);
6038+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6039+ if (err)
6040+ break;
6041+
6042+ if (save == 0x9DE3BFA8U &&
6043+ (call & 0xC0000000U) == 0x40000000U &&
6044+ nop == 0x01000000U)
6045+ {
6046+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6047+
6048+ regs->u_regs[UREG_RETPC] = regs->pc;
6049+ regs->pc = dl_resolve;
6050+ regs->npc = dl_resolve+4;
6051+ return 3;
6052+ }
6053+ } while (0);
6054+#endif
6055+
6056+ return 1;
6057+}
6058+
6059+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6060+{
6061+ unsigned long i;
6062+
6063+ printk(KERN_ERR "PAX: bytes at PC: ");
6064+ for (i = 0; i < 8; i++) {
6065+ unsigned int c;
6066+ if (get_user(c, (unsigned int *)pc+i))
6067+ printk(KERN_CONT "???????? ");
6068+ else
6069+ printk(KERN_CONT "%08x ", c);
6070+ }
6071+ printk("\n");
6072+}
6073+#endif
6074+
6075 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6076 unsigned long address)
6077 {
6078@@ -231,6 +495,24 @@ good_area:
6079 if(!(vma->vm_flags & VM_WRITE))
6080 goto bad_area;
6081 } else {
6082+
6083+#ifdef CONFIG_PAX_PAGEEXEC
6084+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6085+ up_read(&mm->mmap_sem);
6086+ switch (pax_handle_fetch_fault(regs)) {
6087+
6088+#ifdef CONFIG_PAX_EMUPLT
6089+ case 2:
6090+ case 3:
6091+ return;
6092+#endif
6093+
6094+ }
6095+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6096+ do_group_exit(SIGKILL);
6097+ }
6098+#endif
6099+
6100 /* Allow reads even for write-only mappings */
6101 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6102 goto bad_area;
6103diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6104index 43b0da9..a0b78f9 100644
6105--- a/arch/sparc/mm/fault_64.c
6106+++ b/arch/sparc/mm/fault_64.c
6107@@ -20,6 +20,9 @@
6108 #include <linux/kprobes.h>
6109 #include <linux/kdebug.h>
6110 #include <linux/percpu.h>
6111+#include <linux/slab.h>
6112+#include <linux/pagemap.h>
6113+#include <linux/compiler.h>
6114
6115 #include <asm/page.h>
6116 #include <asm/pgtable.h>
6117@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6118 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6119 regs->tpc);
6120 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6121- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6122+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6123 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6124 dump_stack();
6125 unhandled_fault(regs->tpc, current, regs);
6126@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6127 show_regs(regs);
6128 }
6129
6130+#ifdef CONFIG_PAX_PAGEEXEC
6131+#ifdef CONFIG_PAX_DLRESOLVE
6132+static void pax_emuplt_close(struct vm_area_struct *vma)
6133+{
6134+ vma->vm_mm->call_dl_resolve = 0UL;
6135+}
6136+
6137+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6138+{
6139+ unsigned int *kaddr;
6140+
6141+ vmf->page = alloc_page(GFP_HIGHUSER);
6142+ if (!vmf->page)
6143+ return VM_FAULT_OOM;
6144+
6145+ kaddr = kmap(vmf->page);
6146+ memset(kaddr, 0, PAGE_SIZE);
6147+ kaddr[0] = 0x9DE3BFA8U; /* save */
6148+ flush_dcache_page(vmf->page);
6149+ kunmap(vmf->page);
6150+ return VM_FAULT_MAJOR;
6151+}
6152+
6153+static const struct vm_operations_struct pax_vm_ops = {
6154+ .close = pax_emuplt_close,
6155+ .fault = pax_emuplt_fault
6156+};
6157+
6158+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6159+{
6160+ int ret;
6161+
6162+ vma->vm_mm = current->mm;
6163+ vma->vm_start = addr;
6164+ vma->vm_end = addr + PAGE_SIZE;
6165+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6166+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6167+ vma->vm_ops = &pax_vm_ops;
6168+
6169+ ret = insert_vm_struct(current->mm, vma);
6170+ if (ret)
6171+ return ret;
6172+
6173+ ++current->mm->total_vm;
6174+ return 0;
6175+}
6176+#endif
6177+
6178+/*
6179+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6180+ *
6181+ * returns 1 when task should be killed
6182+ * 2 when patched PLT trampoline was detected
6183+ * 3 when unpatched PLT trampoline was detected
6184+ */
6185+static int pax_handle_fetch_fault(struct pt_regs *regs)
6186+{
6187+
6188+#ifdef CONFIG_PAX_EMUPLT
6189+ int err;
6190+
6191+ do { /* PaX: patched PLT emulation #1 */
6192+ unsigned int sethi1, sethi2, jmpl;
6193+
6194+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6195+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6196+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6197+
6198+ if (err)
6199+ break;
6200+
6201+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6202+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6203+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6204+ {
6205+ unsigned long addr;
6206+
6207+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6208+ addr = regs->u_regs[UREG_G1];
6209+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6210+
6211+ if (test_thread_flag(TIF_32BIT))
6212+ addr &= 0xFFFFFFFFUL;
6213+
6214+ regs->tpc = addr;
6215+ regs->tnpc = addr+4;
6216+ return 2;
6217+ }
6218+ } while (0);
6219+
6220+ { /* PaX: patched PLT emulation #2 */
6221+ unsigned int ba;
6222+
6223+ err = get_user(ba, (unsigned int *)regs->tpc);
6224+
6225+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6226+ unsigned long addr;
6227+
6228+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6229+
6230+ if (test_thread_flag(TIF_32BIT))
6231+ addr &= 0xFFFFFFFFUL;
6232+
6233+ regs->tpc = addr;
6234+ regs->tnpc = addr+4;
6235+ return 2;
6236+ }
6237+ }
6238+
6239+ do { /* PaX: patched PLT emulation #3 */
6240+ unsigned int sethi, jmpl, nop;
6241+
6242+ err = get_user(sethi, (unsigned int *)regs->tpc);
6243+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6244+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6245+
6246+ if (err)
6247+ break;
6248+
6249+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6250+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6251+ nop == 0x01000000U)
6252+ {
6253+ unsigned long addr;
6254+
6255+ addr = (sethi & 0x003FFFFFU) << 10;
6256+ regs->u_regs[UREG_G1] = addr;
6257+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6258+
6259+ if (test_thread_flag(TIF_32BIT))
6260+ addr &= 0xFFFFFFFFUL;
6261+
6262+ regs->tpc = addr;
6263+ regs->tnpc = addr+4;
6264+ return 2;
6265+ }
6266+ } while (0);
6267+
6268+ do { /* PaX: patched PLT emulation #4 */
6269+ unsigned int sethi, mov1, call, mov2;
6270+
6271+ err = get_user(sethi, (unsigned int *)regs->tpc);
6272+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6273+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6274+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6275+
6276+ if (err)
6277+ break;
6278+
6279+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6280+ mov1 == 0x8210000FU &&
6281+ (call & 0xC0000000U) == 0x40000000U &&
6282+ mov2 == 0x9E100001U)
6283+ {
6284+ unsigned long addr;
6285+
6286+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6287+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6288+
6289+ if (test_thread_flag(TIF_32BIT))
6290+ addr &= 0xFFFFFFFFUL;
6291+
6292+ regs->tpc = addr;
6293+ regs->tnpc = addr+4;
6294+ return 2;
6295+ }
6296+ } while (0);
6297+
6298+ do { /* PaX: patched PLT emulation #5 */
6299+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6300+
6301+ err = get_user(sethi, (unsigned int *)regs->tpc);
6302+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6303+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6304+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6305+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6306+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6307+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6308+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6309+
6310+ if (err)
6311+ break;
6312+
6313+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6314+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6315+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6316+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6317+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6318+ sllx == 0x83287020U &&
6319+ jmpl == 0x81C04005U &&
6320+ nop == 0x01000000U)
6321+ {
6322+ unsigned long addr;
6323+
6324+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6325+ regs->u_regs[UREG_G1] <<= 32;
6326+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6327+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6328+ regs->tpc = addr;
6329+ regs->tnpc = addr+4;
6330+ return 2;
6331+ }
6332+ } while (0);
6333+
6334+ do { /* PaX: patched PLT emulation #6 */
6335+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6336+
6337+ err = get_user(sethi, (unsigned int *)regs->tpc);
6338+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6339+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6340+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6341+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6342+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6343+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6344+
6345+ if (err)
6346+ break;
6347+
6348+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6349+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6350+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6351+ sllx == 0x83287020U &&
6352+ (or & 0xFFFFE000U) == 0x8A116000U &&
6353+ jmpl == 0x81C04005U &&
6354+ nop == 0x01000000U)
6355+ {
6356+ unsigned long addr;
6357+
6358+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6359+ regs->u_regs[UREG_G1] <<= 32;
6360+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6361+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6362+ regs->tpc = addr;
6363+ regs->tnpc = addr+4;
6364+ return 2;
6365+ }
6366+ } while (0);
6367+
6368+ do { /* PaX: unpatched PLT emulation step 1 */
6369+ unsigned int sethi, ba, nop;
6370+
6371+ err = get_user(sethi, (unsigned int *)regs->tpc);
6372+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6373+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6374+
6375+ if (err)
6376+ break;
6377+
6378+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6379+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6380+ nop == 0x01000000U)
6381+ {
6382+ unsigned long addr;
6383+ unsigned int save, call;
6384+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6385+
6386+ if ((ba & 0xFFC00000U) == 0x30800000U)
6387+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6388+ else
6389+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6390+
6391+ if (test_thread_flag(TIF_32BIT))
6392+ addr &= 0xFFFFFFFFUL;
6393+
6394+ err = get_user(save, (unsigned int *)addr);
6395+ err |= get_user(call, (unsigned int *)(addr+4));
6396+ err |= get_user(nop, (unsigned int *)(addr+8));
6397+ if (err)
6398+ break;
6399+
6400+#ifdef CONFIG_PAX_DLRESOLVE
6401+ if (save == 0x9DE3BFA8U &&
6402+ (call & 0xC0000000U) == 0x40000000U &&
6403+ nop == 0x01000000U)
6404+ {
6405+ struct vm_area_struct *vma;
6406+ unsigned long call_dl_resolve;
6407+
6408+ down_read(&current->mm->mmap_sem);
6409+ call_dl_resolve = current->mm->call_dl_resolve;
6410+ up_read(&current->mm->mmap_sem);
6411+ if (likely(call_dl_resolve))
6412+ goto emulate;
6413+
6414+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6415+
6416+ down_write(&current->mm->mmap_sem);
6417+ if (current->mm->call_dl_resolve) {
6418+ call_dl_resolve = current->mm->call_dl_resolve;
6419+ up_write(&current->mm->mmap_sem);
6420+ if (vma)
6421+ kmem_cache_free(vm_area_cachep, vma);
6422+ goto emulate;
6423+ }
6424+
6425+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6426+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6427+ up_write(&current->mm->mmap_sem);
6428+ if (vma)
6429+ kmem_cache_free(vm_area_cachep, vma);
6430+ return 1;
6431+ }
6432+
6433+ if (pax_insert_vma(vma, call_dl_resolve)) {
6434+ up_write(&current->mm->mmap_sem);
6435+ kmem_cache_free(vm_area_cachep, vma);
6436+ return 1;
6437+ }
6438+
6439+ current->mm->call_dl_resolve = call_dl_resolve;
6440+ up_write(&current->mm->mmap_sem);
6441+
6442+emulate:
6443+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6444+ regs->tpc = call_dl_resolve;
6445+ regs->tnpc = addr+4;
6446+ return 3;
6447+ }
6448+#endif
6449+
6450+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6451+ if ((save & 0xFFC00000U) == 0x05000000U &&
6452+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6453+ nop == 0x01000000U)
6454+ {
6455+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6456+ regs->u_regs[UREG_G2] = addr + 4;
6457+ addr = (save & 0x003FFFFFU) << 10;
6458+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6459+
6460+ if (test_thread_flag(TIF_32BIT))
6461+ addr &= 0xFFFFFFFFUL;
6462+
6463+ regs->tpc = addr;
6464+ regs->tnpc = addr+4;
6465+ return 3;
6466+ }
6467+
6468+ /* PaX: 64-bit PLT stub */
6469+ err = get_user(sethi1, (unsigned int *)addr);
6470+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6471+ err |= get_user(or1, (unsigned int *)(addr+8));
6472+ err |= get_user(or2, (unsigned int *)(addr+12));
6473+ err |= get_user(sllx, (unsigned int *)(addr+16));
6474+ err |= get_user(add, (unsigned int *)(addr+20));
6475+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6476+ err |= get_user(nop, (unsigned int *)(addr+28));
6477+ if (err)
6478+ break;
6479+
6480+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6481+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6482+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6483+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6484+ sllx == 0x89293020U &&
6485+ add == 0x8A010005U &&
6486+ jmpl == 0x89C14000U &&
6487+ nop == 0x01000000U)
6488+ {
6489+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6490+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6491+ regs->u_regs[UREG_G4] <<= 32;
6492+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6493+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6494+ regs->u_regs[UREG_G4] = addr + 24;
6495+ addr = regs->u_regs[UREG_G5];
6496+ regs->tpc = addr;
6497+ regs->tnpc = addr+4;
6498+ return 3;
6499+ }
6500+ }
6501+ } while (0);
6502+
6503+#ifdef CONFIG_PAX_DLRESOLVE
6504+ do { /* PaX: unpatched PLT emulation step 2 */
6505+ unsigned int save, call, nop;
6506+
6507+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6508+ err |= get_user(call, (unsigned int *)regs->tpc);
6509+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6510+ if (err)
6511+ break;
6512+
6513+ if (save == 0x9DE3BFA8U &&
6514+ (call & 0xC0000000U) == 0x40000000U &&
6515+ nop == 0x01000000U)
6516+ {
6517+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6518+
6519+ if (test_thread_flag(TIF_32BIT))
6520+ dl_resolve &= 0xFFFFFFFFUL;
6521+
6522+ regs->u_regs[UREG_RETPC] = regs->tpc;
6523+ regs->tpc = dl_resolve;
6524+ regs->tnpc = dl_resolve+4;
6525+ return 3;
6526+ }
6527+ } while (0);
6528+#endif
6529+
6530+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6531+ unsigned int sethi, ba, nop;
6532+
6533+ err = get_user(sethi, (unsigned int *)regs->tpc);
6534+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6535+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6536+
6537+ if (err)
6538+ break;
6539+
6540+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6541+ (ba & 0xFFF00000U) == 0x30600000U &&
6542+ nop == 0x01000000U)
6543+ {
6544+ unsigned long addr;
6545+
6546+ addr = (sethi & 0x003FFFFFU) << 10;
6547+ regs->u_regs[UREG_G1] = addr;
6548+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6549+
6550+ if (test_thread_flag(TIF_32BIT))
6551+ addr &= 0xFFFFFFFFUL;
6552+
6553+ regs->tpc = addr;
6554+ regs->tnpc = addr+4;
6555+ return 2;
6556+ }
6557+ } while (0);
6558+
6559+#endif
6560+
6561+ return 1;
6562+}
6563+
6564+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6565+{
6566+ unsigned long i;
6567+
6568+ printk(KERN_ERR "PAX: bytes at PC: ");
6569+ for (i = 0; i < 8; i++) {
6570+ unsigned int c;
6571+ if (get_user(c, (unsigned int *)pc+i))
6572+ printk(KERN_CONT "???????? ");
6573+ else
6574+ printk(KERN_CONT "%08x ", c);
6575+ }
6576+ printk("\n");
6577+}
6578+#endif
6579+
6580 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6581 {
6582 struct mm_struct *mm = current->mm;
6583@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6584 if (!vma)
6585 goto bad_area;
6586
6587+#ifdef CONFIG_PAX_PAGEEXEC
6588+ /* PaX: detect ITLB misses on non-exec pages */
6589+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6590+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6591+ {
6592+ if (address != regs->tpc)
6593+ goto good_area;
6594+
6595+ up_read(&mm->mmap_sem);
6596+ switch (pax_handle_fetch_fault(regs)) {
6597+
6598+#ifdef CONFIG_PAX_EMUPLT
6599+ case 2:
6600+ case 3:
6601+ return;
6602+#endif
6603+
6604+ }
6605+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6606+ do_group_exit(SIGKILL);
6607+ }
6608+#endif
6609+
6610 /* Pure DTLB misses do not tell us whether the fault causing
6611 * load/store/atomic was a write or not, it only says that there
6612 * was no match. So in such a case we (carefully) read the
6613diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6614index f27d103..1b06377 100644
6615--- a/arch/sparc/mm/hugetlbpage.c
6616+++ b/arch/sparc/mm/hugetlbpage.c
6617@@ -69,7 +69,7 @@ full_search:
6618 }
6619 return -ENOMEM;
6620 }
6621- if (likely(!vma || addr + len <= vma->vm_start)) {
6622+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6623 /*
6624 * Remember the place where we stopped the search:
6625 */
6626@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6627 /* make sure it can fit in the remaining address space */
6628 if (likely(addr > len)) {
6629 vma = find_vma(mm, addr-len);
6630- if (!vma || addr <= vma->vm_start) {
6631+ if (check_heap_stack_gap(vma, addr - len, len)) {
6632 /* remember the address as a hint for next time */
6633 return (mm->free_area_cache = addr-len);
6634 }
6635@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6636 if (unlikely(mm->mmap_base < len))
6637 goto bottomup;
6638
6639- addr = (mm->mmap_base-len) & HPAGE_MASK;
6640+ addr = mm->mmap_base - len;
6641
6642 do {
6643+ addr &= HPAGE_MASK;
6644 /*
6645 * Lookup failure means no vma is above this address,
6646 * else if new region fits below vma->vm_start,
6647 * return with success:
6648 */
6649 vma = find_vma(mm, addr);
6650- if (likely(!vma || addr+len <= vma->vm_start)) {
6651+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6652 /* remember the address as a hint for next time */
6653 return (mm->free_area_cache = addr);
6654 }
6655@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6656 mm->cached_hole_size = vma->vm_start - addr;
6657
6658 /* try just below the current vma->vm_start */
6659- addr = (vma->vm_start-len) & HPAGE_MASK;
6660- } while (likely(len < vma->vm_start));
6661+ addr = skip_heap_stack_gap(vma, len);
6662+ } while (!IS_ERR_VALUE(addr));
6663
6664 bottomup:
6665 /*
6666@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6667 if (addr) {
6668 addr = ALIGN(addr, HPAGE_SIZE);
6669 vma = find_vma(mm, addr);
6670- if (task_size - len >= addr &&
6671- (!vma || addr + len <= vma->vm_start))
6672+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6673 return addr;
6674 }
6675 if (mm->get_unmapped_area == arch_get_unmapped_area)
6676diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6677index dc7c3b1..34c0070 100644
6678--- a/arch/sparc/mm/init_32.c
6679+++ b/arch/sparc/mm/init_32.c
6680@@ -317,6 +317,9 @@ extern void device_scan(void);
6681 pgprot_t PAGE_SHARED __read_mostly;
6682 EXPORT_SYMBOL(PAGE_SHARED);
6683
6684+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6685+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6686+
6687 void __init paging_init(void)
6688 {
6689 switch(sparc_cpu_model) {
6690@@ -345,17 +348,17 @@ void __init paging_init(void)
6691
6692 /* Initialize the protection map with non-constant, MMU dependent values. */
6693 protection_map[0] = PAGE_NONE;
6694- protection_map[1] = PAGE_READONLY;
6695- protection_map[2] = PAGE_COPY;
6696- protection_map[3] = PAGE_COPY;
6697+ protection_map[1] = PAGE_READONLY_NOEXEC;
6698+ protection_map[2] = PAGE_COPY_NOEXEC;
6699+ protection_map[3] = PAGE_COPY_NOEXEC;
6700 protection_map[4] = PAGE_READONLY;
6701 protection_map[5] = PAGE_READONLY;
6702 protection_map[6] = PAGE_COPY;
6703 protection_map[7] = PAGE_COPY;
6704 protection_map[8] = PAGE_NONE;
6705- protection_map[9] = PAGE_READONLY;
6706- protection_map[10] = PAGE_SHARED;
6707- protection_map[11] = PAGE_SHARED;
6708+ protection_map[9] = PAGE_READONLY_NOEXEC;
6709+ protection_map[10] = PAGE_SHARED_NOEXEC;
6710+ protection_map[11] = PAGE_SHARED_NOEXEC;
6711 protection_map[12] = PAGE_READONLY;
6712 protection_map[13] = PAGE_READONLY;
6713 protection_map[14] = PAGE_SHARED;
6714diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6715index 509b1ff..bfd7118 100644
6716--- a/arch/sparc/mm/srmmu.c
6717+++ b/arch/sparc/mm/srmmu.c
6718@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6719 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6720 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6721 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6722+
6723+#ifdef CONFIG_PAX_PAGEEXEC
6724+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6725+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6726+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6727+#endif
6728+
6729 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6730 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6731
6732diff --git a/arch/um/Makefile b/arch/um/Makefile
6733index fc633db..5e1a1c2 100644
6734--- a/arch/um/Makefile
6735+++ b/arch/um/Makefile
6736@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6737 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6738 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6739
6740+ifdef CONSTIFY_PLUGIN
6741+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6742+endif
6743+
6744 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6745
6746 #This will adjust *FLAGS accordingly to the platform.
6747diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6748index 6c03acd..a5e0215 100644
6749--- a/arch/um/include/asm/kmap_types.h
6750+++ b/arch/um/include/asm/kmap_types.h
6751@@ -23,6 +23,7 @@ enum km_type {
6752 KM_IRQ1,
6753 KM_SOFTIRQ0,
6754 KM_SOFTIRQ1,
6755+ KM_CLEARPAGE,
6756 KM_TYPE_NR
6757 };
6758
6759diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6760index 4cc9b6c..02e5029 100644
6761--- a/arch/um/include/asm/page.h
6762+++ b/arch/um/include/asm/page.h
6763@@ -14,6 +14,9 @@
6764 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6765 #define PAGE_MASK (~(PAGE_SIZE-1))
6766
6767+#define ktla_ktva(addr) (addr)
6768+#define ktva_ktla(addr) (addr)
6769+
6770 #ifndef __ASSEMBLY__
6771
6772 struct page;
6773diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6774index 4a28a15..654dc2a 100644
6775--- a/arch/um/kernel/process.c
6776+++ b/arch/um/kernel/process.c
6777@@ -393,22 +393,6 @@ int singlestepping(void * t)
6778 return 2;
6779 }
6780
6781-/*
6782- * Only x86 and x86_64 have an arch_align_stack().
6783- * All other arches have "#define arch_align_stack(x) (x)"
6784- * in their asm/system.h
6785- * As this is included in UML from asm-um/system-generic.h,
6786- * we can use it to behave as the subarch does.
6787- */
6788-#ifndef arch_align_stack
6789-unsigned long arch_align_stack(unsigned long sp)
6790-{
6791- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6792- sp -= get_random_int() % 8192;
6793- return sp & ~0xf;
6794-}
6795-#endif
6796-
6797 unsigned long get_wchan(struct task_struct *p)
6798 {
6799 unsigned long stack_page, sp, ip;
6800diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
6801index d1b93c4..ae1b7fd 100644
6802--- a/arch/um/sys-i386/shared/sysdep/system.h
6803+++ b/arch/um/sys-i386/shared/sysdep/system.h
6804@@ -17,7 +17,7 @@
6805 # define AT_VECTOR_SIZE_ARCH 1
6806 #endif
6807
6808-extern unsigned long arch_align_stack(unsigned long sp);
6809+#define arch_align_stack(x) ((x) & ~0xfUL)
6810
6811 void default_idle(void);
6812
6813diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
6814index 857ca0b..9a2669d 100644
6815--- a/arch/um/sys-i386/syscalls.c
6816+++ b/arch/um/sys-i386/syscalls.c
6817@@ -11,6 +11,21 @@
6818 #include "asm/uaccess.h"
6819 #include "asm/unistd.h"
6820
6821+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6822+{
6823+ unsigned long pax_task_size = TASK_SIZE;
6824+
6825+#ifdef CONFIG_PAX_SEGMEXEC
6826+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6827+ pax_task_size = SEGMEXEC_TASK_SIZE;
6828+#endif
6829+
6830+ if (len > pax_task_size || addr > pax_task_size - len)
6831+ return -EINVAL;
6832+
6833+ return 0;
6834+}
6835+
6836 /*
6837 * Perform the select(nd, in, out, ex, tv) and mmap() system
6838 * calls. Linux/i386 didn't use to be able to handle more than
6839diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
6840index d1b93c4..ae1b7fd 100644
6841--- a/arch/um/sys-x86_64/shared/sysdep/system.h
6842+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
6843@@ -17,7 +17,7 @@
6844 # define AT_VECTOR_SIZE_ARCH 1
6845 #endif
6846
6847-extern unsigned long arch_align_stack(unsigned long sp);
6848+#define arch_align_stack(x) ((x) & ~0xfUL)
6849
6850 void default_idle(void);
6851
6852diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
6853index 73ae02a..f932de5 100644
6854--- a/arch/x86/Kconfig
6855+++ b/arch/x86/Kconfig
6856@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
6857
6858 config X86_32_LAZY_GS
6859 def_bool y
6860- depends on X86_32 && !CC_STACKPROTECTOR
6861+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
6862
6863 config KTIME_SCALAR
6864 def_bool X86_32
6865@@ -1008,7 +1008,7 @@ choice
6866
6867 config NOHIGHMEM
6868 bool "off"
6869- depends on !X86_NUMAQ
6870+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6871 ---help---
6872 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
6873 However, the address space of 32-bit x86 processors is only 4
6874@@ -1045,7 +1045,7 @@ config NOHIGHMEM
6875
6876 config HIGHMEM4G
6877 bool "4GB"
6878- depends on !X86_NUMAQ
6879+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6880 ---help---
6881 Select this if you have a 32-bit processor and between 1 and 4
6882 gigabytes of physical RAM.
6883@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
6884 hex
6885 default 0xB0000000 if VMSPLIT_3G_OPT
6886 default 0x80000000 if VMSPLIT_2G
6887- default 0x78000000 if VMSPLIT_2G_OPT
6888+ default 0x70000000 if VMSPLIT_2G_OPT
6889 default 0x40000000 if VMSPLIT_1G
6890 default 0xC0000000
6891 depends on X86_32
6892@@ -1460,6 +1460,7 @@ config SECCOMP
6893
6894 config CC_STACKPROTECTOR
6895 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
6896+ depends on X86_64 || !PAX_MEMORY_UDEREF
6897 ---help---
6898 This option turns on the -fstack-protector GCC feature. This
6899 feature puts, at the beginning of functions, a canary value on
6900@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
6901 config PHYSICAL_START
6902 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
6903 default "0x1000000"
6904+ range 0x400000 0x40000000
6905 ---help---
6906 This gives the physical address where the kernel is loaded.
6907
6908@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
6909 hex
6910 prompt "Alignment value to which kernel should be aligned" if X86_32
6911 default "0x1000000"
6912+ range 0x400000 0x1000000 if PAX_KERNEXEC
6913 range 0x2000 0x1000000
6914 ---help---
6915 This value puts the alignment restrictions on physical address
6916@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
6917 Say N if you want to disable CPU hotplug.
6918
6919 config COMPAT_VDSO
6920- def_bool y
6921+ def_bool n
6922 prompt "Compat VDSO support"
6923 depends on X86_32 || IA32_EMULATION
6924+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
6925 ---help---
6926 Map the 32-bit VDSO to the predictable old-style address too.
6927 ---help---
6928diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
6929index 0e566103..1a6b57e 100644
6930--- a/arch/x86/Kconfig.cpu
6931+++ b/arch/x86/Kconfig.cpu
6932@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
6933
6934 config X86_F00F_BUG
6935 def_bool y
6936- depends on M586MMX || M586TSC || M586 || M486 || M386
6937+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
6938
6939 config X86_WP_WORKS_OK
6940 def_bool y
6941@@ -360,7 +360,7 @@ config X86_POPAD_OK
6942
6943 config X86_ALIGNMENT_16
6944 def_bool y
6945- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6946+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6947
6948 config X86_INTEL_USERCOPY
6949 def_bool y
6950@@ -406,7 +406,7 @@ config X86_CMPXCHG64
6951 # generates cmov.
6952 config X86_CMOV
6953 def_bool y
6954- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6955+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6956
6957 config X86_MINIMUM_CPU_FAMILY
6958 int
6959diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
6960index d105f29..c928727 100644
6961--- a/arch/x86/Kconfig.debug
6962+++ b/arch/x86/Kconfig.debug
6963@@ -99,7 +99,7 @@ config X86_PTDUMP
6964 config DEBUG_RODATA
6965 bool "Write protect kernel read-only data structures"
6966 default y
6967- depends on DEBUG_KERNEL
6968+ depends on DEBUG_KERNEL && BROKEN
6969 ---help---
6970 Mark the kernel read-only data as write-protected in the pagetables,
6971 in order to catch accidental (and incorrect) writes to such const
6972diff --git a/arch/x86/Makefile b/arch/x86/Makefile
6973index d2d24c9..0f21f8d 100644
6974--- a/arch/x86/Makefile
6975+++ b/arch/x86/Makefile
6976@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
6977 else
6978 BITS := 64
6979 UTS_MACHINE := x86_64
6980+ biarch := $(call cc-option,-m64)
6981 CHECKFLAGS += -D__x86_64__ -m64
6982
6983 KBUILD_AFLAGS += -m64
6984@@ -189,3 +190,12 @@ define archhelp
6985 echo ' FDARGS="..." arguments for the booted kernel'
6986 echo ' FDINITRD=file initrd for the booted kernel'
6987 endef
6988+
6989+define OLD_LD
6990+
6991+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
6992+*** Please upgrade your binutils to 2.18 or newer
6993+endef
6994+
6995+archprepare:
6996+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
6997diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
6998index ec749c2..bbb5319 100644
6999--- a/arch/x86/boot/Makefile
7000+++ b/arch/x86/boot/Makefile
7001@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7002 $(call cc-option, -fno-stack-protector) \
7003 $(call cc-option, -mpreferred-stack-boundary=2)
7004 KBUILD_CFLAGS += $(call cc-option, -m32)
7005+ifdef CONSTIFY_PLUGIN
7006+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7007+endif
7008 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7009 GCOV_PROFILE := n
7010
7011diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7012index 878e4b9..20537ab 100644
7013--- a/arch/x86/boot/bitops.h
7014+++ b/arch/x86/boot/bitops.h
7015@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7016 u8 v;
7017 const u32 *p = (const u32 *)addr;
7018
7019- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7020+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7021 return v;
7022 }
7023
7024@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7025
7026 static inline void set_bit(int nr, void *addr)
7027 {
7028- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7029+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7030 }
7031
7032 #endif /* BOOT_BITOPS_H */
7033diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7034index 98239d2..f40214c 100644
7035--- a/arch/x86/boot/boot.h
7036+++ b/arch/x86/boot/boot.h
7037@@ -82,7 +82,7 @@ static inline void io_delay(void)
7038 static inline u16 ds(void)
7039 {
7040 u16 seg;
7041- asm("movw %%ds,%0" : "=rm" (seg));
7042+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7043 return seg;
7044 }
7045
7046@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7047 static inline int memcmp(const void *s1, const void *s2, size_t len)
7048 {
7049 u8 diff;
7050- asm("repe; cmpsb; setnz %0"
7051+ asm volatile("repe; cmpsb; setnz %0"
7052 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7053 return diff;
7054 }
7055diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7056index f8ed065..5bf5ff3 100644
7057--- a/arch/x86/boot/compressed/Makefile
7058+++ b/arch/x86/boot/compressed/Makefile
7059@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7060 KBUILD_CFLAGS += $(cflags-y)
7061 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7062 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7063+ifdef CONSTIFY_PLUGIN
7064+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7065+endif
7066
7067 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7068 GCOV_PROFILE := n
7069diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7070index f543b70..b60fba8 100644
7071--- a/arch/x86/boot/compressed/head_32.S
7072+++ b/arch/x86/boot/compressed/head_32.S
7073@@ -76,7 +76,7 @@ ENTRY(startup_32)
7074 notl %eax
7075 andl %eax, %ebx
7076 #else
7077- movl $LOAD_PHYSICAL_ADDR, %ebx
7078+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7079 #endif
7080
7081 /* Target address to relocate to for decompression */
7082@@ -149,7 +149,7 @@ relocated:
7083 * and where it was actually loaded.
7084 */
7085 movl %ebp, %ebx
7086- subl $LOAD_PHYSICAL_ADDR, %ebx
7087+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7088 jz 2f /* Nothing to be done if loaded at compiled addr. */
7089 /*
7090 * Process relocations.
7091@@ -157,8 +157,7 @@ relocated:
7092
7093 1: subl $4, %edi
7094 movl (%edi), %ecx
7095- testl %ecx, %ecx
7096- jz 2f
7097+ jecxz 2f
7098 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7099 jmp 1b
7100 2:
7101diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7102index 077e1b6..2c6b13b 100644
7103--- a/arch/x86/boot/compressed/head_64.S
7104+++ b/arch/x86/boot/compressed/head_64.S
7105@@ -91,7 +91,7 @@ ENTRY(startup_32)
7106 notl %eax
7107 andl %eax, %ebx
7108 #else
7109- movl $LOAD_PHYSICAL_ADDR, %ebx
7110+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7111 #endif
7112
7113 /* Target address to relocate to for decompression */
7114@@ -183,7 +183,7 @@ no_longmode:
7115 hlt
7116 jmp 1b
7117
7118-#include "../../kernel/verify_cpu_64.S"
7119+#include "../../kernel/verify_cpu.S"
7120
7121 /*
7122 * Be careful here startup_64 needs to be at a predictable
7123@@ -234,7 +234,7 @@ ENTRY(startup_64)
7124 notq %rax
7125 andq %rax, %rbp
7126 #else
7127- movq $LOAD_PHYSICAL_ADDR, %rbp
7128+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7129 #endif
7130
7131 /* Target address to relocate to for decompression */
7132diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7133index 842b2a3..f00178b 100644
7134--- a/arch/x86/boot/compressed/misc.c
7135+++ b/arch/x86/boot/compressed/misc.c
7136@@ -288,7 +288,7 @@ static void parse_elf(void *output)
7137 case PT_LOAD:
7138 #ifdef CONFIG_RELOCATABLE
7139 dest = output;
7140- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7141+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7142 #else
7143 dest = (void *)(phdr->p_paddr);
7144 #endif
7145@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7146 error("Destination address too large");
7147 #endif
7148 #ifndef CONFIG_RELOCATABLE
7149- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7150+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7151 error("Wrong destination address");
7152 #endif
7153
7154diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7155index bcbd36c..b1754af 100644
7156--- a/arch/x86/boot/compressed/mkpiggy.c
7157+++ b/arch/x86/boot/compressed/mkpiggy.c
7158@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7159
7160 offs = (olen > ilen) ? olen - ilen : 0;
7161 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7162- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7163+ offs += 64*1024; /* Add 64K bytes slack */
7164 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7165
7166 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7167diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7168index bbeb0c3..f5167ab 100644
7169--- a/arch/x86/boot/compressed/relocs.c
7170+++ b/arch/x86/boot/compressed/relocs.c
7171@@ -10,8 +10,11 @@
7172 #define USE_BSD
7173 #include <endian.h>
7174
7175+#include "../../../../include/linux/autoconf.h"
7176+
7177 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7178 static Elf32_Ehdr ehdr;
7179+static Elf32_Phdr *phdr;
7180 static unsigned long reloc_count, reloc_idx;
7181 static unsigned long *relocs;
7182
7183@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7184
7185 static int is_safe_abs_reloc(const char* sym_name)
7186 {
7187- int i;
7188+ unsigned int i;
7189
7190 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7191 if (!strcmp(sym_name, safe_abs_relocs[i]))
7192@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7193 }
7194 }
7195
7196+static void read_phdrs(FILE *fp)
7197+{
7198+ unsigned int i;
7199+
7200+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7201+ if (!phdr) {
7202+ die("Unable to allocate %d program headers\n",
7203+ ehdr.e_phnum);
7204+ }
7205+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7206+ die("Seek to %d failed: %s\n",
7207+ ehdr.e_phoff, strerror(errno));
7208+ }
7209+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7210+ die("Cannot read ELF program headers: %s\n",
7211+ strerror(errno));
7212+ }
7213+ for(i = 0; i < ehdr.e_phnum; i++) {
7214+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7215+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7216+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7217+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7218+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7219+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7220+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7221+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7222+ }
7223+
7224+}
7225+
7226 static void read_shdrs(FILE *fp)
7227 {
7228- int i;
7229+ unsigned int i;
7230 Elf32_Shdr shdr;
7231
7232 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7233@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7234
7235 static void read_strtabs(FILE *fp)
7236 {
7237- int i;
7238+ unsigned int i;
7239 for (i = 0; i < ehdr.e_shnum; i++) {
7240 struct section *sec = &secs[i];
7241 if (sec->shdr.sh_type != SHT_STRTAB) {
7242@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7243
7244 static void read_symtabs(FILE *fp)
7245 {
7246- int i,j;
7247+ unsigned int i,j;
7248 for (i = 0; i < ehdr.e_shnum; i++) {
7249 struct section *sec = &secs[i];
7250 if (sec->shdr.sh_type != SHT_SYMTAB) {
7251@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7252
7253 static void read_relocs(FILE *fp)
7254 {
7255- int i,j;
7256+ unsigned int i,j;
7257+ uint32_t base;
7258+
7259 for (i = 0; i < ehdr.e_shnum; i++) {
7260 struct section *sec = &secs[i];
7261 if (sec->shdr.sh_type != SHT_REL) {
7262@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7263 die("Cannot read symbol table: %s\n",
7264 strerror(errno));
7265 }
7266+ base = 0;
7267+ for (j = 0; j < ehdr.e_phnum; j++) {
7268+ if (phdr[j].p_type != PT_LOAD )
7269+ continue;
7270+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7271+ continue;
7272+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7273+ break;
7274+ }
7275 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7276 Elf32_Rel *rel = &sec->reltab[j];
7277- rel->r_offset = elf32_to_cpu(rel->r_offset);
7278+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7279 rel->r_info = elf32_to_cpu(rel->r_info);
7280 }
7281 }
7282@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7283
7284 static void print_absolute_symbols(void)
7285 {
7286- int i;
7287+ unsigned int i;
7288 printf("Absolute symbols\n");
7289 printf(" Num: Value Size Type Bind Visibility Name\n");
7290 for (i = 0; i < ehdr.e_shnum; i++) {
7291 struct section *sec = &secs[i];
7292 char *sym_strtab;
7293 Elf32_Sym *sh_symtab;
7294- int j;
7295+ unsigned int j;
7296
7297 if (sec->shdr.sh_type != SHT_SYMTAB) {
7298 continue;
7299@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7300
7301 static void print_absolute_relocs(void)
7302 {
7303- int i, printed = 0;
7304+ unsigned int i, printed = 0;
7305
7306 for (i = 0; i < ehdr.e_shnum; i++) {
7307 struct section *sec = &secs[i];
7308 struct section *sec_applies, *sec_symtab;
7309 char *sym_strtab;
7310 Elf32_Sym *sh_symtab;
7311- int j;
7312+ unsigned int j;
7313 if (sec->shdr.sh_type != SHT_REL) {
7314 continue;
7315 }
7316@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7317
7318 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7319 {
7320- int i;
7321+ unsigned int i;
7322 /* Walk through the relocations */
7323 for (i = 0; i < ehdr.e_shnum; i++) {
7324 char *sym_strtab;
7325 Elf32_Sym *sh_symtab;
7326 struct section *sec_applies, *sec_symtab;
7327- int j;
7328+ unsigned int j;
7329 struct section *sec = &secs[i];
7330
7331 if (sec->shdr.sh_type != SHT_REL) {
7332@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7333 if (sym->st_shndx == SHN_ABS) {
7334 continue;
7335 }
7336+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7337+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7338+ continue;
7339+
7340+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7341+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7342+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7343+ continue;
7344+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7345+ continue;
7346+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7347+ continue;
7348+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7349+ continue;
7350+#endif
7351 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7352 /*
7353 * NONE can be ignored and and PC relative
7354@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7355
7356 static void emit_relocs(int as_text)
7357 {
7358- int i;
7359+ unsigned int i;
7360 /* Count how many relocations I have and allocate space for them. */
7361 reloc_count = 0;
7362 walk_relocs(count_reloc);
7363@@ -634,6 +693,7 @@ int main(int argc, char **argv)
7364 fname, strerror(errno));
7365 }
7366 read_ehdr(fp);
7367+ read_phdrs(fp);
7368 read_shdrs(fp);
7369 read_strtabs(fp);
7370 read_symtabs(fp);
7371diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7372index 4d3ff03..e4972ff 100644
7373--- a/arch/x86/boot/cpucheck.c
7374+++ b/arch/x86/boot/cpucheck.c
7375@@ -74,7 +74,7 @@ static int has_fpu(void)
7376 u16 fcw = -1, fsw = -1;
7377 u32 cr0;
7378
7379- asm("movl %%cr0,%0" : "=r" (cr0));
7380+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7381 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7382 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7383 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7384@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7385 {
7386 u32 f0, f1;
7387
7388- asm("pushfl ; "
7389+ asm volatile("pushfl ; "
7390 "pushfl ; "
7391 "popl %0 ; "
7392 "movl %0,%1 ; "
7393@@ -115,7 +115,7 @@ static void get_flags(void)
7394 set_bit(X86_FEATURE_FPU, cpu.flags);
7395
7396 if (has_eflag(X86_EFLAGS_ID)) {
7397- asm("cpuid"
7398+ asm volatile("cpuid"
7399 : "=a" (max_intel_level),
7400 "=b" (cpu_vendor[0]),
7401 "=d" (cpu_vendor[1]),
7402@@ -124,7 +124,7 @@ static void get_flags(void)
7403
7404 if (max_intel_level >= 0x00000001 &&
7405 max_intel_level <= 0x0000ffff) {
7406- asm("cpuid"
7407+ asm volatile("cpuid"
7408 : "=a" (tfms),
7409 "=c" (cpu.flags[4]),
7410 "=d" (cpu.flags[0])
7411@@ -136,7 +136,7 @@ static void get_flags(void)
7412 cpu.model += ((tfms >> 16) & 0xf) << 4;
7413 }
7414
7415- asm("cpuid"
7416+ asm volatile("cpuid"
7417 : "=a" (max_amd_level)
7418 : "a" (0x80000000)
7419 : "ebx", "ecx", "edx");
7420@@ -144,7 +144,7 @@ static void get_flags(void)
7421 if (max_amd_level >= 0x80000001 &&
7422 max_amd_level <= 0x8000ffff) {
7423 u32 eax = 0x80000001;
7424- asm("cpuid"
7425+ asm volatile("cpuid"
7426 : "+a" (eax),
7427 "=c" (cpu.flags[6]),
7428 "=d" (cpu.flags[1])
7429@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7430 u32 ecx = MSR_K7_HWCR;
7431 u32 eax, edx;
7432
7433- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7434+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7435 eax &= ~(1 << 15);
7436- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7437+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7438
7439 get_flags(); /* Make sure it really did something */
7440 err = check_flags();
7441@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7442 u32 ecx = MSR_VIA_FCR;
7443 u32 eax, edx;
7444
7445- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7446+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7447 eax |= (1<<1)|(1<<7);
7448- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7449+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7450
7451 set_bit(X86_FEATURE_CX8, cpu.flags);
7452 err = check_flags();
7453@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7454 u32 eax, edx;
7455 u32 level = 1;
7456
7457- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7458- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7459- asm("cpuid"
7460+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7461+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7462+ asm volatile("cpuid"
7463 : "+a" (level), "=d" (cpu.flags[0])
7464 : : "ecx", "ebx");
7465- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7466+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7467
7468 err = check_flags();
7469 }
7470diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7471index b31cc54..8d69237 100644
7472--- a/arch/x86/boot/header.S
7473+++ b/arch/x86/boot/header.S
7474@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7475 # single linked list of
7476 # struct setup_data
7477
7478-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7479+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7480
7481 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7482 #define VO_INIT_SIZE (VO__end - VO__text)
7483diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7484index cae3feb..ff8ff2a 100644
7485--- a/arch/x86/boot/memory.c
7486+++ b/arch/x86/boot/memory.c
7487@@ -19,7 +19,7 @@
7488
7489 static int detect_memory_e820(void)
7490 {
7491- int count = 0;
7492+ unsigned int count = 0;
7493 struct biosregs ireg, oreg;
7494 struct e820entry *desc = boot_params.e820_map;
7495 static struct e820entry buf; /* static so it is zeroed */
7496diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7497index 11e8c6e..fdbb1ed 100644
7498--- a/arch/x86/boot/video-vesa.c
7499+++ b/arch/x86/boot/video-vesa.c
7500@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7501
7502 boot_params.screen_info.vesapm_seg = oreg.es;
7503 boot_params.screen_info.vesapm_off = oreg.di;
7504+ boot_params.screen_info.vesapm_size = oreg.cx;
7505 }
7506
7507 /*
7508diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7509index d42da38..787cdf3 100644
7510--- a/arch/x86/boot/video.c
7511+++ b/arch/x86/boot/video.c
7512@@ -90,7 +90,7 @@ static void store_mode_params(void)
7513 static unsigned int get_entry(void)
7514 {
7515 char entry_buf[4];
7516- int i, len = 0;
7517+ unsigned int i, len = 0;
7518 int key;
7519 unsigned int v;
7520
7521diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7522index 5b577d5..3c1fed4 100644
7523--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7524+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7525@@ -8,6 +8,8 @@
7526 * including this sentence is retained in full.
7527 */
7528
7529+#include <asm/alternative-asm.h>
7530+
7531 .extern crypto_ft_tab
7532 .extern crypto_it_tab
7533 .extern crypto_fl_tab
7534@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7535 je B192; \
7536 leaq 32(r9),r9;
7537
7538+#define ret pax_force_retaddr 0, 1; ret
7539+
7540 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7541 movq r1,r2; \
7542 movq r3,r4; \
7543diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7544index eb0566e..e3ebad8 100644
7545--- a/arch/x86/crypto/aesni-intel_asm.S
7546+++ b/arch/x86/crypto/aesni-intel_asm.S
7547@@ -16,6 +16,7 @@
7548 */
7549
7550 #include <linux/linkage.h>
7551+#include <asm/alternative-asm.h>
7552
7553 .text
7554
7555@@ -52,6 +53,7 @@ _key_expansion_256a:
7556 pxor %xmm1, %xmm0
7557 movaps %xmm0, (%rcx)
7558 add $0x10, %rcx
7559+ pax_force_retaddr_bts
7560 ret
7561
7562 _key_expansion_192a:
7563@@ -75,6 +77,7 @@ _key_expansion_192a:
7564 shufps $0b01001110, %xmm2, %xmm1
7565 movaps %xmm1, 16(%rcx)
7566 add $0x20, %rcx
7567+ pax_force_retaddr_bts
7568 ret
7569
7570 _key_expansion_192b:
7571@@ -93,6 +96,7 @@ _key_expansion_192b:
7572
7573 movaps %xmm0, (%rcx)
7574 add $0x10, %rcx
7575+ pax_force_retaddr_bts
7576 ret
7577
7578 _key_expansion_256b:
7579@@ -104,6 +108,7 @@ _key_expansion_256b:
7580 pxor %xmm1, %xmm2
7581 movaps %xmm2, (%rcx)
7582 add $0x10, %rcx
7583+ pax_force_retaddr_bts
7584 ret
7585
7586 /*
7587@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7588 cmp %rcx, %rdi
7589 jb .Ldec_key_loop
7590 xor %rax, %rax
7591+ pax_force_retaddr 0, 1
7592 ret
7593+ENDPROC(aesni_set_key)
7594
7595 /*
7596 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7597@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7598 movups (INP), STATE # input
7599 call _aesni_enc1
7600 movups STATE, (OUTP) # output
7601+ pax_force_retaddr 0, 1
7602 ret
7603+ENDPROC(aesni_enc)
7604
7605 /*
7606 * _aesni_enc1: internal ABI
7607@@ -319,6 +328,7 @@ _aesni_enc1:
7608 movaps 0x70(TKEYP), KEY
7609 # aesenclast KEY, STATE # last round
7610 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7611+ pax_force_retaddr_bts
7612 ret
7613
7614 /*
7615@@ -482,6 +492,7 @@ _aesni_enc4:
7616 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7617 # aesenclast KEY, STATE4
7618 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7619+ pax_force_retaddr_bts
7620 ret
7621
7622 /*
7623@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7624 movups (INP), STATE # input
7625 call _aesni_dec1
7626 movups STATE, (OUTP) #output
7627+ pax_force_retaddr 0, 1
7628 ret
7629+ENDPROC(aesni_dec)
7630
7631 /*
7632 * _aesni_dec1: internal ABI
7633@@ -563,6 +576,7 @@ _aesni_dec1:
7634 movaps 0x70(TKEYP), KEY
7635 # aesdeclast KEY, STATE # last round
7636 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7637+ pax_force_retaddr_bts
7638 ret
7639
7640 /*
7641@@ -726,6 +740,7 @@ _aesni_dec4:
7642 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7643 # aesdeclast KEY, STATE4
7644 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7645+ pax_force_retaddr_bts
7646 ret
7647
7648 /*
7649@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7650 cmp $16, LEN
7651 jge .Lecb_enc_loop1
7652 .Lecb_enc_ret:
7653+ pax_force_retaddr 0, 1
7654 ret
7655+ENDPROC(aesni_ecb_enc)
7656
7657 /*
7658 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7659@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7660 cmp $16, LEN
7661 jge .Lecb_dec_loop1
7662 .Lecb_dec_ret:
7663+ pax_force_retaddr 0, 1
7664 ret
7665+ENDPROC(aesni_ecb_dec)
7666
7667 /*
7668 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7669@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7670 jge .Lcbc_enc_loop
7671 movups STATE, (IVP)
7672 .Lcbc_enc_ret:
7673+ pax_force_retaddr 0, 1
7674 ret
7675+ENDPROC(aesni_cbc_enc)
7676
7677 /*
7678 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7679@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7680 .Lcbc_dec_ret:
7681 movups IV, (IVP)
7682 .Lcbc_dec_just_ret:
7683+ pax_force_retaddr 0, 1
7684 ret
7685+ENDPROC(aesni_cbc_dec)
7686diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7687index 6214a9b..1f4fc9a 100644
7688--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7689+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7690@@ -1,3 +1,5 @@
7691+#include <asm/alternative-asm.h>
7692+
7693 # enter ECRYPT_encrypt_bytes
7694 .text
7695 .p2align 5
7696@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7697 add %r11,%rsp
7698 mov %rdi,%rax
7699 mov %rsi,%rdx
7700+ pax_force_retaddr 0, 1
7701 ret
7702 # bytesatleast65:
7703 ._bytesatleast65:
7704@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7705 add %r11,%rsp
7706 mov %rdi,%rax
7707 mov %rsi,%rdx
7708+ pax_force_retaddr
7709 ret
7710 # enter ECRYPT_ivsetup
7711 .text
7712@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7713 add %r11,%rsp
7714 mov %rdi,%rax
7715 mov %rsi,%rdx
7716+ pax_force_retaddr
7717 ret
7718diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7719index 35974a5..5662ae2 100644
7720--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7721+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7722@@ -21,6 +21,7 @@
7723 .text
7724
7725 #include <asm/asm-offsets.h>
7726+#include <asm/alternative-asm.h>
7727
7728 #define a_offset 0
7729 #define b_offset 4
7730@@ -269,6 +270,7 @@ twofish_enc_blk:
7731
7732 popq R1
7733 movq $1,%rax
7734+ pax_force_retaddr 0, 1
7735 ret
7736
7737 twofish_dec_blk:
7738@@ -321,4 +323,5 @@ twofish_dec_blk:
7739
7740 popq R1
7741 movq $1,%rax
7742+ pax_force_retaddr 0, 1
7743 ret
7744diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7745index 14531ab..a89a0c0 100644
7746--- a/arch/x86/ia32/ia32_aout.c
7747+++ b/arch/x86/ia32/ia32_aout.c
7748@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7749 unsigned long dump_start, dump_size;
7750 struct user32 dump;
7751
7752+ memset(&dump, 0, sizeof(dump));
7753+
7754 fs = get_fs();
7755 set_fs(KERNEL_DS);
7756 has_dumped = 1;
7757@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7758 dump_size = dump.u_ssize << PAGE_SHIFT;
7759 DUMP_WRITE(dump_start, dump_size);
7760 }
7761- /*
7762- * Finally dump the task struct. Not be used by gdb, but
7763- * could be useful
7764- */
7765- set_fs(KERNEL_DS);
7766- DUMP_WRITE(current, sizeof(*current));
7767 end_coredump:
7768 set_fs(fs);
7769 return has_dumped;
7770diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7771index 588a7aa..a3468b0 100644
7772--- a/arch/x86/ia32/ia32_signal.c
7773+++ b/arch/x86/ia32/ia32_signal.c
7774@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7775 }
7776 seg = get_fs();
7777 set_fs(KERNEL_DS);
7778- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7779+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7780 set_fs(seg);
7781 if (ret >= 0 && uoss_ptr) {
7782 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7783@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7784 */
7785 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7786 size_t frame_size,
7787- void **fpstate)
7788+ void __user **fpstate)
7789 {
7790 unsigned long sp;
7791
7792@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7793
7794 if (used_math()) {
7795 sp = sp - sig_xstate_ia32_size;
7796- *fpstate = (struct _fpstate_ia32 *) sp;
7797+ *fpstate = (struct _fpstate_ia32 __user *) sp;
7798 if (save_i387_xstate_ia32(*fpstate) < 0)
7799 return (void __user *) -1L;
7800 }
7801@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7802 sp -= frame_size;
7803 /* Align the stack pointer according to the i386 ABI,
7804 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7805- sp = ((sp + 4) & -16ul) - 4;
7806+ sp = ((sp - 12) & -16ul) - 4;
7807 return (void __user *) sp;
7808 }
7809
7810@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
7811 * These are actually not used anymore, but left because some
7812 * gdb versions depend on them as a marker.
7813 */
7814- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7815+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7816 } put_user_catch(err);
7817
7818 if (err)
7819@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7820 0xb8,
7821 __NR_ia32_rt_sigreturn,
7822 0x80cd,
7823- 0,
7824+ 0
7825 };
7826
7827 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7828@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7829
7830 if (ka->sa.sa_flags & SA_RESTORER)
7831 restorer = ka->sa.sa_restorer;
7832+ else if (current->mm->context.vdso)
7833+ /* Return stub is in 32bit vsyscall page */
7834+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7835 else
7836- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7837- rt_sigreturn);
7838+ restorer = &frame->retcode;
7839 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7840
7841 /*
7842 * Not actually used anymore, but left because some gdb
7843 * versions need it.
7844 */
7845- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7846+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7847 } put_user_catch(err);
7848
7849 if (err)
7850diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
7851index 4edd8eb..29124b4 100644
7852--- a/arch/x86/ia32/ia32entry.S
7853+++ b/arch/x86/ia32/ia32entry.S
7854@@ -13,7 +13,9 @@
7855 #include <asm/thread_info.h>
7856 #include <asm/segment.h>
7857 #include <asm/irqflags.h>
7858+#include <asm/pgtable.h>
7859 #include <linux/linkage.h>
7860+#include <asm/alternative-asm.h>
7861
7862 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
7863 #include <linux/elf-em.h>
7864@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
7865 ENDPROC(native_irq_enable_sysexit)
7866 #endif
7867
7868+ .macro pax_enter_kernel_user
7869+ pax_set_fptr_mask
7870+#ifdef CONFIG_PAX_MEMORY_UDEREF
7871+ call pax_enter_kernel_user
7872+#endif
7873+ .endm
7874+
7875+ .macro pax_exit_kernel_user
7876+#ifdef CONFIG_PAX_MEMORY_UDEREF
7877+ call pax_exit_kernel_user
7878+#endif
7879+#ifdef CONFIG_PAX_RANDKSTACK
7880+ pushq %rax
7881+ pushq %r11
7882+ call pax_randomize_kstack
7883+ popq %r11
7884+ popq %rax
7885+#endif
7886+ .endm
7887+
7888+.macro pax_erase_kstack
7889+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
7890+ call pax_erase_kstack
7891+#endif
7892+.endm
7893+
7894 /*
7895 * 32bit SYSENTER instruction entry.
7896 *
7897@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
7898 CFI_REGISTER rsp,rbp
7899 SWAPGS_UNSAFE_STACK
7900 movq PER_CPU_VAR(kernel_stack), %rsp
7901- addq $(KERNEL_STACK_OFFSET),%rsp
7902- /*
7903- * No need to follow this irqs on/off section: the syscall
7904- * disabled irqs, here we enable it straight after entry:
7905- */
7906- ENABLE_INTERRUPTS(CLBR_NONE)
7907 movl %ebp,%ebp /* zero extension */
7908 pushq $__USER32_DS
7909 CFI_ADJUST_CFA_OFFSET 8
7910@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
7911 pushfq
7912 CFI_ADJUST_CFA_OFFSET 8
7913 /*CFI_REL_OFFSET rflags,0*/
7914- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
7915- CFI_REGISTER rip,r10
7916+ orl $X86_EFLAGS_IF,(%rsp)
7917+ GET_THREAD_INFO(%r11)
7918+ movl TI_sysenter_return(%r11), %r11d
7919+ CFI_REGISTER rip,r11
7920 pushq $__USER32_CS
7921 CFI_ADJUST_CFA_OFFSET 8
7922 /*CFI_REL_OFFSET cs,0*/
7923 movl %eax, %eax
7924- pushq %r10
7925+ pushq %r11
7926 CFI_ADJUST_CFA_OFFSET 8
7927 CFI_REL_OFFSET rip,0
7928 pushq %rax
7929 CFI_ADJUST_CFA_OFFSET 8
7930 cld
7931 SAVE_ARGS 0,0,1
7932+ pax_enter_kernel_user
7933+ /*
7934+ * No need to follow this irqs on/off section: the syscall
7935+ * disabled irqs, here we enable it straight after entry:
7936+ */
7937+ ENABLE_INTERRUPTS(CLBR_NONE)
7938 /* no need to do an access_ok check here because rbp has been
7939 32bit zero extended */
7940+
7941+#ifdef CONFIG_PAX_MEMORY_UDEREF
7942+ mov $PAX_USER_SHADOW_BASE,%r11
7943+ add %r11,%rbp
7944+#endif
7945+
7946 1: movl (%rbp),%ebp
7947 .section __ex_table,"a"
7948 .quad 1b,ia32_badarg
7949 .previous
7950- GET_THREAD_INFO(%r10)
7951- orl $TS_COMPAT,TI_status(%r10)
7952- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7953+ GET_THREAD_INFO(%r11)
7954+ orl $TS_COMPAT,TI_status(%r11)
7955+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7956 CFI_REMEMBER_STATE
7957 jnz sysenter_tracesys
7958 cmpq $(IA32_NR_syscalls-1),%rax
7959@@ -166,13 +202,15 @@ sysenter_do_call:
7960 sysenter_dispatch:
7961 call *ia32_sys_call_table(,%rax,8)
7962 movq %rax,RAX-ARGOFFSET(%rsp)
7963- GET_THREAD_INFO(%r10)
7964+ GET_THREAD_INFO(%r11)
7965 DISABLE_INTERRUPTS(CLBR_NONE)
7966 TRACE_IRQS_OFF
7967- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7968+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7969 jnz sysexit_audit
7970 sysexit_from_sys_call:
7971- andl $~TS_COMPAT,TI_status(%r10)
7972+ pax_exit_kernel_user
7973+ pax_erase_kstack
7974+ andl $~TS_COMPAT,TI_status(%r11)
7975 /* clear IF, that popfq doesn't enable interrupts early */
7976 andl $~0x200,EFLAGS-R11(%rsp)
7977 movl RIP-R11(%rsp),%edx /* User %eip */
7978@@ -200,6 +238,9 @@ sysexit_from_sys_call:
7979 movl %eax,%esi /* 2nd arg: syscall number */
7980 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7981 call audit_syscall_entry
7982+
7983+ pax_erase_kstack
7984+
7985 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7986 cmpq $(IA32_NR_syscalls-1),%rax
7987 ja ia32_badsys
7988@@ -211,7 +252,7 @@ sysexit_from_sys_call:
7989 .endm
7990
7991 .macro auditsys_exit exit
7992- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7993+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7994 jnz ia32_ret_from_sys_call
7995 TRACE_IRQS_ON
7996 sti
7997@@ -221,12 +262,12 @@ sysexit_from_sys_call:
7998 movzbl %al,%edi /* zero-extend that into %edi */
7999 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8000 call audit_syscall_exit
8001- GET_THREAD_INFO(%r10)
8002+ GET_THREAD_INFO(%r11)
8003 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8004 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8005 cli
8006 TRACE_IRQS_OFF
8007- testl %edi,TI_flags(%r10)
8008+ testl %edi,TI_flags(%r11)
8009 jz \exit
8010 CLEAR_RREGS -ARGOFFSET
8011 jmp int_with_check
8012@@ -244,7 +285,7 @@ sysexit_audit:
8013
8014 sysenter_tracesys:
8015 #ifdef CONFIG_AUDITSYSCALL
8016- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8017+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8018 jz sysenter_auditsys
8019 #endif
8020 SAVE_REST
8021@@ -252,6 +293,9 @@ sysenter_tracesys:
8022 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8023 movq %rsp,%rdi /* &pt_regs -> arg1 */
8024 call syscall_trace_enter
8025+
8026+ pax_erase_kstack
8027+
8028 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8029 RESTORE_REST
8030 cmpq $(IA32_NR_syscalls-1),%rax
8031@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8032 ENTRY(ia32_cstar_target)
8033 CFI_STARTPROC32 simple
8034 CFI_SIGNAL_FRAME
8035- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8036+ CFI_DEF_CFA rsp,0
8037 CFI_REGISTER rip,rcx
8038 /*CFI_REGISTER rflags,r11*/
8039 SWAPGS_UNSAFE_STACK
8040 movl %esp,%r8d
8041 CFI_REGISTER rsp,r8
8042 movq PER_CPU_VAR(kernel_stack),%rsp
8043+ SAVE_ARGS 8*6,1,1
8044+ pax_enter_kernel_user
8045 /*
8046 * No need to follow this irqs on/off section: the syscall
8047 * disabled irqs and here we enable it straight after entry:
8048 */
8049 ENABLE_INTERRUPTS(CLBR_NONE)
8050- SAVE_ARGS 8,1,1
8051 movl %eax,%eax /* zero extension */
8052 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8053 movq %rcx,RIP-ARGOFFSET(%rsp)
8054@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8055 /* no need to do an access_ok check here because r8 has been
8056 32bit zero extended */
8057 /* hardware stack frame is complete now */
8058+
8059+#ifdef CONFIG_PAX_MEMORY_UDEREF
8060+ mov $PAX_USER_SHADOW_BASE,%r11
8061+ add %r11,%r8
8062+#endif
8063+
8064 1: movl (%r8),%r9d
8065 .section __ex_table,"a"
8066 .quad 1b,ia32_badarg
8067 .previous
8068- GET_THREAD_INFO(%r10)
8069- orl $TS_COMPAT,TI_status(%r10)
8070- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8071+ GET_THREAD_INFO(%r11)
8072+ orl $TS_COMPAT,TI_status(%r11)
8073+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8074 CFI_REMEMBER_STATE
8075 jnz cstar_tracesys
8076 cmpq $IA32_NR_syscalls-1,%rax
8077@@ -327,13 +378,15 @@ cstar_do_call:
8078 cstar_dispatch:
8079 call *ia32_sys_call_table(,%rax,8)
8080 movq %rax,RAX-ARGOFFSET(%rsp)
8081- GET_THREAD_INFO(%r10)
8082+ GET_THREAD_INFO(%r11)
8083 DISABLE_INTERRUPTS(CLBR_NONE)
8084 TRACE_IRQS_OFF
8085- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8086+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8087 jnz sysretl_audit
8088 sysretl_from_sys_call:
8089- andl $~TS_COMPAT,TI_status(%r10)
8090+ pax_exit_kernel_user
8091+ pax_erase_kstack
8092+ andl $~TS_COMPAT,TI_status(%r11)
8093 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8094 movl RIP-ARGOFFSET(%rsp),%ecx
8095 CFI_REGISTER rip,rcx
8096@@ -361,7 +414,7 @@ sysretl_audit:
8097
8098 cstar_tracesys:
8099 #ifdef CONFIG_AUDITSYSCALL
8100- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8101+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8102 jz cstar_auditsys
8103 #endif
8104 xchgl %r9d,%ebp
8105@@ -370,6 +423,9 @@ cstar_tracesys:
8106 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8107 movq %rsp,%rdi /* &pt_regs -> arg1 */
8108 call syscall_trace_enter
8109+
8110+ pax_erase_kstack
8111+
8112 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8113 RESTORE_REST
8114 xchgl %ebp,%r9d
8115@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8116 CFI_REL_OFFSET rip,RIP-RIP
8117 PARAVIRT_ADJUST_EXCEPTION_FRAME
8118 SWAPGS
8119- /*
8120- * No need to follow this irqs on/off section: the syscall
8121- * disabled irqs and here we enable it straight after entry:
8122- */
8123- ENABLE_INTERRUPTS(CLBR_NONE)
8124 movl %eax,%eax
8125 pushq %rax
8126 CFI_ADJUST_CFA_OFFSET 8
8127@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8128 /* note the registers are not zero extended to the sf.
8129 this could be a problem. */
8130 SAVE_ARGS 0,0,1
8131- GET_THREAD_INFO(%r10)
8132- orl $TS_COMPAT,TI_status(%r10)
8133- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8134+ pax_enter_kernel_user
8135+ /*
8136+ * No need to follow this irqs on/off section: the syscall
8137+ * disabled irqs and here we enable it straight after entry:
8138+ */
8139+ ENABLE_INTERRUPTS(CLBR_NONE)
8140+ GET_THREAD_INFO(%r11)
8141+ orl $TS_COMPAT,TI_status(%r11)
8142+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8143 jnz ia32_tracesys
8144 cmpq $(IA32_NR_syscalls-1),%rax
8145 ja ia32_badsys
8146@@ -448,6 +505,9 @@ ia32_tracesys:
8147 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8148 movq %rsp,%rdi /* &pt_regs -> arg1 */
8149 call syscall_trace_enter
8150+
8151+ pax_erase_kstack
8152+
8153 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8154 RESTORE_REST
8155 cmpq $(IA32_NR_syscalls-1),%rax
8156@@ -462,6 +522,7 @@ ia32_badsys:
8157
8158 quiet_ni_syscall:
8159 movq $-ENOSYS,%rax
8160+ pax_force_retaddr
8161 ret
8162 CFI_ENDPROC
8163
8164diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8165index 016218c..47ccbdd 100644
8166--- a/arch/x86/ia32/sys_ia32.c
8167+++ b/arch/x86/ia32/sys_ia32.c
8168@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8169 */
8170 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8171 {
8172- typeof(ubuf->st_uid) uid = 0;
8173- typeof(ubuf->st_gid) gid = 0;
8174+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8175+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8176 SET_UID(uid, stat->uid);
8177 SET_GID(gid, stat->gid);
8178 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8179@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8180 }
8181 set_fs(KERNEL_DS);
8182 ret = sys_rt_sigprocmask(how,
8183- set ? (sigset_t __user *)&s : NULL,
8184- oset ? (sigset_t __user *)&s : NULL,
8185+ set ? (sigset_t __force_user *)&s : NULL,
8186+ oset ? (sigset_t __force_user *)&s : NULL,
8187 sigsetsize);
8188 set_fs(old_fs);
8189 if (ret)
8190@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8191 mm_segment_t old_fs = get_fs();
8192
8193 set_fs(KERNEL_DS);
8194- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8195+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8196 set_fs(old_fs);
8197 if (put_compat_timespec(&t, interval))
8198 return -EFAULT;
8199@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8200 mm_segment_t old_fs = get_fs();
8201
8202 set_fs(KERNEL_DS);
8203- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8204+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8205 set_fs(old_fs);
8206 if (!ret) {
8207 switch (_NSIG_WORDS) {
8208@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8209 if (copy_siginfo_from_user32(&info, uinfo))
8210 return -EFAULT;
8211 set_fs(KERNEL_DS);
8212- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8213+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8214 set_fs(old_fs);
8215 return ret;
8216 }
8217@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8218 return -EFAULT;
8219
8220 set_fs(KERNEL_DS);
8221- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8222+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8223 count);
8224 set_fs(old_fs);
8225
8226diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8227index e2077d3..b7a8919 100644
8228--- a/arch/x86/include/asm/alternative-asm.h
8229+++ b/arch/x86/include/asm/alternative-asm.h
8230@@ -8,10 +8,10 @@
8231
8232 #ifdef CONFIG_SMP
8233 .macro LOCK_PREFIX
8234-1: lock
8235+672: lock
8236 .section .smp_locks,"a"
8237 .align 4
8238- X86_ALIGN 1b
8239+ X86_ALIGN 672b
8240 .previous
8241 .endm
8242 #else
8243@@ -19,4 +19,43 @@
8244 .endm
8245 #endif
8246
8247+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
8248+ .macro pax_force_retaddr_bts rip=0
8249+ btsq $63,\rip(%rsp)
8250+ .endm
8251+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8252+ .macro pax_force_retaddr rip=0, reload=0
8253+ btsq $63,\rip(%rsp)
8254+ .endm
8255+ .macro pax_force_fptr ptr
8256+ btsq $63,\ptr
8257+ .endm
8258+ .macro pax_set_fptr_mask
8259+ .endm
8260+#endif
8261+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8262+ .macro pax_force_retaddr rip=0, reload=0
8263+ .if \reload
8264+ pax_set_fptr_mask
8265+ .endif
8266+ orq %r10,\rip(%rsp)
8267+ .endm
8268+ .macro pax_force_fptr ptr
8269+ orq %r10,\ptr
8270+ .endm
8271+ .macro pax_set_fptr_mask
8272+ movabs $0x8000000000000000,%r10
8273+ .endm
8274+#endif
8275+#else
8276+ .macro pax_force_retaddr rip=0, reload=0
8277+ .endm
8278+ .macro pax_force_fptr ptr
8279+ .endm
8280+ .macro pax_force_retaddr_bts rip=0
8281+ .endm
8282+ .macro pax_set_fptr_mask
8283+ .endm
8284+#endif
8285+
8286 #endif /* __ASSEMBLY__ */
8287diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8288index c240efc..fdfadf3 100644
8289--- a/arch/x86/include/asm/alternative.h
8290+++ b/arch/x86/include/asm/alternative.h
8291@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8292 " .byte 662b-661b\n" /* sourcelen */ \
8293 " .byte 664f-663f\n" /* replacementlen */ \
8294 ".previous\n" \
8295- ".section .altinstr_replacement, \"ax\"\n" \
8296+ ".section .altinstr_replacement, \"a\"\n" \
8297 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8298 ".previous"
8299
8300diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8301index 474d80d..1f97d58 100644
8302--- a/arch/x86/include/asm/apic.h
8303+++ b/arch/x86/include/asm/apic.h
8304@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8305
8306 #ifdef CONFIG_X86_LOCAL_APIC
8307
8308-extern unsigned int apic_verbosity;
8309+extern int apic_verbosity;
8310 extern int local_apic_timer_c2_ok;
8311
8312 extern int disable_apic;
8313diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8314index 20370c6..a2eb9b0 100644
8315--- a/arch/x86/include/asm/apm.h
8316+++ b/arch/x86/include/asm/apm.h
8317@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8318 __asm__ __volatile__(APM_DO_ZERO_SEGS
8319 "pushl %%edi\n\t"
8320 "pushl %%ebp\n\t"
8321- "lcall *%%cs:apm_bios_entry\n\t"
8322+ "lcall *%%ss:apm_bios_entry\n\t"
8323 "setc %%al\n\t"
8324 "popl %%ebp\n\t"
8325 "popl %%edi\n\t"
8326@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8327 __asm__ __volatile__(APM_DO_ZERO_SEGS
8328 "pushl %%edi\n\t"
8329 "pushl %%ebp\n\t"
8330- "lcall *%%cs:apm_bios_entry\n\t"
8331+ "lcall *%%ss:apm_bios_entry\n\t"
8332 "setc %%bl\n\t"
8333 "popl %%ebp\n\t"
8334 "popl %%edi\n\t"
8335diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8336index dc5a667..939040c 100644
8337--- a/arch/x86/include/asm/atomic_32.h
8338+++ b/arch/x86/include/asm/atomic_32.h
8339@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8340 }
8341
8342 /**
8343+ * atomic_read_unchecked - read atomic variable
8344+ * @v: pointer of type atomic_unchecked_t
8345+ *
8346+ * Atomically reads the value of @v.
8347+ */
8348+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8349+{
8350+ return v->counter;
8351+}
8352+
8353+/**
8354 * atomic_set - set atomic variable
8355 * @v: pointer of type atomic_t
8356 * @i: required value
8357@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8358 }
8359
8360 /**
8361+ * atomic_set_unchecked - set atomic variable
8362+ * @v: pointer of type atomic_unchecked_t
8363+ * @i: required value
8364+ *
8365+ * Atomically sets the value of @v to @i.
8366+ */
8367+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8368+{
8369+ v->counter = i;
8370+}
8371+
8372+/**
8373 * atomic_add - add integer to atomic variable
8374 * @i: integer value to add
8375 * @v: pointer of type atomic_t
8376@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8377 */
8378 static inline void atomic_add(int i, atomic_t *v)
8379 {
8380- asm volatile(LOCK_PREFIX "addl %1,%0"
8381+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8382+
8383+#ifdef CONFIG_PAX_REFCOUNT
8384+ "jno 0f\n"
8385+ LOCK_PREFIX "subl %1,%0\n"
8386+ "int $4\n0:\n"
8387+ _ASM_EXTABLE(0b, 0b)
8388+#endif
8389+
8390+ : "+m" (v->counter)
8391+ : "ir" (i));
8392+}
8393+
8394+/**
8395+ * atomic_add_unchecked - add integer to atomic variable
8396+ * @i: integer value to add
8397+ * @v: pointer of type atomic_unchecked_t
8398+ *
8399+ * Atomically adds @i to @v.
8400+ */
8401+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8402+{
8403+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8404 : "+m" (v->counter)
8405 : "ir" (i));
8406 }
8407@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8408 */
8409 static inline void atomic_sub(int i, atomic_t *v)
8410 {
8411- asm volatile(LOCK_PREFIX "subl %1,%0"
8412+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8413+
8414+#ifdef CONFIG_PAX_REFCOUNT
8415+ "jno 0f\n"
8416+ LOCK_PREFIX "addl %1,%0\n"
8417+ "int $4\n0:\n"
8418+ _ASM_EXTABLE(0b, 0b)
8419+#endif
8420+
8421+ : "+m" (v->counter)
8422+ : "ir" (i));
8423+}
8424+
8425+/**
8426+ * atomic_sub_unchecked - subtract integer from atomic variable
8427+ * @i: integer value to subtract
8428+ * @v: pointer of type atomic_unchecked_t
8429+ *
8430+ * Atomically subtracts @i from @v.
8431+ */
8432+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8433+{
8434+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8435 : "+m" (v->counter)
8436 : "ir" (i));
8437 }
8438@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8439 {
8440 unsigned char c;
8441
8442- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8443+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8444+
8445+#ifdef CONFIG_PAX_REFCOUNT
8446+ "jno 0f\n"
8447+ LOCK_PREFIX "addl %2,%0\n"
8448+ "int $4\n0:\n"
8449+ _ASM_EXTABLE(0b, 0b)
8450+#endif
8451+
8452+ "sete %1\n"
8453 : "+m" (v->counter), "=qm" (c)
8454 : "ir" (i) : "memory");
8455 return c;
8456@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8457 */
8458 static inline void atomic_inc(atomic_t *v)
8459 {
8460- asm volatile(LOCK_PREFIX "incl %0"
8461+ asm volatile(LOCK_PREFIX "incl %0\n"
8462+
8463+#ifdef CONFIG_PAX_REFCOUNT
8464+ "jno 0f\n"
8465+ LOCK_PREFIX "decl %0\n"
8466+ "int $4\n0:\n"
8467+ _ASM_EXTABLE(0b, 0b)
8468+#endif
8469+
8470+ : "+m" (v->counter));
8471+}
8472+
8473+/**
8474+ * atomic_inc_unchecked - increment atomic variable
8475+ * @v: pointer of type atomic_unchecked_t
8476+ *
8477+ * Atomically increments @v by 1.
8478+ */
8479+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8480+{
8481+ asm volatile(LOCK_PREFIX "incl %0\n"
8482 : "+m" (v->counter));
8483 }
8484
8485@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8486 */
8487 static inline void atomic_dec(atomic_t *v)
8488 {
8489- asm volatile(LOCK_PREFIX "decl %0"
8490+ asm volatile(LOCK_PREFIX "decl %0\n"
8491+
8492+#ifdef CONFIG_PAX_REFCOUNT
8493+ "jno 0f\n"
8494+ LOCK_PREFIX "incl %0\n"
8495+ "int $4\n0:\n"
8496+ _ASM_EXTABLE(0b, 0b)
8497+#endif
8498+
8499+ : "+m" (v->counter));
8500+}
8501+
8502+/**
8503+ * atomic_dec_unchecked - decrement atomic variable
8504+ * @v: pointer of type atomic_unchecked_t
8505+ *
8506+ * Atomically decrements @v by 1.
8507+ */
8508+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8509+{
8510+ asm volatile(LOCK_PREFIX "decl %0\n"
8511 : "+m" (v->counter));
8512 }
8513
8514@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8515 {
8516 unsigned char c;
8517
8518- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8519+ asm volatile(LOCK_PREFIX "decl %0\n"
8520+
8521+#ifdef CONFIG_PAX_REFCOUNT
8522+ "jno 0f\n"
8523+ LOCK_PREFIX "incl %0\n"
8524+ "int $4\n0:\n"
8525+ _ASM_EXTABLE(0b, 0b)
8526+#endif
8527+
8528+ "sete %1\n"
8529 : "+m" (v->counter), "=qm" (c)
8530 : : "memory");
8531 return c != 0;
8532@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8533 {
8534 unsigned char c;
8535
8536- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8537+ asm volatile(LOCK_PREFIX "incl %0\n"
8538+
8539+#ifdef CONFIG_PAX_REFCOUNT
8540+ "jno 0f\n"
8541+ LOCK_PREFIX "decl %0\n"
8542+ "into\n0:\n"
8543+ _ASM_EXTABLE(0b, 0b)
8544+#endif
8545+
8546+ "sete %1\n"
8547+ : "+m" (v->counter), "=qm" (c)
8548+ : : "memory");
8549+ return c != 0;
8550+}
8551+
8552+/**
8553+ * atomic_inc_and_test_unchecked - increment and test
8554+ * @v: pointer of type atomic_unchecked_t
8555+ *
8556+ * Atomically increments @v by 1
8557+ * and returns true if the result is zero, or false for all
8558+ * other cases.
8559+ */
8560+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8561+{
8562+ unsigned char c;
8563+
8564+ asm volatile(LOCK_PREFIX "incl %0\n"
8565+ "sete %1\n"
8566 : "+m" (v->counter), "=qm" (c)
8567 : : "memory");
8568 return c != 0;
8569@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8570 {
8571 unsigned char c;
8572
8573- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8574+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8575+
8576+#ifdef CONFIG_PAX_REFCOUNT
8577+ "jno 0f\n"
8578+ LOCK_PREFIX "subl %2,%0\n"
8579+ "int $4\n0:\n"
8580+ _ASM_EXTABLE(0b, 0b)
8581+#endif
8582+
8583+ "sets %1\n"
8584 : "+m" (v->counter), "=qm" (c)
8585 : "ir" (i) : "memory");
8586 return c;
8587@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8588 #endif
8589 /* Modern 486+ processor */
8590 __i = i;
8591- asm volatile(LOCK_PREFIX "xaddl %0, %1"
8592+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8593+
8594+#ifdef CONFIG_PAX_REFCOUNT
8595+ "jno 0f\n"
8596+ "movl %0, %1\n"
8597+ "int $4\n0:\n"
8598+ _ASM_EXTABLE(0b, 0b)
8599+#endif
8600+
8601 : "+r" (i), "+m" (v->counter)
8602 : : "memory");
8603 return i + __i;
8604@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8605 }
8606
8607 /**
8608+ * atomic_add_return_unchecked - add integer and return
8609+ * @v: pointer of type atomic_unchecked_t
8610+ * @i: integer value to add
8611+ *
8612+ * Atomically adds @i to @v and returns @i + @v
8613+ */
8614+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8615+{
8616+ int __i;
8617+#ifdef CONFIG_M386
8618+ unsigned long flags;
8619+ if (unlikely(boot_cpu_data.x86 <= 3))
8620+ goto no_xadd;
8621+#endif
8622+ /* Modern 486+ processor */
8623+ __i = i;
8624+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
8625+ : "+r" (i), "+m" (v->counter)
8626+ : : "memory");
8627+ return i + __i;
8628+
8629+#ifdef CONFIG_M386
8630+no_xadd: /* Legacy 386 processor */
8631+ local_irq_save(flags);
8632+ __i = atomic_read_unchecked(v);
8633+ atomic_set_unchecked(v, i + __i);
8634+ local_irq_restore(flags);
8635+ return i + __i;
8636+#endif
8637+}
8638+
8639+/**
8640 * atomic_sub_return - subtract integer and return
8641 * @v: pointer of type atomic_t
8642 * @i: integer value to subtract
8643@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8644 return cmpxchg(&v->counter, old, new);
8645 }
8646
8647+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8648+{
8649+ return cmpxchg(&v->counter, old, new);
8650+}
8651+
8652 static inline int atomic_xchg(atomic_t *v, int new)
8653 {
8654 return xchg(&v->counter, new);
8655 }
8656
8657+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8658+{
8659+ return xchg(&v->counter, new);
8660+}
8661+
8662 /**
8663 * atomic_add_unless - add unless the number is already a given value
8664 * @v: pointer of type atomic_t
8665@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8666 */
8667 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8668 {
8669- int c, old;
8670+ int c, old, new;
8671 c = atomic_read(v);
8672 for (;;) {
8673- if (unlikely(c == (u)))
8674+ if (unlikely(c == u))
8675 break;
8676- old = atomic_cmpxchg((v), c, c + (a));
8677+
8678+ asm volatile("addl %2,%0\n"
8679+
8680+#ifdef CONFIG_PAX_REFCOUNT
8681+ "jno 0f\n"
8682+ "subl %2,%0\n"
8683+ "int $4\n0:\n"
8684+ _ASM_EXTABLE(0b, 0b)
8685+#endif
8686+
8687+ : "=r" (new)
8688+ : "0" (c), "ir" (a));
8689+
8690+ old = atomic_cmpxchg(v, c, new);
8691 if (likely(old == c))
8692 break;
8693 c = old;
8694 }
8695- return c != (u);
8696+ return c != u;
8697 }
8698
8699 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8700
8701 #define atomic_inc_return(v) (atomic_add_return(1, v))
8702+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8703+{
8704+ return atomic_add_return_unchecked(1, v);
8705+}
8706 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8707
8708 /* These are x86-specific, used by some header files */
8709@@ -266,9 +495,18 @@ typedef struct {
8710 u64 __aligned(8) counter;
8711 } atomic64_t;
8712
8713+#ifdef CONFIG_PAX_REFCOUNT
8714+typedef struct {
8715+ u64 __aligned(8) counter;
8716+} atomic64_unchecked_t;
8717+#else
8718+typedef atomic64_t atomic64_unchecked_t;
8719+#endif
8720+
8721 #define ATOMIC64_INIT(val) { (val) }
8722
8723 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8724+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8725
8726 /**
8727 * atomic64_xchg - xchg atomic64 variable
8728@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8729 * the old value.
8730 */
8731 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8732+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8733
8734 /**
8735 * atomic64_set - set atomic64 variable
8736@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8737 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8738
8739 /**
8740+ * atomic64_unchecked_set - set atomic64 variable
8741+ * @ptr: pointer to type atomic64_unchecked_t
8742+ * @new_val: value to assign
8743+ *
8744+ * Atomically sets the value of @ptr to @new_val.
8745+ */
8746+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8747+
8748+/**
8749 * atomic64_read - read atomic64 variable
8750 * @ptr: pointer to type atomic64_t
8751 *
8752@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8753 return res;
8754 }
8755
8756-extern u64 atomic64_read(atomic64_t *ptr);
8757+/**
8758+ * atomic64_read_unchecked - read atomic64 variable
8759+ * @ptr: pointer to type atomic64_unchecked_t
8760+ *
8761+ * Atomically reads the value of @ptr and returns it.
8762+ */
8763+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8764+{
8765+ u64 res;
8766+
8767+ /*
8768+ * Note, we inline this atomic64_unchecked_t primitive because
8769+ * it only clobbers EAX/EDX and leaves the others
8770+ * untouched. We also (somewhat subtly) rely on the
8771+ * fact that cmpxchg8b returns the current 64-bit value
8772+ * of the memory location we are touching:
8773+ */
8774+ asm volatile(
8775+ "mov %%ebx, %%eax\n\t"
8776+ "mov %%ecx, %%edx\n\t"
8777+ LOCK_PREFIX "cmpxchg8b %1\n"
8778+ : "=&A" (res)
8779+ : "m" (*ptr)
8780+ );
8781+
8782+ return res;
8783+}
8784
8785 /**
8786 * atomic64_add_return - add and return
8787@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
8788 * Other variants with different arithmetic operators:
8789 */
8790 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
8791+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8792 extern u64 atomic64_inc_return(atomic64_t *ptr);
8793+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
8794 extern u64 atomic64_dec_return(atomic64_t *ptr);
8795+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
8796
8797 /**
8798 * atomic64_add - add integer to atomic64 variable
8799@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
8800 extern void atomic64_add(u64 delta, atomic64_t *ptr);
8801
8802 /**
8803+ * atomic64_add_unchecked - add integer to atomic64 variable
8804+ * @delta: integer value to add
8805+ * @ptr: pointer to type atomic64_unchecked_t
8806+ *
8807+ * Atomically adds @delta to @ptr.
8808+ */
8809+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8810+
8811+/**
8812 * atomic64_sub - subtract the atomic64 variable
8813 * @delta: integer value to subtract
8814 * @ptr: pointer to type atomic64_t
8815@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
8816 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
8817
8818 /**
8819+ * atomic64_sub_unchecked - subtract the atomic64 variable
8820+ * @delta: integer value to subtract
8821+ * @ptr: pointer to type atomic64_unchecked_t
8822+ *
8823+ * Atomically subtracts @delta from @ptr.
8824+ */
8825+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8826+
8827+/**
8828 * atomic64_sub_and_test - subtract value from variable and test result
8829 * @delta: integer value to subtract
8830 * @ptr: pointer to type atomic64_t
8831@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
8832 extern void atomic64_inc(atomic64_t *ptr);
8833
8834 /**
8835+ * atomic64_inc_unchecked - increment atomic64 variable
8836+ * @ptr: pointer to type atomic64_unchecked_t
8837+ *
8838+ * Atomically increments @ptr by 1.
8839+ */
8840+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
8841+
8842+/**
8843 * atomic64_dec - decrement atomic64 variable
8844 * @ptr: pointer to type atomic64_t
8845 *
8846@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
8847 extern void atomic64_dec(atomic64_t *ptr);
8848
8849 /**
8850+ * atomic64_dec_unchecked - decrement atomic64 variable
8851+ * @ptr: pointer to type atomic64_unchecked_t
8852+ *
8853+ * Atomically decrements @ptr by 1.
8854+ */
8855+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
8856+
8857+/**
8858 * atomic64_dec_and_test - decrement and test
8859 * @ptr: pointer to type atomic64_t
8860 *
8861diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
8862index d605dc2..fafd7bd 100644
8863--- a/arch/x86/include/asm/atomic_64.h
8864+++ b/arch/x86/include/asm/atomic_64.h
8865@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
8866 }
8867
8868 /**
8869+ * atomic_read_unchecked - read atomic variable
8870+ * @v: pointer of type atomic_unchecked_t
8871+ *
8872+ * Atomically reads the value of @v.
8873+ */
8874+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8875+{
8876+ return v->counter;
8877+}
8878+
8879+/**
8880 * atomic_set - set atomic variable
8881 * @v: pointer of type atomic_t
8882 * @i: required value
8883@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
8884 }
8885
8886 /**
8887+ * atomic_set_unchecked - set atomic variable
8888+ * @v: pointer of type atomic_unchecked_t
8889+ * @i: required value
8890+ *
8891+ * Atomically sets the value of @v to @i.
8892+ */
8893+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8894+{
8895+ v->counter = i;
8896+}
8897+
8898+/**
8899 * atomic_add - add integer to atomic variable
8900 * @i: integer value to add
8901 * @v: pointer of type atomic_t
8902@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
8903 */
8904 static inline void atomic_add(int i, atomic_t *v)
8905 {
8906- asm volatile(LOCK_PREFIX "addl %1,%0"
8907+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8908+
8909+#ifdef CONFIG_PAX_REFCOUNT
8910+ "jno 0f\n"
8911+ LOCK_PREFIX "subl %1,%0\n"
8912+ "int $4\n0:\n"
8913+ _ASM_EXTABLE(0b, 0b)
8914+#endif
8915+
8916+ : "=m" (v->counter)
8917+ : "ir" (i), "m" (v->counter));
8918+}
8919+
8920+/**
8921+ * atomic_add_unchecked - add integer to atomic variable
8922+ * @i: integer value to add
8923+ * @v: pointer of type atomic_unchecked_t
8924+ *
8925+ * Atomically adds @i to @v.
8926+ */
8927+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8928+{
8929+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8930 : "=m" (v->counter)
8931 : "ir" (i), "m" (v->counter));
8932 }
8933@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
8934 */
8935 static inline void atomic_sub(int i, atomic_t *v)
8936 {
8937- asm volatile(LOCK_PREFIX "subl %1,%0"
8938+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8939+
8940+#ifdef CONFIG_PAX_REFCOUNT
8941+ "jno 0f\n"
8942+ LOCK_PREFIX "addl %1,%0\n"
8943+ "int $4\n0:\n"
8944+ _ASM_EXTABLE(0b, 0b)
8945+#endif
8946+
8947+ : "=m" (v->counter)
8948+ : "ir" (i), "m" (v->counter));
8949+}
8950+
8951+/**
8952+ * atomic_sub_unchecked - subtract the atomic variable
8953+ * @i: integer value to subtract
8954+ * @v: pointer of type atomic_unchecked_t
8955+ *
8956+ * Atomically subtracts @i from @v.
8957+ */
8958+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8959+{
8960+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8961 : "=m" (v->counter)
8962 : "ir" (i), "m" (v->counter));
8963 }
8964@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8965 {
8966 unsigned char c;
8967
8968- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8969+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8970+
8971+#ifdef CONFIG_PAX_REFCOUNT
8972+ "jno 0f\n"
8973+ LOCK_PREFIX "addl %2,%0\n"
8974+ "int $4\n0:\n"
8975+ _ASM_EXTABLE(0b, 0b)
8976+#endif
8977+
8978+ "sete %1\n"
8979 : "=m" (v->counter), "=qm" (c)
8980 : "ir" (i), "m" (v->counter) : "memory");
8981 return c;
8982@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8983 */
8984 static inline void atomic_inc(atomic_t *v)
8985 {
8986- asm volatile(LOCK_PREFIX "incl %0"
8987+ asm volatile(LOCK_PREFIX "incl %0\n"
8988+
8989+#ifdef CONFIG_PAX_REFCOUNT
8990+ "jno 0f\n"
8991+ LOCK_PREFIX "decl %0\n"
8992+ "int $4\n0:\n"
8993+ _ASM_EXTABLE(0b, 0b)
8994+#endif
8995+
8996+ : "=m" (v->counter)
8997+ : "m" (v->counter));
8998+}
8999+
9000+/**
9001+ * atomic_inc_unchecked - increment atomic variable
9002+ * @v: pointer of type atomic_unchecked_t
9003+ *
9004+ * Atomically increments @v by 1.
9005+ */
9006+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9007+{
9008+ asm volatile(LOCK_PREFIX "incl %0\n"
9009 : "=m" (v->counter)
9010 : "m" (v->counter));
9011 }
9012@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9013 */
9014 static inline void atomic_dec(atomic_t *v)
9015 {
9016- asm volatile(LOCK_PREFIX "decl %0"
9017+ asm volatile(LOCK_PREFIX "decl %0\n"
9018+
9019+#ifdef CONFIG_PAX_REFCOUNT
9020+ "jno 0f\n"
9021+ LOCK_PREFIX "incl %0\n"
9022+ "int $4\n0:\n"
9023+ _ASM_EXTABLE(0b, 0b)
9024+#endif
9025+
9026+ : "=m" (v->counter)
9027+ : "m" (v->counter));
9028+}
9029+
9030+/**
9031+ * atomic_dec_unchecked - decrement atomic variable
9032+ * @v: pointer of type atomic_unchecked_t
9033+ *
9034+ * Atomically decrements @v by 1.
9035+ */
9036+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9037+{
9038+ asm volatile(LOCK_PREFIX "decl %0\n"
9039 : "=m" (v->counter)
9040 : "m" (v->counter));
9041 }
9042@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9043 {
9044 unsigned char c;
9045
9046- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9047+ asm volatile(LOCK_PREFIX "decl %0\n"
9048+
9049+#ifdef CONFIG_PAX_REFCOUNT
9050+ "jno 0f\n"
9051+ LOCK_PREFIX "incl %0\n"
9052+ "int $4\n0:\n"
9053+ _ASM_EXTABLE(0b, 0b)
9054+#endif
9055+
9056+ "sete %1\n"
9057 : "=m" (v->counter), "=qm" (c)
9058 : "m" (v->counter) : "memory");
9059 return c != 0;
9060@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9061 {
9062 unsigned char c;
9063
9064- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9065+ asm volatile(LOCK_PREFIX "incl %0\n"
9066+
9067+#ifdef CONFIG_PAX_REFCOUNT
9068+ "jno 0f\n"
9069+ LOCK_PREFIX "decl %0\n"
9070+ "int $4\n0:\n"
9071+ _ASM_EXTABLE(0b, 0b)
9072+#endif
9073+
9074+ "sete %1\n"
9075+ : "=m" (v->counter), "=qm" (c)
9076+ : "m" (v->counter) : "memory");
9077+ return c != 0;
9078+}
9079+
9080+/**
9081+ * atomic_inc_and_test_unchecked - increment and test
9082+ * @v: pointer of type atomic_unchecked_t
9083+ *
9084+ * Atomically increments @v by 1
9085+ * and returns true if the result is zero, or false for all
9086+ * other cases.
9087+ */
9088+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9089+{
9090+ unsigned char c;
9091+
9092+ asm volatile(LOCK_PREFIX "incl %0\n"
9093+ "sete %1\n"
9094 : "=m" (v->counter), "=qm" (c)
9095 : "m" (v->counter) : "memory");
9096 return c != 0;
9097@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9098 {
9099 unsigned char c;
9100
9101- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9102+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9103+
9104+#ifdef CONFIG_PAX_REFCOUNT
9105+ "jno 0f\n"
9106+ LOCK_PREFIX "subl %2,%0\n"
9107+ "int $4\n0:\n"
9108+ _ASM_EXTABLE(0b, 0b)
9109+#endif
9110+
9111+ "sets %1\n"
9112 : "=m" (v->counter), "=qm" (c)
9113 : "ir" (i), "m" (v->counter) : "memory");
9114 return c;
9115@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9116 static inline int atomic_add_return(int i, atomic_t *v)
9117 {
9118 int __i = i;
9119- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9120+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9121+
9122+#ifdef CONFIG_PAX_REFCOUNT
9123+ "jno 0f\n"
9124+ "movl %0, %1\n"
9125+ "int $4\n0:\n"
9126+ _ASM_EXTABLE(0b, 0b)
9127+#endif
9128+
9129+ : "+r" (i), "+m" (v->counter)
9130+ : : "memory");
9131+ return i + __i;
9132+}
9133+
9134+/**
9135+ * atomic_add_return_unchecked - add and return
9136+ * @i: integer value to add
9137+ * @v: pointer of type atomic_unchecked_t
9138+ *
9139+ * Atomically adds @i to @v and returns @i + @v
9140+ */
9141+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9142+{
9143+ int __i = i;
9144+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9145 : "+r" (i), "+m" (v->counter)
9146 : : "memory");
9147 return i + __i;
9148@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9149 }
9150
9151 #define atomic_inc_return(v) (atomic_add_return(1, v))
9152+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9153+{
9154+ return atomic_add_return_unchecked(1, v);
9155+}
9156 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9157
9158 /* The 64-bit atomic type */
9159@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9160 }
9161
9162 /**
9163+ * atomic64_read_unchecked - read atomic64 variable
9164+ * @v: pointer of type atomic64_unchecked_t
9165+ *
9166+ * Atomically reads the value of @v.
9167+ * Doesn't imply a read memory barrier.
9168+ */
9169+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9170+{
9171+ return v->counter;
9172+}
9173+
9174+/**
9175 * atomic64_set - set atomic64 variable
9176 * @v: pointer to type atomic64_t
9177 * @i: required value
9178@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9179 }
9180
9181 /**
9182+ * atomic64_set_unchecked - set atomic64 variable
9183+ * @v: pointer to type atomic64_unchecked_t
9184+ * @i: required value
9185+ *
9186+ * Atomically sets the value of @v to @i.
9187+ */
9188+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9189+{
9190+ v->counter = i;
9191+}
9192+
9193+/**
9194 * atomic64_add - add integer to atomic64 variable
9195 * @i: integer value to add
9196 * @v: pointer to type atomic64_t
9197@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9198 */
9199 static inline void atomic64_add(long i, atomic64_t *v)
9200 {
9201+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9202+
9203+#ifdef CONFIG_PAX_REFCOUNT
9204+ "jno 0f\n"
9205+ LOCK_PREFIX "subq %1,%0\n"
9206+ "int $4\n0:\n"
9207+ _ASM_EXTABLE(0b, 0b)
9208+#endif
9209+
9210+ : "=m" (v->counter)
9211+ : "er" (i), "m" (v->counter));
9212+}
9213+
9214+/**
9215+ * atomic64_add_unchecked - add integer to atomic64 variable
9216+ * @i: integer value to add
9217+ * @v: pointer to type atomic64_unchecked_t
9218+ *
9219+ * Atomically adds @i to @v.
9220+ */
9221+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9222+{
9223 asm volatile(LOCK_PREFIX "addq %1,%0"
9224 : "=m" (v->counter)
9225 : "er" (i), "m" (v->counter));
9226@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9227 */
9228 static inline void atomic64_sub(long i, atomic64_t *v)
9229 {
9230- asm volatile(LOCK_PREFIX "subq %1,%0"
9231+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9232+
9233+#ifdef CONFIG_PAX_REFCOUNT
9234+ "jno 0f\n"
9235+ LOCK_PREFIX "addq %1,%0\n"
9236+ "int $4\n0:\n"
9237+ _ASM_EXTABLE(0b, 0b)
9238+#endif
9239+
9240 : "=m" (v->counter)
9241 : "er" (i), "m" (v->counter));
9242 }
9243@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9244 {
9245 unsigned char c;
9246
9247- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9248+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9249+
9250+#ifdef CONFIG_PAX_REFCOUNT
9251+ "jno 0f\n"
9252+ LOCK_PREFIX "addq %2,%0\n"
9253+ "int $4\n0:\n"
9254+ _ASM_EXTABLE(0b, 0b)
9255+#endif
9256+
9257+ "sete %1\n"
9258 : "=m" (v->counter), "=qm" (c)
9259 : "er" (i), "m" (v->counter) : "memory");
9260 return c;
9261@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9262 */
9263 static inline void atomic64_inc(atomic64_t *v)
9264 {
9265+ asm volatile(LOCK_PREFIX "incq %0\n"
9266+
9267+#ifdef CONFIG_PAX_REFCOUNT
9268+ "jno 0f\n"
9269+ LOCK_PREFIX "decq %0\n"
9270+ "int $4\n0:\n"
9271+ _ASM_EXTABLE(0b, 0b)
9272+#endif
9273+
9274+ : "=m" (v->counter)
9275+ : "m" (v->counter));
9276+}
9277+
9278+/**
9279+ * atomic64_inc_unchecked - increment atomic64 variable
9280+ * @v: pointer to type atomic64_unchecked_t
9281+ *
9282+ * Atomically increments @v by 1.
9283+ */
9284+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9285+{
9286 asm volatile(LOCK_PREFIX "incq %0"
9287 : "=m" (v->counter)
9288 : "m" (v->counter));
9289@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9290 */
9291 static inline void atomic64_dec(atomic64_t *v)
9292 {
9293- asm volatile(LOCK_PREFIX "decq %0"
9294+ asm volatile(LOCK_PREFIX "decq %0\n"
9295+
9296+#ifdef CONFIG_PAX_REFCOUNT
9297+ "jno 0f\n"
9298+ LOCK_PREFIX "incq %0\n"
9299+ "int $4\n0:\n"
9300+ _ASM_EXTABLE(0b, 0b)
9301+#endif
9302+
9303+ : "=m" (v->counter)
9304+ : "m" (v->counter));
9305+}
9306+
9307+/**
9308+ * atomic64_dec_unchecked - decrement atomic64 variable
9309+ * @v: pointer to type atomic64_t
9310+ *
9311+ * Atomically decrements @v by 1.
9312+ */
9313+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9314+{
9315+ asm volatile(LOCK_PREFIX "decq %0\n"
9316 : "=m" (v->counter)
9317 : "m" (v->counter));
9318 }
9319@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9320 {
9321 unsigned char c;
9322
9323- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9324+ asm volatile(LOCK_PREFIX "decq %0\n"
9325+
9326+#ifdef CONFIG_PAX_REFCOUNT
9327+ "jno 0f\n"
9328+ LOCK_PREFIX "incq %0\n"
9329+ "int $4\n0:\n"
9330+ _ASM_EXTABLE(0b, 0b)
9331+#endif
9332+
9333+ "sete %1\n"
9334 : "=m" (v->counter), "=qm" (c)
9335 : "m" (v->counter) : "memory");
9336 return c != 0;
9337@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9338 {
9339 unsigned char c;
9340
9341- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9342+ asm volatile(LOCK_PREFIX "incq %0\n"
9343+
9344+#ifdef CONFIG_PAX_REFCOUNT
9345+ "jno 0f\n"
9346+ LOCK_PREFIX "decq %0\n"
9347+ "int $4\n0:\n"
9348+ _ASM_EXTABLE(0b, 0b)
9349+#endif
9350+
9351+ "sete %1\n"
9352 : "=m" (v->counter), "=qm" (c)
9353 : "m" (v->counter) : "memory");
9354 return c != 0;
9355@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9356 {
9357 unsigned char c;
9358
9359- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9360+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9361+
9362+#ifdef CONFIG_PAX_REFCOUNT
9363+ "jno 0f\n"
9364+ LOCK_PREFIX "subq %2,%0\n"
9365+ "int $4\n0:\n"
9366+ _ASM_EXTABLE(0b, 0b)
9367+#endif
9368+
9369+ "sets %1\n"
9370 : "=m" (v->counter), "=qm" (c)
9371 : "er" (i), "m" (v->counter) : "memory");
9372 return c;
9373@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9374 static inline long atomic64_add_return(long i, atomic64_t *v)
9375 {
9376 long __i = i;
9377- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9378+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9379+
9380+#ifdef CONFIG_PAX_REFCOUNT
9381+ "jno 0f\n"
9382+ "movq %0, %1\n"
9383+ "int $4\n0:\n"
9384+ _ASM_EXTABLE(0b, 0b)
9385+#endif
9386+
9387+ : "+r" (i), "+m" (v->counter)
9388+ : : "memory");
9389+ return i + __i;
9390+}
9391+
9392+/**
9393+ * atomic64_add_return_unchecked - add and return
9394+ * @i: integer value to add
9395+ * @v: pointer to type atomic64_unchecked_t
9396+ *
9397+ * Atomically adds @i to @v and returns @i + @v
9398+ */
9399+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9400+{
9401+ long __i = i;
9402+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
9403 : "+r" (i), "+m" (v->counter)
9404 : : "memory");
9405 return i + __i;
9406@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9407 }
9408
9409 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9410+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9411+{
9412+ return atomic64_add_return_unchecked(1, v);
9413+}
9414 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9415
9416 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9417@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9418 return cmpxchg(&v->counter, old, new);
9419 }
9420
9421+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9422+{
9423+ return cmpxchg(&v->counter, old, new);
9424+}
9425+
9426 static inline long atomic64_xchg(atomic64_t *v, long new)
9427 {
9428 return xchg(&v->counter, new);
9429 }
9430
9431+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9432+{
9433+ return xchg(&v->counter, new);
9434+}
9435+
9436 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9437 {
9438 return cmpxchg(&v->counter, old, new);
9439 }
9440
9441+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9442+{
9443+ return cmpxchg(&v->counter, old, new);
9444+}
9445+
9446 static inline long atomic_xchg(atomic_t *v, int new)
9447 {
9448 return xchg(&v->counter, new);
9449 }
9450
9451+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9452+{
9453+ return xchg(&v->counter, new);
9454+}
9455+
9456 /**
9457 * atomic_add_unless - add unless the number is a given value
9458 * @v: pointer of type atomic_t
9459@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9460 */
9461 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9462 {
9463- int c, old;
9464+ int c, old, new;
9465 c = atomic_read(v);
9466 for (;;) {
9467- if (unlikely(c == (u)))
9468+ if (unlikely(c == u))
9469 break;
9470- old = atomic_cmpxchg((v), c, c + (a));
9471+
9472+ asm volatile("addl %2,%0\n"
9473+
9474+#ifdef CONFIG_PAX_REFCOUNT
9475+ "jno 0f\n"
9476+ "subl %2,%0\n"
9477+ "int $4\n0:\n"
9478+ _ASM_EXTABLE(0b, 0b)
9479+#endif
9480+
9481+ : "=r" (new)
9482+ : "0" (c), "ir" (a));
9483+
9484+ old = atomic_cmpxchg(v, c, new);
9485 if (likely(old == c))
9486 break;
9487 c = old;
9488 }
9489- return c != (u);
9490+ return c != u;
9491 }
9492
9493 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9494@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9495 */
9496 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9497 {
9498- long c, old;
9499+ long c, old, new;
9500 c = atomic64_read(v);
9501 for (;;) {
9502- if (unlikely(c == (u)))
9503+ if (unlikely(c == u))
9504 break;
9505- old = atomic64_cmpxchg((v), c, c + (a));
9506+
9507+ asm volatile("addq %2,%0\n"
9508+
9509+#ifdef CONFIG_PAX_REFCOUNT
9510+ "jno 0f\n"
9511+ "subq %2,%0\n"
9512+ "int $4\n0:\n"
9513+ _ASM_EXTABLE(0b, 0b)
9514+#endif
9515+
9516+ : "=r" (new)
9517+ : "0" (c), "er" (a));
9518+
9519+ old = atomic64_cmpxchg(v, c, new);
9520 if (likely(old == c))
9521 break;
9522 c = old;
9523 }
9524- return c != (u);
9525+ return c != u;
9526 }
9527
9528 /**
9529diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9530index 02b47a6..d5c4b15 100644
9531--- a/arch/x86/include/asm/bitops.h
9532+++ b/arch/x86/include/asm/bitops.h
9533@@ -38,7 +38,7 @@
9534 * a mask operation on a byte.
9535 */
9536 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9537-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9538+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9539 #define CONST_MASK(nr) (1 << ((nr) & 7))
9540
9541 /**
9542diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9543index 7a10659..8bbf355 100644
9544--- a/arch/x86/include/asm/boot.h
9545+++ b/arch/x86/include/asm/boot.h
9546@@ -11,10 +11,15 @@
9547 #include <asm/pgtable_types.h>
9548
9549 /* Physical address where kernel should be loaded. */
9550-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9551+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9552 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9553 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9554
9555+#ifndef __ASSEMBLY__
9556+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9557+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9558+#endif
9559+
9560 /* Minimum kernel alignment, as a power of two */
9561 #ifdef CONFIG_X86_64
9562 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9563diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9564index 549860d..7d45f68 100644
9565--- a/arch/x86/include/asm/cache.h
9566+++ b/arch/x86/include/asm/cache.h
9567@@ -5,9 +5,10 @@
9568
9569 /* L1 cache line size */
9570 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9571-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9572+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9573
9574 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9575+#define __read_only __attribute__((__section__(".data.read_only")))
9576
9577 #ifdef CONFIG_X86_VSMP
9578 /* vSMP Internode cacheline shift */
9579diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9580index b54f6af..5b376a6 100644
9581--- a/arch/x86/include/asm/cacheflush.h
9582+++ b/arch/x86/include/asm/cacheflush.h
9583@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9584 static inline unsigned long get_page_memtype(struct page *pg)
9585 {
9586 if (!PageUncached(pg) && !PageWC(pg))
9587- return -1;
9588+ return ~0UL;
9589 else if (!PageUncached(pg) && PageWC(pg))
9590 return _PAGE_CACHE_WC;
9591 else if (PageUncached(pg) && !PageWC(pg))
9592@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9593 SetPageWC(pg);
9594 break;
9595 default:
9596- case -1:
9597+ case ~0UL:
9598 ClearPageUncached(pg);
9599 ClearPageWC(pg);
9600 break;
9601diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9602index 0e63c9a..ab8d972 100644
9603--- a/arch/x86/include/asm/calling.h
9604+++ b/arch/x86/include/asm/calling.h
9605@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9606 * for assembly code:
9607 */
9608
9609-#define R15 0
9610-#define R14 8
9611-#define R13 16
9612-#define R12 24
9613-#define RBP 32
9614-#define RBX 40
9615+#define R15 (0)
9616+#define R14 (8)
9617+#define R13 (16)
9618+#define R12 (24)
9619+#define RBP (32)
9620+#define RBX (40)
9621
9622 /* arguments: interrupts/non tracing syscalls only save up to here: */
9623-#define R11 48
9624-#define R10 56
9625-#define R9 64
9626-#define R8 72
9627-#define RAX 80
9628-#define RCX 88
9629-#define RDX 96
9630-#define RSI 104
9631-#define RDI 112
9632-#define ORIG_RAX 120 /* + error_code */
9633+#define R11 (48)
9634+#define R10 (56)
9635+#define R9 (64)
9636+#define R8 (72)
9637+#define RAX (80)
9638+#define RCX (88)
9639+#define RDX (96)
9640+#define RSI (104)
9641+#define RDI (112)
9642+#define ORIG_RAX (120) /* + error_code */
9643 /* end of arguments */
9644
9645 /* cpu exception frame or undefined in case of fast syscall: */
9646-#define RIP 128
9647-#define CS 136
9648-#define EFLAGS 144
9649-#define RSP 152
9650-#define SS 160
9651+#define RIP (128)
9652+#define CS (136)
9653+#define EFLAGS (144)
9654+#define RSP (152)
9655+#define SS (160)
9656
9657 #define ARGOFFSET R11
9658 #define SWFRAME ORIG_RAX
9659diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9660index 46fc474..b02b0f9 100644
9661--- a/arch/x86/include/asm/checksum_32.h
9662+++ b/arch/x86/include/asm/checksum_32.h
9663@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9664 int len, __wsum sum,
9665 int *src_err_ptr, int *dst_err_ptr);
9666
9667+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9668+ int len, __wsum sum,
9669+ int *src_err_ptr, int *dst_err_ptr);
9670+
9671+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9672+ int len, __wsum sum,
9673+ int *src_err_ptr, int *dst_err_ptr);
9674+
9675 /*
9676 * Note: when you get a NULL pointer exception here this means someone
9677 * passed in an incorrect kernel address to one of these functions.
9678@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9679 int *err_ptr)
9680 {
9681 might_sleep();
9682- return csum_partial_copy_generic((__force void *)src, dst,
9683+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9684 len, sum, err_ptr, NULL);
9685 }
9686
9687@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9688 {
9689 might_sleep();
9690 if (access_ok(VERIFY_WRITE, dst, len))
9691- return csum_partial_copy_generic(src, (__force void *)dst,
9692+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9693 len, sum, NULL, err_ptr);
9694
9695 if (len)
9696diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9697index 617bd56..7b047a1 100644
9698--- a/arch/x86/include/asm/desc.h
9699+++ b/arch/x86/include/asm/desc.h
9700@@ -4,6 +4,7 @@
9701 #include <asm/desc_defs.h>
9702 #include <asm/ldt.h>
9703 #include <asm/mmu.h>
9704+#include <asm/pgtable.h>
9705 #include <linux/smp.h>
9706
9707 static inline void fill_ldt(struct desc_struct *desc,
9708@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9709 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9710 desc->type = (info->read_exec_only ^ 1) << 1;
9711 desc->type |= info->contents << 2;
9712+ desc->type |= info->seg_not_present ^ 1;
9713 desc->s = 1;
9714 desc->dpl = 0x3;
9715 desc->p = info->seg_not_present ^ 1;
9716@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9717 }
9718
9719 extern struct desc_ptr idt_descr;
9720-extern gate_desc idt_table[];
9721-
9722-struct gdt_page {
9723- struct desc_struct gdt[GDT_ENTRIES];
9724-} __attribute__((aligned(PAGE_SIZE)));
9725-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9726+extern gate_desc idt_table[256];
9727
9728+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9729 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9730 {
9731- return per_cpu(gdt_page, cpu).gdt;
9732+ return cpu_gdt_table[cpu];
9733 }
9734
9735 #ifdef CONFIG_X86_64
9736@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9737 unsigned long base, unsigned dpl, unsigned flags,
9738 unsigned short seg)
9739 {
9740- gate->a = (seg << 16) | (base & 0xffff);
9741- gate->b = (base & 0xffff0000) |
9742- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9743+ gate->gate.offset_low = base;
9744+ gate->gate.seg = seg;
9745+ gate->gate.reserved = 0;
9746+ gate->gate.type = type;
9747+ gate->gate.s = 0;
9748+ gate->gate.dpl = dpl;
9749+ gate->gate.p = 1;
9750+ gate->gate.offset_high = base >> 16;
9751 }
9752
9753 #endif
9754@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9755 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9756 const gate_desc *gate)
9757 {
9758+ pax_open_kernel();
9759 memcpy(&idt[entry], gate, sizeof(*gate));
9760+ pax_close_kernel();
9761 }
9762
9763 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9764 const void *desc)
9765 {
9766+ pax_open_kernel();
9767 memcpy(&ldt[entry], desc, 8);
9768+ pax_close_kernel();
9769 }
9770
9771 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9772@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9773 size = sizeof(struct desc_struct);
9774 break;
9775 }
9776+
9777+ pax_open_kernel();
9778 memcpy(&gdt[entry], desc, size);
9779+ pax_close_kernel();
9780 }
9781
9782 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9783@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9784
9785 static inline void native_load_tr_desc(void)
9786 {
9787+ pax_open_kernel();
9788 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9789+ pax_close_kernel();
9790 }
9791
9792 static inline void native_load_gdt(const struct desc_ptr *dtr)
9793@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9794 unsigned int i;
9795 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9796
9797+ pax_open_kernel();
9798 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9799 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9800+ pax_close_kernel();
9801 }
9802
9803 #define _LDT_empty(info) \
9804@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9805 desc->limit = (limit >> 16) & 0xf;
9806 }
9807
9808-static inline void _set_gate(int gate, unsigned type, void *addr,
9809+static inline void _set_gate(int gate, unsigned type, const void *addr,
9810 unsigned dpl, unsigned ist, unsigned seg)
9811 {
9812 gate_desc s;
9813@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9814 * Pentium F0 0F bugfix can have resulted in the mapped
9815 * IDT being write-protected.
9816 */
9817-static inline void set_intr_gate(unsigned int n, void *addr)
9818+static inline void set_intr_gate(unsigned int n, const void *addr)
9819 {
9820 BUG_ON((unsigned)n > 0xFF);
9821 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9822@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9823 /*
9824 * This routine sets up an interrupt gate at directory privilege level 3.
9825 */
9826-static inline void set_system_intr_gate(unsigned int n, void *addr)
9827+static inline void set_system_intr_gate(unsigned int n, const void *addr)
9828 {
9829 BUG_ON((unsigned)n > 0xFF);
9830 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9831 }
9832
9833-static inline void set_system_trap_gate(unsigned int n, void *addr)
9834+static inline void set_system_trap_gate(unsigned int n, const void *addr)
9835 {
9836 BUG_ON((unsigned)n > 0xFF);
9837 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9838 }
9839
9840-static inline void set_trap_gate(unsigned int n, void *addr)
9841+static inline void set_trap_gate(unsigned int n, const void *addr)
9842 {
9843 BUG_ON((unsigned)n > 0xFF);
9844 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9845@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9846 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9847 {
9848 BUG_ON((unsigned)n > 0xFF);
9849- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9850+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9851 }
9852
9853-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9854+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9855 {
9856 BUG_ON((unsigned)n > 0xFF);
9857 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9858 }
9859
9860-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9861+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9862 {
9863 BUG_ON((unsigned)n > 0xFF);
9864 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9865 }
9866
9867+#ifdef CONFIG_X86_32
9868+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9869+{
9870+ struct desc_struct d;
9871+
9872+ if (likely(limit))
9873+ limit = (limit - 1UL) >> PAGE_SHIFT;
9874+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
9875+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9876+}
9877+#endif
9878+
9879 #endif /* _ASM_X86_DESC_H */
9880diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9881index 9d66848..6b4a691 100644
9882--- a/arch/x86/include/asm/desc_defs.h
9883+++ b/arch/x86/include/asm/desc_defs.h
9884@@ -31,6 +31,12 @@ struct desc_struct {
9885 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9886 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9887 };
9888+ struct {
9889+ u16 offset_low;
9890+ u16 seg;
9891+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9892+ unsigned offset_high: 16;
9893+ } gate;
9894 };
9895 } __attribute__((packed));
9896
9897diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
9898index cee34e9..a7c3fa2 100644
9899--- a/arch/x86/include/asm/device.h
9900+++ b/arch/x86/include/asm/device.h
9901@@ -6,7 +6,7 @@ struct dev_archdata {
9902 void *acpi_handle;
9903 #endif
9904 #ifdef CONFIG_X86_64
9905-struct dma_map_ops *dma_ops;
9906+ const struct dma_map_ops *dma_ops;
9907 #endif
9908 #ifdef CONFIG_DMAR
9909 void *iommu; /* hook for IOMMU specific extension */
9910diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
9911index 6a25d5d..786b202 100644
9912--- a/arch/x86/include/asm/dma-mapping.h
9913+++ b/arch/x86/include/asm/dma-mapping.h
9914@@ -25,9 +25,9 @@ extern int iommu_merge;
9915 extern struct device x86_dma_fallback_dev;
9916 extern int panic_on_overflow;
9917
9918-extern struct dma_map_ops *dma_ops;
9919+extern const struct dma_map_ops *dma_ops;
9920
9921-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9922+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
9923 {
9924 #ifdef CONFIG_X86_32
9925 return dma_ops;
9926@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9927 /* Make sure we keep the same behaviour */
9928 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
9929 {
9930- struct dma_map_ops *ops = get_dma_ops(dev);
9931+ const struct dma_map_ops *ops = get_dma_ops(dev);
9932 if (ops->mapping_error)
9933 return ops->mapping_error(dev, dma_addr);
9934
9935@@ -122,7 +122,7 @@ static inline void *
9936 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9937 gfp_t gfp)
9938 {
9939- struct dma_map_ops *ops = get_dma_ops(dev);
9940+ const struct dma_map_ops *ops = get_dma_ops(dev);
9941 void *memory;
9942
9943 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
9944@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9945 static inline void dma_free_coherent(struct device *dev, size_t size,
9946 void *vaddr, dma_addr_t bus)
9947 {
9948- struct dma_map_ops *ops = get_dma_ops(dev);
9949+ const struct dma_map_ops *ops = get_dma_ops(dev);
9950
9951 WARN_ON(irqs_disabled()); /* for portability */
9952
9953diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9954index 40b4e61..40d8133 100644
9955--- a/arch/x86/include/asm/e820.h
9956+++ b/arch/x86/include/asm/e820.h
9957@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
9958 #define ISA_END_ADDRESS 0x100000
9959 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
9960
9961-#define BIOS_BEGIN 0x000a0000
9962+#define BIOS_BEGIN 0x000c0000
9963 #define BIOS_END 0x00100000
9964
9965 #ifdef __KERNEL__
9966diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9967index 8ac9d9a..0a6c96e 100644
9968--- a/arch/x86/include/asm/elf.h
9969+++ b/arch/x86/include/asm/elf.h
9970@@ -257,7 +257,25 @@ extern int force_personality32;
9971 the loader. We need to make sure that it is out of the way of the program
9972 that it will "exec", and that there is sufficient room for the brk. */
9973
9974+#ifdef CONFIG_PAX_SEGMEXEC
9975+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9976+#else
9977 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9978+#endif
9979+
9980+#ifdef CONFIG_PAX_ASLR
9981+#ifdef CONFIG_X86_32
9982+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9983+
9984+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9985+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9986+#else
9987+#define PAX_ELF_ET_DYN_BASE 0x400000UL
9988+
9989+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9990+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9991+#endif
9992+#endif
9993
9994 /* This yields a mask that user programs can use to figure out what
9995 instruction set this CPU supports. This could be done in user space,
9996@@ -310,9 +328,7 @@ do { \
9997
9998 #define ARCH_DLINFO \
9999 do { \
10000- if (vdso_enabled) \
10001- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10002- (unsigned long)current->mm->context.vdso); \
10003+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10004 } while (0)
10005
10006 #define AT_SYSINFO 32
10007@@ -323,7 +339,7 @@ do { \
10008
10009 #endif /* !CONFIG_X86_32 */
10010
10011-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10012+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10013
10014 #define VDSO_ENTRY \
10015 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10016@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10017 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10018 #define compat_arch_setup_additional_pages syscall32_setup_pages
10019
10020-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10021-#define arch_randomize_brk arch_randomize_brk
10022-
10023 #endif /* _ASM_X86_ELF_H */
10024diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10025index cc70c1c..d96d011 100644
10026--- a/arch/x86/include/asm/emergency-restart.h
10027+++ b/arch/x86/include/asm/emergency-restart.h
10028@@ -15,6 +15,6 @@ enum reboot_type {
10029
10030 extern enum reboot_type reboot_type;
10031
10032-extern void machine_emergency_restart(void);
10033+extern void machine_emergency_restart(void) __noreturn;
10034
10035 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10036diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10037index 1f11ce4..7caabd1 100644
10038--- a/arch/x86/include/asm/futex.h
10039+++ b/arch/x86/include/asm/futex.h
10040@@ -12,16 +12,18 @@
10041 #include <asm/system.h>
10042
10043 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10044+ typecheck(u32 __user *, uaddr); \
10045 asm volatile("1:\t" insn "\n" \
10046 "2:\t.section .fixup,\"ax\"\n" \
10047 "3:\tmov\t%3, %1\n" \
10048 "\tjmp\t2b\n" \
10049 "\t.previous\n" \
10050 _ASM_EXTABLE(1b, 3b) \
10051- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10052+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10053 : "i" (-EFAULT), "0" (oparg), "1" (0))
10054
10055 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10056+ typecheck(u32 __user *, uaddr); \
10057 asm volatile("1:\tmovl %2, %0\n" \
10058 "\tmovl\t%0, %3\n" \
10059 "\t" insn "\n" \
10060@@ -34,10 +36,10 @@
10061 _ASM_EXTABLE(1b, 4b) \
10062 _ASM_EXTABLE(2b, 4b) \
10063 : "=&a" (oldval), "=&r" (ret), \
10064- "+m" (*uaddr), "=&r" (tem) \
10065+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10066 : "r" (oparg), "i" (-EFAULT), "1" (0))
10067
10068-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10069+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10070 {
10071 int op = (encoded_op >> 28) & 7;
10072 int cmp = (encoded_op >> 24) & 15;
10073@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10074
10075 switch (op) {
10076 case FUTEX_OP_SET:
10077- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10078+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10079 break;
10080 case FUTEX_OP_ADD:
10081- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10082+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10083 uaddr, oparg);
10084 break;
10085 case FUTEX_OP_OR:
10086@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10087 return ret;
10088 }
10089
10090-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10091+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10092 int newval)
10093 {
10094
10095@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10096 return -ENOSYS;
10097 #endif
10098
10099- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10100+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10101 return -EFAULT;
10102
10103- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10104+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10105 "2:\t.section .fixup, \"ax\"\n"
10106 "3:\tmov %2, %0\n"
10107 "\tjmp 2b\n"
10108 "\t.previous\n"
10109 _ASM_EXTABLE(1b, 3b)
10110- : "=a" (oldval), "+m" (*uaddr)
10111+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10112 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10113 : "memory"
10114 );
10115diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10116index ba180d9..3bad351 100644
10117--- a/arch/x86/include/asm/hw_irq.h
10118+++ b/arch/x86/include/asm/hw_irq.h
10119@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10120 extern void enable_IO_APIC(void);
10121
10122 /* Statistics */
10123-extern atomic_t irq_err_count;
10124-extern atomic_t irq_mis_count;
10125+extern atomic_unchecked_t irq_err_count;
10126+extern atomic_unchecked_t irq_mis_count;
10127
10128 /* EISA */
10129 extern void eisa_set_level_irq(unsigned int irq);
10130diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10131index 0b20bbb..4cb1396 100644
10132--- a/arch/x86/include/asm/i387.h
10133+++ b/arch/x86/include/asm/i387.h
10134@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10135 {
10136 int err;
10137
10138+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10139+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10140+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10141+#endif
10142+
10143 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10144 "2:\n"
10145 ".section .fixup,\"ax\"\n"
10146@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10147 {
10148 int err;
10149
10150+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10151+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10152+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10153+#endif
10154+
10155 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10156 "2:\n"
10157 ".section .fixup,\"ax\"\n"
10158@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10159 }
10160
10161 /* We need a safe address that is cheap to find and that is already
10162- in L1 during context switch. The best choices are unfortunately
10163- different for UP and SMP */
10164-#ifdef CONFIG_SMP
10165-#define safe_address (__per_cpu_offset[0])
10166-#else
10167-#define safe_address (kstat_cpu(0).cpustat.user)
10168-#endif
10169+ in L1 during context switch. */
10170+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10171
10172 /*
10173 * These must be called with preempt disabled
10174@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10175 struct thread_info *me = current_thread_info();
10176 preempt_disable();
10177 if (me->status & TS_USEDFPU)
10178- __save_init_fpu(me->task);
10179+ __save_init_fpu(current);
10180 else
10181 clts();
10182 }
10183diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10184index a299900..15c5410 100644
10185--- a/arch/x86/include/asm/io_32.h
10186+++ b/arch/x86/include/asm/io_32.h
10187@@ -3,6 +3,7 @@
10188
10189 #include <linux/string.h>
10190 #include <linux/compiler.h>
10191+#include <asm/processor.h>
10192
10193 /*
10194 * This file contains the definitions for the x86 IO instructions
10195@@ -42,6 +43,17 @@
10196
10197 #ifdef __KERNEL__
10198
10199+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10200+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10201+{
10202+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10203+}
10204+
10205+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10206+{
10207+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10208+}
10209+
10210 #include <asm-generic/iomap.h>
10211
10212 #include <linux/vmalloc.h>
10213diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10214index 2440678..c158b88 100644
10215--- a/arch/x86/include/asm/io_64.h
10216+++ b/arch/x86/include/asm/io_64.h
10217@@ -140,6 +140,17 @@ __OUTS(l)
10218
10219 #include <linux/vmalloc.h>
10220
10221+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10222+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10223+{
10224+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10225+}
10226+
10227+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10228+{
10229+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10230+}
10231+
10232 #include <asm-generic/iomap.h>
10233
10234 void __memcpy_fromio(void *, unsigned long, unsigned);
10235diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10236index fd6d21b..8b13915 100644
10237--- a/arch/x86/include/asm/iommu.h
10238+++ b/arch/x86/include/asm/iommu.h
10239@@ -3,7 +3,7 @@
10240
10241 extern void pci_iommu_shutdown(void);
10242 extern void no_iommu_init(void);
10243-extern struct dma_map_ops nommu_dma_ops;
10244+extern const struct dma_map_ops nommu_dma_ops;
10245 extern int force_iommu, no_iommu;
10246 extern int iommu_detected;
10247 extern int iommu_pass_through;
10248diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10249index 9e2b952..557206e 100644
10250--- a/arch/x86/include/asm/irqflags.h
10251+++ b/arch/x86/include/asm/irqflags.h
10252@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10253 sti; \
10254 sysexit
10255
10256+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10257+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10258+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10259+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10260+
10261 #else
10262 #define INTERRUPT_RETURN iret
10263 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10264diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10265index 4fe681d..bb6d40c 100644
10266--- a/arch/x86/include/asm/kprobes.h
10267+++ b/arch/x86/include/asm/kprobes.h
10268@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10269 #define BREAKPOINT_INSTRUCTION 0xcc
10270 #define RELATIVEJUMP_INSTRUCTION 0xe9
10271 #define MAX_INSN_SIZE 16
10272-#define MAX_STACK_SIZE 64
10273-#define MIN_STACK_SIZE(ADDR) \
10274- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10275- THREAD_SIZE - (unsigned long)(ADDR))) \
10276- ? (MAX_STACK_SIZE) \
10277- : (((unsigned long)current_thread_info()) + \
10278- THREAD_SIZE - (unsigned long)(ADDR)))
10279+#define MAX_STACK_SIZE 64UL
10280+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10281
10282 #define flush_insn_slot(p) do { } while (0)
10283
10284diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10285index 08bc2ff..2e88d1f 100644
10286--- a/arch/x86/include/asm/kvm_host.h
10287+++ b/arch/x86/include/asm/kvm_host.h
10288@@ -534,9 +534,9 @@ struct kvm_x86_ops {
10289 bool (*gb_page_enable)(void);
10290
10291 const struct trace_print_flags *exit_reasons_str;
10292-};
10293+} __do_const;
10294
10295-extern struct kvm_x86_ops *kvm_x86_ops;
10296+extern const struct kvm_x86_ops *kvm_x86_ops;
10297
10298 int kvm_mmu_module_init(void);
10299 void kvm_mmu_module_exit(void);
10300diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10301index 47b9b6f..815aaa1 100644
10302--- a/arch/x86/include/asm/local.h
10303+++ b/arch/x86/include/asm/local.h
10304@@ -18,26 +18,58 @@ typedef struct {
10305
10306 static inline void local_inc(local_t *l)
10307 {
10308- asm volatile(_ASM_INC "%0"
10309+ asm volatile(_ASM_INC "%0\n"
10310+
10311+#ifdef CONFIG_PAX_REFCOUNT
10312+ "jno 0f\n"
10313+ _ASM_DEC "%0\n"
10314+ "int $4\n0:\n"
10315+ _ASM_EXTABLE(0b, 0b)
10316+#endif
10317+
10318 : "+m" (l->a.counter));
10319 }
10320
10321 static inline void local_dec(local_t *l)
10322 {
10323- asm volatile(_ASM_DEC "%0"
10324+ asm volatile(_ASM_DEC "%0\n"
10325+
10326+#ifdef CONFIG_PAX_REFCOUNT
10327+ "jno 0f\n"
10328+ _ASM_INC "%0\n"
10329+ "int $4\n0:\n"
10330+ _ASM_EXTABLE(0b, 0b)
10331+#endif
10332+
10333 : "+m" (l->a.counter));
10334 }
10335
10336 static inline void local_add(long i, local_t *l)
10337 {
10338- asm volatile(_ASM_ADD "%1,%0"
10339+ asm volatile(_ASM_ADD "%1,%0\n"
10340+
10341+#ifdef CONFIG_PAX_REFCOUNT
10342+ "jno 0f\n"
10343+ _ASM_SUB "%1,%0\n"
10344+ "int $4\n0:\n"
10345+ _ASM_EXTABLE(0b, 0b)
10346+#endif
10347+
10348 : "+m" (l->a.counter)
10349 : "ir" (i));
10350 }
10351
10352 static inline void local_sub(long i, local_t *l)
10353 {
10354- asm volatile(_ASM_SUB "%1,%0"
10355+ asm volatile(_ASM_SUB "%1,%0\n"
10356+
10357+#ifdef CONFIG_PAX_REFCOUNT
10358+ "jno 0f\n"
10359+ _ASM_ADD "%1,%0\n"
10360+ "int $4\n0:\n"
10361+ _ASM_EXTABLE(0b, 0b)
10362+#endif
10363+
10364 : "+m" (l->a.counter)
10365 : "ir" (i));
10366 }
10367@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10368 {
10369 unsigned char c;
10370
10371- asm volatile(_ASM_SUB "%2,%0; sete %1"
10372+ asm volatile(_ASM_SUB "%2,%0\n"
10373+
10374+#ifdef CONFIG_PAX_REFCOUNT
10375+ "jno 0f\n"
10376+ _ASM_ADD "%2,%0\n"
10377+ "int $4\n0:\n"
10378+ _ASM_EXTABLE(0b, 0b)
10379+#endif
10380+
10381+ "sete %1\n"
10382 : "+m" (l->a.counter), "=qm" (c)
10383 : "ir" (i) : "memory");
10384 return c;
10385@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10386 {
10387 unsigned char c;
10388
10389- asm volatile(_ASM_DEC "%0; sete %1"
10390+ asm volatile(_ASM_DEC "%0\n"
10391+
10392+#ifdef CONFIG_PAX_REFCOUNT
10393+ "jno 0f\n"
10394+ _ASM_INC "%0\n"
10395+ "int $4\n0:\n"
10396+ _ASM_EXTABLE(0b, 0b)
10397+#endif
10398+
10399+ "sete %1\n"
10400 : "+m" (l->a.counter), "=qm" (c)
10401 : : "memory");
10402 return c != 0;
10403@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10404 {
10405 unsigned char c;
10406
10407- asm volatile(_ASM_INC "%0; sete %1"
10408+ asm volatile(_ASM_INC "%0\n"
10409+
10410+#ifdef CONFIG_PAX_REFCOUNT
10411+ "jno 0f\n"
10412+ _ASM_DEC "%0\n"
10413+ "int $4\n0:\n"
10414+ _ASM_EXTABLE(0b, 0b)
10415+#endif
10416+
10417+ "sete %1\n"
10418 : "+m" (l->a.counter), "=qm" (c)
10419 : : "memory");
10420 return c != 0;
10421@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10422 {
10423 unsigned char c;
10424
10425- asm volatile(_ASM_ADD "%2,%0; sets %1"
10426+ asm volatile(_ASM_ADD "%2,%0\n"
10427+
10428+#ifdef CONFIG_PAX_REFCOUNT
10429+ "jno 0f\n"
10430+ _ASM_SUB "%2,%0\n"
10431+ "int $4\n0:\n"
10432+ _ASM_EXTABLE(0b, 0b)
10433+#endif
10434+
10435+ "sets %1\n"
10436 : "+m" (l->a.counter), "=qm" (c)
10437 : "ir" (i) : "memory");
10438 return c;
10439@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10440 #endif
10441 /* Modern 486+ processor */
10442 __i = i;
10443- asm volatile(_ASM_XADD "%0, %1;"
10444+ asm volatile(_ASM_XADD "%0, %1\n"
10445+
10446+#ifdef CONFIG_PAX_REFCOUNT
10447+ "jno 0f\n"
10448+ _ASM_MOV "%0,%1\n"
10449+ "int $4\n0:\n"
10450+ _ASM_EXTABLE(0b, 0b)
10451+#endif
10452+
10453 : "+r" (i), "+m" (l->a.counter)
10454 : : "memory");
10455 return i + __i;
10456diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10457index ef51b50..514ba37 100644
10458--- a/arch/x86/include/asm/microcode.h
10459+++ b/arch/x86/include/asm/microcode.h
10460@@ -12,13 +12,13 @@ struct device;
10461 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10462
10463 struct microcode_ops {
10464- enum ucode_state (*request_microcode_user) (int cpu,
10465+ enum ucode_state (* const request_microcode_user) (int cpu,
10466 const void __user *buf, size_t size);
10467
10468- enum ucode_state (*request_microcode_fw) (int cpu,
10469+ enum ucode_state (* const request_microcode_fw) (int cpu,
10470 struct device *device);
10471
10472- void (*microcode_fini_cpu) (int cpu);
10473+ void (* const microcode_fini_cpu) (int cpu);
10474
10475 /*
10476 * The generic 'microcode_core' part guarantees that
10477@@ -38,18 +38,18 @@ struct ucode_cpu_info {
10478 extern struct ucode_cpu_info ucode_cpu_info[];
10479
10480 #ifdef CONFIG_MICROCODE_INTEL
10481-extern struct microcode_ops * __init init_intel_microcode(void);
10482+extern const struct microcode_ops * __init init_intel_microcode(void);
10483 #else
10484-static inline struct microcode_ops * __init init_intel_microcode(void)
10485+static inline const struct microcode_ops * __init init_intel_microcode(void)
10486 {
10487 return NULL;
10488 }
10489 #endif /* CONFIG_MICROCODE_INTEL */
10490
10491 #ifdef CONFIG_MICROCODE_AMD
10492-extern struct microcode_ops * __init init_amd_microcode(void);
10493+extern const struct microcode_ops * __init init_amd_microcode(void);
10494 #else
10495-static inline struct microcode_ops * __init init_amd_microcode(void)
10496+static inline const struct microcode_ops * __init init_amd_microcode(void)
10497 {
10498 return NULL;
10499 }
10500diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10501index 593e51d..fa69c9a 100644
10502--- a/arch/x86/include/asm/mman.h
10503+++ b/arch/x86/include/asm/mman.h
10504@@ -5,4 +5,14 @@
10505
10506 #include <asm-generic/mman.h>
10507
10508+#ifdef __KERNEL__
10509+#ifndef __ASSEMBLY__
10510+#ifdef CONFIG_X86_32
10511+#define arch_mmap_check i386_mmap_check
10512+int i386_mmap_check(unsigned long addr, unsigned long len,
10513+ unsigned long flags);
10514+#endif
10515+#endif
10516+#endif
10517+
10518 #endif /* _ASM_X86_MMAN_H */
10519diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10520index 80a1dee..239c67d 100644
10521--- a/arch/x86/include/asm/mmu.h
10522+++ b/arch/x86/include/asm/mmu.h
10523@@ -9,10 +9,23 @@
10524 * we put the segment information here.
10525 */
10526 typedef struct {
10527- void *ldt;
10528+ struct desc_struct *ldt;
10529 int size;
10530 struct mutex lock;
10531- void *vdso;
10532+ unsigned long vdso;
10533+
10534+#ifdef CONFIG_X86_32
10535+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10536+ unsigned long user_cs_base;
10537+ unsigned long user_cs_limit;
10538+
10539+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10540+ cpumask_t cpu_user_cs_mask;
10541+#endif
10542+
10543+#endif
10544+#endif
10545+
10546 } mm_context_t;
10547
10548 #ifdef CONFIG_SMP
10549diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10550index 8b5393e..8143173 100644
10551--- a/arch/x86/include/asm/mmu_context.h
10552+++ b/arch/x86/include/asm/mmu_context.h
10553@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10554
10555 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10556 {
10557+
10558+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10559+ unsigned int i;
10560+ pgd_t *pgd;
10561+
10562+ pax_open_kernel();
10563+ pgd = get_cpu_pgd(smp_processor_id());
10564+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10565+ set_pgd_batched(pgd+i, native_make_pgd(0));
10566+ pax_close_kernel();
10567+#endif
10568+
10569 #ifdef CONFIG_SMP
10570 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10571 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10572@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10573 struct task_struct *tsk)
10574 {
10575 unsigned cpu = smp_processor_id();
10576+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10577+ int tlbstate = TLBSTATE_OK;
10578+#endif
10579
10580 if (likely(prev != next)) {
10581 #ifdef CONFIG_SMP
10582+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10583+ tlbstate = percpu_read(cpu_tlbstate.state);
10584+#endif
10585 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10586 percpu_write(cpu_tlbstate.active_mm, next);
10587 #endif
10588 cpumask_set_cpu(cpu, mm_cpumask(next));
10589
10590 /* Re-load page tables */
10591+#ifdef CONFIG_PAX_PER_CPU_PGD
10592+ pax_open_kernel();
10593+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10594+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10595+ pax_close_kernel();
10596+ load_cr3(get_cpu_pgd(cpu));
10597+#else
10598 load_cr3(next->pgd);
10599+#endif
10600
10601 /* stop flush ipis for the previous mm */
10602 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10603@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10604 */
10605 if (unlikely(prev->context.ldt != next->context.ldt))
10606 load_LDT_nolock(&next->context);
10607- }
10608+
10609+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10610+ if (!nx_enabled) {
10611+ smp_mb__before_clear_bit();
10612+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10613+ smp_mb__after_clear_bit();
10614+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10615+ }
10616+#endif
10617+
10618+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10619+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10620+ prev->context.user_cs_limit != next->context.user_cs_limit))
10621+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10622 #ifdef CONFIG_SMP
10623+ else if (unlikely(tlbstate != TLBSTATE_OK))
10624+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10625+#endif
10626+#endif
10627+
10628+ }
10629 else {
10630+
10631+#ifdef CONFIG_PAX_PER_CPU_PGD
10632+ pax_open_kernel();
10633+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10634+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10635+ pax_close_kernel();
10636+ load_cr3(get_cpu_pgd(cpu));
10637+#endif
10638+
10639+#ifdef CONFIG_SMP
10640 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10641 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10642
10643@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10644 * tlb flush IPI delivery. We must reload CR3
10645 * to make sure to use no freed page tables.
10646 */
10647+
10648+#ifndef CONFIG_PAX_PER_CPU_PGD
10649 load_cr3(next->pgd);
10650+#endif
10651+
10652 load_LDT_nolock(&next->context);
10653+
10654+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10655+ if (!nx_enabled)
10656+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10657+#endif
10658+
10659+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10660+#ifdef CONFIG_PAX_PAGEEXEC
10661+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10662+#endif
10663+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10664+#endif
10665+
10666 }
10667+#endif
10668 }
10669-#endif
10670 }
10671
10672 #define activate_mm(prev, next) \
10673diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10674index 3e2ce58..caaf478 100644
10675--- a/arch/x86/include/asm/module.h
10676+++ b/arch/x86/include/asm/module.h
10677@@ -5,6 +5,7 @@
10678
10679 #ifdef CONFIG_X86_64
10680 /* X86_64 does not define MODULE_PROC_FAMILY */
10681+#define MODULE_PROC_FAMILY ""
10682 #elif defined CONFIG_M386
10683 #define MODULE_PROC_FAMILY "386 "
10684 #elif defined CONFIG_M486
10685@@ -59,13 +60,26 @@
10686 #error unknown processor family
10687 #endif
10688
10689-#ifdef CONFIG_X86_32
10690-# ifdef CONFIG_4KSTACKS
10691-# define MODULE_STACKSIZE "4KSTACKS "
10692-# else
10693-# define MODULE_STACKSIZE ""
10694-# endif
10695-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10696+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10697+#define MODULE_STACKSIZE "4KSTACKS "
10698+#else
10699+#define MODULE_STACKSIZE ""
10700 #endif
10701
10702+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10703+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10704+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10705+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10706+#else
10707+#define MODULE_PAX_KERNEXEC ""
10708+#endif
10709+
10710+#ifdef CONFIG_PAX_MEMORY_UDEREF
10711+#define MODULE_PAX_UDEREF "UDEREF "
10712+#else
10713+#define MODULE_PAX_UDEREF ""
10714+#endif
10715+
10716+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10717+
10718 #endif /* _ASM_X86_MODULE_H */
10719diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10720index 7639dbf..e08a58c 100644
10721--- a/arch/x86/include/asm/page_64_types.h
10722+++ b/arch/x86/include/asm/page_64_types.h
10723@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10724
10725 /* duplicated to the one in bootmem.h */
10726 extern unsigned long max_pfn;
10727-extern unsigned long phys_base;
10728+extern const unsigned long phys_base;
10729
10730 extern unsigned long __phys_addr(unsigned long);
10731 #define __phys_reloc_hide(x) (x)
10732diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10733index efb3899..ef30687 100644
10734--- a/arch/x86/include/asm/paravirt.h
10735+++ b/arch/x86/include/asm/paravirt.h
10736@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10737 val);
10738 }
10739
10740+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10741+{
10742+ pgdval_t val = native_pgd_val(pgd);
10743+
10744+ if (sizeof(pgdval_t) > sizeof(long))
10745+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10746+ val, (u64)val >> 32);
10747+ else
10748+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10749+ val);
10750+}
10751+
10752 static inline void pgd_clear(pgd_t *pgdp)
10753 {
10754 set_pgd(pgdp, __pgd(0));
10755@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10756 pv_mmu_ops.set_fixmap(idx, phys, flags);
10757 }
10758
10759+#ifdef CONFIG_PAX_KERNEXEC
10760+static inline unsigned long pax_open_kernel(void)
10761+{
10762+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10763+}
10764+
10765+static inline unsigned long pax_close_kernel(void)
10766+{
10767+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10768+}
10769+#else
10770+static inline unsigned long pax_open_kernel(void) { return 0; }
10771+static inline unsigned long pax_close_kernel(void) { return 0; }
10772+#endif
10773+
10774 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10775
10776 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10777@@ -945,7 +972,7 @@ extern void default_banner(void);
10778
10779 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10780 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10781-#define PARA_INDIRECT(addr) *%cs:addr
10782+#define PARA_INDIRECT(addr) *%ss:addr
10783 #endif
10784
10785 #define INTERRUPT_RETURN \
10786@@ -1022,6 +1049,21 @@ extern void default_banner(void);
10787 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10788 CLBR_NONE, \
10789 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10790+
10791+#define GET_CR0_INTO_RDI \
10792+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10793+ mov %rax,%rdi
10794+
10795+#define SET_RDI_INTO_CR0 \
10796+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10797+
10798+#define GET_CR3_INTO_RDI \
10799+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10800+ mov %rax,%rdi
10801+
10802+#define SET_RDI_INTO_CR3 \
10803+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10804+
10805 #endif /* CONFIG_X86_32 */
10806
10807 #endif /* __ASSEMBLY__ */
10808diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10809index 9357473..aeb2de5 100644
10810--- a/arch/x86/include/asm/paravirt_types.h
10811+++ b/arch/x86/include/asm/paravirt_types.h
10812@@ -78,19 +78,19 @@ struct pv_init_ops {
10813 */
10814 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10815 unsigned long addr, unsigned len);
10816-};
10817+} __no_const;
10818
10819
10820 struct pv_lazy_ops {
10821 /* Set deferred update mode, used for batching operations. */
10822 void (*enter)(void);
10823 void (*leave)(void);
10824-};
10825+} __no_const;
10826
10827 struct pv_time_ops {
10828 unsigned long long (*sched_clock)(void);
10829 unsigned long (*get_tsc_khz)(void);
10830-};
10831+} __no_const;
10832
10833 struct pv_cpu_ops {
10834 /* hooks for various privileged instructions */
10835@@ -186,7 +186,7 @@ struct pv_cpu_ops {
10836
10837 void (*start_context_switch)(struct task_struct *prev);
10838 void (*end_context_switch)(struct task_struct *next);
10839-};
10840+} __no_const;
10841
10842 struct pv_irq_ops {
10843 /*
10844@@ -217,7 +217,7 @@ struct pv_apic_ops {
10845 unsigned long start_eip,
10846 unsigned long start_esp);
10847 #endif
10848-};
10849+} __no_const;
10850
10851 struct pv_mmu_ops {
10852 unsigned long (*read_cr2)(void);
10853@@ -301,6 +301,7 @@ struct pv_mmu_ops {
10854 struct paravirt_callee_save make_pud;
10855
10856 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10857+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10858 #endif /* PAGETABLE_LEVELS == 4 */
10859 #endif /* PAGETABLE_LEVELS >= 3 */
10860
10861@@ -316,6 +317,12 @@ struct pv_mmu_ops {
10862 an mfn. We can tell which is which from the index. */
10863 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10864 phys_addr_t phys, pgprot_t flags);
10865+
10866+#ifdef CONFIG_PAX_KERNEXEC
10867+ unsigned long (*pax_open_kernel)(void);
10868+ unsigned long (*pax_close_kernel)(void);
10869+#endif
10870+
10871 };
10872
10873 struct raw_spinlock;
10874@@ -326,7 +333,7 @@ struct pv_lock_ops {
10875 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
10876 int (*spin_trylock)(struct raw_spinlock *lock);
10877 void (*spin_unlock)(struct raw_spinlock *lock);
10878-};
10879+} __no_const;
10880
10881 /* This contains all the paravirt structures: we get a convenient
10882 * number for each function using the offset which we use to indicate
10883diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
10884index b399988..3f47c38 100644
10885--- a/arch/x86/include/asm/pci_x86.h
10886+++ b/arch/x86/include/asm/pci_x86.h
10887@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
10888 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
10889
10890 struct pci_raw_ops {
10891- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10892+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10893 int reg, int len, u32 *val);
10894- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10895+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10896 int reg, int len, u32 val);
10897 };
10898
10899-extern struct pci_raw_ops *raw_pci_ops;
10900-extern struct pci_raw_ops *raw_pci_ext_ops;
10901+extern const struct pci_raw_ops *raw_pci_ops;
10902+extern const struct pci_raw_ops *raw_pci_ext_ops;
10903
10904-extern struct pci_raw_ops pci_direct_conf1;
10905+extern const struct pci_raw_ops pci_direct_conf1;
10906 extern bool port_cf9_safe;
10907
10908 /* arch_initcall level */
10909diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
10910index b65a36d..50345a4 100644
10911--- a/arch/x86/include/asm/percpu.h
10912+++ b/arch/x86/include/asm/percpu.h
10913@@ -78,6 +78,7 @@ do { \
10914 if (0) { \
10915 T__ tmp__; \
10916 tmp__ = (val); \
10917+ (void)tmp__; \
10918 } \
10919 switch (sizeof(var)) { \
10920 case 1: \
10921diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10922index 271de94..ef944d6 100644
10923--- a/arch/x86/include/asm/pgalloc.h
10924+++ b/arch/x86/include/asm/pgalloc.h
10925@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10926 pmd_t *pmd, pte_t *pte)
10927 {
10928 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10929+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10930+}
10931+
10932+static inline void pmd_populate_user(struct mm_struct *mm,
10933+ pmd_t *pmd, pte_t *pte)
10934+{
10935+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10936 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10937 }
10938
10939diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10940index 2334982..70bc412 100644
10941--- a/arch/x86/include/asm/pgtable-2level.h
10942+++ b/arch/x86/include/asm/pgtable-2level.h
10943@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10944
10945 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10946 {
10947+ pax_open_kernel();
10948 *pmdp = pmd;
10949+ pax_close_kernel();
10950 }
10951
10952 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10953diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10954index 33927d2..ccde329 100644
10955--- a/arch/x86/include/asm/pgtable-3level.h
10956+++ b/arch/x86/include/asm/pgtable-3level.h
10957@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10958
10959 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10960 {
10961+ pax_open_kernel();
10962 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10963+ pax_close_kernel();
10964 }
10965
10966 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10967 {
10968+ pax_open_kernel();
10969 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10970+ pax_close_kernel();
10971 }
10972
10973 /*
10974diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10975index af6fd36..867ff74 100644
10976--- a/arch/x86/include/asm/pgtable.h
10977+++ b/arch/x86/include/asm/pgtable.h
10978@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
10979
10980 #ifndef __PAGETABLE_PUD_FOLDED
10981 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10982+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10983 #define pgd_clear(pgd) native_pgd_clear(pgd)
10984 #endif
10985
10986@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
10987
10988 #define arch_end_context_switch(prev) do {} while(0)
10989
10990+#define pax_open_kernel() native_pax_open_kernel()
10991+#define pax_close_kernel() native_pax_close_kernel()
10992 #endif /* CONFIG_PARAVIRT */
10993
10994+#define __HAVE_ARCH_PAX_OPEN_KERNEL
10995+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10996+
10997+#ifdef CONFIG_PAX_KERNEXEC
10998+static inline unsigned long native_pax_open_kernel(void)
10999+{
11000+ unsigned long cr0;
11001+
11002+ preempt_disable();
11003+ barrier();
11004+ cr0 = read_cr0() ^ X86_CR0_WP;
11005+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11006+ write_cr0(cr0);
11007+ return cr0 ^ X86_CR0_WP;
11008+}
11009+
11010+static inline unsigned long native_pax_close_kernel(void)
11011+{
11012+ unsigned long cr0;
11013+
11014+ cr0 = read_cr0() ^ X86_CR0_WP;
11015+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11016+ write_cr0(cr0);
11017+ barrier();
11018+ preempt_enable_no_resched();
11019+ return cr0 ^ X86_CR0_WP;
11020+}
11021+#else
11022+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11023+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11024+#endif
11025+
11026 /*
11027 * The following only work if pte_present() is true.
11028 * Undefined behaviour if not..
11029 */
11030+static inline int pte_user(pte_t pte)
11031+{
11032+ return pte_val(pte) & _PAGE_USER;
11033+}
11034+
11035 static inline int pte_dirty(pte_t pte)
11036 {
11037 return pte_flags(pte) & _PAGE_DIRTY;
11038@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11039 return pte_clear_flags(pte, _PAGE_RW);
11040 }
11041
11042+static inline pte_t pte_mkread(pte_t pte)
11043+{
11044+ return __pte(pte_val(pte) | _PAGE_USER);
11045+}
11046+
11047 static inline pte_t pte_mkexec(pte_t pte)
11048 {
11049- return pte_clear_flags(pte, _PAGE_NX);
11050+#ifdef CONFIG_X86_PAE
11051+ if (__supported_pte_mask & _PAGE_NX)
11052+ return pte_clear_flags(pte, _PAGE_NX);
11053+ else
11054+#endif
11055+ return pte_set_flags(pte, _PAGE_USER);
11056+}
11057+
11058+static inline pte_t pte_exprotect(pte_t pte)
11059+{
11060+#ifdef CONFIG_X86_PAE
11061+ if (__supported_pte_mask & _PAGE_NX)
11062+ return pte_set_flags(pte, _PAGE_NX);
11063+ else
11064+#endif
11065+ return pte_clear_flags(pte, _PAGE_USER);
11066 }
11067
11068 static inline pte_t pte_mkdirty(pte_t pte)
11069@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11070 #endif
11071
11072 #ifndef __ASSEMBLY__
11073+
11074+#ifdef CONFIG_PAX_PER_CPU_PGD
11075+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11076+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11077+{
11078+ return cpu_pgd[cpu];
11079+}
11080+#endif
11081+
11082 #include <linux/mm_types.h>
11083
11084 static inline int pte_none(pte_t pte)
11085@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11086
11087 static inline int pgd_bad(pgd_t pgd)
11088 {
11089- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11090+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11091 }
11092
11093 static inline int pgd_none(pgd_t pgd)
11094@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11095 * pgd_offset() returns a (pgd_t *)
11096 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11097 */
11098-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11099+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11100+
11101+#ifdef CONFIG_PAX_PER_CPU_PGD
11102+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11103+#endif
11104+
11105 /*
11106 * a shortcut which implies the use of the kernel's pgd, instead
11107 * of a process's
11108@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11109 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11110 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11111
11112+#ifdef CONFIG_X86_32
11113+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11114+#else
11115+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11116+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11117+
11118+#ifdef CONFIG_PAX_MEMORY_UDEREF
11119+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11120+#else
11121+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11122+#endif
11123+
11124+#endif
11125+
11126 #ifndef __ASSEMBLY__
11127
11128 extern int direct_gbpages;
11129@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11130 * dst and src can be on the same page, but the range must not overlap,
11131 * and must not cross a page boundary.
11132 */
11133-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11134+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11135 {
11136- memcpy(dst, src, count * sizeof(pgd_t));
11137+ pax_open_kernel();
11138+ while (count--)
11139+ *dst++ = *src++;
11140+ pax_close_kernel();
11141 }
11142
11143+#ifdef CONFIG_PAX_PER_CPU_PGD
11144+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11145+#endif
11146+
11147+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11148+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11149+#else
11150+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11151+#endif
11152
11153 #include <asm-generic/pgtable.h>
11154 #endif /* __ASSEMBLY__ */
11155diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11156index 750f1bf..971e839 100644
11157--- a/arch/x86/include/asm/pgtable_32.h
11158+++ b/arch/x86/include/asm/pgtable_32.h
11159@@ -26,9 +26,6 @@
11160 struct mm_struct;
11161 struct vm_area_struct;
11162
11163-extern pgd_t swapper_pg_dir[1024];
11164-extern pgd_t trampoline_pg_dir[1024];
11165-
11166 static inline void pgtable_cache_init(void) { }
11167 static inline void check_pgt_cache(void) { }
11168 void paging_init(void);
11169@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11170 # include <asm/pgtable-2level.h>
11171 #endif
11172
11173+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11174+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11175+#ifdef CONFIG_X86_PAE
11176+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11177+#endif
11178+
11179 #if defined(CONFIG_HIGHPTE)
11180 #define __KM_PTE \
11181 (in_nmi() ? KM_NMI_PTE : \
11182@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11183 /* Clear a kernel PTE and flush it from the TLB */
11184 #define kpte_clear_flush(ptep, vaddr) \
11185 do { \
11186+ pax_open_kernel(); \
11187 pte_clear(&init_mm, (vaddr), (ptep)); \
11188+ pax_close_kernel(); \
11189 __flush_tlb_one((vaddr)); \
11190 } while (0)
11191
11192@@ -85,6 +90,9 @@ do { \
11193
11194 #endif /* !__ASSEMBLY__ */
11195
11196+#define HAVE_ARCH_UNMAPPED_AREA
11197+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11198+
11199 /*
11200 * kern_addr_valid() is (1) for FLATMEM and (0) for
11201 * SPARSEMEM and DISCONTIGMEM
11202diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11203index 5e67c15..12d5c47 100644
11204--- a/arch/x86/include/asm/pgtable_32_types.h
11205+++ b/arch/x86/include/asm/pgtable_32_types.h
11206@@ -8,7 +8,7 @@
11207 */
11208 #ifdef CONFIG_X86_PAE
11209 # include <asm/pgtable-3level_types.h>
11210-# define PMD_SIZE (1UL << PMD_SHIFT)
11211+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11212 # define PMD_MASK (~(PMD_SIZE - 1))
11213 #else
11214 # include <asm/pgtable-2level_types.h>
11215@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11216 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11217 #endif
11218
11219+#ifdef CONFIG_PAX_KERNEXEC
11220+#ifndef __ASSEMBLY__
11221+extern unsigned char MODULES_EXEC_VADDR[];
11222+extern unsigned char MODULES_EXEC_END[];
11223+#endif
11224+#include <asm/boot.h>
11225+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11226+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11227+#else
11228+#define ktla_ktva(addr) (addr)
11229+#define ktva_ktla(addr) (addr)
11230+#endif
11231+
11232 #define MODULES_VADDR VMALLOC_START
11233 #define MODULES_END VMALLOC_END
11234 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11235diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11236index c57a301..6b414ff 100644
11237--- a/arch/x86/include/asm/pgtable_64.h
11238+++ b/arch/x86/include/asm/pgtable_64.h
11239@@ -16,10 +16,14 @@
11240
11241 extern pud_t level3_kernel_pgt[512];
11242 extern pud_t level3_ident_pgt[512];
11243+extern pud_t level3_vmalloc_start_pgt[512];
11244+extern pud_t level3_vmalloc_end_pgt[512];
11245+extern pud_t level3_vmemmap_pgt[512];
11246+extern pud_t level2_vmemmap_pgt[512];
11247 extern pmd_t level2_kernel_pgt[512];
11248 extern pmd_t level2_fixmap_pgt[512];
11249-extern pmd_t level2_ident_pgt[512];
11250-extern pgd_t init_level4_pgt[];
11251+extern pmd_t level2_ident_pgt[512*2];
11252+extern pgd_t init_level4_pgt[512];
11253
11254 #define swapper_pg_dir init_level4_pgt
11255
11256@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11257
11258 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11259 {
11260+ pax_open_kernel();
11261 *pmdp = pmd;
11262+ pax_close_kernel();
11263 }
11264
11265 static inline void native_pmd_clear(pmd_t *pmd)
11266@@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11267
11268 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11269 {
11270+ pax_open_kernel();
11271+ *pgdp = pgd;
11272+ pax_close_kernel();
11273+}
11274+
11275+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11276+{
11277 *pgdp = pgd;
11278 }
11279
11280diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11281index 766ea16..5b96cb3 100644
11282--- a/arch/x86/include/asm/pgtable_64_types.h
11283+++ b/arch/x86/include/asm/pgtable_64_types.h
11284@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11285 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11286 #define MODULES_END _AC(0xffffffffff000000, UL)
11287 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11288+#define MODULES_EXEC_VADDR MODULES_VADDR
11289+#define MODULES_EXEC_END MODULES_END
11290+
11291+#define ktla_ktva(addr) (addr)
11292+#define ktva_ktla(addr) (addr)
11293
11294 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11295diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11296index d1f4a76..2f46ba1 100644
11297--- a/arch/x86/include/asm/pgtable_types.h
11298+++ b/arch/x86/include/asm/pgtable_types.h
11299@@ -16,12 +16,11 @@
11300 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11301 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11302 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11303-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11304+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11305 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11306 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11307 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11308-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11309-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11310+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11311 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11312
11313 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11314@@ -39,7 +38,6 @@
11315 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11316 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11317 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11318-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11319 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11320 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11321 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11322@@ -55,8 +53,10 @@
11323
11324 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11325 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11326-#else
11327+#elif defined(CONFIG_KMEMCHECK)
11328 #define _PAGE_NX (_AT(pteval_t, 0))
11329+#else
11330+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11331 #endif
11332
11333 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11334@@ -93,6 +93,9 @@
11335 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11336 _PAGE_ACCESSED)
11337
11338+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11339+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11340+
11341 #define __PAGE_KERNEL_EXEC \
11342 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11343 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11344@@ -103,8 +106,8 @@
11345 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11346 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11347 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11348-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11349-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11350+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11351+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11352 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11353 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11354 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11355@@ -163,8 +166,8 @@
11356 * bits are combined, this will alow user to access the high address mapped
11357 * VDSO in the presence of CONFIG_COMPAT_VDSO
11358 */
11359-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11360-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11361+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11362+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11363 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11364 #endif
11365
11366@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11367 {
11368 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11369 }
11370+#endif
11371
11372+#if PAGETABLE_LEVELS == 3
11373+#include <asm-generic/pgtable-nopud.h>
11374+#endif
11375+
11376+#if PAGETABLE_LEVELS == 2
11377+#include <asm-generic/pgtable-nopmd.h>
11378+#endif
11379+
11380+#ifndef __ASSEMBLY__
11381 #if PAGETABLE_LEVELS > 3
11382 typedef struct { pudval_t pud; } pud_t;
11383
11384@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11385 return pud.pud;
11386 }
11387 #else
11388-#include <asm-generic/pgtable-nopud.h>
11389-
11390 static inline pudval_t native_pud_val(pud_t pud)
11391 {
11392 return native_pgd_val(pud.pgd);
11393@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11394 return pmd.pmd;
11395 }
11396 #else
11397-#include <asm-generic/pgtable-nopmd.h>
11398-
11399 static inline pmdval_t native_pmd_val(pmd_t pmd)
11400 {
11401 return native_pgd_val(pmd.pud.pgd);
11402@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11403
11404 extern pteval_t __supported_pte_mask;
11405 extern void set_nx(void);
11406+
11407+#ifdef CONFIG_X86_32
11408+#ifdef CONFIG_X86_PAE
11409 extern int nx_enabled;
11410+#else
11411+#define nx_enabled (0)
11412+#endif
11413+#else
11414+#define nx_enabled (1)
11415+#endif
11416
11417 #define pgprot_writecombine pgprot_writecombine
11418 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11419diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11420index fa04dea..5f823fc 100644
11421--- a/arch/x86/include/asm/processor.h
11422+++ b/arch/x86/include/asm/processor.h
11423@@ -272,7 +272,7 @@ struct tss_struct {
11424
11425 } ____cacheline_aligned;
11426
11427-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11428+extern struct tss_struct init_tss[NR_CPUS];
11429
11430 /*
11431 * Save the original ist values for checking stack pointers during debugging
11432@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11433 */
11434 #define TASK_SIZE PAGE_OFFSET
11435 #define TASK_SIZE_MAX TASK_SIZE
11436+
11437+#ifdef CONFIG_PAX_SEGMEXEC
11438+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11439+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11440+#else
11441 #define STACK_TOP TASK_SIZE
11442-#define STACK_TOP_MAX STACK_TOP
11443+#endif
11444+
11445+#define STACK_TOP_MAX TASK_SIZE
11446
11447 #define INIT_THREAD { \
11448- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11449+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11450 .vm86_info = NULL, \
11451 .sysenter_cs = __KERNEL_CS, \
11452 .io_bitmap_ptr = NULL, \
11453@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11454 */
11455 #define INIT_TSS { \
11456 .x86_tss = { \
11457- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11458+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11459 .ss0 = __KERNEL_DS, \
11460 .ss1 = __KERNEL_CS, \
11461 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11462@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11463 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11464
11465 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11466-#define KSTK_TOP(info) \
11467-({ \
11468- unsigned long *__ptr = (unsigned long *)(info); \
11469- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11470-})
11471+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11472
11473 /*
11474 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11475@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11476 #define task_pt_regs(task) \
11477 ({ \
11478 struct pt_regs *__regs__; \
11479- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11480+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11481 __regs__ - 1; \
11482 })
11483
11484@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11485 /*
11486 * User space process size. 47bits minus one guard page.
11487 */
11488-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11489+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11490
11491 /* This decides where the kernel will search for a free chunk of vm
11492 * space during mmap's.
11493 */
11494 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11495- 0xc0000000 : 0xFFFFe000)
11496+ 0xc0000000 : 0xFFFFf000)
11497
11498 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11499 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11500@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11501 #define STACK_TOP_MAX TASK_SIZE_MAX
11502
11503 #define INIT_THREAD { \
11504- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11505+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11506 }
11507
11508 #define INIT_TSS { \
11509- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11510+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11511 }
11512
11513 /*
11514@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11515 */
11516 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11517
11518+#ifdef CONFIG_PAX_SEGMEXEC
11519+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11520+#endif
11521+
11522 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11523
11524 /* Get/set a process' ability to use the timestamp counter instruction */
11525diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11526index 0f0d908..f2e3da2 100644
11527--- a/arch/x86/include/asm/ptrace.h
11528+++ b/arch/x86/include/asm/ptrace.h
11529@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11530 }
11531
11532 /*
11533- * user_mode_vm(regs) determines whether a register set came from user mode.
11534+ * user_mode(regs) determines whether a register set came from user mode.
11535 * This is true if V8086 mode was enabled OR if the register set was from
11536 * protected mode with RPL-3 CS value. This tricky test checks that with
11537 * one comparison. Many places in the kernel can bypass this full check
11538- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11539+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11540+ * be used.
11541 */
11542-static inline int user_mode(struct pt_regs *regs)
11543+static inline int user_mode_novm(struct pt_regs *regs)
11544 {
11545 #ifdef CONFIG_X86_32
11546 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11547 #else
11548- return !!(regs->cs & 3);
11549+ return !!(regs->cs & SEGMENT_RPL_MASK);
11550 #endif
11551 }
11552
11553-static inline int user_mode_vm(struct pt_regs *regs)
11554+static inline int user_mode(struct pt_regs *regs)
11555 {
11556 #ifdef CONFIG_X86_32
11557 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11558 USER_RPL;
11559 #else
11560- return user_mode(regs);
11561+ return user_mode_novm(regs);
11562 #endif
11563 }
11564
11565diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11566index 562d4fd..6e39df1 100644
11567--- a/arch/x86/include/asm/reboot.h
11568+++ b/arch/x86/include/asm/reboot.h
11569@@ -6,19 +6,19 @@
11570 struct pt_regs;
11571
11572 struct machine_ops {
11573- void (*restart)(char *cmd);
11574- void (*halt)(void);
11575- void (*power_off)(void);
11576+ void (* __noreturn restart)(char *cmd);
11577+ void (* __noreturn halt)(void);
11578+ void (* __noreturn power_off)(void);
11579 void (*shutdown)(void);
11580 void (*crash_shutdown)(struct pt_regs *);
11581- void (*emergency_restart)(void);
11582-};
11583+ void (* __noreturn emergency_restart)(void);
11584+} __no_const;
11585
11586 extern struct machine_ops machine_ops;
11587
11588 void native_machine_crash_shutdown(struct pt_regs *regs);
11589 void native_machine_shutdown(void);
11590-void machine_real_restart(const unsigned char *code, int length);
11591+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11592
11593 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11594 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11595diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11596index 606ede1..dbfff37 100644
11597--- a/arch/x86/include/asm/rwsem.h
11598+++ b/arch/x86/include/asm/rwsem.h
11599@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11600 {
11601 asm volatile("# beginning down_read\n\t"
11602 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11603+
11604+#ifdef CONFIG_PAX_REFCOUNT
11605+ "jno 0f\n"
11606+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11607+ "int $4\n0:\n"
11608+ _ASM_EXTABLE(0b, 0b)
11609+#endif
11610+
11611 /* adds 0x00000001, returns the old value */
11612 " jns 1f\n"
11613 " call call_rwsem_down_read_failed\n"
11614@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11615 "1:\n\t"
11616 " mov %1,%2\n\t"
11617 " add %3,%2\n\t"
11618+
11619+#ifdef CONFIG_PAX_REFCOUNT
11620+ "jno 0f\n"
11621+ "sub %3,%2\n"
11622+ "int $4\n0:\n"
11623+ _ASM_EXTABLE(0b, 0b)
11624+#endif
11625+
11626 " jle 2f\n\t"
11627 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11628 " jnz 1b\n\t"
11629@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11630 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11631 asm volatile("# beginning down_write\n\t"
11632 LOCK_PREFIX " xadd %1,(%2)\n\t"
11633+
11634+#ifdef CONFIG_PAX_REFCOUNT
11635+ "jno 0f\n"
11636+ "mov %1,(%2)\n"
11637+ "int $4\n0:\n"
11638+ _ASM_EXTABLE(0b, 0b)
11639+#endif
11640+
11641 /* subtract 0x0000ffff, returns the old value */
11642 " test %1,%1\n\t"
11643 /* was the count 0 before? */
11644@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11645 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11646 asm volatile("# beginning __up_read\n\t"
11647 LOCK_PREFIX " xadd %1,(%2)\n\t"
11648+
11649+#ifdef CONFIG_PAX_REFCOUNT
11650+ "jno 0f\n"
11651+ "mov %1,(%2)\n"
11652+ "int $4\n0:\n"
11653+ _ASM_EXTABLE(0b, 0b)
11654+#endif
11655+
11656 /* subtracts 1, returns the old value */
11657 " jns 1f\n\t"
11658 " call call_rwsem_wake\n"
11659@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11660 rwsem_count_t tmp;
11661 asm volatile("# beginning __up_write\n\t"
11662 LOCK_PREFIX " xadd %1,(%2)\n\t"
11663+
11664+#ifdef CONFIG_PAX_REFCOUNT
11665+ "jno 0f\n"
11666+ "mov %1,(%2)\n"
11667+ "int $4\n0:\n"
11668+ _ASM_EXTABLE(0b, 0b)
11669+#endif
11670+
11671 /* tries to transition
11672 0xffff0001 -> 0x00000000 */
11673 " jz 1f\n"
11674@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11675 {
11676 asm volatile("# beginning __downgrade_write\n\t"
11677 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11678+
11679+#ifdef CONFIG_PAX_REFCOUNT
11680+ "jno 0f\n"
11681+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11682+ "int $4\n0:\n"
11683+ _ASM_EXTABLE(0b, 0b)
11684+#endif
11685+
11686 /*
11687 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11688 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11689@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11690 static inline void rwsem_atomic_add(rwsem_count_t delta,
11691 struct rw_semaphore *sem)
11692 {
11693- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11694+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11695+
11696+#ifdef CONFIG_PAX_REFCOUNT
11697+ "jno 0f\n"
11698+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11699+ "int $4\n0:\n"
11700+ _ASM_EXTABLE(0b, 0b)
11701+#endif
11702+
11703 : "+m" (sem->count)
11704 : "er" (delta));
11705 }
11706@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11707 {
11708 rwsem_count_t tmp = delta;
11709
11710- asm volatile(LOCK_PREFIX "xadd %0,%1"
11711+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11712+
11713+#ifdef CONFIG_PAX_REFCOUNT
11714+ "jno 0f\n"
11715+ "mov %0,%1\n"
11716+ "int $4\n0:\n"
11717+ _ASM_EXTABLE(0b, 0b)
11718+#endif
11719+
11720 : "+r" (tmp), "+m" (sem->count)
11721 : : "memory");
11722
11723diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11724index 14e0ed8..7f7dd5e 100644
11725--- a/arch/x86/include/asm/segment.h
11726+++ b/arch/x86/include/asm/segment.h
11727@@ -62,10 +62,15 @@
11728 * 26 - ESPFIX small SS
11729 * 27 - per-cpu [ offset to per-cpu data area ]
11730 * 28 - stack_canary-20 [ for stack protector ]
11731- * 29 - unused
11732- * 30 - unused
11733+ * 29 - PCI BIOS CS
11734+ * 30 - PCI BIOS DS
11735 * 31 - TSS for double fault handler
11736 */
11737+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11738+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11739+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11740+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11741+
11742 #define GDT_ENTRY_TLS_MIN 6
11743 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11744
11745@@ -77,6 +82,8 @@
11746
11747 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11748
11749+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11750+
11751 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11752
11753 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11754@@ -88,7 +95,7 @@
11755 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11756 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11757
11758-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11759+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11760 #ifdef CONFIG_SMP
11761 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11762 #else
11763@@ -102,6 +109,12 @@
11764 #define __KERNEL_STACK_CANARY 0
11765 #endif
11766
11767+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11768+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11769+
11770+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11771+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11772+
11773 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11774
11775 /*
11776@@ -139,7 +152,7 @@
11777 */
11778
11779 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11780-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11781+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11782
11783
11784 #else
11785@@ -163,6 +176,8 @@
11786 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11787 #define __USER32_DS __USER_DS
11788
11789+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11790+
11791 #define GDT_ENTRY_TSS 8 /* needs two entries */
11792 #define GDT_ENTRY_LDT 10 /* needs two entries */
11793 #define GDT_ENTRY_TLS_MIN 12
11794@@ -183,6 +198,7 @@
11795 #endif
11796
11797 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
11798+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
11799 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
11800 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
11801 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
11802diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11803index 4c2f63c..5685db2 100644
11804--- a/arch/x86/include/asm/smp.h
11805+++ b/arch/x86/include/asm/smp.h
11806@@ -24,7 +24,7 @@ extern unsigned int num_processors;
11807 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
11808 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11809 DECLARE_PER_CPU(u16, cpu_llc_id);
11810-DECLARE_PER_CPU(int, cpu_number);
11811+DECLARE_PER_CPU(unsigned int, cpu_number);
11812
11813 static inline struct cpumask *cpu_sibling_mask(int cpu)
11814 {
11815@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
11816 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
11817
11818 /* Static state in head.S used to set up a CPU */
11819-extern struct {
11820- void *sp;
11821- unsigned short ss;
11822-} stack_start;
11823+extern unsigned long stack_start; /* Initial stack pointer address */
11824
11825 struct smp_ops {
11826 void (*smp_prepare_boot_cpu)(void);
11827@@ -60,7 +57,7 @@ struct smp_ops {
11828
11829 void (*send_call_func_ipi)(const struct cpumask *mask);
11830 void (*send_call_func_single_ipi)(int cpu);
11831-};
11832+} __no_const;
11833
11834 /* Globals due to paravirt */
11835 extern void set_cpu_sibling_map(int cpu);
11836@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11837 extern int safe_smp_processor_id(void);
11838
11839 #elif defined(CONFIG_X86_64_SMP)
11840-#define raw_smp_processor_id() (percpu_read(cpu_number))
11841-
11842-#define stack_smp_processor_id() \
11843-({ \
11844- struct thread_info *ti; \
11845- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11846- ti->cpu; \
11847-})
11848+#define raw_smp_processor_id() (percpu_read(cpu_number))
11849+#define stack_smp_processor_id() raw_smp_processor_id()
11850 #define safe_smp_processor_id() smp_processor_id()
11851
11852 #endif
11853diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11854index 4e77853..4359783 100644
11855--- a/arch/x86/include/asm/spinlock.h
11856+++ b/arch/x86/include/asm/spinlock.h
11857@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
11858 static inline void __raw_read_lock(raw_rwlock_t *rw)
11859 {
11860 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
11861+
11862+#ifdef CONFIG_PAX_REFCOUNT
11863+ "jno 0f\n"
11864+ LOCK_PREFIX " addl $1,(%0)\n"
11865+ "int $4\n0:\n"
11866+ _ASM_EXTABLE(0b, 0b)
11867+#endif
11868+
11869 "jns 1f\n"
11870 "call __read_lock_failed\n\t"
11871 "1:\n"
11872@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
11873 static inline void __raw_write_lock(raw_rwlock_t *rw)
11874 {
11875 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
11876+
11877+#ifdef CONFIG_PAX_REFCOUNT
11878+ "jno 0f\n"
11879+ LOCK_PREFIX " addl %1,(%0)\n"
11880+ "int $4\n0:\n"
11881+ _ASM_EXTABLE(0b, 0b)
11882+#endif
11883+
11884 "jz 1f\n"
11885 "call __write_lock_failed\n\t"
11886 "1:\n"
11887@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
11888
11889 static inline void __raw_read_unlock(raw_rwlock_t *rw)
11890 {
11891- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
11892+ asm volatile(LOCK_PREFIX "incl %0\n"
11893+
11894+#ifdef CONFIG_PAX_REFCOUNT
11895+ "jno 0f\n"
11896+ LOCK_PREFIX "decl %0\n"
11897+ "int $4\n0:\n"
11898+ _ASM_EXTABLE(0b, 0b)
11899+#endif
11900+
11901+ :"+m" (rw->lock) : : "memory");
11902 }
11903
11904 static inline void __raw_write_unlock(raw_rwlock_t *rw)
11905 {
11906- asm volatile(LOCK_PREFIX "addl %1, %0"
11907+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
11908+
11909+#ifdef CONFIG_PAX_REFCOUNT
11910+ "jno 0f\n"
11911+ LOCK_PREFIX "subl %1, %0\n"
11912+ "int $4\n0:\n"
11913+ _ASM_EXTABLE(0b, 0b)
11914+#endif
11915+
11916 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
11917 }
11918
11919diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11920index 1575177..cb23f52 100644
11921--- a/arch/x86/include/asm/stackprotector.h
11922+++ b/arch/x86/include/asm/stackprotector.h
11923@@ -48,7 +48,7 @@
11924 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11925 */
11926 #define GDT_STACK_CANARY_INIT \
11927- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11928+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11929
11930 /*
11931 * Initialize the stackprotector canary value.
11932@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11933
11934 static inline void load_stack_canary_segment(void)
11935 {
11936-#ifdef CONFIG_X86_32
11937+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11938 asm volatile ("mov %0, %%gs" : : "r" (0));
11939 #endif
11940 }
11941diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11942index e0fbf29..858ef4a 100644
11943--- a/arch/x86/include/asm/system.h
11944+++ b/arch/x86/include/asm/system.h
11945@@ -132,7 +132,7 @@ do { \
11946 "thread_return:\n\t" \
11947 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11948 __switch_canary \
11949- "movq %P[thread_info](%%rsi),%%r8\n\t" \
11950+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11951 "movq %%rax,%%rdi\n\t" \
11952 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11953 "jnz ret_from_fork\n\t" \
11954@@ -143,7 +143,7 @@ do { \
11955 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11956 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11957 [_tif_fork] "i" (_TIF_FORK), \
11958- [thread_info] "i" (offsetof(struct task_struct, stack)), \
11959+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
11960 [current_task] "m" (per_cpu_var(current_task)) \
11961 __switch_canary_iparam \
11962 : "memory", "cc" __EXTRA_CLOBBER)
11963@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11964 {
11965 unsigned long __limit;
11966 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11967- return __limit + 1;
11968+ return __limit;
11969 }
11970
11971 static inline void native_clts(void)
11972@@ -340,12 +340,12 @@ void enable_hlt(void);
11973
11974 void cpu_idle_wait(void);
11975
11976-extern unsigned long arch_align_stack(unsigned long sp);
11977+#define arch_align_stack(x) ((x) & ~0xfUL)
11978 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11979
11980 void default_idle(void);
11981
11982-void stop_this_cpu(void *dummy);
11983+void stop_this_cpu(void *dummy) __noreturn;
11984
11985 /*
11986 * Force strict CPU ordering.
11987diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11988index 19c3ce4..8962535 100644
11989--- a/arch/x86/include/asm/thread_info.h
11990+++ b/arch/x86/include/asm/thread_info.h
11991@@ -10,6 +10,7 @@
11992 #include <linux/compiler.h>
11993 #include <asm/page.h>
11994 #include <asm/types.h>
11995+#include <asm/percpu.h>
11996
11997 /*
11998 * low level task data that entry.S needs immediate access to
11999@@ -24,7 +25,6 @@ struct exec_domain;
12000 #include <asm/atomic.h>
12001
12002 struct thread_info {
12003- struct task_struct *task; /* main task structure */
12004 struct exec_domain *exec_domain; /* execution domain */
12005 __u32 flags; /* low level flags */
12006 __u32 status; /* thread synchronous flags */
12007@@ -34,18 +34,12 @@ struct thread_info {
12008 mm_segment_t addr_limit;
12009 struct restart_block restart_block;
12010 void __user *sysenter_return;
12011-#ifdef CONFIG_X86_32
12012- unsigned long previous_esp; /* ESP of the previous stack in
12013- case of nested (IRQ) stacks
12014- */
12015- __u8 supervisor_stack[0];
12016-#endif
12017+ unsigned long lowest_stack;
12018 int uaccess_err;
12019 };
12020
12021-#define INIT_THREAD_INFO(tsk) \
12022+#define INIT_THREAD_INFO \
12023 { \
12024- .task = &tsk, \
12025 .exec_domain = &default_exec_domain, \
12026 .flags = 0, \
12027 .cpu = 0, \
12028@@ -56,7 +50,7 @@ struct thread_info {
12029 }, \
12030 }
12031
12032-#define init_thread_info (init_thread_union.thread_info)
12033+#define init_thread_info (init_thread_union.stack)
12034 #define init_stack (init_thread_union.stack)
12035
12036 #else /* !__ASSEMBLY__ */
12037@@ -163,45 +157,40 @@ struct thread_info {
12038 #define alloc_thread_info(tsk) \
12039 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12040
12041-#ifdef CONFIG_X86_32
12042-
12043-#define STACK_WARN (THREAD_SIZE/8)
12044-/*
12045- * macros/functions for gaining access to the thread information structure
12046- *
12047- * preempt_count needs to be 1 initially, until the scheduler is functional.
12048- */
12049-#ifndef __ASSEMBLY__
12050-
12051-
12052-/* how to get the current stack pointer from C */
12053-register unsigned long current_stack_pointer asm("esp") __used;
12054-
12055-/* how to get the thread information struct from C */
12056-static inline struct thread_info *current_thread_info(void)
12057-{
12058- return (struct thread_info *)
12059- (current_stack_pointer & ~(THREAD_SIZE - 1));
12060-}
12061-
12062-#else /* !__ASSEMBLY__ */
12063-
12064+#ifdef __ASSEMBLY__
12065 /* how to get the thread information struct from ASM */
12066 #define GET_THREAD_INFO(reg) \
12067- movl $-THREAD_SIZE, reg; \
12068- andl %esp, reg
12069+ mov PER_CPU_VAR(current_tinfo), reg
12070
12071 /* use this one if reg already contains %esp */
12072-#define GET_THREAD_INFO_WITH_ESP(reg) \
12073- andl $-THREAD_SIZE, reg
12074+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12075+#else
12076+/* how to get the thread information struct from C */
12077+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12078+
12079+static __always_inline struct thread_info *current_thread_info(void)
12080+{
12081+ return percpu_read_stable(current_tinfo);
12082+}
12083+#endif
12084+
12085+#ifdef CONFIG_X86_32
12086+
12087+#define STACK_WARN (THREAD_SIZE/8)
12088+/*
12089+ * macros/functions for gaining access to the thread information structure
12090+ *
12091+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12092+ */
12093+#ifndef __ASSEMBLY__
12094+
12095+/* how to get the current stack pointer from C */
12096+register unsigned long current_stack_pointer asm("esp") __used;
12097
12098 #endif
12099
12100 #else /* X86_32 */
12101
12102-#include <asm/percpu.h>
12103-#define KERNEL_STACK_OFFSET (5*8)
12104-
12105 /*
12106 * macros/functions for gaining access to the thread information structure
12107 * preempt_count needs to be 1 initially, until the scheduler is functional.
12108@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12109 #ifndef __ASSEMBLY__
12110 DECLARE_PER_CPU(unsigned long, kernel_stack);
12111
12112-static inline struct thread_info *current_thread_info(void)
12113-{
12114- struct thread_info *ti;
12115- ti = (void *)(percpu_read_stable(kernel_stack) +
12116- KERNEL_STACK_OFFSET - THREAD_SIZE);
12117- return ti;
12118-}
12119-
12120-#else /* !__ASSEMBLY__ */
12121-
12122-/* how to get the thread information struct from ASM */
12123-#define GET_THREAD_INFO(reg) \
12124- movq PER_CPU_VAR(kernel_stack),reg ; \
12125- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12126-
12127+/* how to get the current stack pointer from C */
12128+register unsigned long current_stack_pointer asm("rsp") __used;
12129 #endif
12130
12131 #endif /* !X86_32 */
12132@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12133 extern void free_thread_info(struct thread_info *ti);
12134 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12135 #define arch_task_cache_init arch_task_cache_init
12136+
12137+#define __HAVE_THREAD_FUNCTIONS
12138+#define task_thread_info(task) (&(task)->tinfo)
12139+#define task_stack_page(task) ((task)->stack)
12140+#define setup_thread_stack(p, org) do {} while (0)
12141+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12142+
12143+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12144+extern struct task_struct *alloc_task_struct(void);
12145+extern void free_task_struct(struct task_struct *);
12146+
12147 #endif
12148 #endif /* _ASM_X86_THREAD_INFO_H */
12149diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12150index 61c5874..8a046e9 100644
12151--- a/arch/x86/include/asm/uaccess.h
12152+++ b/arch/x86/include/asm/uaccess.h
12153@@ -8,12 +8,15 @@
12154 #include <linux/thread_info.h>
12155 #include <linux/prefetch.h>
12156 #include <linux/string.h>
12157+#include <linux/sched.h>
12158 #include <asm/asm.h>
12159 #include <asm/page.h>
12160
12161 #define VERIFY_READ 0
12162 #define VERIFY_WRITE 1
12163
12164+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12165+
12166 /*
12167 * The fs value determines whether argument validity checking should be
12168 * performed or not. If get_fs() == USER_DS, checking is performed, with
12169@@ -29,7 +32,12 @@
12170
12171 #define get_ds() (KERNEL_DS)
12172 #define get_fs() (current_thread_info()->addr_limit)
12173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12174+void __set_fs(mm_segment_t x);
12175+void set_fs(mm_segment_t x);
12176+#else
12177 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12178+#endif
12179
12180 #define segment_eq(a, b) ((a).seg == (b).seg)
12181
12182@@ -77,7 +85,33 @@
12183 * checks that the pointer is in the user space range - after calling
12184 * this function, memory access functions may still return -EFAULT.
12185 */
12186-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12187+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12188+#define access_ok(type, addr, size) \
12189+({ \
12190+ long __size = size; \
12191+ unsigned long __addr = (unsigned long)addr; \
12192+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12193+ unsigned long __end_ao = __addr + __size - 1; \
12194+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12195+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12196+ while(__addr_ao <= __end_ao) { \
12197+ char __c_ao; \
12198+ __addr_ao += PAGE_SIZE; \
12199+ if (__size > PAGE_SIZE) \
12200+ cond_resched(); \
12201+ if (__get_user(__c_ao, (char __user *)__addr)) \
12202+ break; \
12203+ if (type != VERIFY_WRITE) { \
12204+ __addr = __addr_ao; \
12205+ continue; \
12206+ } \
12207+ if (__put_user(__c_ao, (char __user *)__addr)) \
12208+ break; \
12209+ __addr = __addr_ao; \
12210+ } \
12211+ } \
12212+ __ret_ao; \
12213+})
12214
12215 /*
12216 * The exception table consists of pairs of addresses: the first is the
12217@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12218 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12219 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12220
12221-
12222+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12223+#define __copyuser_seg "gs;"
12224+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12225+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12226+#else
12227+#define __copyuser_seg
12228+#define __COPYUSER_SET_ES
12229+#define __COPYUSER_RESTORE_ES
12230+#endif
12231
12232 #ifdef CONFIG_X86_32
12233 #define __put_user_asm_u64(x, addr, err, errret) \
12234- asm volatile("1: movl %%eax,0(%2)\n" \
12235- "2: movl %%edx,4(%2)\n" \
12236+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12237+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12238 "3:\n" \
12239 ".section .fixup,\"ax\"\n" \
12240 "4: movl %3,%0\n" \
12241@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12242 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12243
12244 #define __put_user_asm_ex_u64(x, addr) \
12245- asm volatile("1: movl %%eax,0(%1)\n" \
12246- "2: movl %%edx,4(%1)\n" \
12247+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12248+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12249 "3:\n" \
12250 _ASM_EXTABLE(1b, 2b - 1b) \
12251 _ASM_EXTABLE(2b, 3b - 2b) \
12252@@ -253,7 +295,7 @@ extern void __put_user_8(void);
12253 __typeof__(*(ptr)) __pu_val; \
12254 __chk_user_ptr(ptr); \
12255 might_fault(); \
12256- __pu_val = x; \
12257+ __pu_val = (x); \
12258 switch (sizeof(*(ptr))) { \
12259 case 1: \
12260 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12261@@ -374,7 +416,7 @@ do { \
12262 } while (0)
12263
12264 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12265- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12266+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12267 "2:\n" \
12268 ".section .fixup,\"ax\"\n" \
12269 "3: mov %3,%0\n" \
12270@@ -382,7 +424,7 @@ do { \
12271 " jmp 2b\n" \
12272 ".previous\n" \
12273 _ASM_EXTABLE(1b, 3b) \
12274- : "=r" (err), ltype(x) \
12275+ : "=r" (err), ltype (x) \
12276 : "m" (__m(addr)), "i" (errret), "0" (err))
12277
12278 #define __get_user_size_ex(x, ptr, size) \
12279@@ -407,7 +449,7 @@ do { \
12280 } while (0)
12281
12282 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12283- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12284+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12285 "2:\n" \
12286 _ASM_EXTABLE(1b, 2b - 1b) \
12287 : ltype(x) : "m" (__m(addr)))
12288@@ -424,13 +466,24 @@ do { \
12289 int __gu_err; \
12290 unsigned long __gu_val; \
12291 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12292- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12293+ (x) = (__typeof__(*(ptr)))__gu_val; \
12294 __gu_err; \
12295 })
12296
12297 /* FIXME: this hack is definitely wrong -AK */
12298 struct __large_struct { unsigned long buf[100]; };
12299-#define __m(x) (*(struct __large_struct __user *)(x))
12300+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12301+#define ____m(x) \
12302+({ \
12303+ unsigned long ____x = (unsigned long)(x); \
12304+ if (____x < PAX_USER_SHADOW_BASE) \
12305+ ____x += PAX_USER_SHADOW_BASE; \
12306+ (void __user *)____x; \
12307+})
12308+#else
12309+#define ____m(x) (x)
12310+#endif
12311+#define __m(x) (*(struct __large_struct __user *)____m(x))
12312
12313 /*
12314 * Tell gcc we read from memory instead of writing: this is because
12315@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12316 * aliasing issues.
12317 */
12318 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12319- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12320+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12321 "2:\n" \
12322 ".section .fixup,\"ax\"\n" \
12323 "3: mov %3,%0\n" \
12324@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12325 ".previous\n" \
12326 _ASM_EXTABLE(1b, 3b) \
12327 : "=r"(err) \
12328- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12329+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12330
12331 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12332- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12333+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12334 "2:\n" \
12335 _ASM_EXTABLE(1b, 2b - 1b) \
12336 : : ltype(x), "m" (__m(addr)))
12337@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12338 * On error, the variable @x is set to zero.
12339 */
12340
12341+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12342+#define __get_user(x, ptr) get_user((x), (ptr))
12343+#else
12344 #define __get_user(x, ptr) \
12345 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12346+#endif
12347
12348 /**
12349 * __put_user: - Write a simple value into user space, with less checking.
12350@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12351 * Returns zero on success, or -EFAULT on error.
12352 */
12353
12354+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12355+#define __put_user(x, ptr) put_user((x), (ptr))
12356+#else
12357 #define __put_user(x, ptr) \
12358 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12359+#endif
12360
12361 #define __get_user_unaligned __get_user
12362 #define __put_user_unaligned __put_user
12363@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12364 #define get_user_ex(x, ptr) do { \
12365 unsigned long __gue_val; \
12366 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12367- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12368+ (x) = (__typeof__(*(ptr)))__gue_val; \
12369 } while (0)
12370
12371 #ifdef CONFIG_X86_WP_WORKS_OK
12372@@ -567,6 +628,7 @@ extern struct movsl_mask {
12373
12374 #define ARCH_HAS_NOCACHE_UACCESS 1
12375
12376+#define ARCH_HAS_SORT_EXTABLE
12377 #ifdef CONFIG_X86_32
12378 # include "uaccess_32.h"
12379 #else
12380diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12381index 632fb44..e30e334 100644
12382--- a/arch/x86/include/asm/uaccess_32.h
12383+++ b/arch/x86/include/asm/uaccess_32.h
12384@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12385 static __always_inline unsigned long __must_check
12386 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12387 {
12388+ pax_track_stack();
12389+
12390+ if ((long)n < 0)
12391+ return n;
12392+
12393 if (__builtin_constant_p(n)) {
12394 unsigned long ret;
12395
12396@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12397 return ret;
12398 }
12399 }
12400+ if (!__builtin_constant_p(n))
12401+ check_object_size(from, n, true);
12402 return __copy_to_user_ll(to, from, n);
12403 }
12404
12405@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12406 __copy_to_user(void __user *to, const void *from, unsigned long n)
12407 {
12408 might_fault();
12409+
12410 return __copy_to_user_inatomic(to, from, n);
12411 }
12412
12413 static __always_inline unsigned long
12414 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12415 {
12416+ if ((long)n < 0)
12417+ return n;
12418+
12419 /* Avoid zeroing the tail if the copy fails..
12420 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12421 * but as the zeroing behaviour is only significant when n is not
12422@@ -138,6 +149,12 @@ static __always_inline unsigned long
12423 __copy_from_user(void *to, const void __user *from, unsigned long n)
12424 {
12425 might_fault();
12426+
12427+ pax_track_stack();
12428+
12429+ if ((long)n < 0)
12430+ return n;
12431+
12432 if (__builtin_constant_p(n)) {
12433 unsigned long ret;
12434
12435@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12436 return ret;
12437 }
12438 }
12439+ if (!__builtin_constant_p(n))
12440+ check_object_size(to, n, false);
12441 return __copy_from_user_ll(to, from, n);
12442 }
12443
12444@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12445 const void __user *from, unsigned long n)
12446 {
12447 might_fault();
12448+
12449+ if ((long)n < 0)
12450+ return n;
12451+
12452 if (__builtin_constant_p(n)) {
12453 unsigned long ret;
12454
12455@@ -182,14 +205,62 @@ static __always_inline unsigned long
12456 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12457 unsigned long n)
12458 {
12459- return __copy_from_user_ll_nocache_nozero(to, from, n);
12460+ if ((long)n < 0)
12461+ return n;
12462+
12463+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12464+}
12465+
12466+/**
12467+ * copy_to_user: - Copy a block of data into user space.
12468+ * @to: Destination address, in user space.
12469+ * @from: Source address, in kernel space.
12470+ * @n: Number of bytes to copy.
12471+ *
12472+ * Context: User context only. This function may sleep.
12473+ *
12474+ * Copy data from kernel space to user space.
12475+ *
12476+ * Returns number of bytes that could not be copied.
12477+ * On success, this will be zero.
12478+ */
12479+static __always_inline unsigned long __must_check
12480+copy_to_user(void __user *to, const void *from, unsigned long n)
12481+{
12482+ if (access_ok(VERIFY_WRITE, to, n))
12483+ n = __copy_to_user(to, from, n);
12484+ return n;
12485+}
12486+
12487+/**
12488+ * copy_from_user: - Copy a block of data from user space.
12489+ * @to: Destination address, in kernel space.
12490+ * @from: Source address, in user space.
12491+ * @n: Number of bytes to copy.
12492+ *
12493+ * Context: User context only. This function may sleep.
12494+ *
12495+ * Copy data from user space to kernel space.
12496+ *
12497+ * Returns number of bytes that could not be copied.
12498+ * On success, this will be zero.
12499+ *
12500+ * If some data could not be copied, this function will pad the copied
12501+ * data to the requested size using zero bytes.
12502+ */
12503+static __always_inline unsigned long __must_check
12504+copy_from_user(void *to, const void __user *from, unsigned long n)
12505+{
12506+ if (access_ok(VERIFY_READ, from, n))
12507+ n = __copy_from_user(to, from, n);
12508+ else if ((long)n > 0) {
12509+ if (!__builtin_constant_p(n))
12510+ check_object_size(to, n, false);
12511+ memset(to, 0, n);
12512+ }
12513+ return n;
12514 }
12515
12516-unsigned long __must_check copy_to_user(void __user *to,
12517- const void *from, unsigned long n);
12518-unsigned long __must_check copy_from_user(void *to,
12519- const void __user *from,
12520- unsigned long n);
12521 long __must_check strncpy_from_user(char *dst, const char __user *src,
12522 long count);
12523 long __must_check __strncpy_from_user(char *dst,
12524diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12525index db24b21..f595ae7 100644
12526--- a/arch/x86/include/asm/uaccess_64.h
12527+++ b/arch/x86/include/asm/uaccess_64.h
12528@@ -9,6 +9,9 @@
12529 #include <linux/prefetch.h>
12530 #include <linux/lockdep.h>
12531 #include <asm/page.h>
12532+#include <asm/pgtable.h>
12533+
12534+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12535
12536 /*
12537 * Copy To/From Userspace
12538@@ -16,116 +19,205 @@
12539
12540 /* Handles exceptions in both to and from, but doesn't do access_ok */
12541 __must_check unsigned long
12542-copy_user_generic(void *to, const void *from, unsigned len);
12543+copy_user_generic(void *to, const void *from, unsigned long len);
12544
12545 __must_check unsigned long
12546-copy_to_user(void __user *to, const void *from, unsigned len);
12547-__must_check unsigned long
12548-copy_from_user(void *to, const void __user *from, unsigned len);
12549-__must_check unsigned long
12550-copy_in_user(void __user *to, const void __user *from, unsigned len);
12551+copy_in_user(void __user *to, const void __user *from, unsigned long len);
12552
12553 static __always_inline __must_check
12554-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12555+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12556 {
12557- int ret = 0;
12558+ unsigned ret = 0;
12559
12560 might_fault();
12561- if (!__builtin_constant_p(size))
12562- return copy_user_generic(dst, (__force void *)src, size);
12563+
12564+ if (size > INT_MAX)
12565+ return size;
12566+
12567+#ifdef CONFIG_PAX_MEMORY_UDEREF
12568+ if (!__access_ok(VERIFY_READ, src, size))
12569+ return size;
12570+#endif
12571+
12572+ if (!__builtin_constant_p(size)) {
12573+ check_object_size(dst, size, false);
12574+
12575+#ifdef CONFIG_PAX_MEMORY_UDEREF
12576+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12577+ src += PAX_USER_SHADOW_BASE;
12578+#endif
12579+
12580+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12581+ }
12582 switch (size) {
12583- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12584+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12585 ret, "b", "b", "=q", 1);
12586 return ret;
12587- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12588+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12589 ret, "w", "w", "=r", 2);
12590 return ret;
12591- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12592+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12593 ret, "l", "k", "=r", 4);
12594 return ret;
12595- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12596+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12597 ret, "q", "", "=r", 8);
12598 return ret;
12599 case 10:
12600- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12601+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12602 ret, "q", "", "=r", 10);
12603 if (unlikely(ret))
12604 return ret;
12605 __get_user_asm(*(u16 *)(8 + (char *)dst),
12606- (u16 __user *)(8 + (char __user *)src),
12607+ (const u16 __user *)(8 + (const char __user *)src),
12608 ret, "w", "w", "=r", 2);
12609 return ret;
12610 case 16:
12611- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12612+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12613 ret, "q", "", "=r", 16);
12614 if (unlikely(ret))
12615 return ret;
12616 __get_user_asm(*(u64 *)(8 + (char *)dst),
12617- (u64 __user *)(8 + (char __user *)src),
12618+ (const u64 __user *)(8 + (const char __user *)src),
12619 ret, "q", "", "=r", 8);
12620 return ret;
12621 default:
12622- return copy_user_generic(dst, (__force void *)src, size);
12623+
12624+#ifdef CONFIG_PAX_MEMORY_UDEREF
12625+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12626+ src += PAX_USER_SHADOW_BASE;
12627+#endif
12628+
12629+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12630 }
12631 }
12632
12633 static __always_inline __must_check
12634-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12635+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12636 {
12637- int ret = 0;
12638+ unsigned ret = 0;
12639
12640 might_fault();
12641- if (!__builtin_constant_p(size))
12642- return copy_user_generic((__force void *)dst, src, size);
12643+
12644+ pax_track_stack();
12645+
12646+ if (size > INT_MAX)
12647+ return size;
12648+
12649+#ifdef CONFIG_PAX_MEMORY_UDEREF
12650+ if (!__access_ok(VERIFY_WRITE, dst, size))
12651+ return size;
12652+#endif
12653+
12654+ if (!__builtin_constant_p(size)) {
12655+ check_object_size(src, size, true);
12656+
12657+#ifdef CONFIG_PAX_MEMORY_UDEREF
12658+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12659+ dst += PAX_USER_SHADOW_BASE;
12660+#endif
12661+
12662+ return copy_user_generic((__force_kernel void *)dst, src, size);
12663+ }
12664 switch (size) {
12665- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12666+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12667 ret, "b", "b", "iq", 1);
12668 return ret;
12669- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12670+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12671 ret, "w", "w", "ir", 2);
12672 return ret;
12673- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12674+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12675 ret, "l", "k", "ir", 4);
12676 return ret;
12677- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12678+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12679 ret, "q", "", "er", 8);
12680 return ret;
12681 case 10:
12682- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12683+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12684 ret, "q", "", "er", 10);
12685 if (unlikely(ret))
12686 return ret;
12687 asm("":::"memory");
12688- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12689+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12690 ret, "w", "w", "ir", 2);
12691 return ret;
12692 case 16:
12693- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12694+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12695 ret, "q", "", "er", 16);
12696 if (unlikely(ret))
12697 return ret;
12698 asm("":::"memory");
12699- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12700+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12701 ret, "q", "", "er", 8);
12702 return ret;
12703 default:
12704- return copy_user_generic((__force void *)dst, src, size);
12705+
12706+#ifdef CONFIG_PAX_MEMORY_UDEREF
12707+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12708+ dst += PAX_USER_SHADOW_BASE;
12709+#endif
12710+
12711+ return copy_user_generic((__force_kernel void *)dst, src, size);
12712+ }
12713+}
12714+
12715+static __always_inline __must_check
12716+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12717+{
12718+ if (access_ok(VERIFY_WRITE, to, len))
12719+ len = __copy_to_user(to, from, len);
12720+ return len;
12721+}
12722+
12723+static __always_inline __must_check
12724+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12725+{
12726+ might_fault();
12727+
12728+ if (access_ok(VERIFY_READ, from, len))
12729+ len = __copy_from_user(to, from, len);
12730+ else if (len < INT_MAX) {
12731+ if (!__builtin_constant_p(len))
12732+ check_object_size(to, len, false);
12733+ memset(to, 0, len);
12734 }
12735+ return len;
12736 }
12737
12738 static __always_inline __must_check
12739-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12740+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12741 {
12742- int ret = 0;
12743+ unsigned ret = 0;
12744
12745 might_fault();
12746- if (!__builtin_constant_p(size))
12747- return copy_user_generic((__force void *)dst,
12748- (__force void *)src, size);
12749+
12750+ pax_track_stack();
12751+
12752+ if (size > INT_MAX)
12753+ return size;
12754+
12755+#ifdef CONFIG_PAX_MEMORY_UDEREF
12756+ if (!__access_ok(VERIFY_READ, src, size))
12757+ return size;
12758+ if (!__access_ok(VERIFY_WRITE, dst, size))
12759+ return size;
12760+#endif
12761+
12762+ if (!__builtin_constant_p(size)) {
12763+
12764+#ifdef CONFIG_PAX_MEMORY_UDEREF
12765+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12766+ src += PAX_USER_SHADOW_BASE;
12767+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12768+ dst += PAX_USER_SHADOW_BASE;
12769+#endif
12770+
12771+ return copy_user_generic((__force_kernel void *)dst,
12772+ (__force_kernel const void *)src, size);
12773+ }
12774 switch (size) {
12775 case 1: {
12776 u8 tmp;
12777- __get_user_asm(tmp, (u8 __user *)src,
12778+ __get_user_asm(tmp, (const u8 __user *)src,
12779 ret, "b", "b", "=q", 1);
12780 if (likely(!ret))
12781 __put_user_asm(tmp, (u8 __user *)dst,
12782@@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12783 }
12784 case 2: {
12785 u16 tmp;
12786- __get_user_asm(tmp, (u16 __user *)src,
12787+ __get_user_asm(tmp, (const u16 __user *)src,
12788 ret, "w", "w", "=r", 2);
12789 if (likely(!ret))
12790 __put_user_asm(tmp, (u16 __user *)dst,
12791@@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12792
12793 case 4: {
12794 u32 tmp;
12795- __get_user_asm(tmp, (u32 __user *)src,
12796+ __get_user_asm(tmp, (const u32 __user *)src,
12797 ret, "l", "k", "=r", 4);
12798 if (likely(!ret))
12799 __put_user_asm(tmp, (u32 __user *)dst,
12800@@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12801 }
12802 case 8: {
12803 u64 tmp;
12804- __get_user_asm(tmp, (u64 __user *)src,
12805+ __get_user_asm(tmp, (const u64 __user *)src,
12806 ret, "q", "", "=r", 8);
12807 if (likely(!ret))
12808 __put_user_asm(tmp, (u64 __user *)dst,
12809@@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12810 return ret;
12811 }
12812 default:
12813- return copy_user_generic((__force void *)dst,
12814- (__force void *)src, size);
12815+
12816+#ifdef CONFIG_PAX_MEMORY_UDEREF
12817+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12818+ src += PAX_USER_SHADOW_BASE;
12819+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12820+ dst += PAX_USER_SHADOW_BASE;
12821+#endif
12822+
12823+ return copy_user_generic((__force_kernel void *)dst,
12824+ (__force_kernel const void *)src, size);
12825 }
12826 }
12827
12828@@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
12829 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12830 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12831
12832-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
12833- unsigned size);
12834+static __must_check __always_inline unsigned long
12835+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12836+{
12837+ pax_track_stack();
12838+
12839+ if (size > INT_MAX)
12840+ return size;
12841+
12842+#ifdef CONFIG_PAX_MEMORY_UDEREF
12843+ if (!__access_ok(VERIFY_READ, src, size))
12844+ return size;
12845
12846-static __must_check __always_inline int
12847-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12848+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12849+ src += PAX_USER_SHADOW_BASE;
12850+#endif
12851+
12852+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12853+}
12854+
12855+static __must_check __always_inline unsigned long
12856+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12857 {
12858- return copy_user_generic((__force void *)dst, src, size);
12859+ if (size > INT_MAX)
12860+ return size;
12861+
12862+#ifdef CONFIG_PAX_MEMORY_UDEREF
12863+ if (!__access_ok(VERIFY_WRITE, dst, size))
12864+ return size;
12865+
12866+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12867+ dst += PAX_USER_SHADOW_BASE;
12868+#endif
12869+
12870+ return copy_user_generic((__force_kernel void *)dst, src, size);
12871 }
12872
12873-extern long __copy_user_nocache(void *dst, const void __user *src,
12874- unsigned size, int zerorest);
12875+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12876+ unsigned long size, int zerorest);
12877
12878-static inline int
12879-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12880+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12881 {
12882 might_sleep();
12883+
12884+ if (size > INT_MAX)
12885+ return size;
12886+
12887+#ifdef CONFIG_PAX_MEMORY_UDEREF
12888+ if (!__access_ok(VERIFY_READ, src, size))
12889+ return size;
12890+#endif
12891+
12892 return __copy_user_nocache(dst, src, size, 1);
12893 }
12894
12895-static inline int
12896-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12897- unsigned size)
12898+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12899+ unsigned long size)
12900 {
12901+ if (size > INT_MAX)
12902+ return size;
12903+
12904+#ifdef CONFIG_PAX_MEMORY_UDEREF
12905+ if (!__access_ok(VERIFY_READ, src, size))
12906+ return size;
12907+#endif
12908+
12909 return __copy_user_nocache(dst, src, size, 0);
12910 }
12911
12912-unsigned long
12913-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12914+extern unsigned long
12915+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12916
12917 #endif /* _ASM_X86_UACCESS_64_H */
12918diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12919index 9064052..786cfbc 100644
12920--- a/arch/x86/include/asm/vdso.h
12921+++ b/arch/x86/include/asm/vdso.h
12922@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
12923 #define VDSO32_SYMBOL(base, name) \
12924 ({ \
12925 extern const char VDSO32_##name[]; \
12926- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12927+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12928 })
12929 #endif
12930
12931diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
12932index 3d61e20..9507180 100644
12933--- a/arch/x86/include/asm/vgtod.h
12934+++ b/arch/x86/include/asm/vgtod.h
12935@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
12936 int sysctl_enabled;
12937 struct timezone sys_tz;
12938 struct { /* extract of a clocksource struct */
12939+ char name[8];
12940 cycle_t (*vread)(void);
12941 cycle_t cycle_last;
12942 cycle_t mask;
12943diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
12944index 61e08c0..b0da582 100644
12945--- a/arch/x86/include/asm/vmi.h
12946+++ b/arch/x86/include/asm/vmi.h
12947@@ -191,6 +191,7 @@ struct vrom_header {
12948 u8 reserved[96]; /* Reserved for headers */
12949 char vmi_init[8]; /* VMI_Init jump point */
12950 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
12951+ char rom_data[8048]; /* rest of the option ROM */
12952 } __attribute__((packed));
12953
12954 struct pnp_header {
12955diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
12956index c6e0bee..fcb9f74 100644
12957--- a/arch/x86/include/asm/vmi_time.h
12958+++ b/arch/x86/include/asm/vmi_time.h
12959@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
12960 int (*wallclock_updated)(void);
12961 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
12962 void (*cancel_alarm)(u32 flags);
12963-} vmi_timer_ops;
12964+} __no_const vmi_timer_ops;
12965
12966 /* Prototypes */
12967 extern void __init vmi_time_init(void);
12968diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
12969index d0983d2..1f7c9e9 100644
12970--- a/arch/x86/include/asm/vsyscall.h
12971+++ b/arch/x86/include/asm/vsyscall.h
12972@@ -15,9 +15,10 @@ enum vsyscall_num {
12973
12974 #ifdef __KERNEL__
12975 #include <linux/seqlock.h>
12976+#include <linux/getcpu.h>
12977+#include <linux/time.h>
12978
12979 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
12980-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
12981
12982 /* Definitions for CONFIG_GENERIC_TIME definitions */
12983 #define __section_vsyscall_gtod_data __attribute__ \
12984@@ -31,7 +32,6 @@ enum vsyscall_num {
12985 #define VGETCPU_LSL 2
12986
12987 extern int __vgetcpu_mode;
12988-extern volatile unsigned long __jiffies;
12989
12990 /* kernel space (writeable) */
12991 extern int vgetcpu_mode;
12992@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
12993
12994 extern void map_vsyscall(void);
12995
12996+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
12997+extern time_t vtime(time_t *t);
12998+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
12999 #endif /* __KERNEL__ */
13000
13001 #endif /* _ASM_X86_VSYSCALL_H */
13002diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13003index 2c756fd..3377e37 100644
13004--- a/arch/x86/include/asm/x86_init.h
13005+++ b/arch/x86/include/asm/x86_init.h
13006@@ -28,7 +28,7 @@ struct x86_init_mpparse {
13007 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13008 void (*find_smp_config)(unsigned int reserve);
13009 void (*get_smp_config)(unsigned int early);
13010-};
13011+} __no_const;
13012
13013 /**
13014 * struct x86_init_resources - platform specific resource related ops
13015@@ -42,7 +42,7 @@ struct x86_init_resources {
13016 void (*probe_roms)(void);
13017 void (*reserve_resources)(void);
13018 char *(*memory_setup)(void);
13019-};
13020+} __no_const;
13021
13022 /**
13023 * struct x86_init_irqs - platform specific interrupt setup
13024@@ -55,7 +55,7 @@ struct x86_init_irqs {
13025 void (*pre_vector_init)(void);
13026 void (*intr_init)(void);
13027 void (*trap_init)(void);
13028-};
13029+} __no_const;
13030
13031 /**
13032 * struct x86_init_oem - oem platform specific customizing functions
13033@@ -65,7 +65,7 @@ struct x86_init_irqs {
13034 struct x86_init_oem {
13035 void (*arch_setup)(void);
13036 void (*banner)(void);
13037-};
13038+} __no_const;
13039
13040 /**
13041 * struct x86_init_paging - platform specific paging functions
13042@@ -75,7 +75,7 @@ struct x86_init_oem {
13043 struct x86_init_paging {
13044 void (*pagetable_setup_start)(pgd_t *base);
13045 void (*pagetable_setup_done)(pgd_t *base);
13046-};
13047+} __no_const;
13048
13049 /**
13050 * struct x86_init_timers - platform specific timer setup
13051@@ -88,7 +88,7 @@ struct x86_init_timers {
13052 void (*setup_percpu_clockev)(void);
13053 void (*tsc_pre_init)(void);
13054 void (*timer_init)(void);
13055-};
13056+} __no_const;
13057
13058 /**
13059 * struct x86_init_ops - functions for platform specific setup
13060@@ -101,7 +101,7 @@ struct x86_init_ops {
13061 struct x86_init_oem oem;
13062 struct x86_init_paging paging;
13063 struct x86_init_timers timers;
13064-};
13065+} __no_const;
13066
13067 /**
13068 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13069@@ -109,7 +109,7 @@ struct x86_init_ops {
13070 */
13071 struct x86_cpuinit_ops {
13072 void (*setup_percpu_clockev)(void);
13073-};
13074+} __no_const;
13075
13076 /**
13077 * struct x86_platform_ops - platform specific runtime functions
13078@@ -121,7 +121,7 @@ struct x86_platform_ops {
13079 unsigned long (*calibrate_tsc)(void);
13080 unsigned long (*get_wallclock)(void);
13081 int (*set_wallclock)(unsigned long nowtime);
13082-};
13083+} __no_const;
13084
13085 extern struct x86_init_ops x86_init;
13086 extern struct x86_cpuinit_ops x86_cpuinit;
13087diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13088index 727acc1..554f3eb 100644
13089--- a/arch/x86/include/asm/xsave.h
13090+++ b/arch/x86/include/asm/xsave.h
13091@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13092 static inline int xsave_user(struct xsave_struct __user *buf)
13093 {
13094 int err;
13095+
13096+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13097+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13098+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13099+#endif
13100+
13101 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13102 "2:\n"
13103 ".section .fixup,\"ax\"\n"
13104@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13105 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13106 {
13107 int err;
13108- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13109+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13110 u32 lmask = mask;
13111 u32 hmask = mask >> 32;
13112
13113+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13114+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13115+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13116+#endif
13117+
13118 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13119 "2:\n"
13120 ".section .fixup,\"ax\"\n"
13121diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13122index 6a564ac..9b1340c 100644
13123--- a/arch/x86/kernel/acpi/realmode/Makefile
13124+++ b/arch/x86/kernel/acpi/realmode/Makefile
13125@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13126 $(call cc-option, -fno-stack-protector) \
13127 $(call cc-option, -mpreferred-stack-boundary=2)
13128 KBUILD_CFLAGS += $(call cc-option, -m32)
13129+ifdef CONSTIFY_PLUGIN
13130+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13131+endif
13132 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13133 GCOV_PROFILE := n
13134
13135diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13136index 580b4e2..d4129e4 100644
13137--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13138+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13139@@ -91,6 +91,9 @@ _start:
13140 /* Do any other stuff... */
13141
13142 #ifndef CONFIG_64BIT
13143+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
13144+ call verify_cpu
13145+
13146 /* This could also be done in C code... */
13147 movl pmode_cr3, %eax
13148 movl %eax, %cr3
13149@@ -104,7 +107,7 @@ _start:
13150 movl %eax, %ecx
13151 orl %edx, %ecx
13152 jz 1f
13153- movl $0xc0000080, %ecx
13154+ mov $MSR_EFER, %ecx
13155 wrmsr
13156 1:
13157
13158@@ -114,6 +117,7 @@ _start:
13159 movl pmode_cr0, %eax
13160 movl %eax, %cr0
13161 jmp pmode_return
13162+# include "../../verify_cpu.S"
13163 #else
13164 pushw $0
13165 pushw trampoline_segment
13166diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13167index ca93638..7042f24 100644
13168--- a/arch/x86/kernel/acpi/sleep.c
13169+++ b/arch/x86/kernel/acpi/sleep.c
13170@@ -11,11 +11,12 @@
13171 #include <linux/cpumask.h>
13172 #include <asm/segment.h>
13173 #include <asm/desc.h>
13174+#include <asm/e820.h>
13175
13176 #include "realmode/wakeup.h"
13177 #include "sleep.h"
13178
13179-unsigned long acpi_wakeup_address;
13180+unsigned long acpi_wakeup_address = 0x2000;
13181 unsigned long acpi_realmode_flags;
13182
13183 /* address in low memory of the wakeup routine. */
13184@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13185 #else /* CONFIG_64BIT */
13186 header->trampoline_segment = setup_trampoline() >> 4;
13187 #ifdef CONFIG_SMP
13188- stack_start.sp = temp_stack + sizeof(temp_stack);
13189+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13190+
13191+ pax_open_kernel();
13192 early_gdt_descr.address =
13193 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13194+ pax_close_kernel();
13195+
13196 initial_gs = per_cpu_offset(smp_processor_id());
13197 #endif
13198 initial_code = (unsigned long)wakeup_long64;
13199@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13200 return;
13201 }
13202
13203- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13204-
13205- if (!acpi_realmode) {
13206- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13207- return;
13208- }
13209-
13210- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13211+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13212+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13213 }
13214
13215
13216diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13217index 8ded418..079961e 100644
13218--- a/arch/x86/kernel/acpi/wakeup_32.S
13219+++ b/arch/x86/kernel/acpi/wakeup_32.S
13220@@ -30,13 +30,11 @@ wakeup_pmode_return:
13221 # and restore the stack ... but you need gdt for this to work
13222 movl saved_context_esp, %esp
13223
13224- movl %cs:saved_magic, %eax
13225- cmpl $0x12345678, %eax
13226+ cmpl $0x12345678, saved_magic
13227 jne bogus_magic
13228
13229 # jump to place where we left off
13230- movl saved_eip, %eax
13231- jmp *%eax
13232+ jmp *(saved_eip)
13233
13234 bogus_magic:
13235 jmp bogus_magic
13236diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13237index de7353c..075da5f 100644
13238--- a/arch/x86/kernel/alternative.c
13239+++ b/arch/x86/kernel/alternative.c
13240@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13241
13242 BUG_ON(p->len > MAX_PATCH_LEN);
13243 /* prep the buffer with the original instructions */
13244- memcpy(insnbuf, p->instr, p->len);
13245+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13246 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13247 (unsigned long)p->instr, p->len);
13248
13249@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13250 if (smp_alt_once)
13251 free_init_pages("SMP alternatives",
13252 (unsigned long)__smp_locks,
13253- (unsigned long)__smp_locks_end);
13254+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13255
13256 restart_nmi();
13257 }
13258@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13259 * instructions. And on the local CPU you need to be protected again NMI or MCE
13260 * handlers seeing an inconsistent instruction while you patch.
13261 */
13262-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13263+static void *__kprobes text_poke_early(void *addr, const void *opcode,
13264 size_t len)
13265 {
13266 unsigned long flags;
13267 local_irq_save(flags);
13268- memcpy(addr, opcode, len);
13269+
13270+ pax_open_kernel();
13271+ memcpy(ktla_ktva(addr), opcode, len);
13272 sync_core();
13273+ pax_close_kernel();
13274+
13275 local_irq_restore(flags);
13276 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13277 that causes hangs on some VIA CPUs. */
13278@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13279 */
13280 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13281 {
13282- unsigned long flags;
13283- char *vaddr;
13284+ unsigned char *vaddr = ktla_ktva(addr);
13285 struct page *pages[2];
13286- int i;
13287+ size_t i;
13288
13289 if (!core_kernel_text((unsigned long)addr)) {
13290- pages[0] = vmalloc_to_page(addr);
13291- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13292+ pages[0] = vmalloc_to_page(vaddr);
13293+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13294 } else {
13295- pages[0] = virt_to_page(addr);
13296+ pages[0] = virt_to_page(vaddr);
13297 WARN_ON(!PageReserved(pages[0]));
13298- pages[1] = virt_to_page(addr + PAGE_SIZE);
13299+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13300 }
13301 BUG_ON(!pages[0]);
13302- local_irq_save(flags);
13303- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13304- if (pages[1])
13305- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13306- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13307- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13308- clear_fixmap(FIX_TEXT_POKE0);
13309- if (pages[1])
13310- clear_fixmap(FIX_TEXT_POKE1);
13311- local_flush_tlb();
13312- sync_core();
13313- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13314- that causes hangs on some VIA CPUs. */
13315+ text_poke_early(addr, opcode, len);
13316 for (i = 0; i < len; i++)
13317- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13318- local_irq_restore(flags);
13319+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13320 return addr;
13321 }
13322diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13323index 3a44b75..1601800 100644
13324--- a/arch/x86/kernel/amd_iommu.c
13325+++ b/arch/x86/kernel/amd_iommu.c
13326@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13327 }
13328 }
13329
13330-static struct dma_map_ops amd_iommu_dma_ops = {
13331+static const struct dma_map_ops amd_iommu_dma_ops = {
13332 .alloc_coherent = alloc_coherent,
13333 .free_coherent = free_coherent,
13334 .map_page = map_page,
13335diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13336index 1d2d670..8e3f477 100644
13337--- a/arch/x86/kernel/apic/apic.c
13338+++ b/arch/x86/kernel/apic/apic.c
13339@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13340 /*
13341 * Debug level, exported for io_apic.c
13342 */
13343-unsigned int apic_verbosity;
13344+int apic_verbosity;
13345
13346 int pic_mode;
13347
13348@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13349 apic_write(APIC_ESR, 0);
13350 v1 = apic_read(APIC_ESR);
13351 ack_APIC_irq();
13352- atomic_inc(&irq_err_count);
13353+ atomic_inc_unchecked(&irq_err_count);
13354
13355 /*
13356 * Here is what the APIC error bits mean:
13357@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13358 u16 *bios_cpu_apicid;
13359 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13360
13361+ pax_track_stack();
13362+
13363 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13364 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13365
13366diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13367index 8928d97..f799cea 100644
13368--- a/arch/x86/kernel/apic/io_apic.c
13369+++ b/arch/x86/kernel/apic/io_apic.c
13370@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13371 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13372 GFP_ATOMIC);
13373 if (!ioapic_entries)
13374- return 0;
13375+ return NULL;
13376
13377 for (apic = 0; apic < nr_ioapics; apic++) {
13378 ioapic_entries[apic] =
13379@@ -733,7 +733,7 @@ nomem:
13380 kfree(ioapic_entries[apic]);
13381 kfree(ioapic_entries);
13382
13383- return 0;
13384+ return NULL;
13385 }
13386
13387 /*
13388@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13389 }
13390 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13391
13392-void lock_vector_lock(void)
13393+void lock_vector_lock(void) __acquires(vector_lock)
13394 {
13395 /* Used to the online set of cpus does not change
13396 * during assign_irq_vector.
13397@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13398 spin_lock(&vector_lock);
13399 }
13400
13401-void unlock_vector_lock(void)
13402+void unlock_vector_lock(void) __releases(vector_lock)
13403 {
13404 spin_unlock(&vector_lock);
13405 }
13406@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13407 ack_APIC_irq();
13408 }
13409
13410-atomic_t irq_mis_count;
13411+atomic_unchecked_t irq_mis_count;
13412
13413 static void ack_apic_level(unsigned int irq)
13414 {
13415@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13416
13417 /* Tail end of version 0x11 I/O APIC bug workaround */
13418 if (!(v & (1 << (i & 0x1f)))) {
13419- atomic_inc(&irq_mis_count);
13420+ atomic_inc_unchecked(&irq_mis_count);
13421 spin_lock(&ioapic_lock);
13422 __mask_and_edge_IO_APIC_irq(cfg);
13423 __unmask_and_level_IO_APIC_irq(cfg);
13424diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13425index 151ace6..f317474 100644
13426--- a/arch/x86/kernel/apm_32.c
13427+++ b/arch/x86/kernel/apm_32.c
13428@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13429 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13430 * even though they are called in protected mode.
13431 */
13432-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13433+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13434 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13435
13436 static const char driver_version[] = "1.16ac"; /* no spaces */
13437@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13438 BUG_ON(cpu != 0);
13439 gdt = get_cpu_gdt_table(cpu);
13440 save_desc_40 = gdt[0x40 / 8];
13441+
13442+ pax_open_kernel();
13443 gdt[0x40 / 8] = bad_bios_desc;
13444+ pax_close_kernel();
13445
13446 apm_irq_save(flags);
13447 APM_DO_SAVE_SEGS;
13448@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13449 &call->esi);
13450 APM_DO_RESTORE_SEGS;
13451 apm_irq_restore(flags);
13452+
13453+ pax_open_kernel();
13454 gdt[0x40 / 8] = save_desc_40;
13455+ pax_close_kernel();
13456+
13457 put_cpu();
13458
13459 return call->eax & 0xff;
13460@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13461 BUG_ON(cpu != 0);
13462 gdt = get_cpu_gdt_table(cpu);
13463 save_desc_40 = gdt[0x40 / 8];
13464+
13465+ pax_open_kernel();
13466 gdt[0x40 / 8] = bad_bios_desc;
13467+ pax_close_kernel();
13468
13469 apm_irq_save(flags);
13470 APM_DO_SAVE_SEGS;
13471@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13472 &call->eax);
13473 APM_DO_RESTORE_SEGS;
13474 apm_irq_restore(flags);
13475+
13476+ pax_open_kernel();
13477 gdt[0x40 / 8] = save_desc_40;
13478+ pax_close_kernel();
13479+
13480 put_cpu();
13481 return error;
13482 }
13483@@ -975,7 +989,7 @@ recalc:
13484
13485 static void apm_power_off(void)
13486 {
13487- unsigned char po_bios_call[] = {
13488+ const unsigned char po_bios_call[] = {
13489 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13490 0x8e, 0xd0, /* movw ax,ss */
13491 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13492@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13493 * code to that CPU.
13494 */
13495 gdt = get_cpu_gdt_table(0);
13496+
13497+ pax_open_kernel();
13498 set_desc_base(&gdt[APM_CS >> 3],
13499 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13500 set_desc_base(&gdt[APM_CS_16 >> 3],
13501 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13502 set_desc_base(&gdt[APM_DS >> 3],
13503 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13504+ pax_close_kernel();
13505
13506 proc_create("apm", 0, NULL, &apm_file_ops);
13507
13508diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13509index dfdbf64..9b2b6ce 100644
13510--- a/arch/x86/kernel/asm-offsets_32.c
13511+++ b/arch/x86/kernel/asm-offsets_32.c
13512@@ -51,7 +51,6 @@ void foo(void)
13513 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13514 BLANK();
13515
13516- OFFSET(TI_task, thread_info, task);
13517 OFFSET(TI_exec_domain, thread_info, exec_domain);
13518 OFFSET(TI_flags, thread_info, flags);
13519 OFFSET(TI_status, thread_info, status);
13520@@ -60,6 +59,8 @@ void foo(void)
13521 OFFSET(TI_restart_block, thread_info, restart_block);
13522 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13523 OFFSET(TI_cpu, thread_info, cpu);
13524+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13525+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13526 BLANK();
13527
13528 OFFSET(GDS_size, desc_ptr, size);
13529@@ -99,6 +100,7 @@ void foo(void)
13530
13531 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13532 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13533+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13534 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13535 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13536 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13537@@ -115,6 +117,11 @@ void foo(void)
13538 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13539 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13540 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13541+
13542+#ifdef CONFIG_PAX_KERNEXEC
13543+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13544+#endif
13545+
13546 #endif
13547
13548 #ifdef CONFIG_XEN
13549diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13550index 4a6aeed..371de20 100644
13551--- a/arch/x86/kernel/asm-offsets_64.c
13552+++ b/arch/x86/kernel/asm-offsets_64.c
13553@@ -44,6 +44,8 @@ int main(void)
13554 ENTRY(addr_limit);
13555 ENTRY(preempt_count);
13556 ENTRY(status);
13557+ ENTRY(lowest_stack);
13558+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13559 #ifdef CONFIG_IA32_EMULATION
13560 ENTRY(sysenter_return);
13561 #endif
13562@@ -63,6 +65,18 @@ int main(void)
13563 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13564 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13565 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13566+
13567+#ifdef CONFIG_PAX_KERNEXEC
13568+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13569+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13570+#endif
13571+
13572+#ifdef CONFIG_PAX_MEMORY_UDEREF
13573+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13574+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13575+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13576+#endif
13577+
13578 #endif
13579
13580
13581@@ -115,6 +129,7 @@ int main(void)
13582 ENTRY(cr8);
13583 BLANK();
13584 #undef ENTRY
13585+ DEFINE(TSS_size, sizeof(struct tss_struct));
13586 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13587 BLANK();
13588 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13589@@ -130,6 +145,7 @@ int main(void)
13590
13591 BLANK();
13592 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13593+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13594 #ifdef CONFIG_XEN
13595 BLANK();
13596 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13597diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13598index ff502cc..dc5133e 100644
13599--- a/arch/x86/kernel/cpu/Makefile
13600+++ b/arch/x86/kernel/cpu/Makefile
13601@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13602 CFLAGS_REMOVE_common.o = -pg
13603 endif
13604
13605-# Make sure load_percpu_segment has no stackprotector
13606-nostackp := $(call cc-option, -fno-stack-protector)
13607-CFLAGS_common.o := $(nostackp)
13608-
13609 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13610 obj-y += proc.o capflags.o powerflags.o common.o
13611 obj-y += vmware.o hypervisor.o sched.o
13612diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13613index 6e082dc..a0b5f36 100644
13614--- a/arch/x86/kernel/cpu/amd.c
13615+++ b/arch/x86/kernel/cpu/amd.c
13616@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13617 unsigned int size)
13618 {
13619 /* AMD errata T13 (order #21922) */
13620- if ((c->x86 == 6)) {
13621+ if (c->x86 == 6) {
13622 /* Duron Rev A0 */
13623 if (c->x86_model == 3 && c->x86_mask == 0)
13624 size = 64;
13625diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13626index 4e34d10..ba6bc97 100644
13627--- a/arch/x86/kernel/cpu/common.c
13628+++ b/arch/x86/kernel/cpu/common.c
13629@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13630
13631 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13632
13633-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13634-#ifdef CONFIG_X86_64
13635- /*
13636- * We need valid kernel segments for data and code in long mode too
13637- * IRET will check the segment types kkeil 2000/10/28
13638- * Also sysret mandates a special GDT layout
13639- *
13640- * TLS descriptors are currently at a different place compared to i386.
13641- * Hopefully nobody expects them at a fixed place (Wine?)
13642- */
13643- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13644- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13645- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13646- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13647- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13648- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13649-#else
13650- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13651- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13652- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13653- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13654- /*
13655- * Segments used for calling PnP BIOS have byte granularity.
13656- * They code segments and data segments have fixed 64k limits,
13657- * the transfer segment sizes are set at run time.
13658- */
13659- /* 32-bit code */
13660- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13661- /* 16-bit code */
13662- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13663- /* 16-bit data */
13664- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13665- /* 16-bit data */
13666- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13667- /* 16-bit data */
13668- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13669- /*
13670- * The APM segments have byte granularity and their bases
13671- * are set at run time. All have 64k limits.
13672- */
13673- /* 32-bit code */
13674- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13675- /* 16-bit code */
13676- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13677- /* data */
13678- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13679-
13680- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13681- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13682- GDT_STACK_CANARY_INIT
13683-#endif
13684-} };
13685-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13686-
13687 static int __init x86_xsave_setup(char *s)
13688 {
13689 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13690@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13691 {
13692 struct desc_ptr gdt_descr;
13693
13694- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13695+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13696 gdt_descr.size = GDT_SIZE - 1;
13697 load_gdt(&gdt_descr);
13698 /* Reload the per-cpu base */
13699@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13700 /* Filter out anything that depends on CPUID levels we don't have */
13701 filter_cpuid_features(c, true);
13702
13703+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13704+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13705+#endif
13706+
13707 /* If the model name is still unset, do table lookup. */
13708 if (!c->x86_model_id[0]) {
13709 const char *p;
13710@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13711 }
13712 __setup("clearcpuid=", setup_disablecpuid);
13713
13714+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13715+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13716+
13717 #ifdef CONFIG_X86_64
13718 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13719
13720@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13721 EXPORT_PER_CPU_SYMBOL(current_task);
13722
13723 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13724- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13725+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13726 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13727
13728 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13729@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13730 {
13731 memset(regs, 0, sizeof(struct pt_regs));
13732 regs->fs = __KERNEL_PERCPU;
13733- regs->gs = __KERNEL_STACK_CANARY;
13734+ savesegment(gs, regs->gs);
13735
13736 return regs;
13737 }
13738@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13739 int i;
13740
13741 cpu = stack_smp_processor_id();
13742- t = &per_cpu(init_tss, cpu);
13743+ t = init_tss + cpu;
13744 orig_ist = &per_cpu(orig_ist, cpu);
13745
13746 #ifdef CONFIG_NUMA
13747@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13748 switch_to_new_gdt(cpu);
13749 loadsegment(fs, 0);
13750
13751- load_idt((const struct desc_ptr *)&idt_descr);
13752+ load_idt(&idt_descr);
13753
13754 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13755 syscall_init();
13756@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13757 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13758 barrier();
13759
13760- check_efer();
13761 if (cpu != 0)
13762 enable_x2apic();
13763
13764@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13765 {
13766 int cpu = smp_processor_id();
13767 struct task_struct *curr = current;
13768- struct tss_struct *t = &per_cpu(init_tss, cpu);
13769+ struct tss_struct *t = init_tss + cpu;
13770 struct thread_struct *thread = &curr->thread;
13771
13772 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13773diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13774index 6a77cca..4f4fca0 100644
13775--- a/arch/x86/kernel/cpu/intel.c
13776+++ b/arch/x86/kernel/cpu/intel.c
13777@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13778 * Update the IDT descriptor and reload the IDT so that
13779 * it uses the read-only mapped virtual address.
13780 */
13781- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13782+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13783 load_idt(&idt_descr);
13784 }
13785 #endif
13786diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13787index 417990f..96dc36b 100644
13788--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
13789+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
13790@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13791 return ret;
13792 }
13793
13794-static struct sysfs_ops sysfs_ops = {
13795+static const struct sysfs_ops sysfs_ops = {
13796 .show = show,
13797 .store = store,
13798 };
13799diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13800index 472763d..9831e11 100644
13801--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13802+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13803@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13804 static int inject_init(void)
13805 {
13806 printk(KERN_INFO "Machine check injector initialized\n");
13807- mce_chrdev_ops.write = mce_write;
13808+ pax_open_kernel();
13809+ *(void **)&mce_chrdev_ops.write = mce_write;
13810+ pax_close_kernel();
13811 register_die_notifier(&mce_raise_nb);
13812 return 0;
13813 }
13814diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13815index 0f16a2b..21740f5 100644
13816--- a/arch/x86/kernel/cpu/mcheck/mce.c
13817+++ b/arch/x86/kernel/cpu/mcheck/mce.c
13818@@ -43,6 +43,7 @@
13819 #include <asm/ipi.h>
13820 #include <asm/mce.h>
13821 #include <asm/msr.h>
13822+#include <asm/local.h>
13823
13824 #include "mce-internal.h"
13825
13826@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
13827 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13828 m->cs, m->ip);
13829
13830- if (m->cs == __KERNEL_CS)
13831+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13832 print_symbol("{%s}", m->ip);
13833 pr_cont("\n");
13834 }
13835@@ -221,10 +222,10 @@ static void print_mce_tail(void)
13836
13837 #define PANIC_TIMEOUT 5 /* 5 seconds */
13838
13839-static atomic_t mce_paniced;
13840+static atomic_unchecked_t mce_paniced;
13841
13842 static int fake_panic;
13843-static atomic_t mce_fake_paniced;
13844+static atomic_unchecked_t mce_fake_paniced;
13845
13846 /* Panic in progress. Enable interrupts and wait for final IPI */
13847 static void wait_for_panic(void)
13848@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13849 /*
13850 * Make sure only one CPU runs in machine check panic
13851 */
13852- if (atomic_inc_return(&mce_paniced) > 1)
13853+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13854 wait_for_panic();
13855 barrier();
13856
13857@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13858 console_verbose();
13859 } else {
13860 /* Don't log too much for fake panic */
13861- if (atomic_inc_return(&mce_fake_paniced) > 1)
13862+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13863 return;
13864 }
13865 print_mce_head();
13866@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
13867 * might have been modified by someone else.
13868 */
13869 rmb();
13870- if (atomic_read(&mce_paniced))
13871+ if (atomic_read_unchecked(&mce_paniced))
13872 wait_for_panic();
13873 if (!monarch_timeout)
13874 goto out;
13875@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13876 }
13877
13878 /* Call the installed machine check handler for this CPU setup. */
13879-void (*machine_check_vector)(struct pt_regs *, long error_code) =
13880+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13881 unexpected_machine_check;
13882
13883 /*
13884@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13885 return;
13886 }
13887
13888+ pax_open_kernel();
13889 machine_check_vector = do_machine_check;
13890+ pax_close_kernel();
13891
13892 mce_init();
13893 mce_cpu_features(c);
13894@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13895 */
13896
13897 static DEFINE_SPINLOCK(mce_state_lock);
13898-static int open_count; /* #times opened */
13899+static local_t open_count; /* #times opened */
13900 static int open_exclu; /* already open exclusive? */
13901
13902 static int mce_open(struct inode *inode, struct file *file)
13903 {
13904 spin_lock(&mce_state_lock);
13905
13906- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
13907+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
13908 spin_unlock(&mce_state_lock);
13909
13910 return -EBUSY;
13911@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
13912
13913 if (file->f_flags & O_EXCL)
13914 open_exclu = 1;
13915- open_count++;
13916+ local_inc(&open_count);
13917
13918 spin_unlock(&mce_state_lock);
13919
13920@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
13921 {
13922 spin_lock(&mce_state_lock);
13923
13924- open_count--;
13925+ local_dec(&open_count);
13926 open_exclu = 0;
13927
13928 spin_unlock(&mce_state_lock);
13929@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
13930 static void mce_reset(void)
13931 {
13932 cpu_missing = 0;
13933- atomic_set(&mce_fake_paniced, 0);
13934+ atomic_set_unchecked(&mce_fake_paniced, 0);
13935 atomic_set(&mce_executing, 0);
13936 atomic_set(&mce_callin, 0);
13937 atomic_set(&global_nwo, 0);
13938diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13939index ef3cd31..9d2f6ab 100644
13940--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
13941+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13942@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13943 return ret;
13944 }
13945
13946-static struct sysfs_ops threshold_ops = {
13947+static const struct sysfs_ops threshold_ops = {
13948 .show = show,
13949 .store = store,
13950 };
13951diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13952index 5c0e653..0882b0a 100644
13953--- a/arch/x86/kernel/cpu/mcheck/p5.c
13954+++ b/arch/x86/kernel/cpu/mcheck/p5.c
13955@@ -12,6 +12,7 @@
13956 #include <asm/system.h>
13957 #include <asm/mce.h>
13958 #include <asm/msr.h>
13959+#include <asm/pgtable.h>
13960
13961 /* By default disabled */
13962 int mce_p5_enabled __read_mostly;
13963@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13964 if (!cpu_has(c, X86_FEATURE_MCE))
13965 return;
13966
13967+ pax_open_kernel();
13968 machine_check_vector = pentium_machine_check;
13969+ pax_close_kernel();
13970 /* Make sure the vector pointer is visible before we enable MCEs: */
13971 wmb();
13972
13973diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13974index 54060f5..c1a7577 100644
13975--- a/arch/x86/kernel/cpu/mcheck/winchip.c
13976+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13977@@ -11,6 +11,7 @@
13978 #include <asm/system.h>
13979 #include <asm/mce.h>
13980 #include <asm/msr.h>
13981+#include <asm/pgtable.h>
13982
13983 /* Machine check handler for WinChip C6: */
13984 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13985@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13986 {
13987 u32 lo, hi;
13988
13989+ pax_open_kernel();
13990 machine_check_vector = winchip_machine_check;
13991+ pax_close_kernel();
13992 /* Make sure the vector pointer is visible before we enable MCEs: */
13993 wmb();
13994
13995diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
13996index 33af141..92ba9cd 100644
13997--- a/arch/x86/kernel/cpu/mtrr/amd.c
13998+++ b/arch/x86/kernel/cpu/mtrr/amd.c
13999@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14000 return 0;
14001 }
14002
14003-static struct mtrr_ops amd_mtrr_ops = {
14004+static const struct mtrr_ops amd_mtrr_ops = {
14005 .vendor = X86_VENDOR_AMD,
14006 .set = amd_set_mtrr,
14007 .get = amd_get_mtrr,
14008diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14009index de89f14..316fe3e 100644
14010--- a/arch/x86/kernel/cpu/mtrr/centaur.c
14011+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14012@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14013 return 0;
14014 }
14015
14016-static struct mtrr_ops centaur_mtrr_ops = {
14017+static const struct mtrr_ops centaur_mtrr_ops = {
14018 .vendor = X86_VENDOR_CENTAUR,
14019 .set = centaur_set_mcr,
14020 .get = centaur_get_mcr,
14021diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14022index 228d982..68a3343 100644
14023--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14024+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14025@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14026 post_set();
14027 }
14028
14029-static struct mtrr_ops cyrix_mtrr_ops = {
14030+static const struct mtrr_ops cyrix_mtrr_ops = {
14031 .vendor = X86_VENDOR_CYRIX,
14032 .set_all = cyrix_set_all,
14033 .set = cyrix_set_arr,
14034diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14035index 55da0c5..4d75584 100644
14036--- a/arch/x86/kernel/cpu/mtrr/generic.c
14037+++ b/arch/x86/kernel/cpu/mtrr/generic.c
14038@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14039 /*
14040 * Generic structure...
14041 */
14042-struct mtrr_ops generic_mtrr_ops = {
14043+const struct mtrr_ops generic_mtrr_ops = {
14044 .use_intel_if = 1,
14045 .set_all = generic_set_all,
14046 .get = generic_get_mtrr,
14047diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14048index fd60f09..c94ef52 100644
14049--- a/arch/x86/kernel/cpu/mtrr/main.c
14050+++ b/arch/x86/kernel/cpu/mtrr/main.c
14051@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14052 u64 size_or_mask, size_and_mask;
14053 static bool mtrr_aps_delayed_init;
14054
14055-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14056+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14057
14058-struct mtrr_ops *mtrr_if;
14059+const struct mtrr_ops *mtrr_if;
14060
14061 static void set_mtrr(unsigned int reg, unsigned long base,
14062 unsigned long size, mtrr_type type);
14063
14064-void set_mtrr_ops(struct mtrr_ops *ops)
14065+void set_mtrr_ops(const struct mtrr_ops *ops)
14066 {
14067 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14068 mtrr_ops[ops->vendor] = ops;
14069diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14070index a501dee..816c719 100644
14071--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14072+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14073@@ -25,14 +25,14 @@ struct mtrr_ops {
14074 int (*validate_add_page)(unsigned long base, unsigned long size,
14075 unsigned int type);
14076 int (*have_wrcomb)(void);
14077-};
14078+} __do_const;
14079
14080 extern int generic_get_free_region(unsigned long base, unsigned long size,
14081 int replace_reg);
14082 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14083 unsigned int type);
14084
14085-extern struct mtrr_ops generic_mtrr_ops;
14086+extern const struct mtrr_ops generic_mtrr_ops;
14087
14088 extern int positive_have_wrcomb(void);
14089
14090@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14091 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14092 void get_mtrr_state(void);
14093
14094-extern void set_mtrr_ops(struct mtrr_ops *ops);
14095+extern void set_mtrr_ops(const struct mtrr_ops *ops);
14096
14097 extern u64 size_or_mask, size_and_mask;
14098-extern struct mtrr_ops *mtrr_if;
14099+extern const struct mtrr_ops *mtrr_if;
14100
14101 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14102 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14103diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14104index 0ff02ca..fc49a60 100644
14105--- a/arch/x86/kernel/cpu/perf_event.c
14106+++ b/arch/x86/kernel/cpu/perf_event.c
14107@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14108 * count to the generic event atomically:
14109 */
14110 again:
14111- prev_raw_count = atomic64_read(&hwc->prev_count);
14112+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14113 rdmsrl(hwc->event_base + idx, new_raw_count);
14114
14115- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14116+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14117 new_raw_count) != prev_raw_count)
14118 goto again;
14119
14120@@ -741,7 +741,7 @@ again:
14121 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14122 delta >>= shift;
14123
14124- atomic64_add(delta, &event->count);
14125+ atomic64_add_unchecked(delta, &event->count);
14126 atomic64_sub(delta, &hwc->period_left);
14127
14128 return new_raw_count;
14129@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14130 * The hw event starts counting from this event offset,
14131 * mark it to be able to extra future deltas:
14132 */
14133- atomic64_set(&hwc->prev_count, (u64)-left);
14134+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14135
14136 err = checking_wrmsrl(hwc->event_base + idx,
14137 (u64)(-left) & x86_pmu.event_mask);
14138@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14139 break;
14140
14141 callchain_store(entry, frame.return_address);
14142- fp = frame.next_frame;
14143+ fp = (__force const void __user *)frame.next_frame;
14144 }
14145 }
14146
14147diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14148index 898df97..9e82503 100644
14149--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14150+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14151@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14152
14153 /* Interface defining a CPU specific perfctr watchdog */
14154 struct wd_ops {
14155- int (*reserve)(void);
14156- void (*unreserve)(void);
14157- int (*setup)(unsigned nmi_hz);
14158- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14159- void (*stop)(void);
14160+ int (* const reserve)(void);
14161+ void (* const unreserve)(void);
14162+ int (* const setup)(unsigned nmi_hz);
14163+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14164+ void (* const stop)(void);
14165 unsigned perfctr;
14166 unsigned evntsel;
14167 u64 checkbit;
14168@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14169 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14170 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14171
14172+/* cannot be const */
14173 static struct wd_ops intel_arch_wd_ops;
14174
14175 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14176@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14177 return 1;
14178 }
14179
14180+/* cannot be const */
14181 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14182 .reserve = single_msr_reserve,
14183 .unreserve = single_msr_unreserve,
14184diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14185index ff95824..2ffdcb5 100644
14186--- a/arch/x86/kernel/crash.c
14187+++ b/arch/x86/kernel/crash.c
14188@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14189 regs = args->regs;
14190
14191 #ifdef CONFIG_X86_32
14192- if (!user_mode_vm(regs)) {
14193+ if (!user_mode(regs)) {
14194 crash_fixup_ss_esp(&fixed_regs, regs);
14195 regs = &fixed_regs;
14196 }
14197diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14198index 37250fe..bf2ec74 100644
14199--- a/arch/x86/kernel/doublefault_32.c
14200+++ b/arch/x86/kernel/doublefault_32.c
14201@@ -11,7 +11,7 @@
14202
14203 #define DOUBLEFAULT_STACKSIZE (1024)
14204 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14205-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14206+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14207
14208 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14209
14210@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14211 unsigned long gdt, tss;
14212
14213 store_gdt(&gdt_desc);
14214- gdt = gdt_desc.address;
14215+ gdt = (unsigned long)gdt_desc.address;
14216
14217 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14218
14219@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14220 /* 0x2 bit is always set */
14221 .flags = X86_EFLAGS_SF | 0x2,
14222 .sp = STACK_START,
14223- .es = __USER_DS,
14224+ .es = __KERNEL_DS,
14225 .cs = __KERNEL_CS,
14226 .ss = __KERNEL_DS,
14227- .ds = __USER_DS,
14228+ .ds = __KERNEL_DS,
14229 .fs = __KERNEL_PERCPU,
14230
14231 .__cr3 = __pa_nodebug(swapper_pg_dir),
14232diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14233index 2d8a371..4fa6ae6 100644
14234--- a/arch/x86/kernel/dumpstack.c
14235+++ b/arch/x86/kernel/dumpstack.c
14236@@ -2,6 +2,9 @@
14237 * Copyright (C) 1991, 1992 Linus Torvalds
14238 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14239 */
14240+#ifdef CONFIG_GRKERNSEC_HIDESYM
14241+#define __INCLUDED_BY_HIDESYM 1
14242+#endif
14243 #include <linux/kallsyms.h>
14244 #include <linux/kprobes.h>
14245 #include <linux/uaccess.h>
14246@@ -28,7 +31,7 @@ static int die_counter;
14247
14248 void printk_address(unsigned long address, int reliable)
14249 {
14250- printk(" [<%p>] %s%pS\n", (void *) address,
14251+ printk(" [<%p>] %s%pA\n", (void *) address,
14252 reliable ? "" : "? ", (void *) address);
14253 }
14254
14255@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14256 static void
14257 print_ftrace_graph_addr(unsigned long addr, void *data,
14258 const struct stacktrace_ops *ops,
14259- struct thread_info *tinfo, int *graph)
14260+ struct task_struct *task, int *graph)
14261 {
14262- struct task_struct *task = tinfo->task;
14263 unsigned long ret_addr;
14264 int index = task->curr_ret_stack;
14265
14266@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14267 static inline void
14268 print_ftrace_graph_addr(unsigned long addr, void *data,
14269 const struct stacktrace_ops *ops,
14270- struct thread_info *tinfo, int *graph)
14271+ struct task_struct *task, int *graph)
14272 { }
14273 #endif
14274
14275@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14276 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14277 */
14278
14279-static inline int valid_stack_ptr(struct thread_info *tinfo,
14280- void *p, unsigned int size, void *end)
14281+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14282 {
14283- void *t = tinfo;
14284 if (end) {
14285 if (p < end && p >= (end-THREAD_SIZE))
14286 return 1;
14287@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14288 }
14289
14290 unsigned long
14291-print_context_stack(struct thread_info *tinfo,
14292+print_context_stack(struct task_struct *task, void *stack_start,
14293 unsigned long *stack, unsigned long bp,
14294 const struct stacktrace_ops *ops, void *data,
14295 unsigned long *end, int *graph)
14296 {
14297 struct stack_frame *frame = (struct stack_frame *)bp;
14298
14299- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14300+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14301 unsigned long addr;
14302
14303 addr = *stack;
14304@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14305 } else {
14306 ops->address(data, addr, 0);
14307 }
14308- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14309+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14310 }
14311 stack++;
14312 }
14313@@ -180,7 +180,7 @@ void dump_stack(void)
14314 #endif
14315
14316 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14317- current->pid, current->comm, print_tainted(),
14318+ task_pid_nr(current), current->comm, print_tainted(),
14319 init_utsname()->release,
14320 (int)strcspn(init_utsname()->version, " "),
14321 init_utsname()->version);
14322@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14323 return flags;
14324 }
14325
14326+extern void gr_handle_kernel_exploit(void);
14327+
14328 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14329 {
14330 if (regs && kexec_should_crash(current))
14331@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14332 panic("Fatal exception in interrupt");
14333 if (panic_on_oops)
14334 panic("Fatal exception");
14335- do_exit(signr);
14336+
14337+ gr_handle_kernel_exploit();
14338+
14339+ do_group_exit(signr);
14340 }
14341
14342 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14343@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14344 unsigned long flags = oops_begin();
14345 int sig = SIGSEGV;
14346
14347- if (!user_mode_vm(regs))
14348+ if (!user_mode(regs))
14349 report_bug(regs->ip, regs);
14350
14351 if (__die(str, regs, err))
14352diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14353index 81086c2..13e8b17 100644
14354--- a/arch/x86/kernel/dumpstack.h
14355+++ b/arch/x86/kernel/dumpstack.h
14356@@ -15,7 +15,7 @@
14357 #endif
14358
14359 extern unsigned long
14360-print_context_stack(struct thread_info *tinfo,
14361+print_context_stack(struct task_struct *task, void *stack_start,
14362 unsigned long *stack, unsigned long bp,
14363 const struct stacktrace_ops *ops, void *data,
14364 unsigned long *end, int *graph);
14365diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14366index f7dd2a7..504f53b 100644
14367--- a/arch/x86/kernel/dumpstack_32.c
14368+++ b/arch/x86/kernel/dumpstack_32.c
14369@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14370 #endif
14371
14372 for (;;) {
14373- struct thread_info *context;
14374+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14375+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14376
14377- context = (struct thread_info *)
14378- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14379- bp = print_context_stack(context, stack, bp, ops,
14380- data, NULL, &graph);
14381-
14382- stack = (unsigned long *)context->previous_esp;
14383- if (!stack)
14384+ if (stack_start == task_stack_page(task))
14385 break;
14386+ stack = *(unsigned long **)stack_start;
14387 if (ops->stack(data, "IRQ") < 0)
14388 break;
14389 touch_nmi_watchdog();
14390@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14391 * When in-kernel, we also print out the stack and code at the
14392 * time of the fault..
14393 */
14394- if (!user_mode_vm(regs)) {
14395+ if (!user_mode(regs)) {
14396 unsigned int code_prologue = code_bytes * 43 / 64;
14397 unsigned int code_len = code_bytes;
14398 unsigned char c;
14399 u8 *ip;
14400+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14401
14402 printk(KERN_EMERG "Stack:\n");
14403 show_stack_log_lvl(NULL, regs, &regs->sp,
14404@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14405
14406 printk(KERN_EMERG "Code: ");
14407
14408- ip = (u8 *)regs->ip - code_prologue;
14409+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14410 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14411 /* try starting at IP */
14412- ip = (u8 *)regs->ip;
14413+ ip = (u8 *)regs->ip + cs_base;
14414 code_len = code_len - code_prologue + 1;
14415 }
14416 for (i = 0; i < code_len; i++, ip++) {
14417@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14418 printk(" Bad EIP value.");
14419 break;
14420 }
14421- if (ip == (u8 *)regs->ip)
14422+ if (ip == (u8 *)regs->ip + cs_base)
14423 printk("<%02x> ", c);
14424 else
14425 printk("%02x ", c);
14426@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14427 printk("\n");
14428 }
14429
14430+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14431+void pax_check_alloca(unsigned long size)
14432+{
14433+ unsigned long sp = (unsigned long)&sp, stack_left;
14434+
14435+ /* all kernel stacks are of the same size */
14436+ stack_left = sp & (THREAD_SIZE - 1);
14437+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14438+}
14439+EXPORT_SYMBOL(pax_check_alloca);
14440+#endif
14441+
14442 int is_valid_bugaddr(unsigned long ip)
14443 {
14444 unsigned short ud2;
14445
14446+ ip = ktla_ktva(ip);
14447 if (ip < PAGE_OFFSET)
14448 return 0;
14449 if (probe_kernel_address((unsigned short *)ip, ud2))
14450diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14451index a071e6b..36cd585 100644
14452--- a/arch/x86/kernel/dumpstack_64.c
14453+++ b/arch/x86/kernel/dumpstack_64.c
14454@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14455 unsigned long *irq_stack_end =
14456 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14457 unsigned used = 0;
14458- struct thread_info *tinfo;
14459 int graph = 0;
14460+ void *stack_start;
14461
14462 if (!task)
14463 task = current;
14464@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14465 * current stack address. If the stacks consist of nested
14466 * exceptions
14467 */
14468- tinfo = task_thread_info(task);
14469 for (;;) {
14470 char *id;
14471 unsigned long *estack_end;
14472+
14473 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14474 &used, &id);
14475
14476@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14477 if (ops->stack(data, id) < 0)
14478 break;
14479
14480- bp = print_context_stack(tinfo, stack, bp, ops,
14481+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14482 data, estack_end, &graph);
14483 ops->stack(data, "<EOE>");
14484 /*
14485@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14486 if (stack >= irq_stack && stack < irq_stack_end) {
14487 if (ops->stack(data, "IRQ") < 0)
14488 break;
14489- bp = print_context_stack(tinfo, stack, bp,
14490+ bp = print_context_stack(task, irq_stack, stack, bp,
14491 ops, data, irq_stack_end, &graph);
14492 /*
14493 * We link to the next stack (which would be
14494@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14495 /*
14496 * This handles the process stack:
14497 */
14498- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14499+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14500+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14501 put_cpu();
14502 }
14503 EXPORT_SYMBOL(dump_trace);
14504@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14505 return ud2 == 0x0b0f;
14506 }
14507
14508+
14509+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14510+void pax_check_alloca(unsigned long size)
14511+{
14512+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14513+ unsigned cpu, used;
14514+ char *id;
14515+
14516+ /* check the process stack first */
14517+ stack_start = (unsigned long)task_stack_page(current);
14518+ stack_end = stack_start + THREAD_SIZE;
14519+ if (likely(stack_start <= sp && sp < stack_end)) {
14520+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14521+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14522+ return;
14523+ }
14524+
14525+ cpu = get_cpu();
14526+
14527+ /* check the irq stacks */
14528+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14529+ stack_start = stack_end - IRQ_STACK_SIZE;
14530+ if (stack_start <= sp && sp < stack_end) {
14531+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14532+ put_cpu();
14533+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14534+ return;
14535+ }
14536+
14537+ /* check the exception stacks */
14538+ used = 0;
14539+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14540+ stack_start = stack_end - EXCEPTION_STKSZ;
14541+ if (stack_end && stack_start <= sp && sp < stack_end) {
14542+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14543+ put_cpu();
14544+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14545+ return;
14546+ }
14547+
14548+ put_cpu();
14549+
14550+ /* unknown stack */
14551+ BUG();
14552+}
14553+EXPORT_SYMBOL(pax_check_alloca);
14554+#endif
14555diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14556index a89739a..95e0c48 100644
14557--- a/arch/x86/kernel/e820.c
14558+++ b/arch/x86/kernel/e820.c
14559@@ -733,7 +733,7 @@ struct early_res {
14560 };
14561 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14562 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14563- {}
14564+ { 0, 0, {0}, 0 }
14565 };
14566
14567 static int __init find_overlapped_early(u64 start, u64 end)
14568diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14569index b9c830c..1e41a96 100644
14570--- a/arch/x86/kernel/early_printk.c
14571+++ b/arch/x86/kernel/early_printk.c
14572@@ -7,6 +7,7 @@
14573 #include <linux/pci_regs.h>
14574 #include <linux/pci_ids.h>
14575 #include <linux/errno.h>
14576+#include <linux/sched.h>
14577 #include <asm/io.h>
14578 #include <asm/processor.h>
14579 #include <asm/fcntl.h>
14580@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14581 int n;
14582 va_list ap;
14583
14584+ pax_track_stack();
14585+
14586 va_start(ap, fmt);
14587 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14588 early_console->write(early_console, buf, n);
14589diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14590index 5cab48e..b025f9b 100644
14591--- a/arch/x86/kernel/efi_32.c
14592+++ b/arch/x86/kernel/efi_32.c
14593@@ -38,70 +38,56 @@
14594 */
14595
14596 static unsigned long efi_rt_eflags;
14597-static pgd_t efi_bak_pg_dir_pointer[2];
14598+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14599
14600-void efi_call_phys_prelog(void)
14601+void __init efi_call_phys_prelog(void)
14602 {
14603- unsigned long cr4;
14604- unsigned long temp;
14605 struct desc_ptr gdt_descr;
14606
14607+#ifdef CONFIG_PAX_KERNEXEC
14608+ struct desc_struct d;
14609+#endif
14610+
14611 local_irq_save(efi_rt_eflags);
14612
14613- /*
14614- * If I don't have PAE, I should just duplicate two entries in page
14615- * directory. If I have PAE, I just need to duplicate one entry in
14616- * page directory.
14617- */
14618- cr4 = read_cr4_safe();
14619-
14620- if (cr4 & X86_CR4_PAE) {
14621- efi_bak_pg_dir_pointer[0].pgd =
14622- swapper_pg_dir[pgd_index(0)].pgd;
14623- swapper_pg_dir[0].pgd =
14624- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14625- } else {
14626- efi_bak_pg_dir_pointer[0].pgd =
14627- swapper_pg_dir[pgd_index(0)].pgd;
14628- efi_bak_pg_dir_pointer[1].pgd =
14629- swapper_pg_dir[pgd_index(0x400000)].pgd;
14630- swapper_pg_dir[pgd_index(0)].pgd =
14631- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14632- temp = PAGE_OFFSET + 0x400000;
14633- swapper_pg_dir[pgd_index(0x400000)].pgd =
14634- swapper_pg_dir[pgd_index(temp)].pgd;
14635- }
14636+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14637+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14638+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14639
14640 /*
14641 * After the lock is released, the original page table is restored.
14642 */
14643 __flush_tlb_all();
14644
14645+#ifdef CONFIG_PAX_KERNEXEC
14646+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14647+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14648+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14649+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14650+#endif
14651+
14652 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14653 gdt_descr.size = GDT_SIZE - 1;
14654 load_gdt(&gdt_descr);
14655 }
14656
14657-void efi_call_phys_epilog(void)
14658+void __init efi_call_phys_epilog(void)
14659 {
14660- unsigned long cr4;
14661 struct desc_ptr gdt_descr;
14662
14663+#ifdef CONFIG_PAX_KERNEXEC
14664+ struct desc_struct d;
14665+
14666+ memset(&d, 0, sizeof d);
14667+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14668+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14669+#endif
14670+
14671 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14672 gdt_descr.size = GDT_SIZE - 1;
14673 load_gdt(&gdt_descr);
14674
14675- cr4 = read_cr4_safe();
14676-
14677- if (cr4 & X86_CR4_PAE) {
14678- swapper_pg_dir[pgd_index(0)].pgd =
14679- efi_bak_pg_dir_pointer[0].pgd;
14680- } else {
14681- swapper_pg_dir[pgd_index(0)].pgd =
14682- efi_bak_pg_dir_pointer[0].pgd;
14683- swapper_pg_dir[pgd_index(0x400000)].pgd =
14684- efi_bak_pg_dir_pointer[1].pgd;
14685- }
14686+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14687
14688 /*
14689 * After the lock is released, the original page table is restored.
14690diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14691index fbe66e6..c5c0dd2 100644
14692--- a/arch/x86/kernel/efi_stub_32.S
14693+++ b/arch/x86/kernel/efi_stub_32.S
14694@@ -6,7 +6,9 @@
14695 */
14696
14697 #include <linux/linkage.h>
14698+#include <linux/init.h>
14699 #include <asm/page_types.h>
14700+#include <asm/segment.h>
14701
14702 /*
14703 * efi_call_phys(void *, ...) is a function with variable parameters.
14704@@ -20,7 +22,7 @@
14705 * service functions will comply with gcc calling convention, too.
14706 */
14707
14708-.text
14709+__INIT
14710 ENTRY(efi_call_phys)
14711 /*
14712 * 0. The function can only be called in Linux kernel. So CS has been
14713@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14714 * The mapping of lower virtual memory has been created in prelog and
14715 * epilog.
14716 */
14717- movl $1f, %edx
14718- subl $__PAGE_OFFSET, %edx
14719- jmp *%edx
14720+ movl $(__KERNEXEC_EFI_DS), %edx
14721+ mov %edx, %ds
14722+ mov %edx, %es
14723+ mov %edx, %ss
14724+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14725 1:
14726
14727 /*
14728@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14729 * parameter 2, ..., param n. To make things easy, we save the return
14730 * address of efi_call_phys in a global variable.
14731 */
14732- popl %edx
14733- movl %edx, saved_return_addr
14734- /* get the function pointer into ECX*/
14735- popl %ecx
14736- movl %ecx, efi_rt_function_ptr
14737- movl $2f, %edx
14738- subl $__PAGE_OFFSET, %edx
14739- pushl %edx
14740+ popl (saved_return_addr)
14741+ popl (efi_rt_function_ptr)
14742
14743 /*
14744 * 3. Clear PG bit in %CR0.
14745@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14746 /*
14747 * 5. Call the physical function.
14748 */
14749- jmp *%ecx
14750+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
14751
14752-2:
14753 /*
14754 * 6. After EFI runtime service returns, control will return to
14755 * following instruction. We'd better readjust stack pointer first.
14756@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14757 movl %cr0, %edx
14758 orl $0x80000000, %edx
14759 movl %edx, %cr0
14760- jmp 1f
14761-1:
14762+
14763 /*
14764 * 8. Now restore the virtual mode from flat mode by
14765 * adding EIP with PAGE_OFFSET.
14766 */
14767- movl $1f, %edx
14768- jmp *%edx
14769+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14770 1:
14771+ movl $(__KERNEL_DS), %edx
14772+ mov %edx, %ds
14773+ mov %edx, %es
14774+ mov %edx, %ss
14775
14776 /*
14777 * 9. Balance the stack. And because EAX contain the return value,
14778 * we'd better not clobber it.
14779 */
14780- leal efi_rt_function_ptr, %edx
14781- movl (%edx), %ecx
14782- pushl %ecx
14783+ pushl (efi_rt_function_ptr)
14784
14785 /*
14786- * 10. Push the saved return address onto the stack and return.
14787+ * 10. Return to the saved return address.
14788 */
14789- leal saved_return_addr, %edx
14790- movl (%edx), %ecx
14791- pushl %ecx
14792- ret
14793+ jmpl *(saved_return_addr)
14794 ENDPROC(efi_call_phys)
14795 .previous
14796
14797-.data
14798+__INITDATA
14799 saved_return_addr:
14800 .long 0
14801 efi_rt_function_ptr:
14802diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
14803index 4c07cca..2c8427d 100644
14804--- a/arch/x86/kernel/efi_stub_64.S
14805+++ b/arch/x86/kernel/efi_stub_64.S
14806@@ -7,6 +7,7 @@
14807 */
14808
14809 #include <linux/linkage.h>
14810+#include <asm/alternative-asm.h>
14811
14812 #define SAVE_XMM \
14813 mov %rsp, %rax; \
14814@@ -40,6 +41,7 @@ ENTRY(efi_call0)
14815 call *%rdi
14816 addq $32, %rsp
14817 RESTORE_XMM
14818+ pax_force_retaddr 0, 1
14819 ret
14820 ENDPROC(efi_call0)
14821
14822@@ -50,6 +52,7 @@ ENTRY(efi_call1)
14823 call *%rdi
14824 addq $32, %rsp
14825 RESTORE_XMM
14826+ pax_force_retaddr 0, 1
14827 ret
14828 ENDPROC(efi_call1)
14829
14830@@ -60,6 +63,7 @@ ENTRY(efi_call2)
14831 call *%rdi
14832 addq $32, %rsp
14833 RESTORE_XMM
14834+ pax_force_retaddr 0, 1
14835 ret
14836 ENDPROC(efi_call2)
14837
14838@@ -71,6 +75,7 @@ ENTRY(efi_call3)
14839 call *%rdi
14840 addq $32, %rsp
14841 RESTORE_XMM
14842+ pax_force_retaddr 0, 1
14843 ret
14844 ENDPROC(efi_call3)
14845
14846@@ -83,6 +88,7 @@ ENTRY(efi_call4)
14847 call *%rdi
14848 addq $32, %rsp
14849 RESTORE_XMM
14850+ pax_force_retaddr 0, 1
14851 ret
14852 ENDPROC(efi_call4)
14853
14854@@ -96,6 +102,7 @@ ENTRY(efi_call5)
14855 call *%rdi
14856 addq $48, %rsp
14857 RESTORE_XMM
14858+ pax_force_retaddr 0, 1
14859 ret
14860 ENDPROC(efi_call5)
14861
14862@@ -112,5 +119,6 @@ ENTRY(efi_call6)
14863 call *%rdi
14864 addq $48, %rsp
14865 RESTORE_XMM
14866+ pax_force_retaddr 0, 1
14867 ret
14868 ENDPROC(efi_call6)
14869diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14870index c097e7d..c689cf4 100644
14871--- a/arch/x86/kernel/entry_32.S
14872+++ b/arch/x86/kernel/entry_32.S
14873@@ -185,13 +185,146 @@
14874 /*CFI_REL_OFFSET gs, PT_GS*/
14875 .endm
14876 .macro SET_KERNEL_GS reg
14877+
14878+#ifdef CONFIG_CC_STACKPROTECTOR
14879 movl $(__KERNEL_STACK_CANARY), \reg
14880+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14881+ movl $(__USER_DS), \reg
14882+#else
14883+ xorl \reg, \reg
14884+#endif
14885+
14886 movl \reg, %gs
14887 .endm
14888
14889 #endif /* CONFIG_X86_32_LAZY_GS */
14890
14891-.macro SAVE_ALL
14892+.macro pax_enter_kernel
14893+#ifdef CONFIG_PAX_KERNEXEC
14894+ call pax_enter_kernel
14895+#endif
14896+.endm
14897+
14898+.macro pax_exit_kernel
14899+#ifdef CONFIG_PAX_KERNEXEC
14900+ call pax_exit_kernel
14901+#endif
14902+.endm
14903+
14904+#ifdef CONFIG_PAX_KERNEXEC
14905+ENTRY(pax_enter_kernel)
14906+#ifdef CONFIG_PARAVIRT
14907+ pushl %eax
14908+ pushl %ecx
14909+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14910+ mov %eax, %esi
14911+#else
14912+ mov %cr0, %esi
14913+#endif
14914+ bts $16, %esi
14915+ jnc 1f
14916+ mov %cs, %esi
14917+ cmp $__KERNEL_CS, %esi
14918+ jz 3f
14919+ ljmp $__KERNEL_CS, $3f
14920+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14921+2:
14922+#ifdef CONFIG_PARAVIRT
14923+ mov %esi, %eax
14924+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14925+#else
14926+ mov %esi, %cr0
14927+#endif
14928+3:
14929+#ifdef CONFIG_PARAVIRT
14930+ popl %ecx
14931+ popl %eax
14932+#endif
14933+ ret
14934+ENDPROC(pax_enter_kernel)
14935+
14936+ENTRY(pax_exit_kernel)
14937+#ifdef CONFIG_PARAVIRT
14938+ pushl %eax
14939+ pushl %ecx
14940+#endif
14941+ mov %cs, %esi
14942+ cmp $__KERNEXEC_KERNEL_CS, %esi
14943+ jnz 2f
14944+#ifdef CONFIG_PARAVIRT
14945+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14946+ mov %eax, %esi
14947+#else
14948+ mov %cr0, %esi
14949+#endif
14950+ btr $16, %esi
14951+ ljmp $__KERNEL_CS, $1f
14952+1:
14953+#ifdef CONFIG_PARAVIRT
14954+ mov %esi, %eax
14955+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14956+#else
14957+ mov %esi, %cr0
14958+#endif
14959+2:
14960+#ifdef CONFIG_PARAVIRT
14961+ popl %ecx
14962+ popl %eax
14963+#endif
14964+ ret
14965+ENDPROC(pax_exit_kernel)
14966+#endif
14967+
14968+.macro pax_erase_kstack
14969+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14970+ call pax_erase_kstack
14971+#endif
14972+.endm
14973+
14974+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14975+/*
14976+ * ebp: thread_info
14977+ * ecx, edx: can be clobbered
14978+ */
14979+ENTRY(pax_erase_kstack)
14980+ pushl %edi
14981+ pushl %eax
14982+
14983+ mov TI_lowest_stack(%ebp), %edi
14984+ mov $-0xBEEF, %eax
14985+ std
14986+
14987+1: mov %edi, %ecx
14988+ and $THREAD_SIZE_asm - 1, %ecx
14989+ shr $2, %ecx
14990+ repne scasl
14991+ jecxz 2f
14992+
14993+ cmp $2*16, %ecx
14994+ jc 2f
14995+
14996+ mov $2*16, %ecx
14997+ repe scasl
14998+ jecxz 2f
14999+ jne 1b
15000+
15001+2: cld
15002+ mov %esp, %ecx
15003+ sub %edi, %ecx
15004+ shr $2, %ecx
15005+ rep stosl
15006+
15007+ mov TI_task_thread_sp0(%ebp), %edi
15008+ sub $128, %edi
15009+ mov %edi, TI_lowest_stack(%ebp)
15010+
15011+ popl %eax
15012+ popl %edi
15013+ ret
15014+ENDPROC(pax_erase_kstack)
15015+#endif
15016+
15017+.macro __SAVE_ALL _DS
15018 cld
15019 PUSH_GS
15020 pushl %fs
15021@@ -224,7 +357,7 @@
15022 pushl %ebx
15023 CFI_ADJUST_CFA_OFFSET 4
15024 CFI_REL_OFFSET ebx, 0
15025- movl $(__USER_DS), %edx
15026+ movl $\_DS, %edx
15027 movl %edx, %ds
15028 movl %edx, %es
15029 movl $(__KERNEL_PERCPU), %edx
15030@@ -232,6 +365,15 @@
15031 SET_KERNEL_GS %edx
15032 .endm
15033
15034+.macro SAVE_ALL
15035+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15036+ __SAVE_ALL __KERNEL_DS
15037+ pax_enter_kernel
15038+#else
15039+ __SAVE_ALL __USER_DS
15040+#endif
15041+.endm
15042+
15043 .macro RESTORE_INT_REGS
15044 popl %ebx
15045 CFI_ADJUST_CFA_OFFSET -4
15046@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15047 CFI_ADJUST_CFA_OFFSET -4
15048 jmp syscall_exit
15049 CFI_ENDPROC
15050-END(ret_from_fork)
15051+ENDPROC(ret_from_fork)
15052
15053 /*
15054 * Return to user mode is not as complex as all this looks,
15055@@ -352,7 +494,15 @@ check_userspace:
15056 movb PT_CS(%esp), %al
15057 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15058 cmpl $USER_RPL, %eax
15059+
15060+#ifdef CONFIG_PAX_KERNEXEC
15061+ jae resume_userspace
15062+
15063+ PAX_EXIT_KERNEL
15064+ jmp resume_kernel
15065+#else
15066 jb resume_kernel # not returning to v8086 or userspace
15067+#endif
15068
15069 ENTRY(resume_userspace)
15070 LOCKDEP_SYS_EXIT
15071@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15072 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15073 # int/exception return?
15074 jne work_pending
15075- jmp restore_all
15076-END(ret_from_exception)
15077+ jmp restore_all_pax
15078+ENDPROC(ret_from_exception)
15079
15080 #ifdef CONFIG_PREEMPT
15081 ENTRY(resume_kernel)
15082@@ -380,7 +530,7 @@ need_resched:
15083 jz restore_all
15084 call preempt_schedule_irq
15085 jmp need_resched
15086-END(resume_kernel)
15087+ENDPROC(resume_kernel)
15088 #endif
15089 CFI_ENDPROC
15090
15091@@ -414,25 +564,36 @@ sysenter_past_esp:
15092 /*CFI_REL_OFFSET cs, 0*/
15093 /*
15094 * Push current_thread_info()->sysenter_return to the stack.
15095- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15096- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15097 */
15098- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15099+ pushl $0
15100 CFI_ADJUST_CFA_OFFSET 4
15101 CFI_REL_OFFSET eip, 0
15102
15103 pushl %eax
15104 CFI_ADJUST_CFA_OFFSET 4
15105 SAVE_ALL
15106+ GET_THREAD_INFO(%ebp)
15107+ movl TI_sysenter_return(%ebp),%ebp
15108+ movl %ebp,PT_EIP(%esp)
15109 ENABLE_INTERRUPTS(CLBR_NONE)
15110
15111 /*
15112 * Load the potential sixth argument from user stack.
15113 * Careful about security.
15114 */
15115+ movl PT_OLDESP(%esp),%ebp
15116+
15117+#ifdef CONFIG_PAX_MEMORY_UDEREF
15118+ mov PT_OLDSS(%esp),%ds
15119+1: movl %ds:(%ebp),%ebp
15120+ push %ss
15121+ pop %ds
15122+#else
15123 cmpl $__PAGE_OFFSET-3,%ebp
15124 jae syscall_fault
15125 1: movl (%ebp),%ebp
15126+#endif
15127+
15128 movl %ebp,PT_EBP(%esp)
15129 .section __ex_table,"a"
15130 .align 4
15131@@ -455,12 +616,24 @@ sysenter_do_call:
15132 testl $_TIF_ALLWORK_MASK, %ecx
15133 jne sysexit_audit
15134 sysenter_exit:
15135+
15136+#ifdef CONFIG_PAX_RANDKSTACK
15137+ pushl_cfi %eax
15138+ movl %esp, %eax
15139+ call pax_randomize_kstack
15140+ popl_cfi %eax
15141+#endif
15142+
15143+ pax_erase_kstack
15144+
15145 /* if something modifies registers it must also disable sysexit */
15146 movl PT_EIP(%esp), %edx
15147 movl PT_OLDESP(%esp), %ecx
15148 xorl %ebp,%ebp
15149 TRACE_IRQS_ON
15150 1: mov PT_FS(%esp), %fs
15151+2: mov PT_DS(%esp), %ds
15152+3: mov PT_ES(%esp), %es
15153 PTGS_TO_GS
15154 ENABLE_INTERRUPTS_SYSEXIT
15155
15156@@ -477,6 +650,9 @@ sysenter_audit:
15157 movl %eax,%edx /* 2nd arg: syscall number */
15158 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15159 call audit_syscall_entry
15160+
15161+ pax_erase_kstack
15162+
15163 pushl %ebx
15164 CFI_ADJUST_CFA_OFFSET 4
15165 movl PT_EAX(%esp),%eax /* reload syscall number */
15166@@ -504,11 +680,17 @@ sysexit_audit:
15167
15168 CFI_ENDPROC
15169 .pushsection .fixup,"ax"
15170-2: movl $0,PT_FS(%esp)
15171+4: movl $0,PT_FS(%esp)
15172+ jmp 1b
15173+5: movl $0,PT_DS(%esp)
15174+ jmp 1b
15175+6: movl $0,PT_ES(%esp)
15176 jmp 1b
15177 .section __ex_table,"a"
15178 .align 4
15179- .long 1b,2b
15180+ .long 1b,4b
15181+ .long 2b,5b
15182+ .long 3b,6b
15183 .popsection
15184 PTGS_TO_GS_EX
15185 ENDPROC(ia32_sysenter_target)
15186@@ -538,6 +720,15 @@ syscall_exit:
15187 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15188 jne syscall_exit_work
15189
15190+restore_all_pax:
15191+
15192+#ifdef CONFIG_PAX_RANDKSTACK
15193+ movl %esp, %eax
15194+ call pax_randomize_kstack
15195+#endif
15196+
15197+ pax_erase_kstack
15198+
15199 restore_all:
15200 TRACE_IRQS_IRET
15201 restore_all_notrace:
15202@@ -602,10 +793,29 @@ ldt_ss:
15203 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15204 mov %dx, %ax /* eax: new kernel esp */
15205 sub %eax, %edx /* offset (low word is 0) */
15206- PER_CPU(gdt_page, %ebx)
15207+#ifdef CONFIG_SMP
15208+ movl PER_CPU_VAR(cpu_number), %ebx
15209+ shll $PAGE_SHIFT_asm, %ebx
15210+ addl $cpu_gdt_table, %ebx
15211+#else
15212+ movl $cpu_gdt_table, %ebx
15213+#endif
15214 shr $16, %edx
15215+
15216+#ifdef CONFIG_PAX_KERNEXEC
15217+ mov %cr0, %esi
15218+ btr $16, %esi
15219+ mov %esi, %cr0
15220+#endif
15221+
15222 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15223 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15224+
15225+#ifdef CONFIG_PAX_KERNEXEC
15226+ bts $16, %esi
15227+ mov %esi, %cr0
15228+#endif
15229+
15230 pushl $__ESPFIX_SS
15231 CFI_ADJUST_CFA_OFFSET 4
15232 push %eax /* new kernel esp */
15233@@ -636,36 +846,30 @@ work_resched:
15234 movl TI_flags(%ebp), %ecx
15235 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15236 # than syscall tracing?
15237- jz restore_all
15238+ jz restore_all_pax
15239 testb $_TIF_NEED_RESCHED, %cl
15240 jnz work_resched
15241
15242 work_notifysig: # deal with pending signals and
15243 # notify-resume requests
15244+ movl %esp, %eax
15245 #ifdef CONFIG_VM86
15246 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15247- movl %esp, %eax
15248- jne work_notifysig_v86 # returning to kernel-space or
15249+ jz 1f # returning to kernel-space or
15250 # vm86-space
15251- xorl %edx, %edx
15252- call do_notify_resume
15253- jmp resume_userspace_sig
15254
15255- ALIGN
15256-work_notifysig_v86:
15257 pushl %ecx # save ti_flags for do_notify_resume
15258 CFI_ADJUST_CFA_OFFSET 4
15259 call save_v86_state # %eax contains pt_regs pointer
15260 popl %ecx
15261 CFI_ADJUST_CFA_OFFSET -4
15262 movl %eax, %esp
15263-#else
15264- movl %esp, %eax
15265+1:
15266 #endif
15267 xorl %edx, %edx
15268 call do_notify_resume
15269 jmp resume_userspace_sig
15270-END(work_pending)
15271+ENDPROC(work_pending)
15272
15273 # perform syscall exit tracing
15274 ALIGN
15275@@ -673,11 +877,14 @@ syscall_trace_entry:
15276 movl $-ENOSYS,PT_EAX(%esp)
15277 movl %esp, %eax
15278 call syscall_trace_enter
15279+
15280+ pax_erase_kstack
15281+
15282 /* What it returned is what we'll actually use. */
15283 cmpl $(nr_syscalls), %eax
15284 jnae syscall_call
15285 jmp syscall_exit
15286-END(syscall_trace_entry)
15287+ENDPROC(syscall_trace_entry)
15288
15289 # perform syscall exit tracing
15290 ALIGN
15291@@ -690,20 +897,24 @@ syscall_exit_work:
15292 movl %esp, %eax
15293 call syscall_trace_leave
15294 jmp resume_userspace
15295-END(syscall_exit_work)
15296+ENDPROC(syscall_exit_work)
15297 CFI_ENDPROC
15298
15299 RING0_INT_FRAME # can't unwind into user space anyway
15300 syscall_fault:
15301+#ifdef CONFIG_PAX_MEMORY_UDEREF
15302+ push %ss
15303+ pop %ds
15304+#endif
15305 GET_THREAD_INFO(%ebp)
15306 movl $-EFAULT,PT_EAX(%esp)
15307 jmp resume_userspace
15308-END(syscall_fault)
15309+ENDPROC(syscall_fault)
15310
15311 syscall_badsys:
15312 movl $-ENOSYS,PT_EAX(%esp)
15313 jmp resume_userspace
15314-END(syscall_badsys)
15315+ENDPROC(syscall_badsys)
15316 CFI_ENDPROC
15317
15318 /*
15319@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15320 PTREGSCALL(vm86)
15321 PTREGSCALL(vm86old)
15322
15323+ ALIGN;
15324+ENTRY(kernel_execve)
15325+ push %ebp
15326+ sub $PT_OLDSS+4,%esp
15327+ push %edi
15328+ push %ecx
15329+ push %eax
15330+ lea 3*4(%esp),%edi
15331+ mov $PT_OLDSS/4+1,%ecx
15332+ xorl %eax,%eax
15333+ rep stosl
15334+ pop %eax
15335+ pop %ecx
15336+ pop %edi
15337+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15338+ mov %eax,PT_EBX(%esp)
15339+ mov %edx,PT_ECX(%esp)
15340+ mov %ecx,PT_EDX(%esp)
15341+ mov %esp,%eax
15342+ call sys_execve
15343+ GET_THREAD_INFO(%ebp)
15344+ test %eax,%eax
15345+ jz syscall_exit
15346+ add $PT_OLDSS+4,%esp
15347+ pop %ebp
15348+ ret
15349+
15350 .macro FIXUP_ESPFIX_STACK
15351 /*
15352 * Switch back for ESPFIX stack to the normal zerobased stack
15353@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15354 * normal stack and adjusts ESP with the matching offset.
15355 */
15356 /* fixup the stack */
15357- PER_CPU(gdt_page, %ebx)
15358+#ifdef CONFIG_SMP
15359+ movl PER_CPU_VAR(cpu_number), %ebx
15360+ shll $PAGE_SHIFT_asm, %ebx
15361+ addl $cpu_gdt_table, %ebx
15362+#else
15363+ movl $cpu_gdt_table, %ebx
15364+#endif
15365 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15366 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15367 shl $16, %eax
15368@@ -793,7 +1037,7 @@ vector=vector+1
15369 .endr
15370 2: jmp common_interrupt
15371 .endr
15372-END(irq_entries_start)
15373+ENDPROC(irq_entries_start)
15374
15375 .previous
15376 END(interrupt)
15377@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15378 CFI_ADJUST_CFA_OFFSET 4
15379 jmp error_code
15380 CFI_ENDPROC
15381-END(coprocessor_error)
15382+ENDPROC(coprocessor_error)
15383
15384 ENTRY(simd_coprocessor_error)
15385 RING0_INT_FRAME
15386@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15387 CFI_ADJUST_CFA_OFFSET 4
15388 jmp error_code
15389 CFI_ENDPROC
15390-END(simd_coprocessor_error)
15391+ENDPROC(simd_coprocessor_error)
15392
15393 ENTRY(device_not_available)
15394 RING0_INT_FRAME
15395@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15396 CFI_ADJUST_CFA_OFFSET 4
15397 jmp error_code
15398 CFI_ENDPROC
15399-END(device_not_available)
15400+ENDPROC(device_not_available)
15401
15402 #ifdef CONFIG_PARAVIRT
15403 ENTRY(native_iret)
15404@@ -869,12 +1113,12 @@ ENTRY(native_iret)
15405 .align 4
15406 .long native_iret, iret_exc
15407 .previous
15408-END(native_iret)
15409+ENDPROC(native_iret)
15410
15411 ENTRY(native_irq_enable_sysexit)
15412 sti
15413 sysexit
15414-END(native_irq_enable_sysexit)
15415+ENDPROC(native_irq_enable_sysexit)
15416 #endif
15417
15418 ENTRY(overflow)
15419@@ -885,7 +1129,7 @@ ENTRY(overflow)
15420 CFI_ADJUST_CFA_OFFSET 4
15421 jmp error_code
15422 CFI_ENDPROC
15423-END(overflow)
15424+ENDPROC(overflow)
15425
15426 ENTRY(bounds)
15427 RING0_INT_FRAME
15428@@ -895,7 +1139,7 @@ ENTRY(bounds)
15429 CFI_ADJUST_CFA_OFFSET 4
15430 jmp error_code
15431 CFI_ENDPROC
15432-END(bounds)
15433+ENDPROC(bounds)
15434
15435 ENTRY(invalid_op)
15436 RING0_INT_FRAME
15437@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15438 CFI_ADJUST_CFA_OFFSET 4
15439 jmp error_code
15440 CFI_ENDPROC
15441-END(invalid_op)
15442+ENDPROC(invalid_op)
15443
15444 ENTRY(coprocessor_segment_overrun)
15445 RING0_INT_FRAME
15446@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15447 CFI_ADJUST_CFA_OFFSET 4
15448 jmp error_code
15449 CFI_ENDPROC
15450-END(coprocessor_segment_overrun)
15451+ENDPROC(coprocessor_segment_overrun)
15452
15453 ENTRY(invalid_TSS)
15454 RING0_EC_FRAME
15455@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15456 CFI_ADJUST_CFA_OFFSET 4
15457 jmp error_code
15458 CFI_ENDPROC
15459-END(invalid_TSS)
15460+ENDPROC(invalid_TSS)
15461
15462 ENTRY(segment_not_present)
15463 RING0_EC_FRAME
15464@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15465 CFI_ADJUST_CFA_OFFSET 4
15466 jmp error_code
15467 CFI_ENDPROC
15468-END(segment_not_present)
15469+ENDPROC(segment_not_present)
15470
15471 ENTRY(stack_segment)
15472 RING0_EC_FRAME
15473@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15474 CFI_ADJUST_CFA_OFFSET 4
15475 jmp error_code
15476 CFI_ENDPROC
15477-END(stack_segment)
15478+ENDPROC(stack_segment)
15479
15480 ENTRY(alignment_check)
15481 RING0_EC_FRAME
15482@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15483 CFI_ADJUST_CFA_OFFSET 4
15484 jmp error_code
15485 CFI_ENDPROC
15486-END(alignment_check)
15487+ENDPROC(alignment_check)
15488
15489 ENTRY(divide_error)
15490 RING0_INT_FRAME
15491@@ -957,7 +1201,7 @@ ENTRY(divide_error)
15492 CFI_ADJUST_CFA_OFFSET 4
15493 jmp error_code
15494 CFI_ENDPROC
15495-END(divide_error)
15496+ENDPROC(divide_error)
15497
15498 #ifdef CONFIG_X86_MCE
15499 ENTRY(machine_check)
15500@@ -968,7 +1212,7 @@ ENTRY(machine_check)
15501 CFI_ADJUST_CFA_OFFSET 4
15502 jmp error_code
15503 CFI_ENDPROC
15504-END(machine_check)
15505+ENDPROC(machine_check)
15506 #endif
15507
15508 ENTRY(spurious_interrupt_bug)
15509@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15510 CFI_ADJUST_CFA_OFFSET 4
15511 jmp error_code
15512 CFI_ENDPROC
15513-END(spurious_interrupt_bug)
15514+ENDPROC(spurious_interrupt_bug)
15515
15516 ENTRY(kernel_thread_helper)
15517 pushl $0 # fake return address for unwinder
15518@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15519
15520 ENTRY(mcount)
15521 ret
15522-END(mcount)
15523+ENDPROC(mcount)
15524
15525 ENTRY(ftrace_caller)
15526 cmpl $0, function_trace_stop
15527@@ -1124,7 +1368,7 @@ ftrace_graph_call:
15528 .globl ftrace_stub
15529 ftrace_stub:
15530 ret
15531-END(ftrace_caller)
15532+ENDPROC(ftrace_caller)
15533
15534 #else /* ! CONFIG_DYNAMIC_FTRACE */
15535
15536@@ -1160,7 +1404,7 @@ trace:
15537 popl %ecx
15538 popl %eax
15539 jmp ftrace_stub
15540-END(mcount)
15541+ENDPROC(mcount)
15542 #endif /* CONFIG_DYNAMIC_FTRACE */
15543 #endif /* CONFIG_FUNCTION_TRACER */
15544
15545@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15546 popl %ecx
15547 popl %eax
15548 ret
15549-END(ftrace_graph_caller)
15550+ENDPROC(ftrace_graph_caller)
15551
15552 .globl return_to_handler
15553 return_to_handler:
15554@@ -1198,7 +1442,6 @@ return_to_handler:
15555 ret
15556 #endif
15557
15558-.section .rodata,"a"
15559 #include "syscall_table_32.S"
15560
15561 syscall_table_size=(.-sys_call_table)
15562@@ -1255,15 +1498,18 @@ error_code:
15563 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15564 REG_TO_PTGS %ecx
15565 SET_KERNEL_GS %ecx
15566- movl $(__USER_DS), %ecx
15567+ movl $(__KERNEL_DS), %ecx
15568 movl %ecx, %ds
15569 movl %ecx, %es
15570+
15571+ pax_enter_kernel
15572+
15573 TRACE_IRQS_OFF
15574 movl %esp,%eax # pt_regs pointer
15575 call *%edi
15576 jmp ret_from_exception
15577 CFI_ENDPROC
15578-END(page_fault)
15579+ENDPROC(page_fault)
15580
15581 /*
15582 * Debug traps and NMI can happen at the one SYSENTER instruction
15583@@ -1309,7 +1555,7 @@ debug_stack_correct:
15584 call do_debug
15585 jmp ret_from_exception
15586 CFI_ENDPROC
15587-END(debug)
15588+ENDPROC(debug)
15589
15590 /*
15591 * NMI is doubly nasty. It can happen _while_ we're handling
15592@@ -1351,6 +1597,9 @@ nmi_stack_correct:
15593 xorl %edx,%edx # zero error code
15594 movl %esp,%eax # pt_regs pointer
15595 call do_nmi
15596+
15597+ pax_exit_kernel
15598+
15599 jmp restore_all_notrace
15600 CFI_ENDPROC
15601
15602@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15603 FIXUP_ESPFIX_STACK # %eax == %esp
15604 xorl %edx,%edx # zero error code
15605 call do_nmi
15606+
15607+ pax_exit_kernel
15608+
15609 RESTORE_REGS
15610 lss 12+4(%esp), %esp # back to espfix stack
15611 CFI_ADJUST_CFA_OFFSET -24
15612 jmp irq_return
15613 CFI_ENDPROC
15614-END(nmi)
15615+ENDPROC(nmi)
15616
15617 ENTRY(int3)
15618 RING0_INT_FRAME
15619@@ -1409,7 +1661,7 @@ ENTRY(int3)
15620 call do_int3
15621 jmp ret_from_exception
15622 CFI_ENDPROC
15623-END(int3)
15624+ENDPROC(int3)
15625
15626 ENTRY(general_protection)
15627 RING0_EC_FRAME
15628@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15629 CFI_ADJUST_CFA_OFFSET 4
15630 jmp error_code
15631 CFI_ENDPROC
15632-END(general_protection)
15633+ENDPROC(general_protection)
15634
15635 /*
15636 * End of kprobes section
15637diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15638index 34a56a9..87790b4 100644
15639--- a/arch/x86/kernel/entry_64.S
15640+++ b/arch/x86/kernel/entry_64.S
15641@@ -53,6 +53,8 @@
15642 #include <asm/paravirt.h>
15643 #include <asm/ftrace.h>
15644 #include <asm/percpu.h>
15645+#include <asm/pgtable.h>
15646+#include <asm/alternative-asm.h>
15647
15648 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15649 #include <linux/elf-em.h>
15650@@ -64,8 +66,9 @@
15651 #ifdef CONFIG_FUNCTION_TRACER
15652 #ifdef CONFIG_DYNAMIC_FTRACE
15653 ENTRY(mcount)
15654+ pax_force_retaddr
15655 retq
15656-END(mcount)
15657+ENDPROC(mcount)
15658
15659 ENTRY(ftrace_caller)
15660 cmpl $0, function_trace_stop
15661@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15662 #endif
15663
15664 GLOBAL(ftrace_stub)
15665+ pax_force_retaddr
15666 retq
15667-END(ftrace_caller)
15668+ENDPROC(ftrace_caller)
15669
15670 #else /* ! CONFIG_DYNAMIC_FTRACE */
15671 ENTRY(mcount)
15672@@ -108,6 +112,7 @@ ENTRY(mcount)
15673 #endif
15674
15675 GLOBAL(ftrace_stub)
15676+ pax_force_retaddr
15677 retq
15678
15679 trace:
15680@@ -117,12 +122,13 @@ trace:
15681 movq 8(%rbp), %rsi
15682 subq $MCOUNT_INSN_SIZE, %rdi
15683
15684+ pax_force_fptr ftrace_trace_function
15685 call *ftrace_trace_function
15686
15687 MCOUNT_RESTORE_FRAME
15688
15689 jmp ftrace_stub
15690-END(mcount)
15691+ENDPROC(mcount)
15692 #endif /* CONFIG_DYNAMIC_FTRACE */
15693 #endif /* CONFIG_FUNCTION_TRACER */
15694
15695@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15696
15697 MCOUNT_RESTORE_FRAME
15698
15699+ pax_force_retaddr
15700 retq
15701-END(ftrace_graph_caller)
15702+ENDPROC(ftrace_graph_caller)
15703
15704 GLOBAL(return_to_handler)
15705 subq $24, %rsp
15706@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15707 movq 8(%rsp), %rdx
15708 movq (%rsp), %rax
15709 addq $16, %rsp
15710+ pax_force_retaddr
15711 retq
15712 #endif
15713
15714@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15715 ENDPROC(native_usergs_sysret64)
15716 #endif /* CONFIG_PARAVIRT */
15717
15718+ .macro ljmpq sel, off
15719+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15720+ .byte 0x48; ljmp *1234f(%rip)
15721+ .pushsection .rodata
15722+ .align 16
15723+ 1234: .quad \off; .word \sel
15724+ .popsection
15725+#else
15726+ pushq $\sel
15727+ pushq $\off
15728+ lretq
15729+#endif
15730+ .endm
15731+
15732+ .macro pax_enter_kernel
15733+ pax_set_fptr_mask
15734+#ifdef CONFIG_PAX_KERNEXEC
15735+ call pax_enter_kernel
15736+#endif
15737+ .endm
15738+
15739+ .macro pax_exit_kernel
15740+#ifdef CONFIG_PAX_KERNEXEC
15741+ call pax_exit_kernel
15742+#endif
15743+ .endm
15744+
15745+#ifdef CONFIG_PAX_KERNEXEC
15746+ENTRY(pax_enter_kernel)
15747+ pushq %rdi
15748+
15749+#ifdef CONFIG_PARAVIRT
15750+ PV_SAVE_REGS(CLBR_RDI)
15751+#endif
15752+
15753+ GET_CR0_INTO_RDI
15754+ bts $16,%rdi
15755+ jnc 3f
15756+ mov %cs,%edi
15757+ cmp $__KERNEL_CS,%edi
15758+ jnz 2f
15759+1:
15760+
15761+#ifdef CONFIG_PARAVIRT
15762+ PV_RESTORE_REGS(CLBR_RDI)
15763+#endif
15764+
15765+ popq %rdi
15766+ pax_force_retaddr
15767+ retq
15768+
15769+2: ljmpq __KERNEL_CS,1f
15770+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15771+4: SET_RDI_INTO_CR0
15772+ jmp 1b
15773+ENDPROC(pax_enter_kernel)
15774+
15775+ENTRY(pax_exit_kernel)
15776+ pushq %rdi
15777+
15778+#ifdef CONFIG_PARAVIRT
15779+ PV_SAVE_REGS(CLBR_RDI)
15780+#endif
15781+
15782+ mov %cs,%rdi
15783+ cmp $__KERNEXEC_KERNEL_CS,%edi
15784+ jz 2f
15785+1:
15786+
15787+#ifdef CONFIG_PARAVIRT
15788+ PV_RESTORE_REGS(CLBR_RDI);
15789+#endif
15790+
15791+ popq %rdi
15792+ pax_force_retaddr
15793+ retq
15794+
15795+2: GET_CR0_INTO_RDI
15796+ btr $16,%rdi
15797+ ljmpq __KERNEL_CS,3f
15798+3: SET_RDI_INTO_CR0
15799+ jmp 1b
15800+#ifdef CONFIG_PARAVIRT
15801+ PV_RESTORE_REGS(CLBR_RDI);
15802+#endif
15803+
15804+ popq %rdi
15805+ pax_force_retaddr
15806+ retq
15807+ENDPROC(pax_exit_kernel)
15808+#endif
15809+
15810+ .macro pax_enter_kernel_user
15811+ pax_set_fptr_mask
15812+#ifdef CONFIG_PAX_MEMORY_UDEREF
15813+ call pax_enter_kernel_user
15814+#endif
15815+ .endm
15816+
15817+ .macro pax_exit_kernel_user
15818+#ifdef CONFIG_PAX_MEMORY_UDEREF
15819+ call pax_exit_kernel_user
15820+#endif
15821+#ifdef CONFIG_PAX_RANDKSTACK
15822+ pushq %rax
15823+ call pax_randomize_kstack
15824+ popq %rax
15825+#endif
15826+ .endm
15827+
15828+#ifdef CONFIG_PAX_MEMORY_UDEREF
15829+ENTRY(pax_enter_kernel_user)
15830+ pushq %rdi
15831+ pushq %rbx
15832+
15833+#ifdef CONFIG_PARAVIRT
15834+ PV_SAVE_REGS(CLBR_RDI)
15835+#endif
15836+
15837+ GET_CR3_INTO_RDI
15838+ mov %rdi,%rbx
15839+ add $__START_KERNEL_map,%rbx
15840+ sub phys_base(%rip),%rbx
15841+
15842+#ifdef CONFIG_PARAVIRT
15843+ pushq %rdi
15844+ cmpl $0, pv_info+PARAVIRT_enabled
15845+ jz 1f
15846+ i = 0
15847+ .rept USER_PGD_PTRS
15848+ mov i*8(%rbx),%rsi
15849+ mov $0,%sil
15850+ lea i*8(%rbx),%rdi
15851+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15852+ i = i + 1
15853+ .endr
15854+ jmp 2f
15855+1:
15856+#endif
15857+
15858+ i = 0
15859+ .rept USER_PGD_PTRS
15860+ movb $0,i*8(%rbx)
15861+ i = i + 1
15862+ .endr
15863+
15864+#ifdef CONFIG_PARAVIRT
15865+2: popq %rdi
15866+#endif
15867+ SET_RDI_INTO_CR3
15868+
15869+#ifdef CONFIG_PAX_KERNEXEC
15870+ GET_CR0_INTO_RDI
15871+ bts $16,%rdi
15872+ SET_RDI_INTO_CR0
15873+#endif
15874+
15875+#ifdef CONFIG_PARAVIRT
15876+ PV_RESTORE_REGS(CLBR_RDI)
15877+#endif
15878+
15879+ popq %rbx
15880+ popq %rdi
15881+ pax_force_retaddr
15882+ retq
15883+ENDPROC(pax_enter_kernel_user)
15884+
15885+ENTRY(pax_exit_kernel_user)
15886+ push %rdi
15887+
15888+#ifdef CONFIG_PARAVIRT
15889+ pushq %rbx
15890+ PV_SAVE_REGS(CLBR_RDI)
15891+#endif
15892+
15893+#ifdef CONFIG_PAX_KERNEXEC
15894+ GET_CR0_INTO_RDI
15895+ btr $16,%rdi
15896+ SET_RDI_INTO_CR0
15897+#endif
15898+
15899+ GET_CR3_INTO_RDI
15900+ add $__START_KERNEL_map,%rdi
15901+ sub phys_base(%rip),%rdi
15902+
15903+#ifdef CONFIG_PARAVIRT
15904+ cmpl $0, pv_info+PARAVIRT_enabled
15905+ jz 1f
15906+ mov %rdi,%rbx
15907+ i = 0
15908+ .rept USER_PGD_PTRS
15909+ mov i*8(%rbx),%rsi
15910+ mov $0x67,%sil
15911+ lea i*8(%rbx),%rdi
15912+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15913+ i = i + 1
15914+ .endr
15915+ jmp 2f
15916+1:
15917+#endif
15918+
15919+ i = 0
15920+ .rept USER_PGD_PTRS
15921+ movb $0x67,i*8(%rdi)
15922+ i = i + 1
15923+ .endr
15924+
15925+#ifdef CONFIG_PARAVIRT
15926+2: PV_RESTORE_REGS(CLBR_RDI)
15927+ popq %rbx
15928+#endif
15929+
15930+ popq %rdi
15931+ pax_force_retaddr
15932+ retq
15933+ENDPROC(pax_exit_kernel_user)
15934+#endif
15935+
15936+.macro pax_erase_kstack
15937+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15938+ call pax_erase_kstack
15939+#endif
15940+.endm
15941+
15942+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15943+/*
15944+ * r11: thread_info
15945+ * rcx, rdx: can be clobbered
15946+ */
15947+ENTRY(pax_erase_kstack)
15948+ pushq %rdi
15949+ pushq %rax
15950+ pushq %r11
15951+
15952+ GET_THREAD_INFO(%r11)
15953+ mov TI_lowest_stack(%r11), %rdi
15954+ mov $-0xBEEF, %rax
15955+ std
15956+
15957+1: mov %edi, %ecx
15958+ and $THREAD_SIZE_asm - 1, %ecx
15959+ shr $3, %ecx
15960+ repne scasq
15961+ jecxz 2f
15962+
15963+ cmp $2*8, %ecx
15964+ jc 2f
15965+
15966+ mov $2*8, %ecx
15967+ repe scasq
15968+ jecxz 2f
15969+ jne 1b
15970+
15971+2: cld
15972+ mov %esp, %ecx
15973+ sub %edi, %ecx
15974+
15975+ cmp $THREAD_SIZE_asm, %rcx
15976+ jb 3f
15977+ ud2
15978+3:
15979+
15980+ shr $3, %ecx
15981+ rep stosq
15982+
15983+ mov TI_task_thread_sp0(%r11), %rdi
15984+ sub $256, %rdi
15985+ mov %rdi, TI_lowest_stack(%r11)
15986+
15987+ popq %r11
15988+ popq %rax
15989+ popq %rdi
15990+ pax_force_retaddr
15991+ ret
15992+ENDPROC(pax_erase_kstack)
15993+#endif
15994
15995 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15996 #ifdef CONFIG_TRACE_IRQFLAGS
15997@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
15998 .endm
15999
16000 .macro UNFAKE_STACK_FRAME
16001- addq $8*6, %rsp
16002- CFI_ADJUST_CFA_OFFSET -(6*8)
16003+ addq $8*6 + ARG_SKIP, %rsp
16004+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16005 .endm
16006
16007 /*
16008@@ -317,7 +601,7 @@ ENTRY(save_args)
16009 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16010 movq_cfi rbp, 8 /* push %rbp */
16011 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16012- testl $3, CS(%rdi)
16013+ testb $3, CS(%rdi)
16014 je 1f
16015 SWAPGS
16016 /*
16017@@ -337,9 +621,10 @@ ENTRY(save_args)
16018 * We entered an interrupt context - irqs are off:
16019 */
16020 2: TRACE_IRQS_OFF
16021+ pax_force_retaddr
16022 ret
16023 CFI_ENDPROC
16024-END(save_args)
16025+ENDPROC(save_args)
16026
16027 ENTRY(save_rest)
16028 PARTIAL_FRAME 1 REST_SKIP+8
16029@@ -352,9 +637,10 @@ ENTRY(save_rest)
16030 movq_cfi r15, R15+16
16031 movq %r11, 8(%rsp) /* return address */
16032 FIXUP_TOP_OF_STACK %r11, 16
16033+ pax_force_retaddr
16034 ret
16035 CFI_ENDPROC
16036-END(save_rest)
16037+ENDPROC(save_rest)
16038
16039 /* save complete stack frame */
16040 .pushsection .kprobes.text, "ax"
16041@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16042 js 1f /* negative -> in kernel */
16043 SWAPGS
16044 xorl %ebx,%ebx
16045-1: ret
16046+1: pax_force_retaddr_bts
16047+ ret
16048 CFI_ENDPROC
16049-END(save_paranoid)
16050+ENDPROC(save_paranoid)
16051 .popsection
16052
16053 /*
16054@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16055
16056 RESTORE_REST
16057
16058- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16059+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16060 je int_ret_from_sys_call
16061
16062 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16063@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16064 jmp ret_from_sys_call # go to the SYSRET fastpath
16065
16066 CFI_ENDPROC
16067-END(ret_from_fork)
16068+ENDPROC(ret_from_fork)
16069
16070 /*
16071 * System call entry. Upto 6 arguments in registers are supported.
16072@@ -455,7 +742,7 @@ END(ret_from_fork)
16073 ENTRY(system_call)
16074 CFI_STARTPROC simple
16075 CFI_SIGNAL_FRAME
16076- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16077+ CFI_DEF_CFA rsp,0
16078 CFI_REGISTER rip,rcx
16079 /*CFI_REGISTER rflags,r11*/
16080 SWAPGS_UNSAFE_STACK
16081@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16082
16083 movq %rsp,PER_CPU_VAR(old_rsp)
16084 movq PER_CPU_VAR(kernel_stack),%rsp
16085+ SAVE_ARGS 8*6,1
16086+ pax_enter_kernel_user
16087 /*
16088 * No need to follow this irqs off/on section - it's straight
16089 * and short:
16090 */
16091 ENABLE_INTERRUPTS(CLBR_NONE)
16092- SAVE_ARGS 8,1
16093 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16094 movq %rcx,RIP-ARGOFFSET(%rsp)
16095 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16096@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16097 system_call_fastpath:
16098 cmpq $__NR_syscall_max,%rax
16099 ja badsys
16100- movq %r10,%rcx
16101+ movq R10-ARGOFFSET(%rsp),%rcx
16102 call *sys_call_table(,%rax,8) # XXX: rip relative
16103 movq %rax,RAX-ARGOFFSET(%rsp)
16104 /*
16105@@ -502,6 +790,8 @@ sysret_check:
16106 andl %edi,%edx
16107 jnz sysret_careful
16108 CFI_REMEMBER_STATE
16109+ pax_exit_kernel_user
16110+ pax_erase_kstack
16111 /*
16112 * sysretq will re-enable interrupts:
16113 */
16114@@ -555,14 +845,18 @@ badsys:
16115 * jump back to the normal fast path.
16116 */
16117 auditsys:
16118- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16119+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16120 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16121 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16122 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16123 movq %rax,%rsi /* 2nd arg: syscall number */
16124 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16125 call audit_syscall_entry
16126+
16127+ pax_erase_kstack
16128+
16129 LOAD_ARGS 0 /* reload call-clobbered registers */
16130+ pax_set_fptr_mask
16131 jmp system_call_fastpath
16132
16133 /*
16134@@ -592,16 +886,20 @@ tracesys:
16135 FIXUP_TOP_OF_STACK %rdi
16136 movq %rsp,%rdi
16137 call syscall_trace_enter
16138+
16139+ pax_erase_kstack
16140+
16141 /*
16142 * Reload arg registers from stack in case ptrace changed them.
16143 * We don't reload %rax because syscall_trace_enter() returned
16144 * the value it wants us to use in the table lookup.
16145 */
16146 LOAD_ARGS ARGOFFSET, 1
16147+ pax_set_fptr_mask
16148 RESTORE_REST
16149 cmpq $__NR_syscall_max,%rax
16150 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16151- movq %r10,%rcx /* fixup for C */
16152+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16153 call *sys_call_table(,%rax,8)
16154 movq %rax,RAX-ARGOFFSET(%rsp)
16155 /* Use IRET because user could have changed frame */
16156@@ -613,7 +911,7 @@ tracesys:
16157 GLOBAL(int_ret_from_sys_call)
16158 DISABLE_INTERRUPTS(CLBR_NONE)
16159 TRACE_IRQS_OFF
16160- testl $3,CS-ARGOFFSET(%rsp)
16161+ testb $3,CS-ARGOFFSET(%rsp)
16162 je retint_restore_args
16163 movl $_TIF_ALLWORK_MASK,%edi
16164 /* edi: mask to check */
16165@@ -624,6 +922,7 @@ GLOBAL(int_with_check)
16166 andl %edi,%edx
16167 jnz int_careful
16168 andl $~TS_COMPAT,TI_status(%rcx)
16169+ pax_erase_kstack
16170 jmp retint_swapgs
16171
16172 /* Either reschedule or signal or syscall exit tracking needed. */
16173@@ -674,7 +973,7 @@ int_restore_rest:
16174 TRACE_IRQS_OFF
16175 jmp int_with_check
16176 CFI_ENDPROC
16177-END(system_call)
16178+ENDPROC(system_call)
16179
16180 /*
16181 * Certain special system calls that need to save a complete full stack frame.
16182@@ -690,7 +989,7 @@ ENTRY(\label)
16183 call \func
16184 jmp ptregscall_common
16185 CFI_ENDPROC
16186-END(\label)
16187+ENDPROC(\label)
16188 .endm
16189
16190 PTREGSCALL stub_clone, sys_clone, %r8
16191@@ -708,9 +1007,10 @@ ENTRY(ptregscall_common)
16192 movq_cfi_restore R12+8, r12
16193 movq_cfi_restore RBP+8, rbp
16194 movq_cfi_restore RBX+8, rbx
16195+ pax_force_retaddr
16196 ret $REST_SKIP /* pop extended registers */
16197 CFI_ENDPROC
16198-END(ptregscall_common)
16199+ENDPROC(ptregscall_common)
16200
16201 ENTRY(stub_execve)
16202 CFI_STARTPROC
16203@@ -726,7 +1026,7 @@ ENTRY(stub_execve)
16204 RESTORE_REST
16205 jmp int_ret_from_sys_call
16206 CFI_ENDPROC
16207-END(stub_execve)
16208+ENDPROC(stub_execve)
16209
16210 /*
16211 * sigreturn is special because it needs to restore all registers on return.
16212@@ -744,7 +1044,7 @@ ENTRY(stub_rt_sigreturn)
16213 RESTORE_REST
16214 jmp int_ret_from_sys_call
16215 CFI_ENDPROC
16216-END(stub_rt_sigreturn)
16217+ENDPROC(stub_rt_sigreturn)
16218
16219 /*
16220 * Build the entry stubs and pointer table with some assembler magic.
16221@@ -780,7 +1080,7 @@ vector=vector+1
16222 2: jmp common_interrupt
16223 .endr
16224 CFI_ENDPROC
16225-END(irq_entries_start)
16226+ENDPROC(irq_entries_start)
16227
16228 .previous
16229 END(interrupt)
16230@@ -800,6 +1100,16 @@ END(interrupt)
16231 CFI_ADJUST_CFA_OFFSET 10*8
16232 call save_args
16233 PARTIAL_FRAME 0
16234+#ifdef CONFIG_PAX_MEMORY_UDEREF
16235+ testb $3, CS(%rdi)
16236+ jnz 1f
16237+ pax_enter_kernel
16238+ jmp 2f
16239+1: pax_enter_kernel_user
16240+2:
16241+#else
16242+ pax_enter_kernel
16243+#endif
16244 call \func
16245 .endm
16246
16247@@ -822,7 +1132,7 @@ ret_from_intr:
16248 CFI_ADJUST_CFA_OFFSET -8
16249 exit_intr:
16250 GET_THREAD_INFO(%rcx)
16251- testl $3,CS-ARGOFFSET(%rsp)
16252+ testb $3,CS-ARGOFFSET(%rsp)
16253 je retint_kernel
16254
16255 /* Interrupt came from user space */
16256@@ -844,12 +1154,15 @@ retint_swapgs: /* return to user-space */
16257 * The iretq could re-enable interrupts:
16258 */
16259 DISABLE_INTERRUPTS(CLBR_ANY)
16260+ pax_exit_kernel_user
16261 TRACE_IRQS_IRETQ
16262 SWAPGS
16263 jmp restore_args
16264
16265 retint_restore_args: /* return to kernel space */
16266 DISABLE_INTERRUPTS(CLBR_ANY)
16267+ pax_exit_kernel
16268+ pax_force_retaddr RIP-ARGOFFSET
16269 /*
16270 * The iretq could re-enable interrupts:
16271 */
16272@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16273 #endif
16274
16275 CFI_ENDPROC
16276-END(common_interrupt)
16277+ENDPROC(common_interrupt)
16278
16279 /*
16280 * APIC interrupts.
16281@@ -953,7 +1266,7 @@ ENTRY(\sym)
16282 interrupt \do_sym
16283 jmp ret_from_intr
16284 CFI_ENDPROC
16285-END(\sym)
16286+ENDPROC(\sym)
16287 .endm
16288
16289 #ifdef CONFIG_SMP
16290@@ -1032,12 +1345,22 @@ ENTRY(\sym)
16291 CFI_ADJUST_CFA_OFFSET 15*8
16292 call error_entry
16293 DEFAULT_FRAME 0
16294+#ifdef CONFIG_PAX_MEMORY_UDEREF
16295+ testb $3, CS(%rsp)
16296+ jnz 1f
16297+ pax_enter_kernel
16298+ jmp 2f
16299+1: pax_enter_kernel_user
16300+2:
16301+#else
16302+ pax_enter_kernel
16303+#endif
16304 movq %rsp,%rdi /* pt_regs pointer */
16305 xorl %esi,%esi /* no error code */
16306 call \do_sym
16307 jmp error_exit /* %ebx: no swapgs flag */
16308 CFI_ENDPROC
16309-END(\sym)
16310+ENDPROC(\sym)
16311 .endm
16312
16313 .macro paranoidzeroentry sym do_sym
16314@@ -1049,12 +1372,22 @@ ENTRY(\sym)
16315 subq $15*8, %rsp
16316 call save_paranoid
16317 TRACE_IRQS_OFF
16318+#ifdef CONFIG_PAX_MEMORY_UDEREF
16319+ testb $3, CS(%rsp)
16320+ jnz 1f
16321+ pax_enter_kernel
16322+ jmp 2f
16323+1: pax_enter_kernel_user
16324+2:
16325+#else
16326+ pax_enter_kernel
16327+#endif
16328 movq %rsp,%rdi /* pt_regs pointer */
16329 xorl %esi,%esi /* no error code */
16330 call \do_sym
16331 jmp paranoid_exit /* %ebx: no swapgs flag */
16332 CFI_ENDPROC
16333-END(\sym)
16334+ENDPROC(\sym)
16335 .endm
16336
16337 .macro paranoidzeroentry_ist sym do_sym ist
16338@@ -1066,15 +1399,30 @@ ENTRY(\sym)
16339 subq $15*8, %rsp
16340 call save_paranoid
16341 TRACE_IRQS_OFF
16342+#ifdef CONFIG_PAX_MEMORY_UDEREF
16343+ testb $3, CS(%rsp)
16344+ jnz 1f
16345+ pax_enter_kernel
16346+ jmp 2f
16347+1: pax_enter_kernel_user
16348+2:
16349+#else
16350+ pax_enter_kernel
16351+#endif
16352 movq %rsp,%rdi /* pt_regs pointer */
16353 xorl %esi,%esi /* no error code */
16354- PER_CPU(init_tss, %rbp)
16355+#ifdef CONFIG_SMP
16356+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16357+ lea init_tss(%rbp), %rbp
16358+#else
16359+ lea init_tss(%rip), %rbp
16360+#endif
16361 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16362 call \do_sym
16363 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16364 jmp paranoid_exit /* %ebx: no swapgs flag */
16365 CFI_ENDPROC
16366-END(\sym)
16367+ENDPROC(\sym)
16368 .endm
16369
16370 .macro errorentry sym do_sym
16371@@ -1085,13 +1433,23 @@ ENTRY(\sym)
16372 CFI_ADJUST_CFA_OFFSET 15*8
16373 call error_entry
16374 DEFAULT_FRAME 0
16375+#ifdef CONFIG_PAX_MEMORY_UDEREF
16376+ testb $3, CS(%rsp)
16377+ jnz 1f
16378+ pax_enter_kernel
16379+ jmp 2f
16380+1: pax_enter_kernel_user
16381+2:
16382+#else
16383+ pax_enter_kernel
16384+#endif
16385 movq %rsp,%rdi /* pt_regs pointer */
16386 movq ORIG_RAX(%rsp),%rsi /* get error code */
16387 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16388 call \do_sym
16389 jmp error_exit /* %ebx: no swapgs flag */
16390 CFI_ENDPROC
16391-END(\sym)
16392+ENDPROC(\sym)
16393 .endm
16394
16395 /* error code is on the stack already */
16396@@ -1104,13 +1462,23 @@ ENTRY(\sym)
16397 call save_paranoid
16398 DEFAULT_FRAME 0
16399 TRACE_IRQS_OFF
16400+#ifdef CONFIG_PAX_MEMORY_UDEREF
16401+ testb $3, CS(%rsp)
16402+ jnz 1f
16403+ pax_enter_kernel
16404+ jmp 2f
16405+1: pax_enter_kernel_user
16406+2:
16407+#else
16408+ pax_enter_kernel
16409+#endif
16410 movq %rsp,%rdi /* pt_regs pointer */
16411 movq ORIG_RAX(%rsp),%rsi /* get error code */
16412 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16413 call \do_sym
16414 jmp paranoid_exit /* %ebx: no swapgs flag */
16415 CFI_ENDPROC
16416-END(\sym)
16417+ENDPROC(\sym)
16418 .endm
16419
16420 zeroentry divide_error do_divide_error
16421@@ -1141,9 +1509,10 @@ gs_change:
16422 SWAPGS
16423 popf
16424 CFI_ADJUST_CFA_OFFSET -8
16425+ pax_force_retaddr
16426 ret
16427 CFI_ENDPROC
16428-END(native_load_gs_index)
16429+ENDPROC(native_load_gs_index)
16430
16431 .section __ex_table,"a"
16432 .align 8
16433@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16434 * of hacks for example to fork off the per-CPU idle tasks.
16435 * [Hopefully no generic code relies on the reschedule -AK]
16436 */
16437- RESTORE_ALL
16438+ RESTORE_REST
16439 UNFAKE_STACK_FRAME
16440+ pax_force_retaddr
16441 ret
16442 CFI_ENDPROC
16443-END(kernel_thread)
16444+ENDPROC(kernel_thread)
16445
16446 ENTRY(child_rip)
16447 pushq $0 # fake return address
16448@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16449 */
16450 movq %rdi, %rax
16451 movq %rsi, %rdi
16452+ pax_force_fptr %rax
16453 call *%rax
16454 # exit
16455 mov %eax, %edi
16456 call do_exit
16457 ud2 # padding for call trace
16458 CFI_ENDPROC
16459-END(child_rip)
16460+ENDPROC(child_rip)
16461
16462 /*
16463 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16464@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16465 RESTORE_REST
16466 testq %rax,%rax
16467 je int_ret_from_sys_call
16468- RESTORE_ARGS
16469 UNFAKE_STACK_FRAME
16470+ pax_force_retaddr
16471 ret
16472 CFI_ENDPROC
16473-END(kernel_execve)
16474+ENDPROC(kernel_execve)
16475
16476 /* Call softirq on interrupt stack. Interrupts are off. */
16477 ENTRY(call_softirq)
16478@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16479 CFI_DEF_CFA_REGISTER rsp
16480 CFI_ADJUST_CFA_OFFSET -8
16481 decl PER_CPU_VAR(irq_count)
16482+ pax_force_retaddr
16483 ret
16484 CFI_ENDPROC
16485-END(call_softirq)
16486+ENDPROC(call_softirq)
16487
16488 #ifdef CONFIG_XEN
16489 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16490@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16491 decl PER_CPU_VAR(irq_count)
16492 jmp error_exit
16493 CFI_ENDPROC
16494-END(xen_do_hypervisor_callback)
16495+ENDPROC(xen_do_hypervisor_callback)
16496
16497 /*
16498 * Hypervisor uses this for application faults while it executes.
16499@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16500 SAVE_ALL
16501 jmp error_exit
16502 CFI_ENDPROC
16503-END(xen_failsafe_callback)
16504+ENDPROC(xen_failsafe_callback)
16505
16506 #endif /* CONFIG_XEN */
16507
16508@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16509 TRACE_IRQS_OFF
16510 testl %ebx,%ebx /* swapgs needed? */
16511 jnz paranoid_restore
16512- testl $3,CS(%rsp)
16513+ testb $3,CS(%rsp)
16514 jnz paranoid_userspace
16515+#ifdef CONFIG_PAX_MEMORY_UDEREF
16516+ pax_exit_kernel
16517+ TRACE_IRQS_IRETQ 0
16518+ SWAPGS_UNSAFE_STACK
16519+ RESTORE_ALL 8
16520+ pax_force_retaddr_bts
16521+ jmp irq_return
16522+#endif
16523 paranoid_swapgs:
16524+#ifdef CONFIG_PAX_MEMORY_UDEREF
16525+ pax_exit_kernel_user
16526+#else
16527+ pax_exit_kernel
16528+#endif
16529 TRACE_IRQS_IRETQ 0
16530 SWAPGS_UNSAFE_STACK
16531 RESTORE_ALL 8
16532 jmp irq_return
16533 paranoid_restore:
16534+ pax_exit_kernel
16535 TRACE_IRQS_IRETQ 0
16536 RESTORE_ALL 8
16537+ pax_force_retaddr_bts
16538 jmp irq_return
16539 paranoid_userspace:
16540 GET_THREAD_INFO(%rcx)
16541@@ -1443,7 +1830,7 @@ paranoid_schedule:
16542 TRACE_IRQS_OFF
16543 jmp paranoid_userspace
16544 CFI_ENDPROC
16545-END(paranoid_exit)
16546+ENDPROC(paranoid_exit)
16547
16548 /*
16549 * Exception entry point. This expects an error code/orig_rax on the stack.
16550@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16551 movq_cfi r14, R14+8
16552 movq_cfi r15, R15+8
16553 xorl %ebx,%ebx
16554- testl $3,CS+8(%rsp)
16555+ testb $3,CS+8(%rsp)
16556 je error_kernelspace
16557 error_swapgs:
16558 SWAPGS
16559 error_sti:
16560 TRACE_IRQS_OFF
16561+ pax_force_retaddr_bts
16562 ret
16563 CFI_ENDPROC
16564
16565@@ -1497,7 +1885,7 @@ error_kernelspace:
16566 cmpq $gs_change,RIP+8(%rsp)
16567 je error_swapgs
16568 jmp error_sti
16569-END(error_entry)
16570+ENDPROC(error_entry)
16571
16572
16573 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16574@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16575 jnz retint_careful
16576 jmp retint_swapgs
16577 CFI_ENDPROC
16578-END(error_exit)
16579+ENDPROC(error_exit)
16580
16581
16582 /* runs on exception stack */
16583@@ -1529,6 +1917,16 @@ ENTRY(nmi)
16584 CFI_ADJUST_CFA_OFFSET 15*8
16585 call save_paranoid
16586 DEFAULT_FRAME 0
16587+#ifdef CONFIG_PAX_MEMORY_UDEREF
16588+ testb $3, CS(%rsp)
16589+ jnz 1f
16590+ pax_enter_kernel
16591+ jmp 2f
16592+1: pax_enter_kernel_user
16593+2:
16594+#else
16595+ pax_enter_kernel
16596+#endif
16597 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16598 movq %rsp,%rdi
16599 movq $-1,%rsi
16600@@ -1539,12 +1937,28 @@ ENTRY(nmi)
16601 DISABLE_INTERRUPTS(CLBR_NONE)
16602 testl %ebx,%ebx /* swapgs needed? */
16603 jnz nmi_restore
16604- testl $3,CS(%rsp)
16605+ testb $3,CS(%rsp)
16606 jnz nmi_userspace
16607+#ifdef CONFIG_PAX_MEMORY_UDEREF
16608+ pax_exit_kernel
16609+ SWAPGS_UNSAFE_STACK
16610+ RESTORE_ALL 8
16611+ pax_force_retaddr_bts
16612+ jmp irq_return
16613+#endif
16614 nmi_swapgs:
16615+#ifdef CONFIG_PAX_MEMORY_UDEREF
16616+ pax_exit_kernel_user
16617+#else
16618+ pax_exit_kernel
16619+#endif
16620 SWAPGS_UNSAFE_STACK
16621+ RESTORE_ALL 8
16622+ jmp irq_return
16623 nmi_restore:
16624+ pax_exit_kernel
16625 RESTORE_ALL 8
16626+ pax_force_retaddr_bts
16627 jmp irq_return
16628 nmi_userspace:
16629 GET_THREAD_INFO(%rcx)
16630@@ -1573,14 +1987,14 @@ nmi_schedule:
16631 jmp paranoid_exit
16632 CFI_ENDPROC
16633 #endif
16634-END(nmi)
16635+ENDPROC(nmi)
16636
16637 ENTRY(ignore_sysret)
16638 CFI_STARTPROC
16639 mov $-ENOSYS,%eax
16640 sysret
16641 CFI_ENDPROC
16642-END(ignore_sysret)
16643+ENDPROC(ignore_sysret)
16644
16645 /*
16646 * End of kprobes section
16647diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16648index 9dbb527..7b3615a 100644
16649--- a/arch/x86/kernel/ftrace.c
16650+++ b/arch/x86/kernel/ftrace.c
16651@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16652 static void *mod_code_newcode; /* holds the text to write to the IP */
16653
16654 static unsigned nmi_wait_count;
16655-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16656+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16657
16658 int ftrace_arch_read_dyn_info(char *buf, int size)
16659 {
16660@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16661
16662 r = snprintf(buf, size, "%u %u",
16663 nmi_wait_count,
16664- atomic_read(&nmi_update_count));
16665+ atomic_read_unchecked(&nmi_update_count));
16666 return r;
16667 }
16668
16669@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16670 {
16671 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16672 smp_rmb();
16673+ pax_open_kernel();
16674 ftrace_mod_code();
16675- atomic_inc(&nmi_update_count);
16676+ pax_close_kernel();
16677+ atomic_inc_unchecked(&nmi_update_count);
16678 }
16679 /* Must have previous changes seen before executions */
16680 smp_mb();
16681@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16682
16683
16684
16685-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16686+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16687
16688 static unsigned char *ftrace_nop_replace(void)
16689 {
16690@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16691 {
16692 unsigned char replaced[MCOUNT_INSN_SIZE];
16693
16694+ ip = ktla_ktva(ip);
16695+
16696 /*
16697 * Note: Due to modules and __init, code can
16698 * disappear and change, we need to protect against faulting
16699@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16700 unsigned char old[MCOUNT_INSN_SIZE], *new;
16701 int ret;
16702
16703- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16704+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16705 new = ftrace_call_replace(ip, (unsigned long)func);
16706 ret = ftrace_modify_code(ip, old, new);
16707
16708@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16709 switch (faulted) {
16710 case 0:
16711 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16712- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16713+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16714 break;
16715 case 1:
16716 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16717- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16718+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16719 break;
16720 case 2:
16721 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16722- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16723+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16724 break;
16725 }
16726
16727@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16728 {
16729 unsigned char code[MCOUNT_INSN_SIZE];
16730
16731+ ip = ktla_ktva(ip);
16732+
16733 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16734 return -EFAULT;
16735
16736diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16737index 4f8e250..df24706 100644
16738--- a/arch/x86/kernel/head32.c
16739+++ b/arch/x86/kernel/head32.c
16740@@ -16,6 +16,7 @@
16741 #include <asm/apic.h>
16742 #include <asm/io_apic.h>
16743 #include <asm/bios_ebda.h>
16744+#include <asm/boot.h>
16745
16746 static void __init i386_default_early_setup(void)
16747 {
16748@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16749 {
16750 reserve_trampoline_memory();
16751
16752- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16753+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16754
16755 #ifdef CONFIG_BLK_DEV_INITRD
16756 /* Reserve INITRD */
16757diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16758index 34c3308..6fc4e76 100644
16759--- a/arch/x86/kernel/head_32.S
16760+++ b/arch/x86/kernel/head_32.S
16761@@ -19,10 +19,17 @@
16762 #include <asm/setup.h>
16763 #include <asm/processor-flags.h>
16764 #include <asm/percpu.h>
16765+#include <asm/msr-index.h>
16766
16767 /* Physical address */
16768 #define pa(X) ((X) - __PAGE_OFFSET)
16769
16770+#ifdef CONFIG_PAX_KERNEXEC
16771+#define ta(X) (X)
16772+#else
16773+#define ta(X) ((X) - __PAGE_OFFSET)
16774+#endif
16775+
16776 /*
16777 * References to members of the new_cpu_data structure.
16778 */
16779@@ -52,11 +59,7 @@
16780 * and small than max_low_pfn, otherwise will waste some page table entries
16781 */
16782
16783-#if PTRS_PER_PMD > 1
16784-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16785-#else
16786-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16787-#endif
16788+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16789
16790 /* Enough space to fit pagetables for the low memory linear map */
16791 MAPPING_BEYOND_END = \
16792@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
16793 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16794
16795 /*
16796+ * Real beginning of normal "text" segment
16797+ */
16798+ENTRY(stext)
16799+ENTRY(_stext)
16800+
16801+/*
16802 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16803 * %esi points to the real-mode code as a 32-bit pointer.
16804 * CS and DS must be 4 GB flat segments, but we don't depend on
16805@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16806 * can.
16807 */
16808 __HEAD
16809+
16810+#ifdef CONFIG_PAX_KERNEXEC
16811+ jmp startup_32
16812+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16813+.fill PAGE_SIZE-5,1,0xcc
16814+#endif
16815+
16816 ENTRY(startup_32)
16817+ movl pa(stack_start),%ecx
16818+
16819 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
16820 us to not reload segments */
16821 testb $(1<<6), BP_loadflags(%esi)
16822@@ -95,7 +113,60 @@ ENTRY(startup_32)
16823 movl %eax,%es
16824 movl %eax,%fs
16825 movl %eax,%gs
16826+ movl %eax,%ss
16827 2:
16828+ leal -__PAGE_OFFSET(%ecx),%esp
16829+
16830+#ifdef CONFIG_SMP
16831+ movl $pa(cpu_gdt_table),%edi
16832+ movl $__per_cpu_load,%eax
16833+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16834+ rorl $16,%eax
16835+ movb %al,__KERNEL_PERCPU + 4(%edi)
16836+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16837+ movl $__per_cpu_end - 1,%eax
16838+ subl $__per_cpu_start,%eax
16839+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16840+#endif
16841+
16842+#ifdef CONFIG_PAX_MEMORY_UDEREF
16843+ movl $NR_CPUS,%ecx
16844+ movl $pa(cpu_gdt_table),%edi
16845+1:
16846+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16847+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16848+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16849+ addl $PAGE_SIZE_asm,%edi
16850+ loop 1b
16851+#endif
16852+
16853+#ifdef CONFIG_PAX_KERNEXEC
16854+ movl $pa(boot_gdt),%edi
16855+ movl $__LOAD_PHYSICAL_ADDR,%eax
16856+ movw %ax,__BOOT_CS + 2(%edi)
16857+ rorl $16,%eax
16858+ movb %al,__BOOT_CS + 4(%edi)
16859+ movb %ah,__BOOT_CS + 7(%edi)
16860+ rorl $16,%eax
16861+
16862+ ljmp $(__BOOT_CS),$1f
16863+1:
16864+
16865+ movl $NR_CPUS,%ecx
16866+ movl $pa(cpu_gdt_table),%edi
16867+ addl $__PAGE_OFFSET,%eax
16868+1:
16869+ movw %ax,__KERNEL_CS + 2(%edi)
16870+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16871+ rorl $16,%eax
16872+ movb %al,__KERNEL_CS + 4(%edi)
16873+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16874+ movb %ah,__KERNEL_CS + 7(%edi)
16875+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16876+ rorl $16,%eax
16877+ addl $PAGE_SIZE_asm,%edi
16878+ loop 1b
16879+#endif
16880
16881 /*
16882 * Clear BSS first so that there are no surprises...
16883@@ -140,9 +211,7 @@ ENTRY(startup_32)
16884 cmpl $num_subarch_entries, %eax
16885 jae bad_subarch
16886
16887- movl pa(subarch_entries)(,%eax,4), %eax
16888- subl $__PAGE_OFFSET, %eax
16889- jmp *%eax
16890+ jmp *pa(subarch_entries)(,%eax,4)
16891
16892 bad_subarch:
16893 WEAK(lguest_entry)
16894@@ -154,10 +223,10 @@ WEAK(xen_entry)
16895 __INITDATA
16896
16897 subarch_entries:
16898- .long default_entry /* normal x86/PC */
16899- .long lguest_entry /* lguest hypervisor */
16900- .long xen_entry /* Xen hypervisor */
16901- .long default_entry /* Moorestown MID */
16902+ .long ta(default_entry) /* normal x86/PC */
16903+ .long ta(lguest_entry) /* lguest hypervisor */
16904+ .long ta(xen_entry) /* Xen hypervisor */
16905+ .long ta(default_entry) /* Moorestown MID */
16906 num_subarch_entries = (. - subarch_entries) / 4
16907 .previous
16908 #endif /* CONFIG_PARAVIRT */
16909@@ -218,8 +287,11 @@ default_entry:
16910 movl %eax, pa(max_pfn_mapped)
16911
16912 /* Do early initialization of the fixmap area */
16913- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16914- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16915+#ifdef CONFIG_COMPAT_VDSO
16916+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16917+#else
16918+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16919+#endif
16920 #else /* Not PAE */
16921
16922 page_pde_offset = (__PAGE_OFFSET >> 20);
16923@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16924 movl %eax, pa(max_pfn_mapped)
16925
16926 /* Do early initialization of the fixmap area */
16927- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16928- movl %eax,pa(swapper_pg_dir+0xffc)
16929+#ifdef CONFIG_COMPAT_VDSO
16930+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
16931+#else
16932+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
16933+#endif
16934 #endif
16935 jmp 3f
16936 /*
16937@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
16938 movl %eax,%es
16939 movl %eax,%fs
16940 movl %eax,%gs
16941+ movl pa(stack_start),%ecx
16942+ movl %eax,%ss
16943+ leal -__PAGE_OFFSET(%ecx),%esp
16944 #endif /* CONFIG_SMP */
16945 3:
16946
16947@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
16948 orl %edx,%eax
16949 movl %eax,%cr4
16950
16951+#ifdef CONFIG_X86_PAE
16952 btl $5, %eax # check if PAE is enabled
16953 jnc 6f
16954
16955@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
16956 cpuid
16957 cmpl $0x80000000, %eax
16958 jbe 6f
16959+
16960+ /* Clear bogus XD_DISABLE bits */
16961+ call verify_cpu
16962+
16963 mov $0x80000001, %eax
16964 cpuid
16965 /* Execute Disable bit supported? */
16966@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
16967 jnc 6f
16968
16969 /* Setup EFER (Extended Feature Enable Register) */
16970- movl $0xc0000080, %ecx
16971+ movl $MSR_EFER, %ecx
16972 rdmsr
16973
16974 btsl $11, %eax
16975 /* Make changes effective */
16976 wrmsr
16977
16978+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16979+ movl $1,pa(nx_enabled)
16980+#endif
16981+
16982 6:
16983
16984 /*
16985@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
16986 movl %eax,%cr0 /* ..and set paging (PG) bit */
16987 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
16988 1:
16989- /* Set up the stack pointer */
16990- lss stack_start,%esp
16991+ /* Shift the stack pointer to a virtual address */
16992+ addl $__PAGE_OFFSET, %esp
16993
16994 /*
16995 * Initialize eflags. Some BIOS's leave bits like NT set. This would
16996@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
16997
16998 #ifdef CONFIG_SMP
16999 cmpb $0, ready
17000- jz 1f /* Initial CPU cleans BSS */
17001- jmp checkCPUtype
17002-1:
17003+ jnz checkCPUtype
17004 #endif /* CONFIG_SMP */
17005
17006 /*
17007@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17008 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17009 movl %eax,%ss # after changing gdt.
17010
17011- movl $(__USER_DS),%eax # DS/ES contains default USER segment
17012+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17013 movl %eax,%ds
17014 movl %eax,%es
17015
17016@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17017 */
17018 cmpb $0,ready
17019 jne 1f
17020- movl $per_cpu__gdt_page,%eax
17021+ movl $cpu_gdt_table,%eax
17022 movl $per_cpu__stack_canary,%ecx
17023+#ifdef CONFIG_SMP
17024+ addl $__per_cpu_load,%ecx
17025+#endif
17026 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17027 shrl $16, %ecx
17028 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17029 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17030 1:
17031-#endif
17032 movl $(__KERNEL_STACK_CANARY),%eax
17033+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17034+ movl $(__USER_DS),%eax
17035+#else
17036+ xorl %eax,%eax
17037+#endif
17038 movl %eax,%gs
17039
17040 xorl %eax,%eax # Clear LDT
17041@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17042
17043 cld # gcc2 wants the direction flag cleared at all times
17044 pushl $0 # fake return address for unwinder
17045-#ifdef CONFIG_SMP
17046- movb ready, %cl
17047 movb $1, ready
17048- cmpb $0,%cl # the first CPU calls start_kernel
17049- je 1f
17050- movl (stack_start), %esp
17051-1:
17052-#endif /* CONFIG_SMP */
17053 jmp *(initial_code)
17054
17055 /*
17056@@ -546,22 +631,22 @@ early_page_fault:
17057 jmp early_fault
17058
17059 early_fault:
17060- cld
17061 #ifdef CONFIG_PRINTK
17062+ cmpl $1,%ss:early_recursion_flag
17063+ je hlt_loop
17064+ incl %ss:early_recursion_flag
17065+ cld
17066 pusha
17067 movl $(__KERNEL_DS),%eax
17068 movl %eax,%ds
17069 movl %eax,%es
17070- cmpl $2,early_recursion_flag
17071- je hlt_loop
17072- incl early_recursion_flag
17073 movl %cr2,%eax
17074 pushl %eax
17075 pushl %edx /* trapno */
17076 pushl $fault_msg
17077 call printk
17078+; call dump_stack
17079 #endif
17080- call dump_stack
17081 hlt_loop:
17082 hlt
17083 jmp hlt_loop
17084@@ -569,8 +654,11 @@ hlt_loop:
17085 /* This is the default interrupt "handler" :-) */
17086 ALIGN
17087 ignore_int:
17088- cld
17089 #ifdef CONFIG_PRINTK
17090+ cmpl $2,%ss:early_recursion_flag
17091+ je hlt_loop
17092+ incl %ss:early_recursion_flag
17093+ cld
17094 pushl %eax
17095 pushl %ecx
17096 pushl %edx
17097@@ -579,9 +667,6 @@ ignore_int:
17098 movl $(__KERNEL_DS),%eax
17099 movl %eax,%ds
17100 movl %eax,%es
17101- cmpl $2,early_recursion_flag
17102- je hlt_loop
17103- incl early_recursion_flag
17104 pushl 16(%esp)
17105 pushl 24(%esp)
17106 pushl 32(%esp)
17107@@ -600,6 +685,8 @@ ignore_int:
17108 #endif
17109 iret
17110
17111+#include "verify_cpu.S"
17112+
17113 __REFDATA
17114 .align 4
17115 ENTRY(initial_code)
17116@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17117 /*
17118 * BSS section
17119 */
17120-__PAGE_ALIGNED_BSS
17121- .align PAGE_SIZE_asm
17122 #ifdef CONFIG_X86_PAE
17123+.section .swapper_pg_pmd,"a",@progbits
17124 swapper_pg_pmd:
17125 .fill 1024*KPMDS,4,0
17126 #else
17127+.section .swapper_pg_dir,"a",@progbits
17128 ENTRY(swapper_pg_dir)
17129 .fill 1024,4,0
17130 #endif
17131+.section .swapper_pg_fixmap,"a",@progbits
17132 swapper_pg_fixmap:
17133 .fill 1024,4,0
17134 #ifdef CONFIG_X86_TRAMPOLINE
17135+.section .trampoline_pg_dir,"a",@progbits
17136 ENTRY(trampoline_pg_dir)
17137+#ifdef CONFIG_X86_PAE
17138+ .fill 4,8,0
17139+#else
17140 .fill 1024,4,0
17141 #endif
17142+#endif
17143+
17144+.section .empty_zero_page,"a",@progbits
17145 ENTRY(empty_zero_page)
17146 .fill 4096,1,0
17147
17148 /*
17149+ * The IDT has to be page-aligned to simplify the Pentium
17150+ * F0 0F bug workaround.. We have a special link segment
17151+ * for this.
17152+ */
17153+.section .idt,"a",@progbits
17154+ENTRY(idt_table)
17155+ .fill 256,8,0
17156+
17157+/*
17158 * This starts the data section.
17159 */
17160 #ifdef CONFIG_X86_PAE
17161-__PAGE_ALIGNED_DATA
17162- /* Page-aligned for the benefit of paravirt? */
17163- .align PAGE_SIZE_asm
17164+.section .swapper_pg_dir,"a",@progbits
17165+
17166 ENTRY(swapper_pg_dir)
17167 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17168 # if KPMDS == 3
17169@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17170 # error "Kernel PMDs should be 1, 2 or 3"
17171 # endif
17172 .align PAGE_SIZE_asm /* needs to be page-sized too */
17173+
17174+#ifdef CONFIG_PAX_PER_CPU_PGD
17175+ENTRY(cpu_pgd)
17176+ .rept NR_CPUS
17177+ .fill 4,8,0
17178+ .endr
17179+#endif
17180+
17181 #endif
17182
17183 .data
17184+.balign 4
17185 ENTRY(stack_start)
17186- .long init_thread_union+THREAD_SIZE
17187- .long __BOOT_DS
17188+ .long init_thread_union+THREAD_SIZE-8
17189
17190 ready: .byte 0
17191
17192+.section .rodata,"a",@progbits
17193 early_recursion_flag:
17194 .long 0
17195
17196@@ -697,7 +809,7 @@ fault_msg:
17197 .word 0 # 32 bit align gdt_desc.address
17198 boot_gdt_descr:
17199 .word __BOOT_DS+7
17200- .long boot_gdt - __PAGE_OFFSET
17201+ .long pa(boot_gdt)
17202
17203 .word 0 # 32-bit align idt_desc.address
17204 idt_descr:
17205@@ -708,7 +820,7 @@ idt_descr:
17206 .word 0 # 32 bit align gdt_desc.address
17207 ENTRY(early_gdt_descr)
17208 .word GDT_ENTRIES*8-1
17209- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17210+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17211
17212 /*
17213 * The boot_gdt must mirror the equivalent in setup.S and is
17214@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17215 .align L1_CACHE_BYTES
17216 ENTRY(boot_gdt)
17217 .fill GDT_ENTRY_BOOT_CS,8,0
17218- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17219- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17220+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17221+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17222+
17223+ .align PAGE_SIZE_asm
17224+ENTRY(cpu_gdt_table)
17225+ .rept NR_CPUS
17226+ .quad 0x0000000000000000 /* NULL descriptor */
17227+ .quad 0x0000000000000000 /* 0x0b reserved */
17228+ .quad 0x0000000000000000 /* 0x13 reserved */
17229+ .quad 0x0000000000000000 /* 0x1b reserved */
17230+
17231+#ifdef CONFIG_PAX_KERNEXEC
17232+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17233+#else
17234+ .quad 0x0000000000000000 /* 0x20 unused */
17235+#endif
17236+
17237+ .quad 0x0000000000000000 /* 0x28 unused */
17238+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17239+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17240+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17241+ .quad 0x0000000000000000 /* 0x4b reserved */
17242+ .quad 0x0000000000000000 /* 0x53 reserved */
17243+ .quad 0x0000000000000000 /* 0x5b reserved */
17244+
17245+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17246+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17247+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17248+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17249+
17250+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17251+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17252+
17253+ /*
17254+ * Segments used for calling PnP BIOS have byte granularity.
17255+ * The code segments and data segments have fixed 64k limits,
17256+ * the transfer segment sizes are set at run time.
17257+ */
17258+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17259+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17260+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17261+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17262+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17263+
17264+ /*
17265+ * The APM segments have byte granularity and their bases
17266+ * are set at run time. All have 64k limits.
17267+ */
17268+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17269+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17270+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17271+
17272+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17273+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17274+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17275+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17276+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17277+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17278+
17279+ /* Be sure this is zeroed to avoid false validations in Xen */
17280+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17281+ .endr
17282diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17283index 780cd92..758b2a6 100644
17284--- a/arch/x86/kernel/head_64.S
17285+++ b/arch/x86/kernel/head_64.S
17286@@ -19,6 +19,8 @@
17287 #include <asm/cache.h>
17288 #include <asm/processor-flags.h>
17289 #include <asm/percpu.h>
17290+#include <asm/cpufeature.h>
17291+#include <asm/alternative-asm.h>
17292
17293 #ifdef CONFIG_PARAVIRT
17294 #include <asm/asm-offsets.h>
17295@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17296 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17297 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17298 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17299+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17300+L3_VMALLOC_START = pud_index(VMALLOC_START)
17301+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17302+L3_VMALLOC_END = pud_index(VMALLOC_END)
17303+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17304+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17305
17306 .text
17307 __HEAD
17308@@ -85,35 +93,23 @@ startup_64:
17309 */
17310 addq %rbp, init_level4_pgt + 0(%rip)
17311 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17312+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17313+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17314+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17315 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17316
17317 addq %rbp, level3_ident_pgt + 0(%rip)
17318+#ifndef CONFIG_XEN
17319+ addq %rbp, level3_ident_pgt + 8(%rip)
17320+#endif
17321
17322- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17323- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17324+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17325+
17326+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17327+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17328
17329 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17330-
17331- /* Add an Identity mapping if I am above 1G */
17332- leaq _text(%rip), %rdi
17333- andq $PMD_PAGE_MASK, %rdi
17334-
17335- movq %rdi, %rax
17336- shrq $PUD_SHIFT, %rax
17337- andq $(PTRS_PER_PUD - 1), %rax
17338- jz ident_complete
17339-
17340- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17341- leaq level3_ident_pgt(%rip), %rbx
17342- movq %rdx, 0(%rbx, %rax, 8)
17343-
17344- movq %rdi, %rax
17345- shrq $PMD_SHIFT, %rax
17346- andq $(PTRS_PER_PMD - 1), %rax
17347- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17348- leaq level2_spare_pgt(%rip), %rbx
17349- movq %rdx, 0(%rbx, %rax, 8)
17350-ident_complete:
17351+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17352
17353 /*
17354 * Fixup the kernel text+data virtual addresses. Note that
17355@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17356 * after the boot processor executes this code.
17357 */
17358
17359- /* Enable PAE mode and PGE */
17360- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17361+ /* Enable PAE mode and PSE/PGE */
17362+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17363 movq %rax, %cr4
17364
17365 /* Setup early boot stage 4 level pagetables. */
17366@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17367 movl $MSR_EFER, %ecx
17368 rdmsr
17369 btsl $_EFER_SCE, %eax /* Enable System Call */
17370- btl $20,%edi /* No Execute supported? */
17371+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17372 jnc 1f
17373 btsl $_EFER_NX, %eax
17374+ leaq init_level4_pgt(%rip), %rdi
17375+#ifndef CONFIG_EFI
17376+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17377+#endif
17378+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17379+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17380+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17381 1: wrmsr /* Make changes effective */
17382
17383 /* Setup cr0 */
17384@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17385 * jump. In addition we need to ensure %cs is set so we make this
17386 * a far return.
17387 */
17388+ pax_set_fptr_mask
17389 movq initial_code(%rip),%rax
17390 pushq $0 # fake return address to stop unwinder
17391 pushq $__KERNEL_CS # set correct cs
17392@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17393 .quad x86_64_start_kernel
17394 ENTRY(initial_gs)
17395 .quad INIT_PER_CPU_VAR(irq_stack_union)
17396- __FINITDATA
17397
17398 ENTRY(stack_start)
17399 .quad init_thread_union+THREAD_SIZE-8
17400 .word 0
17401+ __FINITDATA
17402
17403 bad_address:
17404 jmp bad_address
17405
17406- .section ".init.text","ax"
17407+ __INIT
17408 #ifdef CONFIG_EARLY_PRINTK
17409 .globl early_idt_handlers
17410 early_idt_handlers:
17411@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17412 #endif /* EARLY_PRINTK */
17413 1: hlt
17414 jmp 1b
17415+ .previous
17416
17417 #ifdef CONFIG_EARLY_PRINTK
17418+ __INITDATA
17419 early_recursion_flag:
17420 .long 0
17421+ .previous
17422
17423+ .section .rodata,"a",@progbits
17424 early_idt_msg:
17425 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17426 early_idt_ripmsg:
17427 .asciz "RIP %s\n"
17428+ .previous
17429 #endif /* CONFIG_EARLY_PRINTK */
17430- .previous
17431
17432+ .section .rodata,"a",@progbits
17433 #define NEXT_PAGE(name) \
17434 .balign PAGE_SIZE; \
17435 ENTRY(name)
17436@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17437 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17438 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17439 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17440+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17441+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17442+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17443+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17444+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17445+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17446 .org init_level4_pgt + L4_START_KERNEL*8, 0
17447 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17448 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17449
17450+#ifdef CONFIG_PAX_PER_CPU_PGD
17451+NEXT_PAGE(cpu_pgd)
17452+ .rept NR_CPUS
17453+ .fill 512,8,0
17454+ .endr
17455+#endif
17456+
17457 NEXT_PAGE(level3_ident_pgt)
17458 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17459+#ifdef CONFIG_XEN
17460 .fill 511,8,0
17461+#else
17462+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17463+ .fill 510,8,0
17464+#endif
17465+
17466+NEXT_PAGE(level3_vmalloc_start_pgt)
17467+ .fill 512,8,0
17468+
17469+NEXT_PAGE(level3_vmalloc_end_pgt)
17470+ .fill 512,8,0
17471+
17472+NEXT_PAGE(level3_vmemmap_pgt)
17473+ .fill L3_VMEMMAP_START,8,0
17474+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17475
17476 NEXT_PAGE(level3_kernel_pgt)
17477 .fill L3_START_KERNEL,8,0
17478@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17479 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17480 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17481
17482+NEXT_PAGE(level2_vmemmap_pgt)
17483+ .fill 512,8,0
17484+
17485 NEXT_PAGE(level2_fixmap_pgt)
17486- .fill 506,8,0
17487- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17488- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17489- .fill 5,8,0
17490+ .fill 507,8,0
17491+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17492+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17493+ .fill 4,8,0
17494
17495-NEXT_PAGE(level1_fixmap_pgt)
17496+NEXT_PAGE(level1_vsyscall_pgt)
17497 .fill 512,8,0
17498
17499-NEXT_PAGE(level2_ident_pgt)
17500- /* Since I easily can, map the first 1G.
17501+ /* Since I easily can, map the first 2G.
17502 * Don't set NX because code runs from these pages.
17503 */
17504- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17505+NEXT_PAGE(level2_ident_pgt)
17506+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17507
17508 NEXT_PAGE(level2_kernel_pgt)
17509 /*
17510@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17511 * If you want to increase this then increase MODULES_VADDR
17512 * too.)
17513 */
17514- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17515- KERNEL_IMAGE_SIZE/PMD_SIZE)
17516-
17517-NEXT_PAGE(level2_spare_pgt)
17518- .fill 512, 8, 0
17519+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17520
17521 #undef PMDS
17522 #undef NEXT_PAGE
17523
17524- .data
17525+ .align PAGE_SIZE
17526+ENTRY(cpu_gdt_table)
17527+ .rept NR_CPUS
17528+ .quad 0x0000000000000000 /* NULL descriptor */
17529+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17530+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17531+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17532+ .quad 0x00cffb000000ffff /* __USER32_CS */
17533+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17534+ .quad 0x00affb000000ffff /* __USER_CS */
17535+
17536+#ifdef CONFIG_PAX_KERNEXEC
17537+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17538+#else
17539+ .quad 0x0 /* unused */
17540+#endif
17541+
17542+ .quad 0,0 /* TSS */
17543+ .quad 0,0 /* LDT */
17544+ .quad 0,0,0 /* three TLS descriptors */
17545+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17546+ /* asm/segment.h:GDT_ENTRIES must match this */
17547+
17548+ /* zero the remaining page */
17549+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17550+ .endr
17551+
17552 .align 16
17553 .globl early_gdt_descr
17554 early_gdt_descr:
17555 .word GDT_ENTRIES*8-1
17556 early_gdt_descr_base:
17557- .quad INIT_PER_CPU_VAR(gdt_page)
17558+ .quad cpu_gdt_table
17559
17560 ENTRY(phys_base)
17561 /* This must match the first entry in level2_kernel_pgt */
17562 .quad 0x0000000000000000
17563
17564 #include "../../x86/xen/xen-head.S"
17565-
17566- .section .bss, "aw", @nobits
17567+
17568+ .section .rodata,"a",@progbits
17569 .align L1_CACHE_BYTES
17570 ENTRY(idt_table)
17571- .skip IDT_ENTRIES * 16
17572+ .fill 512,8,0
17573
17574 __PAGE_ALIGNED_BSS
17575 .align PAGE_SIZE
17576diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17577index 9c3bd4a..e1d9b35 100644
17578--- a/arch/x86/kernel/i386_ksyms_32.c
17579+++ b/arch/x86/kernel/i386_ksyms_32.c
17580@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17581 EXPORT_SYMBOL(cmpxchg8b_emu);
17582 #endif
17583
17584+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17585+
17586 /* Networking helper routines. */
17587 EXPORT_SYMBOL(csum_partial_copy_generic);
17588+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17589+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17590
17591 EXPORT_SYMBOL(__get_user_1);
17592 EXPORT_SYMBOL(__get_user_2);
17593@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17594
17595 EXPORT_SYMBOL(csum_partial);
17596 EXPORT_SYMBOL(empty_zero_page);
17597+
17598+#ifdef CONFIG_PAX_KERNEXEC
17599+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17600+#endif
17601diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17602index df89102..a244320 100644
17603--- a/arch/x86/kernel/i8259.c
17604+++ b/arch/x86/kernel/i8259.c
17605@@ -208,7 +208,7 @@ spurious_8259A_irq:
17606 "spurious 8259A interrupt: IRQ%d.\n", irq);
17607 spurious_irq_mask |= irqmask;
17608 }
17609- atomic_inc(&irq_err_count);
17610+ atomic_inc_unchecked(&irq_err_count);
17611 /*
17612 * Theoretically we do not have to handle this IRQ,
17613 * but in Linux this does not cause problems and is
17614diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17615index 3a54dcb..1c22348 100644
17616--- a/arch/x86/kernel/init_task.c
17617+++ b/arch/x86/kernel/init_task.c
17618@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17619 * way process stacks are handled. This is done by having a special
17620 * "init_task" linker map entry..
17621 */
17622-union thread_union init_thread_union __init_task_data =
17623- { INIT_THREAD_INFO(init_task) };
17624+union thread_union init_thread_union __init_task_data;
17625
17626 /*
17627 * Initial task structure.
17628@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17629 * section. Since TSS's are completely CPU-local, we want them
17630 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17631 */
17632-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17633-
17634+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17635+EXPORT_SYMBOL(init_tss);
17636diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17637index 99c4d30..74c84e9 100644
17638--- a/arch/x86/kernel/ioport.c
17639+++ b/arch/x86/kernel/ioport.c
17640@@ -6,6 +6,7 @@
17641 #include <linux/sched.h>
17642 #include <linux/kernel.h>
17643 #include <linux/capability.h>
17644+#include <linux/security.h>
17645 #include <linux/errno.h>
17646 #include <linux/types.h>
17647 #include <linux/ioport.h>
17648@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17649
17650 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17651 return -EINVAL;
17652+#ifdef CONFIG_GRKERNSEC_IO
17653+ if (turn_on && grsec_disable_privio) {
17654+ gr_handle_ioperm();
17655+ return -EPERM;
17656+ }
17657+#endif
17658 if (turn_on && !capable(CAP_SYS_RAWIO))
17659 return -EPERM;
17660
17661@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17662 * because the ->io_bitmap_max value must match the bitmap
17663 * contents:
17664 */
17665- tss = &per_cpu(init_tss, get_cpu());
17666+ tss = init_tss + get_cpu();
17667
17668 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17669
17670@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17671 return -EINVAL;
17672 /* Trying to gain more privileges? */
17673 if (level > old) {
17674+#ifdef CONFIG_GRKERNSEC_IO
17675+ if (grsec_disable_privio) {
17676+ gr_handle_iopl();
17677+ return -EPERM;
17678+ }
17679+#endif
17680 if (!capable(CAP_SYS_RAWIO))
17681 return -EPERM;
17682 }
17683diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17684index 04bbd52..83a07d9 100644
17685--- a/arch/x86/kernel/irq.c
17686+++ b/arch/x86/kernel/irq.c
17687@@ -15,7 +15,7 @@
17688 #include <asm/mce.h>
17689 #include <asm/hw_irq.h>
17690
17691-atomic_t irq_err_count;
17692+atomic_unchecked_t irq_err_count;
17693
17694 /* Function pointer for generic interrupt vector handling */
17695 void (*generic_interrupt_extension)(void) = NULL;
17696@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17697 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17698 seq_printf(p, " Machine check polls\n");
17699 #endif
17700- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17701+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17702 #if defined(CONFIG_X86_IO_APIC)
17703- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17704+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17705 #endif
17706 return 0;
17707 }
17708@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17709
17710 u64 arch_irq_stat(void)
17711 {
17712- u64 sum = atomic_read(&irq_err_count);
17713+ u64 sum = atomic_read_unchecked(&irq_err_count);
17714
17715 #ifdef CONFIG_X86_IO_APIC
17716- sum += atomic_read(&irq_mis_count);
17717+ sum += atomic_read_unchecked(&irq_mis_count);
17718 #endif
17719 return sum;
17720 }
17721diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17722index 7d35d0f..03f1d52 100644
17723--- a/arch/x86/kernel/irq_32.c
17724+++ b/arch/x86/kernel/irq_32.c
17725@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17726 __asm__ __volatile__("andl %%esp,%0" :
17727 "=r" (sp) : "0" (THREAD_SIZE - 1));
17728
17729- return sp < (sizeof(struct thread_info) + STACK_WARN);
17730+ return sp < STACK_WARN;
17731 }
17732
17733 static void print_stack_overflow(void)
17734@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17735 * per-CPU IRQ handling contexts (thread information and stack)
17736 */
17737 union irq_ctx {
17738- struct thread_info tinfo;
17739- u32 stack[THREAD_SIZE/sizeof(u32)];
17740-} __attribute__((aligned(PAGE_SIZE)));
17741+ unsigned long previous_esp;
17742+ u32 stack[THREAD_SIZE/sizeof(u32)];
17743+} __attribute__((aligned(THREAD_SIZE)));
17744
17745 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17746 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17747@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17748 static inline int
17749 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17750 {
17751- union irq_ctx *curctx, *irqctx;
17752+ union irq_ctx *irqctx;
17753 u32 *isp, arg1, arg2;
17754
17755- curctx = (union irq_ctx *) current_thread_info();
17756 irqctx = __get_cpu_var(hardirq_ctx);
17757
17758 /*
17759@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17760 * handler) we can't do that and just have to keep using the
17761 * current stack (which is the irq stack already after all)
17762 */
17763- if (unlikely(curctx == irqctx))
17764+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17765 return 0;
17766
17767 /* build the stack frame on the IRQ stack */
17768- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17769- irqctx->tinfo.task = curctx->tinfo.task;
17770- irqctx->tinfo.previous_esp = current_stack_pointer;
17771+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17772+ irqctx->previous_esp = current_stack_pointer;
17773
17774- /*
17775- * Copy the softirq bits in preempt_count so that the
17776- * softirq checks work in the hardirq context.
17777- */
17778- irqctx->tinfo.preempt_count =
17779- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17780- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17781+#ifdef CONFIG_PAX_MEMORY_UDEREF
17782+ __set_fs(MAKE_MM_SEG(0));
17783+#endif
17784
17785 if (unlikely(overflow))
17786 call_on_stack(print_stack_overflow, isp);
17787@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17788 : "0" (irq), "1" (desc), "2" (isp),
17789 "D" (desc->handle_irq)
17790 : "memory", "cc", "ecx");
17791+
17792+#ifdef CONFIG_PAX_MEMORY_UDEREF
17793+ __set_fs(current_thread_info()->addr_limit);
17794+#endif
17795+
17796 return 1;
17797 }
17798
17799@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17800 */
17801 void __cpuinit irq_ctx_init(int cpu)
17802 {
17803- union irq_ctx *irqctx;
17804-
17805 if (per_cpu(hardirq_ctx, cpu))
17806 return;
17807
17808- irqctx = &per_cpu(hardirq_stack, cpu);
17809- irqctx->tinfo.task = NULL;
17810- irqctx->tinfo.exec_domain = NULL;
17811- irqctx->tinfo.cpu = cpu;
17812- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17813- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17814-
17815- per_cpu(hardirq_ctx, cpu) = irqctx;
17816-
17817- irqctx = &per_cpu(softirq_stack, cpu);
17818- irqctx->tinfo.task = NULL;
17819- irqctx->tinfo.exec_domain = NULL;
17820- irqctx->tinfo.cpu = cpu;
17821- irqctx->tinfo.preempt_count = 0;
17822- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17823-
17824- per_cpu(softirq_ctx, cpu) = irqctx;
17825+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
17826+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
17827
17828 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17829 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17830@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
17831 asmlinkage void do_softirq(void)
17832 {
17833 unsigned long flags;
17834- struct thread_info *curctx;
17835 union irq_ctx *irqctx;
17836 u32 *isp;
17837
17838@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
17839 local_irq_save(flags);
17840
17841 if (local_softirq_pending()) {
17842- curctx = current_thread_info();
17843 irqctx = __get_cpu_var(softirq_ctx);
17844- irqctx->tinfo.task = curctx->task;
17845- irqctx->tinfo.previous_esp = current_stack_pointer;
17846+ irqctx->previous_esp = current_stack_pointer;
17847
17848 /* build the stack frame on the softirq stack */
17849- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17850+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17851+
17852+#ifdef CONFIG_PAX_MEMORY_UDEREF
17853+ __set_fs(MAKE_MM_SEG(0));
17854+#endif
17855
17856 call_on_stack(__do_softirq, isp);
17857+
17858+#ifdef CONFIG_PAX_MEMORY_UDEREF
17859+ __set_fs(current_thread_info()->addr_limit);
17860+#endif
17861+
17862 /*
17863 * Shouldnt happen, we returned above if in_interrupt():
17864 */
17865diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17866index 8d82a77..0baf312 100644
17867--- a/arch/x86/kernel/kgdb.c
17868+++ b/arch/x86/kernel/kgdb.c
17869@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17870
17871 /* clear the trace bit */
17872 linux_regs->flags &= ~X86_EFLAGS_TF;
17873- atomic_set(&kgdb_cpu_doing_single_step, -1);
17874+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17875
17876 /* set the trace bit if we're stepping */
17877 if (remcomInBuffer[0] == 's') {
17878 linux_regs->flags |= X86_EFLAGS_TF;
17879 kgdb_single_step = 1;
17880- atomic_set(&kgdb_cpu_doing_single_step,
17881+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17882 raw_smp_processor_id());
17883 }
17884
17885@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17886 break;
17887
17888 case DIE_DEBUG:
17889- if (atomic_read(&kgdb_cpu_doing_single_step) ==
17890+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
17891 raw_smp_processor_id()) {
17892 if (user_mode(regs))
17893 return single_step_cont(regs, args);
17894@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
17895 return instruction_pointer(regs);
17896 }
17897
17898-struct kgdb_arch arch_kgdb_ops = {
17899+const struct kgdb_arch arch_kgdb_ops = {
17900 /* Breakpoint instruction: */
17901 .gdb_bpt_instr = { 0xcc },
17902 .flags = KGDB_HW_BREAKPOINT,
17903diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17904index 7a67820..8d15b75 100644
17905--- a/arch/x86/kernel/kprobes.c
17906+++ b/arch/x86/kernel/kprobes.c
17907@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
17908 char op;
17909 s32 raddr;
17910 } __attribute__((packed)) * jop;
17911- jop = (struct __arch_jmp_op *)from;
17912+
17913+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
17914+
17915+ pax_open_kernel();
17916 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
17917 jop->op = RELATIVEJUMP_INSTRUCTION;
17918+ pax_close_kernel();
17919 }
17920
17921 /*
17922@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17923 kprobe_opcode_t opcode;
17924 kprobe_opcode_t *orig_opcodes = opcodes;
17925
17926- if (search_exception_tables((unsigned long)opcodes))
17927+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17928 return 0; /* Page fault may occur on this address. */
17929
17930 retry:
17931@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
17932 disp = (u8 *) p->addr + *((s32 *) insn) -
17933 (u8 *) p->ainsn.insn;
17934 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
17935+ pax_open_kernel();
17936 *(s32 *)insn = (s32) disp;
17937+ pax_close_kernel();
17938 }
17939 }
17940 #endif
17941@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
17942
17943 static void __kprobes arch_copy_kprobe(struct kprobe *p)
17944 {
17945- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17946+ pax_open_kernel();
17947+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17948+ pax_close_kernel();
17949
17950 fix_riprel(p);
17951
17952- if (can_boost(p->addr))
17953+ if (can_boost(ktla_ktva(p->addr)))
17954 p->ainsn.boostable = 0;
17955 else
17956 p->ainsn.boostable = -1;
17957
17958- p->opcode = *p->addr;
17959+ p->opcode = *(ktla_ktva(p->addr));
17960 }
17961
17962 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17963@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
17964 if (p->opcode == BREAKPOINT_INSTRUCTION)
17965 regs->ip = (unsigned long)p->addr;
17966 else
17967- regs->ip = (unsigned long)p->ainsn.insn;
17968+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17969 }
17970
17971 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
17972@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17973 if (p->ainsn.boostable == 1 && !p->post_handler) {
17974 /* Boost up -- we can execute copied instructions directly */
17975 reset_current_kprobe();
17976- regs->ip = (unsigned long)p->ainsn.insn;
17977+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17978 preempt_enable_no_resched();
17979 return;
17980 }
17981@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17982 struct kprobe_ctlblk *kcb;
17983
17984 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
17985- if (*addr != BREAKPOINT_INSTRUCTION) {
17986+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17987 /*
17988 * The breakpoint instruction was removed right
17989 * after we hit it. Another cpu has removed
17990@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17991 /* Skip orig_ax, ip, cs */
17992 " addq $24, %rsp\n"
17993 " popfq\n"
17994+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17995+ " btsq $63,(%rsp)\n"
17996+#endif
17997 #else
17998 " pushf\n"
17999 /*
18000@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
18001 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
18002 {
18003 unsigned long *tos = stack_addr(regs);
18004- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
18005+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
18006 unsigned long orig_ip = (unsigned long)p->addr;
18007 kprobe_opcode_t *insn = p->ainsn.insn;
18008
18009@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18010 struct die_args *args = data;
18011 int ret = NOTIFY_DONE;
18012
18013- if (args->regs && user_mode_vm(args->regs))
18014+ if (args->regs && user_mode(args->regs))
18015 return ret;
18016
18017 switch (val) {
18018diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18019index 63b0ec8..6d92227 100644
18020--- a/arch/x86/kernel/kvm.c
18021+++ b/arch/x86/kernel/kvm.c
18022@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18023 pv_mmu_ops.set_pud = kvm_set_pud;
18024 #if PAGETABLE_LEVELS == 4
18025 pv_mmu_ops.set_pgd = kvm_set_pgd;
18026+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18027 #endif
18028 #endif
18029 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18030diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18031index ec6ef60..ab2c824 100644
18032--- a/arch/x86/kernel/ldt.c
18033+++ b/arch/x86/kernel/ldt.c
18034@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18035 if (reload) {
18036 #ifdef CONFIG_SMP
18037 preempt_disable();
18038- load_LDT(pc);
18039+ load_LDT_nolock(pc);
18040 if (!cpumask_equal(mm_cpumask(current->mm),
18041 cpumask_of(smp_processor_id())))
18042 smp_call_function(flush_ldt, current->mm, 1);
18043 preempt_enable();
18044 #else
18045- load_LDT(pc);
18046+ load_LDT_nolock(pc);
18047 #endif
18048 }
18049 if (oldsize) {
18050@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18051 return err;
18052
18053 for (i = 0; i < old->size; i++)
18054- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18055+ write_ldt_entry(new->ldt, i, old->ldt + i);
18056 return 0;
18057 }
18058
18059@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18060 retval = copy_ldt(&mm->context, &old_mm->context);
18061 mutex_unlock(&old_mm->context.lock);
18062 }
18063+
18064+ if (tsk == current) {
18065+ mm->context.vdso = 0;
18066+
18067+#ifdef CONFIG_X86_32
18068+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18069+ mm->context.user_cs_base = 0UL;
18070+ mm->context.user_cs_limit = ~0UL;
18071+
18072+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18073+ cpus_clear(mm->context.cpu_user_cs_mask);
18074+#endif
18075+
18076+#endif
18077+#endif
18078+
18079+ }
18080+
18081 return retval;
18082 }
18083
18084@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18085 }
18086 }
18087
18088+#ifdef CONFIG_PAX_SEGMEXEC
18089+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18090+ error = -EINVAL;
18091+ goto out_unlock;
18092+ }
18093+#endif
18094+
18095 fill_ldt(&ldt, &ldt_info);
18096 if (oldmode)
18097 ldt.avl = 0;
18098diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18099index c1c429d..f02eaf9 100644
18100--- a/arch/x86/kernel/machine_kexec_32.c
18101+++ b/arch/x86/kernel/machine_kexec_32.c
18102@@ -26,7 +26,7 @@
18103 #include <asm/system.h>
18104 #include <asm/cacheflush.h>
18105
18106-static void set_idt(void *newidt, __u16 limit)
18107+static void set_idt(struct desc_struct *newidt, __u16 limit)
18108 {
18109 struct desc_ptr curidt;
18110
18111@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18112 }
18113
18114
18115-static void set_gdt(void *newgdt, __u16 limit)
18116+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18117 {
18118 struct desc_ptr curgdt;
18119
18120@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18121 }
18122
18123 control_page = page_address(image->control_code_page);
18124- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18125+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18126
18127 relocate_kernel_ptr = control_page;
18128 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18129diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18130index 1e47679..e73449d 100644
18131--- a/arch/x86/kernel/microcode_amd.c
18132+++ b/arch/x86/kernel/microcode_amd.c
18133@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18134 uci->mc = NULL;
18135 }
18136
18137-static struct microcode_ops microcode_amd_ops = {
18138+static const struct microcode_ops microcode_amd_ops = {
18139 .request_microcode_user = request_microcode_user,
18140 .request_microcode_fw = request_microcode_fw,
18141 .collect_cpu_info = collect_cpu_info_amd,
18142@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18143 .microcode_fini_cpu = microcode_fini_cpu_amd,
18144 };
18145
18146-struct microcode_ops * __init init_amd_microcode(void)
18147+const struct microcode_ops * __init init_amd_microcode(void)
18148 {
18149 return &microcode_amd_ops;
18150 }
18151diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18152index 378e9a8..b5a6ea9 100644
18153--- a/arch/x86/kernel/microcode_core.c
18154+++ b/arch/x86/kernel/microcode_core.c
18155@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18156
18157 #define MICROCODE_VERSION "2.00"
18158
18159-static struct microcode_ops *microcode_ops;
18160+static const struct microcode_ops *microcode_ops;
18161
18162 /*
18163 * Synchronization.
18164diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18165index 0d334dd..14cedaf 100644
18166--- a/arch/x86/kernel/microcode_intel.c
18167+++ b/arch/x86/kernel/microcode_intel.c
18168@@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18169
18170 static int get_ucode_user(void *to, const void *from, size_t n)
18171 {
18172- return copy_from_user(to, from, n);
18173+ return copy_from_user(to, (const void __force_user *)from, n);
18174 }
18175
18176 static enum ucode_state
18177 request_microcode_user(int cpu, const void __user *buf, size_t size)
18178 {
18179- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18180+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18181 }
18182
18183 static void microcode_fini_cpu(int cpu)
18184@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18185 uci->mc = NULL;
18186 }
18187
18188-static struct microcode_ops microcode_intel_ops = {
18189+static const struct microcode_ops microcode_intel_ops = {
18190 .request_microcode_user = request_microcode_user,
18191 .request_microcode_fw = request_microcode_fw,
18192 .collect_cpu_info = collect_cpu_info,
18193@@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18194 .microcode_fini_cpu = microcode_fini_cpu,
18195 };
18196
18197-struct microcode_ops * __init init_intel_microcode(void)
18198+const struct microcode_ops * __init init_intel_microcode(void)
18199 {
18200 return &microcode_intel_ops;
18201 }
18202diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18203index 89f386f..9028f51 100644
18204--- a/arch/x86/kernel/module.c
18205+++ b/arch/x86/kernel/module.c
18206@@ -34,7 +34,7 @@
18207 #define DEBUGP(fmt...)
18208 #endif
18209
18210-void *module_alloc(unsigned long size)
18211+static void *__module_alloc(unsigned long size, pgprot_t prot)
18212 {
18213 struct vm_struct *area;
18214
18215@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18216 if (!area)
18217 return NULL;
18218
18219- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18220- PAGE_KERNEL_EXEC);
18221+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18222+}
18223+
18224+void *module_alloc(unsigned long size)
18225+{
18226+
18227+#ifdef CONFIG_PAX_KERNEXEC
18228+ return __module_alloc(size, PAGE_KERNEL);
18229+#else
18230+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18231+#endif
18232+
18233 }
18234
18235 /* Free memory returned from module_alloc */
18236@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18237 vfree(module_region);
18238 }
18239
18240+#ifdef CONFIG_PAX_KERNEXEC
18241+#ifdef CONFIG_X86_32
18242+void *module_alloc_exec(unsigned long size)
18243+{
18244+ struct vm_struct *area;
18245+
18246+ if (size == 0)
18247+ return NULL;
18248+
18249+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18250+ return area ? area->addr : NULL;
18251+}
18252+EXPORT_SYMBOL(module_alloc_exec);
18253+
18254+void module_free_exec(struct module *mod, void *module_region)
18255+{
18256+ vunmap(module_region);
18257+}
18258+EXPORT_SYMBOL(module_free_exec);
18259+#else
18260+void module_free_exec(struct module *mod, void *module_region)
18261+{
18262+ module_free(mod, module_region);
18263+}
18264+EXPORT_SYMBOL(module_free_exec);
18265+
18266+void *module_alloc_exec(unsigned long size)
18267+{
18268+ return __module_alloc(size, PAGE_KERNEL_RX);
18269+}
18270+EXPORT_SYMBOL(module_alloc_exec);
18271+#endif
18272+#endif
18273+
18274 /* We don't need anything special. */
18275 int module_frob_arch_sections(Elf_Ehdr *hdr,
18276 Elf_Shdr *sechdrs,
18277@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18278 unsigned int i;
18279 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18280 Elf32_Sym *sym;
18281- uint32_t *location;
18282+ uint32_t *plocation, location;
18283
18284 DEBUGP("Applying relocate section %u to %u\n", relsec,
18285 sechdrs[relsec].sh_info);
18286 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18287 /* This is where to make the change */
18288- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18289- + rel[i].r_offset;
18290+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18291+ location = (uint32_t)plocation;
18292+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18293+ plocation = ktla_ktva((void *)plocation);
18294 /* This is the symbol it is referring to. Note that all
18295 undefined symbols have been resolved. */
18296 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18297@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18298 switch (ELF32_R_TYPE(rel[i].r_info)) {
18299 case R_386_32:
18300 /* We add the value into the location given */
18301- *location += sym->st_value;
18302+ pax_open_kernel();
18303+ *plocation += sym->st_value;
18304+ pax_close_kernel();
18305 break;
18306 case R_386_PC32:
18307 /* Add the value, subtract its postition */
18308- *location += sym->st_value - (uint32_t)location;
18309+ pax_open_kernel();
18310+ *plocation += sym->st_value - location;
18311+ pax_close_kernel();
18312 break;
18313 default:
18314 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18315@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18316 case R_X86_64_NONE:
18317 break;
18318 case R_X86_64_64:
18319+ pax_open_kernel();
18320 *(u64 *)loc = val;
18321+ pax_close_kernel();
18322 break;
18323 case R_X86_64_32:
18324+ pax_open_kernel();
18325 *(u32 *)loc = val;
18326+ pax_close_kernel();
18327 if (val != *(u32 *)loc)
18328 goto overflow;
18329 break;
18330 case R_X86_64_32S:
18331+ pax_open_kernel();
18332 *(s32 *)loc = val;
18333+ pax_close_kernel();
18334 if ((s64)val != *(s32 *)loc)
18335 goto overflow;
18336 break;
18337 case R_X86_64_PC32:
18338 val -= (u64)loc;
18339+ pax_open_kernel();
18340 *(u32 *)loc = val;
18341+ pax_close_kernel();
18342+
18343 #if 0
18344 if ((s64)val != *(s32 *)loc)
18345 goto overflow;
18346diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18347index 3a7c5a4..9191528 100644
18348--- a/arch/x86/kernel/paravirt-spinlocks.c
18349+++ b/arch/x86/kernel/paravirt-spinlocks.c
18350@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18351 __raw_spin_lock(lock);
18352 }
18353
18354-struct pv_lock_ops pv_lock_ops = {
18355+struct pv_lock_ops pv_lock_ops __read_only = {
18356 #ifdef CONFIG_SMP
18357 .spin_is_locked = __ticket_spin_is_locked,
18358 .spin_is_contended = __ticket_spin_is_contended,
18359diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18360index 1b1739d..dea6077 100644
18361--- a/arch/x86/kernel/paravirt.c
18362+++ b/arch/x86/kernel/paravirt.c
18363@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18364 {
18365 return x;
18366 }
18367+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18368+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18369+#endif
18370
18371 void __init default_banner(void)
18372 {
18373@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18374 * corresponding structure. */
18375 static void *get_call_destination(u8 type)
18376 {
18377- struct paravirt_patch_template tmpl = {
18378+ const struct paravirt_patch_template tmpl = {
18379 .pv_init_ops = pv_init_ops,
18380 .pv_time_ops = pv_time_ops,
18381 .pv_cpu_ops = pv_cpu_ops,
18382@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18383 .pv_lock_ops = pv_lock_ops,
18384 #endif
18385 };
18386+
18387+ pax_track_stack();
18388 return *((void **)&tmpl + type);
18389 }
18390
18391@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18392 if (opfunc == NULL)
18393 /* If there's no function, patch it with a ud2a (BUG) */
18394 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18395- else if (opfunc == _paravirt_nop)
18396+ else if (opfunc == (void *)_paravirt_nop)
18397 /* If the operation is a nop, then nop the callsite */
18398 ret = paravirt_patch_nop();
18399
18400 /* identity functions just return their single argument */
18401- else if (opfunc == _paravirt_ident_32)
18402+ else if (opfunc == (void *)_paravirt_ident_32)
18403 ret = paravirt_patch_ident_32(insnbuf, len);
18404- else if (opfunc == _paravirt_ident_64)
18405+ else if (opfunc == (void *)_paravirt_ident_64)
18406 ret = paravirt_patch_ident_64(insnbuf, len);
18407+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18408+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18409+ ret = paravirt_patch_ident_64(insnbuf, len);
18410+#endif
18411
18412 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18413 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18414@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18415 if (insn_len > len || start == NULL)
18416 insn_len = len;
18417 else
18418- memcpy(insnbuf, start, insn_len);
18419+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18420
18421 return insn_len;
18422 }
18423@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18424 preempt_enable();
18425 }
18426
18427-struct pv_info pv_info = {
18428+struct pv_info pv_info __read_only = {
18429 .name = "bare hardware",
18430 .paravirt_enabled = 0,
18431 .kernel_rpl = 0,
18432 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18433 };
18434
18435-struct pv_init_ops pv_init_ops = {
18436+struct pv_init_ops pv_init_ops __read_only = {
18437 .patch = native_patch,
18438 };
18439
18440-struct pv_time_ops pv_time_ops = {
18441+struct pv_time_ops pv_time_ops __read_only = {
18442 .sched_clock = native_sched_clock,
18443 };
18444
18445-struct pv_irq_ops pv_irq_ops = {
18446+struct pv_irq_ops pv_irq_ops __read_only = {
18447 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18448 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18449 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18450@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18451 #endif
18452 };
18453
18454-struct pv_cpu_ops pv_cpu_ops = {
18455+struct pv_cpu_ops pv_cpu_ops __read_only = {
18456 .cpuid = native_cpuid,
18457 .get_debugreg = native_get_debugreg,
18458 .set_debugreg = native_set_debugreg,
18459@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18460 .end_context_switch = paravirt_nop,
18461 };
18462
18463-struct pv_apic_ops pv_apic_ops = {
18464+struct pv_apic_ops pv_apic_ops __read_only = {
18465 #ifdef CONFIG_X86_LOCAL_APIC
18466 .startup_ipi_hook = paravirt_nop,
18467 #endif
18468 };
18469
18470-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18471+#ifdef CONFIG_X86_32
18472+#ifdef CONFIG_X86_PAE
18473+/* 64-bit pagetable entries */
18474+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18475+#else
18476 /* 32-bit pagetable entries */
18477 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18478+#endif
18479 #else
18480 /* 64-bit pagetable entries */
18481 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18482 #endif
18483
18484-struct pv_mmu_ops pv_mmu_ops = {
18485+struct pv_mmu_ops pv_mmu_ops __read_only = {
18486
18487 .read_cr2 = native_read_cr2,
18488 .write_cr2 = native_write_cr2,
18489@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18490 .make_pud = PTE_IDENT,
18491
18492 .set_pgd = native_set_pgd,
18493+ .set_pgd_batched = native_set_pgd_batched,
18494 #endif
18495 #endif /* PAGETABLE_LEVELS >= 3 */
18496
18497@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18498 },
18499
18500 .set_fixmap = native_set_fixmap,
18501+
18502+#ifdef CONFIG_PAX_KERNEXEC
18503+ .pax_open_kernel = native_pax_open_kernel,
18504+ .pax_close_kernel = native_pax_close_kernel,
18505+#endif
18506+
18507 };
18508
18509 EXPORT_SYMBOL_GPL(pv_time_ops);
18510diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18511index 1a2d4b1..6a0dd55 100644
18512--- a/arch/x86/kernel/pci-calgary_64.c
18513+++ b/arch/x86/kernel/pci-calgary_64.c
18514@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18515 free_pages((unsigned long)vaddr, get_order(size));
18516 }
18517
18518-static struct dma_map_ops calgary_dma_ops = {
18519+static const struct dma_map_ops calgary_dma_ops = {
18520 .alloc_coherent = calgary_alloc_coherent,
18521 .free_coherent = calgary_free_coherent,
18522 .map_sg = calgary_map_sg,
18523diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18524index 6ac3931..42b4414 100644
18525--- a/arch/x86/kernel/pci-dma.c
18526+++ b/arch/x86/kernel/pci-dma.c
18527@@ -14,7 +14,7 @@
18528
18529 static int forbid_dac __read_mostly;
18530
18531-struct dma_map_ops *dma_ops;
18532+const struct dma_map_ops *dma_ops;
18533 EXPORT_SYMBOL(dma_ops);
18534
18535 static int iommu_sac_force __read_mostly;
18536@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18537
18538 int dma_supported(struct device *dev, u64 mask)
18539 {
18540- struct dma_map_ops *ops = get_dma_ops(dev);
18541+ const struct dma_map_ops *ops = get_dma_ops(dev);
18542
18543 #ifdef CONFIG_PCI
18544 if (mask > 0xffffffff && forbid_dac > 0) {
18545diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18546index 1c76691..e3632db 100644
18547--- a/arch/x86/kernel/pci-gart_64.c
18548+++ b/arch/x86/kernel/pci-gart_64.c
18549@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18550 return -1;
18551 }
18552
18553-static struct dma_map_ops gart_dma_ops = {
18554+static const struct dma_map_ops gart_dma_ops = {
18555 .map_sg = gart_map_sg,
18556 .unmap_sg = gart_unmap_sg,
18557 .map_page = gart_map_page,
18558diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18559index a3933d4..c898869 100644
18560--- a/arch/x86/kernel/pci-nommu.c
18561+++ b/arch/x86/kernel/pci-nommu.c
18562@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18563 flush_write_buffers();
18564 }
18565
18566-struct dma_map_ops nommu_dma_ops = {
18567+const struct dma_map_ops nommu_dma_ops = {
18568 .alloc_coherent = dma_generic_alloc_coherent,
18569 .free_coherent = nommu_free_coherent,
18570 .map_sg = nommu_map_sg,
18571diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18572index aaa6b78..4de1881 100644
18573--- a/arch/x86/kernel/pci-swiotlb.c
18574+++ b/arch/x86/kernel/pci-swiotlb.c
18575@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18576 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18577 }
18578
18579-static struct dma_map_ops swiotlb_dma_ops = {
18580+static const struct dma_map_ops swiotlb_dma_ops = {
18581 .mapping_error = swiotlb_dma_mapping_error,
18582 .alloc_coherent = x86_swiotlb_alloc_coherent,
18583 .free_coherent = swiotlb_free_coherent,
18584diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18585index fc6c84d..0312ca2 100644
18586--- a/arch/x86/kernel/process.c
18587+++ b/arch/x86/kernel/process.c
18588@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18589
18590 void free_thread_info(struct thread_info *ti)
18591 {
18592- free_thread_xstate(ti->task);
18593 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18594 }
18595
18596+static struct kmem_cache *task_struct_cachep;
18597+
18598 void arch_task_cache_init(void)
18599 {
18600- task_xstate_cachep =
18601- kmem_cache_create("task_xstate", xstate_size,
18602+ /* create a slab on which task_structs can be allocated */
18603+ task_struct_cachep =
18604+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18605+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18606+
18607+ task_xstate_cachep =
18608+ kmem_cache_create("task_xstate", xstate_size,
18609 __alignof__(union thread_xstate),
18610- SLAB_PANIC | SLAB_NOTRACK, NULL);
18611+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18612+}
18613+
18614+struct task_struct *alloc_task_struct(void)
18615+{
18616+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18617+}
18618+
18619+void free_task_struct(struct task_struct *task)
18620+{
18621+ free_thread_xstate(task);
18622+ kmem_cache_free(task_struct_cachep, task);
18623 }
18624
18625 /*
18626@@ -73,7 +90,7 @@ void exit_thread(void)
18627 unsigned long *bp = t->io_bitmap_ptr;
18628
18629 if (bp) {
18630- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18631+ struct tss_struct *tss = init_tss + get_cpu();
18632
18633 t->io_bitmap_ptr = NULL;
18634 clear_thread_flag(TIF_IO_BITMAP);
18635@@ -93,6 +110,9 @@ void flush_thread(void)
18636
18637 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18638
18639+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18640+ loadsegment(gs, 0);
18641+#endif
18642 tsk->thread.debugreg0 = 0;
18643 tsk->thread.debugreg1 = 0;
18644 tsk->thread.debugreg2 = 0;
18645@@ -307,7 +327,7 @@ void default_idle(void)
18646 EXPORT_SYMBOL(default_idle);
18647 #endif
18648
18649-void stop_this_cpu(void *dummy)
18650+__noreturn void stop_this_cpu(void *dummy)
18651 {
18652 local_irq_disable();
18653 /*
18654@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18655 }
18656 early_param("idle", idle_setup);
18657
18658-unsigned long arch_align_stack(unsigned long sp)
18659+#ifdef CONFIG_PAX_RANDKSTACK
18660+void pax_randomize_kstack(struct pt_regs *regs)
18661 {
18662- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18663- sp -= get_random_int() % 8192;
18664- return sp & ~0xf;
18665-}
18666+ struct thread_struct *thread = &current->thread;
18667+ unsigned long time;
18668
18669-unsigned long arch_randomize_brk(struct mm_struct *mm)
18670-{
18671- unsigned long range_end = mm->brk + 0x02000000;
18672- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18673+ if (!randomize_va_space)
18674+ return;
18675+
18676+ if (v8086_mode(regs))
18677+ return;
18678+
18679+ rdtscl(time);
18680+
18681+ /* P4 seems to return a 0 LSB, ignore it */
18682+#ifdef CONFIG_MPENTIUM4
18683+ time &= 0x3EUL;
18684+ time <<= 2;
18685+#elif defined(CONFIG_X86_64)
18686+ time &= 0xFUL;
18687+ time <<= 4;
18688+#else
18689+ time &= 0x1FUL;
18690+ time <<= 3;
18691+#endif
18692+
18693+ thread->sp0 ^= time;
18694+ load_sp0(init_tss + smp_processor_id(), thread);
18695+
18696+#ifdef CONFIG_X86_64
18697+ percpu_write(kernel_stack, thread->sp0);
18698+#endif
18699 }
18700+#endif
18701
18702diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18703index c40c432..6e1df72 100644
18704--- a/arch/x86/kernel/process_32.c
18705+++ b/arch/x86/kernel/process_32.c
18706@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18707 unsigned long thread_saved_pc(struct task_struct *tsk)
18708 {
18709 return ((unsigned long *)tsk->thread.sp)[3];
18710+//XXX return tsk->thread.eip;
18711 }
18712
18713 #ifndef CONFIG_SMP
18714@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18715 unsigned short ss, gs;
18716 const char *board;
18717
18718- if (user_mode_vm(regs)) {
18719+ if (user_mode(regs)) {
18720 sp = regs->sp;
18721 ss = regs->ss & 0xffff;
18722- gs = get_user_gs(regs);
18723 } else {
18724 sp = (unsigned long) (&regs->sp);
18725 savesegment(ss, ss);
18726- savesegment(gs, gs);
18727 }
18728+ gs = get_user_gs(regs);
18729
18730 printk("\n");
18731
18732@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18733 regs.bx = (unsigned long) fn;
18734 regs.dx = (unsigned long) arg;
18735
18736- regs.ds = __USER_DS;
18737- regs.es = __USER_DS;
18738+ regs.ds = __KERNEL_DS;
18739+ regs.es = __KERNEL_DS;
18740 regs.fs = __KERNEL_PERCPU;
18741- regs.gs = __KERNEL_STACK_CANARY;
18742+ savesegment(gs, regs.gs);
18743 regs.orig_ax = -1;
18744 regs.ip = (unsigned long) kernel_thread_helper;
18745 regs.cs = __KERNEL_CS | get_kernel_rpl();
18746@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18747 struct task_struct *tsk;
18748 int err;
18749
18750- childregs = task_pt_regs(p);
18751+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18752 *childregs = *regs;
18753 childregs->ax = 0;
18754 childregs->sp = sp;
18755
18756 p->thread.sp = (unsigned long) childregs;
18757 p->thread.sp0 = (unsigned long) (childregs+1);
18758+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18759
18760 p->thread.ip = (unsigned long) ret_from_fork;
18761
18762@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18763 struct thread_struct *prev = &prev_p->thread,
18764 *next = &next_p->thread;
18765 int cpu = smp_processor_id();
18766- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18767+ struct tss_struct *tss = init_tss + cpu;
18768 bool preload_fpu;
18769
18770 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18771@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18772 */
18773 lazy_save_gs(prev->gs);
18774
18775+#ifdef CONFIG_PAX_MEMORY_UDEREF
18776+ __set_fs(task_thread_info(next_p)->addr_limit);
18777+#endif
18778+
18779 /*
18780 * Load the per-thread Thread-Local Storage descriptor.
18781 */
18782@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18783 */
18784 arch_end_context_switch(next_p);
18785
18786+ percpu_write(current_task, next_p);
18787+ percpu_write(current_tinfo, &next_p->tinfo);
18788+
18789 if (preload_fpu)
18790 __math_state_restore();
18791
18792@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18793 if (prev->gs | next->gs)
18794 lazy_load_gs(next->gs);
18795
18796- percpu_write(current_task, next_p);
18797-
18798 return prev_p;
18799 }
18800
18801@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
18802 } while (count++ < 16);
18803 return 0;
18804 }
18805-
18806diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18807index 39493bc..196816d 100644
18808--- a/arch/x86/kernel/process_64.c
18809+++ b/arch/x86/kernel/process_64.c
18810@@ -91,7 +91,7 @@ static void __exit_idle(void)
18811 void exit_idle(void)
18812 {
18813 /* idle loop has pid 0 */
18814- if (current->pid)
18815+ if (task_pid_nr(current))
18816 return;
18817 __exit_idle();
18818 }
18819@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
18820 if (!board)
18821 board = "";
18822 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
18823- current->pid, current->comm, print_tainted(),
18824+ task_pid_nr(current), current->comm, print_tainted(),
18825 init_utsname()->release,
18826 (int)strcspn(init_utsname()->version, " "),
18827 init_utsname()->version, board);
18828@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18829 struct pt_regs *childregs;
18830 struct task_struct *me = current;
18831
18832- childregs = ((struct pt_regs *)
18833- (THREAD_SIZE + task_stack_page(p))) - 1;
18834+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18835 *childregs = *regs;
18836
18837 childregs->ax = 0;
18838@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18839 p->thread.sp = (unsigned long) childregs;
18840 p->thread.sp0 = (unsigned long) (childregs+1);
18841 p->thread.usersp = me->thread.usersp;
18842+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18843
18844 set_tsk_thread_flag(p, TIF_FORK);
18845
18846@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18847 struct thread_struct *prev = &prev_p->thread;
18848 struct thread_struct *next = &next_p->thread;
18849 int cpu = smp_processor_id();
18850- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18851+ struct tss_struct *tss = init_tss + cpu;
18852 unsigned fsindex, gsindex;
18853 bool preload_fpu;
18854
18855@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18856 prev->usersp = percpu_read(old_rsp);
18857 percpu_write(old_rsp, next->usersp);
18858 percpu_write(current_task, next_p);
18859+ percpu_write(current_tinfo, &next_p->tinfo);
18860
18861- percpu_write(kernel_stack,
18862- (unsigned long)task_stack_page(next_p) +
18863- THREAD_SIZE - KERNEL_STACK_OFFSET);
18864+ percpu_write(kernel_stack, next->sp0);
18865
18866 /*
18867 * Now maybe reload the debug registers and handle I/O bitmaps
18868@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
18869 if (!p || p == current || p->state == TASK_RUNNING)
18870 return 0;
18871 stack = (unsigned long)task_stack_page(p);
18872- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18873+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18874 return 0;
18875 fp = *(u64 *)(p->thread.sp);
18876 do {
18877- if (fp < (unsigned long)stack ||
18878- fp >= (unsigned long)stack+THREAD_SIZE)
18879+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18880 return 0;
18881 ip = *(u64 *)(fp+8);
18882 if (!in_sched_functions(ip))
18883diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18884index c06acdd..3f5fff5 100644
18885--- a/arch/x86/kernel/ptrace.c
18886+++ b/arch/x86/kernel/ptrace.c
18887@@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
18888 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18889 {
18890 int ret;
18891- unsigned long __user *datap = (unsigned long __user *)data;
18892+ unsigned long __user *datap = (__force unsigned long __user *)data;
18893
18894 switch (request) {
18895 /* read the word at location addr in the USER area. */
18896@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18897 if (addr < 0)
18898 return -EIO;
18899 ret = do_get_thread_area(child, addr,
18900- (struct user_desc __user *) data);
18901+ (__force struct user_desc __user *) data);
18902 break;
18903
18904 case PTRACE_SET_THREAD_AREA:
18905 if (addr < 0)
18906 return -EIO;
18907 ret = do_set_thread_area(child, addr,
18908- (struct user_desc __user *) data, 0);
18909+ (__force struct user_desc __user *) data, 0);
18910 break;
18911 #endif
18912
18913@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18914 #ifdef CONFIG_X86_PTRACE_BTS
18915 case PTRACE_BTS_CONFIG:
18916 ret = ptrace_bts_config
18917- (child, data, (struct ptrace_bts_config __user *)addr);
18918+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18919 break;
18920
18921 case PTRACE_BTS_STATUS:
18922 ret = ptrace_bts_status
18923- (child, data, (struct ptrace_bts_config __user *)addr);
18924+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18925 break;
18926
18927 case PTRACE_BTS_SIZE:
18928@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18929
18930 case PTRACE_BTS_GET:
18931 ret = ptrace_bts_read_record
18932- (child, data, (struct bts_struct __user *) addr);
18933+ (child, data, (__force struct bts_struct __user *) addr);
18934 break;
18935
18936 case PTRACE_BTS_CLEAR:
18937@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18938
18939 case PTRACE_BTS_DRAIN:
18940 ret = ptrace_bts_drain
18941- (child, data, (struct bts_struct __user *) addr);
18942+ (child, data, (__force struct bts_struct __user *) addr);
18943 break;
18944 #endif /* CONFIG_X86_PTRACE_BTS */
18945
18946@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18947 info.si_code = si_code;
18948
18949 /* User-mode ip? */
18950- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
18951+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
18952
18953 /* Send us the fake SIGTRAP */
18954 force_sig_info(SIGTRAP, &info, tsk);
18955@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18956 * We must return the syscall number to actually look up in the table.
18957 * This can be -1L to skip running any syscall at all.
18958 */
18959-asmregparm long syscall_trace_enter(struct pt_regs *regs)
18960+long syscall_trace_enter(struct pt_regs *regs)
18961 {
18962 long ret = 0;
18963
18964@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
18965 return ret ?: regs->orig_ax;
18966 }
18967
18968-asmregparm void syscall_trace_leave(struct pt_regs *regs)
18969+void syscall_trace_leave(struct pt_regs *regs)
18970 {
18971 if (unlikely(current->audit_context))
18972 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
18973diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18974index cf98100..e76e03d 100644
18975--- a/arch/x86/kernel/reboot.c
18976+++ b/arch/x86/kernel/reboot.c
18977@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
18978 EXPORT_SYMBOL(pm_power_off);
18979
18980 static const struct desc_ptr no_idt = {};
18981-static int reboot_mode;
18982+static unsigned short reboot_mode;
18983 enum reboot_type reboot_type = BOOT_KBD;
18984 int reboot_force;
18985
18986@@ -292,12 +292,12 @@ core_initcall(reboot_init);
18987 controller to pulse the CPU reset line, which is more thorough, but
18988 doesn't work with at least one type of 486 motherboard. It is easy
18989 to stop this code working; hence the copious comments. */
18990-static const unsigned long long
18991-real_mode_gdt_entries [3] =
18992+static struct desc_struct
18993+real_mode_gdt_entries [3] __read_only =
18994 {
18995- 0x0000000000000000ULL, /* Null descriptor */
18996- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
18997- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
18998+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
18999+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
19000+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
19001 };
19002
19003 static const struct desc_ptr
19004@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
19005 * specified by the code and length parameters.
19006 * We assume that length will aways be less that 100!
19007 */
19008-void machine_real_restart(const unsigned char *code, int length)
19009+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19010 {
19011 local_irq_disable();
19012
19013@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19014 /* Remap the kernel at virtual address zero, as well as offset zero
19015 from the kernel segment. This assumes the kernel segment starts at
19016 virtual address PAGE_OFFSET. */
19017- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19018- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19019+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19020+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19021
19022 /*
19023 * Use `swapper_pg_dir' as our page directory.
19024@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19025 boot)". This seems like a fairly standard thing that gets set by
19026 REBOOT.COM programs, and the previous reset routine did this
19027 too. */
19028- *((unsigned short *)0x472) = reboot_mode;
19029+ *(unsigned short *)(__va(0x472)) = reboot_mode;
19030
19031 /* For the switch to real mode, copy some code to low memory. It has
19032 to be in the first 64k because it is running in 16-bit mode, and it
19033 has to have the same physical and virtual address, because it turns
19034 off paging. Copy it near the end of the first page, out of the way
19035 of BIOS variables. */
19036- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19037- real_mode_switch, sizeof (real_mode_switch));
19038- memcpy((void *)(0x1000 - 100), code, length);
19039+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19040+ memcpy(__va(0x1000 - 100), code, length);
19041
19042 /* Set up the IDT for real mode. */
19043 load_idt(&real_mode_idt);
19044@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19045 __asm__ __volatile__ ("ljmp $0x0008,%0"
19046 :
19047 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19048+ do { } while (1);
19049 }
19050 #ifdef CONFIG_APM_MODULE
19051 EXPORT_SYMBOL(machine_real_restart);
19052@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19053 {
19054 }
19055
19056-static void native_machine_emergency_restart(void)
19057+__noreturn static void native_machine_emergency_restart(void)
19058 {
19059 int i;
19060
19061@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19062 #endif
19063 }
19064
19065-static void __machine_emergency_restart(int emergency)
19066+static __noreturn void __machine_emergency_restart(int emergency)
19067 {
19068 reboot_emergency = emergency;
19069 machine_ops.emergency_restart();
19070 }
19071
19072-static void native_machine_restart(char *__unused)
19073+static __noreturn void native_machine_restart(char *__unused)
19074 {
19075 printk("machine restart\n");
19076
19077@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19078 __machine_emergency_restart(0);
19079 }
19080
19081-static void native_machine_halt(void)
19082+static __noreturn void native_machine_halt(void)
19083 {
19084 /* stop other cpus and apics */
19085 machine_shutdown();
19086@@ -685,7 +685,7 @@ static void native_machine_halt(void)
19087 stop_this_cpu(NULL);
19088 }
19089
19090-static void native_machine_power_off(void)
19091+__noreturn static void native_machine_power_off(void)
19092 {
19093 if (pm_power_off) {
19094 if (!reboot_force)
19095@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19096 }
19097 /* a fallback in case there is no PM info available */
19098 tboot_shutdown(TB_SHUTDOWN_HALT);
19099+ do { } while (1);
19100 }
19101
19102 struct machine_ops machine_ops = {
19103diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19104index 7a6f3b3..976a959 100644
19105--- a/arch/x86/kernel/relocate_kernel_64.S
19106+++ b/arch/x86/kernel/relocate_kernel_64.S
19107@@ -11,6 +11,7 @@
19108 #include <asm/kexec.h>
19109 #include <asm/processor-flags.h>
19110 #include <asm/pgtable_types.h>
19111+#include <asm/alternative-asm.h>
19112
19113 /*
19114 * Must be relocatable PIC code callable as a C function
19115@@ -167,6 +168,7 @@ identity_mapped:
19116 xorq %r14, %r14
19117 xorq %r15, %r15
19118
19119+ pax_force_retaddr 0, 1
19120 ret
19121
19122 1:
19123diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19124index 5449a26..0b6c759 100644
19125--- a/arch/x86/kernel/setup.c
19126+++ b/arch/x86/kernel/setup.c
19127@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19128
19129 if (!boot_params.hdr.root_flags)
19130 root_mountflags &= ~MS_RDONLY;
19131- init_mm.start_code = (unsigned long) _text;
19132- init_mm.end_code = (unsigned long) _etext;
19133+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19134+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19135 init_mm.end_data = (unsigned long) _edata;
19136 init_mm.brk = _brk_end;
19137
19138- code_resource.start = virt_to_phys(_text);
19139- code_resource.end = virt_to_phys(_etext)-1;
19140- data_resource.start = virt_to_phys(_etext);
19141+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19142+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19143+ data_resource.start = virt_to_phys(_sdata);
19144 data_resource.end = virt_to_phys(_edata)-1;
19145 bss_resource.start = virt_to_phys(&__bss_start);
19146 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19147diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19148index d559af9..524c6ad 100644
19149--- a/arch/x86/kernel/setup_percpu.c
19150+++ b/arch/x86/kernel/setup_percpu.c
19151@@ -25,19 +25,17 @@
19152 # define DBG(x...)
19153 #endif
19154
19155-DEFINE_PER_CPU(int, cpu_number);
19156+#ifdef CONFIG_SMP
19157+DEFINE_PER_CPU(unsigned int, cpu_number);
19158 EXPORT_PER_CPU_SYMBOL(cpu_number);
19159+#endif
19160
19161-#ifdef CONFIG_X86_64
19162 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19163-#else
19164-#define BOOT_PERCPU_OFFSET 0
19165-#endif
19166
19167 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19168 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19169
19170-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19171+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19172 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19173 };
19174 EXPORT_SYMBOL(__per_cpu_offset);
19175@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19176 {
19177 #ifdef CONFIG_X86_32
19178 struct desc_struct gdt;
19179+ unsigned long base = per_cpu_offset(cpu);
19180
19181- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19182- 0x2 | DESCTYPE_S, 0x8);
19183- gdt.s = 1;
19184+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19185+ 0x83 | DESCTYPE_S, 0xC);
19186 write_gdt_entry(get_cpu_gdt_table(cpu),
19187 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19188 #endif
19189@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19190 /* alrighty, percpu areas up and running */
19191 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19192 for_each_possible_cpu(cpu) {
19193+#ifdef CONFIG_CC_STACKPROTECTOR
19194+#ifdef CONFIG_X86_32
19195+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19196+#endif
19197+#endif
19198 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19199 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19200 per_cpu(cpu_number, cpu) = cpu;
19201@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19202 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19203 #endif
19204 #endif
19205+#ifdef CONFIG_CC_STACKPROTECTOR
19206+#ifdef CONFIG_X86_32
19207+ if (!cpu)
19208+ per_cpu(stack_canary.canary, cpu) = canary;
19209+#endif
19210+#endif
19211 /*
19212 * Up to this point, the boot CPU has been using .data.init
19213 * area. Reload any changed state for the boot CPU.
19214diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19215index 6a44a76..a9287a1 100644
19216--- a/arch/x86/kernel/signal.c
19217+++ b/arch/x86/kernel/signal.c
19218@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19219 * Align the stack pointer according to the i386 ABI,
19220 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19221 */
19222- sp = ((sp + 4) & -16ul) - 4;
19223+ sp = ((sp - 12) & -16ul) - 4;
19224 #else /* !CONFIG_X86_32 */
19225 sp = round_down(sp, 16) - 8;
19226 #endif
19227@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19228 * Return an always-bogus address instead so we will die with SIGSEGV.
19229 */
19230 if (onsigstack && !likely(on_sig_stack(sp)))
19231- return (void __user *)-1L;
19232+ return (__force void __user *)-1L;
19233
19234 /* save i387 state */
19235 if (used_math() && save_i387_xstate(*fpstate) < 0)
19236- return (void __user *)-1L;
19237+ return (__force void __user *)-1L;
19238
19239 return (void __user *)sp;
19240 }
19241@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19242 }
19243
19244 if (current->mm->context.vdso)
19245- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19246+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19247 else
19248- restorer = &frame->retcode;
19249+ restorer = (void __user *)&frame->retcode;
19250 if (ka->sa.sa_flags & SA_RESTORER)
19251 restorer = ka->sa.sa_restorer;
19252
19253@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19254 * reasons and because gdb uses it as a signature to notice
19255 * signal handler stack frames.
19256 */
19257- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19258+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19259
19260 if (err)
19261 return -EFAULT;
19262@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19263 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19264
19265 /* Set up to return from userspace. */
19266- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19267+ if (current->mm->context.vdso)
19268+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19269+ else
19270+ restorer = (void __user *)&frame->retcode;
19271 if (ka->sa.sa_flags & SA_RESTORER)
19272 restorer = ka->sa.sa_restorer;
19273 put_user_ex(restorer, &frame->pretcode);
19274@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19275 * reasons and because gdb uses it as a signature to notice
19276 * signal handler stack frames.
19277 */
19278- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19279+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19280 } put_user_catch(err);
19281
19282 if (err)
19283@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19284 int signr;
19285 sigset_t *oldset;
19286
19287+ pax_track_stack();
19288+
19289 /*
19290 * We want the common case to go fast, which is why we may in certain
19291 * cases get here from kernel mode. Just return without doing anything
19292@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19293 * X86_32: vm86 regs switched out by assembly code before reaching
19294 * here, so testing against kernel CS suffices.
19295 */
19296- if (!user_mode(regs))
19297+ if (!user_mode_novm(regs))
19298 return;
19299
19300 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19301diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19302index 7e8e905..64d5c32 100644
19303--- a/arch/x86/kernel/smpboot.c
19304+++ b/arch/x86/kernel/smpboot.c
19305@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19306 */
19307 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19308
19309-void cpu_hotplug_driver_lock()
19310+void cpu_hotplug_driver_lock(void)
19311 {
19312- mutex_lock(&x86_cpu_hotplug_driver_mutex);
19313+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
19314 }
19315
19316-void cpu_hotplug_driver_unlock()
19317+void cpu_hotplug_driver_unlock(void)
19318 {
19319- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19320+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19321 }
19322
19323 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19324@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19325 * target processor state.
19326 */
19327 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19328- (unsigned long)stack_start.sp);
19329+ stack_start);
19330
19331 /*
19332 * Run STARTUP IPI loop.
19333@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19334 set_idle_for_cpu(cpu, c_idle.idle);
19335 do_rest:
19336 per_cpu(current_task, cpu) = c_idle.idle;
19337+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19338 #ifdef CONFIG_X86_32
19339 /* Stack for startup_32 can be just as for start_secondary onwards */
19340 irq_ctx_init(cpu);
19341@@ -750,13 +751,15 @@ do_rest:
19342 #else
19343 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19344 initial_gs = per_cpu_offset(cpu);
19345- per_cpu(kernel_stack, cpu) =
19346- (unsigned long)task_stack_page(c_idle.idle) -
19347- KERNEL_STACK_OFFSET + THREAD_SIZE;
19348+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19349 #endif
19350+
19351+ pax_open_kernel();
19352 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19353+ pax_close_kernel();
19354+
19355 initial_code = (unsigned long)start_secondary;
19356- stack_start.sp = (void *) c_idle.idle->thread.sp;
19357+ stack_start = c_idle.idle->thread.sp;
19358
19359 /* start_ip had better be page-aligned! */
19360 start_ip = setup_trampoline();
19361@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19362
19363 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19364
19365+#ifdef CONFIG_PAX_PER_CPU_PGD
19366+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19367+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19368+ KERNEL_PGD_PTRS);
19369+#endif
19370+
19371 err = do_boot_cpu(apicid, cpu);
19372
19373 if (err) {
19374diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19375index 3149032..14f1053 100644
19376--- a/arch/x86/kernel/step.c
19377+++ b/arch/x86/kernel/step.c
19378@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19379 struct desc_struct *desc;
19380 unsigned long base;
19381
19382- seg &= ~7UL;
19383+ seg >>= 3;
19384
19385 mutex_lock(&child->mm->context.lock);
19386- if (unlikely((seg >> 3) >= child->mm->context.size))
19387+ if (unlikely(seg >= child->mm->context.size))
19388 addr = -1L; /* bogus selector, access would fault */
19389 else {
19390 desc = child->mm->context.ldt + seg;
19391@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19392 addr += base;
19393 }
19394 mutex_unlock(&child->mm->context.lock);
19395- }
19396+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19397+ addr = ktla_ktva(addr);
19398
19399 return addr;
19400 }
19401@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19402 unsigned char opcode[15];
19403 unsigned long addr = convert_ip_to_linear(child, regs);
19404
19405+ if (addr == -EINVAL)
19406+ return 0;
19407+
19408 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19409 for (i = 0; i < copied; i++) {
19410 switch (opcode[i]) {
19411@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19412
19413 #ifdef CONFIG_X86_64
19414 case 0x40 ... 0x4f:
19415- if (regs->cs != __USER_CS)
19416+ if ((regs->cs & 0xffff) != __USER_CS)
19417 /* 32-bit mode: register increment */
19418 return 0;
19419 /* 64-bit mode: REX prefix */
19420diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19421index dee1ff7..a397f7f 100644
19422--- a/arch/x86/kernel/sys_i386_32.c
19423+++ b/arch/x86/kernel/sys_i386_32.c
19424@@ -24,6 +24,21 @@
19425
19426 #include <asm/syscalls.h>
19427
19428+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19429+{
19430+ unsigned long pax_task_size = TASK_SIZE;
19431+
19432+#ifdef CONFIG_PAX_SEGMEXEC
19433+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19434+ pax_task_size = SEGMEXEC_TASK_SIZE;
19435+#endif
19436+
19437+ if (len > pax_task_size || addr > pax_task_size - len)
19438+ return -EINVAL;
19439+
19440+ return 0;
19441+}
19442+
19443 /*
19444 * Perform the select(nd, in, out, ex, tv) and mmap() system
19445 * calls. Linux/i386 didn't use to be able to handle more than
19446@@ -58,6 +73,212 @@ out:
19447 return err;
19448 }
19449
19450+unsigned long
19451+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19452+ unsigned long len, unsigned long pgoff, unsigned long flags)
19453+{
19454+ struct mm_struct *mm = current->mm;
19455+ struct vm_area_struct *vma;
19456+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19457+
19458+#ifdef CONFIG_PAX_SEGMEXEC
19459+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19460+ pax_task_size = SEGMEXEC_TASK_SIZE;
19461+#endif
19462+
19463+ pax_task_size -= PAGE_SIZE;
19464+
19465+ if (len > pax_task_size)
19466+ return -ENOMEM;
19467+
19468+ if (flags & MAP_FIXED)
19469+ return addr;
19470+
19471+#ifdef CONFIG_PAX_RANDMMAP
19472+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19473+#endif
19474+
19475+ if (addr) {
19476+ addr = PAGE_ALIGN(addr);
19477+ if (pax_task_size - len >= addr) {
19478+ vma = find_vma(mm, addr);
19479+ if (check_heap_stack_gap(vma, addr, len))
19480+ return addr;
19481+ }
19482+ }
19483+ if (len > mm->cached_hole_size) {
19484+ start_addr = addr = mm->free_area_cache;
19485+ } else {
19486+ start_addr = addr = mm->mmap_base;
19487+ mm->cached_hole_size = 0;
19488+ }
19489+
19490+#ifdef CONFIG_PAX_PAGEEXEC
19491+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19492+ start_addr = 0x00110000UL;
19493+
19494+#ifdef CONFIG_PAX_RANDMMAP
19495+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19496+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19497+#endif
19498+
19499+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19500+ start_addr = addr = mm->mmap_base;
19501+ else
19502+ addr = start_addr;
19503+ }
19504+#endif
19505+
19506+full_search:
19507+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19508+ /* At this point: (!vma || addr < vma->vm_end). */
19509+ if (pax_task_size - len < addr) {
19510+ /*
19511+ * Start a new search - just in case we missed
19512+ * some holes.
19513+ */
19514+ if (start_addr != mm->mmap_base) {
19515+ start_addr = addr = mm->mmap_base;
19516+ mm->cached_hole_size = 0;
19517+ goto full_search;
19518+ }
19519+ return -ENOMEM;
19520+ }
19521+ if (check_heap_stack_gap(vma, addr, len))
19522+ break;
19523+ if (addr + mm->cached_hole_size < vma->vm_start)
19524+ mm->cached_hole_size = vma->vm_start - addr;
19525+ addr = vma->vm_end;
19526+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19527+ start_addr = addr = mm->mmap_base;
19528+ mm->cached_hole_size = 0;
19529+ goto full_search;
19530+ }
19531+ }
19532+
19533+ /*
19534+ * Remember the place where we stopped the search:
19535+ */
19536+ mm->free_area_cache = addr + len;
19537+ return addr;
19538+}
19539+
19540+unsigned long
19541+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19542+ const unsigned long len, const unsigned long pgoff,
19543+ const unsigned long flags)
19544+{
19545+ struct vm_area_struct *vma;
19546+ struct mm_struct *mm = current->mm;
19547+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19548+
19549+#ifdef CONFIG_PAX_SEGMEXEC
19550+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19551+ pax_task_size = SEGMEXEC_TASK_SIZE;
19552+#endif
19553+
19554+ pax_task_size -= PAGE_SIZE;
19555+
19556+ /* requested length too big for entire address space */
19557+ if (len > pax_task_size)
19558+ return -ENOMEM;
19559+
19560+ if (flags & MAP_FIXED)
19561+ return addr;
19562+
19563+#ifdef CONFIG_PAX_PAGEEXEC
19564+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19565+ goto bottomup;
19566+#endif
19567+
19568+#ifdef CONFIG_PAX_RANDMMAP
19569+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19570+#endif
19571+
19572+ /* requesting a specific address */
19573+ if (addr) {
19574+ addr = PAGE_ALIGN(addr);
19575+ if (pax_task_size - len >= addr) {
19576+ vma = find_vma(mm, addr);
19577+ if (check_heap_stack_gap(vma, addr, len))
19578+ return addr;
19579+ }
19580+ }
19581+
19582+ /* check if free_area_cache is useful for us */
19583+ if (len <= mm->cached_hole_size) {
19584+ mm->cached_hole_size = 0;
19585+ mm->free_area_cache = mm->mmap_base;
19586+ }
19587+
19588+ /* either no address requested or can't fit in requested address hole */
19589+ addr = mm->free_area_cache;
19590+
19591+ /* make sure it can fit in the remaining address space */
19592+ if (addr > len) {
19593+ vma = find_vma(mm, addr-len);
19594+ if (check_heap_stack_gap(vma, addr - len, len))
19595+ /* remember the address as a hint for next time */
19596+ return (mm->free_area_cache = addr-len);
19597+ }
19598+
19599+ if (mm->mmap_base < len)
19600+ goto bottomup;
19601+
19602+ addr = mm->mmap_base-len;
19603+
19604+ do {
19605+ /*
19606+ * Lookup failure means no vma is above this address,
19607+ * else if new region fits below vma->vm_start,
19608+ * return with success:
19609+ */
19610+ vma = find_vma(mm, addr);
19611+ if (check_heap_stack_gap(vma, addr, len))
19612+ /* remember the address as a hint for next time */
19613+ return (mm->free_area_cache = addr);
19614+
19615+ /* remember the largest hole we saw so far */
19616+ if (addr + mm->cached_hole_size < vma->vm_start)
19617+ mm->cached_hole_size = vma->vm_start - addr;
19618+
19619+ /* try just below the current vma->vm_start */
19620+ addr = skip_heap_stack_gap(vma, len);
19621+ } while (!IS_ERR_VALUE(addr));
19622+
19623+bottomup:
19624+ /*
19625+ * A failed mmap() very likely causes application failure,
19626+ * so fall back to the bottom-up function here. This scenario
19627+ * can happen with large stack limits and large mmap()
19628+ * allocations.
19629+ */
19630+
19631+#ifdef CONFIG_PAX_SEGMEXEC
19632+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19633+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19634+ else
19635+#endif
19636+
19637+ mm->mmap_base = TASK_UNMAPPED_BASE;
19638+
19639+#ifdef CONFIG_PAX_RANDMMAP
19640+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19641+ mm->mmap_base += mm->delta_mmap;
19642+#endif
19643+
19644+ mm->free_area_cache = mm->mmap_base;
19645+ mm->cached_hole_size = ~0UL;
19646+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19647+ /*
19648+ * Restore the topdown base:
19649+ */
19650+ mm->mmap_base = base;
19651+ mm->free_area_cache = base;
19652+ mm->cached_hole_size = ~0UL;
19653+
19654+ return addr;
19655+}
19656
19657 struct sel_arg_struct {
19658 unsigned long n;
19659@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19660 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19661 case SEMTIMEDOP:
19662 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19663- (const struct timespec __user *)fifth);
19664+ (__force const struct timespec __user *)fifth);
19665
19666 case SEMGET:
19667 return sys_semget(first, second, third);
19668@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19669 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19670 if (ret)
19671 return ret;
19672- return put_user(raddr, (ulong __user *) third);
19673+ return put_user(raddr, (__force ulong __user *) third);
19674 }
19675 case 1: /* iBCS2 emulator entry point */
19676 if (!segment_eq(get_fs(), get_ds()))
19677@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19678
19679 return error;
19680 }
19681-
19682-
19683-/*
19684- * Do a system call from kernel instead of calling sys_execve so we
19685- * end up with proper pt_regs.
19686- */
19687-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19688-{
19689- long __res;
19690- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19691- : "=a" (__res)
19692- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19693- return __res;
19694-}
19695diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19696index 8aa2057..b604bc1 100644
19697--- a/arch/x86/kernel/sys_x86_64.c
19698+++ b/arch/x86/kernel/sys_x86_64.c
19699@@ -32,8 +32,8 @@ out:
19700 return error;
19701 }
19702
19703-static void find_start_end(unsigned long flags, unsigned long *begin,
19704- unsigned long *end)
19705+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19706+ unsigned long *begin, unsigned long *end)
19707 {
19708 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19709 unsigned long new_begin;
19710@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19711 *begin = new_begin;
19712 }
19713 } else {
19714- *begin = TASK_UNMAPPED_BASE;
19715+ *begin = mm->mmap_base;
19716 *end = TASK_SIZE;
19717 }
19718 }
19719@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19720 if (flags & MAP_FIXED)
19721 return addr;
19722
19723- find_start_end(flags, &begin, &end);
19724+ find_start_end(mm, flags, &begin, &end);
19725
19726 if (len > end)
19727 return -ENOMEM;
19728
19729+#ifdef CONFIG_PAX_RANDMMAP
19730+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19731+#endif
19732+
19733 if (addr) {
19734 addr = PAGE_ALIGN(addr);
19735 vma = find_vma(mm, addr);
19736- if (end - len >= addr &&
19737- (!vma || addr + len <= vma->vm_start))
19738+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19739 return addr;
19740 }
19741 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19742@@ -106,7 +109,7 @@ full_search:
19743 }
19744 return -ENOMEM;
19745 }
19746- if (!vma || addr + len <= vma->vm_start) {
19747+ if (check_heap_stack_gap(vma, addr, len)) {
19748 /*
19749 * Remember the place where we stopped the search:
19750 */
19751@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19752 {
19753 struct vm_area_struct *vma;
19754 struct mm_struct *mm = current->mm;
19755- unsigned long addr = addr0;
19756+ unsigned long base = mm->mmap_base, addr = addr0;
19757
19758 /* requested length too big for entire address space */
19759 if (len > TASK_SIZE)
19760@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19761 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19762 goto bottomup;
19763
19764+#ifdef CONFIG_PAX_RANDMMAP
19765+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19766+#endif
19767+
19768 /* requesting a specific address */
19769 if (addr) {
19770 addr = PAGE_ALIGN(addr);
19771- vma = find_vma(mm, addr);
19772- if (TASK_SIZE - len >= addr &&
19773- (!vma || addr + len <= vma->vm_start))
19774- return addr;
19775+ if (TASK_SIZE - len >= addr) {
19776+ vma = find_vma(mm, addr);
19777+ if (check_heap_stack_gap(vma, addr, len))
19778+ return addr;
19779+ }
19780 }
19781
19782 /* check if free_area_cache is useful for us */
19783@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19784 /* make sure it can fit in the remaining address space */
19785 if (addr > len) {
19786 vma = find_vma(mm, addr-len);
19787- if (!vma || addr <= vma->vm_start)
19788+ if (check_heap_stack_gap(vma, addr - len, len))
19789 /* remember the address as a hint for next time */
19790 return mm->free_area_cache = addr-len;
19791 }
19792@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19793 * return with success:
19794 */
19795 vma = find_vma(mm, addr);
19796- if (!vma || addr+len <= vma->vm_start)
19797+ if (check_heap_stack_gap(vma, addr, len))
19798 /* remember the address as a hint for next time */
19799 return mm->free_area_cache = addr;
19800
19801@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19802 mm->cached_hole_size = vma->vm_start - addr;
19803
19804 /* try just below the current vma->vm_start */
19805- addr = vma->vm_start-len;
19806- } while (len < vma->vm_start);
19807+ addr = skip_heap_stack_gap(vma, len);
19808+ } while (!IS_ERR_VALUE(addr));
19809
19810 bottomup:
19811 /*
19812@@ -198,13 +206,21 @@ bottomup:
19813 * can happen with large stack limits and large mmap()
19814 * allocations.
19815 */
19816+ mm->mmap_base = TASK_UNMAPPED_BASE;
19817+
19818+#ifdef CONFIG_PAX_RANDMMAP
19819+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19820+ mm->mmap_base += mm->delta_mmap;
19821+#endif
19822+
19823+ mm->free_area_cache = mm->mmap_base;
19824 mm->cached_hole_size = ~0UL;
19825- mm->free_area_cache = TASK_UNMAPPED_BASE;
19826 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19827 /*
19828 * Restore the topdown base:
19829 */
19830- mm->free_area_cache = mm->mmap_base;
19831+ mm->mmap_base = base;
19832+ mm->free_area_cache = base;
19833 mm->cached_hole_size = ~0UL;
19834
19835 return addr;
19836diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
19837index 76d70a4..4c94a44 100644
19838--- a/arch/x86/kernel/syscall_table_32.S
19839+++ b/arch/x86/kernel/syscall_table_32.S
19840@@ -1,3 +1,4 @@
19841+.section .rodata,"a",@progbits
19842 ENTRY(sys_call_table)
19843 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
19844 .long sys_exit
19845diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19846index 46b8277..3349d55 100644
19847--- a/arch/x86/kernel/tboot.c
19848+++ b/arch/x86/kernel/tboot.c
19849@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
19850
19851 void tboot_shutdown(u32 shutdown_type)
19852 {
19853- void (*shutdown)(void);
19854+ void (* __noreturn shutdown)(void);
19855
19856 if (!tboot_enabled())
19857 return;
19858@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
19859
19860 switch_to_tboot_pt();
19861
19862- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19863+ shutdown = (void *)tboot->shutdown_entry;
19864 shutdown();
19865
19866 /* should not reach here */
19867@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19868 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19869 }
19870
19871-static atomic_t ap_wfs_count;
19872+static atomic_unchecked_t ap_wfs_count;
19873
19874 static int tboot_wait_for_aps(int num_aps)
19875 {
19876@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19877 {
19878 switch (action) {
19879 case CPU_DYING:
19880- atomic_inc(&ap_wfs_count);
19881+ atomic_inc_unchecked(&ap_wfs_count);
19882 if (num_online_cpus() == 1)
19883- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19884+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19885 return NOTIFY_BAD;
19886 break;
19887 }
19888@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
19889
19890 tboot_create_trampoline();
19891
19892- atomic_set(&ap_wfs_count, 0);
19893+ atomic_set_unchecked(&ap_wfs_count, 0);
19894 register_hotcpu_notifier(&tboot_cpu_notifier);
19895 return 0;
19896 }
19897diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19898index be25734..87fe232 100644
19899--- a/arch/x86/kernel/time.c
19900+++ b/arch/x86/kernel/time.c
19901@@ -26,17 +26,13 @@
19902 int timer_ack;
19903 #endif
19904
19905-#ifdef CONFIG_X86_64
19906-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
19907-#endif
19908-
19909 unsigned long profile_pc(struct pt_regs *regs)
19910 {
19911 unsigned long pc = instruction_pointer(regs);
19912
19913- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19914+ if (!user_mode(regs) && in_lock_functions(pc)) {
19915 #ifdef CONFIG_FRAME_POINTER
19916- return *(unsigned long *)(regs->bp + sizeof(long));
19917+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19918 #else
19919 unsigned long *sp =
19920 (unsigned long *)kernel_stack_pointer(regs);
19921@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19922 * or above a saved flags. Eflags has bits 22-31 zero,
19923 * kernel addresses don't.
19924 */
19925+
19926+#ifdef CONFIG_PAX_KERNEXEC
19927+ return ktla_ktva(sp[0]);
19928+#else
19929 if (sp[0] >> 22)
19930 return sp[0];
19931 if (sp[1] >> 22)
19932 return sp[1];
19933 #endif
19934+
19935+#endif
19936 }
19937 return pc;
19938 }
19939diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19940index 6bb7b85..dd853e1 100644
19941--- a/arch/x86/kernel/tls.c
19942+++ b/arch/x86/kernel/tls.c
19943@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19944 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19945 return -EINVAL;
19946
19947+#ifdef CONFIG_PAX_SEGMEXEC
19948+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19949+ return -EINVAL;
19950+#endif
19951+
19952 set_tls_desc(p, idx, &info, 1);
19953
19954 return 0;
19955diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19956index 8508237..229b664 100644
19957--- a/arch/x86/kernel/trampoline_32.S
19958+++ b/arch/x86/kernel/trampoline_32.S
19959@@ -32,6 +32,12 @@
19960 #include <asm/segment.h>
19961 #include <asm/page_types.h>
19962
19963+#ifdef CONFIG_PAX_KERNEXEC
19964+#define ta(X) (X)
19965+#else
19966+#define ta(X) ((X) - __PAGE_OFFSET)
19967+#endif
19968+
19969 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
19970 __CPUINITRODATA
19971 .code16
19972@@ -60,7 +66,7 @@ r_base = .
19973 inc %ax # protected mode (PE) bit
19974 lmsw %ax # into protected mode
19975 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19976- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19977+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19978
19979 # These need to be in the same 64K segment as the above;
19980 # hence we don't use the boot_gdt_descr defined in head.S
19981diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19982index 3af2dff..ba8aa49 100644
19983--- a/arch/x86/kernel/trampoline_64.S
19984+++ b/arch/x86/kernel/trampoline_64.S
19985@@ -91,7 +91,7 @@ startup_32:
19986 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19987 movl %eax, %ds
19988
19989- movl $X86_CR4_PAE, %eax
19990+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19991 movl %eax, %cr4 # Enable PAE mode
19992
19993 # Setup trampoline 4 level pagetables
19994@@ -127,7 +127,7 @@ startup_64:
19995 no_longmode:
19996 hlt
19997 jmp no_longmode
19998-#include "verify_cpu_64.S"
19999+#include "verify_cpu.S"
20000
20001 # Careful these need to be in the same 64K segment as the above;
20002 tidt:
20003@@ -138,7 +138,7 @@ tidt:
20004 # so the kernel can live anywhere
20005 .balign 4
20006 tgdt:
20007- .short tgdt_end - tgdt # gdt limit
20008+ .short tgdt_end - tgdt - 1 # gdt limit
20009 .long tgdt - r_base
20010 .short 0
20011 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20012diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20013index 7e37dce..ec3f8e5 100644
20014--- a/arch/x86/kernel/traps.c
20015+++ b/arch/x86/kernel/traps.c
20016@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20017
20018 /* Do we ignore FPU interrupts ? */
20019 char ignore_fpu_irq;
20020-
20021-/*
20022- * The IDT has to be page-aligned to simplify the Pentium
20023- * F0 0F bug workaround.
20024- */
20025-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20026 #endif
20027
20028 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20029@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20030 static inline void
20031 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20032 {
20033- if (!user_mode_vm(regs))
20034+ if (!user_mode(regs))
20035 die(str, regs, err);
20036 }
20037 #endif
20038
20039 static void __kprobes
20040-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20041+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20042 long error_code, siginfo_t *info)
20043 {
20044 struct task_struct *tsk = current;
20045
20046 #ifdef CONFIG_X86_32
20047- if (regs->flags & X86_VM_MASK) {
20048+ if (v8086_mode(regs)) {
20049 /*
20050 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20051 * On nmi (interrupt 2), do_trap should not be called.
20052@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20053 }
20054 #endif
20055
20056- if (!user_mode(regs))
20057+ if (!user_mode_novm(regs))
20058 goto kernel_trap;
20059
20060 #ifdef CONFIG_X86_32
20061@@ -158,7 +152,7 @@ trap_signal:
20062 printk_ratelimit()) {
20063 printk(KERN_INFO
20064 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20065- tsk->comm, tsk->pid, str,
20066+ tsk->comm, task_pid_nr(tsk), str,
20067 regs->ip, regs->sp, error_code);
20068 print_vma_addr(" in ", regs->ip);
20069 printk("\n");
20070@@ -175,8 +169,20 @@ kernel_trap:
20071 if (!fixup_exception(regs)) {
20072 tsk->thread.error_code = error_code;
20073 tsk->thread.trap_no = trapnr;
20074+
20075+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20076+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20077+ str = "PAX: suspicious stack segment fault";
20078+#endif
20079+
20080 die(str, regs, error_code);
20081 }
20082+
20083+#ifdef CONFIG_PAX_REFCOUNT
20084+ if (trapnr == 4)
20085+ pax_report_refcount_overflow(regs);
20086+#endif
20087+
20088 return;
20089
20090 #ifdef CONFIG_X86_32
20091@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20092 conditional_sti(regs);
20093
20094 #ifdef CONFIG_X86_32
20095- if (regs->flags & X86_VM_MASK)
20096+ if (v8086_mode(regs))
20097 goto gp_in_vm86;
20098 #endif
20099
20100 tsk = current;
20101- if (!user_mode(regs))
20102+ if (!user_mode_novm(regs))
20103 goto gp_in_kernel;
20104
20105+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20106+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20107+ struct mm_struct *mm = tsk->mm;
20108+ unsigned long limit;
20109+
20110+ down_write(&mm->mmap_sem);
20111+ limit = mm->context.user_cs_limit;
20112+ if (limit < TASK_SIZE) {
20113+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20114+ up_write(&mm->mmap_sem);
20115+ return;
20116+ }
20117+ up_write(&mm->mmap_sem);
20118+ }
20119+#endif
20120+
20121 tsk->thread.error_code = error_code;
20122 tsk->thread.trap_no = 13;
20123
20124@@ -305,6 +327,13 @@ gp_in_kernel:
20125 if (notify_die(DIE_GPF, "general protection fault", regs,
20126 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20127 return;
20128+
20129+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20130+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20131+ die("PAX: suspicious general protection fault", regs, error_code);
20132+ else
20133+#endif
20134+
20135 die("general protection fault", regs, error_code);
20136 }
20137
20138@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20139 dotraplinkage notrace __kprobes void
20140 do_nmi(struct pt_regs *regs, long error_code)
20141 {
20142+
20143+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20144+ if (!user_mode(regs)) {
20145+ unsigned long cs = regs->cs & 0xFFFF;
20146+ unsigned long ip = ktva_ktla(regs->ip);
20147+
20148+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20149+ regs->ip = ip;
20150+ }
20151+#endif
20152+
20153 nmi_enter();
20154
20155 inc_irq_stat(__nmi_count);
20156@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20157 }
20158
20159 #ifdef CONFIG_X86_32
20160- if (regs->flags & X86_VM_MASK)
20161+ if (v8086_mode(regs))
20162 goto debug_vm86;
20163 #endif
20164
20165@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20166 * kernel space (but re-enable TF when returning to user mode).
20167 */
20168 if (condition & DR_STEP) {
20169- if (!user_mode(regs))
20170+ if (!user_mode_novm(regs))
20171 goto clear_TF_reenable;
20172 }
20173
20174@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20175 * Handle strange cache flush from user space exception
20176 * in all other cases. This is undocumented behaviour.
20177 */
20178- if (regs->flags & X86_VM_MASK) {
20179+ if (v8086_mode(regs)) {
20180 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20181 return;
20182 }
20183@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20184 void __math_state_restore(void)
20185 {
20186 struct thread_info *thread = current_thread_info();
20187- struct task_struct *tsk = thread->task;
20188+ struct task_struct *tsk = current;
20189
20190 /*
20191 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20192@@ -825,8 +865,7 @@ void __math_state_restore(void)
20193 */
20194 asmlinkage void math_state_restore(void)
20195 {
20196- struct thread_info *thread = current_thread_info();
20197- struct task_struct *tsk = thread->task;
20198+ struct task_struct *tsk = current;
20199
20200 if (!tsk_used_math(tsk)) {
20201 local_irq_enable();
20202diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20203new file mode 100644
20204index 0000000..50c5edd
20205--- /dev/null
20206+++ b/arch/x86/kernel/verify_cpu.S
20207@@ -0,0 +1,140 @@
20208+/*
20209+ *
20210+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
20211+ * code has been borrowed from boot/setup.S and was introduced by
20212+ * Andi Kleen.
20213+ *
20214+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20215+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20216+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20217+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20218+ *
20219+ * This source code is licensed under the GNU General Public License,
20220+ * Version 2. See the file COPYING for more details.
20221+ *
20222+ * This is a common code for verification whether CPU supports
20223+ * long mode and SSE or not. It is not called directly instead this
20224+ * file is included at various places and compiled in that context.
20225+ * This file is expected to run in 32bit code. Currently:
20226+ *
20227+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20228+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
20229+ * arch/x86/kernel/head_32.S: processor startup
20230+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20231+ *
20232+ * verify_cpu, returns the status of longmode and SSE in register %eax.
20233+ * 0: Success 1: Failure
20234+ *
20235+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20236+ *
20237+ * The caller needs to check for the error code and take the action
20238+ * appropriately. Either display a message or halt.
20239+ */
20240+
20241+#include <asm/cpufeature.h>
20242+#include <asm/msr-index.h>
20243+
20244+verify_cpu:
20245+ pushfl # Save caller passed flags
20246+ pushl $0 # Kill any dangerous flags
20247+ popfl
20248+
20249+ pushfl # standard way to check for cpuid
20250+ popl %eax
20251+ movl %eax,%ebx
20252+ xorl $0x200000,%eax
20253+ pushl %eax
20254+ popfl
20255+ pushfl
20256+ popl %eax
20257+ cmpl %eax,%ebx
20258+ jz verify_cpu_no_longmode # cpu has no cpuid
20259+
20260+ movl $0x0,%eax # See if cpuid 1 is implemented
20261+ cpuid
20262+ cmpl $0x1,%eax
20263+ jb verify_cpu_no_longmode # no cpuid 1
20264+
20265+ xor %di,%di
20266+ cmpl $0x68747541,%ebx # AuthenticAMD
20267+ jnz verify_cpu_noamd
20268+ cmpl $0x69746e65,%edx
20269+ jnz verify_cpu_noamd
20270+ cmpl $0x444d4163,%ecx
20271+ jnz verify_cpu_noamd
20272+ mov $1,%di # cpu is from AMD
20273+ jmp verify_cpu_check
20274+
20275+verify_cpu_noamd:
20276+ cmpl $0x756e6547,%ebx # GenuineIntel?
20277+ jnz verify_cpu_check
20278+ cmpl $0x49656e69,%edx
20279+ jnz verify_cpu_check
20280+ cmpl $0x6c65746e,%ecx
20281+ jnz verify_cpu_check
20282+
20283+ # only call IA32_MISC_ENABLE when:
20284+ # family > 6 || (family == 6 && model >= 0xd)
20285+ movl $0x1, %eax # check CPU family and model
20286+ cpuid
20287+ movl %eax, %ecx
20288+
20289+ andl $0x0ff00f00, %eax # mask family and extended family
20290+ shrl $8, %eax
20291+ cmpl $6, %eax
20292+ ja verify_cpu_clear_xd # family > 6, ok
20293+ jb verify_cpu_check # family < 6, skip
20294+
20295+ andl $0x000f00f0, %ecx # mask model and extended model
20296+ shrl $4, %ecx
20297+ cmpl $0xd, %ecx
20298+ jb verify_cpu_check # family == 6, model < 0xd, skip
20299+
20300+verify_cpu_clear_xd:
20301+ movl $MSR_IA32_MISC_ENABLE, %ecx
20302+ rdmsr
20303+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20304+ jnc verify_cpu_check # only write MSR if bit was changed
20305+ wrmsr
20306+
20307+verify_cpu_check:
20308+ movl $0x1,%eax # Does the cpu have what it takes
20309+ cpuid
20310+ andl $REQUIRED_MASK0,%edx
20311+ xorl $REQUIRED_MASK0,%edx
20312+ jnz verify_cpu_no_longmode
20313+
20314+ movl $0x80000000,%eax # See if extended cpuid is implemented
20315+ cpuid
20316+ cmpl $0x80000001,%eax
20317+ jb verify_cpu_no_longmode # no extended cpuid
20318+
20319+ movl $0x80000001,%eax # Does the cpu have what it takes
20320+ cpuid
20321+ andl $REQUIRED_MASK1,%edx
20322+ xorl $REQUIRED_MASK1,%edx
20323+ jnz verify_cpu_no_longmode
20324+
20325+verify_cpu_sse_test:
20326+ movl $1,%eax
20327+ cpuid
20328+ andl $SSE_MASK,%edx
20329+ cmpl $SSE_MASK,%edx
20330+ je verify_cpu_sse_ok
20331+ test %di,%di
20332+ jz verify_cpu_no_longmode # only try to force SSE on AMD
20333+ movl $MSR_K7_HWCR,%ecx
20334+ rdmsr
20335+ btr $15,%eax # enable SSE
20336+ wrmsr
20337+ xor %di,%di # don't loop
20338+ jmp verify_cpu_sse_test # try again
20339+
20340+verify_cpu_no_longmode:
20341+ popfl # Restore caller passed flags
20342+ movl $1,%eax
20343+ ret
20344+verify_cpu_sse_ok:
20345+ popfl # Restore caller passed flags
20346+ xorl %eax, %eax
20347+ ret
20348diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20349deleted file mode 100644
20350index 45b6f8a..0000000
20351--- a/arch/x86/kernel/verify_cpu_64.S
20352+++ /dev/null
20353@@ -1,105 +0,0 @@
20354-/*
20355- *
20356- * verify_cpu.S - Code for cpu long mode and SSE verification. This
20357- * code has been borrowed from boot/setup.S and was introduced by
20358- * Andi Kleen.
20359- *
20360- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20361- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20362- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20363- *
20364- * This source code is licensed under the GNU General Public License,
20365- * Version 2. See the file COPYING for more details.
20366- *
20367- * This is a common code for verification whether CPU supports
20368- * long mode and SSE or not. It is not called directly instead this
20369- * file is included at various places and compiled in that context.
20370- * Following are the current usage.
20371- *
20372- * This file is included by both 16bit and 32bit code.
20373- *
20374- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20375- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20376- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20377- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20378- *
20379- * verify_cpu, returns the status of cpu check in register %eax.
20380- * 0: Success 1: Failure
20381- *
20382- * The caller needs to check for the error code and take the action
20383- * appropriately. Either display a message or halt.
20384- */
20385-
20386-#include <asm/cpufeature.h>
20387-
20388-verify_cpu:
20389- pushfl # Save caller passed flags
20390- pushl $0 # Kill any dangerous flags
20391- popfl
20392-
20393- pushfl # standard way to check for cpuid
20394- popl %eax
20395- movl %eax,%ebx
20396- xorl $0x200000,%eax
20397- pushl %eax
20398- popfl
20399- pushfl
20400- popl %eax
20401- cmpl %eax,%ebx
20402- jz verify_cpu_no_longmode # cpu has no cpuid
20403-
20404- movl $0x0,%eax # See if cpuid 1 is implemented
20405- cpuid
20406- cmpl $0x1,%eax
20407- jb verify_cpu_no_longmode # no cpuid 1
20408-
20409- xor %di,%di
20410- cmpl $0x68747541,%ebx # AuthenticAMD
20411- jnz verify_cpu_noamd
20412- cmpl $0x69746e65,%edx
20413- jnz verify_cpu_noamd
20414- cmpl $0x444d4163,%ecx
20415- jnz verify_cpu_noamd
20416- mov $1,%di # cpu is from AMD
20417-
20418-verify_cpu_noamd:
20419- movl $0x1,%eax # Does the cpu have what it takes
20420- cpuid
20421- andl $REQUIRED_MASK0,%edx
20422- xorl $REQUIRED_MASK0,%edx
20423- jnz verify_cpu_no_longmode
20424-
20425- movl $0x80000000,%eax # See if extended cpuid is implemented
20426- cpuid
20427- cmpl $0x80000001,%eax
20428- jb verify_cpu_no_longmode # no extended cpuid
20429-
20430- movl $0x80000001,%eax # Does the cpu have what it takes
20431- cpuid
20432- andl $REQUIRED_MASK1,%edx
20433- xorl $REQUIRED_MASK1,%edx
20434- jnz verify_cpu_no_longmode
20435-
20436-verify_cpu_sse_test:
20437- movl $1,%eax
20438- cpuid
20439- andl $SSE_MASK,%edx
20440- cmpl $SSE_MASK,%edx
20441- je verify_cpu_sse_ok
20442- test %di,%di
20443- jz verify_cpu_no_longmode # only try to force SSE on AMD
20444- movl $0xc0010015,%ecx # HWCR
20445- rdmsr
20446- btr $15,%eax # enable SSE
20447- wrmsr
20448- xor %di,%di # don't loop
20449- jmp verify_cpu_sse_test # try again
20450-
20451-verify_cpu_no_longmode:
20452- popfl # Restore caller passed flags
20453- movl $1,%eax
20454- ret
20455-verify_cpu_sse_ok:
20456- popfl # Restore caller passed flags
20457- xorl %eax, %eax
20458- ret
20459diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20460index 9c4e625..c992817 100644
20461--- a/arch/x86/kernel/vm86_32.c
20462+++ b/arch/x86/kernel/vm86_32.c
20463@@ -41,6 +41,7 @@
20464 #include <linux/ptrace.h>
20465 #include <linux/audit.h>
20466 #include <linux/stddef.h>
20467+#include <linux/grsecurity.h>
20468
20469 #include <asm/uaccess.h>
20470 #include <asm/io.h>
20471@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20472 do_exit(SIGSEGV);
20473 }
20474
20475- tss = &per_cpu(init_tss, get_cpu());
20476+ tss = init_tss + get_cpu();
20477 current->thread.sp0 = current->thread.saved_sp0;
20478 current->thread.sysenter_cs = __KERNEL_CS;
20479 load_sp0(tss, &current->thread);
20480@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20481 struct task_struct *tsk;
20482 int tmp, ret = -EPERM;
20483
20484+#ifdef CONFIG_GRKERNSEC_VM86
20485+ if (!capable(CAP_SYS_RAWIO)) {
20486+ gr_handle_vm86();
20487+ goto out;
20488+ }
20489+#endif
20490+
20491 tsk = current;
20492 if (tsk->thread.saved_sp0)
20493 goto out;
20494@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20495 int tmp, ret;
20496 struct vm86plus_struct __user *v86;
20497
20498+#ifdef CONFIG_GRKERNSEC_VM86
20499+ if (!capable(CAP_SYS_RAWIO)) {
20500+ gr_handle_vm86();
20501+ ret = -EPERM;
20502+ goto out;
20503+ }
20504+#endif
20505+
20506 tsk = current;
20507 switch (regs->bx) {
20508 case VM86_REQUEST_IRQ:
20509@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20510 tsk->thread.saved_fs = info->regs32->fs;
20511 tsk->thread.saved_gs = get_user_gs(info->regs32);
20512
20513- tss = &per_cpu(init_tss, get_cpu());
20514+ tss = init_tss + get_cpu();
20515 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20516 if (cpu_has_sep)
20517 tsk->thread.sysenter_cs = 0;
20518@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20519 goto cannot_handle;
20520 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20521 goto cannot_handle;
20522- intr_ptr = (unsigned long __user *) (i << 2);
20523+ intr_ptr = (__force unsigned long __user *) (i << 2);
20524 if (get_user(segoffs, intr_ptr))
20525 goto cannot_handle;
20526 if ((segoffs >> 16) == BIOSSEG)
20527diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20528index d430e4c..831f817 100644
20529--- a/arch/x86/kernel/vmi_32.c
20530+++ b/arch/x86/kernel/vmi_32.c
20531@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20532 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20533
20534 #define call_vrom_func(rom,func) \
20535- (((VROMFUNC *)(rom->func))())
20536+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
20537
20538 #define call_vrom_long_func(rom,func,arg) \
20539- (((VROMLONGFUNC *)(rom->func)) (arg))
20540+({\
20541+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20542+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20543+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20544+ __reloc;\
20545+})
20546
20547-static struct vrom_header *vmi_rom;
20548+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20549 static int disable_pge;
20550 static int disable_pse;
20551 static int disable_sep;
20552@@ -76,10 +81,10 @@ static struct {
20553 void (*set_initial_ap_state)(int, int);
20554 void (*halt)(void);
20555 void (*set_lazy_mode)(int mode);
20556-} vmi_ops;
20557+} __no_const vmi_ops __read_only;
20558
20559 /* Cached VMI operations */
20560-struct vmi_timer_ops vmi_timer_ops;
20561+struct vmi_timer_ops vmi_timer_ops __read_only;
20562
20563 /*
20564 * VMI patching routines.
20565@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20566 static inline void patch_offset(void *insnbuf,
20567 unsigned long ip, unsigned long dest)
20568 {
20569- *(unsigned long *)(insnbuf+1) = dest-ip-5;
20570+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
20571 }
20572
20573 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20574@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20575 {
20576 u64 reloc;
20577 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20578+
20579 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20580 switch(rel->type) {
20581 case VMI_RELOCATION_CALL_REL:
20582@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20583
20584 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20585 {
20586- const pte_t pte = { .pte = 0 };
20587+ const pte_t pte = __pte(0ULL);
20588 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20589 }
20590
20591 static void vmi_pmd_clear(pmd_t *pmd)
20592 {
20593- const pte_t pte = { .pte = 0 };
20594+ const pte_t pte = __pte(0ULL);
20595 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20596 }
20597 #endif
20598@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20599 ap.ss = __KERNEL_DS;
20600 ap.esp = (unsigned long) start_esp;
20601
20602- ap.ds = __USER_DS;
20603- ap.es = __USER_DS;
20604+ ap.ds = __KERNEL_DS;
20605+ ap.es = __KERNEL_DS;
20606 ap.fs = __KERNEL_PERCPU;
20607- ap.gs = __KERNEL_STACK_CANARY;
20608+ savesegment(gs, ap.gs);
20609
20610 ap.eflags = 0;
20611
20612@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20613 paravirt_leave_lazy_mmu();
20614 }
20615
20616+#ifdef CONFIG_PAX_KERNEXEC
20617+static unsigned long vmi_pax_open_kernel(void)
20618+{
20619+ return 0;
20620+}
20621+
20622+static unsigned long vmi_pax_close_kernel(void)
20623+{
20624+ return 0;
20625+}
20626+#endif
20627+
20628 static inline int __init check_vmi_rom(struct vrom_header *rom)
20629 {
20630 struct pci_header *pci;
20631@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20632 return 0;
20633 if (rom->vrom_signature != VMI_SIGNATURE)
20634 return 0;
20635+ if (rom->rom_length * 512 > sizeof(*rom)) {
20636+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20637+ return 0;
20638+ }
20639 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20640 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20641 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20642@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20643 struct vrom_header *romstart;
20644 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20645 if (check_vmi_rom(romstart)) {
20646- vmi_rom = romstart;
20647+ vmi_rom = *romstart;
20648 return 1;
20649 }
20650 }
20651@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20652
20653 para_fill(pv_irq_ops.safe_halt, Halt);
20654
20655+#ifdef CONFIG_PAX_KERNEXEC
20656+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20657+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20658+#endif
20659+
20660 /*
20661 * Alternative instruction rewriting doesn't happen soon enough
20662 * to convert VMI_IRET to a call instead of a jump; so we have
20663@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20664
20665 void __init vmi_init(void)
20666 {
20667- if (!vmi_rom)
20668+ if (!vmi_rom.rom_signature)
20669 probe_vmi_rom();
20670 else
20671- check_vmi_rom(vmi_rom);
20672+ check_vmi_rom(&vmi_rom);
20673
20674 /* In case probing for or validating the ROM failed, basil */
20675- if (!vmi_rom)
20676+ if (!vmi_rom.rom_signature)
20677 return;
20678
20679- reserve_top_address(-vmi_rom->virtual_top);
20680+ reserve_top_address(-vmi_rom.virtual_top);
20681
20682 #ifdef CONFIG_X86_IO_APIC
20683 /* This is virtual hardware; timer routing is wired correctly */
20684@@ -874,7 +901,7 @@ void __init vmi_activate(void)
20685 {
20686 unsigned long flags;
20687
20688- if (!vmi_rom)
20689+ if (!vmi_rom.rom_signature)
20690 return;
20691
20692 local_irq_save(flags);
20693diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20694index 3c68fe2..12c8280 100644
20695--- a/arch/x86/kernel/vmlinux.lds.S
20696+++ b/arch/x86/kernel/vmlinux.lds.S
20697@@ -26,6 +26,13 @@
20698 #include <asm/page_types.h>
20699 #include <asm/cache.h>
20700 #include <asm/boot.h>
20701+#include <asm/segment.h>
20702+
20703+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20704+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20705+#else
20706+#define __KERNEL_TEXT_OFFSET 0
20707+#endif
20708
20709 #undef i386 /* in case the preprocessor is a 32bit one */
20710
20711@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20712 #ifdef CONFIG_X86_32
20713 OUTPUT_ARCH(i386)
20714 ENTRY(phys_startup_32)
20715-jiffies = jiffies_64;
20716 #else
20717 OUTPUT_ARCH(i386:x86-64)
20718 ENTRY(phys_startup_64)
20719-jiffies_64 = jiffies;
20720 #endif
20721
20722 PHDRS {
20723 text PT_LOAD FLAGS(5); /* R_E */
20724- data PT_LOAD FLAGS(7); /* RWE */
20725+#ifdef CONFIG_X86_32
20726+ module PT_LOAD FLAGS(5); /* R_E */
20727+#endif
20728+#ifdef CONFIG_XEN
20729+ rodata PT_LOAD FLAGS(5); /* R_E */
20730+#else
20731+ rodata PT_LOAD FLAGS(4); /* R__ */
20732+#endif
20733+ data PT_LOAD FLAGS(6); /* RW_ */
20734 #ifdef CONFIG_X86_64
20735 user PT_LOAD FLAGS(5); /* R_E */
20736+#endif
20737+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20738 #ifdef CONFIG_SMP
20739 percpu PT_LOAD FLAGS(6); /* RW_ */
20740 #endif
20741+ text.init PT_LOAD FLAGS(5); /* R_E */
20742+ text.exit PT_LOAD FLAGS(5); /* R_E */
20743 init PT_LOAD FLAGS(7); /* RWE */
20744-#endif
20745 note PT_NOTE FLAGS(0); /* ___ */
20746 }
20747
20748 SECTIONS
20749 {
20750 #ifdef CONFIG_X86_32
20751- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20752- phys_startup_32 = startup_32 - LOAD_OFFSET;
20753+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20754 #else
20755- . = __START_KERNEL;
20756- phys_startup_64 = startup_64 - LOAD_OFFSET;
20757+ . = __START_KERNEL;
20758 #endif
20759
20760 /* Text and read-only data */
20761- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20762- _text = .;
20763+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20764 /* bootstrapping code */
20765+#ifdef CONFIG_X86_32
20766+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20767+#else
20768+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20769+#endif
20770+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20771+ _text = .;
20772 HEAD_TEXT
20773 #ifdef CONFIG_X86_32
20774 . = ALIGN(PAGE_SIZE);
20775@@ -82,28 +102,71 @@ SECTIONS
20776 IRQENTRY_TEXT
20777 *(.fixup)
20778 *(.gnu.warning)
20779- /* End of text section */
20780- _etext = .;
20781 } :text = 0x9090
20782
20783- NOTES :text :note
20784+ . += __KERNEL_TEXT_OFFSET;
20785
20786- EXCEPTION_TABLE(16) :text = 0x9090
20787+#ifdef CONFIG_X86_32
20788+ . = ALIGN(PAGE_SIZE);
20789+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
20790+ *(.vmi.rom)
20791+ } :module
20792+
20793+ . = ALIGN(PAGE_SIZE);
20794+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20795+
20796+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20797+ MODULES_EXEC_VADDR = .;
20798+ BYTE(0)
20799+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20800+ . = ALIGN(HPAGE_SIZE);
20801+ MODULES_EXEC_END = . - 1;
20802+#endif
20803+
20804+ } :module
20805+#endif
20806+
20807+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20808+ /* End of text section */
20809+ _etext = . - __KERNEL_TEXT_OFFSET;
20810+ }
20811+
20812+#ifdef CONFIG_X86_32
20813+ . = ALIGN(PAGE_SIZE);
20814+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20815+ *(.idt)
20816+ . = ALIGN(PAGE_SIZE);
20817+ *(.empty_zero_page)
20818+ *(.swapper_pg_fixmap)
20819+ *(.swapper_pg_pmd)
20820+ *(.swapper_pg_dir)
20821+ *(.trampoline_pg_dir)
20822+ } :rodata
20823+#endif
20824+
20825+ . = ALIGN(PAGE_SIZE);
20826+ NOTES :rodata :note
20827+
20828+ EXCEPTION_TABLE(16) :rodata
20829
20830 RO_DATA(PAGE_SIZE)
20831
20832 /* Data */
20833 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20834+
20835+#ifdef CONFIG_PAX_KERNEXEC
20836+ . = ALIGN(HPAGE_SIZE);
20837+#else
20838+ . = ALIGN(PAGE_SIZE);
20839+#endif
20840+
20841 /* Start of data section */
20842 _sdata = .;
20843
20844 /* init_task */
20845 INIT_TASK_DATA(THREAD_SIZE)
20846
20847-#ifdef CONFIG_X86_32
20848- /* 32 bit has nosave before _edata */
20849 NOSAVE_DATA
20850-#endif
20851
20852 PAGE_ALIGNED_DATA(PAGE_SIZE)
20853
20854@@ -112,6 +175,8 @@ SECTIONS
20855 DATA_DATA
20856 CONSTRUCTORS
20857
20858+ jiffies = jiffies_64;
20859+
20860 /* rarely changed data like cpu maps */
20861 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
20862
20863@@ -166,12 +231,6 @@ SECTIONS
20864 }
20865 vgetcpu_mode = VVIRT(.vgetcpu_mode);
20866
20867- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
20868- .jiffies : AT(VLOAD(.jiffies)) {
20869- *(.jiffies)
20870- }
20871- jiffies = VVIRT(.jiffies);
20872-
20873 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
20874 *(.vsyscall_3)
20875 }
20876@@ -187,12 +246,19 @@ SECTIONS
20877 #endif /* CONFIG_X86_64 */
20878
20879 /* Init code and data - will be freed after init */
20880- . = ALIGN(PAGE_SIZE);
20881 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20882+ BYTE(0)
20883+
20884+#ifdef CONFIG_PAX_KERNEXEC
20885+ . = ALIGN(HPAGE_SIZE);
20886+#else
20887+ . = ALIGN(PAGE_SIZE);
20888+#endif
20889+
20890 __init_begin = .; /* paired with __init_end */
20891- }
20892+ } :init.begin
20893
20894-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20895+#ifdef CONFIG_SMP
20896 /*
20897 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20898 * output PHDR, so the next output section - .init.text - should
20899@@ -201,12 +267,27 @@ SECTIONS
20900 PERCPU_VADDR(0, :percpu)
20901 #endif
20902
20903- INIT_TEXT_SECTION(PAGE_SIZE)
20904-#ifdef CONFIG_X86_64
20905- :init
20906-#endif
20907+ . = ALIGN(PAGE_SIZE);
20908+ init_begin = .;
20909+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20910+ VMLINUX_SYMBOL(_sinittext) = .;
20911+ INIT_TEXT
20912+ VMLINUX_SYMBOL(_einittext) = .;
20913+ . = ALIGN(PAGE_SIZE);
20914+ } :text.init
20915
20916- INIT_DATA_SECTION(16)
20917+ /*
20918+ * .exit.text is discard at runtime, not link time, to deal with
20919+ * references from .altinstructions and .eh_frame
20920+ */
20921+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20922+ EXIT_TEXT
20923+ . = ALIGN(16);
20924+ } :text.exit
20925+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20926+
20927+ . = ALIGN(PAGE_SIZE);
20928+ INIT_DATA_SECTION(16) :init
20929
20930 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20931 __x86_cpu_dev_start = .;
20932@@ -232,19 +313,11 @@ SECTIONS
20933 *(.altinstr_replacement)
20934 }
20935
20936- /*
20937- * .exit.text is discard at runtime, not link time, to deal with
20938- * references from .altinstructions and .eh_frame
20939- */
20940- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20941- EXIT_TEXT
20942- }
20943-
20944 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20945 EXIT_DATA
20946 }
20947
20948-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20949+#ifndef CONFIG_SMP
20950 PERCPU(PAGE_SIZE)
20951 #endif
20952
20953@@ -267,12 +340,6 @@ SECTIONS
20954 . = ALIGN(PAGE_SIZE);
20955 }
20956
20957-#ifdef CONFIG_X86_64
20958- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20959- NOSAVE_DATA
20960- }
20961-#endif
20962-
20963 /* BSS */
20964 . = ALIGN(PAGE_SIZE);
20965 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20966@@ -288,6 +355,7 @@ SECTIONS
20967 __brk_base = .;
20968 . += 64 * 1024; /* 64k alignment slop space */
20969 *(.brk_reservation) /* areas brk users have reserved */
20970+ . = ALIGN(HPAGE_SIZE);
20971 __brk_limit = .;
20972 }
20973
20974@@ -316,13 +384,12 @@ SECTIONS
20975 * for the boot processor.
20976 */
20977 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
20978-INIT_PER_CPU(gdt_page);
20979 INIT_PER_CPU(irq_stack_union);
20980
20981 /*
20982 * Build-time check on the image size:
20983 */
20984-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20985+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20986 "kernel image bigger than KERNEL_IMAGE_SIZE");
20987
20988 #ifdef CONFIG_SMP
20989diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20990index 62f39d7..3bc46a1 100644
20991--- a/arch/x86/kernel/vsyscall_64.c
20992+++ b/arch/x86/kernel/vsyscall_64.c
20993@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
20994
20995 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
20996 /* copy vsyscall data */
20997+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
20998 vsyscall_gtod_data.clock.vread = clock->vread;
20999 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
21000 vsyscall_gtod_data.clock.mask = clock->mask;
21001@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
21002 We do this here because otherwise user space would do it on
21003 its own in a likely inferior way (no access to jiffies).
21004 If you don't like it pass NULL. */
21005- if (tcache && tcache->blob[0] == (j = __jiffies)) {
21006+ if (tcache && tcache->blob[0] == (j = jiffies)) {
21007 p = tcache->blob[1];
21008 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21009 /* Load per CPU data from RDTSCP */
21010diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21011index 3909e3b..5433a97 100644
21012--- a/arch/x86/kernel/x8664_ksyms_64.c
21013+++ b/arch/x86/kernel/x8664_ksyms_64.c
21014@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21015
21016 EXPORT_SYMBOL(copy_user_generic);
21017 EXPORT_SYMBOL(__copy_user_nocache);
21018-EXPORT_SYMBOL(copy_from_user);
21019-EXPORT_SYMBOL(copy_to_user);
21020 EXPORT_SYMBOL(__copy_from_user_inatomic);
21021
21022 EXPORT_SYMBOL(copy_page);
21023diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21024index c5ee17e..d63218f 100644
21025--- a/arch/x86/kernel/xsave.c
21026+++ b/arch/x86/kernel/xsave.c
21027@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21028 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21029 return -1;
21030
21031- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21032+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21033 fx_sw_user->extended_size -
21034 FP_XSTATE_MAGIC2_SIZE));
21035 /*
21036@@ -196,7 +196,7 @@ fx_only:
21037 * the other extended state.
21038 */
21039 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21040- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21041+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21042 }
21043
21044 /*
21045@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21046 if (task_thread_info(tsk)->status & TS_XSAVE)
21047 err = restore_user_xstate(buf);
21048 else
21049- err = fxrstor_checking((__force struct i387_fxsave_struct *)
21050+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
21051 buf);
21052 if (unlikely(err)) {
21053 /*
21054diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21055index 1350e43..a94b011 100644
21056--- a/arch/x86/kvm/emulate.c
21057+++ b/arch/x86/kvm/emulate.c
21058@@ -81,8 +81,8 @@
21059 #define Src2CL (1<<29)
21060 #define Src2ImmByte (2<<29)
21061 #define Src2One (3<<29)
21062-#define Src2Imm16 (4<<29)
21063-#define Src2Mask (7<<29)
21064+#define Src2Imm16 (4U<<29)
21065+#define Src2Mask (7U<<29)
21066
21067 enum {
21068 Group1_80, Group1_81, Group1_82, Group1_83,
21069@@ -411,6 +411,7 @@ static u32 group2_table[] = {
21070
21071 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21072 do { \
21073+ unsigned long _tmp; \
21074 __asm__ __volatile__ ( \
21075 _PRE_EFLAGS("0", "4", "2") \
21076 _op _suffix " %"_x"3,%1; " \
21077@@ -424,8 +425,6 @@ static u32 group2_table[] = {
21078 /* Raw emulation: instruction has two explicit operands. */
21079 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21080 do { \
21081- unsigned long _tmp; \
21082- \
21083 switch ((_dst).bytes) { \
21084 case 2: \
21085 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21086@@ -441,7 +440,6 @@ static u32 group2_table[] = {
21087
21088 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21089 do { \
21090- unsigned long _tmp; \
21091 switch ((_dst).bytes) { \
21092 case 1: \
21093 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21094diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21095index 8dfeaaa..4daa395 100644
21096--- a/arch/x86/kvm/lapic.c
21097+++ b/arch/x86/kvm/lapic.c
21098@@ -52,7 +52,7 @@
21099 #define APIC_BUS_CYCLE_NS 1
21100
21101 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21102-#define apic_debug(fmt, arg...)
21103+#define apic_debug(fmt, arg...) do {} while (0)
21104
21105 #define APIC_LVT_NUM 6
21106 /* 14 is the version for Xeon and Pentium 8.4.8*/
21107diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21108index 3bc2707..dd157e2 100644
21109--- a/arch/x86/kvm/paging_tmpl.h
21110+++ b/arch/x86/kvm/paging_tmpl.h
21111@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21112 int level = PT_PAGE_TABLE_LEVEL;
21113 unsigned long mmu_seq;
21114
21115+ pax_track_stack();
21116+
21117 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21118 kvm_mmu_audit(vcpu, "pre page fault");
21119
21120@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21121 kvm_mmu_free_some_pages(vcpu);
21122 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21123 level, &write_pt, pfn);
21124+ (void)sptep;
21125 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21126 sptep, *sptep, write_pt);
21127
21128diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21129index 7c6e63e..c5d92c1 100644
21130--- a/arch/x86/kvm/svm.c
21131+++ b/arch/x86/kvm/svm.c
21132@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21133 int cpu = raw_smp_processor_id();
21134
21135 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21136+
21137+ pax_open_kernel();
21138 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21139+ pax_close_kernel();
21140+
21141 load_TR_desc();
21142 }
21143
21144@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21145 return true;
21146 }
21147
21148-static struct kvm_x86_ops svm_x86_ops = {
21149+static const struct kvm_x86_ops svm_x86_ops = {
21150 .cpu_has_kvm_support = has_svm,
21151 .disabled_by_bios = is_disabled,
21152 .hardware_setup = svm_hardware_setup,
21153diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21154index e6d925f..e7a4af8 100644
21155--- a/arch/x86/kvm/vmx.c
21156+++ b/arch/x86/kvm/vmx.c
21157@@ -570,7 +570,11 @@ static void reload_tss(void)
21158
21159 kvm_get_gdt(&gdt);
21160 descs = (void *)gdt.base;
21161+
21162+ pax_open_kernel();
21163 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21164+ pax_close_kernel();
21165+
21166 load_TR_desc();
21167 }
21168
21169@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21170 if (!cpu_has_vmx_flexpriority())
21171 flexpriority_enabled = 0;
21172
21173- if (!cpu_has_vmx_tpr_shadow())
21174- kvm_x86_ops->update_cr8_intercept = NULL;
21175+ if (!cpu_has_vmx_tpr_shadow()) {
21176+ pax_open_kernel();
21177+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21178+ pax_close_kernel();
21179+ }
21180
21181 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21182 kvm_disable_largepages();
21183@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21184 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21185
21186 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21187- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21188+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21189 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21190 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21191 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21192@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21193 "jmp .Lkvm_vmx_return \n\t"
21194 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21195 ".Lkvm_vmx_return: "
21196+
21197+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21198+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21199+ ".Lkvm_vmx_return2: "
21200+#endif
21201+
21202 /* Save guest registers, load host registers, keep flags */
21203 "xchg %0, (%%"R"sp) \n\t"
21204 "mov %%"R"ax, %c[rax](%0) \n\t"
21205@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21206 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21207 #endif
21208 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21209+
21210+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21211+ ,[cs]"i"(__KERNEL_CS)
21212+#endif
21213+
21214 : "cc", "memory"
21215- , R"bx", R"di", R"si"
21216+ , R"ax", R"bx", R"di", R"si"
21217 #ifdef CONFIG_X86_64
21218 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21219 #endif
21220@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21221 if (vmx->rmode.irq.pending)
21222 fixup_rmode_irq(vmx);
21223
21224- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21225+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21226+
21227+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21228+ loadsegment(fs, __KERNEL_PERCPU);
21229+#endif
21230+
21231+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21232+ __set_fs(current_thread_info()->addr_limit);
21233+#endif
21234+
21235 vmx->launched = 1;
21236
21237 vmx_complete_interrupts(vmx);
21238@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21239 return false;
21240 }
21241
21242-static struct kvm_x86_ops vmx_x86_ops = {
21243+static const struct kvm_x86_ops vmx_x86_ops = {
21244 .cpu_has_kvm_support = cpu_has_kvm_support,
21245 .disabled_by_bios = vmx_disabled_by_bios,
21246 .hardware_setup = hardware_setup,
21247diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21248index df1cefb..5e882ad 100644
21249--- a/arch/x86/kvm/x86.c
21250+++ b/arch/x86/kvm/x86.c
21251@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21252 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21253 struct kvm_cpuid_entry2 __user *entries);
21254
21255-struct kvm_x86_ops *kvm_x86_ops;
21256+const struct kvm_x86_ops *kvm_x86_ops;
21257 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21258
21259 int ignore_msrs = 0;
21260@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21261 struct kvm_cpuid2 *cpuid,
21262 struct kvm_cpuid_entry2 __user *entries)
21263 {
21264- int r;
21265+ int r, i;
21266
21267 r = -E2BIG;
21268 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21269 goto out;
21270 r = -EFAULT;
21271- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21272- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21273+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21274 goto out;
21275+ for (i = 0; i < cpuid->nent; ++i) {
21276+ struct kvm_cpuid_entry2 cpuid_entry;
21277+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21278+ goto out;
21279+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21280+ }
21281 vcpu->arch.cpuid_nent = cpuid->nent;
21282 kvm_apic_set_version(vcpu);
21283 return 0;
21284@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21285 struct kvm_cpuid2 *cpuid,
21286 struct kvm_cpuid_entry2 __user *entries)
21287 {
21288- int r;
21289+ int r, i;
21290
21291 vcpu_load(vcpu);
21292 r = -E2BIG;
21293 if (cpuid->nent < vcpu->arch.cpuid_nent)
21294 goto out;
21295 r = -EFAULT;
21296- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21297- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21298+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21299 goto out;
21300+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21301+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21302+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21303+ goto out;
21304+ }
21305 return 0;
21306
21307 out:
21308@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21309 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21310 struct kvm_interrupt *irq)
21311 {
21312- if (irq->irq < 0 || irq->irq >= 256)
21313+ if (irq->irq >= 256)
21314 return -EINVAL;
21315 if (irqchip_in_kernel(vcpu->kvm))
21316 return -ENXIO;
21317@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21318 .notifier_call = kvmclock_cpufreq_notifier
21319 };
21320
21321-int kvm_arch_init(void *opaque)
21322+int kvm_arch_init(const void *opaque)
21323 {
21324 int r, cpu;
21325- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21326+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21327
21328 if (kvm_x86_ops) {
21329 printk(KERN_ERR "kvm: already loaded the other module\n");
21330diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21331index 7e59dc1..b88c98f 100644
21332--- a/arch/x86/lguest/boot.c
21333+++ b/arch/x86/lguest/boot.c
21334@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21335 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21336 * Launcher to reboot us.
21337 */
21338-static void lguest_restart(char *reason)
21339+static __noreturn void lguest_restart(char *reason)
21340 {
21341 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21342+ BUG();
21343 }
21344
21345 /*G:050
21346diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21347index 824fa0b..c619e96 100644
21348--- a/arch/x86/lib/atomic64_32.c
21349+++ b/arch/x86/lib/atomic64_32.c
21350@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21351 }
21352 EXPORT_SYMBOL(atomic64_cmpxchg);
21353
21354+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21355+{
21356+ return cmpxchg8b(&ptr->counter, old_val, new_val);
21357+}
21358+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21359+
21360 /**
21361 * atomic64_xchg - xchg atomic64 variable
21362 * @ptr: pointer to type atomic64_t
21363@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21364 EXPORT_SYMBOL(atomic64_xchg);
21365
21366 /**
21367+ * atomic64_xchg_unchecked - xchg atomic64 variable
21368+ * @ptr: pointer to type atomic64_unchecked_t
21369+ * @new_val: value to assign
21370+ *
21371+ * Atomically xchgs the value of @ptr to @new_val and returns
21372+ * the old value.
21373+ */
21374+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21375+{
21376+ /*
21377+ * Try first with a (possibly incorrect) assumption about
21378+ * what we have there. We'll do two loops most likely,
21379+ * but we'll get an ownership MESI transaction straight away
21380+ * instead of a read transaction followed by a
21381+ * flush-for-ownership transaction:
21382+ */
21383+ u64 old_val, real_val = 0;
21384+
21385+ do {
21386+ old_val = real_val;
21387+
21388+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21389+
21390+ } while (real_val != old_val);
21391+
21392+ return old_val;
21393+}
21394+EXPORT_SYMBOL(atomic64_xchg_unchecked);
21395+
21396+/**
21397 * atomic64_set - set atomic64 variable
21398 * @ptr: pointer to type atomic64_t
21399 * @new_val: value to assign
21400@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21401 EXPORT_SYMBOL(atomic64_set);
21402
21403 /**
21404-EXPORT_SYMBOL(atomic64_read);
21405+ * atomic64_unchecked_set - set atomic64 variable
21406+ * @ptr: pointer to type atomic64_unchecked_t
21407+ * @new_val: value to assign
21408+ *
21409+ * Atomically sets the value of @ptr to @new_val.
21410+ */
21411+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21412+{
21413+ atomic64_xchg_unchecked(ptr, new_val);
21414+}
21415+EXPORT_SYMBOL(atomic64_set_unchecked);
21416+
21417+/**
21418 * atomic64_add_return - add and return
21419 * @delta: integer value to add
21420 * @ptr: pointer to type atomic64_t
21421@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21422 }
21423 EXPORT_SYMBOL(atomic64_add_return);
21424
21425+/**
21426+ * atomic64_add_return_unchecked - add and return
21427+ * @delta: integer value to add
21428+ * @ptr: pointer to type atomic64_unchecked_t
21429+ *
21430+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
21431+ */
21432+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21433+{
21434+ /*
21435+ * Try first with a (possibly incorrect) assumption about
21436+ * what we have there. We'll do two loops most likely,
21437+ * but we'll get an ownership MESI transaction straight away
21438+ * instead of a read transaction followed by a
21439+ * flush-for-ownership transaction:
21440+ */
21441+ u64 old_val, new_val, real_val = 0;
21442+
21443+ do {
21444+ old_val = real_val;
21445+ new_val = old_val + delta;
21446+
21447+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21448+
21449+ } while (real_val != old_val);
21450+
21451+ return new_val;
21452+}
21453+EXPORT_SYMBOL(atomic64_add_return_unchecked);
21454+
21455 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21456 {
21457 return atomic64_add_return(-delta, ptr);
21458 }
21459 EXPORT_SYMBOL(atomic64_sub_return);
21460
21461+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21462+{
21463+ return atomic64_add_return_unchecked(-delta, ptr);
21464+}
21465+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21466+
21467 u64 atomic64_inc_return(atomic64_t *ptr)
21468 {
21469 return atomic64_add_return(1, ptr);
21470 }
21471 EXPORT_SYMBOL(atomic64_inc_return);
21472
21473+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21474+{
21475+ return atomic64_add_return_unchecked(1, ptr);
21476+}
21477+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21478+
21479 u64 atomic64_dec_return(atomic64_t *ptr)
21480 {
21481 return atomic64_sub_return(1, ptr);
21482 }
21483 EXPORT_SYMBOL(atomic64_dec_return);
21484
21485+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21486+{
21487+ return atomic64_sub_return_unchecked(1, ptr);
21488+}
21489+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21490+
21491 /**
21492 * atomic64_add - add integer to atomic64 variable
21493 * @delta: integer value to add
21494@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21495 EXPORT_SYMBOL(atomic64_add);
21496
21497 /**
21498+ * atomic64_add_unchecked - add integer to atomic64 variable
21499+ * @delta: integer value to add
21500+ * @ptr: pointer to type atomic64_unchecked_t
21501+ *
21502+ * Atomically adds @delta to @ptr.
21503+ */
21504+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21505+{
21506+ atomic64_add_return_unchecked(delta, ptr);
21507+}
21508+EXPORT_SYMBOL(atomic64_add_unchecked);
21509+
21510+/**
21511 * atomic64_sub - subtract the atomic64 variable
21512 * @delta: integer value to subtract
21513 * @ptr: pointer to type atomic64_t
21514@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21515 EXPORT_SYMBOL(atomic64_sub);
21516
21517 /**
21518+ * atomic64_sub_unchecked - subtract the atomic64 variable
21519+ * @delta: integer value to subtract
21520+ * @ptr: pointer to type atomic64_unchecked_t
21521+ *
21522+ * Atomically subtracts @delta from @ptr.
21523+ */
21524+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21525+{
21526+ atomic64_add_unchecked(-delta, ptr);
21527+}
21528+EXPORT_SYMBOL(atomic64_sub_unchecked);
21529+
21530+/**
21531 * atomic64_sub_and_test - subtract value from variable and test result
21532 * @delta: integer value to subtract
21533 * @ptr: pointer to type atomic64_t
21534@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21535 EXPORT_SYMBOL(atomic64_inc);
21536
21537 /**
21538+ * atomic64_inc_unchecked - increment atomic64 variable
21539+ * @ptr: pointer to type atomic64_unchecked_t
21540+ *
21541+ * Atomically increments @ptr by 1.
21542+ */
21543+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21544+{
21545+ atomic64_add_unchecked(1, ptr);
21546+}
21547+EXPORT_SYMBOL(atomic64_inc_unchecked);
21548+
21549+/**
21550 * atomic64_dec - decrement atomic64 variable
21551 * @ptr: pointer to type atomic64_t
21552 *
21553@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21554 EXPORT_SYMBOL(atomic64_dec);
21555
21556 /**
21557+ * atomic64_dec_unchecked - decrement atomic64 variable
21558+ * @ptr: pointer to type atomic64_unchecked_t
21559+ *
21560+ * Atomically decrements @ptr by 1.
21561+ */
21562+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21563+{
21564+ atomic64_sub_unchecked(1, ptr);
21565+}
21566+EXPORT_SYMBOL(atomic64_dec_unchecked);
21567+
21568+/**
21569 * atomic64_dec_and_test - decrement and test
21570 * @ptr: pointer to type atomic64_t
21571 *
21572diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21573index adbccd0..98f96c8 100644
21574--- a/arch/x86/lib/checksum_32.S
21575+++ b/arch/x86/lib/checksum_32.S
21576@@ -28,7 +28,8 @@
21577 #include <linux/linkage.h>
21578 #include <asm/dwarf2.h>
21579 #include <asm/errno.h>
21580-
21581+#include <asm/segment.h>
21582+
21583 /*
21584 * computes a partial checksum, e.g. for TCP/UDP fragments
21585 */
21586@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21587
21588 #define ARGBASE 16
21589 #define FP 12
21590-
21591-ENTRY(csum_partial_copy_generic)
21592+
21593+ENTRY(csum_partial_copy_generic_to_user)
21594 CFI_STARTPROC
21595+
21596+#ifdef CONFIG_PAX_MEMORY_UDEREF
21597+ pushl %gs
21598+ CFI_ADJUST_CFA_OFFSET 4
21599+ popl %es
21600+ CFI_ADJUST_CFA_OFFSET -4
21601+ jmp csum_partial_copy_generic
21602+#endif
21603+
21604+ENTRY(csum_partial_copy_generic_from_user)
21605+
21606+#ifdef CONFIG_PAX_MEMORY_UDEREF
21607+ pushl %gs
21608+ CFI_ADJUST_CFA_OFFSET 4
21609+ popl %ds
21610+ CFI_ADJUST_CFA_OFFSET -4
21611+#endif
21612+
21613+ENTRY(csum_partial_copy_generic)
21614 subl $4,%esp
21615 CFI_ADJUST_CFA_OFFSET 4
21616 pushl %edi
21617@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21618 jmp 4f
21619 SRC(1: movw (%esi), %bx )
21620 addl $2, %esi
21621-DST( movw %bx, (%edi) )
21622+DST( movw %bx, %es:(%edi) )
21623 addl $2, %edi
21624 addw %bx, %ax
21625 adcl $0, %eax
21626@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21627 SRC(1: movl (%esi), %ebx )
21628 SRC( movl 4(%esi), %edx )
21629 adcl %ebx, %eax
21630-DST( movl %ebx, (%edi) )
21631+DST( movl %ebx, %es:(%edi) )
21632 adcl %edx, %eax
21633-DST( movl %edx, 4(%edi) )
21634+DST( movl %edx, %es:4(%edi) )
21635
21636 SRC( movl 8(%esi), %ebx )
21637 SRC( movl 12(%esi), %edx )
21638 adcl %ebx, %eax
21639-DST( movl %ebx, 8(%edi) )
21640+DST( movl %ebx, %es:8(%edi) )
21641 adcl %edx, %eax
21642-DST( movl %edx, 12(%edi) )
21643+DST( movl %edx, %es:12(%edi) )
21644
21645 SRC( movl 16(%esi), %ebx )
21646 SRC( movl 20(%esi), %edx )
21647 adcl %ebx, %eax
21648-DST( movl %ebx, 16(%edi) )
21649+DST( movl %ebx, %es:16(%edi) )
21650 adcl %edx, %eax
21651-DST( movl %edx, 20(%edi) )
21652+DST( movl %edx, %es:20(%edi) )
21653
21654 SRC( movl 24(%esi), %ebx )
21655 SRC( movl 28(%esi), %edx )
21656 adcl %ebx, %eax
21657-DST( movl %ebx, 24(%edi) )
21658+DST( movl %ebx, %es:24(%edi) )
21659 adcl %edx, %eax
21660-DST( movl %edx, 28(%edi) )
21661+DST( movl %edx, %es:28(%edi) )
21662
21663 lea 32(%esi), %esi
21664 lea 32(%edi), %edi
21665@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21666 shrl $2, %edx # This clears CF
21667 SRC(3: movl (%esi), %ebx )
21668 adcl %ebx, %eax
21669-DST( movl %ebx, (%edi) )
21670+DST( movl %ebx, %es:(%edi) )
21671 lea 4(%esi), %esi
21672 lea 4(%edi), %edi
21673 dec %edx
21674@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21675 jb 5f
21676 SRC( movw (%esi), %cx )
21677 leal 2(%esi), %esi
21678-DST( movw %cx, (%edi) )
21679+DST( movw %cx, %es:(%edi) )
21680 leal 2(%edi), %edi
21681 je 6f
21682 shll $16,%ecx
21683 SRC(5: movb (%esi), %cl )
21684-DST( movb %cl, (%edi) )
21685+DST( movb %cl, %es:(%edi) )
21686 6: addl %ecx, %eax
21687 adcl $0, %eax
21688 7:
21689@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21690
21691 6001:
21692 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21693- movl $-EFAULT, (%ebx)
21694+ movl $-EFAULT, %ss:(%ebx)
21695
21696 # zero the complete destination - computing the rest
21697 # is too much work
21698@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21699
21700 6002:
21701 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21702- movl $-EFAULT,(%ebx)
21703+ movl $-EFAULT,%ss:(%ebx)
21704 jmp 5000b
21705
21706 .previous
21707
21708+ pushl %ss
21709+ CFI_ADJUST_CFA_OFFSET 4
21710+ popl %ds
21711+ CFI_ADJUST_CFA_OFFSET -4
21712+ pushl %ss
21713+ CFI_ADJUST_CFA_OFFSET 4
21714+ popl %es
21715+ CFI_ADJUST_CFA_OFFSET -4
21716 popl %ebx
21717 CFI_ADJUST_CFA_OFFSET -4
21718 CFI_RESTORE ebx
21719@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21720 CFI_ADJUST_CFA_OFFSET -4
21721 ret
21722 CFI_ENDPROC
21723-ENDPROC(csum_partial_copy_generic)
21724+ENDPROC(csum_partial_copy_generic_to_user)
21725
21726 #else
21727
21728 /* Version for PentiumII/PPro */
21729
21730 #define ROUND1(x) \
21731+ nop; nop; nop; \
21732 SRC(movl x(%esi), %ebx ) ; \
21733 addl %ebx, %eax ; \
21734- DST(movl %ebx, x(%edi) ) ;
21735+ DST(movl %ebx, %es:x(%edi)) ;
21736
21737 #define ROUND(x) \
21738+ nop; nop; nop; \
21739 SRC(movl x(%esi), %ebx ) ; \
21740 adcl %ebx, %eax ; \
21741- DST(movl %ebx, x(%edi) ) ;
21742+ DST(movl %ebx, %es:x(%edi)) ;
21743
21744 #define ARGBASE 12
21745-
21746-ENTRY(csum_partial_copy_generic)
21747+
21748+ENTRY(csum_partial_copy_generic_to_user)
21749 CFI_STARTPROC
21750+
21751+#ifdef CONFIG_PAX_MEMORY_UDEREF
21752+ pushl %gs
21753+ CFI_ADJUST_CFA_OFFSET 4
21754+ popl %es
21755+ CFI_ADJUST_CFA_OFFSET -4
21756+ jmp csum_partial_copy_generic
21757+#endif
21758+
21759+ENTRY(csum_partial_copy_generic_from_user)
21760+
21761+#ifdef CONFIG_PAX_MEMORY_UDEREF
21762+ pushl %gs
21763+ CFI_ADJUST_CFA_OFFSET 4
21764+ popl %ds
21765+ CFI_ADJUST_CFA_OFFSET -4
21766+#endif
21767+
21768+ENTRY(csum_partial_copy_generic)
21769 pushl %ebx
21770 CFI_ADJUST_CFA_OFFSET 4
21771 CFI_REL_OFFSET ebx, 0
21772@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21773 subl %ebx, %edi
21774 lea -1(%esi),%edx
21775 andl $-32,%edx
21776- lea 3f(%ebx,%ebx), %ebx
21777+ lea 3f(%ebx,%ebx,2), %ebx
21778 testl %esi, %esi
21779 jmp *%ebx
21780 1: addl $64,%esi
21781@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21782 jb 5f
21783 SRC( movw (%esi), %dx )
21784 leal 2(%esi), %esi
21785-DST( movw %dx, (%edi) )
21786+DST( movw %dx, %es:(%edi) )
21787 leal 2(%edi), %edi
21788 je 6f
21789 shll $16,%edx
21790 5:
21791 SRC( movb (%esi), %dl )
21792-DST( movb %dl, (%edi) )
21793+DST( movb %dl, %es:(%edi) )
21794 6: addl %edx, %eax
21795 adcl $0, %eax
21796 7:
21797 .section .fixup, "ax"
21798 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21799- movl $-EFAULT, (%ebx)
21800+ movl $-EFAULT, %ss:(%ebx)
21801 # zero the complete destination (computing the rest is too much work)
21802 movl ARGBASE+8(%esp),%edi # dst
21803 movl ARGBASE+12(%esp),%ecx # len
21804@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
21805 rep; stosb
21806 jmp 7b
21807 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21808- movl $-EFAULT, (%ebx)
21809+ movl $-EFAULT, %ss:(%ebx)
21810 jmp 7b
21811 .previous
21812
21813+#ifdef CONFIG_PAX_MEMORY_UDEREF
21814+ pushl %ss
21815+ CFI_ADJUST_CFA_OFFSET 4
21816+ popl %ds
21817+ CFI_ADJUST_CFA_OFFSET -4
21818+ pushl %ss
21819+ CFI_ADJUST_CFA_OFFSET 4
21820+ popl %es
21821+ CFI_ADJUST_CFA_OFFSET -4
21822+#endif
21823+
21824 popl %esi
21825 CFI_ADJUST_CFA_OFFSET -4
21826 CFI_RESTORE esi
21827@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
21828 CFI_RESTORE ebx
21829 ret
21830 CFI_ENDPROC
21831-ENDPROC(csum_partial_copy_generic)
21832+ENDPROC(csum_partial_copy_generic_to_user)
21833
21834 #undef ROUND
21835 #undef ROUND1
21836diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21837index ebeafcc..1e3a402 100644
21838--- a/arch/x86/lib/clear_page_64.S
21839+++ b/arch/x86/lib/clear_page_64.S
21840@@ -1,5 +1,6 @@
21841 #include <linux/linkage.h>
21842 #include <asm/dwarf2.h>
21843+#include <asm/alternative-asm.h>
21844
21845 /*
21846 * Zero a page.
21847@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
21848 movl $4096/8,%ecx
21849 xorl %eax,%eax
21850 rep stosq
21851+ pax_force_retaddr
21852 ret
21853 CFI_ENDPROC
21854 ENDPROC(clear_page_c)
21855@@ -33,6 +35,7 @@ ENTRY(clear_page)
21856 leaq 64(%rdi),%rdi
21857 jnz .Lloop
21858 nop
21859+ pax_force_retaddr
21860 ret
21861 CFI_ENDPROC
21862 .Lclear_page_end:
21863@@ -43,7 +46,7 @@ ENDPROC(clear_page)
21864
21865 #include <asm/cpufeature.h>
21866
21867- .section .altinstr_replacement,"ax"
21868+ .section .altinstr_replacement,"a"
21869 1: .byte 0xeb /* jmp <disp8> */
21870 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21871 2:
21872diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21873index 727a5d4..333818a 100644
21874--- a/arch/x86/lib/copy_page_64.S
21875+++ b/arch/x86/lib/copy_page_64.S
21876@@ -2,12 +2,14 @@
21877
21878 #include <linux/linkage.h>
21879 #include <asm/dwarf2.h>
21880+#include <asm/alternative-asm.h>
21881
21882 ALIGN
21883 copy_page_c:
21884 CFI_STARTPROC
21885 movl $4096/8,%ecx
21886 rep movsq
21887+ pax_force_retaddr
21888 ret
21889 CFI_ENDPROC
21890 ENDPROC(copy_page_c)
21891@@ -38,7 +40,7 @@ ENTRY(copy_page)
21892 movq 16 (%rsi), %rdx
21893 movq 24 (%rsi), %r8
21894 movq 32 (%rsi), %r9
21895- movq 40 (%rsi), %r10
21896+ movq 40 (%rsi), %r13
21897 movq 48 (%rsi), %r11
21898 movq 56 (%rsi), %r12
21899
21900@@ -49,7 +51,7 @@ ENTRY(copy_page)
21901 movq %rdx, 16 (%rdi)
21902 movq %r8, 24 (%rdi)
21903 movq %r9, 32 (%rdi)
21904- movq %r10, 40 (%rdi)
21905+ movq %r13, 40 (%rdi)
21906 movq %r11, 48 (%rdi)
21907 movq %r12, 56 (%rdi)
21908
21909@@ -68,7 +70,7 @@ ENTRY(copy_page)
21910 movq 16 (%rsi), %rdx
21911 movq 24 (%rsi), %r8
21912 movq 32 (%rsi), %r9
21913- movq 40 (%rsi), %r10
21914+ movq 40 (%rsi), %r13
21915 movq 48 (%rsi), %r11
21916 movq 56 (%rsi), %r12
21917
21918@@ -77,7 +79,7 @@ ENTRY(copy_page)
21919 movq %rdx, 16 (%rdi)
21920 movq %r8, 24 (%rdi)
21921 movq %r9, 32 (%rdi)
21922- movq %r10, 40 (%rdi)
21923+ movq %r13, 40 (%rdi)
21924 movq %r11, 48 (%rdi)
21925 movq %r12, 56 (%rdi)
21926
21927@@ -94,6 +96,7 @@ ENTRY(copy_page)
21928 CFI_RESTORE r13
21929 addq $3*8,%rsp
21930 CFI_ADJUST_CFA_OFFSET -3*8
21931+ pax_force_retaddr
21932 ret
21933 .Lcopy_page_end:
21934 CFI_ENDPROC
21935@@ -104,7 +107,7 @@ ENDPROC(copy_page)
21936
21937 #include <asm/cpufeature.h>
21938
21939- .section .altinstr_replacement,"ax"
21940+ .section .altinstr_replacement,"a"
21941 1: .byte 0xeb /* jmp <disp8> */
21942 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21943 2:
21944diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21945index af8debd..40c75f3 100644
21946--- a/arch/x86/lib/copy_user_64.S
21947+++ b/arch/x86/lib/copy_user_64.S
21948@@ -15,13 +15,15 @@
21949 #include <asm/asm-offsets.h>
21950 #include <asm/thread_info.h>
21951 #include <asm/cpufeature.h>
21952+#include <asm/pgtable.h>
21953+#include <asm/alternative-asm.h>
21954
21955 .macro ALTERNATIVE_JUMP feature,orig,alt
21956 0:
21957 .byte 0xe9 /* 32bit jump */
21958 .long \orig-1f /* by default jump to orig */
21959 1:
21960- .section .altinstr_replacement,"ax"
21961+ .section .altinstr_replacement,"a"
21962 2: .byte 0xe9 /* near jump with 32bit immediate */
21963 .long \alt-1b /* offset */ /* or alternatively to alt */
21964 .previous
21965@@ -64,55 +66,26 @@
21966 #endif
21967 .endm
21968
21969-/* Standard copy_to_user with segment limit checking */
21970-ENTRY(copy_to_user)
21971- CFI_STARTPROC
21972- GET_THREAD_INFO(%rax)
21973- movq %rdi,%rcx
21974- addq %rdx,%rcx
21975- jc bad_to_user
21976- cmpq TI_addr_limit(%rax),%rcx
21977- ja bad_to_user
21978- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21979- CFI_ENDPROC
21980-ENDPROC(copy_to_user)
21981-
21982-/* Standard copy_from_user with segment limit checking */
21983-ENTRY(copy_from_user)
21984- CFI_STARTPROC
21985- GET_THREAD_INFO(%rax)
21986- movq %rsi,%rcx
21987- addq %rdx,%rcx
21988- jc bad_from_user
21989- cmpq TI_addr_limit(%rax),%rcx
21990- ja bad_from_user
21991- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21992- CFI_ENDPROC
21993-ENDPROC(copy_from_user)
21994-
21995 ENTRY(copy_user_generic)
21996 CFI_STARTPROC
21997 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21998 CFI_ENDPROC
21999 ENDPROC(copy_user_generic)
22000
22001-ENTRY(__copy_from_user_inatomic)
22002- CFI_STARTPROC
22003- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
22004- CFI_ENDPROC
22005-ENDPROC(__copy_from_user_inatomic)
22006-
22007 .section .fixup,"ax"
22008 /* must zero dest */
22009 ENTRY(bad_from_user)
22010 bad_from_user:
22011 CFI_STARTPROC
22012+ testl %edx,%edx
22013+ js bad_to_user
22014 movl %edx,%ecx
22015 xorl %eax,%eax
22016 rep
22017 stosb
22018 bad_to_user:
22019 movl %edx,%eax
22020+ pax_force_retaddr
22021 ret
22022 CFI_ENDPROC
22023 ENDPROC(bad_from_user)
22024@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22025 jz 17f
22026 1: movq (%rsi),%r8
22027 2: movq 1*8(%rsi),%r9
22028-3: movq 2*8(%rsi),%r10
22029+3: movq 2*8(%rsi),%rax
22030 4: movq 3*8(%rsi),%r11
22031 5: movq %r8,(%rdi)
22032 6: movq %r9,1*8(%rdi)
22033-7: movq %r10,2*8(%rdi)
22034+7: movq %rax,2*8(%rdi)
22035 8: movq %r11,3*8(%rdi)
22036 9: movq 4*8(%rsi),%r8
22037 10: movq 5*8(%rsi),%r9
22038-11: movq 6*8(%rsi),%r10
22039+11: movq 6*8(%rsi),%rax
22040 12: movq 7*8(%rsi),%r11
22041 13: movq %r8,4*8(%rdi)
22042 14: movq %r9,5*8(%rdi)
22043-15: movq %r10,6*8(%rdi)
22044+15: movq %rax,6*8(%rdi)
22045 16: movq %r11,7*8(%rdi)
22046 leaq 64(%rsi),%rsi
22047 leaq 64(%rdi),%rdi
22048@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22049 decl %ecx
22050 jnz 21b
22051 23: xor %eax,%eax
22052+ pax_force_retaddr
22053 ret
22054
22055 .section .fixup,"ax"
22056@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22057 3: rep
22058 movsb
22059 4: xorl %eax,%eax
22060+ pax_force_retaddr
22061 ret
22062
22063 .section .fixup,"ax"
22064diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22065index cb0c112..e3a6895 100644
22066--- a/arch/x86/lib/copy_user_nocache_64.S
22067+++ b/arch/x86/lib/copy_user_nocache_64.S
22068@@ -8,12 +8,14 @@
22069
22070 #include <linux/linkage.h>
22071 #include <asm/dwarf2.h>
22072+#include <asm/alternative-asm.h>
22073
22074 #define FIX_ALIGNMENT 1
22075
22076 #include <asm/current.h>
22077 #include <asm/asm-offsets.h>
22078 #include <asm/thread_info.h>
22079+#include <asm/pgtable.h>
22080
22081 .macro ALIGN_DESTINATION
22082 #ifdef FIX_ALIGNMENT
22083@@ -50,6 +52,15 @@
22084 */
22085 ENTRY(__copy_user_nocache)
22086 CFI_STARTPROC
22087+
22088+#ifdef CONFIG_PAX_MEMORY_UDEREF
22089+ mov $PAX_USER_SHADOW_BASE,%rcx
22090+ cmp %rcx,%rsi
22091+ jae 1f
22092+ add %rcx,%rsi
22093+1:
22094+#endif
22095+
22096 cmpl $8,%edx
22097 jb 20f /* less then 8 bytes, go to byte copy loop */
22098 ALIGN_DESTINATION
22099@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22100 jz 17f
22101 1: movq (%rsi),%r8
22102 2: movq 1*8(%rsi),%r9
22103-3: movq 2*8(%rsi),%r10
22104+3: movq 2*8(%rsi),%rax
22105 4: movq 3*8(%rsi),%r11
22106 5: movnti %r8,(%rdi)
22107 6: movnti %r9,1*8(%rdi)
22108-7: movnti %r10,2*8(%rdi)
22109+7: movnti %rax,2*8(%rdi)
22110 8: movnti %r11,3*8(%rdi)
22111 9: movq 4*8(%rsi),%r8
22112 10: movq 5*8(%rsi),%r9
22113-11: movq 6*8(%rsi),%r10
22114+11: movq 6*8(%rsi),%rax
22115 12: movq 7*8(%rsi),%r11
22116 13: movnti %r8,4*8(%rdi)
22117 14: movnti %r9,5*8(%rdi)
22118-15: movnti %r10,6*8(%rdi)
22119+15: movnti %rax,6*8(%rdi)
22120 16: movnti %r11,7*8(%rdi)
22121 leaq 64(%rsi),%rsi
22122 leaq 64(%rdi),%rdi
22123@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22124 jnz 21b
22125 23: xorl %eax,%eax
22126 sfence
22127+ pax_force_retaddr
22128 ret
22129
22130 .section .fixup,"ax"
22131diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22132index f0dba36..48cb4d6 100644
22133--- a/arch/x86/lib/csum-copy_64.S
22134+++ b/arch/x86/lib/csum-copy_64.S
22135@@ -8,6 +8,7 @@
22136 #include <linux/linkage.h>
22137 #include <asm/dwarf2.h>
22138 #include <asm/errno.h>
22139+#include <asm/alternative-asm.h>
22140
22141 /*
22142 * Checksum copy with exception handling.
22143@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22144 CFI_RESTORE rbp
22145 addq $7*8,%rsp
22146 CFI_ADJUST_CFA_OFFSET -7*8
22147+ pax_force_retaddr 0, 1
22148 ret
22149 CFI_RESTORE_STATE
22150
22151diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22152index 459b58a..9570bc7 100644
22153--- a/arch/x86/lib/csum-wrappers_64.c
22154+++ b/arch/x86/lib/csum-wrappers_64.c
22155@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22156 len -= 2;
22157 }
22158 }
22159- isum = csum_partial_copy_generic((__force const void *)src,
22160+
22161+#ifdef CONFIG_PAX_MEMORY_UDEREF
22162+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22163+ src += PAX_USER_SHADOW_BASE;
22164+#endif
22165+
22166+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22167 dst, len, isum, errp, NULL);
22168 if (unlikely(*errp))
22169 goto out_err;
22170@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22171 }
22172
22173 *errp = 0;
22174- return csum_partial_copy_generic(src, (void __force *)dst,
22175+
22176+#ifdef CONFIG_PAX_MEMORY_UDEREF
22177+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22178+ dst += PAX_USER_SHADOW_BASE;
22179+#endif
22180+
22181+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22182 len, isum, NULL, errp);
22183 }
22184 EXPORT_SYMBOL(csum_partial_copy_to_user);
22185diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22186index 51f1504..ddac4c1 100644
22187--- a/arch/x86/lib/getuser.S
22188+++ b/arch/x86/lib/getuser.S
22189@@ -33,15 +33,38 @@
22190 #include <asm/asm-offsets.h>
22191 #include <asm/thread_info.h>
22192 #include <asm/asm.h>
22193+#include <asm/segment.h>
22194+#include <asm/pgtable.h>
22195+#include <asm/alternative-asm.h>
22196+
22197+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22198+#define __copyuser_seg gs;
22199+#else
22200+#define __copyuser_seg
22201+#endif
22202
22203 .text
22204 ENTRY(__get_user_1)
22205 CFI_STARTPROC
22206+
22207+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22208 GET_THREAD_INFO(%_ASM_DX)
22209 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22210 jae bad_get_user
22211-1: movzb (%_ASM_AX),%edx
22212+
22213+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22214+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22215+ cmp %_ASM_DX,%_ASM_AX
22216+ jae 1234f
22217+ add %_ASM_DX,%_ASM_AX
22218+1234:
22219+#endif
22220+
22221+#endif
22222+
22223+1: __copyuser_seg movzb (%_ASM_AX),%edx
22224 xor %eax,%eax
22225+ pax_force_retaddr
22226 ret
22227 CFI_ENDPROC
22228 ENDPROC(__get_user_1)
22229@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22230 ENTRY(__get_user_2)
22231 CFI_STARTPROC
22232 add $1,%_ASM_AX
22233+
22234+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22235 jc bad_get_user
22236 GET_THREAD_INFO(%_ASM_DX)
22237 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22238 jae bad_get_user
22239-2: movzwl -1(%_ASM_AX),%edx
22240+
22241+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22242+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22243+ cmp %_ASM_DX,%_ASM_AX
22244+ jae 1234f
22245+ add %_ASM_DX,%_ASM_AX
22246+1234:
22247+#endif
22248+
22249+#endif
22250+
22251+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22252 xor %eax,%eax
22253+ pax_force_retaddr
22254 ret
22255 CFI_ENDPROC
22256 ENDPROC(__get_user_2)
22257@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22258 ENTRY(__get_user_4)
22259 CFI_STARTPROC
22260 add $3,%_ASM_AX
22261+
22262+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22263 jc bad_get_user
22264 GET_THREAD_INFO(%_ASM_DX)
22265 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22266 jae bad_get_user
22267-3: mov -3(%_ASM_AX),%edx
22268+
22269+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22270+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22271+ cmp %_ASM_DX,%_ASM_AX
22272+ jae 1234f
22273+ add %_ASM_DX,%_ASM_AX
22274+1234:
22275+#endif
22276+
22277+#endif
22278+
22279+3: __copyuser_seg mov -3(%_ASM_AX),%edx
22280 xor %eax,%eax
22281+ pax_force_retaddr
22282 ret
22283 CFI_ENDPROC
22284 ENDPROC(__get_user_4)
22285@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22286 GET_THREAD_INFO(%_ASM_DX)
22287 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22288 jae bad_get_user
22289+
22290+#ifdef CONFIG_PAX_MEMORY_UDEREF
22291+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22292+ cmp %_ASM_DX,%_ASM_AX
22293+ jae 1234f
22294+ add %_ASM_DX,%_ASM_AX
22295+1234:
22296+#endif
22297+
22298 4: movq -7(%_ASM_AX),%_ASM_DX
22299 xor %eax,%eax
22300+ pax_force_retaddr
22301 ret
22302 CFI_ENDPROC
22303 ENDPROC(__get_user_8)
22304@@ -91,6 +152,7 @@ bad_get_user:
22305 CFI_STARTPROC
22306 xor %edx,%edx
22307 mov $(-EFAULT),%_ASM_AX
22308+ pax_force_retaddr
22309 ret
22310 CFI_ENDPROC
22311 END(bad_get_user)
22312diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22313index 05a95e7..326f2fa 100644
22314--- a/arch/x86/lib/iomap_copy_64.S
22315+++ b/arch/x86/lib/iomap_copy_64.S
22316@@ -17,6 +17,7 @@
22317
22318 #include <linux/linkage.h>
22319 #include <asm/dwarf2.h>
22320+#include <asm/alternative-asm.h>
22321
22322 /*
22323 * override generic version in lib/iomap_copy.c
22324@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22325 CFI_STARTPROC
22326 movl %edx,%ecx
22327 rep movsd
22328+ pax_force_retaddr
22329 ret
22330 CFI_ENDPROC
22331 ENDPROC(__iowrite32_copy)
22332diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22333index ad5441e..610e351 100644
22334--- a/arch/x86/lib/memcpy_64.S
22335+++ b/arch/x86/lib/memcpy_64.S
22336@@ -4,6 +4,7 @@
22337
22338 #include <asm/cpufeature.h>
22339 #include <asm/dwarf2.h>
22340+#include <asm/alternative-asm.h>
22341
22342 /*
22343 * memcpy - Copy a memory block.
22344@@ -34,6 +35,7 @@ memcpy_c:
22345 rep movsq
22346 movl %edx, %ecx
22347 rep movsb
22348+ pax_force_retaddr
22349 ret
22350 CFI_ENDPROC
22351 ENDPROC(memcpy_c)
22352@@ -118,6 +120,7 @@ ENTRY(memcpy)
22353 jnz .Lloop_1
22354
22355 .Lend:
22356+ pax_force_retaddr 0, 1
22357 ret
22358 CFI_ENDPROC
22359 ENDPROC(memcpy)
22360@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22361 * It is also a lot simpler. Use this when possible:
22362 */
22363
22364- .section .altinstr_replacement, "ax"
22365+ .section .altinstr_replacement, "a"
22366 1: .byte 0xeb /* jmp <disp8> */
22367 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22368 2:
22369diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22370index 2c59481..7e9ba4e 100644
22371--- a/arch/x86/lib/memset_64.S
22372+++ b/arch/x86/lib/memset_64.S
22373@@ -2,6 +2,7 @@
22374
22375 #include <linux/linkage.h>
22376 #include <asm/dwarf2.h>
22377+#include <asm/alternative-asm.h>
22378
22379 /*
22380 * ISO C memset - set a memory block to a byte value.
22381@@ -28,6 +29,7 @@ memset_c:
22382 movl %r8d,%ecx
22383 rep stosb
22384 movq %r9,%rax
22385+ pax_force_retaddr
22386 ret
22387 CFI_ENDPROC
22388 ENDPROC(memset_c)
22389@@ -35,13 +37,13 @@ ENDPROC(memset_c)
22390 ENTRY(memset)
22391 ENTRY(__memset)
22392 CFI_STARTPROC
22393- movq %rdi,%r10
22394 movq %rdx,%r11
22395
22396 /* expand byte value */
22397 movzbl %sil,%ecx
22398 movabs $0x0101010101010101,%rax
22399 mul %rcx /* with rax, clobbers rdx */
22400+ movq %rdi,%rdx
22401
22402 /* align dst */
22403 movl %edi,%r9d
22404@@ -95,7 +97,8 @@ ENTRY(__memset)
22405 jnz .Lloop_1
22406
22407 .Lende:
22408- movq %r10,%rax
22409+ movq %rdx,%rax
22410+ pax_force_retaddr
22411 ret
22412
22413 CFI_RESTORE_STATE
22414@@ -118,7 +121,7 @@ ENDPROC(__memset)
22415
22416 #include <asm/cpufeature.h>
22417
22418- .section .altinstr_replacement,"ax"
22419+ .section .altinstr_replacement,"a"
22420 1: .byte 0xeb /* jmp <disp8> */
22421 .byte (memset_c - memset) - (2f - 1b) /* offset */
22422 2:
22423diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22424index c9f2d9b..e7fd2c0 100644
22425--- a/arch/x86/lib/mmx_32.c
22426+++ b/arch/x86/lib/mmx_32.c
22427@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22428 {
22429 void *p;
22430 int i;
22431+ unsigned long cr0;
22432
22433 if (unlikely(in_interrupt()))
22434 return __memcpy(to, from, len);
22435@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22436 kernel_fpu_begin();
22437
22438 __asm__ __volatile__ (
22439- "1: prefetch (%0)\n" /* This set is 28 bytes */
22440- " prefetch 64(%0)\n"
22441- " prefetch 128(%0)\n"
22442- " prefetch 192(%0)\n"
22443- " prefetch 256(%0)\n"
22444+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22445+ " prefetch 64(%1)\n"
22446+ " prefetch 128(%1)\n"
22447+ " prefetch 192(%1)\n"
22448+ " prefetch 256(%1)\n"
22449 "2: \n"
22450 ".section .fixup, \"ax\"\n"
22451- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22452+ "3: \n"
22453+
22454+#ifdef CONFIG_PAX_KERNEXEC
22455+ " movl %%cr0, %0\n"
22456+ " movl %0, %%eax\n"
22457+ " andl $0xFFFEFFFF, %%eax\n"
22458+ " movl %%eax, %%cr0\n"
22459+#endif
22460+
22461+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22462+
22463+#ifdef CONFIG_PAX_KERNEXEC
22464+ " movl %0, %%cr0\n"
22465+#endif
22466+
22467 " jmp 2b\n"
22468 ".previous\n"
22469 _ASM_EXTABLE(1b, 3b)
22470- : : "r" (from));
22471+ : "=&r" (cr0) : "r" (from) : "ax");
22472
22473 for ( ; i > 5; i--) {
22474 __asm__ __volatile__ (
22475- "1: prefetch 320(%0)\n"
22476- "2: movq (%0), %%mm0\n"
22477- " movq 8(%0), %%mm1\n"
22478- " movq 16(%0), %%mm2\n"
22479- " movq 24(%0), %%mm3\n"
22480- " movq %%mm0, (%1)\n"
22481- " movq %%mm1, 8(%1)\n"
22482- " movq %%mm2, 16(%1)\n"
22483- " movq %%mm3, 24(%1)\n"
22484- " movq 32(%0), %%mm0\n"
22485- " movq 40(%0), %%mm1\n"
22486- " movq 48(%0), %%mm2\n"
22487- " movq 56(%0), %%mm3\n"
22488- " movq %%mm0, 32(%1)\n"
22489- " movq %%mm1, 40(%1)\n"
22490- " movq %%mm2, 48(%1)\n"
22491- " movq %%mm3, 56(%1)\n"
22492+ "1: prefetch 320(%1)\n"
22493+ "2: movq (%1), %%mm0\n"
22494+ " movq 8(%1), %%mm1\n"
22495+ " movq 16(%1), %%mm2\n"
22496+ " movq 24(%1), %%mm3\n"
22497+ " movq %%mm0, (%2)\n"
22498+ " movq %%mm1, 8(%2)\n"
22499+ " movq %%mm2, 16(%2)\n"
22500+ " movq %%mm3, 24(%2)\n"
22501+ " movq 32(%1), %%mm0\n"
22502+ " movq 40(%1), %%mm1\n"
22503+ " movq 48(%1), %%mm2\n"
22504+ " movq 56(%1), %%mm3\n"
22505+ " movq %%mm0, 32(%2)\n"
22506+ " movq %%mm1, 40(%2)\n"
22507+ " movq %%mm2, 48(%2)\n"
22508+ " movq %%mm3, 56(%2)\n"
22509 ".section .fixup, \"ax\"\n"
22510- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22511+ "3:\n"
22512+
22513+#ifdef CONFIG_PAX_KERNEXEC
22514+ " movl %%cr0, %0\n"
22515+ " movl %0, %%eax\n"
22516+ " andl $0xFFFEFFFF, %%eax\n"
22517+ " movl %%eax, %%cr0\n"
22518+#endif
22519+
22520+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22521+
22522+#ifdef CONFIG_PAX_KERNEXEC
22523+ " movl %0, %%cr0\n"
22524+#endif
22525+
22526 " jmp 2b\n"
22527 ".previous\n"
22528 _ASM_EXTABLE(1b, 3b)
22529- : : "r" (from), "r" (to) : "memory");
22530+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22531
22532 from += 64;
22533 to += 64;
22534@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22535 static void fast_copy_page(void *to, void *from)
22536 {
22537 int i;
22538+ unsigned long cr0;
22539
22540 kernel_fpu_begin();
22541
22542@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22543 * but that is for later. -AV
22544 */
22545 __asm__ __volatile__(
22546- "1: prefetch (%0)\n"
22547- " prefetch 64(%0)\n"
22548- " prefetch 128(%0)\n"
22549- " prefetch 192(%0)\n"
22550- " prefetch 256(%0)\n"
22551+ "1: prefetch (%1)\n"
22552+ " prefetch 64(%1)\n"
22553+ " prefetch 128(%1)\n"
22554+ " prefetch 192(%1)\n"
22555+ " prefetch 256(%1)\n"
22556 "2: \n"
22557 ".section .fixup, \"ax\"\n"
22558- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22559+ "3: \n"
22560+
22561+#ifdef CONFIG_PAX_KERNEXEC
22562+ " movl %%cr0, %0\n"
22563+ " movl %0, %%eax\n"
22564+ " andl $0xFFFEFFFF, %%eax\n"
22565+ " movl %%eax, %%cr0\n"
22566+#endif
22567+
22568+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22569+
22570+#ifdef CONFIG_PAX_KERNEXEC
22571+ " movl %0, %%cr0\n"
22572+#endif
22573+
22574 " jmp 2b\n"
22575 ".previous\n"
22576- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22577+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22578
22579 for (i = 0; i < (4096-320)/64; i++) {
22580 __asm__ __volatile__ (
22581- "1: prefetch 320(%0)\n"
22582- "2: movq (%0), %%mm0\n"
22583- " movntq %%mm0, (%1)\n"
22584- " movq 8(%0), %%mm1\n"
22585- " movntq %%mm1, 8(%1)\n"
22586- " movq 16(%0), %%mm2\n"
22587- " movntq %%mm2, 16(%1)\n"
22588- " movq 24(%0), %%mm3\n"
22589- " movntq %%mm3, 24(%1)\n"
22590- " movq 32(%0), %%mm4\n"
22591- " movntq %%mm4, 32(%1)\n"
22592- " movq 40(%0), %%mm5\n"
22593- " movntq %%mm5, 40(%1)\n"
22594- " movq 48(%0), %%mm6\n"
22595- " movntq %%mm6, 48(%1)\n"
22596- " movq 56(%0), %%mm7\n"
22597- " movntq %%mm7, 56(%1)\n"
22598+ "1: prefetch 320(%1)\n"
22599+ "2: movq (%1), %%mm0\n"
22600+ " movntq %%mm0, (%2)\n"
22601+ " movq 8(%1), %%mm1\n"
22602+ " movntq %%mm1, 8(%2)\n"
22603+ " movq 16(%1), %%mm2\n"
22604+ " movntq %%mm2, 16(%2)\n"
22605+ " movq 24(%1), %%mm3\n"
22606+ " movntq %%mm3, 24(%2)\n"
22607+ " movq 32(%1), %%mm4\n"
22608+ " movntq %%mm4, 32(%2)\n"
22609+ " movq 40(%1), %%mm5\n"
22610+ " movntq %%mm5, 40(%2)\n"
22611+ " movq 48(%1), %%mm6\n"
22612+ " movntq %%mm6, 48(%2)\n"
22613+ " movq 56(%1), %%mm7\n"
22614+ " movntq %%mm7, 56(%2)\n"
22615 ".section .fixup, \"ax\"\n"
22616- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22617+ "3:\n"
22618+
22619+#ifdef CONFIG_PAX_KERNEXEC
22620+ " movl %%cr0, %0\n"
22621+ " movl %0, %%eax\n"
22622+ " andl $0xFFFEFFFF, %%eax\n"
22623+ " movl %%eax, %%cr0\n"
22624+#endif
22625+
22626+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22627+
22628+#ifdef CONFIG_PAX_KERNEXEC
22629+ " movl %0, %%cr0\n"
22630+#endif
22631+
22632 " jmp 2b\n"
22633 ".previous\n"
22634- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22635+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22636
22637 from += 64;
22638 to += 64;
22639@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22640 static void fast_copy_page(void *to, void *from)
22641 {
22642 int i;
22643+ unsigned long cr0;
22644
22645 kernel_fpu_begin();
22646
22647 __asm__ __volatile__ (
22648- "1: prefetch (%0)\n"
22649- " prefetch 64(%0)\n"
22650- " prefetch 128(%0)\n"
22651- " prefetch 192(%0)\n"
22652- " prefetch 256(%0)\n"
22653+ "1: prefetch (%1)\n"
22654+ " prefetch 64(%1)\n"
22655+ " prefetch 128(%1)\n"
22656+ " prefetch 192(%1)\n"
22657+ " prefetch 256(%1)\n"
22658 "2: \n"
22659 ".section .fixup, \"ax\"\n"
22660- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22661+ "3: \n"
22662+
22663+#ifdef CONFIG_PAX_KERNEXEC
22664+ " movl %%cr0, %0\n"
22665+ " movl %0, %%eax\n"
22666+ " andl $0xFFFEFFFF, %%eax\n"
22667+ " movl %%eax, %%cr0\n"
22668+#endif
22669+
22670+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22671+
22672+#ifdef CONFIG_PAX_KERNEXEC
22673+ " movl %0, %%cr0\n"
22674+#endif
22675+
22676 " jmp 2b\n"
22677 ".previous\n"
22678- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22679+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22680
22681 for (i = 0; i < 4096/64; i++) {
22682 __asm__ __volatile__ (
22683- "1: prefetch 320(%0)\n"
22684- "2: movq (%0), %%mm0\n"
22685- " movq 8(%0), %%mm1\n"
22686- " movq 16(%0), %%mm2\n"
22687- " movq 24(%0), %%mm3\n"
22688- " movq %%mm0, (%1)\n"
22689- " movq %%mm1, 8(%1)\n"
22690- " movq %%mm2, 16(%1)\n"
22691- " movq %%mm3, 24(%1)\n"
22692- " movq 32(%0), %%mm0\n"
22693- " movq 40(%0), %%mm1\n"
22694- " movq 48(%0), %%mm2\n"
22695- " movq 56(%0), %%mm3\n"
22696- " movq %%mm0, 32(%1)\n"
22697- " movq %%mm1, 40(%1)\n"
22698- " movq %%mm2, 48(%1)\n"
22699- " movq %%mm3, 56(%1)\n"
22700+ "1: prefetch 320(%1)\n"
22701+ "2: movq (%1), %%mm0\n"
22702+ " movq 8(%1), %%mm1\n"
22703+ " movq 16(%1), %%mm2\n"
22704+ " movq 24(%1), %%mm3\n"
22705+ " movq %%mm0, (%2)\n"
22706+ " movq %%mm1, 8(%2)\n"
22707+ " movq %%mm2, 16(%2)\n"
22708+ " movq %%mm3, 24(%2)\n"
22709+ " movq 32(%1), %%mm0\n"
22710+ " movq 40(%1), %%mm1\n"
22711+ " movq 48(%1), %%mm2\n"
22712+ " movq 56(%1), %%mm3\n"
22713+ " movq %%mm0, 32(%2)\n"
22714+ " movq %%mm1, 40(%2)\n"
22715+ " movq %%mm2, 48(%2)\n"
22716+ " movq %%mm3, 56(%2)\n"
22717 ".section .fixup, \"ax\"\n"
22718- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22719+ "3:\n"
22720+
22721+#ifdef CONFIG_PAX_KERNEXEC
22722+ " movl %%cr0, %0\n"
22723+ " movl %0, %%eax\n"
22724+ " andl $0xFFFEFFFF, %%eax\n"
22725+ " movl %%eax, %%cr0\n"
22726+#endif
22727+
22728+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22729+
22730+#ifdef CONFIG_PAX_KERNEXEC
22731+ " movl %0, %%cr0\n"
22732+#endif
22733+
22734 " jmp 2b\n"
22735 ".previous\n"
22736 _ASM_EXTABLE(1b, 3b)
22737- : : "r" (from), "r" (to) : "memory");
22738+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22739
22740 from += 64;
22741 to += 64;
22742diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22743index 69fa106..adda88b 100644
22744--- a/arch/x86/lib/msr-reg.S
22745+++ b/arch/x86/lib/msr-reg.S
22746@@ -3,6 +3,7 @@
22747 #include <asm/dwarf2.h>
22748 #include <asm/asm.h>
22749 #include <asm/msr.h>
22750+#include <asm/alternative-asm.h>
22751
22752 #ifdef CONFIG_X86_64
22753 /*
22754@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22755 CFI_STARTPROC
22756 pushq_cfi %rbx
22757 pushq_cfi %rbp
22758- movq %rdi, %r10 /* Save pointer */
22759+ movq %rdi, %r9 /* Save pointer */
22760 xorl %r11d, %r11d /* Return value */
22761 movl (%rdi), %eax
22762 movl 4(%rdi), %ecx
22763@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22764 movl 28(%rdi), %edi
22765 CFI_REMEMBER_STATE
22766 1: \op
22767-2: movl %eax, (%r10)
22768+2: movl %eax, (%r9)
22769 movl %r11d, %eax /* Return value */
22770- movl %ecx, 4(%r10)
22771- movl %edx, 8(%r10)
22772- movl %ebx, 12(%r10)
22773- movl %ebp, 20(%r10)
22774- movl %esi, 24(%r10)
22775- movl %edi, 28(%r10)
22776+ movl %ecx, 4(%r9)
22777+ movl %edx, 8(%r9)
22778+ movl %ebx, 12(%r9)
22779+ movl %ebp, 20(%r9)
22780+ movl %esi, 24(%r9)
22781+ movl %edi, 28(%r9)
22782 popq_cfi %rbp
22783 popq_cfi %rbx
22784+ pax_force_retaddr
22785 ret
22786 3:
22787 CFI_RESTORE_STATE
22788diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22789index 36b0d15..d381858 100644
22790--- a/arch/x86/lib/putuser.S
22791+++ b/arch/x86/lib/putuser.S
22792@@ -15,7 +15,9 @@
22793 #include <asm/thread_info.h>
22794 #include <asm/errno.h>
22795 #include <asm/asm.h>
22796-
22797+#include <asm/segment.h>
22798+#include <asm/pgtable.h>
22799+#include <asm/alternative-asm.h>
22800
22801 /*
22802 * __put_user_X
22803@@ -29,52 +31,119 @@
22804 * as they get called from within inline assembly.
22805 */
22806
22807-#define ENTER CFI_STARTPROC ; \
22808- GET_THREAD_INFO(%_ASM_BX)
22809-#define EXIT ret ; \
22810+#define ENTER CFI_STARTPROC
22811+#define EXIT pax_force_retaddr; ret ; \
22812 CFI_ENDPROC
22813
22814+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22815+#define _DEST %_ASM_CX,%_ASM_BX
22816+#else
22817+#define _DEST %_ASM_CX
22818+#endif
22819+
22820+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22821+#define __copyuser_seg gs;
22822+#else
22823+#define __copyuser_seg
22824+#endif
22825+
22826 .text
22827 ENTRY(__put_user_1)
22828 ENTER
22829+
22830+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22831+ GET_THREAD_INFO(%_ASM_BX)
22832 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22833 jae bad_put_user
22834-1: movb %al,(%_ASM_CX)
22835+
22836+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22837+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22838+ cmp %_ASM_BX,%_ASM_CX
22839+ jb 1234f
22840+ xor %ebx,%ebx
22841+1234:
22842+#endif
22843+
22844+#endif
22845+
22846+1: __copyuser_seg movb %al,(_DEST)
22847 xor %eax,%eax
22848 EXIT
22849 ENDPROC(__put_user_1)
22850
22851 ENTRY(__put_user_2)
22852 ENTER
22853+
22854+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22855+ GET_THREAD_INFO(%_ASM_BX)
22856 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22857 sub $1,%_ASM_BX
22858 cmp %_ASM_BX,%_ASM_CX
22859 jae bad_put_user
22860-2: movw %ax,(%_ASM_CX)
22861+
22862+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22863+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22864+ cmp %_ASM_BX,%_ASM_CX
22865+ jb 1234f
22866+ xor %ebx,%ebx
22867+1234:
22868+#endif
22869+
22870+#endif
22871+
22872+2: __copyuser_seg movw %ax,(_DEST)
22873 xor %eax,%eax
22874 EXIT
22875 ENDPROC(__put_user_2)
22876
22877 ENTRY(__put_user_4)
22878 ENTER
22879+
22880+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22881+ GET_THREAD_INFO(%_ASM_BX)
22882 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22883 sub $3,%_ASM_BX
22884 cmp %_ASM_BX,%_ASM_CX
22885 jae bad_put_user
22886-3: movl %eax,(%_ASM_CX)
22887+
22888+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22889+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22890+ cmp %_ASM_BX,%_ASM_CX
22891+ jb 1234f
22892+ xor %ebx,%ebx
22893+1234:
22894+#endif
22895+
22896+#endif
22897+
22898+3: __copyuser_seg movl %eax,(_DEST)
22899 xor %eax,%eax
22900 EXIT
22901 ENDPROC(__put_user_4)
22902
22903 ENTRY(__put_user_8)
22904 ENTER
22905+
22906+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22907+ GET_THREAD_INFO(%_ASM_BX)
22908 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22909 sub $7,%_ASM_BX
22910 cmp %_ASM_BX,%_ASM_CX
22911 jae bad_put_user
22912-4: mov %_ASM_AX,(%_ASM_CX)
22913+
22914+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22915+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22916+ cmp %_ASM_BX,%_ASM_CX
22917+ jb 1234f
22918+ xor %ebx,%ebx
22919+1234:
22920+#endif
22921+
22922+#endif
22923+
22924+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22925 #ifdef CONFIG_X86_32
22926-5: movl %edx,4(%_ASM_CX)
22927+5: __copyuser_seg movl %edx,4(_DEST)
22928 #endif
22929 xor %eax,%eax
22930 EXIT
22931diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
22932index 05ea55f..6345b9a 100644
22933--- a/arch/x86/lib/rwlock_64.S
22934+++ b/arch/x86/lib/rwlock_64.S
22935@@ -2,6 +2,7 @@
22936
22937 #include <linux/linkage.h>
22938 #include <asm/rwlock.h>
22939+#include <asm/asm.h>
22940 #include <asm/alternative-asm.h>
22941 #include <asm/dwarf2.h>
22942
22943@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
22944 CFI_STARTPROC
22945 LOCK_PREFIX
22946 addl $RW_LOCK_BIAS,(%rdi)
22947+
22948+#ifdef CONFIG_PAX_REFCOUNT
22949+ jno 1234f
22950+ LOCK_PREFIX
22951+ subl $RW_LOCK_BIAS,(%rdi)
22952+ int $4
22953+1234:
22954+ _ASM_EXTABLE(1234b, 1234b)
22955+#endif
22956+
22957 1: rep
22958 nop
22959 cmpl $RW_LOCK_BIAS,(%rdi)
22960 jne 1b
22961 LOCK_PREFIX
22962 subl $RW_LOCK_BIAS,(%rdi)
22963+
22964+#ifdef CONFIG_PAX_REFCOUNT
22965+ jno 1234f
22966+ LOCK_PREFIX
22967+ addl $RW_LOCK_BIAS,(%rdi)
22968+ int $4
22969+1234:
22970+ _ASM_EXTABLE(1234b, 1234b)
22971+#endif
22972+
22973 jnz __write_lock_failed
22974+ pax_force_retaddr
22975 ret
22976 CFI_ENDPROC
22977 END(__write_lock_failed)
22978@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
22979 CFI_STARTPROC
22980 LOCK_PREFIX
22981 incl (%rdi)
22982+
22983+#ifdef CONFIG_PAX_REFCOUNT
22984+ jno 1234f
22985+ LOCK_PREFIX
22986+ decl (%rdi)
22987+ int $4
22988+1234:
22989+ _ASM_EXTABLE(1234b, 1234b)
22990+#endif
22991+
22992 1: rep
22993 nop
22994 cmpl $1,(%rdi)
22995 js 1b
22996 LOCK_PREFIX
22997 decl (%rdi)
22998+
22999+#ifdef CONFIG_PAX_REFCOUNT
23000+ jno 1234f
23001+ LOCK_PREFIX
23002+ incl (%rdi)
23003+ int $4
23004+1234:
23005+ _ASM_EXTABLE(1234b, 1234b)
23006+#endif
23007+
23008 js __read_lock_failed
23009+ pax_force_retaddr
23010 ret
23011 CFI_ENDPROC
23012 END(__read_lock_failed)
23013diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23014index 15acecf..f768b10 100644
23015--- a/arch/x86/lib/rwsem_64.S
23016+++ b/arch/x86/lib/rwsem_64.S
23017@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23018 call rwsem_down_read_failed
23019 popq %rdx
23020 restore_common_regs
23021+ pax_force_retaddr
23022 ret
23023 ENDPROC(call_rwsem_down_read_failed)
23024
23025@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23026 movq %rax,%rdi
23027 call rwsem_down_write_failed
23028 restore_common_regs
23029+ pax_force_retaddr
23030 ret
23031 ENDPROC(call_rwsem_down_write_failed)
23032
23033@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23034 movq %rax,%rdi
23035 call rwsem_wake
23036 restore_common_regs
23037-1: ret
23038+1: pax_force_retaddr
23039+ ret
23040 ENDPROC(call_rwsem_wake)
23041
23042 /* Fix up special calling conventions */
23043@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23044 call rwsem_downgrade_wake
23045 popq %rdx
23046 restore_common_regs
23047+ pax_force_retaddr
23048 ret
23049 ENDPROC(call_rwsem_downgrade_wake)
23050diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23051index bf9a7d5..fb06ab5 100644
23052--- a/arch/x86/lib/thunk_64.S
23053+++ b/arch/x86/lib/thunk_64.S
23054@@ -10,7 +10,8 @@
23055 #include <asm/dwarf2.h>
23056 #include <asm/calling.h>
23057 #include <asm/rwlock.h>
23058-
23059+ #include <asm/alternative-asm.h>
23060+
23061 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23062 .macro thunk name,func
23063 .globl \name
23064@@ -70,6 +71,7 @@
23065 SAVE_ARGS
23066 restore:
23067 RESTORE_ARGS
23068+ pax_force_retaddr
23069 ret
23070 CFI_ENDPROC
23071
23072@@ -77,5 +79,6 @@ restore:
23073 SAVE_ARGS
23074 restore_norax:
23075 RESTORE_ARGS 1
23076+ pax_force_retaddr
23077 ret
23078 CFI_ENDPROC
23079diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23080index 1f118d4..ec4a953 100644
23081--- a/arch/x86/lib/usercopy_32.c
23082+++ b/arch/x86/lib/usercopy_32.c
23083@@ -43,7 +43,7 @@ do { \
23084 __asm__ __volatile__( \
23085 " testl %1,%1\n" \
23086 " jz 2f\n" \
23087- "0: lodsb\n" \
23088+ "0: "__copyuser_seg"lodsb\n" \
23089 " stosb\n" \
23090 " testb %%al,%%al\n" \
23091 " jz 1f\n" \
23092@@ -128,10 +128,12 @@ do { \
23093 int __d0; \
23094 might_fault(); \
23095 __asm__ __volatile__( \
23096+ __COPYUSER_SET_ES \
23097 "0: rep; stosl\n" \
23098 " movl %2,%0\n" \
23099 "1: rep; stosb\n" \
23100 "2:\n" \
23101+ __COPYUSER_RESTORE_ES \
23102 ".section .fixup,\"ax\"\n" \
23103 "3: lea 0(%2,%0,4),%0\n" \
23104 " jmp 2b\n" \
23105@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23106 might_fault();
23107
23108 __asm__ __volatile__(
23109+ __COPYUSER_SET_ES
23110 " testl %0, %0\n"
23111 " jz 3f\n"
23112 " andl %0,%%ecx\n"
23113@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23114 " subl %%ecx,%0\n"
23115 " addl %0,%%eax\n"
23116 "1:\n"
23117+ __COPYUSER_RESTORE_ES
23118 ".section .fixup,\"ax\"\n"
23119 "2: xorl %%eax,%%eax\n"
23120 " jmp 1b\n"
23121@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23122
23123 #ifdef CONFIG_X86_INTEL_USERCOPY
23124 static unsigned long
23125-__copy_user_intel(void __user *to, const void *from, unsigned long size)
23126+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23127 {
23128 int d0, d1;
23129 __asm__ __volatile__(
23130@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23131 " .align 2,0x90\n"
23132 "3: movl 0(%4), %%eax\n"
23133 "4: movl 4(%4), %%edx\n"
23134- "5: movl %%eax, 0(%3)\n"
23135- "6: movl %%edx, 4(%3)\n"
23136+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23137+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23138 "7: movl 8(%4), %%eax\n"
23139 "8: movl 12(%4),%%edx\n"
23140- "9: movl %%eax, 8(%3)\n"
23141- "10: movl %%edx, 12(%3)\n"
23142+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23143+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23144 "11: movl 16(%4), %%eax\n"
23145 "12: movl 20(%4), %%edx\n"
23146- "13: movl %%eax, 16(%3)\n"
23147- "14: movl %%edx, 20(%3)\n"
23148+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23149+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23150 "15: movl 24(%4), %%eax\n"
23151 "16: movl 28(%4), %%edx\n"
23152- "17: movl %%eax, 24(%3)\n"
23153- "18: movl %%edx, 28(%3)\n"
23154+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23155+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23156 "19: movl 32(%4), %%eax\n"
23157 "20: movl 36(%4), %%edx\n"
23158- "21: movl %%eax, 32(%3)\n"
23159- "22: movl %%edx, 36(%3)\n"
23160+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23161+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23162 "23: movl 40(%4), %%eax\n"
23163 "24: movl 44(%4), %%edx\n"
23164- "25: movl %%eax, 40(%3)\n"
23165- "26: movl %%edx, 44(%3)\n"
23166+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23167+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23168 "27: movl 48(%4), %%eax\n"
23169 "28: movl 52(%4), %%edx\n"
23170- "29: movl %%eax, 48(%3)\n"
23171- "30: movl %%edx, 52(%3)\n"
23172+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23173+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23174 "31: movl 56(%4), %%eax\n"
23175 "32: movl 60(%4), %%edx\n"
23176- "33: movl %%eax, 56(%3)\n"
23177- "34: movl %%edx, 60(%3)\n"
23178+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23179+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23180 " addl $-64, %0\n"
23181 " addl $64, %4\n"
23182 " addl $64, %3\n"
23183@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23184 " shrl $2, %0\n"
23185 " andl $3, %%eax\n"
23186 " cld\n"
23187+ __COPYUSER_SET_ES
23188 "99: rep; movsl\n"
23189 "36: movl %%eax, %0\n"
23190 "37: rep; movsb\n"
23191 "100:\n"
23192+ __COPYUSER_RESTORE_ES
23193+ ".section .fixup,\"ax\"\n"
23194+ "101: lea 0(%%eax,%0,4),%0\n"
23195+ " jmp 100b\n"
23196+ ".previous\n"
23197+ ".section __ex_table,\"a\"\n"
23198+ " .align 4\n"
23199+ " .long 1b,100b\n"
23200+ " .long 2b,100b\n"
23201+ " .long 3b,100b\n"
23202+ " .long 4b,100b\n"
23203+ " .long 5b,100b\n"
23204+ " .long 6b,100b\n"
23205+ " .long 7b,100b\n"
23206+ " .long 8b,100b\n"
23207+ " .long 9b,100b\n"
23208+ " .long 10b,100b\n"
23209+ " .long 11b,100b\n"
23210+ " .long 12b,100b\n"
23211+ " .long 13b,100b\n"
23212+ " .long 14b,100b\n"
23213+ " .long 15b,100b\n"
23214+ " .long 16b,100b\n"
23215+ " .long 17b,100b\n"
23216+ " .long 18b,100b\n"
23217+ " .long 19b,100b\n"
23218+ " .long 20b,100b\n"
23219+ " .long 21b,100b\n"
23220+ " .long 22b,100b\n"
23221+ " .long 23b,100b\n"
23222+ " .long 24b,100b\n"
23223+ " .long 25b,100b\n"
23224+ " .long 26b,100b\n"
23225+ " .long 27b,100b\n"
23226+ " .long 28b,100b\n"
23227+ " .long 29b,100b\n"
23228+ " .long 30b,100b\n"
23229+ " .long 31b,100b\n"
23230+ " .long 32b,100b\n"
23231+ " .long 33b,100b\n"
23232+ " .long 34b,100b\n"
23233+ " .long 35b,100b\n"
23234+ " .long 36b,100b\n"
23235+ " .long 37b,100b\n"
23236+ " .long 99b,101b\n"
23237+ ".previous"
23238+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23239+ : "1"(to), "2"(from), "0"(size)
23240+ : "eax", "edx", "memory");
23241+ return size;
23242+}
23243+
23244+static unsigned long
23245+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23246+{
23247+ int d0, d1;
23248+ __asm__ __volatile__(
23249+ " .align 2,0x90\n"
23250+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23251+ " cmpl $67, %0\n"
23252+ " jbe 3f\n"
23253+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23254+ " .align 2,0x90\n"
23255+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23256+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23257+ "5: movl %%eax, 0(%3)\n"
23258+ "6: movl %%edx, 4(%3)\n"
23259+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23260+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23261+ "9: movl %%eax, 8(%3)\n"
23262+ "10: movl %%edx, 12(%3)\n"
23263+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23264+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23265+ "13: movl %%eax, 16(%3)\n"
23266+ "14: movl %%edx, 20(%3)\n"
23267+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23268+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23269+ "17: movl %%eax, 24(%3)\n"
23270+ "18: movl %%edx, 28(%3)\n"
23271+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23272+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23273+ "21: movl %%eax, 32(%3)\n"
23274+ "22: movl %%edx, 36(%3)\n"
23275+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23276+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23277+ "25: movl %%eax, 40(%3)\n"
23278+ "26: movl %%edx, 44(%3)\n"
23279+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23280+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23281+ "29: movl %%eax, 48(%3)\n"
23282+ "30: movl %%edx, 52(%3)\n"
23283+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23284+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23285+ "33: movl %%eax, 56(%3)\n"
23286+ "34: movl %%edx, 60(%3)\n"
23287+ " addl $-64, %0\n"
23288+ " addl $64, %4\n"
23289+ " addl $64, %3\n"
23290+ " cmpl $63, %0\n"
23291+ " ja 1b\n"
23292+ "35: movl %0, %%eax\n"
23293+ " shrl $2, %0\n"
23294+ " andl $3, %%eax\n"
23295+ " cld\n"
23296+ "99: rep; "__copyuser_seg" movsl\n"
23297+ "36: movl %%eax, %0\n"
23298+ "37: rep; "__copyuser_seg" movsb\n"
23299+ "100:\n"
23300 ".section .fixup,\"ax\"\n"
23301 "101: lea 0(%%eax,%0,4),%0\n"
23302 " jmp 100b\n"
23303@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23304 int d0, d1;
23305 __asm__ __volatile__(
23306 " .align 2,0x90\n"
23307- "0: movl 32(%4), %%eax\n"
23308+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23309 " cmpl $67, %0\n"
23310 " jbe 2f\n"
23311- "1: movl 64(%4), %%eax\n"
23312+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23313 " .align 2,0x90\n"
23314- "2: movl 0(%4), %%eax\n"
23315- "21: movl 4(%4), %%edx\n"
23316+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23317+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23318 " movl %%eax, 0(%3)\n"
23319 " movl %%edx, 4(%3)\n"
23320- "3: movl 8(%4), %%eax\n"
23321- "31: movl 12(%4),%%edx\n"
23322+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23323+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23324 " movl %%eax, 8(%3)\n"
23325 " movl %%edx, 12(%3)\n"
23326- "4: movl 16(%4), %%eax\n"
23327- "41: movl 20(%4), %%edx\n"
23328+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23329+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23330 " movl %%eax, 16(%3)\n"
23331 " movl %%edx, 20(%3)\n"
23332- "10: movl 24(%4), %%eax\n"
23333- "51: movl 28(%4), %%edx\n"
23334+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23335+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23336 " movl %%eax, 24(%3)\n"
23337 " movl %%edx, 28(%3)\n"
23338- "11: movl 32(%4), %%eax\n"
23339- "61: movl 36(%4), %%edx\n"
23340+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23341+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23342 " movl %%eax, 32(%3)\n"
23343 " movl %%edx, 36(%3)\n"
23344- "12: movl 40(%4), %%eax\n"
23345- "71: movl 44(%4), %%edx\n"
23346+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23347+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23348 " movl %%eax, 40(%3)\n"
23349 " movl %%edx, 44(%3)\n"
23350- "13: movl 48(%4), %%eax\n"
23351- "81: movl 52(%4), %%edx\n"
23352+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23353+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23354 " movl %%eax, 48(%3)\n"
23355 " movl %%edx, 52(%3)\n"
23356- "14: movl 56(%4), %%eax\n"
23357- "91: movl 60(%4), %%edx\n"
23358+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23359+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23360 " movl %%eax, 56(%3)\n"
23361 " movl %%edx, 60(%3)\n"
23362 " addl $-64, %0\n"
23363@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23364 " shrl $2, %0\n"
23365 " andl $3, %%eax\n"
23366 " cld\n"
23367- "6: rep; movsl\n"
23368+ "6: rep; "__copyuser_seg" movsl\n"
23369 " movl %%eax,%0\n"
23370- "7: rep; movsb\n"
23371+ "7: rep; "__copyuser_seg" movsb\n"
23372 "8:\n"
23373 ".section .fixup,\"ax\"\n"
23374 "9: lea 0(%%eax,%0,4),%0\n"
23375@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23376
23377 __asm__ __volatile__(
23378 " .align 2,0x90\n"
23379- "0: movl 32(%4), %%eax\n"
23380+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23381 " cmpl $67, %0\n"
23382 " jbe 2f\n"
23383- "1: movl 64(%4), %%eax\n"
23384+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23385 " .align 2,0x90\n"
23386- "2: movl 0(%4), %%eax\n"
23387- "21: movl 4(%4), %%edx\n"
23388+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23389+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23390 " movnti %%eax, 0(%3)\n"
23391 " movnti %%edx, 4(%3)\n"
23392- "3: movl 8(%4), %%eax\n"
23393- "31: movl 12(%4),%%edx\n"
23394+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23395+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23396 " movnti %%eax, 8(%3)\n"
23397 " movnti %%edx, 12(%3)\n"
23398- "4: movl 16(%4), %%eax\n"
23399- "41: movl 20(%4), %%edx\n"
23400+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23401+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23402 " movnti %%eax, 16(%3)\n"
23403 " movnti %%edx, 20(%3)\n"
23404- "10: movl 24(%4), %%eax\n"
23405- "51: movl 28(%4), %%edx\n"
23406+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23407+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23408 " movnti %%eax, 24(%3)\n"
23409 " movnti %%edx, 28(%3)\n"
23410- "11: movl 32(%4), %%eax\n"
23411- "61: movl 36(%4), %%edx\n"
23412+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23413+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23414 " movnti %%eax, 32(%3)\n"
23415 " movnti %%edx, 36(%3)\n"
23416- "12: movl 40(%4), %%eax\n"
23417- "71: movl 44(%4), %%edx\n"
23418+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23419+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23420 " movnti %%eax, 40(%3)\n"
23421 " movnti %%edx, 44(%3)\n"
23422- "13: movl 48(%4), %%eax\n"
23423- "81: movl 52(%4), %%edx\n"
23424+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23425+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23426 " movnti %%eax, 48(%3)\n"
23427 " movnti %%edx, 52(%3)\n"
23428- "14: movl 56(%4), %%eax\n"
23429- "91: movl 60(%4), %%edx\n"
23430+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23431+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23432 " movnti %%eax, 56(%3)\n"
23433 " movnti %%edx, 60(%3)\n"
23434 " addl $-64, %0\n"
23435@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23436 " shrl $2, %0\n"
23437 " andl $3, %%eax\n"
23438 " cld\n"
23439- "6: rep; movsl\n"
23440+ "6: rep; "__copyuser_seg" movsl\n"
23441 " movl %%eax,%0\n"
23442- "7: rep; movsb\n"
23443+ "7: rep; "__copyuser_seg" movsb\n"
23444 "8:\n"
23445 ".section .fixup,\"ax\"\n"
23446 "9: lea 0(%%eax,%0,4),%0\n"
23447@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23448
23449 __asm__ __volatile__(
23450 " .align 2,0x90\n"
23451- "0: movl 32(%4), %%eax\n"
23452+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23453 " cmpl $67, %0\n"
23454 " jbe 2f\n"
23455- "1: movl 64(%4), %%eax\n"
23456+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23457 " .align 2,0x90\n"
23458- "2: movl 0(%4), %%eax\n"
23459- "21: movl 4(%4), %%edx\n"
23460+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23461+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23462 " movnti %%eax, 0(%3)\n"
23463 " movnti %%edx, 4(%3)\n"
23464- "3: movl 8(%4), %%eax\n"
23465- "31: movl 12(%4),%%edx\n"
23466+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23467+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23468 " movnti %%eax, 8(%3)\n"
23469 " movnti %%edx, 12(%3)\n"
23470- "4: movl 16(%4), %%eax\n"
23471- "41: movl 20(%4), %%edx\n"
23472+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23473+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23474 " movnti %%eax, 16(%3)\n"
23475 " movnti %%edx, 20(%3)\n"
23476- "10: movl 24(%4), %%eax\n"
23477- "51: movl 28(%4), %%edx\n"
23478+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23479+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23480 " movnti %%eax, 24(%3)\n"
23481 " movnti %%edx, 28(%3)\n"
23482- "11: movl 32(%4), %%eax\n"
23483- "61: movl 36(%4), %%edx\n"
23484+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23485+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23486 " movnti %%eax, 32(%3)\n"
23487 " movnti %%edx, 36(%3)\n"
23488- "12: movl 40(%4), %%eax\n"
23489- "71: movl 44(%4), %%edx\n"
23490+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23491+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23492 " movnti %%eax, 40(%3)\n"
23493 " movnti %%edx, 44(%3)\n"
23494- "13: movl 48(%4), %%eax\n"
23495- "81: movl 52(%4), %%edx\n"
23496+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23497+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23498 " movnti %%eax, 48(%3)\n"
23499 " movnti %%edx, 52(%3)\n"
23500- "14: movl 56(%4), %%eax\n"
23501- "91: movl 60(%4), %%edx\n"
23502+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23503+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23504 " movnti %%eax, 56(%3)\n"
23505 " movnti %%edx, 60(%3)\n"
23506 " addl $-64, %0\n"
23507@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23508 " shrl $2, %0\n"
23509 " andl $3, %%eax\n"
23510 " cld\n"
23511- "6: rep; movsl\n"
23512+ "6: rep; "__copyuser_seg" movsl\n"
23513 " movl %%eax,%0\n"
23514- "7: rep; movsb\n"
23515+ "7: rep; "__copyuser_seg" movsb\n"
23516 "8:\n"
23517 ".section .fixup,\"ax\"\n"
23518 "9: lea 0(%%eax,%0,4),%0\n"
23519@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23520 */
23521 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23522 unsigned long size);
23523-unsigned long __copy_user_intel(void __user *to, const void *from,
23524+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23525+ unsigned long size);
23526+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23527 unsigned long size);
23528 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23529 const void __user *from, unsigned long size);
23530 #endif /* CONFIG_X86_INTEL_USERCOPY */
23531
23532 /* Generic arbitrary sized copy. */
23533-#define __copy_user(to, from, size) \
23534+#define __copy_user(to, from, size, prefix, set, restore) \
23535 do { \
23536 int __d0, __d1, __d2; \
23537 __asm__ __volatile__( \
23538+ set \
23539 " cmp $7,%0\n" \
23540 " jbe 1f\n" \
23541 " movl %1,%0\n" \
23542 " negl %0\n" \
23543 " andl $7,%0\n" \
23544 " subl %0,%3\n" \
23545- "4: rep; movsb\n" \
23546+ "4: rep; "prefix"movsb\n" \
23547 " movl %3,%0\n" \
23548 " shrl $2,%0\n" \
23549 " andl $3,%3\n" \
23550 " .align 2,0x90\n" \
23551- "0: rep; movsl\n" \
23552+ "0: rep; "prefix"movsl\n" \
23553 " movl %3,%0\n" \
23554- "1: rep; movsb\n" \
23555+ "1: rep; "prefix"movsb\n" \
23556 "2:\n" \
23557+ restore \
23558 ".section .fixup,\"ax\"\n" \
23559 "5: addl %3,%0\n" \
23560 " jmp 2b\n" \
23561@@ -682,14 +799,14 @@ do { \
23562 " negl %0\n" \
23563 " andl $7,%0\n" \
23564 " subl %0,%3\n" \
23565- "4: rep; movsb\n" \
23566+ "4: rep; "__copyuser_seg"movsb\n" \
23567 " movl %3,%0\n" \
23568 " shrl $2,%0\n" \
23569 " andl $3,%3\n" \
23570 " .align 2,0x90\n" \
23571- "0: rep; movsl\n" \
23572+ "0: rep; "__copyuser_seg"movsl\n" \
23573 " movl %3,%0\n" \
23574- "1: rep; movsb\n" \
23575+ "1: rep; "__copyuser_seg"movsb\n" \
23576 "2:\n" \
23577 ".section .fixup,\"ax\"\n" \
23578 "5: addl %3,%0\n" \
23579@@ -775,9 +892,9 @@ survive:
23580 }
23581 #endif
23582 if (movsl_is_ok(to, from, n))
23583- __copy_user(to, from, n);
23584+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23585 else
23586- n = __copy_user_intel(to, from, n);
23587+ n = __generic_copy_to_user_intel(to, from, n);
23588 return n;
23589 }
23590 EXPORT_SYMBOL(__copy_to_user_ll);
23591@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23592 unsigned long n)
23593 {
23594 if (movsl_is_ok(to, from, n))
23595- __copy_user(to, from, n);
23596+ __copy_user(to, from, n, __copyuser_seg, "", "");
23597 else
23598- n = __copy_user_intel((void __user *)to,
23599- (const void *)from, n);
23600+ n = __generic_copy_from_user_intel(to, from, n);
23601 return n;
23602 }
23603 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23604@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23605 if (n > 64 && cpu_has_xmm2)
23606 n = __copy_user_intel_nocache(to, from, n);
23607 else
23608- __copy_user(to, from, n);
23609+ __copy_user(to, from, n, __copyuser_seg, "", "");
23610 #else
23611- __copy_user(to, from, n);
23612+ __copy_user(to, from, n, __copyuser_seg, "", "");
23613 #endif
23614 return n;
23615 }
23616 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23617
23618-/**
23619- * copy_to_user: - Copy a block of data into user space.
23620- * @to: Destination address, in user space.
23621- * @from: Source address, in kernel space.
23622- * @n: Number of bytes to copy.
23623- *
23624- * Context: User context only. This function may sleep.
23625- *
23626- * Copy data from kernel space to user space.
23627- *
23628- * Returns number of bytes that could not be copied.
23629- * On success, this will be zero.
23630- */
23631-unsigned long
23632-copy_to_user(void __user *to, const void *from, unsigned long n)
23633+#ifdef CONFIG_PAX_MEMORY_UDEREF
23634+void __set_fs(mm_segment_t x)
23635 {
23636- if (access_ok(VERIFY_WRITE, to, n))
23637- n = __copy_to_user(to, from, n);
23638- return n;
23639+ switch (x.seg) {
23640+ case 0:
23641+ loadsegment(gs, 0);
23642+ break;
23643+ case TASK_SIZE_MAX:
23644+ loadsegment(gs, __USER_DS);
23645+ break;
23646+ case -1UL:
23647+ loadsegment(gs, __KERNEL_DS);
23648+ break;
23649+ default:
23650+ BUG();
23651+ }
23652+ return;
23653 }
23654-EXPORT_SYMBOL(copy_to_user);
23655+EXPORT_SYMBOL(__set_fs);
23656
23657-/**
23658- * copy_from_user: - Copy a block of data from user space.
23659- * @to: Destination address, in kernel space.
23660- * @from: Source address, in user space.
23661- * @n: Number of bytes to copy.
23662- *
23663- * Context: User context only. This function may sleep.
23664- *
23665- * Copy data from user space to kernel space.
23666- *
23667- * Returns number of bytes that could not be copied.
23668- * On success, this will be zero.
23669- *
23670- * If some data could not be copied, this function will pad the copied
23671- * data to the requested size using zero bytes.
23672- */
23673-unsigned long
23674-copy_from_user(void *to, const void __user *from, unsigned long n)
23675+void set_fs(mm_segment_t x)
23676 {
23677- if (access_ok(VERIFY_READ, from, n))
23678- n = __copy_from_user(to, from, n);
23679- else
23680- memset(to, 0, n);
23681- return n;
23682+ current_thread_info()->addr_limit = x;
23683+ __set_fs(x);
23684 }
23685-EXPORT_SYMBOL(copy_from_user);
23686+EXPORT_SYMBOL(set_fs);
23687+#endif
23688diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23689index b7c2849..8633ad8 100644
23690--- a/arch/x86/lib/usercopy_64.c
23691+++ b/arch/x86/lib/usercopy_64.c
23692@@ -42,6 +42,12 @@ long
23693 __strncpy_from_user(char *dst, const char __user *src, long count)
23694 {
23695 long res;
23696+
23697+#ifdef CONFIG_PAX_MEMORY_UDEREF
23698+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23699+ src += PAX_USER_SHADOW_BASE;
23700+#endif
23701+
23702 __do_strncpy_from_user(dst, src, count, res);
23703 return res;
23704 }
23705@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23706 {
23707 long __d0;
23708 might_fault();
23709+
23710+#ifdef CONFIG_PAX_MEMORY_UDEREF
23711+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23712+ addr += PAX_USER_SHADOW_BASE;
23713+#endif
23714+
23715 /* no memory constraint because it doesn't change any memory gcc knows
23716 about */
23717 asm volatile(
23718@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23719 }
23720 EXPORT_SYMBOL(strlen_user);
23721
23722-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23723+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23724 {
23725- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23726- return copy_user_generic((__force void *)to, (__force void *)from, len);
23727- }
23728- return len;
23729+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23730+
23731+#ifdef CONFIG_PAX_MEMORY_UDEREF
23732+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23733+ to += PAX_USER_SHADOW_BASE;
23734+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23735+ from += PAX_USER_SHADOW_BASE;
23736+#endif
23737+
23738+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23739+ }
23740+ return len;
23741 }
23742 EXPORT_SYMBOL(copy_in_user);
23743
23744@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23745 * it is not necessary to optimize tail handling.
23746 */
23747 unsigned long
23748-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23749+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23750 {
23751 char c;
23752 unsigned zero_len;
23753diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23754index 61b41ca..5fef66a 100644
23755--- a/arch/x86/mm/extable.c
23756+++ b/arch/x86/mm/extable.c
23757@@ -1,14 +1,71 @@
23758 #include <linux/module.h>
23759 #include <linux/spinlock.h>
23760+#include <linux/sort.h>
23761 #include <asm/uaccess.h>
23762+#include <asm/pgtable.h>
23763
23764+/*
23765+ * The exception table needs to be sorted so that the binary
23766+ * search that we use to find entries in it works properly.
23767+ * This is used both for the kernel exception table and for
23768+ * the exception tables of modules that get loaded.
23769+ */
23770+static int cmp_ex(const void *a, const void *b)
23771+{
23772+ const struct exception_table_entry *x = a, *y = b;
23773+
23774+ /* avoid overflow */
23775+ if (x->insn > y->insn)
23776+ return 1;
23777+ if (x->insn < y->insn)
23778+ return -1;
23779+ return 0;
23780+}
23781+
23782+static void swap_ex(void *a, void *b, int size)
23783+{
23784+ struct exception_table_entry t, *x = a, *y = b;
23785+
23786+ t = *x;
23787+
23788+ pax_open_kernel();
23789+ *x = *y;
23790+ *y = t;
23791+ pax_close_kernel();
23792+}
23793+
23794+void sort_extable(struct exception_table_entry *start,
23795+ struct exception_table_entry *finish)
23796+{
23797+ sort(start, finish - start, sizeof(struct exception_table_entry),
23798+ cmp_ex, swap_ex);
23799+}
23800+
23801+#ifdef CONFIG_MODULES
23802+/*
23803+ * If the exception table is sorted, any referring to the module init
23804+ * will be at the beginning or the end.
23805+ */
23806+void trim_init_extable(struct module *m)
23807+{
23808+ /*trim the beginning*/
23809+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
23810+ m->extable++;
23811+ m->num_exentries--;
23812+ }
23813+ /*trim the end*/
23814+ while (m->num_exentries &&
23815+ within_module_init(m->extable[m->num_exentries-1].insn, m))
23816+ m->num_exentries--;
23817+}
23818+#endif /* CONFIG_MODULES */
23819
23820 int fixup_exception(struct pt_regs *regs)
23821 {
23822 const struct exception_table_entry *fixup;
23823
23824 #ifdef CONFIG_PNPBIOS
23825- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23826+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23827 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23828 extern u32 pnp_bios_is_utter_crap;
23829 pnp_bios_is_utter_crap = 1;
23830diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23831index 8ac0d76..ca501e2 100644
23832--- a/arch/x86/mm/fault.c
23833+++ b/arch/x86/mm/fault.c
23834@@ -11,10 +11,19 @@
23835 #include <linux/kprobes.h> /* __kprobes, ... */
23836 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
23837 #include <linux/perf_event.h> /* perf_sw_event */
23838+#include <linux/unistd.h>
23839+#include <linux/compiler.h>
23840
23841 #include <asm/traps.h> /* dotraplinkage, ... */
23842 #include <asm/pgalloc.h> /* pgd_*(), ... */
23843 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23844+#include <asm/vsyscall.h>
23845+#include <asm/tlbflush.h>
23846+
23847+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23848+#include <asm/stacktrace.h>
23849+#include "../kernel/dumpstack.h"
23850+#endif
23851
23852 /*
23853 * Page fault error code bits:
23854@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
23855 int ret = 0;
23856
23857 /* kprobe_running() needs smp_processor_id() */
23858- if (kprobes_built_in() && !user_mode_vm(regs)) {
23859+ if (kprobes_built_in() && !user_mode(regs)) {
23860 preempt_disable();
23861 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23862 ret = 1;
23863@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23864 return !instr_lo || (instr_lo>>1) == 1;
23865 case 0x00:
23866 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23867- if (probe_kernel_address(instr, opcode))
23868+ if (user_mode(regs)) {
23869+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23870+ return 0;
23871+ } else if (probe_kernel_address(instr, opcode))
23872 return 0;
23873
23874 *prefetch = (instr_lo == 0xF) &&
23875@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23876 while (instr < max_instr) {
23877 unsigned char opcode;
23878
23879- if (probe_kernel_address(instr, opcode))
23880+ if (user_mode(regs)) {
23881+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23882+ break;
23883+ } else if (probe_kernel_address(instr, opcode))
23884 break;
23885
23886 instr++;
23887@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23888 force_sig_info(si_signo, &info, tsk);
23889 }
23890
23891+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23892+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23893+#endif
23894+
23895+#ifdef CONFIG_PAX_EMUTRAMP
23896+static int pax_handle_fetch_fault(struct pt_regs *regs);
23897+#endif
23898+
23899+#ifdef CONFIG_PAX_PAGEEXEC
23900+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23901+{
23902+ pgd_t *pgd;
23903+ pud_t *pud;
23904+ pmd_t *pmd;
23905+
23906+ pgd = pgd_offset(mm, address);
23907+ if (!pgd_present(*pgd))
23908+ return NULL;
23909+ pud = pud_offset(pgd, address);
23910+ if (!pud_present(*pud))
23911+ return NULL;
23912+ pmd = pmd_offset(pud, address);
23913+ if (!pmd_present(*pmd))
23914+ return NULL;
23915+ return pmd;
23916+}
23917+#endif
23918+
23919 DEFINE_SPINLOCK(pgd_lock);
23920 LIST_HEAD(pgd_list);
23921
23922@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
23923 address += PMD_SIZE) {
23924
23925 unsigned long flags;
23926+
23927+#ifdef CONFIG_PAX_PER_CPU_PGD
23928+ unsigned long cpu;
23929+#else
23930 struct page *page;
23931+#endif
23932
23933 spin_lock_irqsave(&pgd_lock, flags);
23934+
23935+#ifdef CONFIG_PAX_PER_CPU_PGD
23936+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23937+ pgd_t *pgd = get_cpu_pgd(cpu);
23938+#else
23939 list_for_each_entry(page, &pgd_list, lru) {
23940- if (!vmalloc_sync_one(page_address(page), address))
23941+ pgd_t *pgd = page_address(page);
23942+#endif
23943+
23944+ if (!vmalloc_sync_one(pgd, address))
23945 break;
23946 }
23947 spin_unlock_irqrestore(&pgd_lock, flags);
23948@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
23949 * an interrupt in the middle of a task switch..
23950 */
23951 pgd_paddr = read_cr3();
23952+
23953+#ifdef CONFIG_PAX_PER_CPU_PGD
23954+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23955+#endif
23956+
23957 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23958 if (!pmd_k)
23959 return -1;
23960@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
23961
23962 const pgd_t *pgd_ref = pgd_offset_k(address);
23963 unsigned long flags;
23964+
23965+#ifdef CONFIG_PAX_PER_CPU_PGD
23966+ unsigned long cpu;
23967+#else
23968 struct page *page;
23969+#endif
23970
23971 if (pgd_none(*pgd_ref))
23972 continue;
23973
23974 spin_lock_irqsave(&pgd_lock, flags);
23975+
23976+#ifdef CONFIG_PAX_PER_CPU_PGD
23977+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
23978+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
23979+#else
23980 list_for_each_entry(page, &pgd_list, lru) {
23981 pgd_t *pgd;
23982 pgd = (pgd_t *)page_address(page) + pgd_index(address);
23983+#endif
23984+
23985 if (pgd_none(*pgd))
23986 set_pgd(pgd, *pgd_ref);
23987 else
23988@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
23989 * happen within a race in page table update. In the later
23990 * case just flush:
23991 */
23992+
23993+#ifdef CONFIG_PAX_PER_CPU_PGD
23994+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23995+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23996+#else
23997 pgd = pgd_offset(current->active_mm, address);
23998+#endif
23999+
24000 pgd_ref = pgd_offset_k(address);
24001 if (pgd_none(*pgd_ref))
24002 return -1;
24003@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
24004 static int is_errata100(struct pt_regs *regs, unsigned long address)
24005 {
24006 #ifdef CONFIG_X86_64
24007- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24008+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24009 return 1;
24010 #endif
24011 return 0;
24012@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24013 }
24014
24015 static const char nx_warning[] = KERN_CRIT
24016-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24017+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24018
24019 static void
24020 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24021@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24022 if (!oops_may_print())
24023 return;
24024
24025- if (error_code & PF_INSTR) {
24026+ if (nx_enabled && (error_code & PF_INSTR)) {
24027 unsigned int level;
24028
24029 pte_t *pte = lookup_address(address, &level);
24030
24031 if (pte && pte_present(*pte) && !pte_exec(*pte))
24032- printk(nx_warning, current_uid());
24033+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24034 }
24035
24036+#ifdef CONFIG_PAX_KERNEXEC
24037+ if (init_mm.start_code <= address && address < init_mm.end_code) {
24038+ if (current->signal->curr_ip)
24039+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24040+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24041+ else
24042+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24043+ current->comm, task_pid_nr(current), current_uid(), current_euid());
24044+ }
24045+#endif
24046+
24047 printk(KERN_ALERT "BUG: unable to handle kernel ");
24048 if (address < PAGE_SIZE)
24049 printk(KERN_CONT "NULL pointer dereference");
24050@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24051 {
24052 struct task_struct *tsk = current;
24053
24054+#ifdef CONFIG_X86_64
24055+ struct mm_struct *mm = tsk->mm;
24056+
24057+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24058+ if (regs->ip == (unsigned long)vgettimeofday) {
24059+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24060+ return;
24061+ } else if (regs->ip == (unsigned long)vtime) {
24062+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24063+ return;
24064+ } else if (regs->ip == (unsigned long)vgetcpu) {
24065+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24066+ return;
24067+ }
24068+ }
24069+#endif
24070+
24071 /* User mode accesses just cause a SIGSEGV */
24072 if (error_code & PF_USER) {
24073 /*
24074@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24075 if (is_errata100(regs, address))
24076 return;
24077
24078+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24079+ if (pax_is_fetch_fault(regs, error_code, address)) {
24080+
24081+#ifdef CONFIG_PAX_EMUTRAMP
24082+ switch (pax_handle_fetch_fault(regs)) {
24083+ case 2:
24084+ return;
24085+ }
24086+#endif
24087+
24088+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24089+ do_group_exit(SIGKILL);
24090+ }
24091+#endif
24092+
24093 if (unlikely(show_unhandled_signals))
24094 show_signal_msg(regs, error_code, address, tsk);
24095
24096@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24097 if (fault & VM_FAULT_HWPOISON) {
24098 printk(KERN_ERR
24099 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24100- tsk->comm, tsk->pid, address);
24101+ tsk->comm, task_pid_nr(tsk), address);
24102 code = BUS_MCEERR_AR;
24103 }
24104 #endif
24105@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24106 return 1;
24107 }
24108
24109+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24110+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24111+{
24112+ pte_t *pte;
24113+ pmd_t *pmd;
24114+ spinlock_t *ptl;
24115+ unsigned char pte_mask;
24116+
24117+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24118+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
24119+ return 0;
24120+
24121+ /* PaX: it's our fault, let's handle it if we can */
24122+
24123+ /* PaX: take a look at read faults before acquiring any locks */
24124+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24125+ /* instruction fetch attempt from a protected page in user mode */
24126+ up_read(&mm->mmap_sem);
24127+
24128+#ifdef CONFIG_PAX_EMUTRAMP
24129+ switch (pax_handle_fetch_fault(regs)) {
24130+ case 2:
24131+ return 1;
24132+ }
24133+#endif
24134+
24135+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24136+ do_group_exit(SIGKILL);
24137+ }
24138+
24139+ pmd = pax_get_pmd(mm, address);
24140+ if (unlikely(!pmd))
24141+ return 0;
24142+
24143+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24144+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24145+ pte_unmap_unlock(pte, ptl);
24146+ return 0;
24147+ }
24148+
24149+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24150+ /* write attempt to a protected page in user mode */
24151+ pte_unmap_unlock(pte, ptl);
24152+ return 0;
24153+ }
24154+
24155+#ifdef CONFIG_SMP
24156+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24157+#else
24158+ if (likely(address > get_limit(regs->cs)))
24159+#endif
24160+ {
24161+ set_pte(pte, pte_mkread(*pte));
24162+ __flush_tlb_one(address);
24163+ pte_unmap_unlock(pte, ptl);
24164+ up_read(&mm->mmap_sem);
24165+ return 1;
24166+ }
24167+
24168+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24169+
24170+ /*
24171+ * PaX: fill DTLB with user rights and retry
24172+ */
24173+ __asm__ __volatile__ (
24174+ "orb %2,(%1)\n"
24175+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24176+/*
24177+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24178+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24179+ * page fault when examined during a TLB load attempt. this is true not only
24180+ * for PTEs holding a non-present entry but also present entries that will
24181+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24182+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24183+ * for our target pages since their PTEs are simply not in the TLBs at all.
24184+
24185+ * the best thing in omitting it is that we gain around 15-20% speed in the
24186+ * fast path of the page fault handler and can get rid of tracing since we
24187+ * can no longer flush unintended entries.
24188+ */
24189+ "invlpg (%0)\n"
24190+#endif
24191+ __copyuser_seg"testb $0,(%0)\n"
24192+ "xorb %3,(%1)\n"
24193+ :
24194+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24195+ : "memory", "cc");
24196+ pte_unmap_unlock(pte, ptl);
24197+ up_read(&mm->mmap_sem);
24198+ return 1;
24199+}
24200+#endif
24201+
24202 /*
24203 * Handle a spurious fault caused by a stale TLB entry.
24204 *
24205@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24206 static inline int
24207 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24208 {
24209+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24210+ return 1;
24211+
24212 if (write) {
24213 /* write, present and write, not present: */
24214 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24215@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24216 {
24217 struct vm_area_struct *vma;
24218 struct task_struct *tsk;
24219- unsigned long address;
24220 struct mm_struct *mm;
24221 int write;
24222 int fault;
24223
24224- tsk = current;
24225- mm = tsk->mm;
24226-
24227 /* Get the faulting address: */
24228- address = read_cr2();
24229+ unsigned long address = read_cr2();
24230+
24231+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24232+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24233+ if (!search_exception_tables(regs->ip)) {
24234+ bad_area_nosemaphore(regs, error_code, address);
24235+ return;
24236+ }
24237+ if (address < PAX_USER_SHADOW_BASE) {
24238+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24239+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24240+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24241+ } else
24242+ address -= PAX_USER_SHADOW_BASE;
24243+ }
24244+#endif
24245+
24246+ tsk = current;
24247+ mm = tsk->mm;
24248
24249 /*
24250 * Detect and handle instructions that would cause a page fault for
24251@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24252 * User-mode registers count as a user access even for any
24253 * potential system fault or CPU buglet:
24254 */
24255- if (user_mode_vm(regs)) {
24256+ if (user_mode(regs)) {
24257 local_irq_enable();
24258 error_code |= PF_USER;
24259 } else {
24260@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24261 might_sleep();
24262 }
24263
24264+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24265+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24266+ return;
24267+#endif
24268+
24269 vma = find_vma(mm, address);
24270 if (unlikely(!vma)) {
24271 bad_area(regs, error_code, address);
24272@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24273 bad_area(regs, error_code, address);
24274 return;
24275 }
24276- if (error_code & PF_USER) {
24277- /*
24278- * Accessing the stack below %sp is always a bug.
24279- * The large cushion allows instructions like enter
24280- * and pusha to work. ("enter $65535, $31" pushes
24281- * 32 pointers and then decrements %sp by 65535.)
24282- */
24283- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24284- bad_area(regs, error_code, address);
24285- return;
24286- }
24287+ /*
24288+ * Accessing the stack below %sp is always a bug.
24289+ * The large cushion allows instructions like enter
24290+ * and pusha to work. ("enter $65535, $31" pushes
24291+ * 32 pointers and then decrements %sp by 65535.)
24292+ */
24293+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24294+ bad_area(regs, error_code, address);
24295+ return;
24296 }
24297+
24298+#ifdef CONFIG_PAX_SEGMEXEC
24299+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24300+ bad_area(regs, error_code, address);
24301+ return;
24302+ }
24303+#endif
24304+
24305 if (unlikely(expand_stack(vma, address))) {
24306 bad_area(regs, error_code, address);
24307 return;
24308@@ -1146,3 +1390,292 @@ good_area:
24309
24310 up_read(&mm->mmap_sem);
24311 }
24312+
24313+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24314+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24315+{
24316+ struct mm_struct *mm = current->mm;
24317+ unsigned long ip = regs->ip;
24318+
24319+ if (v8086_mode(regs))
24320+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24321+
24322+#ifdef CONFIG_PAX_PAGEEXEC
24323+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24324+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24325+ return true;
24326+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24327+ return true;
24328+ return false;
24329+ }
24330+#endif
24331+
24332+#ifdef CONFIG_PAX_SEGMEXEC
24333+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24334+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24335+ return true;
24336+ return false;
24337+ }
24338+#endif
24339+
24340+ return false;
24341+}
24342+#endif
24343+
24344+#ifdef CONFIG_PAX_EMUTRAMP
24345+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24346+{
24347+ int err;
24348+
24349+ do { /* PaX: libffi trampoline emulation */
24350+ unsigned char mov, jmp;
24351+ unsigned int addr1, addr2;
24352+
24353+#ifdef CONFIG_X86_64
24354+ if ((regs->ip + 9) >> 32)
24355+ break;
24356+#endif
24357+
24358+ err = get_user(mov, (unsigned char __user *)regs->ip);
24359+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24360+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24361+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24362+
24363+ if (err)
24364+ break;
24365+
24366+ if (mov == 0xB8 && jmp == 0xE9) {
24367+ regs->ax = addr1;
24368+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24369+ return 2;
24370+ }
24371+ } while (0);
24372+
24373+ do { /* PaX: gcc trampoline emulation #1 */
24374+ unsigned char mov1, mov2;
24375+ unsigned short jmp;
24376+ unsigned int addr1, addr2;
24377+
24378+#ifdef CONFIG_X86_64
24379+ if ((regs->ip + 11) >> 32)
24380+ break;
24381+#endif
24382+
24383+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24384+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24385+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24386+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24387+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24388+
24389+ if (err)
24390+ break;
24391+
24392+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24393+ regs->cx = addr1;
24394+ regs->ax = addr2;
24395+ regs->ip = addr2;
24396+ return 2;
24397+ }
24398+ } while (0);
24399+
24400+ do { /* PaX: gcc trampoline emulation #2 */
24401+ unsigned char mov, jmp;
24402+ unsigned int addr1, addr2;
24403+
24404+#ifdef CONFIG_X86_64
24405+ if ((regs->ip + 9) >> 32)
24406+ break;
24407+#endif
24408+
24409+ err = get_user(mov, (unsigned char __user *)regs->ip);
24410+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24411+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24412+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24413+
24414+ if (err)
24415+ break;
24416+
24417+ if (mov == 0xB9 && jmp == 0xE9) {
24418+ regs->cx = addr1;
24419+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24420+ return 2;
24421+ }
24422+ } while (0);
24423+
24424+ return 1; /* PaX in action */
24425+}
24426+
24427+#ifdef CONFIG_X86_64
24428+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24429+{
24430+ int err;
24431+
24432+ do { /* PaX: libffi trampoline emulation */
24433+ unsigned short mov1, mov2, jmp1;
24434+ unsigned char stcclc, jmp2;
24435+ unsigned long addr1, addr2;
24436+
24437+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24438+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24439+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24440+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24441+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24442+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24443+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24444+
24445+ if (err)
24446+ break;
24447+
24448+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24449+ regs->r11 = addr1;
24450+ regs->r10 = addr2;
24451+ if (stcclc == 0xF8)
24452+ regs->flags &= ~X86_EFLAGS_CF;
24453+ else
24454+ regs->flags |= X86_EFLAGS_CF;
24455+ regs->ip = addr1;
24456+ return 2;
24457+ }
24458+ } while (0);
24459+
24460+ do { /* PaX: gcc trampoline emulation #1 */
24461+ unsigned short mov1, mov2, jmp1;
24462+ unsigned char jmp2;
24463+ unsigned int addr1;
24464+ unsigned long addr2;
24465+
24466+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24467+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24468+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24469+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24470+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24471+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24472+
24473+ if (err)
24474+ break;
24475+
24476+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24477+ regs->r11 = addr1;
24478+ regs->r10 = addr2;
24479+ regs->ip = addr1;
24480+ return 2;
24481+ }
24482+ } while (0);
24483+
24484+ do { /* PaX: gcc trampoline emulation #2 */
24485+ unsigned short mov1, mov2, jmp1;
24486+ unsigned char jmp2;
24487+ unsigned long addr1, addr2;
24488+
24489+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24490+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24491+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24492+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24493+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24494+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24495+
24496+ if (err)
24497+ break;
24498+
24499+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24500+ regs->r11 = addr1;
24501+ regs->r10 = addr2;
24502+ regs->ip = addr1;
24503+ return 2;
24504+ }
24505+ } while (0);
24506+
24507+ return 1; /* PaX in action */
24508+}
24509+#endif
24510+
24511+/*
24512+ * PaX: decide what to do with offenders (regs->ip = fault address)
24513+ *
24514+ * returns 1 when task should be killed
24515+ * 2 when gcc trampoline was detected
24516+ */
24517+static int pax_handle_fetch_fault(struct pt_regs *regs)
24518+{
24519+ if (v8086_mode(regs))
24520+ return 1;
24521+
24522+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24523+ return 1;
24524+
24525+#ifdef CONFIG_X86_32
24526+ return pax_handle_fetch_fault_32(regs);
24527+#else
24528+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24529+ return pax_handle_fetch_fault_32(regs);
24530+ else
24531+ return pax_handle_fetch_fault_64(regs);
24532+#endif
24533+}
24534+#endif
24535+
24536+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24537+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24538+{
24539+ long i;
24540+
24541+ printk(KERN_ERR "PAX: bytes at PC: ");
24542+ for (i = 0; i < 20; i++) {
24543+ unsigned char c;
24544+ if (get_user(c, (unsigned char __force_user *)pc+i))
24545+ printk(KERN_CONT "?? ");
24546+ else
24547+ printk(KERN_CONT "%02x ", c);
24548+ }
24549+ printk("\n");
24550+
24551+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24552+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24553+ unsigned long c;
24554+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24555+#ifdef CONFIG_X86_32
24556+ printk(KERN_CONT "???????? ");
24557+#else
24558+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24559+ printk(KERN_CONT "???????? ???????? ");
24560+ else
24561+ printk(KERN_CONT "???????????????? ");
24562+#endif
24563+ } else {
24564+#ifdef CONFIG_X86_64
24565+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24566+ printk(KERN_CONT "%08x ", (unsigned int)c);
24567+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24568+ } else
24569+#endif
24570+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24571+ }
24572+ }
24573+ printk("\n");
24574+}
24575+#endif
24576+
24577+/**
24578+ * probe_kernel_write(): safely attempt to write to a location
24579+ * @dst: address to write to
24580+ * @src: pointer to the data that shall be written
24581+ * @size: size of the data chunk
24582+ *
24583+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24584+ * happens, handle that and return -EFAULT.
24585+ */
24586+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24587+{
24588+ long ret;
24589+ mm_segment_t old_fs = get_fs();
24590+
24591+ set_fs(KERNEL_DS);
24592+ pagefault_disable();
24593+ pax_open_kernel();
24594+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24595+ pax_close_kernel();
24596+ pagefault_enable();
24597+ set_fs(old_fs);
24598+
24599+ return ret ? -EFAULT : 0;
24600+}
24601diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24602index 71da1bc..7a16bf4 100644
24603--- a/arch/x86/mm/gup.c
24604+++ b/arch/x86/mm/gup.c
24605@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24606 addr = start;
24607 len = (unsigned long) nr_pages << PAGE_SHIFT;
24608 end = start + len;
24609- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24610+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24611 (void __user *)start, len)))
24612 return 0;
24613
24614diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24615index 63a6ba6..79abd7a 100644
24616--- a/arch/x86/mm/highmem_32.c
24617+++ b/arch/x86/mm/highmem_32.c
24618@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24619 idx = type + KM_TYPE_NR*smp_processor_id();
24620 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24621 BUG_ON(!pte_none(*(kmap_pte-idx)));
24622+
24623+ pax_open_kernel();
24624 set_pte(kmap_pte-idx, mk_pte(page, prot));
24625+ pax_close_kernel();
24626
24627 return (void *)vaddr;
24628 }
24629diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24630index f46c340..6ff9a26 100644
24631--- a/arch/x86/mm/hugetlbpage.c
24632+++ b/arch/x86/mm/hugetlbpage.c
24633@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24634 struct hstate *h = hstate_file(file);
24635 struct mm_struct *mm = current->mm;
24636 struct vm_area_struct *vma;
24637- unsigned long start_addr;
24638+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24639+
24640+#ifdef CONFIG_PAX_SEGMEXEC
24641+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24642+ pax_task_size = SEGMEXEC_TASK_SIZE;
24643+#endif
24644+
24645+ pax_task_size -= PAGE_SIZE;
24646
24647 if (len > mm->cached_hole_size) {
24648- start_addr = mm->free_area_cache;
24649+ start_addr = mm->free_area_cache;
24650 } else {
24651- start_addr = TASK_UNMAPPED_BASE;
24652- mm->cached_hole_size = 0;
24653+ start_addr = mm->mmap_base;
24654+ mm->cached_hole_size = 0;
24655 }
24656
24657 full_search:
24658@@ -281,26 +288,27 @@ full_search:
24659
24660 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24661 /* At this point: (!vma || addr < vma->vm_end). */
24662- if (TASK_SIZE - len < addr) {
24663+ if (pax_task_size - len < addr) {
24664 /*
24665 * Start a new search - just in case we missed
24666 * some holes.
24667 */
24668- if (start_addr != TASK_UNMAPPED_BASE) {
24669- start_addr = TASK_UNMAPPED_BASE;
24670+ if (start_addr != mm->mmap_base) {
24671+ start_addr = mm->mmap_base;
24672 mm->cached_hole_size = 0;
24673 goto full_search;
24674 }
24675 return -ENOMEM;
24676 }
24677- if (!vma || addr + len <= vma->vm_start) {
24678- mm->free_area_cache = addr + len;
24679- return addr;
24680- }
24681+ if (check_heap_stack_gap(vma, addr, len))
24682+ break;
24683 if (addr + mm->cached_hole_size < vma->vm_start)
24684 mm->cached_hole_size = vma->vm_start - addr;
24685 addr = ALIGN(vma->vm_end, huge_page_size(h));
24686 }
24687+
24688+ mm->free_area_cache = addr + len;
24689+ return addr;
24690 }
24691
24692 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24693@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24694 {
24695 struct hstate *h = hstate_file(file);
24696 struct mm_struct *mm = current->mm;
24697- struct vm_area_struct *vma, *prev_vma;
24698- unsigned long base = mm->mmap_base, addr = addr0;
24699+ struct vm_area_struct *vma;
24700+ unsigned long base = mm->mmap_base, addr;
24701 unsigned long largest_hole = mm->cached_hole_size;
24702- int first_time = 1;
24703
24704 /* don't allow allocations above current base */
24705 if (mm->free_area_cache > base)
24706@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24707 largest_hole = 0;
24708 mm->free_area_cache = base;
24709 }
24710-try_again:
24711+
24712 /* make sure it can fit in the remaining address space */
24713 if (mm->free_area_cache < len)
24714 goto fail;
24715
24716 /* either no address requested or cant fit in requested address hole */
24717- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24718+ addr = (mm->free_area_cache - len);
24719 do {
24720+ addr &= huge_page_mask(h);
24721+ vma = find_vma(mm, addr);
24722 /*
24723 * Lookup failure means no vma is above this address,
24724 * i.e. return with success:
24725- */
24726- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24727- return addr;
24728-
24729- /*
24730 * new region fits between prev_vma->vm_end and
24731 * vma->vm_start, use it:
24732 */
24733- if (addr + len <= vma->vm_start &&
24734- (!prev_vma || (addr >= prev_vma->vm_end))) {
24735+ if (check_heap_stack_gap(vma, addr, len)) {
24736 /* remember the address as a hint for next time */
24737- mm->cached_hole_size = largest_hole;
24738- return (mm->free_area_cache = addr);
24739- } else {
24740- /* pull free_area_cache down to the first hole */
24741- if (mm->free_area_cache == vma->vm_end) {
24742- mm->free_area_cache = vma->vm_start;
24743- mm->cached_hole_size = largest_hole;
24744- }
24745+ mm->cached_hole_size = largest_hole;
24746+ return (mm->free_area_cache = addr);
24747+ }
24748+ /* pull free_area_cache down to the first hole */
24749+ if (mm->free_area_cache == vma->vm_end) {
24750+ mm->free_area_cache = vma->vm_start;
24751+ mm->cached_hole_size = largest_hole;
24752 }
24753
24754 /* remember the largest hole we saw so far */
24755 if (addr + largest_hole < vma->vm_start)
24756- largest_hole = vma->vm_start - addr;
24757+ largest_hole = vma->vm_start - addr;
24758
24759 /* try just below the current vma->vm_start */
24760- addr = (vma->vm_start - len) & huge_page_mask(h);
24761- } while (len <= vma->vm_start);
24762+ addr = skip_heap_stack_gap(vma, len);
24763+ } while (!IS_ERR_VALUE(addr));
24764
24765 fail:
24766 /*
24767- * if hint left us with no space for the requested
24768- * mapping then try again:
24769- */
24770- if (first_time) {
24771- mm->free_area_cache = base;
24772- largest_hole = 0;
24773- first_time = 0;
24774- goto try_again;
24775- }
24776- /*
24777 * A failed mmap() very likely causes application failure,
24778 * so fall back to the bottom-up function here. This scenario
24779 * can happen with large stack limits and large mmap()
24780 * allocations.
24781 */
24782- mm->free_area_cache = TASK_UNMAPPED_BASE;
24783+
24784+#ifdef CONFIG_PAX_SEGMEXEC
24785+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24786+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24787+ else
24788+#endif
24789+
24790+ mm->mmap_base = TASK_UNMAPPED_BASE;
24791+
24792+#ifdef CONFIG_PAX_RANDMMAP
24793+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24794+ mm->mmap_base += mm->delta_mmap;
24795+#endif
24796+
24797+ mm->free_area_cache = mm->mmap_base;
24798 mm->cached_hole_size = ~0UL;
24799 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24800 len, pgoff, flags);
24801@@ -387,6 +393,7 @@ fail:
24802 /*
24803 * Restore the topdown base:
24804 */
24805+ mm->mmap_base = base;
24806 mm->free_area_cache = base;
24807 mm->cached_hole_size = ~0UL;
24808
24809@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24810 struct hstate *h = hstate_file(file);
24811 struct mm_struct *mm = current->mm;
24812 struct vm_area_struct *vma;
24813+ unsigned long pax_task_size = TASK_SIZE;
24814
24815 if (len & ~huge_page_mask(h))
24816 return -EINVAL;
24817- if (len > TASK_SIZE)
24818+
24819+#ifdef CONFIG_PAX_SEGMEXEC
24820+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24821+ pax_task_size = SEGMEXEC_TASK_SIZE;
24822+#endif
24823+
24824+ pax_task_size -= PAGE_SIZE;
24825+
24826+ if (len > pax_task_size)
24827 return -ENOMEM;
24828
24829 if (flags & MAP_FIXED) {
24830@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24831 if (addr) {
24832 addr = ALIGN(addr, huge_page_size(h));
24833 vma = find_vma(mm, addr);
24834- if (TASK_SIZE - len >= addr &&
24835- (!vma || addr + len <= vma->vm_start))
24836+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24837 return addr;
24838 }
24839 if (mm->get_unmapped_area == arch_get_unmapped_area)
24840diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24841index 73ffd55..f61c2a7 100644
24842--- a/arch/x86/mm/init.c
24843+++ b/arch/x86/mm/init.c
24844@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
24845 * cause a hotspot and fill up ZONE_DMA. The page tables
24846 * need roughly 0.5KB per GB.
24847 */
24848-#ifdef CONFIG_X86_32
24849- start = 0x7000;
24850-#else
24851- start = 0x8000;
24852-#endif
24853+ start = 0x100000;
24854 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
24855 tables, PAGE_SIZE);
24856 if (e820_table_start == -1UL)
24857@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24858 #endif
24859
24860 set_nx();
24861- if (nx_enabled)
24862+ if (nx_enabled && cpu_has_nx)
24863 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
24864
24865 /* Enable PSE if available */
24866@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24867 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24868 * mmio resources as well as potential bios/acpi data regions.
24869 */
24870+
24871 int devmem_is_allowed(unsigned long pagenr)
24872 {
24873+#ifdef CONFIG_GRKERNSEC_KMEM
24874+ /* allow BDA */
24875+ if (!pagenr)
24876+ return 1;
24877+ /* allow EBDA */
24878+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24879+ return 1;
24880+ /* allow ISA/video mem */
24881+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24882+ return 1;
24883+ /* throw out everything else below 1MB */
24884+ if (pagenr <= 256)
24885+ return 0;
24886+#else
24887 if (pagenr <= 256)
24888 return 1;
24889+#endif
24890+
24891 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24892 return 0;
24893 if (!page_is_ram(pagenr))
24894@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24895
24896 void free_initmem(void)
24897 {
24898+
24899+#ifdef CONFIG_PAX_KERNEXEC
24900+#ifdef CONFIG_X86_32
24901+ /* PaX: limit KERNEL_CS to actual size */
24902+ unsigned long addr, limit;
24903+ struct desc_struct d;
24904+ int cpu;
24905+
24906+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24907+ limit = (limit - 1UL) >> PAGE_SHIFT;
24908+
24909+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24910+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
24911+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24912+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24913+ }
24914+
24915+ /* PaX: make KERNEL_CS read-only */
24916+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24917+ if (!paravirt_enabled())
24918+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24919+/*
24920+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24921+ pgd = pgd_offset_k(addr);
24922+ pud = pud_offset(pgd, addr);
24923+ pmd = pmd_offset(pud, addr);
24924+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24925+ }
24926+*/
24927+#ifdef CONFIG_X86_PAE
24928+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24929+/*
24930+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24931+ pgd = pgd_offset_k(addr);
24932+ pud = pud_offset(pgd, addr);
24933+ pmd = pmd_offset(pud, addr);
24934+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24935+ }
24936+*/
24937+#endif
24938+
24939+#ifdef CONFIG_MODULES
24940+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24941+#endif
24942+
24943+#else
24944+ pgd_t *pgd;
24945+ pud_t *pud;
24946+ pmd_t *pmd;
24947+ unsigned long addr, end;
24948+
24949+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24950+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24951+ pgd = pgd_offset_k(addr);
24952+ pud = pud_offset(pgd, addr);
24953+ pmd = pmd_offset(pud, addr);
24954+ if (!pmd_present(*pmd))
24955+ continue;
24956+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24957+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24958+ else
24959+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24960+ }
24961+
24962+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24963+ end = addr + KERNEL_IMAGE_SIZE;
24964+ for (; addr < end; addr += PMD_SIZE) {
24965+ pgd = pgd_offset_k(addr);
24966+ pud = pud_offset(pgd, addr);
24967+ pmd = pmd_offset(pud, addr);
24968+ if (!pmd_present(*pmd))
24969+ continue;
24970+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24971+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24972+ }
24973+#endif
24974+
24975+ flush_tlb_all();
24976+#endif
24977+
24978 free_init_pages("unused kernel memory",
24979 (unsigned long)(&__init_begin),
24980 (unsigned long)(&__init_end));
24981diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24982index 30938c1..bda3d5d 100644
24983--- a/arch/x86/mm/init_32.c
24984+++ b/arch/x86/mm/init_32.c
24985@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
24986 }
24987
24988 /*
24989- * Creates a middle page table and puts a pointer to it in the
24990- * given global directory entry. This only returns the gd entry
24991- * in non-PAE compilation mode, since the middle layer is folded.
24992- */
24993-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24994-{
24995- pud_t *pud;
24996- pmd_t *pmd_table;
24997-
24998-#ifdef CONFIG_X86_PAE
24999- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
25000- if (after_bootmem)
25001- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
25002- else
25003- pmd_table = (pmd_t *)alloc_low_page();
25004- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
25005- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
25006- pud = pud_offset(pgd, 0);
25007- BUG_ON(pmd_table != pmd_offset(pud, 0));
25008-
25009- return pmd_table;
25010- }
25011-#endif
25012- pud = pud_offset(pgd, 0);
25013- pmd_table = pmd_offset(pud, 0);
25014-
25015- return pmd_table;
25016-}
25017-
25018-/*
25019 * Create a page table and place a pointer to it in a middle page
25020 * directory entry:
25021 */
25022@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25023 page_table = (pte_t *)alloc_low_page();
25024
25025 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25026+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25027+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25028+#else
25029 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25030+#endif
25031 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25032 }
25033
25034 return pte_offset_kernel(pmd, 0);
25035 }
25036
25037+static pmd_t * __init one_md_table_init(pgd_t *pgd)
25038+{
25039+ pud_t *pud;
25040+ pmd_t *pmd_table;
25041+
25042+ pud = pud_offset(pgd, 0);
25043+ pmd_table = pmd_offset(pud, 0);
25044+
25045+ return pmd_table;
25046+}
25047+
25048 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25049 {
25050 int pgd_idx = pgd_index(vaddr);
25051@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25052 int pgd_idx, pmd_idx;
25053 unsigned long vaddr;
25054 pgd_t *pgd;
25055+ pud_t *pud;
25056 pmd_t *pmd;
25057 pte_t *pte = NULL;
25058
25059@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25060 pgd = pgd_base + pgd_idx;
25061
25062 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25063- pmd = one_md_table_init(pgd);
25064- pmd = pmd + pmd_index(vaddr);
25065+ pud = pud_offset(pgd, vaddr);
25066+ pmd = pmd_offset(pud, vaddr);
25067+
25068+#ifdef CONFIG_X86_PAE
25069+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25070+#endif
25071+
25072 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25073 pmd++, pmd_idx++) {
25074 pte = page_table_kmap_check(one_page_table_init(pmd),
25075@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25076 }
25077 }
25078
25079-static inline int is_kernel_text(unsigned long addr)
25080+static inline int is_kernel_text(unsigned long start, unsigned long end)
25081 {
25082- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25083- return 1;
25084- return 0;
25085+ if ((start > ktla_ktva((unsigned long)_etext) ||
25086+ end <= ktla_ktva((unsigned long)_stext)) &&
25087+ (start > ktla_ktva((unsigned long)_einittext) ||
25088+ end <= ktla_ktva((unsigned long)_sinittext)) &&
25089+
25090+#ifdef CONFIG_ACPI_SLEEP
25091+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25092+#endif
25093+
25094+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25095+ return 0;
25096+ return 1;
25097 }
25098
25099 /*
25100@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25101 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25102 unsigned long start_pfn, end_pfn;
25103 pgd_t *pgd_base = swapper_pg_dir;
25104- int pgd_idx, pmd_idx, pte_ofs;
25105+ unsigned int pgd_idx, pmd_idx, pte_ofs;
25106 unsigned long pfn;
25107 pgd_t *pgd;
25108+ pud_t *pud;
25109 pmd_t *pmd;
25110 pte_t *pte;
25111 unsigned pages_2m, pages_4k;
25112@@ -278,8 +279,13 @@ repeat:
25113 pfn = start_pfn;
25114 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25115 pgd = pgd_base + pgd_idx;
25116- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25117- pmd = one_md_table_init(pgd);
25118+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25119+ pud = pud_offset(pgd, 0);
25120+ pmd = pmd_offset(pud, 0);
25121+
25122+#ifdef CONFIG_X86_PAE
25123+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25124+#endif
25125
25126 if (pfn >= end_pfn)
25127 continue;
25128@@ -291,14 +297,13 @@ repeat:
25129 #endif
25130 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25131 pmd++, pmd_idx++) {
25132- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25133+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25134
25135 /*
25136 * Map with big pages if possible, otherwise
25137 * create normal page tables:
25138 */
25139 if (use_pse) {
25140- unsigned int addr2;
25141 pgprot_t prot = PAGE_KERNEL_LARGE;
25142 /*
25143 * first pass will use the same initial
25144@@ -308,11 +313,7 @@ repeat:
25145 __pgprot(PTE_IDENT_ATTR |
25146 _PAGE_PSE);
25147
25148- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25149- PAGE_OFFSET + PAGE_SIZE-1;
25150-
25151- if (is_kernel_text(addr) ||
25152- is_kernel_text(addr2))
25153+ if (is_kernel_text(address, address + PMD_SIZE))
25154 prot = PAGE_KERNEL_LARGE_EXEC;
25155
25156 pages_2m++;
25157@@ -329,7 +330,7 @@ repeat:
25158 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25159 pte += pte_ofs;
25160 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25161- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25162+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25163 pgprot_t prot = PAGE_KERNEL;
25164 /*
25165 * first pass will use the same initial
25166@@ -337,7 +338,7 @@ repeat:
25167 */
25168 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25169
25170- if (is_kernel_text(addr))
25171+ if (is_kernel_text(address, address + PAGE_SIZE))
25172 prot = PAGE_KERNEL_EXEC;
25173
25174 pages_4k++;
25175@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25176
25177 pud = pud_offset(pgd, va);
25178 pmd = pmd_offset(pud, va);
25179- if (!pmd_present(*pmd))
25180+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
25181 break;
25182
25183 pte = pte_offset_kernel(pmd, va);
25184@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25185
25186 static void __init pagetable_init(void)
25187 {
25188- pgd_t *pgd_base = swapper_pg_dir;
25189-
25190- permanent_kmaps_init(pgd_base);
25191+ permanent_kmaps_init(swapper_pg_dir);
25192 }
25193
25194 #ifdef CONFIG_ACPI_SLEEP
25195@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25196 * ACPI suspend needs this for resume, because things like the intel-agp
25197 * driver might have split up a kernel 4MB mapping.
25198 */
25199-char swsusp_pg_dir[PAGE_SIZE]
25200+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25201 __attribute__ ((aligned(PAGE_SIZE)));
25202
25203 static inline void save_pg_dir(void)
25204 {
25205- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25206+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25207 }
25208 #else /* !CONFIG_ACPI_SLEEP */
25209 static inline void save_pg_dir(void)
25210@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25211 flush_tlb_all();
25212 }
25213
25214-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25215+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25216 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25217
25218 /* user-defined highmem size */
25219@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25220 * Initialize the boot-time allocator (with low memory only):
25221 */
25222 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25223- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25224+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25225 PAGE_SIZE);
25226 if (bootmap == -1L)
25227 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25228@@ -864,6 +863,12 @@ void __init mem_init(void)
25229
25230 pci_iommu_alloc();
25231
25232+#ifdef CONFIG_PAX_PER_CPU_PGD
25233+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25234+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25235+ KERNEL_PGD_PTRS);
25236+#endif
25237+
25238 #ifdef CONFIG_FLATMEM
25239 BUG_ON(!mem_map);
25240 #endif
25241@@ -881,7 +886,7 @@ void __init mem_init(void)
25242 set_highmem_pages_init();
25243
25244 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25245- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25246+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25247 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25248
25249 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25250@@ -923,10 +928,10 @@ void __init mem_init(void)
25251 ((unsigned long)&__init_end -
25252 (unsigned long)&__init_begin) >> 10,
25253
25254- (unsigned long)&_etext, (unsigned long)&_edata,
25255- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25256+ (unsigned long)&_sdata, (unsigned long)&_edata,
25257+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25258
25259- (unsigned long)&_text, (unsigned long)&_etext,
25260+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25261 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25262
25263 /*
25264@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25265 if (!kernel_set_to_readonly)
25266 return;
25267
25268+ start = ktla_ktva(start);
25269 pr_debug("Set kernel text: %lx - %lx for read write\n",
25270 start, start+size);
25271
25272@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25273 if (!kernel_set_to_readonly)
25274 return;
25275
25276+ start = ktla_ktva(start);
25277 pr_debug("Set kernel text: %lx - %lx for read only\n",
25278 start, start+size);
25279
25280@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25281 unsigned long start = PFN_ALIGN(_text);
25282 unsigned long size = PFN_ALIGN(_etext) - start;
25283
25284+ start = ktla_ktva(start);
25285 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25286 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25287 size >> 10);
25288diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25289index 7d095ad..25d2549 100644
25290--- a/arch/x86/mm/init_64.c
25291+++ b/arch/x86/mm/init_64.c
25292@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25293 pmd = fill_pmd(pud, vaddr);
25294 pte = fill_pte(pmd, vaddr);
25295
25296+ pax_open_kernel();
25297 set_pte(pte, new_pte);
25298+ pax_close_kernel();
25299
25300 /*
25301 * It's enough to flush this one mapping.
25302@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25303 pgd = pgd_offset_k((unsigned long)__va(phys));
25304 if (pgd_none(*pgd)) {
25305 pud = (pud_t *) spp_getpage();
25306- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25307- _PAGE_USER));
25308+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25309 }
25310 pud = pud_offset(pgd, (unsigned long)__va(phys));
25311 if (pud_none(*pud)) {
25312 pmd = (pmd_t *) spp_getpage();
25313- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25314- _PAGE_USER));
25315+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25316 }
25317 pmd = pmd_offset(pud, phys);
25318 BUG_ON(!pmd_none(*pmd));
25319@@ -675,6 +675,12 @@ void __init mem_init(void)
25320
25321 pci_iommu_alloc();
25322
25323+#ifdef CONFIG_PAX_PER_CPU_PGD
25324+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25325+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25326+ KERNEL_PGD_PTRS);
25327+#endif
25328+
25329 /* clear_bss() already clear the empty_zero_page */
25330
25331 reservedpages = 0;
25332@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25333 static struct vm_area_struct gate_vma = {
25334 .vm_start = VSYSCALL_START,
25335 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25336- .vm_page_prot = PAGE_READONLY_EXEC,
25337- .vm_flags = VM_READ | VM_EXEC
25338+ .vm_page_prot = PAGE_READONLY,
25339+ .vm_flags = VM_READ
25340 };
25341
25342 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25343@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25344
25345 const char *arch_vma_name(struct vm_area_struct *vma)
25346 {
25347- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25348+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25349 return "[vdso]";
25350 if (vma == &gate_vma)
25351 return "[vsyscall]";
25352diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25353index 84e236c..69bd3f6 100644
25354--- a/arch/x86/mm/iomap_32.c
25355+++ b/arch/x86/mm/iomap_32.c
25356@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25357 debug_kmap_atomic(type);
25358 idx = type + KM_TYPE_NR * smp_processor_id();
25359 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25360+
25361+ pax_open_kernel();
25362 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25363+ pax_close_kernel();
25364+
25365 arch_flush_lazy_mmu_mode();
25366
25367 return (void *)vaddr;
25368diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25369index 2feb9bd..ab91e7b 100644
25370--- a/arch/x86/mm/ioremap.c
25371+++ b/arch/x86/mm/ioremap.c
25372@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25373 * Second special case: Some BIOSen report the PC BIOS
25374 * area (640->1Mb) as ram even though it is not.
25375 */
25376- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25377- pagenr < (BIOS_END >> PAGE_SHIFT))
25378+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25379+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25380 return 0;
25381
25382 for (i = 0; i < e820.nr_map; i++) {
25383@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25384 /*
25385 * Don't allow anybody to remap normal RAM that we're using..
25386 */
25387- for (pfn = phys_addr >> PAGE_SHIFT;
25388- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25389- pfn++) {
25390-
25391+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25392 int is_ram = page_is_ram(pfn);
25393
25394- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25395+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25396 return NULL;
25397 WARN_ON_ONCE(is_ram);
25398 }
25399@@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25400
25401 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25402 if (page_is_ram(start >> PAGE_SHIFT))
25403+#ifdef CONFIG_HIGHMEM
25404+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25405+#endif
25406 return __va(phys);
25407
25408 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
25409@@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
25410 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25411
25412 static __initdata int after_paging_init;
25413-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25414+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25415
25416 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25417 {
25418@@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
25419 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25420
25421 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25422- memset(bm_pte, 0, sizeof(bm_pte));
25423- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25424+ pmd_populate_user(&init_mm, pmd, bm_pte);
25425
25426 /*
25427 * The boot-ioremap range spans multiple pmds, for which
25428diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25429index 8cc1833..1abbc5b 100644
25430--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25431+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25432@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25433 * memory (e.g. tracked pages)? For now, we need this to avoid
25434 * invoking kmemcheck for PnP BIOS calls.
25435 */
25436- if (regs->flags & X86_VM_MASK)
25437+ if (v8086_mode(regs))
25438 return false;
25439- if (regs->cs != __KERNEL_CS)
25440+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25441 return false;
25442
25443 pte = kmemcheck_pte_lookup(address);
25444diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25445index c9e57af..07a321b 100644
25446--- a/arch/x86/mm/mmap.c
25447+++ b/arch/x86/mm/mmap.c
25448@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25449 * Leave an at least ~128 MB hole with possible stack randomization.
25450 */
25451 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25452-#define MAX_GAP (TASK_SIZE/6*5)
25453+#define MAX_GAP (pax_task_size/6*5)
25454
25455 /*
25456 * True on X86_32 or when emulating IA32 on X86_64
25457@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25458 return rnd << PAGE_SHIFT;
25459 }
25460
25461-static unsigned long mmap_base(void)
25462+static unsigned long mmap_base(struct mm_struct *mm)
25463 {
25464 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25465+ unsigned long pax_task_size = TASK_SIZE;
25466+
25467+#ifdef CONFIG_PAX_SEGMEXEC
25468+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25469+ pax_task_size = SEGMEXEC_TASK_SIZE;
25470+#endif
25471
25472 if (gap < MIN_GAP)
25473 gap = MIN_GAP;
25474 else if (gap > MAX_GAP)
25475 gap = MAX_GAP;
25476
25477- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25478+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25479 }
25480
25481 /*
25482 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25483 * does, but not when emulating X86_32
25484 */
25485-static unsigned long mmap_legacy_base(void)
25486+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25487 {
25488- if (mmap_is_ia32())
25489+ if (mmap_is_ia32()) {
25490+
25491+#ifdef CONFIG_PAX_SEGMEXEC
25492+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25493+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25494+ else
25495+#endif
25496+
25497 return TASK_UNMAPPED_BASE;
25498- else
25499+ } else
25500 return TASK_UNMAPPED_BASE + mmap_rnd();
25501 }
25502
25503@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25504 void arch_pick_mmap_layout(struct mm_struct *mm)
25505 {
25506 if (mmap_is_legacy()) {
25507- mm->mmap_base = mmap_legacy_base();
25508+ mm->mmap_base = mmap_legacy_base(mm);
25509+
25510+#ifdef CONFIG_PAX_RANDMMAP
25511+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25512+ mm->mmap_base += mm->delta_mmap;
25513+#endif
25514+
25515 mm->get_unmapped_area = arch_get_unmapped_area;
25516 mm->unmap_area = arch_unmap_area;
25517 } else {
25518- mm->mmap_base = mmap_base();
25519+ mm->mmap_base = mmap_base(mm);
25520+
25521+#ifdef CONFIG_PAX_RANDMMAP
25522+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25523+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25524+#endif
25525+
25526 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25527 mm->unmap_area = arch_unmap_area_topdown;
25528 }
25529diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25530index 132772a..b961f11 100644
25531--- a/arch/x86/mm/mmio-mod.c
25532+++ b/arch/x86/mm/mmio-mod.c
25533@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25534 break;
25535 default:
25536 {
25537- unsigned char *ip = (unsigned char *)instptr;
25538+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25539 my_trace->opcode = MMIO_UNKNOWN_OP;
25540 my_trace->width = 0;
25541 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25542@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25543 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25544 void __iomem *addr)
25545 {
25546- static atomic_t next_id;
25547+ static atomic_unchecked_t next_id;
25548 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25549 /* These are page-unaligned. */
25550 struct mmiotrace_map map = {
25551@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25552 .private = trace
25553 },
25554 .phys = offset,
25555- .id = atomic_inc_return(&next_id)
25556+ .id = atomic_inc_return_unchecked(&next_id)
25557 };
25558 map.map_id = trace->id;
25559
25560diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25561index d253006..e56dd6a 100644
25562--- a/arch/x86/mm/numa_32.c
25563+++ b/arch/x86/mm/numa_32.c
25564@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25565 }
25566 #endif
25567
25568-extern unsigned long find_max_low_pfn(void);
25569 extern unsigned long highend_pfn, highstart_pfn;
25570
25571 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25572diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25573index e1d1069..2251ff3 100644
25574--- a/arch/x86/mm/pageattr-test.c
25575+++ b/arch/x86/mm/pageattr-test.c
25576@@ -36,7 +36,7 @@ enum {
25577
25578 static int pte_testbit(pte_t pte)
25579 {
25580- return pte_flags(pte) & _PAGE_UNUSED1;
25581+ return pte_flags(pte) & _PAGE_CPA_TEST;
25582 }
25583
25584 struct split_state {
25585diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25586index dd38bfb..b72c63e 100644
25587--- a/arch/x86/mm/pageattr.c
25588+++ b/arch/x86/mm/pageattr.c
25589@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25590 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25591 */
25592 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25593- pgprot_val(forbidden) |= _PAGE_NX;
25594+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25595
25596 /*
25597 * The kernel text needs to be executable for obvious reasons
25598 * Does not cover __inittext since that is gone later on. On
25599 * 64bit we do not enforce !NX on the low mapping
25600 */
25601- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25602- pgprot_val(forbidden) |= _PAGE_NX;
25603+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25604+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25605
25606+#ifdef CONFIG_DEBUG_RODATA
25607 /*
25608 * The .rodata section needs to be read-only. Using the pfn
25609 * catches all aliases.
25610@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25611 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25612 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25613 pgprot_val(forbidden) |= _PAGE_RW;
25614+#endif
25615+
25616+#ifdef CONFIG_PAX_KERNEXEC
25617+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25618+ pgprot_val(forbidden) |= _PAGE_RW;
25619+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25620+ }
25621+#endif
25622
25623 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25624
25625@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25626 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25627 {
25628 /* change init_mm */
25629+ pax_open_kernel();
25630 set_pte_atomic(kpte, pte);
25631+
25632 #ifdef CONFIG_X86_32
25633 if (!SHARED_KERNEL_PMD) {
25634+
25635+#ifdef CONFIG_PAX_PER_CPU_PGD
25636+ unsigned long cpu;
25637+#else
25638 struct page *page;
25639+#endif
25640
25641+#ifdef CONFIG_PAX_PER_CPU_PGD
25642+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
25643+ pgd_t *pgd = get_cpu_pgd(cpu);
25644+#else
25645 list_for_each_entry(page, &pgd_list, lru) {
25646- pgd_t *pgd;
25647+ pgd_t *pgd = (pgd_t *)page_address(page);
25648+#endif
25649+
25650 pud_t *pud;
25651 pmd_t *pmd;
25652
25653- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25654+ pgd += pgd_index(address);
25655 pud = pud_offset(pgd, address);
25656 pmd = pmd_offset(pud, address);
25657 set_pte_atomic((pte_t *)pmd, pte);
25658 }
25659 }
25660 #endif
25661+ pax_close_kernel();
25662 }
25663
25664 static int
25665diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25666index e78cd0e..de0a817 100644
25667--- a/arch/x86/mm/pat.c
25668+++ b/arch/x86/mm/pat.c
25669@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25670
25671 conflict:
25672 printk(KERN_INFO "%s:%d conflicting memory types "
25673- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25674+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25675 new->end, cattr_name(new->type), cattr_name(entry->type));
25676 return -EBUSY;
25677 }
25678@@ -559,7 +559,7 @@ unlock_ret:
25679
25680 if (err) {
25681 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25682- current->comm, current->pid, start, end);
25683+ current->comm, task_pid_nr(current), start, end);
25684 }
25685
25686 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25687@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25688 while (cursor < to) {
25689 if (!devmem_is_allowed(pfn)) {
25690 printk(KERN_INFO
25691- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25692- current->comm, from, to);
25693+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25694+ current->comm, from, to, cursor);
25695 return 0;
25696 }
25697 cursor += PAGE_SIZE;
25698@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25699 printk(KERN_INFO
25700 "%s:%d ioremap_change_attr failed %s "
25701 "for %Lx-%Lx\n",
25702- current->comm, current->pid,
25703+ current->comm, task_pid_nr(current),
25704 cattr_name(flags),
25705 base, (unsigned long long)(base + size));
25706 return -EINVAL;
25707@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25708 free_memtype(paddr, paddr + size);
25709 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25710 " for %Lx-%Lx, got %s\n",
25711- current->comm, current->pid,
25712+ current->comm, task_pid_nr(current),
25713 cattr_name(want_flags),
25714 (unsigned long long)paddr,
25715 (unsigned long long)(paddr + size),
25716diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25717index df3d5c8..c2223e1 100644
25718--- a/arch/x86/mm/pf_in.c
25719+++ b/arch/x86/mm/pf_in.c
25720@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25721 int i;
25722 enum reason_type rv = OTHERS;
25723
25724- p = (unsigned char *)ins_addr;
25725+ p = (unsigned char *)ktla_ktva(ins_addr);
25726 p += skip_prefix(p, &prf);
25727 p += get_opcode(p, &opcode);
25728
25729@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25730 struct prefix_bits prf;
25731 int i;
25732
25733- p = (unsigned char *)ins_addr;
25734+ p = (unsigned char *)ktla_ktva(ins_addr);
25735 p += skip_prefix(p, &prf);
25736 p += get_opcode(p, &opcode);
25737
25738@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25739 struct prefix_bits prf;
25740 int i;
25741
25742- p = (unsigned char *)ins_addr;
25743+ p = (unsigned char *)ktla_ktva(ins_addr);
25744 p += skip_prefix(p, &prf);
25745 p += get_opcode(p, &opcode);
25746
25747@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25748 int i;
25749 unsigned long rv;
25750
25751- p = (unsigned char *)ins_addr;
25752+ p = (unsigned char *)ktla_ktva(ins_addr);
25753 p += skip_prefix(p, &prf);
25754 p += get_opcode(p, &opcode);
25755 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25756@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25757 int i;
25758 unsigned long rv;
25759
25760- p = (unsigned char *)ins_addr;
25761+ p = (unsigned char *)ktla_ktva(ins_addr);
25762 p += skip_prefix(p, &prf);
25763 p += get_opcode(p, &opcode);
25764 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25765diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25766index e0e6fad..c56b495 100644
25767--- a/arch/x86/mm/pgtable.c
25768+++ b/arch/x86/mm/pgtable.c
25769@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25770 list_del(&page->lru);
25771 }
25772
25773-#define UNSHARED_PTRS_PER_PGD \
25774- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25775+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25776+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25777
25778+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25779+{
25780+ while (count--)
25781+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25782+}
25783+#endif
25784+
25785+#ifdef CONFIG_PAX_PER_CPU_PGD
25786+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25787+{
25788+ while (count--)
25789+
25790+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25791+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
25792+#else
25793+ *dst++ = *src++;
25794+#endif
25795+
25796+}
25797+#endif
25798+
25799+#ifdef CONFIG_X86_64
25800+#define pxd_t pud_t
25801+#define pyd_t pgd_t
25802+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25803+#define pxd_free(mm, pud) pud_free((mm), (pud))
25804+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25805+#define pyd_offset(mm, address) pgd_offset((mm), (address))
25806+#define PYD_SIZE PGDIR_SIZE
25807+#else
25808+#define pxd_t pmd_t
25809+#define pyd_t pud_t
25810+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25811+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25812+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25813+#define pyd_offset(mm, address) pud_offset((mm), (address))
25814+#define PYD_SIZE PUD_SIZE
25815+#endif
25816+
25817+#ifdef CONFIG_PAX_PER_CPU_PGD
25818+static inline void pgd_ctor(pgd_t *pgd) {}
25819+static inline void pgd_dtor(pgd_t *pgd) {}
25820+#else
25821 static void pgd_ctor(pgd_t *pgd)
25822 {
25823 /* If the pgd points to a shared pagetable level (either the
25824@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
25825 pgd_list_del(pgd);
25826 spin_unlock_irqrestore(&pgd_lock, flags);
25827 }
25828+#endif
25829
25830 /*
25831 * List of all pgd's needed for non-PAE so it can invalidate entries
25832@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
25833 * -- wli
25834 */
25835
25836-#ifdef CONFIG_X86_PAE
25837+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25838 /*
25839 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25840 * updating the top-level pagetable entries to guarantee the
25841@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25842 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25843 * and initialize the kernel pmds here.
25844 */
25845-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25846+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25847
25848 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25849 {
25850@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25851 */
25852 flush_tlb_mm(mm);
25853 }
25854+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25855+#define PREALLOCATED_PXDS USER_PGD_PTRS
25856 #else /* !CONFIG_X86_PAE */
25857
25858 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25859-#define PREALLOCATED_PMDS 0
25860+#define PREALLOCATED_PXDS 0
25861
25862 #endif /* CONFIG_X86_PAE */
25863
25864-static void free_pmds(pmd_t *pmds[])
25865+static void free_pxds(pxd_t *pxds[])
25866 {
25867 int i;
25868
25869- for(i = 0; i < PREALLOCATED_PMDS; i++)
25870- if (pmds[i])
25871- free_page((unsigned long)pmds[i]);
25872+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25873+ if (pxds[i])
25874+ free_page((unsigned long)pxds[i]);
25875 }
25876
25877-static int preallocate_pmds(pmd_t *pmds[])
25878+static int preallocate_pxds(pxd_t *pxds[])
25879 {
25880 int i;
25881 bool failed = false;
25882
25883- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25884- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25885- if (pmd == NULL)
25886+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25887+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25888+ if (pxd == NULL)
25889 failed = true;
25890- pmds[i] = pmd;
25891+ pxds[i] = pxd;
25892 }
25893
25894 if (failed) {
25895- free_pmds(pmds);
25896+ free_pxds(pxds);
25897 return -ENOMEM;
25898 }
25899
25900@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
25901 * preallocate which never got a corresponding vma will need to be
25902 * freed manually.
25903 */
25904-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25905+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25906 {
25907 int i;
25908
25909- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25910+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25911 pgd_t pgd = pgdp[i];
25912
25913 if (pgd_val(pgd) != 0) {
25914- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25915+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25916
25917- pgdp[i] = native_make_pgd(0);
25918+ set_pgd(pgdp + i, native_make_pgd(0));
25919
25920- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25921- pmd_free(mm, pmd);
25922+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25923+ pxd_free(mm, pxd);
25924 }
25925 }
25926 }
25927
25928-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25929+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25930 {
25931- pud_t *pud;
25932+ pyd_t *pyd;
25933 unsigned long addr;
25934 int i;
25935
25936- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25937+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25938 return;
25939
25940- pud = pud_offset(pgd, 0);
25941+#ifdef CONFIG_X86_64
25942+ pyd = pyd_offset(mm, 0L);
25943+#else
25944+ pyd = pyd_offset(pgd, 0L);
25945+#endif
25946
25947- for (addr = i = 0; i < PREALLOCATED_PMDS;
25948- i++, pud++, addr += PUD_SIZE) {
25949- pmd_t *pmd = pmds[i];
25950+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25951+ i++, pyd++, addr += PYD_SIZE) {
25952+ pxd_t *pxd = pxds[i];
25953
25954 if (i >= KERNEL_PGD_BOUNDARY)
25955- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25956- sizeof(pmd_t) * PTRS_PER_PMD);
25957+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25958+ sizeof(pxd_t) * PTRS_PER_PMD);
25959
25960- pud_populate(mm, pud, pmd);
25961+ pyd_populate(mm, pyd, pxd);
25962 }
25963 }
25964
25965 pgd_t *pgd_alloc(struct mm_struct *mm)
25966 {
25967 pgd_t *pgd;
25968- pmd_t *pmds[PREALLOCATED_PMDS];
25969+ pxd_t *pxds[PREALLOCATED_PXDS];
25970+
25971 unsigned long flags;
25972
25973 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25974@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25975
25976 mm->pgd = pgd;
25977
25978- if (preallocate_pmds(pmds) != 0)
25979+ if (preallocate_pxds(pxds) != 0)
25980 goto out_free_pgd;
25981
25982 if (paravirt_pgd_alloc(mm) != 0)
25983- goto out_free_pmds;
25984+ goto out_free_pxds;
25985
25986 /*
25987 * Make sure that pre-populating the pmds is atomic with
25988@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25989 spin_lock_irqsave(&pgd_lock, flags);
25990
25991 pgd_ctor(pgd);
25992- pgd_prepopulate_pmd(mm, pgd, pmds);
25993+ pgd_prepopulate_pxd(mm, pgd, pxds);
25994
25995 spin_unlock_irqrestore(&pgd_lock, flags);
25996
25997 return pgd;
25998
25999-out_free_pmds:
26000- free_pmds(pmds);
26001+out_free_pxds:
26002+ free_pxds(pxds);
26003 out_free_pgd:
26004 free_page((unsigned long)pgd);
26005 out:
26006@@ -287,7 +338,7 @@ out:
26007
26008 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26009 {
26010- pgd_mop_up_pmds(mm, pgd);
26011+ pgd_mop_up_pxds(mm, pgd);
26012 pgd_dtor(pgd);
26013 paravirt_pgd_free(mm, pgd);
26014 free_page((unsigned long)pgd);
26015diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26016index 46c8834..fcab43d 100644
26017--- a/arch/x86/mm/pgtable_32.c
26018+++ b/arch/x86/mm/pgtable_32.c
26019@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26020 return;
26021 }
26022 pte = pte_offset_kernel(pmd, vaddr);
26023+
26024+ pax_open_kernel();
26025 if (pte_val(pteval))
26026 set_pte_at(&init_mm, vaddr, pte, pteval);
26027 else
26028 pte_clear(&init_mm, vaddr, pte);
26029+ pax_close_kernel();
26030
26031 /*
26032 * It's enough to flush this one mapping.
26033diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26034index 513d8ed..978c161 100644
26035--- a/arch/x86/mm/setup_nx.c
26036+++ b/arch/x86/mm/setup_nx.c
26037@@ -4,11 +4,10 @@
26038
26039 #include <asm/pgtable.h>
26040
26041+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26042 int nx_enabled;
26043
26044-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26045-static int disable_nx __cpuinitdata;
26046-
26047+#ifndef CONFIG_PAX_PAGEEXEC
26048 /*
26049 * noexec = on|off
26050 *
26051@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26052 if (!str)
26053 return -EINVAL;
26054 if (!strncmp(str, "on", 2)) {
26055- __supported_pte_mask |= _PAGE_NX;
26056- disable_nx = 0;
26057+ nx_enabled = 1;
26058 } else if (!strncmp(str, "off", 3)) {
26059- disable_nx = 1;
26060- __supported_pte_mask &= ~_PAGE_NX;
26061+ nx_enabled = 0;
26062 }
26063 return 0;
26064 }
26065 early_param("noexec", noexec_setup);
26066 #endif
26067+#endif
26068
26069 #ifdef CONFIG_X86_PAE
26070 void __init set_nx(void)
26071 {
26072- unsigned int v[4], l, h;
26073+ if (!nx_enabled && cpu_has_nx) {
26074+ unsigned l, h;
26075
26076- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26077- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26078-
26079- if ((v[3] & (1 << 20)) && !disable_nx) {
26080- rdmsr(MSR_EFER, l, h);
26081- l |= EFER_NX;
26082- wrmsr(MSR_EFER, l, h);
26083- nx_enabled = 1;
26084- __supported_pte_mask |= _PAGE_NX;
26085- }
26086+ __supported_pte_mask &= ~_PAGE_NX;
26087+ rdmsr(MSR_EFER, l, h);
26088+ l &= ~EFER_NX;
26089+ wrmsr(MSR_EFER, l, h);
26090 }
26091 }
26092 #else
26093@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26094 unsigned long efer;
26095
26096 rdmsrl(MSR_EFER, efer);
26097- if (!(efer & EFER_NX) || disable_nx)
26098+ if (!(efer & EFER_NX) || !nx_enabled)
26099 __supported_pte_mask &= ~_PAGE_NX;
26100 }
26101 #endif
26102diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26103index 36fe08e..b123d3a 100644
26104--- a/arch/x86/mm/tlb.c
26105+++ b/arch/x86/mm/tlb.c
26106@@ -61,7 +61,11 @@ void leave_mm(int cpu)
26107 BUG();
26108 cpumask_clear_cpu(cpu,
26109 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26110+
26111+#ifndef CONFIG_PAX_PER_CPU_PGD
26112 load_cr3(swapper_pg_dir);
26113+#endif
26114+
26115 }
26116 EXPORT_SYMBOL_GPL(leave_mm);
26117
26118diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26119index 829edf0..672adb3 100644
26120--- a/arch/x86/oprofile/backtrace.c
26121+++ b/arch/x86/oprofile/backtrace.c
26122@@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26123 {
26124 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26125
26126- if (!user_mode_vm(regs)) {
26127+ if (!user_mode(regs)) {
26128 unsigned long stack = kernel_stack_pointer(regs);
26129 if (depth)
26130 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26131diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26132index e6a160a..36deff6 100644
26133--- a/arch/x86/oprofile/op_model_p4.c
26134+++ b/arch/x86/oprofile/op_model_p4.c
26135@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26136 #endif
26137 }
26138
26139-static int inline addr_increment(void)
26140+static inline int addr_increment(void)
26141 {
26142 #ifdef CONFIG_SMP
26143 return smp_num_siblings == 2 ? 2 : 1;
26144diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26145index 1331fcf..03901b2 100644
26146--- a/arch/x86/pci/common.c
26147+++ b/arch/x86/pci/common.c
26148@@ -31,8 +31,8 @@ int noioapicreroute = 1;
26149 int pcibios_last_bus = -1;
26150 unsigned long pirq_table_addr;
26151 struct pci_bus *pci_root_bus;
26152-struct pci_raw_ops *raw_pci_ops;
26153-struct pci_raw_ops *raw_pci_ext_ops;
26154+const struct pci_raw_ops *raw_pci_ops;
26155+const struct pci_raw_ops *raw_pci_ext_ops;
26156
26157 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26158 int reg, int len, u32 *val)
26159diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26160index 347d882..4baf6b6 100644
26161--- a/arch/x86/pci/direct.c
26162+++ b/arch/x86/pci/direct.c
26163@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26164
26165 #undef PCI_CONF1_ADDRESS
26166
26167-struct pci_raw_ops pci_direct_conf1 = {
26168+const struct pci_raw_ops pci_direct_conf1 = {
26169 .read = pci_conf1_read,
26170 .write = pci_conf1_write,
26171 };
26172@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26173
26174 #undef PCI_CONF2_ADDRESS
26175
26176-struct pci_raw_ops pci_direct_conf2 = {
26177+const struct pci_raw_ops pci_direct_conf2 = {
26178 .read = pci_conf2_read,
26179 .write = pci_conf2_write,
26180 };
26181@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26182 * This should be close to trivial, but it isn't, because there are buggy
26183 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26184 */
26185-static int __init pci_sanity_check(struct pci_raw_ops *o)
26186+static int __init pci_sanity_check(const struct pci_raw_ops *o)
26187 {
26188 u32 x = 0;
26189 int year, devfn;
26190diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26191index f10a7e9..0425342 100644
26192--- a/arch/x86/pci/mmconfig_32.c
26193+++ b/arch/x86/pci/mmconfig_32.c
26194@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26195 return 0;
26196 }
26197
26198-static struct pci_raw_ops pci_mmcfg = {
26199+static const struct pci_raw_ops pci_mmcfg = {
26200 .read = pci_mmcfg_read,
26201 .write = pci_mmcfg_write,
26202 };
26203diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26204index 94349f8..41600a7 100644
26205--- a/arch/x86/pci/mmconfig_64.c
26206+++ b/arch/x86/pci/mmconfig_64.c
26207@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26208 return 0;
26209 }
26210
26211-static struct pci_raw_ops pci_mmcfg = {
26212+static const struct pci_raw_ops pci_mmcfg = {
26213 .read = pci_mmcfg_read,
26214 .write = pci_mmcfg_write,
26215 };
26216diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26217index 8eb295e..86bd657 100644
26218--- a/arch/x86/pci/numaq_32.c
26219+++ b/arch/x86/pci/numaq_32.c
26220@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26221
26222 #undef PCI_CONF1_MQ_ADDRESS
26223
26224-static struct pci_raw_ops pci_direct_conf1_mq = {
26225+static const struct pci_raw_ops pci_direct_conf1_mq = {
26226 .read = pci_conf1_mq_read,
26227 .write = pci_conf1_mq_write
26228 };
26229diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26230index b889d82..5a58a0a 100644
26231--- a/arch/x86/pci/olpc.c
26232+++ b/arch/x86/pci/olpc.c
26233@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26234 return 0;
26235 }
26236
26237-static struct pci_raw_ops pci_olpc_conf = {
26238+static const struct pci_raw_ops pci_olpc_conf = {
26239 .read = pci_olpc_read,
26240 .write = pci_olpc_write,
26241 };
26242diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26243index 1c975cc..b8e16c2 100644
26244--- a/arch/x86/pci/pcbios.c
26245+++ b/arch/x86/pci/pcbios.c
26246@@ -56,50 +56,93 @@ union bios32 {
26247 static struct {
26248 unsigned long address;
26249 unsigned short segment;
26250-} bios32_indirect = { 0, __KERNEL_CS };
26251+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26252
26253 /*
26254 * Returns the entry point for the given service, NULL on error
26255 */
26256
26257-static unsigned long bios32_service(unsigned long service)
26258+static unsigned long __devinit bios32_service(unsigned long service)
26259 {
26260 unsigned char return_code; /* %al */
26261 unsigned long address; /* %ebx */
26262 unsigned long length; /* %ecx */
26263 unsigned long entry; /* %edx */
26264 unsigned long flags;
26265+ struct desc_struct d, *gdt;
26266
26267 local_irq_save(flags);
26268- __asm__("lcall *(%%edi); cld"
26269+
26270+ gdt = get_cpu_gdt_table(smp_processor_id());
26271+
26272+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26273+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26274+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26275+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26276+
26277+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26278 : "=a" (return_code),
26279 "=b" (address),
26280 "=c" (length),
26281 "=d" (entry)
26282 : "0" (service),
26283 "1" (0),
26284- "D" (&bios32_indirect));
26285+ "D" (&bios32_indirect),
26286+ "r"(__PCIBIOS_DS)
26287+ : "memory");
26288+
26289+ pax_open_kernel();
26290+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26291+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26292+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26293+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26294+ pax_close_kernel();
26295+
26296 local_irq_restore(flags);
26297
26298 switch (return_code) {
26299- case 0:
26300- return address + entry;
26301- case 0x80: /* Not present */
26302- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26303- return 0;
26304- default: /* Shouldn't happen */
26305- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26306- service, return_code);
26307+ case 0: {
26308+ int cpu;
26309+ unsigned char flags;
26310+
26311+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26312+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26313+ printk(KERN_WARNING "bios32_service: not valid\n");
26314 return 0;
26315+ }
26316+ address = address + PAGE_OFFSET;
26317+ length += 16UL; /* some BIOSs underreport this... */
26318+ flags = 4;
26319+ if (length >= 64*1024*1024) {
26320+ length >>= PAGE_SHIFT;
26321+ flags |= 8;
26322+ }
26323+
26324+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
26325+ gdt = get_cpu_gdt_table(cpu);
26326+ pack_descriptor(&d, address, length, 0x9b, flags);
26327+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26328+ pack_descriptor(&d, address, length, 0x93, flags);
26329+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26330+ }
26331+ return entry;
26332+ }
26333+ case 0x80: /* Not present */
26334+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26335+ return 0;
26336+ default: /* Shouldn't happen */
26337+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26338+ service, return_code);
26339+ return 0;
26340 }
26341 }
26342
26343 static struct {
26344 unsigned long address;
26345 unsigned short segment;
26346-} pci_indirect = { 0, __KERNEL_CS };
26347+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26348
26349-static int pci_bios_present;
26350+static int pci_bios_present __read_only;
26351
26352 static int __devinit check_pcibios(void)
26353 {
26354@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26355 unsigned long flags, pcibios_entry;
26356
26357 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26358- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26359+ pci_indirect.address = pcibios_entry;
26360
26361 local_irq_save(flags);
26362- __asm__(
26363- "lcall *(%%edi); cld\n\t"
26364+ __asm__("movw %w6, %%ds\n\t"
26365+ "lcall *%%ss:(%%edi); cld\n\t"
26366+ "push %%ss\n\t"
26367+ "pop %%ds\n\t"
26368 "jc 1f\n\t"
26369 "xor %%ah, %%ah\n"
26370 "1:"
26371@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26372 "=b" (ebx),
26373 "=c" (ecx)
26374 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26375- "D" (&pci_indirect)
26376+ "D" (&pci_indirect),
26377+ "r" (__PCIBIOS_DS)
26378 : "memory");
26379 local_irq_restore(flags);
26380
26381@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26382
26383 switch (len) {
26384 case 1:
26385- __asm__("lcall *(%%esi); cld\n\t"
26386+ __asm__("movw %w6, %%ds\n\t"
26387+ "lcall *%%ss:(%%esi); cld\n\t"
26388+ "push %%ss\n\t"
26389+ "pop %%ds\n\t"
26390 "jc 1f\n\t"
26391 "xor %%ah, %%ah\n"
26392 "1:"
26393@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26394 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26395 "b" (bx),
26396 "D" ((long)reg),
26397- "S" (&pci_indirect));
26398+ "S" (&pci_indirect),
26399+ "r" (__PCIBIOS_DS));
26400 /*
26401 * Zero-extend the result beyond 8 bits, do not trust the
26402 * BIOS having done it:
26403@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26404 *value &= 0xff;
26405 break;
26406 case 2:
26407- __asm__("lcall *(%%esi); cld\n\t"
26408+ __asm__("movw %w6, %%ds\n\t"
26409+ "lcall *%%ss:(%%esi); cld\n\t"
26410+ "push %%ss\n\t"
26411+ "pop %%ds\n\t"
26412 "jc 1f\n\t"
26413 "xor %%ah, %%ah\n"
26414 "1:"
26415@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26416 : "1" (PCIBIOS_READ_CONFIG_WORD),
26417 "b" (bx),
26418 "D" ((long)reg),
26419- "S" (&pci_indirect));
26420+ "S" (&pci_indirect),
26421+ "r" (__PCIBIOS_DS));
26422 /*
26423 * Zero-extend the result beyond 16 bits, do not trust the
26424 * BIOS having done it:
26425@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26426 *value &= 0xffff;
26427 break;
26428 case 4:
26429- __asm__("lcall *(%%esi); cld\n\t"
26430+ __asm__("movw %w6, %%ds\n\t"
26431+ "lcall *%%ss:(%%esi); cld\n\t"
26432+ "push %%ss\n\t"
26433+ "pop %%ds\n\t"
26434 "jc 1f\n\t"
26435 "xor %%ah, %%ah\n"
26436 "1:"
26437@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26438 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26439 "b" (bx),
26440 "D" ((long)reg),
26441- "S" (&pci_indirect));
26442+ "S" (&pci_indirect),
26443+ "r" (__PCIBIOS_DS));
26444 break;
26445 }
26446
26447@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26448
26449 switch (len) {
26450 case 1:
26451- __asm__("lcall *(%%esi); cld\n\t"
26452+ __asm__("movw %w6, %%ds\n\t"
26453+ "lcall *%%ss:(%%esi); cld\n\t"
26454+ "push %%ss\n\t"
26455+ "pop %%ds\n\t"
26456 "jc 1f\n\t"
26457 "xor %%ah, %%ah\n"
26458 "1:"
26459@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26460 "c" (value),
26461 "b" (bx),
26462 "D" ((long)reg),
26463- "S" (&pci_indirect));
26464+ "S" (&pci_indirect),
26465+ "r" (__PCIBIOS_DS));
26466 break;
26467 case 2:
26468- __asm__("lcall *(%%esi); cld\n\t"
26469+ __asm__("movw %w6, %%ds\n\t"
26470+ "lcall *%%ss:(%%esi); cld\n\t"
26471+ "push %%ss\n\t"
26472+ "pop %%ds\n\t"
26473 "jc 1f\n\t"
26474 "xor %%ah, %%ah\n"
26475 "1:"
26476@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26477 "c" (value),
26478 "b" (bx),
26479 "D" ((long)reg),
26480- "S" (&pci_indirect));
26481+ "S" (&pci_indirect),
26482+ "r" (__PCIBIOS_DS));
26483 break;
26484 case 4:
26485- __asm__("lcall *(%%esi); cld\n\t"
26486+ __asm__("movw %w6, %%ds\n\t"
26487+ "lcall *%%ss:(%%esi); cld\n\t"
26488+ "push %%ss\n\t"
26489+ "pop %%ds\n\t"
26490 "jc 1f\n\t"
26491 "xor %%ah, %%ah\n"
26492 "1:"
26493@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26494 "c" (value),
26495 "b" (bx),
26496 "D" ((long)reg),
26497- "S" (&pci_indirect));
26498+ "S" (&pci_indirect),
26499+ "r" (__PCIBIOS_DS));
26500 break;
26501 }
26502
26503@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26504 * Function table for BIOS32 access
26505 */
26506
26507-static struct pci_raw_ops pci_bios_access = {
26508+static const struct pci_raw_ops pci_bios_access = {
26509 .read = pci_bios_read,
26510 .write = pci_bios_write
26511 };
26512@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26513 * Try to find PCI BIOS.
26514 */
26515
26516-static struct pci_raw_ops * __devinit pci_find_bios(void)
26517+static const struct pci_raw_ops * __devinit pci_find_bios(void)
26518 {
26519 union bios32 *check;
26520 unsigned char sum;
26521@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26522
26523 DBG("PCI: Fetching IRQ routing table... ");
26524 __asm__("push %%es\n\t"
26525+ "movw %w8, %%ds\n\t"
26526 "push %%ds\n\t"
26527 "pop %%es\n\t"
26528- "lcall *(%%esi); cld\n\t"
26529+ "lcall *%%ss:(%%esi); cld\n\t"
26530 "pop %%es\n\t"
26531+ "push %%ss\n\t"
26532+ "pop %%ds\n"
26533 "jc 1f\n\t"
26534 "xor %%ah, %%ah\n"
26535 "1:"
26536@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26537 "1" (0),
26538 "D" ((long) &opt),
26539 "S" (&pci_indirect),
26540- "m" (opt)
26541+ "m" (opt),
26542+ "r" (__PCIBIOS_DS)
26543 : "memory");
26544 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26545 if (ret & 0xff00)
26546@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26547 {
26548 int ret;
26549
26550- __asm__("lcall *(%%esi); cld\n\t"
26551+ __asm__("movw %w5, %%ds\n\t"
26552+ "lcall *%%ss:(%%esi); cld\n\t"
26553+ "push %%ss\n\t"
26554+ "pop %%ds\n"
26555 "jc 1f\n\t"
26556 "xor %%ah, %%ah\n"
26557 "1:"
26558@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26559 : "0" (PCIBIOS_SET_PCI_HW_INT),
26560 "b" ((dev->bus->number << 8) | dev->devfn),
26561 "c" ((irq << 8) | (pin + 10)),
26562- "S" (&pci_indirect));
26563+ "S" (&pci_indirect),
26564+ "r" (__PCIBIOS_DS));
26565 return !(ret & 0xff00);
26566 }
26567 EXPORT_SYMBOL(pcibios_set_irq_routing);
26568diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26569index fa0f651..9d8f3d9 100644
26570--- a/arch/x86/power/cpu.c
26571+++ b/arch/x86/power/cpu.c
26572@@ -129,7 +129,7 @@ static void do_fpu_end(void)
26573 static void fix_processor_context(void)
26574 {
26575 int cpu = smp_processor_id();
26576- struct tss_struct *t = &per_cpu(init_tss, cpu);
26577+ struct tss_struct *t = init_tss + cpu;
26578
26579 set_tss_desc(cpu, t); /*
26580 * This just modifies memory; should not be
26581@@ -139,7 +139,9 @@ static void fix_processor_context(void)
26582 */
26583
26584 #ifdef CONFIG_X86_64
26585+ pax_open_kernel();
26586 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26587+ pax_close_kernel();
26588
26589 syscall_init(); /* This sets MSR_*STAR and related */
26590 #endif
26591diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26592index dd78ef6..f9d928d 100644
26593--- a/arch/x86/vdso/Makefile
26594+++ b/arch/x86/vdso/Makefile
26595@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26596 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26597 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26598
26599-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26600+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26601 GCOV_PROFILE := n
26602
26603 #
26604diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26605index ee55754..0013b2e 100644
26606--- a/arch/x86/vdso/vclock_gettime.c
26607+++ b/arch/x86/vdso/vclock_gettime.c
26608@@ -22,24 +22,48 @@
26609 #include <asm/hpet.h>
26610 #include <asm/unistd.h>
26611 #include <asm/io.h>
26612+#include <asm/fixmap.h>
26613 #include "vextern.h"
26614
26615 #define gtod vdso_vsyscall_gtod_data
26616
26617+notrace noinline long __vdso_fallback_time(long *t)
26618+{
26619+ long secs;
26620+ asm volatile("syscall"
26621+ : "=a" (secs)
26622+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26623+ return secs;
26624+}
26625+
26626 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26627 {
26628 long ret;
26629 asm("syscall" : "=a" (ret) :
26630- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26631+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26632 return ret;
26633 }
26634
26635+notrace static inline cycle_t __vdso_vread_hpet(void)
26636+{
26637+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26638+}
26639+
26640+notrace static inline cycle_t __vdso_vread_tsc(void)
26641+{
26642+ cycle_t ret = (cycle_t)vget_cycles();
26643+
26644+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26645+}
26646+
26647 notrace static inline long vgetns(void)
26648 {
26649 long v;
26650- cycles_t (*vread)(void);
26651- vread = gtod->clock.vread;
26652- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26653+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26654+ v = __vdso_vread_tsc();
26655+ else
26656+ v = __vdso_vread_hpet();
26657+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26658 return (v * gtod->clock.mult) >> gtod->clock.shift;
26659 }
26660
26661@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26662
26663 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26664 {
26665- if (likely(gtod->sysctl_enabled))
26666+ if (likely(gtod->sysctl_enabled &&
26667+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26668+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26669 switch (clock) {
26670 case CLOCK_REALTIME:
26671 if (likely(gtod->clock.vread))
26672@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26673 int clock_gettime(clockid_t, struct timespec *)
26674 __attribute__((weak, alias("__vdso_clock_gettime")));
26675
26676+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26677+{
26678+ long ret;
26679+ asm("syscall" : "=a" (ret) :
26680+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26681+ return ret;
26682+}
26683+
26684 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26685 {
26686- long ret;
26687- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26688+ if (likely(gtod->sysctl_enabled &&
26689+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26690+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26691+ {
26692 if (likely(tv != NULL)) {
26693 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26694 offsetof(struct timespec, tv_nsec) ||
26695@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26696 }
26697 return 0;
26698 }
26699- asm("syscall" : "=a" (ret) :
26700- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26701- return ret;
26702+ return __vdso_fallback_gettimeofday(tv, tz);
26703 }
26704 int gettimeofday(struct timeval *, struct timezone *)
26705 __attribute__((weak, alias("__vdso_gettimeofday")));
26706diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26707index 4e5dd3b..00ba15e 100644
26708--- a/arch/x86/vdso/vdso.lds.S
26709+++ b/arch/x86/vdso/vdso.lds.S
26710@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26711 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26712 #include "vextern.h"
26713 #undef VEXTERN
26714+
26715+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26716+VEXTERN(fallback_gettimeofday)
26717+VEXTERN(fallback_time)
26718+VEXTERN(getcpu)
26719+#undef VEXTERN
26720diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26721index 58bc00f..d53fb48 100644
26722--- a/arch/x86/vdso/vdso32-setup.c
26723+++ b/arch/x86/vdso/vdso32-setup.c
26724@@ -25,6 +25,7 @@
26725 #include <asm/tlbflush.h>
26726 #include <asm/vdso.h>
26727 #include <asm/proto.h>
26728+#include <asm/mman.h>
26729
26730 enum {
26731 VDSO_DISABLED = 0,
26732@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26733 void enable_sep_cpu(void)
26734 {
26735 int cpu = get_cpu();
26736- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26737+ struct tss_struct *tss = init_tss + cpu;
26738
26739 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26740 put_cpu();
26741@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26742 gate_vma.vm_start = FIXADDR_USER_START;
26743 gate_vma.vm_end = FIXADDR_USER_END;
26744 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26745- gate_vma.vm_page_prot = __P101;
26746+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26747 /*
26748 * Make sure the vDSO gets into every core dump.
26749 * Dumping its contents makes post-mortem fully interpretable later
26750@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26751 if (compat)
26752 addr = VDSO_HIGH_BASE;
26753 else {
26754- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26755+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26756 if (IS_ERR_VALUE(addr)) {
26757 ret = addr;
26758 goto up_fail;
26759 }
26760 }
26761
26762- current->mm->context.vdso = (void *)addr;
26763+ current->mm->context.vdso = addr;
26764
26765 if (compat_uses_vma || !compat) {
26766 /*
26767@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26768 }
26769
26770 current_thread_info()->sysenter_return =
26771- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26772+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26773
26774 up_fail:
26775 if (ret)
26776- current->mm->context.vdso = NULL;
26777+ current->mm->context.vdso = 0;
26778
26779 up_write(&mm->mmap_sem);
26780
26781@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26782
26783 const char *arch_vma_name(struct vm_area_struct *vma)
26784 {
26785- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26786+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26787 return "[vdso]";
26788+
26789+#ifdef CONFIG_PAX_SEGMEXEC
26790+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26791+ return "[vdso]";
26792+#endif
26793+
26794 return NULL;
26795 }
26796
26797@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26798 struct mm_struct *mm = tsk->mm;
26799
26800 /* Check to see if this task was created in compat vdso mode */
26801- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26802+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26803 return &gate_vma;
26804 return NULL;
26805 }
26806diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
26807index 1683ba2..48d07f3 100644
26808--- a/arch/x86/vdso/vextern.h
26809+++ b/arch/x86/vdso/vextern.h
26810@@ -11,6 +11,5 @@
26811 put into vextern.h and be referenced as a pointer with vdso prefix.
26812 The main kernel later fills in the values. */
26813
26814-VEXTERN(jiffies)
26815 VEXTERN(vgetcpu_mode)
26816 VEXTERN(vsyscall_gtod_data)
26817diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26818index 21e1aeb..2c0b3c4 100644
26819--- a/arch/x86/vdso/vma.c
26820+++ b/arch/x86/vdso/vma.c
26821@@ -17,8 +17,6 @@
26822 #include "vextern.h" /* Just for VMAGIC. */
26823 #undef VEXTERN
26824
26825-unsigned int __read_mostly vdso_enabled = 1;
26826-
26827 extern char vdso_start[], vdso_end[];
26828 extern unsigned short vdso_sync_cpuid;
26829
26830@@ -27,10 +25,8 @@ static unsigned vdso_size;
26831
26832 static inline void *var_ref(void *p, char *name)
26833 {
26834- if (*(void **)p != (void *)VMAGIC) {
26835- printk("VDSO: variable %s broken\n", name);
26836- vdso_enabled = 0;
26837- }
26838+ if (*(void **)p != (void *)VMAGIC)
26839+ panic("VDSO: variable %s broken\n", name);
26840 return p;
26841 }
26842
26843@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
26844 if (!vbase)
26845 goto oom;
26846
26847- if (memcmp(vbase, "\177ELF", 4)) {
26848- printk("VDSO: I'm broken; not ELF\n");
26849- vdso_enabled = 0;
26850- }
26851+ if (memcmp(vbase, ELFMAG, SELFMAG))
26852+ panic("VDSO: I'm broken; not ELF\n");
26853
26854 #define VEXTERN(x) \
26855 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
26856 #include "vextern.h"
26857 #undef VEXTERN
26858+ vunmap(vbase);
26859 return 0;
26860
26861 oom:
26862- printk("Cannot allocate vdso\n");
26863- vdso_enabled = 0;
26864- return -ENOMEM;
26865+ panic("Cannot allocate vdso\n");
26866 }
26867 __initcall(init_vdso_vars);
26868
26869@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26870 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26871 {
26872 struct mm_struct *mm = current->mm;
26873- unsigned long addr;
26874+ unsigned long addr = 0;
26875 int ret;
26876
26877- if (!vdso_enabled)
26878- return 0;
26879-
26880 down_write(&mm->mmap_sem);
26881+
26882+#ifdef CONFIG_PAX_RANDMMAP
26883+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26884+#endif
26885+
26886 addr = vdso_addr(mm->start_stack, vdso_size);
26887 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26888 if (IS_ERR_VALUE(addr)) {
26889@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26890 goto up_fail;
26891 }
26892
26893- current->mm->context.vdso = (void *)addr;
26894+ current->mm->context.vdso = addr;
26895
26896 ret = install_special_mapping(mm, addr, vdso_size,
26897 VM_READ|VM_EXEC|
26898@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26899 VM_ALWAYSDUMP,
26900 vdso_pages);
26901 if (ret) {
26902- current->mm->context.vdso = NULL;
26903+ current->mm->context.vdso = 0;
26904 goto up_fail;
26905 }
26906
26907@@ -132,10 +127,3 @@ up_fail:
26908 up_write(&mm->mmap_sem);
26909 return ret;
26910 }
26911-
26912-static __init int vdso_setup(char *s)
26913-{
26914- vdso_enabled = simple_strtoul(s, NULL, 0);
26915- return 0;
26916-}
26917-__setup("vdso=", vdso_setup);
26918diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26919index 0087b00..eecb34f 100644
26920--- a/arch/x86/xen/enlighten.c
26921+++ b/arch/x86/xen/enlighten.c
26922@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26923
26924 struct shared_info xen_dummy_shared_info;
26925
26926-void *xen_initial_gdt;
26927-
26928 /*
26929 * Point at some empty memory to start with. We map the real shared_info
26930 * page as soon as fixmap is up and running.
26931@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
26932
26933 preempt_disable();
26934
26935- start = __get_cpu_var(idt_desc).address;
26936+ start = (unsigned long)__get_cpu_var(idt_desc).address;
26937 end = start + __get_cpu_var(idt_desc).size + 1;
26938
26939 xen_mc_flush();
26940@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
26941 #endif
26942 };
26943
26944-static void xen_reboot(int reason)
26945+static __noreturn void xen_reboot(int reason)
26946 {
26947 struct sched_shutdown r = { .reason = reason };
26948
26949@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
26950 BUG();
26951 }
26952
26953-static void xen_restart(char *msg)
26954+static __noreturn void xen_restart(char *msg)
26955 {
26956 xen_reboot(SHUTDOWN_reboot);
26957 }
26958
26959-static void xen_emergency_restart(void)
26960+static __noreturn void xen_emergency_restart(void)
26961 {
26962 xen_reboot(SHUTDOWN_reboot);
26963 }
26964
26965-static void xen_machine_halt(void)
26966+static __noreturn void xen_machine_halt(void)
26967 {
26968 xen_reboot(SHUTDOWN_poweroff);
26969 }
26970@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
26971 */
26972 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26973
26974-#ifdef CONFIG_X86_64
26975 /* Work out if we support NX */
26976- check_efer();
26977+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26978+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26979+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26980+ unsigned l, h;
26981+
26982+#ifdef CONFIG_X86_PAE
26983+ nx_enabled = 1;
26984+#endif
26985+ __supported_pte_mask |= _PAGE_NX;
26986+ rdmsr(MSR_EFER, l, h);
26987+ l |= EFER_NX;
26988+ wrmsr(MSR_EFER, l, h);
26989+ }
26990 #endif
26991
26992 xen_setup_features();
26993@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
26994
26995 machine_ops = xen_machine_ops;
26996
26997- /*
26998- * The only reliable way to retain the initial address of the
26999- * percpu gdt_page is to remember it here, so we can go and
27000- * mark it RW later, when the initial percpu area is freed.
27001- */
27002- xen_initial_gdt = &per_cpu(gdt_page, 0);
27003-
27004 xen_smp_init();
27005
27006 pgd = (pgd_t *)xen_start_info->pt_base;
27007diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27008index 3f90a2c..2c2ad84 100644
27009--- a/arch/x86/xen/mmu.c
27010+++ b/arch/x86/xen/mmu.c
27011@@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27012 convert_pfn_mfn(init_level4_pgt);
27013 convert_pfn_mfn(level3_ident_pgt);
27014 convert_pfn_mfn(level3_kernel_pgt);
27015+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27016+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27017+ convert_pfn_mfn(level3_vmemmap_pgt);
27018
27019 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27020 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27021@@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27022 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27023 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27024 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27025+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27026+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27027+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27028 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27029+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27030 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27031 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27032
27033@@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27034 pv_mmu_ops.set_pud = xen_set_pud;
27035 #if PAGETABLE_LEVELS == 4
27036 pv_mmu_ops.set_pgd = xen_set_pgd;
27037+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27038 #endif
27039
27040 /* This will work as long as patching hasn't happened yet
27041@@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27042 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27043 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27044 .set_pgd = xen_set_pgd_hyper,
27045+ .set_pgd_batched = xen_set_pgd_hyper,
27046
27047 .alloc_pud = xen_alloc_pmd_init,
27048 .release_pud = xen_release_pmd_init,
27049diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27050index a96204a..fca9b8e 100644
27051--- a/arch/x86/xen/smp.c
27052+++ b/arch/x86/xen/smp.c
27053@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27054 {
27055 BUG_ON(smp_processor_id() != 0);
27056 native_smp_prepare_boot_cpu();
27057-
27058- /* We've switched to the "real" per-cpu gdt, so make sure the
27059- old memory can be recycled */
27060- make_lowmem_page_readwrite(xen_initial_gdt);
27061-
27062 xen_setup_vcpu_info_placement();
27063 }
27064
27065@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27066 gdt = get_cpu_gdt_table(cpu);
27067
27068 ctxt->flags = VGCF_IN_KERNEL;
27069- ctxt->user_regs.ds = __USER_DS;
27070- ctxt->user_regs.es = __USER_DS;
27071+ ctxt->user_regs.ds = __KERNEL_DS;
27072+ ctxt->user_regs.es = __KERNEL_DS;
27073 ctxt->user_regs.ss = __KERNEL_DS;
27074 #ifdef CONFIG_X86_32
27075 ctxt->user_regs.fs = __KERNEL_PERCPU;
27076- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27077+ savesegment(gs, ctxt->user_regs.gs);
27078 #else
27079 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27080 #endif
27081@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27082 int rc;
27083
27084 per_cpu(current_task, cpu) = idle;
27085+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27086 #ifdef CONFIG_X86_32
27087 irq_ctx_init(cpu);
27088 #else
27089 clear_tsk_thread_flag(idle, TIF_FORK);
27090- per_cpu(kernel_stack, cpu) =
27091- (unsigned long)task_stack_page(idle) -
27092- KERNEL_STACK_OFFSET + THREAD_SIZE;
27093+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27094 #endif
27095 xen_setup_runstate_info(cpu);
27096 xen_setup_timer(cpu);
27097diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27098index 9a95a9c..4f39e774 100644
27099--- a/arch/x86/xen/xen-asm_32.S
27100+++ b/arch/x86/xen/xen-asm_32.S
27101@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27102 ESP_OFFSET=4 # bytes pushed onto stack
27103
27104 /*
27105- * Store vcpu_info pointer for easy access. Do it this way to
27106- * avoid having to reload %fs
27107+ * Store vcpu_info pointer for easy access.
27108 */
27109 #ifdef CONFIG_SMP
27110- GET_THREAD_INFO(%eax)
27111- movl TI_cpu(%eax), %eax
27112- movl __per_cpu_offset(,%eax,4), %eax
27113- mov per_cpu__xen_vcpu(%eax), %eax
27114+ push %fs
27115+ mov $(__KERNEL_PERCPU), %eax
27116+ mov %eax, %fs
27117+ mov PER_CPU_VAR(xen_vcpu), %eax
27118+ pop %fs
27119 #else
27120 movl per_cpu__xen_vcpu, %eax
27121 #endif
27122diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27123index 1a5ff24..a187d40 100644
27124--- a/arch/x86/xen/xen-head.S
27125+++ b/arch/x86/xen/xen-head.S
27126@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27127 #ifdef CONFIG_X86_32
27128 mov %esi,xen_start_info
27129 mov $init_thread_union+THREAD_SIZE,%esp
27130+#ifdef CONFIG_SMP
27131+ movl $cpu_gdt_table,%edi
27132+ movl $__per_cpu_load,%eax
27133+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27134+ rorl $16,%eax
27135+ movb %al,__KERNEL_PERCPU + 4(%edi)
27136+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27137+ movl $__per_cpu_end - 1,%eax
27138+ subl $__per_cpu_start,%eax
27139+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27140+#endif
27141 #else
27142 mov %rsi,xen_start_info
27143 mov $init_thread_union+THREAD_SIZE,%rsp
27144diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27145index f9153a3..51eab3d 100644
27146--- a/arch/x86/xen/xen-ops.h
27147+++ b/arch/x86/xen/xen-ops.h
27148@@ -10,8 +10,6 @@
27149 extern const char xen_hypervisor_callback[];
27150 extern const char xen_failsafe_callback[];
27151
27152-extern void *xen_initial_gdt;
27153-
27154 struct trap_info;
27155 void xen_copy_trap_info(struct trap_info *traps);
27156
27157diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27158index 15c6308..96e83c2 100644
27159--- a/block/blk-integrity.c
27160+++ b/block/blk-integrity.c
27161@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27162 NULL,
27163 };
27164
27165-static struct sysfs_ops integrity_ops = {
27166+static const struct sysfs_ops integrity_ops = {
27167 .show = &integrity_attr_show,
27168 .store = &integrity_attr_store,
27169 };
27170diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27171index ca56420..f2fc409 100644
27172--- a/block/blk-iopoll.c
27173+++ b/block/blk-iopoll.c
27174@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27175 }
27176 EXPORT_SYMBOL(blk_iopoll_complete);
27177
27178-static void blk_iopoll_softirq(struct softirq_action *h)
27179+static void blk_iopoll_softirq(void)
27180 {
27181 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27182 int rearm = 0, budget = blk_iopoll_budget;
27183diff --git a/block/blk-map.c b/block/blk-map.c
27184index 30a7e51..0aeec6a 100644
27185--- a/block/blk-map.c
27186+++ b/block/blk-map.c
27187@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27188 * direct dma. else, set up kernel bounce buffers
27189 */
27190 uaddr = (unsigned long) ubuf;
27191- if (blk_rq_aligned(q, ubuf, len) && !map_data)
27192+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27193 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27194 else
27195 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27196@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27197 for (i = 0; i < iov_count; i++) {
27198 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27199
27200+ if (!iov[i].iov_len)
27201+ return -EINVAL;
27202+
27203 if (uaddr & queue_dma_alignment(q)) {
27204 unaligned = 1;
27205 break;
27206 }
27207- if (!iov[i].iov_len)
27208- return -EINVAL;
27209 }
27210
27211 if (unaligned || (q->dma_pad_mask & len) || map_data)
27212@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27213 if (!len || !kbuf)
27214 return -EINVAL;
27215
27216- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27217+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27218 if (do_copy)
27219 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27220 else
27221diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27222index ee9c216..58d410a 100644
27223--- a/block/blk-softirq.c
27224+++ b/block/blk-softirq.c
27225@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27226 * Softirq action handler - move entries to local list and loop over them
27227 * while passing them to the queue registered handler.
27228 */
27229-static void blk_done_softirq(struct softirq_action *h)
27230+static void blk_done_softirq(void)
27231 {
27232 struct list_head *cpu_list, local_list;
27233
27234diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27235index bb9c5ea..5330d48 100644
27236--- a/block/blk-sysfs.c
27237+++ b/block/blk-sysfs.c
27238@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27239 kmem_cache_free(blk_requestq_cachep, q);
27240 }
27241
27242-static struct sysfs_ops queue_sysfs_ops = {
27243+static const struct sysfs_ops queue_sysfs_ops = {
27244 .show = queue_attr_show,
27245 .store = queue_attr_store,
27246 };
27247diff --git a/block/bsg.c b/block/bsg.c
27248index 7154a7a..08ac2f0 100644
27249--- a/block/bsg.c
27250+++ b/block/bsg.c
27251@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27252 struct sg_io_v4 *hdr, struct bsg_device *bd,
27253 fmode_t has_write_perm)
27254 {
27255+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27256+ unsigned char *cmdptr;
27257+
27258 if (hdr->request_len > BLK_MAX_CDB) {
27259 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27260 if (!rq->cmd)
27261 return -ENOMEM;
27262- }
27263+ cmdptr = rq->cmd;
27264+ } else
27265+ cmdptr = tmpcmd;
27266
27267- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27268+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27269 hdr->request_len))
27270 return -EFAULT;
27271
27272+ if (cmdptr != rq->cmd)
27273+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27274+
27275 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27276 if (blk_verify_command(rq->cmd, has_write_perm))
27277 return -EPERM;
27278@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27279 rq->next_rq = next_rq;
27280 next_rq->cmd_type = rq->cmd_type;
27281
27282- dxferp = (void*)(unsigned long)hdr->din_xferp;
27283+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27284 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27285 hdr->din_xfer_len, GFP_KERNEL);
27286 if (ret)
27287@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27288
27289 if (hdr->dout_xfer_len) {
27290 dxfer_len = hdr->dout_xfer_len;
27291- dxferp = (void*)(unsigned long)hdr->dout_xferp;
27292+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27293 } else if (hdr->din_xfer_len) {
27294 dxfer_len = hdr->din_xfer_len;
27295- dxferp = (void*)(unsigned long)hdr->din_xferp;
27296+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27297 } else
27298 dxfer_len = 0;
27299
27300@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27301 int len = min_t(unsigned int, hdr->max_response_len,
27302 rq->sense_len);
27303
27304- ret = copy_to_user((void*)(unsigned long)hdr->response,
27305+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27306 rq->sense, len);
27307 if (!ret)
27308 hdr->response_len = len;
27309diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27310index 9bd086c..ca1fc22 100644
27311--- a/block/compat_ioctl.c
27312+++ b/block/compat_ioctl.c
27313@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27314 err |= __get_user(f->spec1, &uf->spec1);
27315 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27316 err |= __get_user(name, &uf->name);
27317- f->name = compat_ptr(name);
27318+ f->name = (void __force_kernel *)compat_ptr(name);
27319 if (err) {
27320 err = -EFAULT;
27321 goto out;
27322diff --git a/block/elevator.c b/block/elevator.c
27323index a847046..75a1746 100644
27324--- a/block/elevator.c
27325+++ b/block/elevator.c
27326@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27327 return error;
27328 }
27329
27330-static struct sysfs_ops elv_sysfs_ops = {
27331+static const struct sysfs_ops elv_sysfs_ops = {
27332 .show = elv_attr_show,
27333 .store = elv_attr_store,
27334 };
27335diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27336index 2be0a97..bded3fd 100644
27337--- a/block/scsi_ioctl.c
27338+++ b/block/scsi_ioctl.c
27339@@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27340 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27341 struct sg_io_hdr *hdr, fmode_t mode)
27342 {
27343- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27344+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27345+ unsigned char *cmdptr;
27346+
27347+ if (rq->cmd != rq->__cmd)
27348+ cmdptr = rq->cmd;
27349+ else
27350+ cmdptr = tmpcmd;
27351+
27352+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27353 return -EFAULT;
27354+
27355+ if (cmdptr != rq->cmd)
27356+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27357+
27358 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27359 return -EPERM;
27360
27361@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27362 int err;
27363 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27364 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27365+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27366+ unsigned char *cmdptr;
27367
27368 if (!sic)
27369 return -EINVAL;
27370@@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27371 */
27372 err = -EFAULT;
27373 rq->cmd_len = cmdlen;
27374- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27375+
27376+ if (rq->cmd != rq->__cmd)
27377+ cmdptr = rq->cmd;
27378+ else
27379+ cmdptr = tmpcmd;
27380+
27381+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27382 goto error;
27383
27384+ if (rq->cmd != cmdptr)
27385+ memcpy(rq->cmd, cmdptr, cmdlen);
27386+
27387 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27388 goto error;
27389
27390diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27391index 3533582..f143117 100644
27392--- a/crypto/cryptd.c
27393+++ b/crypto/cryptd.c
27394@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27395
27396 struct cryptd_blkcipher_request_ctx {
27397 crypto_completion_t complete;
27398-};
27399+} __no_const;
27400
27401 struct cryptd_hash_ctx {
27402 struct crypto_shash *child;
27403diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27404index a90d260..7a9765e 100644
27405--- a/crypto/gf128mul.c
27406+++ b/crypto/gf128mul.c
27407@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27408 for (i = 0; i < 7; ++i)
27409 gf128mul_x_lle(&p[i + 1], &p[i]);
27410
27411- memset(r, 0, sizeof(r));
27412+ memset(r, 0, sizeof(*r));
27413 for (i = 0;;) {
27414 u8 ch = ((u8 *)b)[15 - i];
27415
27416@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27417 for (i = 0; i < 7; ++i)
27418 gf128mul_x_bbe(&p[i + 1], &p[i]);
27419
27420- memset(r, 0, sizeof(r));
27421+ memset(r, 0, sizeof(*r));
27422 for (i = 0;;) {
27423 u8 ch = ((u8 *)b)[i];
27424
27425diff --git a/crypto/serpent.c b/crypto/serpent.c
27426index b651a55..023297d 100644
27427--- a/crypto/serpent.c
27428+++ b/crypto/serpent.c
27429@@ -21,6 +21,7 @@
27430 #include <asm/byteorder.h>
27431 #include <linux/crypto.h>
27432 #include <linux/types.h>
27433+#include <linux/sched.h>
27434
27435 /* Key is padded to the maximum of 256 bits before round key generation.
27436 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27437@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27438 u32 r0,r1,r2,r3,r4;
27439 int i;
27440
27441+ pax_track_stack();
27442+
27443 /* Copy key, add padding */
27444
27445 for (i = 0; i < keylen; ++i)
27446diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27447index 0d2cdb8..d8de48d 100644
27448--- a/drivers/acpi/acpi_pad.c
27449+++ b/drivers/acpi/acpi_pad.c
27450@@ -30,7 +30,7 @@
27451 #include <acpi/acpi_bus.h>
27452 #include <acpi/acpi_drivers.h>
27453
27454-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27455+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27456 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27457 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27458 static DEFINE_MUTEX(isolated_cpus_lock);
27459diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27460index 3f4602b..2e41d36 100644
27461--- a/drivers/acpi/battery.c
27462+++ b/drivers/acpi/battery.c
27463@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27464 }
27465
27466 static struct battery_file {
27467- struct file_operations ops;
27468+ const struct file_operations ops;
27469 mode_t mode;
27470 const char *name;
27471 } acpi_battery_file[] = {
27472diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27473index 7338b6a..82f0257 100644
27474--- a/drivers/acpi/dock.c
27475+++ b/drivers/acpi/dock.c
27476@@ -77,7 +77,7 @@ struct dock_dependent_device {
27477 struct list_head list;
27478 struct list_head hotplug_list;
27479 acpi_handle handle;
27480- struct acpi_dock_ops *ops;
27481+ const struct acpi_dock_ops *ops;
27482 void *context;
27483 };
27484
27485@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27486 * the dock driver after _DCK is executed.
27487 */
27488 int
27489-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27490+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27491 void *context)
27492 {
27493 struct dock_dependent_device *dd;
27494diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27495index 7c1c59e..2993595 100644
27496--- a/drivers/acpi/osl.c
27497+++ b/drivers/acpi/osl.c
27498@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27499 void __iomem *virt_addr;
27500
27501 virt_addr = ioremap(phys_addr, width);
27502+ if (!virt_addr)
27503+ return AE_NO_MEMORY;
27504 if (!value)
27505 value = &dummy;
27506
27507@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27508 void __iomem *virt_addr;
27509
27510 virt_addr = ioremap(phys_addr, width);
27511+ if (!virt_addr)
27512+ return AE_NO_MEMORY;
27513
27514 switch (width) {
27515 case 8:
27516diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27517index c216062..eec10d2 100644
27518--- a/drivers/acpi/power_meter.c
27519+++ b/drivers/acpi/power_meter.c
27520@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27521 return res;
27522
27523 temp /= 1000;
27524- if (temp < 0)
27525- return -EINVAL;
27526
27527 mutex_lock(&resource->lock);
27528 resource->trip[attr->index - 7] = temp;
27529diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27530index d0d25e2..961643d 100644
27531--- a/drivers/acpi/proc.c
27532+++ b/drivers/acpi/proc.c
27533@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27534 size_t count, loff_t * ppos)
27535 {
27536 struct list_head *node, *next;
27537- char strbuf[5];
27538- char str[5] = "";
27539- unsigned int len = count;
27540+ char strbuf[5] = {0};
27541 struct acpi_device *found_dev = NULL;
27542
27543- if (len > 4)
27544- len = 4;
27545- if (len < 0)
27546- return -EFAULT;
27547+ if (count > 4)
27548+ count = 4;
27549
27550- if (copy_from_user(strbuf, buffer, len))
27551+ if (copy_from_user(strbuf, buffer, count))
27552 return -EFAULT;
27553- strbuf[len] = '\0';
27554- sscanf(strbuf, "%s", str);
27555+ strbuf[count] = '\0';
27556
27557 mutex_lock(&acpi_device_lock);
27558 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27559@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27560 if (!dev->wakeup.flags.valid)
27561 continue;
27562
27563- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27564+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27565 dev->wakeup.state.enabled =
27566 dev->wakeup.state.enabled ? 0 : 1;
27567 found_dev = dev;
27568diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27569index 7102474..de8ad22 100644
27570--- a/drivers/acpi/processor_core.c
27571+++ b/drivers/acpi/processor_core.c
27572@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27573 return 0;
27574 }
27575
27576- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27577+ BUG_ON(pr->id >= nr_cpu_ids);
27578
27579 /*
27580 * Buggy BIOS check
27581diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27582index d933980..5761f13 100644
27583--- a/drivers/acpi/sbshc.c
27584+++ b/drivers/acpi/sbshc.c
27585@@ -17,7 +17,7 @@
27586
27587 #define PREFIX "ACPI: "
27588
27589-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27590+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27591 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27592
27593 struct acpi_smb_hc {
27594diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27595index 0458094..6978e7b 100644
27596--- a/drivers/acpi/sleep.c
27597+++ b/drivers/acpi/sleep.c
27598@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27599 }
27600 }
27601
27602-static struct platform_suspend_ops acpi_suspend_ops = {
27603+static const struct platform_suspend_ops acpi_suspend_ops = {
27604 .valid = acpi_suspend_state_valid,
27605 .begin = acpi_suspend_begin,
27606 .prepare_late = acpi_pm_prepare,
27607@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27608 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27609 * been requested.
27610 */
27611-static struct platform_suspend_ops acpi_suspend_ops_old = {
27612+static const struct platform_suspend_ops acpi_suspend_ops_old = {
27613 .valid = acpi_suspend_state_valid,
27614 .begin = acpi_suspend_begin_old,
27615 .prepare_late = acpi_pm_disable_gpes,
27616@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27617 acpi_enable_all_runtime_gpes();
27618 }
27619
27620-static struct platform_hibernation_ops acpi_hibernation_ops = {
27621+static const struct platform_hibernation_ops acpi_hibernation_ops = {
27622 .begin = acpi_hibernation_begin,
27623 .end = acpi_pm_end,
27624 .pre_snapshot = acpi_hibernation_pre_snapshot,
27625@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27626 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27627 * been requested.
27628 */
27629-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27630+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27631 .begin = acpi_hibernation_begin_old,
27632 .end = acpi_pm_end,
27633 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27634diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27635index 05dff63..b662ab7 100644
27636--- a/drivers/acpi/video.c
27637+++ b/drivers/acpi/video.c
27638@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27639 vd->brightness->levels[request_level]);
27640 }
27641
27642-static struct backlight_ops acpi_backlight_ops = {
27643+static const struct backlight_ops acpi_backlight_ops = {
27644 .get_brightness = acpi_video_get_brightness,
27645 .update_status = acpi_video_set_brightness,
27646 };
27647diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27648index 6787aab..23ffb0e 100644
27649--- a/drivers/ata/ahci.c
27650+++ b/drivers/ata/ahci.c
27651@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27652 .sdev_attrs = ahci_sdev_attrs,
27653 };
27654
27655-static struct ata_port_operations ahci_ops = {
27656+static const struct ata_port_operations ahci_ops = {
27657 .inherits = &sata_pmp_port_ops,
27658
27659 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27660@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27661 .port_stop = ahci_port_stop,
27662 };
27663
27664-static struct ata_port_operations ahci_vt8251_ops = {
27665+static const struct ata_port_operations ahci_vt8251_ops = {
27666 .inherits = &ahci_ops,
27667 .hardreset = ahci_vt8251_hardreset,
27668 };
27669
27670-static struct ata_port_operations ahci_p5wdh_ops = {
27671+static const struct ata_port_operations ahci_p5wdh_ops = {
27672 .inherits = &ahci_ops,
27673 .hardreset = ahci_p5wdh_hardreset,
27674 };
27675
27676-static struct ata_port_operations ahci_sb600_ops = {
27677+static const struct ata_port_operations ahci_sb600_ops = {
27678 .inherits = &ahci_ops,
27679 .softreset = ahci_sb600_softreset,
27680 .pmp_softreset = ahci_sb600_softreset,
27681diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27682index 99e7196..4968c77 100644
27683--- a/drivers/ata/ata_generic.c
27684+++ b/drivers/ata/ata_generic.c
27685@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27686 ATA_BMDMA_SHT(DRV_NAME),
27687 };
27688
27689-static struct ata_port_operations generic_port_ops = {
27690+static const struct ata_port_operations generic_port_ops = {
27691 .inherits = &ata_bmdma_port_ops,
27692 .cable_detect = ata_cable_unknown,
27693 .set_mode = generic_set_mode,
27694diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27695index c33591d..000c121 100644
27696--- a/drivers/ata/ata_piix.c
27697+++ b/drivers/ata/ata_piix.c
27698@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27699 ATA_BMDMA_SHT(DRV_NAME),
27700 };
27701
27702-static struct ata_port_operations piix_pata_ops = {
27703+static const struct ata_port_operations piix_pata_ops = {
27704 .inherits = &ata_bmdma32_port_ops,
27705 .cable_detect = ata_cable_40wire,
27706 .set_piomode = piix_set_piomode,
27707@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27708 .prereset = piix_pata_prereset,
27709 };
27710
27711-static struct ata_port_operations piix_vmw_ops = {
27712+static const struct ata_port_operations piix_vmw_ops = {
27713 .inherits = &piix_pata_ops,
27714 .bmdma_status = piix_vmw_bmdma_status,
27715 };
27716
27717-static struct ata_port_operations ich_pata_ops = {
27718+static const struct ata_port_operations ich_pata_ops = {
27719 .inherits = &piix_pata_ops,
27720 .cable_detect = ich_pata_cable_detect,
27721 .set_dmamode = ich_set_dmamode,
27722 };
27723
27724-static struct ata_port_operations piix_sata_ops = {
27725+static const struct ata_port_operations piix_sata_ops = {
27726 .inherits = &ata_bmdma_port_ops,
27727 };
27728
27729-static struct ata_port_operations piix_sidpr_sata_ops = {
27730+static const struct ata_port_operations piix_sidpr_sata_ops = {
27731 .inherits = &piix_sata_ops,
27732 .hardreset = sata_std_hardreset,
27733 .scr_read = piix_sidpr_scr_read,
27734diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27735index b0882cd..c295d65 100644
27736--- a/drivers/ata/libata-acpi.c
27737+++ b/drivers/ata/libata-acpi.c
27738@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27739 ata_acpi_uevent(dev->link->ap, dev, event);
27740 }
27741
27742-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27743+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27744 .handler = ata_acpi_dev_notify_dock,
27745 .uevent = ata_acpi_dev_uevent,
27746 };
27747
27748-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27749+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27750 .handler = ata_acpi_ap_notify_dock,
27751 .uevent = ata_acpi_ap_uevent,
27752 };
27753diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27754index d4f7f99..94f603e 100644
27755--- a/drivers/ata/libata-core.c
27756+++ b/drivers/ata/libata-core.c
27757@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27758 struct ata_port *ap;
27759 unsigned int tag;
27760
27761- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27762+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27763 ap = qc->ap;
27764
27765 qc->flags = 0;
27766@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27767 struct ata_port *ap;
27768 struct ata_link *link;
27769
27770- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27771+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27772 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27773 ap = qc->ap;
27774 link = qc->dev->link;
27775@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27776 * LOCKING:
27777 * None.
27778 */
27779-static void ata_finalize_port_ops(struct ata_port_operations *ops)
27780+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27781 {
27782 static DEFINE_SPINLOCK(lock);
27783 const struct ata_port_operations *cur;
27784@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27785 return;
27786
27787 spin_lock(&lock);
27788+ pax_open_kernel();
27789
27790 for (cur = ops->inherits; cur; cur = cur->inherits) {
27791 void **inherit = (void **)cur;
27792@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27793 if (IS_ERR(*pp))
27794 *pp = NULL;
27795
27796- ops->inherits = NULL;
27797+ *(struct ata_port_operations **)&ops->inherits = NULL;
27798
27799+ pax_close_kernel();
27800 spin_unlock(&lock);
27801 }
27802
27803@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
27804 */
27805 /* KILLME - the only user left is ipr */
27806 void ata_host_init(struct ata_host *host, struct device *dev,
27807- unsigned long flags, struct ata_port_operations *ops)
27808+ unsigned long flags, const struct ata_port_operations *ops)
27809 {
27810 spin_lock_init(&host->lock);
27811 host->dev = dev;
27812@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
27813 /* truly dummy */
27814 }
27815
27816-struct ata_port_operations ata_dummy_port_ops = {
27817+const struct ata_port_operations ata_dummy_port_ops = {
27818 .qc_prep = ata_noop_qc_prep,
27819 .qc_issue = ata_dummy_qc_issue,
27820 .error_handler = ata_dummy_error_handler,
27821diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
27822index e5bdb9b..45a8e72 100644
27823--- a/drivers/ata/libata-eh.c
27824+++ b/drivers/ata/libata-eh.c
27825@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
27826 {
27827 struct ata_link *link;
27828
27829+ pax_track_stack();
27830+
27831 ata_for_each_link(link, ap, HOST_FIRST)
27832 ata_eh_link_report(link);
27833 }
27834@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
27835 */
27836 void ata_std_error_handler(struct ata_port *ap)
27837 {
27838- struct ata_port_operations *ops = ap->ops;
27839+ const struct ata_port_operations *ops = ap->ops;
27840 ata_reset_fn_t hardreset = ops->hardreset;
27841
27842 /* ignore built-in hardreset if SCR access is not available */
27843diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
27844index 51f0ffb..19ce3e3 100644
27845--- a/drivers/ata/libata-pmp.c
27846+++ b/drivers/ata/libata-pmp.c
27847@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
27848 */
27849 static int sata_pmp_eh_recover(struct ata_port *ap)
27850 {
27851- struct ata_port_operations *ops = ap->ops;
27852+ const struct ata_port_operations *ops = ap->ops;
27853 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
27854 struct ata_link *pmp_link = &ap->link;
27855 struct ata_device *pmp_dev = pmp_link->device;
27856diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
27857index d8f35fe..288180a 100644
27858--- a/drivers/ata/pata_acpi.c
27859+++ b/drivers/ata/pata_acpi.c
27860@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
27861 ATA_BMDMA_SHT(DRV_NAME),
27862 };
27863
27864-static struct ata_port_operations pacpi_ops = {
27865+static const struct ata_port_operations pacpi_ops = {
27866 .inherits = &ata_bmdma_port_ops,
27867 .qc_issue = pacpi_qc_issue,
27868 .cable_detect = pacpi_cable_detect,
27869diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
27870index 9434114..1f2f364 100644
27871--- a/drivers/ata/pata_ali.c
27872+++ b/drivers/ata/pata_ali.c
27873@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
27874 * Port operations for PIO only ALi
27875 */
27876
27877-static struct ata_port_operations ali_early_port_ops = {
27878+static const struct ata_port_operations ali_early_port_ops = {
27879 .inherits = &ata_sff_port_ops,
27880 .cable_detect = ata_cable_40wire,
27881 .set_piomode = ali_set_piomode,
27882@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
27883 * Port operations for DMA capable ALi without cable
27884 * detect
27885 */
27886-static struct ata_port_operations ali_20_port_ops = {
27887+static const struct ata_port_operations ali_20_port_ops = {
27888 .inherits = &ali_dma_base_ops,
27889 .cable_detect = ata_cable_40wire,
27890 .mode_filter = ali_20_filter,
27891@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
27892 /*
27893 * Port operations for DMA capable ALi with cable detect
27894 */
27895-static struct ata_port_operations ali_c2_port_ops = {
27896+static const struct ata_port_operations ali_c2_port_ops = {
27897 .inherits = &ali_dma_base_ops,
27898 .check_atapi_dma = ali_check_atapi_dma,
27899 .cable_detect = ali_c2_cable_detect,
27900@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
27901 /*
27902 * Port operations for DMA capable ALi with cable detect
27903 */
27904-static struct ata_port_operations ali_c4_port_ops = {
27905+static const struct ata_port_operations ali_c4_port_ops = {
27906 .inherits = &ali_dma_base_ops,
27907 .check_atapi_dma = ali_check_atapi_dma,
27908 .cable_detect = ali_c2_cable_detect,
27909@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
27910 /*
27911 * Port operations for DMA capable ALi with cable detect and LBA48
27912 */
27913-static struct ata_port_operations ali_c5_port_ops = {
27914+static const struct ata_port_operations ali_c5_port_ops = {
27915 .inherits = &ali_dma_base_ops,
27916 .check_atapi_dma = ali_check_atapi_dma,
27917 .dev_config = ali_warn_atapi_dma,
27918diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
27919index 567f3f7..c8ee0da 100644
27920--- a/drivers/ata/pata_amd.c
27921+++ b/drivers/ata/pata_amd.c
27922@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
27923 .prereset = amd_pre_reset,
27924 };
27925
27926-static struct ata_port_operations amd33_port_ops = {
27927+static const struct ata_port_operations amd33_port_ops = {
27928 .inherits = &amd_base_port_ops,
27929 .cable_detect = ata_cable_40wire,
27930 .set_piomode = amd33_set_piomode,
27931 .set_dmamode = amd33_set_dmamode,
27932 };
27933
27934-static struct ata_port_operations amd66_port_ops = {
27935+static const struct ata_port_operations amd66_port_ops = {
27936 .inherits = &amd_base_port_ops,
27937 .cable_detect = ata_cable_unknown,
27938 .set_piomode = amd66_set_piomode,
27939 .set_dmamode = amd66_set_dmamode,
27940 };
27941
27942-static struct ata_port_operations amd100_port_ops = {
27943+static const struct ata_port_operations amd100_port_ops = {
27944 .inherits = &amd_base_port_ops,
27945 .cable_detect = ata_cable_unknown,
27946 .set_piomode = amd100_set_piomode,
27947 .set_dmamode = amd100_set_dmamode,
27948 };
27949
27950-static struct ata_port_operations amd133_port_ops = {
27951+static const struct ata_port_operations amd133_port_ops = {
27952 .inherits = &amd_base_port_ops,
27953 .cable_detect = amd_cable_detect,
27954 .set_piomode = amd133_set_piomode,
27955@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
27956 .host_stop = nv_host_stop,
27957 };
27958
27959-static struct ata_port_operations nv100_port_ops = {
27960+static const struct ata_port_operations nv100_port_ops = {
27961 .inherits = &nv_base_port_ops,
27962 .set_piomode = nv100_set_piomode,
27963 .set_dmamode = nv100_set_dmamode,
27964 };
27965
27966-static struct ata_port_operations nv133_port_ops = {
27967+static const struct ata_port_operations nv133_port_ops = {
27968 .inherits = &nv_base_port_ops,
27969 .set_piomode = nv133_set_piomode,
27970 .set_dmamode = nv133_set_dmamode,
27971diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
27972index d332cfd..4b7eaae 100644
27973--- a/drivers/ata/pata_artop.c
27974+++ b/drivers/ata/pata_artop.c
27975@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
27976 ATA_BMDMA_SHT(DRV_NAME),
27977 };
27978
27979-static struct ata_port_operations artop6210_ops = {
27980+static const struct ata_port_operations artop6210_ops = {
27981 .inherits = &ata_bmdma_port_ops,
27982 .cable_detect = ata_cable_40wire,
27983 .set_piomode = artop6210_set_piomode,
27984@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
27985 .qc_defer = artop6210_qc_defer,
27986 };
27987
27988-static struct ata_port_operations artop6260_ops = {
27989+static const struct ata_port_operations artop6260_ops = {
27990 .inherits = &ata_bmdma_port_ops,
27991 .cable_detect = artop6260_cable_detect,
27992 .set_piomode = artop6260_set_piomode,
27993diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
27994index 5c129f9..7bb7ccb 100644
27995--- a/drivers/ata/pata_at32.c
27996+++ b/drivers/ata/pata_at32.c
27997@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
27998 ATA_PIO_SHT(DRV_NAME),
27999 };
28000
28001-static struct ata_port_operations at32_port_ops = {
28002+static const struct ata_port_operations at32_port_ops = {
28003 .inherits = &ata_sff_port_ops,
28004 .cable_detect = ata_cable_40wire,
28005 .set_piomode = pata_at32_set_piomode,
28006diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28007index 41c94b1..829006d 100644
28008--- a/drivers/ata/pata_at91.c
28009+++ b/drivers/ata/pata_at91.c
28010@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28011 ATA_PIO_SHT(DRV_NAME),
28012 };
28013
28014-static struct ata_port_operations pata_at91_port_ops = {
28015+static const struct ata_port_operations pata_at91_port_ops = {
28016 .inherits = &ata_sff_port_ops,
28017
28018 .sff_data_xfer = pata_at91_data_xfer_noirq,
28019diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28020index ae4454d..d391eb4 100644
28021--- a/drivers/ata/pata_atiixp.c
28022+++ b/drivers/ata/pata_atiixp.c
28023@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28024 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28025 };
28026
28027-static struct ata_port_operations atiixp_port_ops = {
28028+static const struct ata_port_operations atiixp_port_ops = {
28029 .inherits = &ata_bmdma_port_ops,
28030
28031 .qc_prep = ata_sff_dumb_qc_prep,
28032diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28033index 6fe7ded..2a425dc 100644
28034--- a/drivers/ata/pata_atp867x.c
28035+++ b/drivers/ata/pata_atp867x.c
28036@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28037 ATA_BMDMA_SHT(DRV_NAME),
28038 };
28039
28040-static struct ata_port_operations atp867x_ops = {
28041+static const struct ata_port_operations atp867x_ops = {
28042 .inherits = &ata_bmdma_port_ops,
28043 .cable_detect = atp867x_cable_detect,
28044 .set_piomode = atp867x_set_piomode,
28045diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28046index c4b47a3..b27a367 100644
28047--- a/drivers/ata/pata_bf54x.c
28048+++ b/drivers/ata/pata_bf54x.c
28049@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28050 .dma_boundary = ATA_DMA_BOUNDARY,
28051 };
28052
28053-static struct ata_port_operations bfin_pata_ops = {
28054+static const struct ata_port_operations bfin_pata_ops = {
28055 .inherits = &ata_sff_port_ops,
28056
28057 .set_piomode = bfin_set_piomode,
28058diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28059index 5acf9fa..84248be 100644
28060--- a/drivers/ata/pata_cmd640.c
28061+++ b/drivers/ata/pata_cmd640.c
28062@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28063 ATA_BMDMA_SHT(DRV_NAME),
28064 };
28065
28066-static struct ata_port_operations cmd640_port_ops = {
28067+static const struct ata_port_operations cmd640_port_ops = {
28068 .inherits = &ata_bmdma_port_ops,
28069 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28070 .sff_data_xfer = ata_sff_data_xfer_noirq,
28071diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28072index ccd2694..c869c3d 100644
28073--- a/drivers/ata/pata_cmd64x.c
28074+++ b/drivers/ata/pata_cmd64x.c
28075@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28076 .set_dmamode = cmd64x_set_dmamode,
28077 };
28078
28079-static struct ata_port_operations cmd64x_port_ops = {
28080+static const struct ata_port_operations cmd64x_port_ops = {
28081 .inherits = &cmd64x_base_ops,
28082 .cable_detect = ata_cable_40wire,
28083 };
28084
28085-static struct ata_port_operations cmd646r1_port_ops = {
28086+static const struct ata_port_operations cmd646r1_port_ops = {
28087 .inherits = &cmd64x_base_ops,
28088 .bmdma_stop = cmd646r1_bmdma_stop,
28089 .cable_detect = ata_cable_40wire,
28090 };
28091
28092-static struct ata_port_operations cmd648_port_ops = {
28093+static const struct ata_port_operations cmd648_port_ops = {
28094 .inherits = &cmd64x_base_ops,
28095 .bmdma_stop = cmd648_bmdma_stop,
28096 .cable_detect = cmd648_cable_detect,
28097diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28098index 0df83cf..d7595b0 100644
28099--- a/drivers/ata/pata_cs5520.c
28100+++ b/drivers/ata/pata_cs5520.c
28101@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28102 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28103 };
28104
28105-static struct ata_port_operations cs5520_port_ops = {
28106+static const struct ata_port_operations cs5520_port_ops = {
28107 .inherits = &ata_bmdma_port_ops,
28108 .qc_prep = ata_sff_dumb_qc_prep,
28109 .cable_detect = ata_cable_40wire,
28110diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28111index c974b05..6d26b11 100644
28112--- a/drivers/ata/pata_cs5530.c
28113+++ b/drivers/ata/pata_cs5530.c
28114@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28115 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28116 };
28117
28118-static struct ata_port_operations cs5530_port_ops = {
28119+static const struct ata_port_operations cs5530_port_ops = {
28120 .inherits = &ata_bmdma_port_ops,
28121
28122 .qc_prep = ata_sff_dumb_qc_prep,
28123diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28124index 403f561..aacd26b 100644
28125--- a/drivers/ata/pata_cs5535.c
28126+++ b/drivers/ata/pata_cs5535.c
28127@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28128 ATA_BMDMA_SHT(DRV_NAME),
28129 };
28130
28131-static struct ata_port_operations cs5535_port_ops = {
28132+static const struct ata_port_operations cs5535_port_ops = {
28133 .inherits = &ata_bmdma_port_ops,
28134 .cable_detect = cs5535_cable_detect,
28135 .set_piomode = cs5535_set_piomode,
28136diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28137index 6da4cb4..de24a25 100644
28138--- a/drivers/ata/pata_cs5536.c
28139+++ b/drivers/ata/pata_cs5536.c
28140@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28141 ATA_BMDMA_SHT(DRV_NAME),
28142 };
28143
28144-static struct ata_port_operations cs5536_port_ops = {
28145+static const struct ata_port_operations cs5536_port_ops = {
28146 .inherits = &ata_bmdma_port_ops,
28147 .cable_detect = cs5536_cable_detect,
28148 .set_piomode = cs5536_set_piomode,
28149diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28150index 8fb040b..b16a9c9 100644
28151--- a/drivers/ata/pata_cypress.c
28152+++ b/drivers/ata/pata_cypress.c
28153@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28154 ATA_BMDMA_SHT(DRV_NAME),
28155 };
28156
28157-static struct ata_port_operations cy82c693_port_ops = {
28158+static const struct ata_port_operations cy82c693_port_ops = {
28159 .inherits = &ata_bmdma_port_ops,
28160 .cable_detect = ata_cable_40wire,
28161 .set_piomode = cy82c693_set_piomode,
28162diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28163index 2a6412f..555ee11 100644
28164--- a/drivers/ata/pata_efar.c
28165+++ b/drivers/ata/pata_efar.c
28166@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28167 ATA_BMDMA_SHT(DRV_NAME),
28168 };
28169
28170-static struct ata_port_operations efar_ops = {
28171+static const struct ata_port_operations efar_ops = {
28172 .inherits = &ata_bmdma_port_ops,
28173 .cable_detect = efar_cable_detect,
28174 .set_piomode = efar_set_piomode,
28175diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28176index b9d8836..0b92030 100644
28177--- a/drivers/ata/pata_hpt366.c
28178+++ b/drivers/ata/pata_hpt366.c
28179@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28180 * Configuration for HPT366/68
28181 */
28182
28183-static struct ata_port_operations hpt366_port_ops = {
28184+static const struct ata_port_operations hpt366_port_ops = {
28185 .inherits = &ata_bmdma_port_ops,
28186 .cable_detect = hpt36x_cable_detect,
28187 .mode_filter = hpt366_filter,
28188diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28189index 5af7f19..00c4980 100644
28190--- a/drivers/ata/pata_hpt37x.c
28191+++ b/drivers/ata/pata_hpt37x.c
28192@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28193 * Configuration for HPT370
28194 */
28195
28196-static struct ata_port_operations hpt370_port_ops = {
28197+static const struct ata_port_operations hpt370_port_ops = {
28198 .inherits = &ata_bmdma_port_ops,
28199
28200 .bmdma_stop = hpt370_bmdma_stop,
28201@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28202 * Configuration for HPT370A. Close to 370 but less filters
28203 */
28204
28205-static struct ata_port_operations hpt370a_port_ops = {
28206+static const struct ata_port_operations hpt370a_port_ops = {
28207 .inherits = &hpt370_port_ops,
28208 .mode_filter = hpt370a_filter,
28209 };
28210@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28211 * and DMA mode setting functionality.
28212 */
28213
28214-static struct ata_port_operations hpt372_port_ops = {
28215+static const struct ata_port_operations hpt372_port_ops = {
28216 .inherits = &ata_bmdma_port_ops,
28217
28218 .bmdma_stop = hpt37x_bmdma_stop,
28219@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28220 * but we have a different cable detection procedure for function 1.
28221 */
28222
28223-static struct ata_port_operations hpt374_fn1_port_ops = {
28224+static const struct ata_port_operations hpt374_fn1_port_ops = {
28225 .inherits = &hpt372_port_ops,
28226 .prereset = hpt374_fn1_pre_reset,
28227 };
28228diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28229index 100f227..2e39382 100644
28230--- a/drivers/ata/pata_hpt3x2n.c
28231+++ b/drivers/ata/pata_hpt3x2n.c
28232@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28233 * Configuration for HPT3x2n.
28234 */
28235
28236-static struct ata_port_operations hpt3x2n_port_ops = {
28237+static const struct ata_port_operations hpt3x2n_port_ops = {
28238 .inherits = &ata_bmdma_port_ops,
28239
28240 .bmdma_stop = hpt3x2n_bmdma_stop,
28241diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28242index 7e31025..6fca8f4 100644
28243--- a/drivers/ata/pata_hpt3x3.c
28244+++ b/drivers/ata/pata_hpt3x3.c
28245@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28246 ATA_BMDMA_SHT(DRV_NAME),
28247 };
28248
28249-static struct ata_port_operations hpt3x3_port_ops = {
28250+static const struct ata_port_operations hpt3x3_port_ops = {
28251 .inherits = &ata_bmdma_port_ops,
28252 .cable_detect = ata_cable_40wire,
28253 .set_piomode = hpt3x3_set_piomode,
28254diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28255index b663b7f..9a26c2a 100644
28256--- a/drivers/ata/pata_icside.c
28257+++ b/drivers/ata/pata_icside.c
28258@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28259 }
28260 }
28261
28262-static struct ata_port_operations pata_icside_port_ops = {
28263+static const struct ata_port_operations pata_icside_port_ops = {
28264 .inherits = &ata_sff_port_ops,
28265 /* no need to build any PRD tables for DMA */
28266 .qc_prep = ata_noop_qc_prep,
28267diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28268index 4bceb88..457dfb6 100644
28269--- a/drivers/ata/pata_isapnp.c
28270+++ b/drivers/ata/pata_isapnp.c
28271@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28272 ATA_PIO_SHT(DRV_NAME),
28273 };
28274
28275-static struct ata_port_operations isapnp_port_ops = {
28276+static const struct ata_port_operations isapnp_port_ops = {
28277 .inherits = &ata_sff_port_ops,
28278 .cable_detect = ata_cable_40wire,
28279 };
28280
28281-static struct ata_port_operations isapnp_noalt_port_ops = {
28282+static const struct ata_port_operations isapnp_noalt_port_ops = {
28283 .inherits = &ata_sff_port_ops,
28284 .cable_detect = ata_cable_40wire,
28285 /* No altstatus so we don't want to use the lost interrupt poll */
28286diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28287index f156da8..24976e2 100644
28288--- a/drivers/ata/pata_it8213.c
28289+++ b/drivers/ata/pata_it8213.c
28290@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28291 };
28292
28293
28294-static struct ata_port_operations it8213_ops = {
28295+static const struct ata_port_operations it8213_ops = {
28296 .inherits = &ata_bmdma_port_ops,
28297 .cable_detect = it8213_cable_detect,
28298 .set_piomode = it8213_set_piomode,
28299diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28300index 188bc2f..ca9e785 100644
28301--- a/drivers/ata/pata_it821x.c
28302+++ b/drivers/ata/pata_it821x.c
28303@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28304 ATA_BMDMA_SHT(DRV_NAME),
28305 };
28306
28307-static struct ata_port_operations it821x_smart_port_ops = {
28308+static const struct ata_port_operations it821x_smart_port_ops = {
28309 .inherits = &ata_bmdma_port_ops,
28310
28311 .check_atapi_dma= it821x_check_atapi_dma,
28312@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28313 .port_start = it821x_port_start,
28314 };
28315
28316-static struct ata_port_operations it821x_passthru_port_ops = {
28317+static const struct ata_port_operations it821x_passthru_port_ops = {
28318 .inherits = &ata_bmdma_port_ops,
28319
28320 .check_atapi_dma= it821x_check_atapi_dma,
28321@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28322 .port_start = it821x_port_start,
28323 };
28324
28325-static struct ata_port_operations it821x_rdc_port_ops = {
28326+static const struct ata_port_operations it821x_rdc_port_ops = {
28327 .inherits = &ata_bmdma_port_ops,
28328
28329 .check_atapi_dma= it821x_check_atapi_dma,
28330diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28331index ba54b08..4b952b7 100644
28332--- a/drivers/ata/pata_ixp4xx_cf.c
28333+++ b/drivers/ata/pata_ixp4xx_cf.c
28334@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28335 ATA_PIO_SHT(DRV_NAME),
28336 };
28337
28338-static struct ata_port_operations ixp4xx_port_ops = {
28339+static const struct ata_port_operations ixp4xx_port_ops = {
28340 .inherits = &ata_sff_port_ops,
28341 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28342 .cable_detect = ata_cable_40wire,
28343diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28344index 3a1474a..434b0ff 100644
28345--- a/drivers/ata/pata_jmicron.c
28346+++ b/drivers/ata/pata_jmicron.c
28347@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28348 ATA_BMDMA_SHT(DRV_NAME),
28349 };
28350
28351-static struct ata_port_operations jmicron_ops = {
28352+static const struct ata_port_operations jmicron_ops = {
28353 .inherits = &ata_bmdma_port_ops,
28354 .prereset = jmicron_pre_reset,
28355 };
28356diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28357index 6932e56..220e71d 100644
28358--- a/drivers/ata/pata_legacy.c
28359+++ b/drivers/ata/pata_legacy.c
28360@@ -106,7 +106,7 @@ struct legacy_probe {
28361
28362 struct legacy_controller {
28363 const char *name;
28364- struct ata_port_operations *ops;
28365+ const struct ata_port_operations *ops;
28366 unsigned int pio_mask;
28367 unsigned int flags;
28368 unsigned int pflags;
28369@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28370 * pio_mask as well.
28371 */
28372
28373-static struct ata_port_operations simple_port_ops = {
28374+static const struct ata_port_operations simple_port_ops = {
28375 .inherits = &legacy_base_port_ops,
28376 .sff_data_xfer = ata_sff_data_xfer_noirq,
28377 };
28378
28379-static struct ata_port_operations legacy_port_ops = {
28380+static const struct ata_port_operations legacy_port_ops = {
28381 .inherits = &legacy_base_port_ops,
28382 .sff_data_xfer = ata_sff_data_xfer_noirq,
28383 .set_mode = legacy_set_mode,
28384@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28385 return buflen;
28386 }
28387
28388-static struct ata_port_operations pdc20230_port_ops = {
28389+static const struct ata_port_operations pdc20230_port_ops = {
28390 .inherits = &legacy_base_port_ops,
28391 .set_piomode = pdc20230_set_piomode,
28392 .sff_data_xfer = pdc_data_xfer_vlb,
28393@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28394 ioread8(ap->ioaddr.status_addr);
28395 }
28396
28397-static struct ata_port_operations ht6560a_port_ops = {
28398+static const struct ata_port_operations ht6560a_port_ops = {
28399 .inherits = &legacy_base_port_ops,
28400 .set_piomode = ht6560a_set_piomode,
28401 };
28402@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28403 ioread8(ap->ioaddr.status_addr);
28404 }
28405
28406-static struct ata_port_operations ht6560b_port_ops = {
28407+static const struct ata_port_operations ht6560b_port_ops = {
28408 .inherits = &legacy_base_port_ops,
28409 .set_piomode = ht6560b_set_piomode,
28410 };
28411@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28412 }
28413
28414
28415-static struct ata_port_operations opti82c611a_port_ops = {
28416+static const struct ata_port_operations opti82c611a_port_ops = {
28417 .inherits = &legacy_base_port_ops,
28418 .set_piomode = opti82c611a_set_piomode,
28419 };
28420@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28421 return ata_sff_qc_issue(qc);
28422 }
28423
28424-static struct ata_port_operations opti82c46x_port_ops = {
28425+static const struct ata_port_operations opti82c46x_port_ops = {
28426 .inherits = &legacy_base_port_ops,
28427 .set_piomode = opti82c46x_set_piomode,
28428 .qc_issue = opti82c46x_qc_issue,
28429@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28430 return 0;
28431 }
28432
28433-static struct ata_port_operations qdi6500_port_ops = {
28434+static const struct ata_port_operations qdi6500_port_ops = {
28435 .inherits = &legacy_base_port_ops,
28436 .set_piomode = qdi6500_set_piomode,
28437 .qc_issue = qdi_qc_issue,
28438 .sff_data_xfer = vlb32_data_xfer,
28439 };
28440
28441-static struct ata_port_operations qdi6580_port_ops = {
28442+static const struct ata_port_operations qdi6580_port_ops = {
28443 .inherits = &legacy_base_port_ops,
28444 .set_piomode = qdi6580_set_piomode,
28445 .sff_data_xfer = vlb32_data_xfer,
28446 };
28447
28448-static struct ata_port_operations qdi6580dp_port_ops = {
28449+static const struct ata_port_operations qdi6580dp_port_ops = {
28450 .inherits = &legacy_base_port_ops,
28451 .set_piomode = qdi6580dp_set_piomode,
28452 .sff_data_xfer = vlb32_data_xfer,
28453@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28454 return 0;
28455 }
28456
28457-static struct ata_port_operations winbond_port_ops = {
28458+static const struct ata_port_operations winbond_port_ops = {
28459 .inherits = &legacy_base_port_ops,
28460 .set_piomode = winbond_set_piomode,
28461 .sff_data_xfer = vlb32_data_xfer,
28462@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28463 int pio_modes = controller->pio_mask;
28464 unsigned long io = probe->port;
28465 u32 mask = (1 << probe->slot);
28466- struct ata_port_operations *ops = controller->ops;
28467+ const struct ata_port_operations *ops = controller->ops;
28468 struct legacy_data *ld = &legacy_data[probe->slot];
28469 struct ata_host *host = NULL;
28470 struct ata_port *ap;
28471diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28472index 2096fb7..4d090fc 100644
28473--- a/drivers/ata/pata_marvell.c
28474+++ b/drivers/ata/pata_marvell.c
28475@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28476 ATA_BMDMA_SHT(DRV_NAME),
28477 };
28478
28479-static struct ata_port_operations marvell_ops = {
28480+static const struct ata_port_operations marvell_ops = {
28481 .inherits = &ata_bmdma_port_ops,
28482 .cable_detect = marvell_cable_detect,
28483 .prereset = marvell_pre_reset,
28484diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28485index 99d41be..7d56aa8 100644
28486--- a/drivers/ata/pata_mpc52xx.c
28487+++ b/drivers/ata/pata_mpc52xx.c
28488@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28489 ATA_PIO_SHT(DRV_NAME),
28490 };
28491
28492-static struct ata_port_operations mpc52xx_ata_port_ops = {
28493+static const struct ata_port_operations mpc52xx_ata_port_ops = {
28494 .inherits = &ata_bmdma_port_ops,
28495 .sff_dev_select = mpc52xx_ata_dev_select,
28496 .set_piomode = mpc52xx_ata_set_piomode,
28497diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28498index b21f002..0a27e7f 100644
28499--- a/drivers/ata/pata_mpiix.c
28500+++ b/drivers/ata/pata_mpiix.c
28501@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28502 ATA_PIO_SHT(DRV_NAME),
28503 };
28504
28505-static struct ata_port_operations mpiix_port_ops = {
28506+static const struct ata_port_operations mpiix_port_ops = {
28507 .inherits = &ata_sff_port_ops,
28508 .qc_issue = mpiix_qc_issue,
28509 .cable_detect = ata_cable_40wire,
28510diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28511index f0d52f7..89c3be3 100644
28512--- a/drivers/ata/pata_netcell.c
28513+++ b/drivers/ata/pata_netcell.c
28514@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28515 ATA_BMDMA_SHT(DRV_NAME),
28516 };
28517
28518-static struct ata_port_operations netcell_ops = {
28519+static const struct ata_port_operations netcell_ops = {
28520 .inherits = &ata_bmdma_port_ops,
28521 .cable_detect = ata_cable_80wire,
28522 .read_id = netcell_read_id,
28523diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28524index dd53a66..a3f4317 100644
28525--- a/drivers/ata/pata_ninja32.c
28526+++ b/drivers/ata/pata_ninja32.c
28527@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28528 ATA_BMDMA_SHT(DRV_NAME),
28529 };
28530
28531-static struct ata_port_operations ninja32_port_ops = {
28532+static const struct ata_port_operations ninja32_port_ops = {
28533 .inherits = &ata_bmdma_port_ops,
28534 .sff_dev_select = ninja32_dev_select,
28535 .cable_detect = ata_cable_40wire,
28536diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28537index ca53fac..9aa93ef 100644
28538--- a/drivers/ata/pata_ns87410.c
28539+++ b/drivers/ata/pata_ns87410.c
28540@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28541 ATA_PIO_SHT(DRV_NAME),
28542 };
28543
28544-static struct ata_port_operations ns87410_port_ops = {
28545+static const struct ata_port_operations ns87410_port_ops = {
28546 .inherits = &ata_sff_port_ops,
28547 .qc_issue = ns87410_qc_issue,
28548 .cable_detect = ata_cable_40wire,
28549diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28550index 773b159..55f454e 100644
28551--- a/drivers/ata/pata_ns87415.c
28552+++ b/drivers/ata/pata_ns87415.c
28553@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28554 }
28555 #endif /* 87560 SuperIO Support */
28556
28557-static struct ata_port_operations ns87415_pata_ops = {
28558+static const struct ata_port_operations ns87415_pata_ops = {
28559 .inherits = &ata_bmdma_port_ops,
28560
28561 .check_atapi_dma = ns87415_check_atapi_dma,
28562@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28563 };
28564
28565 #if defined(CONFIG_SUPERIO)
28566-static struct ata_port_operations ns87560_pata_ops = {
28567+static const struct ata_port_operations ns87560_pata_ops = {
28568 .inherits = &ns87415_pata_ops,
28569 .sff_tf_read = ns87560_tf_read,
28570 .sff_check_status = ns87560_check_status,
28571diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28572index d6f6956..639295b 100644
28573--- a/drivers/ata/pata_octeon_cf.c
28574+++ b/drivers/ata/pata_octeon_cf.c
28575@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28576 return 0;
28577 }
28578
28579+/* cannot be const */
28580 static struct ata_port_operations octeon_cf_ops = {
28581 .inherits = &ata_sff_port_ops,
28582 .check_atapi_dma = octeon_cf_check_atapi_dma,
28583diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28584index 84ac503..adee1cd 100644
28585--- a/drivers/ata/pata_oldpiix.c
28586+++ b/drivers/ata/pata_oldpiix.c
28587@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28588 ATA_BMDMA_SHT(DRV_NAME),
28589 };
28590
28591-static struct ata_port_operations oldpiix_pata_ops = {
28592+static const struct ata_port_operations oldpiix_pata_ops = {
28593 .inherits = &ata_bmdma_port_ops,
28594 .qc_issue = oldpiix_qc_issue,
28595 .cable_detect = ata_cable_40wire,
28596diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28597index 99eddda..3a4c0aa 100644
28598--- a/drivers/ata/pata_opti.c
28599+++ b/drivers/ata/pata_opti.c
28600@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28601 ATA_PIO_SHT(DRV_NAME),
28602 };
28603
28604-static struct ata_port_operations opti_port_ops = {
28605+static const struct ata_port_operations opti_port_ops = {
28606 .inherits = &ata_sff_port_ops,
28607 .cable_detect = ata_cable_40wire,
28608 .set_piomode = opti_set_piomode,
28609diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28610index 86885a4..8e9968d 100644
28611--- a/drivers/ata/pata_optidma.c
28612+++ b/drivers/ata/pata_optidma.c
28613@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28614 ATA_BMDMA_SHT(DRV_NAME),
28615 };
28616
28617-static struct ata_port_operations optidma_port_ops = {
28618+static const struct ata_port_operations optidma_port_ops = {
28619 .inherits = &ata_bmdma_port_ops,
28620 .cable_detect = ata_cable_40wire,
28621 .set_piomode = optidma_set_pio_mode,
28622@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28623 .prereset = optidma_pre_reset,
28624 };
28625
28626-static struct ata_port_operations optiplus_port_ops = {
28627+static const struct ata_port_operations optiplus_port_ops = {
28628 .inherits = &optidma_port_ops,
28629 .set_piomode = optiplus_set_pio_mode,
28630 .set_dmamode = optiplus_set_dma_mode,
28631diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28632index 11fb4cc..1a14022 100644
28633--- a/drivers/ata/pata_palmld.c
28634+++ b/drivers/ata/pata_palmld.c
28635@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28636 ATA_PIO_SHT(DRV_NAME),
28637 };
28638
28639-static struct ata_port_operations palmld_port_ops = {
28640+static const struct ata_port_operations palmld_port_ops = {
28641 .inherits = &ata_sff_port_ops,
28642 .sff_data_xfer = ata_sff_data_xfer_noirq,
28643 .cable_detect = ata_cable_40wire,
28644diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28645index dc99e26..7f4b1e4 100644
28646--- a/drivers/ata/pata_pcmcia.c
28647+++ b/drivers/ata/pata_pcmcia.c
28648@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28649 ATA_PIO_SHT(DRV_NAME),
28650 };
28651
28652-static struct ata_port_operations pcmcia_port_ops = {
28653+static const struct ata_port_operations pcmcia_port_ops = {
28654 .inherits = &ata_sff_port_ops,
28655 .sff_data_xfer = ata_sff_data_xfer_noirq,
28656 .cable_detect = ata_cable_40wire,
28657 .set_mode = pcmcia_set_mode,
28658 };
28659
28660-static struct ata_port_operations pcmcia_8bit_port_ops = {
28661+static const struct ata_port_operations pcmcia_8bit_port_ops = {
28662 .inherits = &ata_sff_port_ops,
28663 .sff_data_xfer = ata_data_xfer_8bit,
28664 .cable_detect = ata_cable_40wire,
28665@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28666 unsigned long io_base, ctl_base;
28667 void __iomem *io_addr, *ctl_addr;
28668 int n_ports = 1;
28669- struct ata_port_operations *ops = &pcmcia_port_ops;
28670+ const struct ata_port_operations *ops = &pcmcia_port_ops;
28671
28672 info = kzalloc(sizeof(*info), GFP_KERNEL);
28673 if (info == NULL)
28674diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28675index ca5cad0..3a1f125 100644
28676--- a/drivers/ata/pata_pdc2027x.c
28677+++ b/drivers/ata/pata_pdc2027x.c
28678@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28679 ATA_BMDMA_SHT(DRV_NAME),
28680 };
28681
28682-static struct ata_port_operations pdc2027x_pata100_ops = {
28683+static const struct ata_port_operations pdc2027x_pata100_ops = {
28684 .inherits = &ata_bmdma_port_ops,
28685 .check_atapi_dma = pdc2027x_check_atapi_dma,
28686 .cable_detect = pdc2027x_cable_detect,
28687 .prereset = pdc2027x_prereset,
28688 };
28689
28690-static struct ata_port_operations pdc2027x_pata133_ops = {
28691+static const struct ata_port_operations pdc2027x_pata133_ops = {
28692 .inherits = &pdc2027x_pata100_ops,
28693 .mode_filter = pdc2027x_mode_filter,
28694 .set_piomode = pdc2027x_set_piomode,
28695diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28696index 2911120..4bf62aa 100644
28697--- a/drivers/ata/pata_pdc202xx_old.c
28698+++ b/drivers/ata/pata_pdc202xx_old.c
28699@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28700 ATA_BMDMA_SHT(DRV_NAME),
28701 };
28702
28703-static struct ata_port_operations pdc2024x_port_ops = {
28704+static const struct ata_port_operations pdc2024x_port_ops = {
28705 .inherits = &ata_bmdma_port_ops,
28706
28707 .cable_detect = ata_cable_40wire,
28708@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28709 .sff_exec_command = pdc202xx_exec_command,
28710 };
28711
28712-static struct ata_port_operations pdc2026x_port_ops = {
28713+static const struct ata_port_operations pdc2026x_port_ops = {
28714 .inherits = &pdc2024x_port_ops,
28715
28716 .check_atapi_dma = pdc2026x_check_atapi_dma,
28717diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28718index 3f6ebc6..a18c358 100644
28719--- a/drivers/ata/pata_platform.c
28720+++ b/drivers/ata/pata_platform.c
28721@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28722 ATA_PIO_SHT(DRV_NAME),
28723 };
28724
28725-static struct ata_port_operations pata_platform_port_ops = {
28726+static const struct ata_port_operations pata_platform_port_ops = {
28727 .inherits = &ata_sff_port_ops,
28728 .sff_data_xfer = ata_sff_data_xfer_noirq,
28729 .cable_detect = ata_cable_unknown,
28730diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28731index 45879dc..165a9f9 100644
28732--- a/drivers/ata/pata_qdi.c
28733+++ b/drivers/ata/pata_qdi.c
28734@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28735 ATA_PIO_SHT(DRV_NAME),
28736 };
28737
28738-static struct ata_port_operations qdi6500_port_ops = {
28739+static const struct ata_port_operations qdi6500_port_ops = {
28740 .inherits = &ata_sff_port_ops,
28741 .qc_issue = qdi_qc_issue,
28742 .sff_data_xfer = qdi_data_xfer,
28743@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28744 .set_piomode = qdi6500_set_piomode,
28745 };
28746
28747-static struct ata_port_operations qdi6580_port_ops = {
28748+static const struct ata_port_operations qdi6580_port_ops = {
28749 .inherits = &qdi6500_port_ops,
28750 .set_piomode = qdi6580_set_piomode,
28751 };
28752diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28753index 4401b33..716c5cc 100644
28754--- a/drivers/ata/pata_radisys.c
28755+++ b/drivers/ata/pata_radisys.c
28756@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28757 ATA_BMDMA_SHT(DRV_NAME),
28758 };
28759
28760-static struct ata_port_operations radisys_pata_ops = {
28761+static const struct ata_port_operations radisys_pata_ops = {
28762 .inherits = &ata_bmdma_port_ops,
28763 .qc_issue = radisys_qc_issue,
28764 .cable_detect = ata_cable_unknown,
28765diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28766index 45f1e10..fab6bca 100644
28767--- a/drivers/ata/pata_rb532_cf.c
28768+++ b/drivers/ata/pata_rb532_cf.c
28769@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28770 return IRQ_HANDLED;
28771 }
28772
28773-static struct ata_port_operations rb532_pata_port_ops = {
28774+static const struct ata_port_operations rb532_pata_port_ops = {
28775 .inherits = &ata_sff_port_ops,
28776 .sff_data_xfer = ata_sff_data_xfer32,
28777 };
28778diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28779index c843a1e..b5853c3 100644
28780--- a/drivers/ata/pata_rdc.c
28781+++ b/drivers/ata/pata_rdc.c
28782@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28783 pci_write_config_byte(dev, 0x48, udma_enable);
28784 }
28785
28786-static struct ata_port_operations rdc_pata_ops = {
28787+static const struct ata_port_operations rdc_pata_ops = {
28788 .inherits = &ata_bmdma32_port_ops,
28789 .cable_detect = rdc_pata_cable_detect,
28790 .set_piomode = rdc_set_piomode,
28791diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
28792index a5e4dfe..080c8c9 100644
28793--- a/drivers/ata/pata_rz1000.c
28794+++ b/drivers/ata/pata_rz1000.c
28795@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
28796 ATA_PIO_SHT(DRV_NAME),
28797 };
28798
28799-static struct ata_port_operations rz1000_port_ops = {
28800+static const struct ata_port_operations rz1000_port_ops = {
28801 .inherits = &ata_sff_port_ops,
28802 .cable_detect = ata_cable_40wire,
28803 .set_mode = rz1000_set_mode,
28804diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
28805index 3bbed83..e309daf 100644
28806--- a/drivers/ata/pata_sc1200.c
28807+++ b/drivers/ata/pata_sc1200.c
28808@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
28809 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28810 };
28811
28812-static struct ata_port_operations sc1200_port_ops = {
28813+static const struct ata_port_operations sc1200_port_ops = {
28814 .inherits = &ata_bmdma_port_ops,
28815 .qc_prep = ata_sff_dumb_qc_prep,
28816 .qc_issue = sc1200_qc_issue,
28817diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
28818index 4257d6b..4c1d9d5 100644
28819--- a/drivers/ata/pata_scc.c
28820+++ b/drivers/ata/pata_scc.c
28821@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
28822 ATA_BMDMA_SHT(DRV_NAME),
28823 };
28824
28825-static struct ata_port_operations scc_pata_ops = {
28826+static const struct ata_port_operations scc_pata_ops = {
28827 .inherits = &ata_bmdma_port_ops,
28828
28829 .set_piomode = scc_set_piomode,
28830diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
28831index 99cceb4..e2e0a87 100644
28832--- a/drivers/ata/pata_sch.c
28833+++ b/drivers/ata/pata_sch.c
28834@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
28835 ATA_BMDMA_SHT(DRV_NAME),
28836 };
28837
28838-static struct ata_port_operations sch_pata_ops = {
28839+static const struct ata_port_operations sch_pata_ops = {
28840 .inherits = &ata_bmdma_port_ops,
28841 .cable_detect = ata_cable_unknown,
28842 .set_piomode = sch_set_piomode,
28843diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
28844index beaed12..39969f1 100644
28845--- a/drivers/ata/pata_serverworks.c
28846+++ b/drivers/ata/pata_serverworks.c
28847@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
28848 ATA_BMDMA_SHT(DRV_NAME),
28849 };
28850
28851-static struct ata_port_operations serverworks_osb4_port_ops = {
28852+static const struct ata_port_operations serverworks_osb4_port_ops = {
28853 .inherits = &ata_bmdma_port_ops,
28854 .cable_detect = serverworks_cable_detect,
28855 .mode_filter = serverworks_osb4_filter,
28856@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
28857 .set_dmamode = serverworks_set_dmamode,
28858 };
28859
28860-static struct ata_port_operations serverworks_csb_port_ops = {
28861+static const struct ata_port_operations serverworks_csb_port_ops = {
28862 .inherits = &serverworks_osb4_port_ops,
28863 .mode_filter = serverworks_csb_filter,
28864 };
28865diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
28866index a2ace48..0463b44 100644
28867--- a/drivers/ata/pata_sil680.c
28868+++ b/drivers/ata/pata_sil680.c
28869@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
28870 ATA_BMDMA_SHT(DRV_NAME),
28871 };
28872
28873-static struct ata_port_operations sil680_port_ops = {
28874+static const struct ata_port_operations sil680_port_ops = {
28875 .inherits = &ata_bmdma32_port_ops,
28876 .cable_detect = sil680_cable_detect,
28877 .set_piomode = sil680_set_piomode,
28878diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
28879index 488e77b..b3724d5 100644
28880--- a/drivers/ata/pata_sis.c
28881+++ b/drivers/ata/pata_sis.c
28882@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
28883 ATA_BMDMA_SHT(DRV_NAME),
28884 };
28885
28886-static struct ata_port_operations sis_133_for_sata_ops = {
28887+static const struct ata_port_operations sis_133_for_sata_ops = {
28888 .inherits = &ata_bmdma_port_ops,
28889 .set_piomode = sis_133_set_piomode,
28890 .set_dmamode = sis_133_set_dmamode,
28891 .cable_detect = sis_133_cable_detect,
28892 };
28893
28894-static struct ata_port_operations sis_base_ops = {
28895+static const struct ata_port_operations sis_base_ops = {
28896 .inherits = &ata_bmdma_port_ops,
28897 .prereset = sis_pre_reset,
28898 };
28899
28900-static struct ata_port_operations sis_133_ops = {
28901+static const struct ata_port_operations sis_133_ops = {
28902 .inherits = &sis_base_ops,
28903 .set_piomode = sis_133_set_piomode,
28904 .set_dmamode = sis_133_set_dmamode,
28905 .cable_detect = sis_133_cable_detect,
28906 };
28907
28908-static struct ata_port_operations sis_133_early_ops = {
28909+static const struct ata_port_operations sis_133_early_ops = {
28910 .inherits = &sis_base_ops,
28911 .set_piomode = sis_100_set_piomode,
28912 .set_dmamode = sis_133_early_set_dmamode,
28913 .cable_detect = sis_66_cable_detect,
28914 };
28915
28916-static struct ata_port_operations sis_100_ops = {
28917+static const struct ata_port_operations sis_100_ops = {
28918 .inherits = &sis_base_ops,
28919 .set_piomode = sis_100_set_piomode,
28920 .set_dmamode = sis_100_set_dmamode,
28921 .cable_detect = sis_66_cable_detect,
28922 };
28923
28924-static struct ata_port_operations sis_66_ops = {
28925+static const struct ata_port_operations sis_66_ops = {
28926 .inherits = &sis_base_ops,
28927 .set_piomode = sis_old_set_piomode,
28928 .set_dmamode = sis_66_set_dmamode,
28929 .cable_detect = sis_66_cable_detect,
28930 };
28931
28932-static struct ata_port_operations sis_old_ops = {
28933+static const struct ata_port_operations sis_old_ops = {
28934 .inherits = &sis_base_ops,
28935 .set_piomode = sis_old_set_piomode,
28936 .set_dmamode = sis_old_set_dmamode,
28937diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
28938index 29f733c..43e9ca0 100644
28939--- a/drivers/ata/pata_sl82c105.c
28940+++ b/drivers/ata/pata_sl82c105.c
28941@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
28942 ATA_BMDMA_SHT(DRV_NAME),
28943 };
28944
28945-static struct ata_port_operations sl82c105_port_ops = {
28946+static const struct ata_port_operations sl82c105_port_ops = {
28947 .inherits = &ata_bmdma_port_ops,
28948 .qc_defer = sl82c105_qc_defer,
28949 .bmdma_start = sl82c105_bmdma_start,
28950diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
28951index f1f13ff..df39e99 100644
28952--- a/drivers/ata/pata_triflex.c
28953+++ b/drivers/ata/pata_triflex.c
28954@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
28955 ATA_BMDMA_SHT(DRV_NAME),
28956 };
28957
28958-static struct ata_port_operations triflex_port_ops = {
28959+static const struct ata_port_operations triflex_port_ops = {
28960 .inherits = &ata_bmdma_port_ops,
28961 .bmdma_start = triflex_bmdma_start,
28962 .bmdma_stop = triflex_bmdma_stop,
28963diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
28964index 1d73b8d..98a4b29 100644
28965--- a/drivers/ata/pata_via.c
28966+++ b/drivers/ata/pata_via.c
28967@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
28968 ATA_BMDMA_SHT(DRV_NAME),
28969 };
28970
28971-static struct ata_port_operations via_port_ops = {
28972+static const struct ata_port_operations via_port_ops = {
28973 .inherits = &ata_bmdma_port_ops,
28974 .cable_detect = via_cable_detect,
28975 .set_piomode = via_set_piomode,
28976@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
28977 .port_start = via_port_start,
28978 };
28979
28980-static struct ata_port_operations via_port_ops_noirq = {
28981+static const struct ata_port_operations via_port_ops_noirq = {
28982 .inherits = &via_port_ops,
28983 .sff_data_xfer = ata_sff_data_xfer_noirq,
28984 };
28985diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
28986index 6d8619b..ad511c4 100644
28987--- a/drivers/ata/pata_winbond.c
28988+++ b/drivers/ata/pata_winbond.c
28989@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
28990 ATA_PIO_SHT(DRV_NAME),
28991 };
28992
28993-static struct ata_port_operations winbond_port_ops = {
28994+static const struct ata_port_operations winbond_port_ops = {
28995 .inherits = &ata_sff_port_ops,
28996 .sff_data_xfer = winbond_data_xfer,
28997 .cable_detect = ata_cable_40wire,
28998diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
28999index 6c65b07..f996ec7 100644
29000--- a/drivers/ata/pdc_adma.c
29001+++ b/drivers/ata/pdc_adma.c
29002@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
29003 .dma_boundary = ADMA_DMA_BOUNDARY,
29004 };
29005
29006-static struct ata_port_operations adma_ata_ops = {
29007+static const struct ata_port_operations adma_ata_ops = {
29008 .inherits = &ata_sff_port_ops,
29009
29010 .lost_interrupt = ATA_OP_NULL,
29011diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29012index 172b57e..c49bc1e 100644
29013--- a/drivers/ata/sata_fsl.c
29014+++ b/drivers/ata/sata_fsl.c
29015@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29016 .dma_boundary = ATA_DMA_BOUNDARY,
29017 };
29018
29019-static struct ata_port_operations sata_fsl_ops = {
29020+static const struct ata_port_operations sata_fsl_ops = {
29021 .inherits = &sata_pmp_port_ops,
29022
29023 .qc_defer = ata_std_qc_defer,
29024diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29025index 4406902..60603ef 100644
29026--- a/drivers/ata/sata_inic162x.c
29027+++ b/drivers/ata/sata_inic162x.c
29028@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29029 return 0;
29030 }
29031
29032-static struct ata_port_operations inic_port_ops = {
29033+static const struct ata_port_operations inic_port_ops = {
29034 .inherits = &sata_port_ops,
29035
29036 .check_atapi_dma = inic_check_atapi_dma,
29037diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29038index cf41126..8107be6 100644
29039--- a/drivers/ata/sata_mv.c
29040+++ b/drivers/ata/sata_mv.c
29041@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29042 .dma_boundary = MV_DMA_BOUNDARY,
29043 };
29044
29045-static struct ata_port_operations mv5_ops = {
29046+static const struct ata_port_operations mv5_ops = {
29047 .inherits = &ata_sff_port_ops,
29048
29049 .lost_interrupt = ATA_OP_NULL,
29050@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29051 .port_stop = mv_port_stop,
29052 };
29053
29054-static struct ata_port_operations mv6_ops = {
29055+static const struct ata_port_operations mv6_ops = {
29056 .inherits = &mv5_ops,
29057 .dev_config = mv6_dev_config,
29058 .scr_read = mv_scr_read,
29059@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29060 .bmdma_status = mv_bmdma_status,
29061 };
29062
29063-static struct ata_port_operations mv_iie_ops = {
29064+static const struct ata_port_operations mv_iie_ops = {
29065 .inherits = &mv6_ops,
29066 .dev_config = ATA_OP_NULL,
29067 .qc_prep = mv_qc_prep_iie,
29068diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29069index ae2297c..d5c9c33 100644
29070--- a/drivers/ata/sata_nv.c
29071+++ b/drivers/ata/sata_nv.c
29072@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29073 * cases. Define nv_hardreset() which only kicks in for post-boot
29074 * probing and use it for all variants.
29075 */
29076-static struct ata_port_operations nv_generic_ops = {
29077+static const struct ata_port_operations nv_generic_ops = {
29078 .inherits = &ata_bmdma_port_ops,
29079 .lost_interrupt = ATA_OP_NULL,
29080 .scr_read = nv_scr_read,
29081@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29082 .hardreset = nv_hardreset,
29083 };
29084
29085-static struct ata_port_operations nv_nf2_ops = {
29086+static const struct ata_port_operations nv_nf2_ops = {
29087 .inherits = &nv_generic_ops,
29088 .freeze = nv_nf2_freeze,
29089 .thaw = nv_nf2_thaw,
29090 };
29091
29092-static struct ata_port_operations nv_ck804_ops = {
29093+static const struct ata_port_operations nv_ck804_ops = {
29094 .inherits = &nv_generic_ops,
29095 .freeze = nv_ck804_freeze,
29096 .thaw = nv_ck804_thaw,
29097 .host_stop = nv_ck804_host_stop,
29098 };
29099
29100-static struct ata_port_operations nv_adma_ops = {
29101+static const struct ata_port_operations nv_adma_ops = {
29102 .inherits = &nv_ck804_ops,
29103
29104 .check_atapi_dma = nv_adma_check_atapi_dma,
29105@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29106 .host_stop = nv_adma_host_stop,
29107 };
29108
29109-static struct ata_port_operations nv_swncq_ops = {
29110+static const struct ata_port_operations nv_swncq_ops = {
29111 .inherits = &nv_generic_ops,
29112
29113 .qc_defer = ata_std_qc_defer,
29114diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29115index 07d8d00..6cc70bb 100644
29116--- a/drivers/ata/sata_promise.c
29117+++ b/drivers/ata/sata_promise.c
29118@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29119 .error_handler = pdc_error_handler,
29120 };
29121
29122-static struct ata_port_operations pdc_sata_ops = {
29123+static const struct ata_port_operations pdc_sata_ops = {
29124 .inherits = &pdc_common_ops,
29125 .cable_detect = pdc_sata_cable_detect,
29126 .freeze = pdc_sata_freeze,
29127@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29128
29129 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29130 and ->freeze/thaw that ignore the hotplug controls. */
29131-static struct ata_port_operations pdc_old_sata_ops = {
29132+static const struct ata_port_operations pdc_old_sata_ops = {
29133 .inherits = &pdc_sata_ops,
29134 .freeze = pdc_freeze,
29135 .thaw = pdc_thaw,
29136 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29137 };
29138
29139-static struct ata_port_operations pdc_pata_ops = {
29140+static const struct ata_port_operations pdc_pata_ops = {
29141 .inherits = &pdc_common_ops,
29142 .cable_detect = pdc_pata_cable_detect,
29143 .freeze = pdc_freeze,
29144diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29145index 326c0cf..36ecebe 100644
29146--- a/drivers/ata/sata_qstor.c
29147+++ b/drivers/ata/sata_qstor.c
29148@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29149 .dma_boundary = QS_DMA_BOUNDARY,
29150 };
29151
29152-static struct ata_port_operations qs_ata_ops = {
29153+static const struct ata_port_operations qs_ata_ops = {
29154 .inherits = &ata_sff_port_ops,
29155
29156 .check_atapi_dma = qs_check_atapi_dma,
29157diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29158index 3cb69d5..0871d3c 100644
29159--- a/drivers/ata/sata_sil.c
29160+++ b/drivers/ata/sata_sil.c
29161@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29162 .sg_tablesize = ATA_MAX_PRD
29163 };
29164
29165-static struct ata_port_operations sil_ops = {
29166+static const struct ata_port_operations sil_ops = {
29167 .inherits = &ata_bmdma32_port_ops,
29168 .dev_config = sil_dev_config,
29169 .set_mode = sil_set_mode,
29170diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29171index e6946fc..eddb794 100644
29172--- a/drivers/ata/sata_sil24.c
29173+++ b/drivers/ata/sata_sil24.c
29174@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29175 .dma_boundary = ATA_DMA_BOUNDARY,
29176 };
29177
29178-static struct ata_port_operations sil24_ops = {
29179+static const struct ata_port_operations sil24_ops = {
29180 .inherits = &sata_pmp_port_ops,
29181
29182 .qc_defer = sil24_qc_defer,
29183diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29184index f8a91bf..9cb06b6 100644
29185--- a/drivers/ata/sata_sis.c
29186+++ b/drivers/ata/sata_sis.c
29187@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29188 ATA_BMDMA_SHT(DRV_NAME),
29189 };
29190
29191-static struct ata_port_operations sis_ops = {
29192+static const struct ata_port_operations sis_ops = {
29193 .inherits = &ata_bmdma_port_ops,
29194 .scr_read = sis_scr_read,
29195 .scr_write = sis_scr_write,
29196diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29197index 7257f2d..d04c6f5 100644
29198--- a/drivers/ata/sata_svw.c
29199+++ b/drivers/ata/sata_svw.c
29200@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29201 };
29202
29203
29204-static struct ata_port_operations k2_sata_ops = {
29205+static const struct ata_port_operations k2_sata_ops = {
29206 .inherits = &ata_bmdma_port_ops,
29207 .sff_tf_load = k2_sata_tf_load,
29208 .sff_tf_read = k2_sata_tf_read,
29209diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29210index bbcf970..cd0df0d 100644
29211--- a/drivers/ata/sata_sx4.c
29212+++ b/drivers/ata/sata_sx4.c
29213@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29214 };
29215
29216 /* TODO: inherit from base port_ops after converting to new EH */
29217-static struct ata_port_operations pdc_20621_ops = {
29218+static const struct ata_port_operations pdc_20621_ops = {
29219 .inherits = &ata_sff_port_ops,
29220
29221 .check_atapi_dma = pdc_check_atapi_dma,
29222diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29223index e5bff47..089d859 100644
29224--- a/drivers/ata/sata_uli.c
29225+++ b/drivers/ata/sata_uli.c
29226@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29227 ATA_BMDMA_SHT(DRV_NAME),
29228 };
29229
29230-static struct ata_port_operations uli_ops = {
29231+static const struct ata_port_operations uli_ops = {
29232 .inherits = &ata_bmdma_port_ops,
29233 .scr_read = uli_scr_read,
29234 .scr_write = uli_scr_write,
29235diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29236index f5dcca7..77b94eb 100644
29237--- a/drivers/ata/sata_via.c
29238+++ b/drivers/ata/sata_via.c
29239@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29240 ATA_BMDMA_SHT(DRV_NAME),
29241 };
29242
29243-static struct ata_port_operations svia_base_ops = {
29244+static const struct ata_port_operations svia_base_ops = {
29245 .inherits = &ata_bmdma_port_ops,
29246 .sff_tf_load = svia_tf_load,
29247 };
29248
29249-static struct ata_port_operations vt6420_sata_ops = {
29250+static const struct ata_port_operations vt6420_sata_ops = {
29251 .inherits = &svia_base_ops,
29252 .freeze = svia_noop_freeze,
29253 .prereset = vt6420_prereset,
29254 .bmdma_start = vt6420_bmdma_start,
29255 };
29256
29257-static struct ata_port_operations vt6421_pata_ops = {
29258+static const struct ata_port_operations vt6421_pata_ops = {
29259 .inherits = &svia_base_ops,
29260 .cable_detect = vt6421_pata_cable_detect,
29261 .set_piomode = vt6421_set_pio_mode,
29262 .set_dmamode = vt6421_set_dma_mode,
29263 };
29264
29265-static struct ata_port_operations vt6421_sata_ops = {
29266+static const struct ata_port_operations vt6421_sata_ops = {
29267 .inherits = &svia_base_ops,
29268 .scr_read = svia_scr_read,
29269 .scr_write = svia_scr_write,
29270 };
29271
29272-static struct ata_port_operations vt8251_ops = {
29273+static const struct ata_port_operations vt8251_ops = {
29274 .inherits = &svia_base_ops,
29275 .hardreset = sata_std_hardreset,
29276 .scr_read = vt8251_scr_read,
29277diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29278index 8b2a278..51e65d3 100644
29279--- a/drivers/ata/sata_vsc.c
29280+++ b/drivers/ata/sata_vsc.c
29281@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29282 };
29283
29284
29285-static struct ata_port_operations vsc_sata_ops = {
29286+static const struct ata_port_operations vsc_sata_ops = {
29287 .inherits = &ata_bmdma_port_ops,
29288 /* The IRQ handling is not quite standard SFF behaviour so we
29289 cannot use the default lost interrupt handler */
29290diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29291index 5effec6..7e4019a 100644
29292--- a/drivers/atm/adummy.c
29293+++ b/drivers/atm/adummy.c
29294@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29295 vcc->pop(vcc, skb);
29296 else
29297 dev_kfree_skb_any(skb);
29298- atomic_inc(&vcc->stats->tx);
29299+ atomic_inc_unchecked(&vcc->stats->tx);
29300
29301 return 0;
29302 }
29303diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29304index 66e1813..26a27c6 100644
29305--- a/drivers/atm/ambassador.c
29306+++ b/drivers/atm/ambassador.c
29307@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29308 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29309
29310 // VC layer stats
29311- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29312+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29313
29314 // free the descriptor
29315 kfree (tx_descr);
29316@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29317 dump_skb ("<<<", vc, skb);
29318
29319 // VC layer stats
29320- atomic_inc(&atm_vcc->stats->rx);
29321+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29322 __net_timestamp(skb);
29323 // end of our responsability
29324 atm_vcc->push (atm_vcc, skb);
29325@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29326 } else {
29327 PRINTK (KERN_INFO, "dropped over-size frame");
29328 // should we count this?
29329- atomic_inc(&atm_vcc->stats->rx_drop);
29330+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29331 }
29332
29333 } else {
29334@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29335 }
29336
29337 if (check_area (skb->data, skb->len)) {
29338- atomic_inc(&atm_vcc->stats->tx_err);
29339+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29340 return -ENOMEM; // ?
29341 }
29342
29343diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29344index 02ad83d..6daffeb 100644
29345--- a/drivers/atm/atmtcp.c
29346+++ b/drivers/atm/atmtcp.c
29347@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29348 if (vcc->pop) vcc->pop(vcc,skb);
29349 else dev_kfree_skb(skb);
29350 if (dev_data) return 0;
29351- atomic_inc(&vcc->stats->tx_err);
29352+ atomic_inc_unchecked(&vcc->stats->tx_err);
29353 return -ENOLINK;
29354 }
29355 size = skb->len+sizeof(struct atmtcp_hdr);
29356@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29357 if (!new_skb) {
29358 if (vcc->pop) vcc->pop(vcc,skb);
29359 else dev_kfree_skb(skb);
29360- atomic_inc(&vcc->stats->tx_err);
29361+ atomic_inc_unchecked(&vcc->stats->tx_err);
29362 return -ENOBUFS;
29363 }
29364 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29365@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29366 if (vcc->pop) vcc->pop(vcc,skb);
29367 else dev_kfree_skb(skb);
29368 out_vcc->push(out_vcc,new_skb);
29369- atomic_inc(&vcc->stats->tx);
29370- atomic_inc(&out_vcc->stats->rx);
29371+ atomic_inc_unchecked(&vcc->stats->tx);
29372+ atomic_inc_unchecked(&out_vcc->stats->rx);
29373 return 0;
29374 }
29375
29376@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29377 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29378 read_unlock(&vcc_sklist_lock);
29379 if (!out_vcc) {
29380- atomic_inc(&vcc->stats->tx_err);
29381+ atomic_inc_unchecked(&vcc->stats->tx_err);
29382 goto done;
29383 }
29384 skb_pull(skb,sizeof(struct atmtcp_hdr));
29385@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29386 __net_timestamp(new_skb);
29387 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29388 out_vcc->push(out_vcc,new_skb);
29389- atomic_inc(&vcc->stats->tx);
29390- atomic_inc(&out_vcc->stats->rx);
29391+ atomic_inc_unchecked(&vcc->stats->tx);
29392+ atomic_inc_unchecked(&out_vcc->stats->rx);
29393 done:
29394 if (vcc->pop) vcc->pop(vcc,skb);
29395 else dev_kfree_skb(skb);
29396diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29397index 0c30261..3da356e 100644
29398--- a/drivers/atm/eni.c
29399+++ b/drivers/atm/eni.c
29400@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29401 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29402 vcc->dev->number);
29403 length = 0;
29404- atomic_inc(&vcc->stats->rx_err);
29405+ atomic_inc_unchecked(&vcc->stats->rx_err);
29406 }
29407 else {
29408 length = ATM_CELL_SIZE-1; /* no HEC */
29409@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29410 size);
29411 }
29412 eff = length = 0;
29413- atomic_inc(&vcc->stats->rx_err);
29414+ atomic_inc_unchecked(&vcc->stats->rx_err);
29415 }
29416 else {
29417 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29418@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29419 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29420 vcc->dev->number,vcc->vci,length,size << 2,descr);
29421 length = eff = 0;
29422- atomic_inc(&vcc->stats->rx_err);
29423+ atomic_inc_unchecked(&vcc->stats->rx_err);
29424 }
29425 }
29426 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29427@@ -770,7 +770,7 @@ rx_dequeued++;
29428 vcc->push(vcc,skb);
29429 pushed++;
29430 }
29431- atomic_inc(&vcc->stats->rx);
29432+ atomic_inc_unchecked(&vcc->stats->rx);
29433 }
29434 wake_up(&eni_dev->rx_wait);
29435 }
29436@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29437 PCI_DMA_TODEVICE);
29438 if (vcc->pop) vcc->pop(vcc,skb);
29439 else dev_kfree_skb_irq(skb);
29440- atomic_inc(&vcc->stats->tx);
29441+ atomic_inc_unchecked(&vcc->stats->tx);
29442 wake_up(&eni_dev->tx_wait);
29443 dma_complete++;
29444 }
29445@@ -1570,7 +1570,7 @@ tx_complete++;
29446 /*--------------------------------- entries ---------------------------------*/
29447
29448
29449-static const char *media_name[] __devinitdata = {
29450+static const char *media_name[] __devinitconst = {
29451 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29452 "UTP", "05?", "06?", "07?", /* 4- 7 */
29453 "TAXI","09?", "10?", "11?", /* 8-11 */
29454diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29455index cd5049a..a51209f 100644
29456--- a/drivers/atm/firestream.c
29457+++ b/drivers/atm/firestream.c
29458@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29459 }
29460 }
29461
29462- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29463+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29464
29465 fs_dprintk (FS_DEBUG_TXMEM, "i");
29466 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29467@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29468 #endif
29469 skb_put (skb, qe->p1 & 0xffff);
29470 ATM_SKB(skb)->vcc = atm_vcc;
29471- atomic_inc(&atm_vcc->stats->rx);
29472+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29473 __net_timestamp(skb);
29474 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29475 atm_vcc->push (atm_vcc, skb);
29476@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29477 kfree (pe);
29478 }
29479 if (atm_vcc)
29480- atomic_inc(&atm_vcc->stats->rx_drop);
29481+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29482 break;
29483 case 0x1f: /* Reassembly abort: no buffers. */
29484 /* Silently increment error counter. */
29485 if (atm_vcc)
29486- atomic_inc(&atm_vcc->stats->rx_drop);
29487+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29488 break;
29489 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29490 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29491diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29492index f766cc4..a34002e 100644
29493--- a/drivers/atm/fore200e.c
29494+++ b/drivers/atm/fore200e.c
29495@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29496 #endif
29497 /* check error condition */
29498 if (*entry->status & STATUS_ERROR)
29499- atomic_inc(&vcc->stats->tx_err);
29500+ atomic_inc_unchecked(&vcc->stats->tx_err);
29501 else
29502- atomic_inc(&vcc->stats->tx);
29503+ atomic_inc_unchecked(&vcc->stats->tx);
29504 }
29505 }
29506
29507@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29508 if (skb == NULL) {
29509 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29510
29511- atomic_inc(&vcc->stats->rx_drop);
29512+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29513 return -ENOMEM;
29514 }
29515
29516@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29517
29518 dev_kfree_skb_any(skb);
29519
29520- atomic_inc(&vcc->stats->rx_drop);
29521+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29522 return -ENOMEM;
29523 }
29524
29525 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29526
29527 vcc->push(vcc, skb);
29528- atomic_inc(&vcc->stats->rx);
29529+ atomic_inc_unchecked(&vcc->stats->rx);
29530
29531 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29532
29533@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29534 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29535 fore200e->atm_dev->number,
29536 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29537- atomic_inc(&vcc->stats->rx_err);
29538+ atomic_inc_unchecked(&vcc->stats->rx_err);
29539 }
29540 }
29541
29542@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29543 goto retry_here;
29544 }
29545
29546- atomic_inc(&vcc->stats->tx_err);
29547+ atomic_inc_unchecked(&vcc->stats->tx_err);
29548
29549 fore200e->tx_sat++;
29550 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29551diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29552index 7066703..2b130de 100644
29553--- a/drivers/atm/he.c
29554+++ b/drivers/atm/he.c
29555@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29556
29557 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29558 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29559- atomic_inc(&vcc->stats->rx_drop);
29560+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29561 goto return_host_buffers;
29562 }
29563
29564@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29565 RBRQ_LEN_ERR(he_dev->rbrq_head)
29566 ? "LEN_ERR" : "",
29567 vcc->vpi, vcc->vci);
29568- atomic_inc(&vcc->stats->rx_err);
29569+ atomic_inc_unchecked(&vcc->stats->rx_err);
29570 goto return_host_buffers;
29571 }
29572
29573@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29574 vcc->push(vcc, skb);
29575 spin_lock(&he_dev->global_lock);
29576
29577- atomic_inc(&vcc->stats->rx);
29578+ atomic_inc_unchecked(&vcc->stats->rx);
29579
29580 return_host_buffers:
29581 ++pdus_assembled;
29582@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29583 tpd->vcc->pop(tpd->vcc, tpd->skb);
29584 else
29585 dev_kfree_skb_any(tpd->skb);
29586- atomic_inc(&tpd->vcc->stats->tx_err);
29587+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29588 }
29589 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29590 return;
29591@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29592 vcc->pop(vcc, skb);
29593 else
29594 dev_kfree_skb_any(skb);
29595- atomic_inc(&vcc->stats->tx_err);
29596+ atomic_inc_unchecked(&vcc->stats->tx_err);
29597 return -EINVAL;
29598 }
29599
29600@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29601 vcc->pop(vcc, skb);
29602 else
29603 dev_kfree_skb_any(skb);
29604- atomic_inc(&vcc->stats->tx_err);
29605+ atomic_inc_unchecked(&vcc->stats->tx_err);
29606 return -EINVAL;
29607 }
29608 #endif
29609@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29610 vcc->pop(vcc, skb);
29611 else
29612 dev_kfree_skb_any(skb);
29613- atomic_inc(&vcc->stats->tx_err);
29614+ atomic_inc_unchecked(&vcc->stats->tx_err);
29615 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29616 return -ENOMEM;
29617 }
29618@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29619 vcc->pop(vcc, skb);
29620 else
29621 dev_kfree_skb_any(skb);
29622- atomic_inc(&vcc->stats->tx_err);
29623+ atomic_inc_unchecked(&vcc->stats->tx_err);
29624 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29625 return -ENOMEM;
29626 }
29627@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29628 __enqueue_tpd(he_dev, tpd, cid);
29629 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29630
29631- atomic_inc(&vcc->stats->tx);
29632+ atomic_inc_unchecked(&vcc->stats->tx);
29633
29634 return 0;
29635 }
29636diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29637index 4e49021..01b1512 100644
29638--- a/drivers/atm/horizon.c
29639+++ b/drivers/atm/horizon.c
29640@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29641 {
29642 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29643 // VC layer stats
29644- atomic_inc(&vcc->stats->rx);
29645+ atomic_inc_unchecked(&vcc->stats->rx);
29646 __net_timestamp(skb);
29647 // end of our responsability
29648 vcc->push (vcc, skb);
29649@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29650 dev->tx_iovec = NULL;
29651
29652 // VC layer stats
29653- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29654+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29655
29656 // free the skb
29657 hrz_kfree_skb (skb);
29658diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29659index e33ae00..9deb4ab 100644
29660--- a/drivers/atm/idt77252.c
29661+++ b/drivers/atm/idt77252.c
29662@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29663 else
29664 dev_kfree_skb(skb);
29665
29666- atomic_inc(&vcc->stats->tx);
29667+ atomic_inc_unchecked(&vcc->stats->tx);
29668 }
29669
29670 atomic_dec(&scq->used);
29671@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29672 if ((sb = dev_alloc_skb(64)) == NULL) {
29673 printk("%s: Can't allocate buffers for aal0.\n",
29674 card->name);
29675- atomic_add(i, &vcc->stats->rx_drop);
29676+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
29677 break;
29678 }
29679 if (!atm_charge(vcc, sb->truesize)) {
29680 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29681 card->name);
29682- atomic_add(i - 1, &vcc->stats->rx_drop);
29683+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29684 dev_kfree_skb(sb);
29685 break;
29686 }
29687@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29688 ATM_SKB(sb)->vcc = vcc;
29689 __net_timestamp(sb);
29690 vcc->push(vcc, sb);
29691- atomic_inc(&vcc->stats->rx);
29692+ atomic_inc_unchecked(&vcc->stats->rx);
29693
29694 cell += ATM_CELL_PAYLOAD;
29695 }
29696@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29697 "(CDC: %08x)\n",
29698 card->name, len, rpp->len, readl(SAR_REG_CDC));
29699 recycle_rx_pool_skb(card, rpp);
29700- atomic_inc(&vcc->stats->rx_err);
29701+ atomic_inc_unchecked(&vcc->stats->rx_err);
29702 return;
29703 }
29704 if (stat & SAR_RSQE_CRC) {
29705 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29706 recycle_rx_pool_skb(card, rpp);
29707- atomic_inc(&vcc->stats->rx_err);
29708+ atomic_inc_unchecked(&vcc->stats->rx_err);
29709 return;
29710 }
29711 if (skb_queue_len(&rpp->queue) > 1) {
29712@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29713 RXPRINTK("%s: Can't alloc RX skb.\n",
29714 card->name);
29715 recycle_rx_pool_skb(card, rpp);
29716- atomic_inc(&vcc->stats->rx_err);
29717+ atomic_inc_unchecked(&vcc->stats->rx_err);
29718 return;
29719 }
29720 if (!atm_charge(vcc, skb->truesize)) {
29721@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29722 __net_timestamp(skb);
29723
29724 vcc->push(vcc, skb);
29725- atomic_inc(&vcc->stats->rx);
29726+ atomic_inc_unchecked(&vcc->stats->rx);
29727
29728 return;
29729 }
29730@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29731 __net_timestamp(skb);
29732
29733 vcc->push(vcc, skb);
29734- atomic_inc(&vcc->stats->rx);
29735+ atomic_inc_unchecked(&vcc->stats->rx);
29736
29737 if (skb->truesize > SAR_FB_SIZE_3)
29738 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29739@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29740 if (vcc->qos.aal != ATM_AAL0) {
29741 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29742 card->name, vpi, vci);
29743- atomic_inc(&vcc->stats->rx_drop);
29744+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29745 goto drop;
29746 }
29747
29748 if ((sb = dev_alloc_skb(64)) == NULL) {
29749 printk("%s: Can't allocate buffers for AAL0.\n",
29750 card->name);
29751- atomic_inc(&vcc->stats->rx_err);
29752+ atomic_inc_unchecked(&vcc->stats->rx_err);
29753 goto drop;
29754 }
29755
29756@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29757 ATM_SKB(sb)->vcc = vcc;
29758 __net_timestamp(sb);
29759 vcc->push(vcc, sb);
29760- atomic_inc(&vcc->stats->rx);
29761+ atomic_inc_unchecked(&vcc->stats->rx);
29762
29763 drop:
29764 skb_pull(queue, 64);
29765@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29766
29767 if (vc == NULL) {
29768 printk("%s: NULL connection in send().\n", card->name);
29769- atomic_inc(&vcc->stats->tx_err);
29770+ atomic_inc_unchecked(&vcc->stats->tx_err);
29771 dev_kfree_skb(skb);
29772 return -EINVAL;
29773 }
29774 if (!test_bit(VCF_TX, &vc->flags)) {
29775 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29776- atomic_inc(&vcc->stats->tx_err);
29777+ atomic_inc_unchecked(&vcc->stats->tx_err);
29778 dev_kfree_skb(skb);
29779 return -EINVAL;
29780 }
29781@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29782 break;
29783 default:
29784 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29785- atomic_inc(&vcc->stats->tx_err);
29786+ atomic_inc_unchecked(&vcc->stats->tx_err);
29787 dev_kfree_skb(skb);
29788 return -EINVAL;
29789 }
29790
29791 if (skb_shinfo(skb)->nr_frags != 0) {
29792 printk("%s: No scatter-gather yet.\n", card->name);
29793- atomic_inc(&vcc->stats->tx_err);
29794+ atomic_inc_unchecked(&vcc->stats->tx_err);
29795 dev_kfree_skb(skb);
29796 return -EINVAL;
29797 }
29798@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29799
29800 err = queue_skb(card, vc, skb, oam);
29801 if (err) {
29802- atomic_inc(&vcc->stats->tx_err);
29803+ atomic_inc_unchecked(&vcc->stats->tx_err);
29804 dev_kfree_skb(skb);
29805 return err;
29806 }
29807@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29808 skb = dev_alloc_skb(64);
29809 if (!skb) {
29810 printk("%s: Out of memory in send_oam().\n", card->name);
29811- atomic_inc(&vcc->stats->tx_err);
29812+ atomic_inc_unchecked(&vcc->stats->tx_err);
29813 return -ENOMEM;
29814 }
29815 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29816diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29817index b2c1b37..faa672b 100644
29818--- a/drivers/atm/iphase.c
29819+++ b/drivers/atm/iphase.c
29820@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
29821 status = (u_short) (buf_desc_ptr->desc_mode);
29822 if (status & (RX_CER | RX_PTE | RX_OFL))
29823 {
29824- atomic_inc(&vcc->stats->rx_err);
29825+ atomic_inc_unchecked(&vcc->stats->rx_err);
29826 IF_ERR(printk("IA: bad packet, dropping it");)
29827 if (status & RX_CER) {
29828 IF_ERR(printk(" cause: packet CRC error\n");)
29829@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
29830 len = dma_addr - buf_addr;
29831 if (len > iadev->rx_buf_sz) {
29832 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29833- atomic_inc(&vcc->stats->rx_err);
29834+ atomic_inc_unchecked(&vcc->stats->rx_err);
29835 goto out_free_desc;
29836 }
29837
29838@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29839 ia_vcc = INPH_IA_VCC(vcc);
29840 if (ia_vcc == NULL)
29841 {
29842- atomic_inc(&vcc->stats->rx_err);
29843+ atomic_inc_unchecked(&vcc->stats->rx_err);
29844 dev_kfree_skb_any(skb);
29845 atm_return(vcc, atm_guess_pdu2truesize(len));
29846 goto INCR_DLE;
29847@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29848 if ((length > iadev->rx_buf_sz) || (length >
29849 (skb->len - sizeof(struct cpcs_trailer))))
29850 {
29851- atomic_inc(&vcc->stats->rx_err);
29852+ atomic_inc_unchecked(&vcc->stats->rx_err);
29853 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29854 length, skb->len);)
29855 dev_kfree_skb_any(skb);
29856@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29857
29858 IF_RX(printk("rx_dle_intr: skb push");)
29859 vcc->push(vcc,skb);
29860- atomic_inc(&vcc->stats->rx);
29861+ atomic_inc_unchecked(&vcc->stats->rx);
29862 iadev->rx_pkt_cnt++;
29863 }
29864 INCR_DLE:
29865@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29866 {
29867 struct k_sonet_stats *stats;
29868 stats = &PRIV(_ia_dev[board])->sonet_stats;
29869- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
29870- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
29871- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
29872- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
29873- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
29874- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
29875- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
29876- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
29877- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
29878+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
29879+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
29880+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
29881+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
29882+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
29883+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
29884+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
29885+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
29886+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
29887 }
29888 ia_cmds.status = 0;
29889 break;
29890@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29891 if ((desc == 0) || (desc > iadev->num_tx_desc))
29892 {
29893 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
29894- atomic_inc(&vcc->stats->tx);
29895+ atomic_inc_unchecked(&vcc->stats->tx);
29896 if (vcc->pop)
29897 vcc->pop(vcc, skb);
29898 else
29899@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29900 ATM_DESC(skb) = vcc->vci;
29901 skb_queue_tail(&iadev->tx_dma_q, skb);
29902
29903- atomic_inc(&vcc->stats->tx);
29904+ atomic_inc_unchecked(&vcc->stats->tx);
29905 iadev->tx_pkt_cnt++;
29906 /* Increment transaction counter */
29907 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
29908
29909 #if 0
29910 /* add flow control logic */
29911- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
29912+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
29913 if (iavcc->vc_desc_cnt > 10) {
29914 vcc->tx_quota = vcc->tx_quota * 3 / 4;
29915 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
29916diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
29917index cf97c34..8d30655 100644
29918--- a/drivers/atm/lanai.c
29919+++ b/drivers/atm/lanai.c
29920@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
29921 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
29922 lanai_endtx(lanai, lvcc);
29923 lanai_free_skb(lvcc->tx.atmvcc, skb);
29924- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
29925+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
29926 }
29927
29928 /* Try to fill the buffer - don't call unless there is backlog */
29929@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
29930 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
29931 __net_timestamp(skb);
29932 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
29933- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
29934+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
29935 out:
29936 lvcc->rx.buf.ptr = end;
29937 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
29938@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29939 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
29940 "vcc %d\n", lanai->number, (unsigned int) s, vci);
29941 lanai->stats.service_rxnotaal5++;
29942- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29943+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29944 return 0;
29945 }
29946 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
29947@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29948 int bytes;
29949 read_unlock(&vcc_sklist_lock);
29950 DPRINTK("got trashed rx pdu on vci %d\n", vci);
29951- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29952+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29953 lvcc->stats.x.aal5.service_trash++;
29954 bytes = (SERVICE_GET_END(s) * 16) -
29955 (((unsigned long) lvcc->rx.buf.ptr) -
29956@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29957 }
29958 if (s & SERVICE_STREAM) {
29959 read_unlock(&vcc_sklist_lock);
29960- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29961+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29962 lvcc->stats.x.aal5.service_stream++;
29963 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
29964 "PDU on VCI %d!\n", lanai->number, vci);
29965@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29966 return 0;
29967 }
29968 DPRINTK("got rx crc error on vci %d\n", vci);
29969- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29970+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29971 lvcc->stats.x.aal5.service_rxcrc++;
29972 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
29973 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
29974diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
29975index 3da804b..d3b0eed 100644
29976--- a/drivers/atm/nicstar.c
29977+++ b/drivers/atm/nicstar.c
29978@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29979 if ((vc = (vc_map *) vcc->dev_data) == NULL)
29980 {
29981 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
29982- atomic_inc(&vcc->stats->tx_err);
29983+ atomic_inc_unchecked(&vcc->stats->tx_err);
29984 dev_kfree_skb_any(skb);
29985 return -EINVAL;
29986 }
29987@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29988 if (!vc->tx)
29989 {
29990 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
29991- atomic_inc(&vcc->stats->tx_err);
29992+ atomic_inc_unchecked(&vcc->stats->tx_err);
29993 dev_kfree_skb_any(skb);
29994 return -EINVAL;
29995 }
29996@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29997 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
29998 {
29999 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
30000- atomic_inc(&vcc->stats->tx_err);
30001+ atomic_inc_unchecked(&vcc->stats->tx_err);
30002 dev_kfree_skb_any(skb);
30003 return -EINVAL;
30004 }
30005@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30006 if (skb_shinfo(skb)->nr_frags != 0)
30007 {
30008 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30009- atomic_inc(&vcc->stats->tx_err);
30010+ atomic_inc_unchecked(&vcc->stats->tx_err);
30011 dev_kfree_skb_any(skb);
30012 return -EINVAL;
30013 }
30014@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30015
30016 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30017 {
30018- atomic_inc(&vcc->stats->tx_err);
30019+ atomic_inc_unchecked(&vcc->stats->tx_err);
30020 dev_kfree_skb_any(skb);
30021 return -EIO;
30022 }
30023- atomic_inc(&vcc->stats->tx);
30024+ atomic_inc_unchecked(&vcc->stats->tx);
30025
30026 return 0;
30027 }
30028@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30029 {
30030 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30031 card->index);
30032- atomic_add(i,&vcc->stats->rx_drop);
30033+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
30034 break;
30035 }
30036 if (!atm_charge(vcc, sb->truesize))
30037 {
30038 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30039 card->index);
30040- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30041+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30042 dev_kfree_skb_any(sb);
30043 break;
30044 }
30045@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30046 ATM_SKB(sb)->vcc = vcc;
30047 __net_timestamp(sb);
30048 vcc->push(vcc, sb);
30049- atomic_inc(&vcc->stats->rx);
30050+ atomic_inc_unchecked(&vcc->stats->rx);
30051 cell += ATM_CELL_PAYLOAD;
30052 }
30053
30054@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30055 if (iovb == NULL)
30056 {
30057 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30058- atomic_inc(&vcc->stats->rx_drop);
30059+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30060 recycle_rx_buf(card, skb);
30061 return;
30062 }
30063@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30064 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30065 {
30066 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30067- atomic_inc(&vcc->stats->rx_err);
30068+ atomic_inc_unchecked(&vcc->stats->rx_err);
30069 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30070 NS_SKB(iovb)->iovcnt = 0;
30071 iovb->len = 0;
30072@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30073 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30074 card->index);
30075 which_list(card, skb);
30076- atomic_inc(&vcc->stats->rx_err);
30077+ atomic_inc_unchecked(&vcc->stats->rx_err);
30078 recycle_rx_buf(card, skb);
30079 vc->rx_iov = NULL;
30080 recycle_iov_buf(card, iovb);
30081@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30082 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30083 card->index);
30084 which_list(card, skb);
30085- atomic_inc(&vcc->stats->rx_err);
30086+ atomic_inc_unchecked(&vcc->stats->rx_err);
30087 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30088 NS_SKB(iovb)->iovcnt);
30089 vc->rx_iov = NULL;
30090@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30091 printk(" - PDU size mismatch.\n");
30092 else
30093 printk(".\n");
30094- atomic_inc(&vcc->stats->rx_err);
30095+ atomic_inc_unchecked(&vcc->stats->rx_err);
30096 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30097 NS_SKB(iovb)->iovcnt);
30098 vc->rx_iov = NULL;
30099@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30100 if (!atm_charge(vcc, skb->truesize))
30101 {
30102 push_rxbufs(card, skb);
30103- atomic_inc(&vcc->stats->rx_drop);
30104+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30105 }
30106 else
30107 {
30108@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30109 ATM_SKB(skb)->vcc = vcc;
30110 __net_timestamp(skb);
30111 vcc->push(vcc, skb);
30112- atomic_inc(&vcc->stats->rx);
30113+ atomic_inc_unchecked(&vcc->stats->rx);
30114 }
30115 }
30116 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30117@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30118 if (!atm_charge(vcc, sb->truesize))
30119 {
30120 push_rxbufs(card, sb);
30121- atomic_inc(&vcc->stats->rx_drop);
30122+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30123 }
30124 else
30125 {
30126@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30127 ATM_SKB(sb)->vcc = vcc;
30128 __net_timestamp(sb);
30129 vcc->push(vcc, sb);
30130- atomic_inc(&vcc->stats->rx);
30131+ atomic_inc_unchecked(&vcc->stats->rx);
30132 }
30133
30134 push_rxbufs(card, skb);
30135@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30136 if (!atm_charge(vcc, skb->truesize))
30137 {
30138 push_rxbufs(card, skb);
30139- atomic_inc(&vcc->stats->rx_drop);
30140+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30141 }
30142 else
30143 {
30144@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30145 ATM_SKB(skb)->vcc = vcc;
30146 __net_timestamp(skb);
30147 vcc->push(vcc, skb);
30148- atomic_inc(&vcc->stats->rx);
30149+ atomic_inc_unchecked(&vcc->stats->rx);
30150 }
30151
30152 push_rxbufs(card, sb);
30153@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30154 if (hb == NULL)
30155 {
30156 printk("nicstar%d: Out of huge buffers.\n", card->index);
30157- atomic_inc(&vcc->stats->rx_drop);
30158+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30159 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30160 NS_SKB(iovb)->iovcnt);
30161 vc->rx_iov = NULL;
30162@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30163 }
30164 else
30165 dev_kfree_skb_any(hb);
30166- atomic_inc(&vcc->stats->rx_drop);
30167+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30168 }
30169 else
30170 {
30171@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30172 #endif /* NS_USE_DESTRUCTORS */
30173 __net_timestamp(hb);
30174 vcc->push(vcc, hb);
30175- atomic_inc(&vcc->stats->rx);
30176+ atomic_inc_unchecked(&vcc->stats->rx);
30177 }
30178 }
30179
30180diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30181index 84c93ff..e6ed269 100644
30182--- a/drivers/atm/solos-pci.c
30183+++ b/drivers/atm/solos-pci.c
30184@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30185 }
30186 atm_charge(vcc, skb->truesize);
30187 vcc->push(vcc, skb);
30188- atomic_inc(&vcc->stats->rx);
30189+ atomic_inc_unchecked(&vcc->stats->rx);
30190 break;
30191
30192 case PKT_STATUS:
30193@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30194 char msg[500];
30195 char item[10];
30196
30197+ pax_track_stack();
30198+
30199 len = buf->len;
30200 for (i = 0; i < len; i++){
30201 if(i % 8 == 0)
30202@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30203 vcc = SKB_CB(oldskb)->vcc;
30204
30205 if (vcc) {
30206- atomic_inc(&vcc->stats->tx);
30207+ atomic_inc_unchecked(&vcc->stats->tx);
30208 solos_pop(vcc, oldskb);
30209 } else
30210 dev_kfree_skb_irq(oldskb);
30211diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30212index 6dd3f59..ee377f3 100644
30213--- a/drivers/atm/suni.c
30214+++ b/drivers/atm/suni.c
30215@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30216
30217
30218 #define ADD_LIMITED(s,v) \
30219- atomic_add((v),&stats->s); \
30220- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30221+ atomic_add_unchecked((v),&stats->s); \
30222+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30223
30224
30225 static void suni_hz(unsigned long from_timer)
30226diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30227index fc8cb07..4a80e53 100644
30228--- a/drivers/atm/uPD98402.c
30229+++ b/drivers/atm/uPD98402.c
30230@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30231 struct sonet_stats tmp;
30232 int error = 0;
30233
30234- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30235+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30236 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30237 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30238 if (zero && !error) {
30239@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30240
30241
30242 #define ADD_LIMITED(s,v) \
30243- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30244- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30245- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30246+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30247+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30248+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30249
30250
30251 static void stat_event(struct atm_dev *dev)
30252@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30253 if (reason & uPD98402_INT_PFM) stat_event(dev);
30254 if (reason & uPD98402_INT_PCO) {
30255 (void) GET(PCOCR); /* clear interrupt cause */
30256- atomic_add(GET(HECCT),
30257+ atomic_add_unchecked(GET(HECCT),
30258 &PRIV(dev)->sonet_stats.uncorr_hcs);
30259 }
30260 if ((reason & uPD98402_INT_RFO) &&
30261@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30262 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30263 uPD98402_INT_LOS),PIMR); /* enable them */
30264 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30265- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30266- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30267- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30268+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30269+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30270+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30271 return 0;
30272 }
30273
30274diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30275index 2e9635b..32927b4 100644
30276--- a/drivers/atm/zatm.c
30277+++ b/drivers/atm/zatm.c
30278@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30279 }
30280 if (!size) {
30281 dev_kfree_skb_irq(skb);
30282- if (vcc) atomic_inc(&vcc->stats->rx_err);
30283+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30284 continue;
30285 }
30286 if (!atm_charge(vcc,skb->truesize)) {
30287@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30288 skb->len = size;
30289 ATM_SKB(skb)->vcc = vcc;
30290 vcc->push(vcc,skb);
30291- atomic_inc(&vcc->stats->rx);
30292+ atomic_inc_unchecked(&vcc->stats->rx);
30293 }
30294 zout(pos & 0xffff,MTA(mbx));
30295 #if 0 /* probably a stupid idea */
30296@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30297 skb_queue_head(&zatm_vcc->backlog,skb);
30298 break;
30299 }
30300- atomic_inc(&vcc->stats->tx);
30301+ atomic_inc_unchecked(&vcc->stats->tx);
30302 wake_up(&zatm_vcc->tx_wait);
30303 }
30304
30305diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30306index 63c143e..fece183 100644
30307--- a/drivers/base/bus.c
30308+++ b/drivers/base/bus.c
30309@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30310 return ret;
30311 }
30312
30313-static struct sysfs_ops driver_sysfs_ops = {
30314+static const struct sysfs_ops driver_sysfs_ops = {
30315 .show = drv_attr_show,
30316 .store = drv_attr_store,
30317 };
30318@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30319 return ret;
30320 }
30321
30322-static struct sysfs_ops bus_sysfs_ops = {
30323+static const struct sysfs_ops bus_sysfs_ops = {
30324 .show = bus_attr_show,
30325 .store = bus_attr_store,
30326 };
30327@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30328 return 0;
30329 }
30330
30331-static struct kset_uevent_ops bus_uevent_ops = {
30332+static const struct kset_uevent_ops bus_uevent_ops = {
30333 .filter = bus_uevent_filter,
30334 };
30335
30336diff --git a/drivers/base/class.c b/drivers/base/class.c
30337index 6e2c3b0..cb61871 100644
30338--- a/drivers/base/class.c
30339+++ b/drivers/base/class.c
30340@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30341 kfree(cp);
30342 }
30343
30344-static struct sysfs_ops class_sysfs_ops = {
30345+static const struct sysfs_ops class_sysfs_ops = {
30346 .show = class_attr_show,
30347 .store = class_attr_store,
30348 };
30349diff --git a/drivers/base/core.c b/drivers/base/core.c
30350index f33d768..a9358d0 100644
30351--- a/drivers/base/core.c
30352+++ b/drivers/base/core.c
30353@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30354 return ret;
30355 }
30356
30357-static struct sysfs_ops dev_sysfs_ops = {
30358+static const struct sysfs_ops dev_sysfs_ops = {
30359 .show = dev_attr_show,
30360 .store = dev_attr_store,
30361 };
30362@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30363 return retval;
30364 }
30365
30366-static struct kset_uevent_ops device_uevent_ops = {
30367+static const struct kset_uevent_ops device_uevent_ops = {
30368 .filter = dev_uevent_filter,
30369 .name = dev_uevent_name,
30370 .uevent = dev_uevent,
30371diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30372index 989429c..2272b00 100644
30373--- a/drivers/base/memory.c
30374+++ b/drivers/base/memory.c
30375@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30376 return retval;
30377 }
30378
30379-static struct kset_uevent_ops memory_uevent_ops = {
30380+static const struct kset_uevent_ops memory_uevent_ops = {
30381 .name = memory_uevent_name,
30382 .uevent = memory_uevent,
30383 };
30384diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30385index 3f202f7..61c4a6f 100644
30386--- a/drivers/base/sys.c
30387+++ b/drivers/base/sys.c
30388@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30389 return -EIO;
30390 }
30391
30392-static struct sysfs_ops sysfs_ops = {
30393+static const struct sysfs_ops sysfs_ops = {
30394 .show = sysdev_show,
30395 .store = sysdev_store,
30396 };
30397@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30398 return -EIO;
30399 }
30400
30401-static struct sysfs_ops sysfs_class_ops = {
30402+static const struct sysfs_ops sysfs_class_ops = {
30403 .show = sysdev_class_show,
30404 .store = sysdev_class_store,
30405 };
30406diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30407index eb4fa19..1954777 100644
30408--- a/drivers/block/DAC960.c
30409+++ b/drivers/block/DAC960.c
30410@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30411 unsigned long flags;
30412 int Channel, TargetID;
30413
30414+ pax_track_stack();
30415+
30416 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30417 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30418 sizeof(DAC960_SCSI_Inquiry_T) +
30419diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30420index 68b90d9..7e2e3f3 100644
30421--- a/drivers/block/cciss.c
30422+++ b/drivers/block/cciss.c
30423@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30424 int err;
30425 u32 cp;
30426
30427+ memset(&arg64, 0, sizeof(arg64));
30428+
30429 err = 0;
30430 err |=
30431 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30432@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30433 /* Wait (up to 20 seconds) for a command to complete */
30434
30435 for (i = 20 * HZ; i > 0; i--) {
30436- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30437+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30438 if (done == FIFO_EMPTY)
30439 schedule_timeout_uninterruptible(1);
30440 else
30441@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30442 resend_cmd1:
30443
30444 /* Disable interrupt on the board. */
30445- h->access.set_intr_mask(h, CCISS_INTR_OFF);
30446+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
30447
30448 /* Make sure there is room in the command FIFO */
30449 /* Actually it should be completely empty at this time */
30450@@ -2884,13 +2886,13 @@ resend_cmd1:
30451 /* tape side of the driver. */
30452 for (i = 200000; i > 0; i--) {
30453 /* if fifo isn't full go */
30454- if (!(h->access.fifo_full(h)))
30455+ if (!(h->access->fifo_full(h)))
30456 break;
30457 udelay(10);
30458 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30459 " waiting!\n", h->ctlr);
30460 }
30461- h->access.submit_command(h, c); /* Send the cmd */
30462+ h->access->submit_command(h, c); /* Send the cmd */
30463 do {
30464 complete = pollcomplete(h->ctlr);
30465
30466@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30467 while (!hlist_empty(&h->reqQ)) {
30468 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30469 /* can't do anything if fifo is full */
30470- if ((h->access.fifo_full(h))) {
30471+ if ((h->access->fifo_full(h))) {
30472 printk(KERN_WARNING "cciss: fifo full\n");
30473 break;
30474 }
30475@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30476 h->Qdepth--;
30477
30478 /* Tell the controller execute command */
30479- h->access.submit_command(h, c);
30480+ h->access->submit_command(h, c);
30481
30482 /* Put job onto the completed Q */
30483 addQ(&h->cmpQ, c);
30484@@ -3393,17 +3395,17 @@ startio:
30485
30486 static inline unsigned long get_next_completion(ctlr_info_t *h)
30487 {
30488- return h->access.command_completed(h);
30489+ return h->access->command_completed(h);
30490 }
30491
30492 static inline int interrupt_pending(ctlr_info_t *h)
30493 {
30494- return h->access.intr_pending(h);
30495+ return h->access->intr_pending(h);
30496 }
30497
30498 static inline long interrupt_not_for_us(ctlr_info_t *h)
30499 {
30500- return (((h->access.intr_pending(h) == 0) ||
30501+ return (((h->access->intr_pending(h) == 0) ||
30502 (h->interrupts_enabled == 0)));
30503 }
30504
30505@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30506 */
30507 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30508 c->product_name = products[prod_index].product_name;
30509- c->access = *(products[prod_index].access);
30510+ c->access = products[prod_index].access;
30511 c->nr_cmds = c->max_commands - 4;
30512 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30513 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30514@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30515 }
30516
30517 /* make sure the board interrupts are off */
30518- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30519+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30520 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30521 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30522 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30523@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30524 cciss_scsi_setup(i);
30525
30526 /* Turn the interrupts on so we can service requests */
30527- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30528+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30529
30530 /* Get the firmware version */
30531 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30532diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30533index 04d6bf8..36e712d 100644
30534--- a/drivers/block/cciss.h
30535+++ b/drivers/block/cciss.h
30536@@ -90,7 +90,7 @@ struct ctlr_info
30537 // information about each logical volume
30538 drive_info_struct *drv[CISS_MAX_LUN];
30539
30540- struct access_method access;
30541+ struct access_method *access;
30542
30543 /* queue and queue Info */
30544 struct hlist_head reqQ;
30545diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30546index 6422651..bb1bdef 100644
30547--- a/drivers/block/cpqarray.c
30548+++ b/drivers/block/cpqarray.c
30549@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30550 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30551 goto Enomem4;
30552 }
30553- hba[i]->access.set_intr_mask(hba[i], 0);
30554+ hba[i]->access->set_intr_mask(hba[i], 0);
30555 if (request_irq(hba[i]->intr, do_ida_intr,
30556 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30557 {
30558@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30559 add_timer(&hba[i]->timer);
30560
30561 /* Enable IRQ now that spinlock and rate limit timer are set up */
30562- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30563+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30564
30565 for(j=0; j<NWD; j++) {
30566 struct gendisk *disk = ida_gendisk[i][j];
30567@@ -695,7 +695,7 @@ DBGINFO(
30568 for(i=0; i<NR_PRODUCTS; i++) {
30569 if (board_id == products[i].board_id) {
30570 c->product_name = products[i].product_name;
30571- c->access = *(products[i].access);
30572+ c->access = products[i].access;
30573 break;
30574 }
30575 }
30576@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30577 hba[ctlr]->intr = intr;
30578 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30579 hba[ctlr]->product_name = products[j].product_name;
30580- hba[ctlr]->access = *(products[j].access);
30581+ hba[ctlr]->access = products[j].access;
30582 hba[ctlr]->ctlr = ctlr;
30583 hba[ctlr]->board_id = board_id;
30584 hba[ctlr]->pci_dev = NULL; /* not PCI */
30585@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30586 struct scatterlist tmp_sg[SG_MAX];
30587 int i, dir, seg;
30588
30589+ pax_track_stack();
30590+
30591 if (blk_queue_plugged(q))
30592 goto startio;
30593
30594@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30595
30596 while((c = h->reqQ) != NULL) {
30597 /* Can't do anything if we're busy */
30598- if (h->access.fifo_full(h) == 0)
30599+ if (h->access->fifo_full(h) == 0)
30600 return;
30601
30602 /* Get the first entry from the request Q */
30603@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30604 h->Qdepth--;
30605
30606 /* Tell the controller to do our bidding */
30607- h->access.submit_command(h, c);
30608+ h->access->submit_command(h, c);
30609
30610 /* Get onto the completion Q */
30611 addQ(&h->cmpQ, c);
30612@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30613 unsigned long flags;
30614 __u32 a,a1;
30615
30616- istat = h->access.intr_pending(h);
30617+ istat = h->access->intr_pending(h);
30618 /* Is this interrupt for us? */
30619 if (istat == 0)
30620 return IRQ_NONE;
30621@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30622 */
30623 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30624 if (istat & FIFO_NOT_EMPTY) {
30625- while((a = h->access.command_completed(h))) {
30626+ while((a = h->access->command_completed(h))) {
30627 a1 = a; a &= ~3;
30628 if ((c = h->cmpQ) == NULL)
30629 {
30630@@ -1434,11 +1436,11 @@ static int sendcmd(
30631 /*
30632 * Disable interrupt
30633 */
30634- info_p->access.set_intr_mask(info_p, 0);
30635+ info_p->access->set_intr_mask(info_p, 0);
30636 /* Make sure there is room in the command FIFO */
30637 /* Actually it should be completely empty at this time. */
30638 for (i = 200000; i > 0; i--) {
30639- temp = info_p->access.fifo_full(info_p);
30640+ temp = info_p->access->fifo_full(info_p);
30641 if (temp != 0) {
30642 break;
30643 }
30644@@ -1451,7 +1453,7 @@ DBG(
30645 /*
30646 * Send the cmd
30647 */
30648- info_p->access.submit_command(info_p, c);
30649+ info_p->access->submit_command(info_p, c);
30650 complete = pollcomplete(ctlr);
30651
30652 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30653@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30654 * we check the new geometry. Then turn interrupts back on when
30655 * we're done.
30656 */
30657- host->access.set_intr_mask(host, 0);
30658+ host->access->set_intr_mask(host, 0);
30659 getgeometry(ctlr);
30660- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30661+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30662
30663 for(i=0; i<NWD; i++) {
30664 struct gendisk *disk = ida_gendisk[ctlr][i];
30665@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30666 /* Wait (up to 2 seconds) for a command to complete */
30667
30668 for (i = 200000; i > 0; i--) {
30669- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30670+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30671 if (done == 0) {
30672 udelay(10); /* a short fixed delay */
30673 } else
30674diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30675index be73e9d..7fbf140 100644
30676--- a/drivers/block/cpqarray.h
30677+++ b/drivers/block/cpqarray.h
30678@@ -99,7 +99,7 @@ struct ctlr_info {
30679 drv_info_t drv[NWD];
30680 struct proc_dir_entry *proc;
30681
30682- struct access_method access;
30683+ struct access_method *access;
30684
30685 cmdlist_t *reqQ;
30686 cmdlist_t *cmpQ;
30687diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30688index 8ec2d70..2804b30 100644
30689--- a/drivers/block/loop.c
30690+++ b/drivers/block/loop.c
30691@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30692 mm_segment_t old_fs = get_fs();
30693
30694 set_fs(get_ds());
30695- bw = file->f_op->write(file, buf, len, &pos);
30696+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30697 set_fs(old_fs);
30698 if (likely(bw == len))
30699 return 0;
30700diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30701index 26ada47..083c480 100644
30702--- a/drivers/block/nbd.c
30703+++ b/drivers/block/nbd.c
30704@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30705 struct kvec iov;
30706 sigset_t blocked, oldset;
30707
30708+ pax_track_stack();
30709+
30710 if (unlikely(!sock)) {
30711 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30712 lo->disk->disk_name, (send ? "send" : "recv"));
30713@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30714 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30715 unsigned int cmd, unsigned long arg)
30716 {
30717+ pax_track_stack();
30718+
30719 switch (cmd) {
30720 case NBD_DISCONNECT: {
30721 struct request sreq;
30722diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30723index a5d585d..d087be3 100644
30724--- a/drivers/block/pktcdvd.c
30725+++ b/drivers/block/pktcdvd.c
30726@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30727 return len;
30728 }
30729
30730-static struct sysfs_ops kobj_pkt_ops = {
30731+static const struct sysfs_ops kobj_pkt_ops = {
30732 .show = kobj_pkt_show,
30733 .store = kobj_pkt_store
30734 };
30735diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30736index 6aad99e..89cd142 100644
30737--- a/drivers/char/Kconfig
30738+++ b/drivers/char/Kconfig
30739@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30740
30741 config DEVKMEM
30742 bool "/dev/kmem virtual device support"
30743- default y
30744+ default n
30745+ depends on !GRKERNSEC_KMEM
30746 help
30747 Say Y here if you want to support the /dev/kmem device. The
30748 /dev/kmem device is rarely used, but can be used for certain
30749@@ -1114,6 +1115,7 @@ config DEVPORT
30750 bool
30751 depends on !M68K
30752 depends on ISA || PCI
30753+ depends on !GRKERNSEC_KMEM
30754 default y
30755
30756 source "drivers/s390/char/Kconfig"
30757diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30758index a96f319..a778a5b 100644
30759--- a/drivers/char/agp/frontend.c
30760+++ b/drivers/char/agp/frontend.c
30761@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30762 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30763 return -EFAULT;
30764
30765- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30766+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30767 return -EFAULT;
30768
30769 client = agp_find_client_by_pid(reserve.pid);
30770diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30771index d8cff90..9628e70 100644
30772--- a/drivers/char/briq_panel.c
30773+++ b/drivers/char/briq_panel.c
30774@@ -10,6 +10,7 @@
30775 #include <linux/types.h>
30776 #include <linux/errno.h>
30777 #include <linux/tty.h>
30778+#include <linux/mutex.h>
30779 #include <linux/timer.h>
30780 #include <linux/kernel.h>
30781 #include <linux/wait.h>
30782@@ -36,6 +37,7 @@ static int vfd_is_open;
30783 static unsigned char vfd[40];
30784 static int vfd_cursor;
30785 static unsigned char ledpb, led;
30786+static DEFINE_MUTEX(vfd_mutex);
30787
30788 static void update_vfd(void)
30789 {
30790@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30791 if (!vfd_is_open)
30792 return -EBUSY;
30793
30794+ mutex_lock(&vfd_mutex);
30795 for (;;) {
30796 char c;
30797 if (!indx)
30798 break;
30799- if (get_user(c, buf))
30800+ if (get_user(c, buf)) {
30801+ mutex_unlock(&vfd_mutex);
30802 return -EFAULT;
30803+ }
30804 if (esc) {
30805 set_led(c);
30806 esc = 0;
30807@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30808 buf++;
30809 }
30810 update_vfd();
30811+ mutex_unlock(&vfd_mutex);
30812
30813 return len;
30814 }
30815diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30816index 31e7c91..161afc0 100644
30817--- a/drivers/char/genrtc.c
30818+++ b/drivers/char/genrtc.c
30819@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
30820 switch (cmd) {
30821
30822 case RTC_PLL_GET:
30823+ memset(&pll, 0, sizeof(pll));
30824 if (get_rtc_pll(&pll))
30825 return -EINVAL;
30826 else
30827diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30828index 006466d..a2bb21c 100644
30829--- a/drivers/char/hpet.c
30830+++ b/drivers/char/hpet.c
30831@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
30832 return 0;
30833 }
30834
30835-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
30836+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
30837
30838 static int
30839 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
30840@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30841 }
30842
30843 static int
30844-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30845+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
30846 {
30847 struct hpet_timer __iomem *timer;
30848 struct hpet __iomem *hpet;
30849@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30850 {
30851 struct hpet_info info;
30852
30853+ memset(&info, 0, sizeof(info));
30854+
30855 if (devp->hd_ireqfreq)
30856 info.hi_ireqfreq =
30857 hpet_time_div(hpetp, devp->hd_ireqfreq);
30858- else
30859- info.hi_ireqfreq = 0;
30860 info.hi_flags =
30861 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
30862 info.hi_hpet = hpetp->hp_which;
30863diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
30864index 0afc8b8..6913fc3 100644
30865--- a/drivers/char/hvc_beat.c
30866+++ b/drivers/char/hvc_beat.c
30867@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
30868 return cnt;
30869 }
30870
30871-static struct hv_ops hvc_beat_get_put_ops = {
30872+static const struct hv_ops hvc_beat_get_put_ops = {
30873 .get_chars = hvc_beat_get_chars,
30874 .put_chars = hvc_beat_put_chars,
30875 };
30876diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
30877index 98097f2..407dddc 100644
30878--- a/drivers/char/hvc_console.c
30879+++ b/drivers/char/hvc_console.c
30880@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
30881 * console interfaces but can still be used as a tty device. This has to be
30882 * static because kmalloc will not work during early console init.
30883 */
30884-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30885+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30886 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
30887 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
30888
30889@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
30890 * vty adapters do NOT get an hvc_instantiate() callback since they
30891 * appear after early console init.
30892 */
30893-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
30894+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
30895 {
30896 struct hvc_struct *hp;
30897
30898@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
30899 };
30900
30901 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
30902- struct hv_ops *ops, int outbuf_size)
30903+ const struct hv_ops *ops, int outbuf_size)
30904 {
30905 struct hvc_struct *hp;
30906 int i;
30907diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
30908index 10950ca..ed176c3 100644
30909--- a/drivers/char/hvc_console.h
30910+++ b/drivers/char/hvc_console.h
30911@@ -55,7 +55,7 @@ struct hvc_struct {
30912 int outbuf_size;
30913 int n_outbuf;
30914 uint32_t vtermno;
30915- struct hv_ops *ops;
30916+ const struct hv_ops *ops;
30917 int irq_requested;
30918 int data;
30919 struct winsize ws;
30920@@ -76,11 +76,11 @@ struct hv_ops {
30921 };
30922
30923 /* Register a vterm and a slot index for use as a console (console_init) */
30924-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
30925+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
30926
30927 /* register a vterm for hvc tty operation (module_init or hotplug add) */
30928 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
30929- struct hv_ops *ops, int outbuf_size);
30930+ const struct hv_ops *ops, int outbuf_size);
30931 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
30932 extern int hvc_remove(struct hvc_struct *hp);
30933
30934diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
30935index 936d05b..fd02426 100644
30936--- a/drivers/char/hvc_iseries.c
30937+++ b/drivers/char/hvc_iseries.c
30938@@ -197,7 +197,7 @@ done:
30939 return sent;
30940 }
30941
30942-static struct hv_ops hvc_get_put_ops = {
30943+static const struct hv_ops hvc_get_put_ops = {
30944 .get_chars = get_chars,
30945 .put_chars = put_chars,
30946 .notifier_add = notifier_add_irq,
30947diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
30948index b0e168f..69cda2a 100644
30949--- a/drivers/char/hvc_iucv.c
30950+++ b/drivers/char/hvc_iucv.c
30951@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
30952
30953
30954 /* HVC operations */
30955-static struct hv_ops hvc_iucv_ops = {
30956+static const struct hv_ops hvc_iucv_ops = {
30957 .get_chars = hvc_iucv_get_chars,
30958 .put_chars = hvc_iucv_put_chars,
30959 .notifier_add = hvc_iucv_notifier_add,
30960diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
30961index 88590d0..61c4a61 100644
30962--- a/drivers/char/hvc_rtas.c
30963+++ b/drivers/char/hvc_rtas.c
30964@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
30965 return i;
30966 }
30967
30968-static struct hv_ops hvc_rtas_get_put_ops = {
30969+static const struct hv_ops hvc_rtas_get_put_ops = {
30970 .get_chars = hvc_rtas_read_console,
30971 .put_chars = hvc_rtas_write_console,
30972 };
30973diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
30974index bd63ba8..b0957e6 100644
30975--- a/drivers/char/hvc_udbg.c
30976+++ b/drivers/char/hvc_udbg.c
30977@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
30978 return i;
30979 }
30980
30981-static struct hv_ops hvc_udbg_ops = {
30982+static const struct hv_ops hvc_udbg_ops = {
30983 .get_chars = hvc_udbg_get,
30984 .put_chars = hvc_udbg_put,
30985 };
30986diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
30987index 10be343..27370e9 100644
30988--- a/drivers/char/hvc_vio.c
30989+++ b/drivers/char/hvc_vio.c
30990@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
30991 return got;
30992 }
30993
30994-static struct hv_ops hvc_get_put_ops = {
30995+static const struct hv_ops hvc_get_put_ops = {
30996 .get_chars = filtered_get_chars,
30997 .put_chars = hvc_put_chars,
30998 .notifier_add = notifier_add_irq,
30999diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
31000index a6ee32b..94f8c26 100644
31001--- a/drivers/char/hvc_xen.c
31002+++ b/drivers/char/hvc_xen.c
31003@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
31004 return recv;
31005 }
31006
31007-static struct hv_ops hvc_ops = {
31008+static const struct hv_ops hvc_ops = {
31009 .get_chars = read_console,
31010 .put_chars = write_console,
31011 .notifier_add = notifier_add_irq,
31012diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31013index 266b858..f3ee0bb 100644
31014--- a/drivers/char/hvcs.c
31015+++ b/drivers/char/hvcs.c
31016@@ -82,6 +82,7 @@
31017 #include <asm/hvcserver.h>
31018 #include <asm/uaccess.h>
31019 #include <asm/vio.h>
31020+#include <asm/local.h>
31021
31022 /*
31023 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31024@@ -269,7 +270,7 @@ struct hvcs_struct {
31025 unsigned int index;
31026
31027 struct tty_struct *tty;
31028- int open_count;
31029+ local_t open_count;
31030
31031 /*
31032 * Used to tell the driver kernel_thread what operations need to take
31033@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31034
31035 spin_lock_irqsave(&hvcsd->lock, flags);
31036
31037- if (hvcsd->open_count > 0) {
31038+ if (local_read(&hvcsd->open_count) > 0) {
31039 spin_unlock_irqrestore(&hvcsd->lock, flags);
31040 printk(KERN_INFO "HVCS: vterm state unchanged. "
31041 "The hvcs device node is still in use.\n");
31042@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31043 if ((retval = hvcs_partner_connect(hvcsd)))
31044 goto error_release;
31045
31046- hvcsd->open_count = 1;
31047+ local_set(&hvcsd->open_count, 1);
31048 hvcsd->tty = tty;
31049 tty->driver_data = hvcsd;
31050
31051@@ -1169,7 +1170,7 @@ fast_open:
31052
31053 spin_lock_irqsave(&hvcsd->lock, flags);
31054 kref_get(&hvcsd->kref);
31055- hvcsd->open_count++;
31056+ local_inc(&hvcsd->open_count);
31057 hvcsd->todo_mask |= HVCS_SCHED_READ;
31058 spin_unlock_irqrestore(&hvcsd->lock, flags);
31059
31060@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31061 hvcsd = tty->driver_data;
31062
31063 spin_lock_irqsave(&hvcsd->lock, flags);
31064- if (--hvcsd->open_count == 0) {
31065+ if (local_dec_and_test(&hvcsd->open_count)) {
31066
31067 vio_disable_interrupts(hvcsd->vdev);
31068
31069@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31070 free_irq(irq, hvcsd);
31071 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31072 return;
31073- } else if (hvcsd->open_count < 0) {
31074+ } else if (local_read(&hvcsd->open_count) < 0) {
31075 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31076 " is missmanaged.\n",
31077- hvcsd->vdev->unit_address, hvcsd->open_count);
31078+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31079 }
31080
31081 spin_unlock_irqrestore(&hvcsd->lock, flags);
31082@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31083
31084 spin_lock_irqsave(&hvcsd->lock, flags);
31085 /* Preserve this so that we know how many kref refs to put */
31086- temp_open_count = hvcsd->open_count;
31087+ temp_open_count = local_read(&hvcsd->open_count);
31088
31089 /*
31090 * Don't kref put inside the spinlock because the destruction
31091@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31092 hvcsd->tty->driver_data = NULL;
31093 hvcsd->tty = NULL;
31094
31095- hvcsd->open_count = 0;
31096+ local_set(&hvcsd->open_count, 0);
31097
31098 /* This will drop any buffered data on the floor which is OK in a hangup
31099 * scenario. */
31100@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31101 * the middle of a write operation? This is a crummy place to do this
31102 * but we want to keep it all in the spinlock.
31103 */
31104- if (hvcsd->open_count <= 0) {
31105+ if (local_read(&hvcsd->open_count) <= 0) {
31106 spin_unlock_irqrestore(&hvcsd->lock, flags);
31107 return -ENODEV;
31108 }
31109@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31110 {
31111 struct hvcs_struct *hvcsd = tty->driver_data;
31112
31113- if (!hvcsd || hvcsd->open_count <= 0)
31114+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31115 return 0;
31116
31117 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31118diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31119index ec5e3f8..02455ba 100644
31120--- a/drivers/char/ipmi/ipmi_msghandler.c
31121+++ b/drivers/char/ipmi/ipmi_msghandler.c
31122@@ -414,7 +414,7 @@ struct ipmi_smi {
31123 struct proc_dir_entry *proc_dir;
31124 char proc_dir_name[10];
31125
31126- atomic_t stats[IPMI_NUM_STATS];
31127+ atomic_unchecked_t stats[IPMI_NUM_STATS];
31128
31129 /*
31130 * run_to_completion duplicate of smb_info, smi_info
31131@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31132
31133
31134 #define ipmi_inc_stat(intf, stat) \
31135- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31136+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31137 #define ipmi_get_stat(intf, stat) \
31138- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31139+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31140
31141 static int is_lan_addr(struct ipmi_addr *addr)
31142 {
31143@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31144 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31145 init_waitqueue_head(&intf->waitq);
31146 for (i = 0; i < IPMI_NUM_STATS; i++)
31147- atomic_set(&intf->stats[i], 0);
31148+ atomic_set_unchecked(&intf->stats[i], 0);
31149
31150 intf->proc_dir = NULL;
31151
31152@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31153 struct ipmi_smi_msg smi_msg;
31154 struct ipmi_recv_msg recv_msg;
31155
31156+ pax_track_stack();
31157+
31158 si = (struct ipmi_system_interface_addr *) &addr;
31159 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31160 si->channel = IPMI_BMC_CHANNEL;
31161diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31162index abae8c9..8021979 100644
31163--- a/drivers/char/ipmi/ipmi_si_intf.c
31164+++ b/drivers/char/ipmi/ipmi_si_intf.c
31165@@ -277,7 +277,7 @@ struct smi_info {
31166 unsigned char slave_addr;
31167
31168 /* Counters and things for the proc filesystem. */
31169- atomic_t stats[SI_NUM_STATS];
31170+ atomic_unchecked_t stats[SI_NUM_STATS];
31171
31172 struct task_struct *thread;
31173
31174@@ -285,9 +285,9 @@ struct smi_info {
31175 };
31176
31177 #define smi_inc_stat(smi, stat) \
31178- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31179+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31180 #define smi_get_stat(smi, stat) \
31181- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31182+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31183
31184 #define SI_MAX_PARMS 4
31185
31186@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31187 atomic_set(&new_smi->req_events, 0);
31188 new_smi->run_to_completion = 0;
31189 for (i = 0; i < SI_NUM_STATS; i++)
31190- atomic_set(&new_smi->stats[i], 0);
31191+ atomic_set_unchecked(&new_smi->stats[i], 0);
31192
31193 new_smi->interrupt_disabled = 0;
31194 atomic_set(&new_smi->stop_operation, 0);
31195diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31196index 402838f..55e2200 100644
31197--- a/drivers/char/istallion.c
31198+++ b/drivers/char/istallion.c
31199@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31200 * re-used for each stats call.
31201 */
31202 static comstats_t stli_comstats;
31203-static combrd_t stli_brdstats;
31204 static struct asystats stli_cdkstats;
31205
31206 /*****************************************************************************/
31207@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31208 {
31209 struct stlibrd *brdp;
31210 unsigned int i;
31211+ combrd_t stli_brdstats;
31212
31213 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31214 return -EFAULT;
31215@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31216 struct stliport stli_dummyport;
31217 struct stliport *portp;
31218
31219+ pax_track_stack();
31220+
31221 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31222 return -EFAULT;
31223 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31224@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31225 struct stlibrd stli_dummybrd;
31226 struct stlibrd *brdp;
31227
31228+ pax_track_stack();
31229+
31230 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31231 return -EFAULT;
31232 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31233diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31234index 950837c..e55a288 100644
31235--- a/drivers/char/keyboard.c
31236+++ b/drivers/char/keyboard.c
31237@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31238 kbd->kbdmode == VC_MEDIUMRAW) &&
31239 value != KVAL(K_SAK))
31240 return; /* SAK is allowed even in raw mode */
31241+
31242+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31243+ {
31244+ void *func = fn_handler[value];
31245+ if (func == fn_show_state || func == fn_show_ptregs ||
31246+ func == fn_show_mem)
31247+ return;
31248+ }
31249+#endif
31250+
31251 fn_handler[value](vc);
31252 }
31253
31254@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31255 .evbit = { BIT_MASK(EV_SND) },
31256 },
31257
31258- { }, /* Terminating entry */
31259+ { 0 }, /* Terminating entry */
31260 };
31261
31262 MODULE_DEVICE_TABLE(input, kbd_ids);
31263diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31264index 87c67b4..230527a 100644
31265--- a/drivers/char/mbcs.c
31266+++ b/drivers/char/mbcs.c
31267@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31268 return 0;
31269 }
31270
31271-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31272+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31273 {
31274 .part_num = MBCS_PART_NUM,
31275 .mfg_num = MBCS_MFG_NUM,
31276diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31277index 1270f64..8495f49 100644
31278--- a/drivers/char/mem.c
31279+++ b/drivers/char/mem.c
31280@@ -18,6 +18,7 @@
31281 #include <linux/raw.h>
31282 #include <linux/tty.h>
31283 #include <linux/capability.h>
31284+#include <linux/security.h>
31285 #include <linux/ptrace.h>
31286 #include <linux/device.h>
31287 #include <linux/highmem.h>
31288@@ -35,6 +36,10 @@
31289 # include <linux/efi.h>
31290 #endif
31291
31292+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31293+extern struct file_operations grsec_fops;
31294+#endif
31295+
31296 static inline unsigned long size_inside_page(unsigned long start,
31297 unsigned long size)
31298 {
31299@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31300
31301 while (cursor < to) {
31302 if (!devmem_is_allowed(pfn)) {
31303+#ifdef CONFIG_GRKERNSEC_KMEM
31304+ gr_handle_mem_readwrite(from, to);
31305+#else
31306 printk(KERN_INFO
31307 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31308 current->comm, from, to);
31309+#endif
31310 return 0;
31311 }
31312 cursor += PAGE_SIZE;
31313@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31314 }
31315 return 1;
31316 }
31317+#elif defined(CONFIG_GRKERNSEC_KMEM)
31318+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31319+{
31320+ return 0;
31321+}
31322 #else
31323 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31324 {
31325@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31326 #endif
31327
31328 while (count > 0) {
31329+ char *temp;
31330+
31331 /*
31332 * Handle first page in case it's not aligned
31333 */
31334@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31335 if (!ptr)
31336 return -EFAULT;
31337
31338- if (copy_to_user(buf, ptr, sz)) {
31339+#ifdef CONFIG_PAX_USERCOPY
31340+ temp = kmalloc(sz, GFP_KERNEL);
31341+ if (!temp) {
31342+ unxlate_dev_mem_ptr(p, ptr);
31343+ return -ENOMEM;
31344+ }
31345+ memcpy(temp, ptr, sz);
31346+#else
31347+ temp = ptr;
31348+#endif
31349+
31350+ if (copy_to_user(buf, temp, sz)) {
31351+
31352+#ifdef CONFIG_PAX_USERCOPY
31353+ kfree(temp);
31354+#endif
31355+
31356 unxlate_dev_mem_ptr(p, ptr);
31357 return -EFAULT;
31358 }
31359
31360+#ifdef CONFIG_PAX_USERCOPY
31361+ kfree(temp);
31362+#endif
31363+
31364 unxlate_dev_mem_ptr(p, ptr);
31365
31366 buf += sz;
31367@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31368 size_t count, loff_t *ppos)
31369 {
31370 unsigned long p = *ppos;
31371- ssize_t low_count, read, sz;
31372+ ssize_t low_count, read, sz, err = 0;
31373 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31374- int err = 0;
31375
31376 read = 0;
31377 if (p < (unsigned long) high_memory) {
31378@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31379 }
31380 #endif
31381 while (low_count > 0) {
31382+ char *temp;
31383+
31384 sz = size_inside_page(p, low_count);
31385
31386 /*
31387@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31388 */
31389 kbuf = xlate_dev_kmem_ptr((char *)p);
31390
31391- if (copy_to_user(buf, kbuf, sz))
31392+#ifdef CONFIG_PAX_USERCOPY
31393+ temp = kmalloc(sz, GFP_KERNEL);
31394+ if (!temp)
31395+ return -ENOMEM;
31396+ memcpy(temp, kbuf, sz);
31397+#else
31398+ temp = kbuf;
31399+#endif
31400+
31401+ err = copy_to_user(buf, temp, sz);
31402+
31403+#ifdef CONFIG_PAX_USERCOPY
31404+ kfree(temp);
31405+#endif
31406+
31407+ if (err)
31408 return -EFAULT;
31409 buf += sz;
31410 p += sz;
31411@@ -889,6 +941,9 @@ static const struct memdev {
31412 #ifdef CONFIG_CRASH_DUMP
31413 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31414 #endif
31415+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31416+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31417+#endif
31418 };
31419
31420 static int memory_open(struct inode *inode, struct file *filp)
31421diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31422index 674b3ab..a8d1970 100644
31423--- a/drivers/char/pcmcia/ipwireless/tty.c
31424+++ b/drivers/char/pcmcia/ipwireless/tty.c
31425@@ -29,6 +29,7 @@
31426 #include <linux/tty_driver.h>
31427 #include <linux/tty_flip.h>
31428 #include <linux/uaccess.h>
31429+#include <asm/local.h>
31430
31431 #include "tty.h"
31432 #include "network.h"
31433@@ -51,7 +52,7 @@ struct ipw_tty {
31434 int tty_type;
31435 struct ipw_network *network;
31436 struct tty_struct *linux_tty;
31437- int open_count;
31438+ local_t open_count;
31439 unsigned int control_lines;
31440 struct mutex ipw_tty_mutex;
31441 int tx_bytes_queued;
31442@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31443 mutex_unlock(&tty->ipw_tty_mutex);
31444 return -ENODEV;
31445 }
31446- if (tty->open_count == 0)
31447+ if (local_read(&tty->open_count) == 0)
31448 tty->tx_bytes_queued = 0;
31449
31450- tty->open_count++;
31451+ local_inc(&tty->open_count);
31452
31453 tty->linux_tty = linux_tty;
31454 linux_tty->driver_data = tty;
31455@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31456
31457 static void do_ipw_close(struct ipw_tty *tty)
31458 {
31459- tty->open_count--;
31460-
31461- if (tty->open_count == 0) {
31462+ if (local_dec_return(&tty->open_count) == 0) {
31463 struct tty_struct *linux_tty = tty->linux_tty;
31464
31465 if (linux_tty != NULL) {
31466@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31467 return;
31468
31469 mutex_lock(&tty->ipw_tty_mutex);
31470- if (tty->open_count == 0) {
31471+ if (local_read(&tty->open_count) == 0) {
31472 mutex_unlock(&tty->ipw_tty_mutex);
31473 return;
31474 }
31475@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31476 return;
31477 }
31478
31479- if (!tty->open_count) {
31480+ if (!local_read(&tty->open_count)) {
31481 mutex_unlock(&tty->ipw_tty_mutex);
31482 return;
31483 }
31484@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31485 return -ENODEV;
31486
31487 mutex_lock(&tty->ipw_tty_mutex);
31488- if (!tty->open_count) {
31489+ if (!local_read(&tty->open_count)) {
31490 mutex_unlock(&tty->ipw_tty_mutex);
31491 return -EINVAL;
31492 }
31493@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31494 if (!tty)
31495 return -ENODEV;
31496
31497- if (!tty->open_count)
31498+ if (!local_read(&tty->open_count))
31499 return -EINVAL;
31500
31501 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31502@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31503 if (!tty)
31504 return 0;
31505
31506- if (!tty->open_count)
31507+ if (!local_read(&tty->open_count))
31508 return 0;
31509
31510 return tty->tx_bytes_queued;
31511@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31512 if (!tty)
31513 return -ENODEV;
31514
31515- if (!tty->open_count)
31516+ if (!local_read(&tty->open_count))
31517 return -EINVAL;
31518
31519 return get_control_lines(tty);
31520@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31521 if (!tty)
31522 return -ENODEV;
31523
31524- if (!tty->open_count)
31525+ if (!local_read(&tty->open_count))
31526 return -EINVAL;
31527
31528 return set_control_lines(tty, set, clear);
31529@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31530 if (!tty)
31531 return -ENODEV;
31532
31533- if (!tty->open_count)
31534+ if (!local_read(&tty->open_count))
31535 return -EINVAL;
31536
31537 /* FIXME: Exactly how is the tty object locked here .. */
31538@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31539 against a parallel ioctl etc */
31540 mutex_lock(&ttyj->ipw_tty_mutex);
31541 }
31542- while (ttyj->open_count)
31543+ while (local_read(&ttyj->open_count))
31544 do_ipw_close(ttyj);
31545 ipwireless_disassociate_network_ttys(network,
31546 ttyj->channel_idx);
31547diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31548index 62f282e..e45c45c 100644
31549--- a/drivers/char/pty.c
31550+++ b/drivers/char/pty.c
31551@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31552 register_sysctl_table(pty_root_table);
31553
31554 /* Now create the /dev/ptmx special device */
31555+ pax_open_kernel();
31556 tty_default_fops(&ptmx_fops);
31557- ptmx_fops.open = ptmx_open;
31558+ *(void **)&ptmx_fops.open = ptmx_open;
31559+ pax_close_kernel();
31560
31561 cdev_init(&ptmx_cdev, &ptmx_fops);
31562 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31563diff --git a/drivers/char/random.c b/drivers/char/random.c
31564index 3a19e2d..6ed09d3 100644
31565--- a/drivers/char/random.c
31566+++ b/drivers/char/random.c
31567@@ -254,8 +254,13 @@
31568 /*
31569 * Configuration information
31570 */
31571+#ifdef CONFIG_GRKERNSEC_RANDNET
31572+#define INPUT_POOL_WORDS 512
31573+#define OUTPUT_POOL_WORDS 128
31574+#else
31575 #define INPUT_POOL_WORDS 128
31576 #define OUTPUT_POOL_WORDS 32
31577+#endif
31578 #define SEC_XFER_SIZE 512
31579
31580 /*
31581@@ -292,10 +297,17 @@ static struct poolinfo {
31582 int poolwords;
31583 int tap1, tap2, tap3, tap4, tap5;
31584 } poolinfo_table[] = {
31585+#ifdef CONFIG_GRKERNSEC_RANDNET
31586+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31587+ { 512, 411, 308, 208, 104, 1 },
31588+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31589+ { 128, 103, 76, 51, 25, 1 },
31590+#else
31591 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31592 { 128, 103, 76, 51, 25, 1 },
31593 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31594 { 32, 26, 20, 14, 7, 1 },
31595+#endif
31596 #if 0
31597 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31598 { 2048, 1638, 1231, 819, 411, 1 },
31599@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31600 #include <linux/sysctl.h>
31601
31602 static int min_read_thresh = 8, min_write_thresh;
31603-static int max_read_thresh = INPUT_POOL_WORDS * 32;
31604+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31605 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31606 static char sysctl_bootid[16];
31607
31608diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31609index 0e29a23..0efc2c2 100644
31610--- a/drivers/char/rocket.c
31611+++ b/drivers/char/rocket.c
31612@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31613 struct rocket_ports tmp;
31614 int board;
31615
31616+ pax_track_stack();
31617+
31618 if (!retports)
31619 return -EFAULT;
31620 memset(&tmp, 0, sizeof (tmp));
31621diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31622index 8c262aa..4d3b058 100644
31623--- a/drivers/char/sonypi.c
31624+++ b/drivers/char/sonypi.c
31625@@ -55,6 +55,7 @@
31626 #include <asm/uaccess.h>
31627 #include <asm/io.h>
31628 #include <asm/system.h>
31629+#include <asm/local.h>
31630
31631 #include <linux/sonypi.h>
31632
31633@@ -491,7 +492,7 @@ static struct sonypi_device {
31634 spinlock_t fifo_lock;
31635 wait_queue_head_t fifo_proc_list;
31636 struct fasync_struct *fifo_async;
31637- int open_count;
31638+ local_t open_count;
31639 int model;
31640 struct input_dev *input_jog_dev;
31641 struct input_dev *input_key_dev;
31642@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31643 static int sonypi_misc_release(struct inode *inode, struct file *file)
31644 {
31645 mutex_lock(&sonypi_device.lock);
31646- sonypi_device.open_count--;
31647+ local_dec(&sonypi_device.open_count);
31648 mutex_unlock(&sonypi_device.lock);
31649 return 0;
31650 }
31651@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31652 lock_kernel();
31653 mutex_lock(&sonypi_device.lock);
31654 /* Flush input queue on first open */
31655- if (!sonypi_device.open_count)
31656+ if (!local_read(&sonypi_device.open_count))
31657 kfifo_reset(sonypi_device.fifo);
31658- sonypi_device.open_count++;
31659+ local_inc(&sonypi_device.open_count);
31660 mutex_unlock(&sonypi_device.lock);
31661 unlock_kernel();
31662 return 0;
31663diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31664index db6dcfa..13834cb 100644
31665--- a/drivers/char/stallion.c
31666+++ b/drivers/char/stallion.c
31667@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31668 struct stlport stl_dummyport;
31669 struct stlport *portp;
31670
31671+ pax_track_stack();
31672+
31673 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31674 return -EFAULT;
31675 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31676diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31677index a0789f6..cea3902 100644
31678--- a/drivers/char/tpm/tpm.c
31679+++ b/drivers/char/tpm/tpm.c
31680@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31681 chip->vendor.req_complete_val)
31682 goto out_recv;
31683
31684- if ((status == chip->vendor.req_canceled)) {
31685+ if (status == chip->vendor.req_canceled) {
31686 dev_err(chip->dev, "Operation Canceled\n");
31687 rc = -ECANCELED;
31688 goto out;
31689@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31690
31691 struct tpm_chip *chip = dev_get_drvdata(dev);
31692
31693+ pax_track_stack();
31694+
31695 tpm_cmd.header.in = tpm_readpubek_header;
31696 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31697 "attempting to read the PUBEK");
31698diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31699index bf2170f..ce8cab9 100644
31700--- a/drivers/char/tpm/tpm_bios.c
31701+++ b/drivers/char/tpm/tpm_bios.c
31702@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31703 event = addr;
31704
31705 if ((event->event_type == 0 && event->event_size == 0) ||
31706- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31707+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31708 return NULL;
31709
31710 return addr;
31711@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31712 return NULL;
31713
31714 if ((event->event_type == 0 && event->event_size == 0) ||
31715- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31716+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31717 return NULL;
31718
31719 (*pos)++;
31720@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31721 int i;
31722
31723 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31724- seq_putc(m, data[i]);
31725+ if (!seq_putc(m, data[i]))
31726+ return -EFAULT;
31727
31728 return 0;
31729 }
31730@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31731 log->bios_event_log_end = log->bios_event_log + len;
31732
31733 virt = acpi_os_map_memory(start, len);
31734+ if (!virt) {
31735+ kfree(log->bios_event_log);
31736+ log->bios_event_log = NULL;
31737+ return -EFAULT;
31738+ }
31739
31740- memcpy(log->bios_event_log, virt, len);
31741+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31742
31743 acpi_os_unmap_memory(virt, len);
31744 return 0;
31745diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31746index 123cedf..6664cb4 100644
31747--- a/drivers/char/tty_io.c
31748+++ b/drivers/char/tty_io.c
31749@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
31750 static int tty_release(struct inode *, struct file *);
31751 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
31752 #ifdef CONFIG_COMPAT
31753-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31754+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31755 unsigned long arg);
31756 #else
31757 #define tty_compat_ioctl NULL
31758@@ -1774,6 +1774,7 @@ got_driver:
31759
31760 if (IS_ERR(tty)) {
31761 mutex_unlock(&tty_mutex);
31762+ tty_driver_kref_put(driver);
31763 return PTR_ERR(tty);
31764 }
31765 }
31766@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31767 return retval;
31768 }
31769
31770+EXPORT_SYMBOL(tty_ioctl);
31771+
31772 #ifdef CONFIG_COMPAT
31773-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31774+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31775 unsigned long arg)
31776 {
31777 struct inode *inode = file->f_dentry->d_inode;
31778@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31779
31780 return retval;
31781 }
31782+
31783+EXPORT_SYMBOL(tty_compat_ioctl);
31784 #endif
31785
31786 /*
31787@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
31788
31789 void tty_default_fops(struct file_operations *fops)
31790 {
31791- *fops = tty_fops;
31792+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
31793 }
31794
31795 /*
31796diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
31797index d814a3d..b55b9c9 100644
31798--- a/drivers/char/tty_ldisc.c
31799+++ b/drivers/char/tty_ldisc.c
31800@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
31801 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
31802 struct tty_ldisc_ops *ldo = ld->ops;
31803
31804- ldo->refcount--;
31805+ atomic_dec(&ldo->refcount);
31806 module_put(ldo->owner);
31807 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31808
31809@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
31810 spin_lock_irqsave(&tty_ldisc_lock, flags);
31811 tty_ldiscs[disc] = new_ldisc;
31812 new_ldisc->num = disc;
31813- new_ldisc->refcount = 0;
31814+ atomic_set(&new_ldisc->refcount, 0);
31815 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31816
31817 return ret;
31818@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
31819 return -EINVAL;
31820
31821 spin_lock_irqsave(&tty_ldisc_lock, flags);
31822- if (tty_ldiscs[disc]->refcount)
31823+ if (atomic_read(&tty_ldiscs[disc]->refcount))
31824 ret = -EBUSY;
31825 else
31826 tty_ldiscs[disc] = NULL;
31827@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
31828 if (ldops) {
31829 ret = ERR_PTR(-EAGAIN);
31830 if (try_module_get(ldops->owner)) {
31831- ldops->refcount++;
31832+ atomic_inc(&ldops->refcount);
31833 ret = ldops;
31834 }
31835 }
31836@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
31837 unsigned long flags;
31838
31839 spin_lock_irqsave(&tty_ldisc_lock, flags);
31840- ldops->refcount--;
31841+ atomic_dec(&ldops->refcount);
31842 module_put(ldops->owner);
31843 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31844 }
31845diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31846index a035ae3..c27fe2c 100644
31847--- a/drivers/char/virtio_console.c
31848+++ b/drivers/char/virtio_console.c
31849@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
31850 * virtqueue, so we let the drivers do some boutique early-output thing. */
31851 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
31852 {
31853- virtio_cons.put_chars = put_chars;
31854+ pax_open_kernel();
31855+ *(void **)&virtio_cons.put_chars = put_chars;
31856+ pax_close_kernel();
31857 return hvc_instantiate(0, 0, &virtio_cons);
31858 }
31859
31860@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
31861 out_vq = vqs[1];
31862
31863 /* Start using the new console output. */
31864- virtio_cons.get_chars = get_chars;
31865- virtio_cons.put_chars = put_chars;
31866- virtio_cons.notifier_add = notifier_add_vio;
31867- virtio_cons.notifier_del = notifier_del_vio;
31868- virtio_cons.notifier_hangup = notifier_del_vio;
31869+ pax_open_kernel();
31870+ *(void **)&virtio_cons.get_chars = get_chars;
31871+ *(void **)&virtio_cons.put_chars = put_chars;
31872+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
31873+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
31874+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
31875+ pax_close_kernel();
31876
31877 /* The first argument of hvc_alloc() is the virtual console number, so
31878 * we use zero. The second argument is the parameter for the
31879diff --git a/drivers/char/vt.c b/drivers/char/vt.c
31880index 0c80c68..53d59c1 100644
31881--- a/drivers/char/vt.c
31882+++ b/drivers/char/vt.c
31883@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
31884
31885 static void notify_write(struct vc_data *vc, unsigned int unicode)
31886 {
31887- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
31888+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
31889 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
31890 }
31891
31892diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
31893index 6351a26..999af95 100644
31894--- a/drivers/char/vt_ioctl.c
31895+++ b/drivers/char/vt_ioctl.c
31896@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31897 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
31898 return -EFAULT;
31899
31900- if (!capable(CAP_SYS_TTY_CONFIG))
31901- perm = 0;
31902-
31903 switch (cmd) {
31904 case KDGKBENT:
31905 key_map = key_maps[s];
31906@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31907 val = (i ? K_HOLE : K_NOSUCHMAP);
31908 return put_user(val, &user_kbe->kb_value);
31909 case KDSKBENT:
31910+ if (!capable(CAP_SYS_TTY_CONFIG))
31911+ perm = 0;
31912+
31913 if (!perm)
31914 return -EPERM;
31915+
31916 if (!i && v == K_NOSUCHMAP) {
31917 /* deallocate map */
31918 key_map = key_maps[s];
31919@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31920 int i, j, k;
31921 int ret;
31922
31923- if (!capable(CAP_SYS_TTY_CONFIG))
31924- perm = 0;
31925-
31926 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
31927 if (!kbs) {
31928 ret = -ENOMEM;
31929@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31930 kfree(kbs);
31931 return ((p && *p) ? -EOVERFLOW : 0);
31932 case KDSKBSENT:
31933+ if (!capable(CAP_SYS_TTY_CONFIG))
31934+ perm = 0;
31935+
31936 if (!perm) {
31937 ret = -EPERM;
31938 goto reterr;
31939diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
31940index c7ae026..1769c1d 100644
31941--- a/drivers/cpufreq/cpufreq.c
31942+++ b/drivers/cpufreq/cpufreq.c
31943@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
31944 complete(&policy->kobj_unregister);
31945 }
31946
31947-static struct sysfs_ops sysfs_ops = {
31948+static const struct sysfs_ops sysfs_ops = {
31949 .show = show,
31950 .store = store,
31951 };
31952diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
31953index 97b0038..2056670 100644
31954--- a/drivers/cpuidle/sysfs.c
31955+++ b/drivers/cpuidle/sysfs.c
31956@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
31957 return ret;
31958 }
31959
31960-static struct sysfs_ops cpuidle_sysfs_ops = {
31961+static const struct sysfs_ops cpuidle_sysfs_ops = {
31962 .show = cpuidle_show,
31963 .store = cpuidle_store,
31964 };
31965@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
31966 return ret;
31967 }
31968
31969-static struct sysfs_ops cpuidle_state_sysfs_ops = {
31970+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
31971 .show = cpuidle_state_show,
31972 };
31973
31974@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
31975 .release = cpuidle_state_sysfs_release,
31976 };
31977
31978-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31979+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31980 {
31981 kobject_put(&device->kobjs[i]->kobj);
31982 wait_for_completion(&device->kobjs[i]->kobj_unregister);
31983diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
31984index 5f753fc..0377ae9 100644
31985--- a/drivers/crypto/hifn_795x.c
31986+++ b/drivers/crypto/hifn_795x.c
31987@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
31988 0xCA, 0x34, 0x2B, 0x2E};
31989 struct scatterlist sg;
31990
31991+ pax_track_stack();
31992+
31993 memset(src, 0, sizeof(src));
31994 memset(ctx.key, 0, sizeof(ctx.key));
31995
31996diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
31997index 71e6482..de8d96c 100644
31998--- a/drivers/crypto/padlock-aes.c
31999+++ b/drivers/crypto/padlock-aes.c
32000@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
32001 struct crypto_aes_ctx gen_aes;
32002 int cpu;
32003
32004+ pax_track_stack();
32005+
32006 if (key_len % 8) {
32007 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32008 return -EINVAL;
32009diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32010index dcc4ab7..cc834bb 100644
32011--- a/drivers/dma/ioat/dma.c
32012+++ b/drivers/dma/ioat/dma.c
32013@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32014 return entry->show(&chan->common, page);
32015 }
32016
32017-struct sysfs_ops ioat_sysfs_ops = {
32018+const struct sysfs_ops ioat_sysfs_ops = {
32019 .show = ioat_attr_show,
32020 };
32021
32022diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32023index bbc3e78..f2db62c 100644
32024--- a/drivers/dma/ioat/dma.h
32025+++ b/drivers/dma/ioat/dma.h
32026@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32027 unsigned long *phys_complete);
32028 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32029 void ioat_kobject_del(struct ioatdma_device *device);
32030-extern struct sysfs_ops ioat_sysfs_ops;
32031+extern const struct sysfs_ops ioat_sysfs_ops;
32032 extern struct ioat_sysfs_entry ioat_version_attr;
32033 extern struct ioat_sysfs_entry ioat_cap_attr;
32034 #endif /* IOATDMA_H */
32035diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32036index 9908c9e..3ceb0e5 100644
32037--- a/drivers/dma/ioat/dma_v3.c
32038+++ b/drivers/dma/ioat/dma_v3.c
32039@@ -71,10 +71,10 @@
32040 /* provide a lookup table for setting the source address in the base or
32041 * extended descriptor of an xor or pq descriptor
32042 */
32043-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32044-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32045-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32046-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32047+static const u8 xor_idx_to_desc = 0xd0;
32048+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32049+static const u8 pq_idx_to_desc = 0xf8;
32050+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32051
32052 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32053 {
32054diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32055index 85c464a..afd1e73 100644
32056--- a/drivers/edac/amd64_edac.c
32057+++ b/drivers/edac/amd64_edac.c
32058@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32059 * PCI core identifies what devices are on a system during boot, and then
32060 * inquiry this table to see if this driver is for a given device found.
32061 */
32062-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32063+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32064 {
32065 .vendor = PCI_VENDOR_ID_AMD,
32066 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32067diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32068index 2b95f1a..4f52793 100644
32069--- a/drivers/edac/amd76x_edac.c
32070+++ b/drivers/edac/amd76x_edac.c
32071@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32072 edac_mc_free(mci);
32073 }
32074
32075-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32076+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32077 {
32078 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32079 AMD762},
32080diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32081index d205d49..74c9672 100644
32082--- a/drivers/edac/e752x_edac.c
32083+++ b/drivers/edac/e752x_edac.c
32084@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32085 edac_mc_free(mci);
32086 }
32087
32088-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32089+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32090 {
32091 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32092 E7520},
32093diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32094index c7d11cc..c59c1ca 100644
32095--- a/drivers/edac/e7xxx_edac.c
32096+++ b/drivers/edac/e7xxx_edac.c
32097@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32098 edac_mc_free(mci);
32099 }
32100
32101-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32102+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32103 {
32104 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32105 E7205},
32106diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32107index 5376457..5fdedbc 100644
32108--- a/drivers/edac/edac_device_sysfs.c
32109+++ b/drivers/edac/edac_device_sysfs.c
32110@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32111 }
32112
32113 /* edac_dev file operations for an 'ctl_info' */
32114-static struct sysfs_ops device_ctl_info_ops = {
32115+static const struct sysfs_ops device_ctl_info_ops = {
32116 .show = edac_dev_ctl_info_show,
32117 .store = edac_dev_ctl_info_store
32118 };
32119@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32120 }
32121
32122 /* edac_dev file operations for an 'instance' */
32123-static struct sysfs_ops device_instance_ops = {
32124+static const struct sysfs_ops device_instance_ops = {
32125 .show = edac_dev_instance_show,
32126 .store = edac_dev_instance_store
32127 };
32128@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32129 }
32130
32131 /* edac_dev file operations for a 'block' */
32132-static struct sysfs_ops device_block_ops = {
32133+static const struct sysfs_ops device_block_ops = {
32134 .show = edac_dev_block_show,
32135 .store = edac_dev_block_store
32136 };
32137diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32138index e1d4ce0..88840e9 100644
32139--- a/drivers/edac/edac_mc_sysfs.c
32140+++ b/drivers/edac/edac_mc_sysfs.c
32141@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32142 return -EIO;
32143 }
32144
32145-static struct sysfs_ops csrowfs_ops = {
32146+static const struct sysfs_ops csrowfs_ops = {
32147 .show = csrowdev_show,
32148 .store = csrowdev_store
32149 };
32150@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32151 }
32152
32153 /* Intermediate show/store table */
32154-static struct sysfs_ops mci_ops = {
32155+static const struct sysfs_ops mci_ops = {
32156 .show = mcidev_show,
32157 .store = mcidev_store
32158 };
32159diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32160index 422728c..d8d9c88 100644
32161--- a/drivers/edac/edac_pci_sysfs.c
32162+++ b/drivers/edac/edac_pci_sysfs.c
32163@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32164 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32165 static int edac_pci_poll_msec = 1000; /* one second workq period */
32166
32167-static atomic_t pci_parity_count = ATOMIC_INIT(0);
32168-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32169+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32170+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32171
32172 static struct kobject *edac_pci_top_main_kobj;
32173 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32174@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32175 }
32176
32177 /* fs_ops table */
32178-static struct sysfs_ops pci_instance_ops = {
32179+static const struct sysfs_ops pci_instance_ops = {
32180 .show = edac_pci_instance_show,
32181 .store = edac_pci_instance_store
32182 };
32183@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32184 return -EIO;
32185 }
32186
32187-static struct sysfs_ops edac_pci_sysfs_ops = {
32188+static const struct sysfs_ops edac_pci_sysfs_ops = {
32189 .show = edac_pci_dev_show,
32190 .store = edac_pci_dev_store
32191 };
32192@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32193 edac_printk(KERN_CRIT, EDAC_PCI,
32194 "Signaled System Error on %s\n",
32195 pci_name(dev));
32196- atomic_inc(&pci_nonparity_count);
32197+ atomic_inc_unchecked(&pci_nonparity_count);
32198 }
32199
32200 if (status & (PCI_STATUS_PARITY)) {
32201@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32202 "Master Data Parity Error on %s\n",
32203 pci_name(dev));
32204
32205- atomic_inc(&pci_parity_count);
32206+ atomic_inc_unchecked(&pci_parity_count);
32207 }
32208
32209 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32210@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32211 "Detected Parity Error on %s\n",
32212 pci_name(dev));
32213
32214- atomic_inc(&pci_parity_count);
32215+ atomic_inc_unchecked(&pci_parity_count);
32216 }
32217 }
32218
32219@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32220 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32221 "Signaled System Error on %s\n",
32222 pci_name(dev));
32223- atomic_inc(&pci_nonparity_count);
32224+ atomic_inc_unchecked(&pci_nonparity_count);
32225 }
32226
32227 if (status & (PCI_STATUS_PARITY)) {
32228@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32229 "Master Data Parity Error on "
32230 "%s\n", pci_name(dev));
32231
32232- atomic_inc(&pci_parity_count);
32233+ atomic_inc_unchecked(&pci_parity_count);
32234 }
32235
32236 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32237@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32238 "Detected Parity Error on %s\n",
32239 pci_name(dev));
32240
32241- atomic_inc(&pci_parity_count);
32242+ atomic_inc_unchecked(&pci_parity_count);
32243 }
32244 }
32245 }
32246@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32247 if (!check_pci_errors)
32248 return;
32249
32250- before_count = atomic_read(&pci_parity_count);
32251+ before_count = atomic_read_unchecked(&pci_parity_count);
32252
32253 /* scan all PCI devices looking for a Parity Error on devices and
32254 * bridges.
32255@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32256 /* Only if operator has selected panic on PCI Error */
32257 if (edac_pci_get_panic_on_pe()) {
32258 /* If the count is different 'after' from 'before' */
32259- if (before_count != atomic_read(&pci_parity_count))
32260+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32261 panic("EDAC: PCI Parity Error");
32262 }
32263 }
32264diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32265index 6c9a0f2..9c1cf7e 100644
32266--- a/drivers/edac/i3000_edac.c
32267+++ b/drivers/edac/i3000_edac.c
32268@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32269 edac_mc_free(mci);
32270 }
32271
32272-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32273+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32274 {
32275 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32276 I3000},
32277diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32278index fde4db9..fe108f9 100644
32279--- a/drivers/edac/i3200_edac.c
32280+++ b/drivers/edac/i3200_edac.c
32281@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32282 edac_mc_free(mci);
32283 }
32284
32285-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32286+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32287 {
32288 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32289 I3200},
32290diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32291index adc10a2..57d4ccf 100644
32292--- a/drivers/edac/i5000_edac.c
32293+++ b/drivers/edac/i5000_edac.c
32294@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32295 *
32296 * The "E500P" device is the first device supported.
32297 */
32298-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32299+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32300 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32301 .driver_data = I5000P},
32302
32303diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32304index 22db05a..b2b5503 100644
32305--- a/drivers/edac/i5100_edac.c
32306+++ b/drivers/edac/i5100_edac.c
32307@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32308 edac_mc_free(mci);
32309 }
32310
32311-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32312+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32313 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32314 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32315 { 0, }
32316diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32317index f99d106..f050710 100644
32318--- a/drivers/edac/i5400_edac.c
32319+++ b/drivers/edac/i5400_edac.c
32320@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32321 *
32322 * The "E500P" device is the first device supported.
32323 */
32324-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32325+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32326 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32327 {0,} /* 0 terminated list. */
32328 };
32329diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32330index 577760a..9ce16ce 100644
32331--- a/drivers/edac/i82443bxgx_edac.c
32332+++ b/drivers/edac/i82443bxgx_edac.c
32333@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32334
32335 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32336
32337-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32338+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32339 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32340 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32341 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32342diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32343index c0088ba..64a7b98 100644
32344--- a/drivers/edac/i82860_edac.c
32345+++ b/drivers/edac/i82860_edac.c
32346@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32347 edac_mc_free(mci);
32348 }
32349
32350-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32351+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32352 {
32353 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32354 I82860},
32355diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32356index b2d83b9..a34357b 100644
32357--- a/drivers/edac/i82875p_edac.c
32358+++ b/drivers/edac/i82875p_edac.c
32359@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32360 edac_mc_free(mci);
32361 }
32362
32363-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32364+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32365 {
32366 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32367 I82875P},
32368diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32369index 2eed3ea..87bbbd1 100644
32370--- a/drivers/edac/i82975x_edac.c
32371+++ b/drivers/edac/i82975x_edac.c
32372@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32373 edac_mc_free(mci);
32374 }
32375
32376-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32377+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32378 {
32379 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32380 I82975X
32381diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32382index 9900675..78ac2b6 100644
32383--- a/drivers/edac/r82600_edac.c
32384+++ b/drivers/edac/r82600_edac.c
32385@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32386 edac_mc_free(mci);
32387 }
32388
32389-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32390+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32391 {
32392 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32393 },
32394diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32395index d4ec605..4cfec4e 100644
32396--- a/drivers/edac/x38_edac.c
32397+++ b/drivers/edac/x38_edac.c
32398@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32399 edac_mc_free(mci);
32400 }
32401
32402-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32403+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32404 {
32405 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32406 X38},
32407diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32408index 3fc2ceb..daf098f 100644
32409--- a/drivers/firewire/core-card.c
32410+++ b/drivers/firewire/core-card.c
32411@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32412
32413 void fw_core_remove_card(struct fw_card *card)
32414 {
32415- struct fw_card_driver dummy_driver = dummy_driver_template;
32416+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32417
32418 card->driver->update_phy_reg(card, 4,
32419 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32420diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32421index 4560d8f..36db24a 100644
32422--- a/drivers/firewire/core-cdev.c
32423+++ b/drivers/firewire/core-cdev.c
32424@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32425 int ret;
32426
32427 if ((request->channels == 0 && request->bandwidth == 0) ||
32428- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32429- request->bandwidth < 0)
32430+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32431 return -EINVAL;
32432
32433 r = kmalloc(sizeof(*r), GFP_KERNEL);
32434diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32435index da628c7..cf54a2c 100644
32436--- a/drivers/firewire/core-transaction.c
32437+++ b/drivers/firewire/core-transaction.c
32438@@ -36,6 +36,7 @@
32439 #include <linux/string.h>
32440 #include <linux/timer.h>
32441 #include <linux/types.h>
32442+#include <linux/sched.h>
32443
32444 #include <asm/byteorder.h>
32445
32446@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32447 struct transaction_callback_data d;
32448 struct fw_transaction t;
32449
32450+ pax_track_stack();
32451+
32452 init_completion(&d.done);
32453 d.payload = payload;
32454 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32455diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32456index 7ff6e75..a2965d9 100644
32457--- a/drivers/firewire/core.h
32458+++ b/drivers/firewire/core.h
32459@@ -86,6 +86,7 @@ struct fw_card_driver {
32460
32461 int (*stop_iso)(struct fw_iso_context *ctx);
32462 };
32463+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32464
32465 void fw_card_initialize(struct fw_card *card,
32466 const struct fw_card_driver *driver, struct device *device);
32467diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32468index 3a2ccb0..82fd7c4 100644
32469--- a/drivers/firmware/dmi_scan.c
32470+++ b/drivers/firmware/dmi_scan.c
32471@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32472 }
32473 }
32474 else {
32475- /*
32476- * no iounmap() for that ioremap(); it would be a no-op, but
32477- * it's so early in setup that sucker gets confused into doing
32478- * what it shouldn't if we actually call it.
32479- */
32480 p = dmi_ioremap(0xF0000, 0x10000);
32481 if (p == NULL)
32482 goto error;
32483@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32484 if (buf == NULL)
32485 return -1;
32486
32487- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32488+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32489
32490 iounmap(buf);
32491 return 0;
32492diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32493index 9e4f59d..110e24e 100644
32494--- a/drivers/firmware/edd.c
32495+++ b/drivers/firmware/edd.c
32496@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32497 return ret;
32498 }
32499
32500-static struct sysfs_ops edd_attr_ops = {
32501+static const struct sysfs_ops edd_attr_ops = {
32502 .show = edd_attr_show,
32503 };
32504
32505diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32506index f4f709d..082f06e 100644
32507--- a/drivers/firmware/efivars.c
32508+++ b/drivers/firmware/efivars.c
32509@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32510 return ret;
32511 }
32512
32513-static struct sysfs_ops efivar_attr_ops = {
32514+static const struct sysfs_ops efivar_attr_ops = {
32515 .show = efivar_attr_show,
32516 .store = efivar_attr_store,
32517 };
32518diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32519index 051d1eb..0a5d4e7 100644
32520--- a/drivers/firmware/iscsi_ibft.c
32521+++ b/drivers/firmware/iscsi_ibft.c
32522@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32523 return ret;
32524 }
32525
32526-static struct sysfs_ops ibft_attr_ops = {
32527+static const struct sysfs_ops ibft_attr_ops = {
32528 .show = ibft_show_attribute,
32529 };
32530
32531diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32532index 56f9234..8c58c7b 100644
32533--- a/drivers/firmware/memmap.c
32534+++ b/drivers/firmware/memmap.c
32535@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32536 NULL
32537 };
32538
32539-static struct sysfs_ops memmap_attr_ops = {
32540+static const struct sysfs_ops memmap_attr_ops = {
32541 .show = memmap_attr_show,
32542 };
32543
32544diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32545index b16c9a8..2af7d3f 100644
32546--- a/drivers/gpio/vr41xx_giu.c
32547+++ b/drivers/gpio/vr41xx_giu.c
32548@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32549 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32550 maskl, pendl, maskh, pendh);
32551
32552- atomic_inc(&irq_err_count);
32553+ atomic_inc_unchecked(&irq_err_count);
32554
32555 return -EINVAL;
32556 }
32557diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32558index bea6efc..3dc0f42 100644
32559--- a/drivers/gpu/drm/drm_crtc.c
32560+++ b/drivers/gpu/drm/drm_crtc.c
32561@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32562 */
32563 if ((out_resp->count_modes >= mode_count) && mode_count) {
32564 copied = 0;
32565- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32566+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32567 list_for_each_entry(mode, &connector->modes, head) {
32568 drm_crtc_convert_to_umode(&u_mode, mode);
32569 if (copy_to_user(mode_ptr + copied,
32570@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32571
32572 if ((out_resp->count_props >= props_count) && props_count) {
32573 copied = 0;
32574- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32575- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32576+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32577+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32578 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32579 if (connector->property_ids[i] != 0) {
32580 if (put_user(connector->property_ids[i],
32581@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32582
32583 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32584 copied = 0;
32585- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32586+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32587 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32588 if (connector->encoder_ids[i] != 0) {
32589 if (put_user(connector->encoder_ids[i],
32590@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32591 }
32592
32593 for (i = 0; i < crtc_req->count_connectors; i++) {
32594- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32595+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32596 if (get_user(out_id, &set_connectors_ptr[i])) {
32597 ret = -EFAULT;
32598 goto out;
32599@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32600 out_resp->flags = property->flags;
32601
32602 if ((out_resp->count_values >= value_count) && value_count) {
32603- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32604+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32605 for (i = 0; i < value_count; i++) {
32606 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32607 ret = -EFAULT;
32608@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32609 if (property->flags & DRM_MODE_PROP_ENUM) {
32610 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32611 copied = 0;
32612- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32613+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32614 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32615
32616 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32617@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32618 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32619 copied = 0;
32620 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32621- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32622+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32623
32624 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32625 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32626@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32627 blob = obj_to_blob(obj);
32628
32629 if (out_resp->length == blob->length) {
32630- blob_ptr = (void *)(unsigned long)out_resp->data;
32631+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
32632 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32633 ret = -EFAULT;
32634 goto done;
32635diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32636index 1b8745d..92fdbf6 100644
32637--- a/drivers/gpu/drm/drm_crtc_helper.c
32638+++ b/drivers/gpu/drm/drm_crtc_helper.c
32639@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32640 struct drm_crtc *tmp;
32641 int crtc_mask = 1;
32642
32643- WARN(!crtc, "checking null crtc?");
32644+ BUG_ON(!crtc);
32645
32646 dev = crtc->dev;
32647
32648@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32649
32650 adjusted_mode = drm_mode_duplicate(dev, mode);
32651
32652+ pax_track_stack();
32653+
32654 crtc->enabled = drm_helper_crtc_in_use(crtc);
32655
32656 if (!crtc->enabled)
32657diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32658index 0e27d98..dec8768 100644
32659--- a/drivers/gpu/drm/drm_drv.c
32660+++ b/drivers/gpu/drm/drm_drv.c
32661@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32662 char *kdata = NULL;
32663
32664 atomic_inc(&dev->ioctl_count);
32665- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32666+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32667 ++file_priv->ioctl_count;
32668
32669 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32670diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32671index 519161e..98c840c 100644
32672--- a/drivers/gpu/drm/drm_fops.c
32673+++ b/drivers/gpu/drm/drm_fops.c
32674@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32675 }
32676
32677 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32678- atomic_set(&dev->counts[i], 0);
32679+ atomic_set_unchecked(&dev->counts[i], 0);
32680
32681 dev->sigdata.lock = NULL;
32682
32683@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32684
32685 retcode = drm_open_helper(inode, filp, dev);
32686 if (!retcode) {
32687- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32688+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32689 spin_lock(&dev->count_lock);
32690- if (!dev->open_count++) {
32691+ if (local_inc_return(&dev->open_count) == 1) {
32692 spin_unlock(&dev->count_lock);
32693 retcode = drm_setup(dev);
32694 goto out;
32695@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32696
32697 lock_kernel();
32698
32699- DRM_DEBUG("open_count = %d\n", dev->open_count);
32700+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32701
32702 if (dev->driver->preclose)
32703 dev->driver->preclose(dev, file_priv);
32704@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32705 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32706 task_pid_nr(current),
32707 (long)old_encode_dev(file_priv->minor->device),
32708- dev->open_count);
32709+ local_read(&dev->open_count));
32710
32711 /* Release any auth tokens that might point to this file_priv,
32712 (do that under the drm_global_mutex) */
32713@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
32714 * End inline drm_release
32715 */
32716
32717- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32718+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32719 spin_lock(&dev->count_lock);
32720- if (!--dev->open_count) {
32721+ if (local_dec_and_test(&dev->open_count)) {
32722 if (atomic_read(&dev->ioctl_count)) {
32723 DRM_ERROR("Device busy: %d\n",
32724 atomic_read(&dev->ioctl_count));
32725diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32726index 8bf3770..79422805 100644
32727--- a/drivers/gpu/drm/drm_gem.c
32728+++ b/drivers/gpu/drm/drm_gem.c
32729@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32730 spin_lock_init(&dev->object_name_lock);
32731 idr_init(&dev->object_name_idr);
32732 atomic_set(&dev->object_count, 0);
32733- atomic_set(&dev->object_memory, 0);
32734+ atomic_set_unchecked(&dev->object_memory, 0);
32735 atomic_set(&dev->pin_count, 0);
32736- atomic_set(&dev->pin_memory, 0);
32737+ atomic_set_unchecked(&dev->pin_memory, 0);
32738 atomic_set(&dev->gtt_count, 0);
32739- atomic_set(&dev->gtt_memory, 0);
32740+ atomic_set_unchecked(&dev->gtt_memory, 0);
32741
32742 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32743 if (!mm) {
32744@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32745 goto fput;
32746 }
32747 atomic_inc(&dev->object_count);
32748- atomic_add(obj->size, &dev->object_memory);
32749+ atomic_add_unchecked(obj->size, &dev->object_memory);
32750 return obj;
32751 fput:
32752 fput(obj->filp);
32753@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32754
32755 fput(obj->filp);
32756 atomic_dec(&dev->object_count);
32757- atomic_sub(obj->size, &dev->object_memory);
32758+ atomic_sub_unchecked(obj->size, &dev->object_memory);
32759 kfree(obj);
32760 }
32761 EXPORT_SYMBOL(drm_gem_object_free);
32762diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32763index f0f6c6b..34af322 100644
32764--- a/drivers/gpu/drm/drm_info.c
32765+++ b/drivers/gpu/drm/drm_info.c
32766@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32767 struct drm_local_map *map;
32768 struct drm_map_list *r_list;
32769
32770- /* Hardcoded from _DRM_FRAME_BUFFER,
32771- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32772- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32773- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32774+ static const char * const types[] = {
32775+ [_DRM_FRAME_BUFFER] = "FB",
32776+ [_DRM_REGISTERS] = "REG",
32777+ [_DRM_SHM] = "SHM",
32778+ [_DRM_AGP] = "AGP",
32779+ [_DRM_SCATTER_GATHER] = "SG",
32780+ [_DRM_CONSISTENT] = "PCI",
32781+ [_DRM_GEM] = "GEM" };
32782 const char *type;
32783 int i;
32784
32785@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32786 map = r_list->map;
32787 if (!map)
32788 continue;
32789- if (map->type < 0 || map->type > 5)
32790+ if (map->type >= ARRAY_SIZE(types))
32791 type = "??";
32792 else
32793 type = types[map->type];
32794@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
32795 struct drm_device *dev = node->minor->dev;
32796
32797 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
32798- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
32799+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
32800 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
32801- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
32802- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
32803+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
32804+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
32805 seq_printf(m, "%d gtt total\n", dev->gtt_total);
32806 return 0;
32807 }
32808@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32809 mutex_lock(&dev->struct_mutex);
32810 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
32811 atomic_read(&dev->vma_count),
32812+#ifdef CONFIG_GRKERNSEC_HIDESYM
32813+ NULL, 0);
32814+#else
32815 high_memory, (u64)virt_to_phys(high_memory));
32816+#endif
32817
32818 list_for_each_entry(pt, &dev->vmalist, head) {
32819 vma = pt->vma;
32820@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
32821 continue;
32822 seq_printf(m,
32823 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
32824- pt->pid, vma->vm_start, vma->vm_end,
32825+ pt->pid,
32826+#ifdef CONFIG_GRKERNSEC_HIDESYM
32827+ 0, 0,
32828+#else
32829+ vma->vm_start, vma->vm_end,
32830+#endif
32831 vma->vm_flags & VM_READ ? 'r' : '-',
32832 vma->vm_flags & VM_WRITE ? 'w' : '-',
32833 vma->vm_flags & VM_EXEC ? 'x' : '-',
32834 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32835 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32836 vma->vm_flags & VM_IO ? 'i' : '-',
32837+#ifdef CONFIG_GRKERNSEC_HIDESYM
32838+ 0);
32839+#else
32840 vma->vm_pgoff);
32841+#endif
32842
32843 #if defined(__i386__)
32844 pgprot = pgprot_val(vma->vm_page_prot);
32845diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32846index 282d9fd..71e5f11 100644
32847--- a/drivers/gpu/drm/drm_ioc32.c
32848+++ b/drivers/gpu/drm/drm_ioc32.c
32849@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32850 request = compat_alloc_user_space(nbytes);
32851 if (!access_ok(VERIFY_WRITE, request, nbytes))
32852 return -EFAULT;
32853- list = (struct drm_buf_desc *) (request + 1);
32854+ list = (struct drm_buf_desc __user *) (request + 1);
32855
32856 if (__put_user(count, &request->count)
32857 || __put_user(list, &request->list))
32858@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32859 request = compat_alloc_user_space(nbytes);
32860 if (!access_ok(VERIFY_WRITE, request, nbytes))
32861 return -EFAULT;
32862- list = (struct drm_buf_pub *) (request + 1);
32863+ list = (struct drm_buf_pub __user *) (request + 1);
32864
32865 if (__put_user(count, &request->count)
32866 || __put_user(list, &request->list))
32867diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32868index 9b9ff46..4ea724c 100644
32869--- a/drivers/gpu/drm/drm_ioctl.c
32870+++ b/drivers/gpu/drm/drm_ioctl.c
32871@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32872 stats->data[i].value =
32873 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32874 else
32875- stats->data[i].value = atomic_read(&dev->counts[i]);
32876+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32877 stats->data[i].type = dev->types[i];
32878 }
32879
32880diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32881index e2f70a5..c703e86 100644
32882--- a/drivers/gpu/drm/drm_lock.c
32883+++ b/drivers/gpu/drm/drm_lock.c
32884@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32885 if (drm_lock_take(&master->lock, lock->context)) {
32886 master->lock.file_priv = file_priv;
32887 master->lock.lock_time = jiffies;
32888- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32889+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32890 break; /* Got lock */
32891 }
32892
32893@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32894 return -EINVAL;
32895 }
32896
32897- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32898+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32899
32900 /* kernel_context_switch isn't used by any of the x86 drm
32901 * modules but is required by the Sparc driver.
32902diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32903index 7d1d88c..b9131b2 100644
32904--- a/drivers/gpu/drm/i810/i810_dma.c
32905+++ b/drivers/gpu/drm/i810/i810_dma.c
32906@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32907 dma->buflist[vertex->idx],
32908 vertex->discard, vertex->used);
32909
32910- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32911- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32912+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32913+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32914 sarea_priv->last_enqueue = dev_priv->counter - 1;
32915 sarea_priv->last_dispatch = (int)hw_status[5];
32916
32917@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32918 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32919 mc->last_render);
32920
32921- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32922- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32923+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32924+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32925 sarea_priv->last_enqueue = dev_priv->counter - 1;
32926 sarea_priv->last_dispatch = (int)hw_status[5];
32927
32928diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32929index 21e2691..7321edd 100644
32930--- a/drivers/gpu/drm/i810/i810_drv.h
32931+++ b/drivers/gpu/drm/i810/i810_drv.h
32932@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32933 int page_flipping;
32934
32935 wait_queue_head_t irq_queue;
32936- atomic_t irq_received;
32937- atomic_t irq_emitted;
32938+ atomic_unchecked_t irq_received;
32939+ atomic_unchecked_t irq_emitted;
32940
32941 int front_offset;
32942 } drm_i810_private_t;
32943diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
32944index da82afe..48a45de 100644
32945--- a/drivers/gpu/drm/i830/i830_drv.h
32946+++ b/drivers/gpu/drm/i830/i830_drv.h
32947@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
32948 int page_flipping;
32949
32950 wait_queue_head_t irq_queue;
32951- atomic_t irq_received;
32952- atomic_t irq_emitted;
32953+ atomic_unchecked_t irq_received;
32954+ atomic_unchecked_t irq_emitted;
32955
32956 int use_mi_batchbuffer_start;
32957
32958diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
32959index 91ec2bb..6f21fab 100644
32960--- a/drivers/gpu/drm/i830/i830_irq.c
32961+++ b/drivers/gpu/drm/i830/i830_irq.c
32962@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
32963
32964 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
32965
32966- atomic_inc(&dev_priv->irq_received);
32967+ atomic_inc_unchecked(&dev_priv->irq_received);
32968 wake_up_interruptible(&dev_priv->irq_queue);
32969
32970 return IRQ_HANDLED;
32971@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
32972
32973 DRM_DEBUG("%s\n", __func__);
32974
32975- atomic_inc(&dev_priv->irq_emitted);
32976+ atomic_inc_unchecked(&dev_priv->irq_emitted);
32977
32978 BEGIN_LP_RING(2);
32979 OUT_RING(0);
32980 OUT_RING(GFX_OP_USER_INTERRUPT);
32981 ADVANCE_LP_RING();
32982
32983- return atomic_read(&dev_priv->irq_emitted);
32984+ return atomic_read_unchecked(&dev_priv->irq_emitted);
32985 }
32986
32987 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32988@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32989
32990 DRM_DEBUG("%s\n", __func__);
32991
32992- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32993+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32994 return 0;
32995
32996 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
32997@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32998
32999 for (;;) {
33000 __set_current_state(TASK_INTERRUPTIBLE);
33001- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
33002+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
33003 break;
33004 if ((signed)(end - jiffies) <= 0) {
33005 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
33006@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33007 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33008 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33009 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33010- atomic_set(&dev_priv->irq_received, 0);
33011- atomic_set(&dev_priv->irq_emitted, 0);
33012+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33013+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33014 init_waitqueue_head(&dev_priv->irq_queue);
33015 }
33016
33017diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33018index 288fc50..c6092055 100644
33019--- a/drivers/gpu/drm/i915/dvo.h
33020+++ b/drivers/gpu/drm/i915/dvo.h
33021@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33022 *
33023 * \return singly-linked list of modes or NULL if no modes found.
33024 */
33025- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33026+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33027
33028 /**
33029 * Clean up driver-specific bits of the output
33030 */
33031- void (*destroy) (struct intel_dvo_device *dvo);
33032+ void (* const destroy) (struct intel_dvo_device *dvo);
33033
33034 /**
33035 * Debugging hook to dump device registers to log file
33036 */
33037- void (*dump_regs)(struct intel_dvo_device *dvo);
33038+ void (* const dump_regs)(struct intel_dvo_device *dvo);
33039 };
33040
33041-extern struct intel_dvo_dev_ops sil164_ops;
33042-extern struct intel_dvo_dev_ops ch7xxx_ops;
33043-extern struct intel_dvo_dev_ops ivch_ops;
33044-extern struct intel_dvo_dev_ops tfp410_ops;
33045-extern struct intel_dvo_dev_ops ch7017_ops;
33046+extern const struct intel_dvo_dev_ops sil164_ops;
33047+extern const struct intel_dvo_dev_ops ch7xxx_ops;
33048+extern const struct intel_dvo_dev_ops ivch_ops;
33049+extern const struct intel_dvo_dev_ops tfp410_ops;
33050+extern const struct intel_dvo_dev_ops ch7017_ops;
33051
33052 #endif /* _INTEL_DVO_H */
33053diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33054index 621815b..499d82e 100644
33055--- a/drivers/gpu/drm/i915/dvo_ch7017.c
33056+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33057@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33058 }
33059 }
33060
33061-struct intel_dvo_dev_ops ch7017_ops = {
33062+const struct intel_dvo_dev_ops ch7017_ops = {
33063 .init = ch7017_init,
33064 .detect = ch7017_detect,
33065 .mode_valid = ch7017_mode_valid,
33066diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33067index a9b8962..ac769ba 100644
33068--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33069+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33070@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33071 }
33072 }
33073
33074-struct intel_dvo_dev_ops ch7xxx_ops = {
33075+const struct intel_dvo_dev_ops ch7xxx_ops = {
33076 .init = ch7xxx_init,
33077 .detect = ch7xxx_detect,
33078 .mode_valid = ch7xxx_mode_valid,
33079diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33080index aa176f9..ed2930c 100644
33081--- a/drivers/gpu/drm/i915/dvo_ivch.c
33082+++ b/drivers/gpu/drm/i915/dvo_ivch.c
33083@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33084 }
33085 }
33086
33087-struct intel_dvo_dev_ops ivch_ops= {
33088+const struct intel_dvo_dev_ops ivch_ops= {
33089 .init = ivch_init,
33090 .dpms = ivch_dpms,
33091 .save = ivch_save,
33092diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33093index e1c1f73..7dbebcf 100644
33094--- a/drivers/gpu/drm/i915/dvo_sil164.c
33095+++ b/drivers/gpu/drm/i915/dvo_sil164.c
33096@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33097 }
33098 }
33099
33100-struct intel_dvo_dev_ops sil164_ops = {
33101+const struct intel_dvo_dev_ops sil164_ops = {
33102 .init = sil164_init,
33103 .detect = sil164_detect,
33104 .mode_valid = sil164_mode_valid,
33105diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33106index 16dce84..7e1b6f8 100644
33107--- a/drivers/gpu/drm/i915/dvo_tfp410.c
33108+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33109@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33110 }
33111 }
33112
33113-struct intel_dvo_dev_ops tfp410_ops = {
33114+const struct intel_dvo_dev_ops tfp410_ops = {
33115 .init = tfp410_init,
33116 .detect = tfp410_detect,
33117 .mode_valid = tfp410_mode_valid,
33118diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33119index 7e859d6..7d1cf2b 100644
33120--- a/drivers/gpu/drm/i915/i915_debugfs.c
33121+++ b/drivers/gpu/drm/i915/i915_debugfs.c
33122@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33123 I915_READ(GTIMR));
33124 }
33125 seq_printf(m, "Interrupts received: %d\n",
33126- atomic_read(&dev_priv->irq_received));
33127+ atomic_read_unchecked(&dev_priv->irq_received));
33128 if (dev_priv->hw_status_page != NULL) {
33129 seq_printf(m, "Current sequence: %d\n",
33130 i915_get_gem_seqno(dev));
33131diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33132index 5449239..7e4f68d 100644
33133--- a/drivers/gpu/drm/i915/i915_drv.c
33134+++ b/drivers/gpu/drm/i915/i915_drv.c
33135@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33136 return i915_resume(dev);
33137 }
33138
33139-static struct vm_operations_struct i915_gem_vm_ops = {
33140+static const struct vm_operations_struct i915_gem_vm_ops = {
33141 .fault = i915_gem_fault,
33142 .open = drm_gem_vm_open,
33143 .close = drm_gem_vm_close,
33144diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33145index 97163f7..c24c7c7 100644
33146--- a/drivers/gpu/drm/i915/i915_drv.h
33147+++ b/drivers/gpu/drm/i915/i915_drv.h
33148@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33149 /* display clock increase/decrease */
33150 /* pll clock increase/decrease */
33151 /* clock gating init */
33152-};
33153+} __no_const;
33154
33155 typedef struct drm_i915_private {
33156 struct drm_device *dev;
33157@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33158 int page_flipping;
33159
33160 wait_queue_head_t irq_queue;
33161- atomic_t irq_received;
33162+ atomic_unchecked_t irq_received;
33163 /** Protects user_irq_refcount and irq_mask_reg */
33164 spinlock_t user_irq_lock;
33165 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33166diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33167index 27a3074..eb3f959 100644
33168--- a/drivers/gpu/drm/i915/i915_gem.c
33169+++ b/drivers/gpu/drm/i915/i915_gem.c
33170@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33171
33172 args->aper_size = dev->gtt_total;
33173 args->aper_available_size = (args->aper_size -
33174- atomic_read(&dev->pin_memory));
33175+ atomic_read_unchecked(&dev->pin_memory));
33176
33177 return 0;
33178 }
33179@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33180
33181 if (obj_priv->gtt_space) {
33182 atomic_dec(&dev->gtt_count);
33183- atomic_sub(obj->size, &dev->gtt_memory);
33184+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33185
33186 drm_mm_put_block(obj_priv->gtt_space);
33187 obj_priv->gtt_space = NULL;
33188@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33189 goto search_free;
33190 }
33191 atomic_inc(&dev->gtt_count);
33192- atomic_add(obj->size, &dev->gtt_memory);
33193+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
33194
33195 /* Assert that the object is not currently in any GPU domain. As it
33196 * wasn't in the GTT, there shouldn't be any way it could have been in
33197@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33198 "%d/%d gtt bytes\n",
33199 atomic_read(&dev->object_count),
33200 atomic_read(&dev->pin_count),
33201- atomic_read(&dev->object_memory),
33202- atomic_read(&dev->pin_memory),
33203- atomic_read(&dev->gtt_memory),
33204+ atomic_read_unchecked(&dev->object_memory),
33205+ atomic_read_unchecked(&dev->pin_memory),
33206+ atomic_read_unchecked(&dev->gtt_memory),
33207 dev->gtt_total);
33208 }
33209 goto err;
33210@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33211 */
33212 if (obj_priv->pin_count == 1) {
33213 atomic_inc(&dev->pin_count);
33214- atomic_add(obj->size, &dev->pin_memory);
33215+ atomic_add_unchecked(obj->size, &dev->pin_memory);
33216 if (!obj_priv->active &&
33217 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33218 !list_empty(&obj_priv->list))
33219@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33220 list_move_tail(&obj_priv->list,
33221 &dev_priv->mm.inactive_list);
33222 atomic_dec(&dev->pin_count);
33223- atomic_sub(obj->size, &dev->pin_memory);
33224+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
33225 }
33226 i915_verify_inactive(dev, __FILE__, __LINE__);
33227 }
33228diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33229index 63f28ad..f5469da 100644
33230--- a/drivers/gpu/drm/i915/i915_irq.c
33231+++ b/drivers/gpu/drm/i915/i915_irq.c
33232@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33233 int irq_received;
33234 int ret = IRQ_NONE;
33235
33236- atomic_inc(&dev_priv->irq_received);
33237+ atomic_inc_unchecked(&dev_priv->irq_received);
33238
33239 if (IS_IGDNG(dev))
33240 return igdng_irq_handler(dev);
33241@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33242 {
33243 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33244
33245- atomic_set(&dev_priv->irq_received, 0);
33246+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33247
33248 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33249 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33250diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33251index 5d9c6a7..d1b0e29 100644
33252--- a/drivers/gpu/drm/i915/intel_sdvo.c
33253+++ b/drivers/gpu/drm/i915/intel_sdvo.c
33254@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33255 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33256
33257 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33258- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33259+ pax_open_kernel();
33260+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33261+ pax_close_kernel();
33262
33263 /* Read the regs to test if we can talk to the device */
33264 for (i = 0; i < 0x40; i++) {
33265diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33266index be6c6b9..8615d9c 100644
33267--- a/drivers/gpu/drm/mga/mga_drv.h
33268+++ b/drivers/gpu/drm/mga/mga_drv.h
33269@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33270 u32 clear_cmd;
33271 u32 maccess;
33272
33273- atomic_t vbl_received; /**< Number of vblanks received. */
33274+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33275 wait_queue_head_t fence_queue;
33276- atomic_t last_fence_retired;
33277+ atomic_unchecked_t last_fence_retired;
33278 u32 next_fence_to_post;
33279
33280 unsigned int fb_cpp;
33281diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33282index daa6041..a28a5da 100644
33283--- a/drivers/gpu/drm/mga/mga_irq.c
33284+++ b/drivers/gpu/drm/mga/mga_irq.c
33285@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33286 if (crtc != 0)
33287 return 0;
33288
33289- return atomic_read(&dev_priv->vbl_received);
33290+ return atomic_read_unchecked(&dev_priv->vbl_received);
33291 }
33292
33293
33294@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33295 /* VBLANK interrupt */
33296 if (status & MGA_VLINEPEN) {
33297 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33298- atomic_inc(&dev_priv->vbl_received);
33299+ atomic_inc_unchecked(&dev_priv->vbl_received);
33300 drm_handle_vblank(dev, 0);
33301 handled = 1;
33302 }
33303@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33304 MGA_WRITE(MGA_PRIMEND, prim_end);
33305 }
33306
33307- atomic_inc(&dev_priv->last_fence_retired);
33308+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33309 DRM_WAKEUP(&dev_priv->fence_queue);
33310 handled = 1;
33311 }
33312@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33313 * using fences.
33314 */
33315 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33316- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33317+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33318 - *sequence) <= (1 << 23)));
33319
33320 *sequence = cur_fence;
33321diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33322index 4c39a40..b22a9ea 100644
33323--- a/drivers/gpu/drm/r128/r128_cce.c
33324+++ b/drivers/gpu/drm/r128/r128_cce.c
33325@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33326
33327 /* GH: Simple idle check.
33328 */
33329- atomic_set(&dev_priv->idle_count, 0);
33330+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33331
33332 /* We don't support anything other than bus-mastering ring mode,
33333 * but the ring can be in either AGP or PCI space for the ring
33334diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33335index 3c60829..4faf484 100644
33336--- a/drivers/gpu/drm/r128/r128_drv.h
33337+++ b/drivers/gpu/drm/r128/r128_drv.h
33338@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33339 int is_pci;
33340 unsigned long cce_buffers_offset;
33341
33342- atomic_t idle_count;
33343+ atomic_unchecked_t idle_count;
33344
33345 int page_flipping;
33346 int current_page;
33347 u32 crtc_offset;
33348 u32 crtc_offset_cntl;
33349
33350- atomic_t vbl_received;
33351+ atomic_unchecked_t vbl_received;
33352
33353 u32 color_fmt;
33354 unsigned int front_offset;
33355diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33356index 69810fb..97bf17a 100644
33357--- a/drivers/gpu/drm/r128/r128_irq.c
33358+++ b/drivers/gpu/drm/r128/r128_irq.c
33359@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33360 if (crtc != 0)
33361 return 0;
33362
33363- return atomic_read(&dev_priv->vbl_received);
33364+ return atomic_read_unchecked(&dev_priv->vbl_received);
33365 }
33366
33367 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33368@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33369 /* VBLANK interrupt */
33370 if (status & R128_CRTC_VBLANK_INT) {
33371 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33372- atomic_inc(&dev_priv->vbl_received);
33373+ atomic_inc_unchecked(&dev_priv->vbl_received);
33374 drm_handle_vblank(dev, 0);
33375 return IRQ_HANDLED;
33376 }
33377diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33378index af2665c..51922d2 100644
33379--- a/drivers/gpu/drm/r128/r128_state.c
33380+++ b/drivers/gpu/drm/r128/r128_state.c
33381@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33382
33383 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33384 {
33385- if (atomic_read(&dev_priv->idle_count) == 0) {
33386+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33387 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33388 } else {
33389- atomic_set(&dev_priv->idle_count, 0);
33390+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33391 }
33392 }
33393
33394diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33395index dd72b91..8644b3c 100644
33396--- a/drivers/gpu/drm/radeon/atom.c
33397+++ b/drivers/gpu/drm/radeon/atom.c
33398@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33399 char name[512];
33400 int i;
33401
33402+ pax_track_stack();
33403+
33404 ctx->card = card;
33405 ctx->bios = bios;
33406
33407diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33408index 0d79577..efaa7a5 100644
33409--- a/drivers/gpu/drm/radeon/mkregtable.c
33410+++ b/drivers/gpu/drm/radeon/mkregtable.c
33411@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33412 regex_t mask_rex;
33413 regmatch_t match[4];
33414 char buf[1024];
33415- size_t end;
33416+ long end;
33417 int len;
33418 int done = 0;
33419 int r;
33420 unsigned o;
33421 struct offset *offset;
33422 char last_reg_s[10];
33423- int last_reg;
33424+ unsigned long last_reg;
33425
33426 if (regcomp
33427 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33428diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33429index 6735213..38c2c67 100644
33430--- a/drivers/gpu/drm/radeon/radeon.h
33431+++ b/drivers/gpu/drm/radeon/radeon.h
33432@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33433 */
33434 struct radeon_fence_driver {
33435 uint32_t scratch_reg;
33436- atomic_t seq;
33437+ atomic_unchecked_t seq;
33438 uint32_t last_seq;
33439 unsigned long count_timeout;
33440 wait_queue_head_t queue;
33441@@ -640,7 +640,7 @@ struct radeon_asic {
33442 uint32_t offset, uint32_t obj_size);
33443 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33444 void (*bandwidth_update)(struct radeon_device *rdev);
33445-};
33446+} __no_const;
33447
33448 /*
33449 * Asic structures
33450diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33451index 4e928b9..d8b6008 100644
33452--- a/drivers/gpu/drm/radeon/radeon_atombios.c
33453+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33454@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33455 bool linkb;
33456 struct radeon_i2c_bus_rec ddc_bus;
33457
33458+ pax_track_stack();
33459+
33460 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33461
33462 if (data_offset == 0)
33463@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33464 }
33465 }
33466
33467-struct bios_connector {
33468+static struct bios_connector {
33469 bool valid;
33470 uint16_t line_mux;
33471 uint16_t devices;
33472 int connector_type;
33473 struct radeon_i2c_bus_rec ddc_bus;
33474-};
33475+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33476
33477 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33478 drm_device
33479@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33480 uint8_t dac;
33481 union atom_supported_devices *supported_devices;
33482 int i, j;
33483- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33484
33485 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33486
33487diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33488index 083a181..ccccae0 100644
33489--- a/drivers/gpu/drm/radeon/radeon_display.c
33490+++ b/drivers/gpu/drm/radeon/radeon_display.c
33491@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33492
33493 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33494 error = freq - current_freq;
33495- error = error < 0 ? 0xffffffff : error;
33496+ error = (int32_t)error < 0 ? 0xffffffff : error;
33497 } else
33498 error = abs(current_freq - freq);
33499 vco_diff = abs(vco - best_vco);
33500diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33501index 76e4070..193fa7f 100644
33502--- a/drivers/gpu/drm/radeon/radeon_drv.h
33503+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33504@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33505
33506 /* SW interrupt */
33507 wait_queue_head_t swi_queue;
33508- atomic_t swi_emitted;
33509+ atomic_unchecked_t swi_emitted;
33510 int vblank_crtc;
33511 uint32_t irq_enable_reg;
33512 uint32_t r500_disp_irq_reg;
33513diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33514index 3beb26d..6ce9c4a 100644
33515--- a/drivers/gpu/drm/radeon/radeon_fence.c
33516+++ b/drivers/gpu/drm/radeon/radeon_fence.c
33517@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33518 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33519 return 0;
33520 }
33521- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33522+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33523 if (!rdev->cp.ready) {
33524 /* FIXME: cp is not running assume everythings is done right
33525 * away
33526@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33527 return r;
33528 }
33529 WREG32(rdev->fence_drv.scratch_reg, 0);
33530- atomic_set(&rdev->fence_drv.seq, 0);
33531+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33532 INIT_LIST_HEAD(&rdev->fence_drv.created);
33533 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33534 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33535diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33536index a1bf11d..4a123c0 100644
33537--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33538+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33539@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33540 request = compat_alloc_user_space(sizeof(*request));
33541 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33542 || __put_user(req32.param, &request->param)
33543- || __put_user((void __user *)(unsigned long)req32.value,
33544+ || __put_user((unsigned long)req32.value,
33545 &request->value))
33546 return -EFAULT;
33547
33548diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33549index b79ecc4..8dab92d 100644
33550--- a/drivers/gpu/drm/radeon/radeon_irq.c
33551+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33552@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33553 unsigned int ret;
33554 RING_LOCALS;
33555
33556- atomic_inc(&dev_priv->swi_emitted);
33557- ret = atomic_read(&dev_priv->swi_emitted);
33558+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33559+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33560
33561 BEGIN_RING(4);
33562 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33563@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33564 drm_radeon_private_t *dev_priv =
33565 (drm_radeon_private_t *) dev->dev_private;
33566
33567- atomic_set(&dev_priv->swi_emitted, 0);
33568+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33569 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33570
33571 dev->max_vblank_count = 0x001fffff;
33572diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33573index 4747910..48ca4b3 100644
33574--- a/drivers/gpu/drm/radeon/radeon_state.c
33575+++ b/drivers/gpu/drm/radeon/radeon_state.c
33576@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33577 {
33578 drm_radeon_private_t *dev_priv = dev->dev_private;
33579 drm_radeon_getparam_t *param = data;
33580- int value;
33581+ int value = 0;
33582
33583 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33584
33585diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33586index 1381e06..0e53b17 100644
33587--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33588+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33589@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33590 DRM_INFO("radeon: ttm finalized\n");
33591 }
33592
33593-static struct vm_operations_struct radeon_ttm_vm_ops;
33594-static const struct vm_operations_struct *ttm_vm_ops = NULL;
33595-
33596-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33597-{
33598- struct ttm_buffer_object *bo;
33599- int r;
33600-
33601- bo = (struct ttm_buffer_object *)vma->vm_private_data;
33602- if (bo == NULL) {
33603- return VM_FAULT_NOPAGE;
33604- }
33605- r = ttm_vm_ops->fault(vma, vmf);
33606- return r;
33607-}
33608-
33609 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33610 {
33611 struct drm_file *file_priv;
33612 struct radeon_device *rdev;
33613- int r;
33614
33615 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33616 return drm_mmap(filp, vma);
33617@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33618
33619 file_priv = (struct drm_file *)filp->private_data;
33620 rdev = file_priv->minor->dev->dev_private;
33621- if (rdev == NULL) {
33622+ if (!rdev)
33623 return -EINVAL;
33624- }
33625- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33626- if (unlikely(r != 0)) {
33627- return r;
33628- }
33629- if (unlikely(ttm_vm_ops == NULL)) {
33630- ttm_vm_ops = vma->vm_ops;
33631- radeon_ttm_vm_ops = *ttm_vm_ops;
33632- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33633- }
33634- vma->vm_ops = &radeon_ttm_vm_ops;
33635- return 0;
33636+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33637 }
33638
33639
33640diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33641index b12ff76..0bd0c6e 100644
33642--- a/drivers/gpu/drm/radeon/rs690.c
33643+++ b/drivers/gpu/drm/radeon/rs690.c
33644@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33645 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33646 rdev->pm.sideport_bandwidth.full)
33647 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33648- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33649+ read_delay_latency.full = rfixed_const(800 * 1000);
33650 read_delay_latency.full = rfixed_div(read_delay_latency,
33651 rdev->pm.igp_sideport_mclk);
33652+ a.full = rfixed_const(370);
33653+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33654 } else {
33655 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33656 rdev->pm.k8_bandwidth.full)
33657diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33658index 0ed436e..e6e7ce3 100644
33659--- a/drivers/gpu/drm/ttm/ttm_bo.c
33660+++ b/drivers/gpu/drm/ttm/ttm_bo.c
33661@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33662 NULL
33663 };
33664
33665-static struct sysfs_ops ttm_bo_global_ops = {
33666+static const struct sysfs_ops ttm_bo_global_ops = {
33667 .show = &ttm_bo_global_show
33668 };
33669
33670diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33671index 1c040d0..f9e4af8 100644
33672--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33673+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33674@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33675 {
33676 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33677 vma->vm_private_data;
33678- struct ttm_bo_device *bdev = bo->bdev;
33679+ struct ttm_bo_device *bdev;
33680 unsigned long bus_base;
33681 unsigned long bus_offset;
33682 unsigned long bus_size;
33683@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33684 unsigned long address = (unsigned long)vmf->virtual_address;
33685 int retval = VM_FAULT_NOPAGE;
33686
33687+ if (!bo)
33688+ return VM_FAULT_NOPAGE;
33689+ bdev = bo->bdev;
33690+
33691 /*
33692 * Work around locking order reversal in fault / nopfn
33693 * between mmap_sem and bo_reserve: Perform a trylock operation
33694diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33695index b170071..28ae90e 100644
33696--- a/drivers/gpu/drm/ttm/ttm_global.c
33697+++ b/drivers/gpu/drm/ttm/ttm_global.c
33698@@ -36,7 +36,7 @@
33699 struct ttm_global_item {
33700 struct mutex mutex;
33701 void *object;
33702- int refcount;
33703+ atomic_t refcount;
33704 };
33705
33706 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33707@@ -49,7 +49,7 @@ void ttm_global_init(void)
33708 struct ttm_global_item *item = &glob[i];
33709 mutex_init(&item->mutex);
33710 item->object = NULL;
33711- item->refcount = 0;
33712+ atomic_set(&item->refcount, 0);
33713 }
33714 }
33715
33716@@ -59,7 +59,7 @@ void ttm_global_release(void)
33717 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33718 struct ttm_global_item *item = &glob[i];
33719 BUG_ON(item->object != NULL);
33720- BUG_ON(item->refcount != 0);
33721+ BUG_ON(atomic_read(&item->refcount) != 0);
33722 }
33723 }
33724
33725@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33726 void *object;
33727
33728 mutex_lock(&item->mutex);
33729- if (item->refcount == 0) {
33730+ if (atomic_read(&item->refcount) == 0) {
33731 item->object = kzalloc(ref->size, GFP_KERNEL);
33732 if (unlikely(item->object == NULL)) {
33733 ret = -ENOMEM;
33734@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33735 goto out_err;
33736
33737 }
33738- ++item->refcount;
33739+ atomic_inc(&item->refcount);
33740 ref->object = item->object;
33741 object = item->object;
33742 mutex_unlock(&item->mutex);
33743@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33744 struct ttm_global_item *item = &glob[ref->global_type];
33745
33746 mutex_lock(&item->mutex);
33747- BUG_ON(item->refcount == 0);
33748+ BUG_ON(atomic_read(&item->refcount) == 0);
33749 BUG_ON(ref->object != item->object);
33750- if (--item->refcount == 0) {
33751+ if (atomic_dec_and_test(&item->refcount)) {
33752 ref->release(ref);
33753 item->object = NULL;
33754 }
33755diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33756index 072c281..d8ef483 100644
33757--- a/drivers/gpu/drm/ttm/ttm_memory.c
33758+++ b/drivers/gpu/drm/ttm/ttm_memory.c
33759@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33760 NULL
33761 };
33762
33763-static struct sysfs_ops ttm_mem_zone_ops = {
33764+static const struct sysfs_ops ttm_mem_zone_ops = {
33765 .show = &ttm_mem_zone_show,
33766 .store = &ttm_mem_zone_store
33767 };
33768diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33769index cafcb84..b8e66cc 100644
33770--- a/drivers/gpu/drm/via/via_drv.h
33771+++ b/drivers/gpu/drm/via/via_drv.h
33772@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33773 typedef uint32_t maskarray_t[5];
33774
33775 typedef struct drm_via_irq {
33776- atomic_t irq_received;
33777+ atomic_unchecked_t irq_received;
33778 uint32_t pending_mask;
33779 uint32_t enable_mask;
33780 wait_queue_head_t irq_queue;
33781@@ -75,7 +75,7 @@ typedef struct drm_via_private {
33782 struct timeval last_vblank;
33783 int last_vblank_valid;
33784 unsigned usec_per_vblank;
33785- atomic_t vbl_received;
33786+ atomic_unchecked_t vbl_received;
33787 drm_via_state_t hc_state;
33788 char pci_buf[VIA_PCI_BUF_SIZE];
33789 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33790diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33791index 5935b88..127a8a6 100644
33792--- a/drivers/gpu/drm/via/via_irq.c
33793+++ b/drivers/gpu/drm/via/via_irq.c
33794@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33795 if (crtc != 0)
33796 return 0;
33797
33798- return atomic_read(&dev_priv->vbl_received);
33799+ return atomic_read_unchecked(&dev_priv->vbl_received);
33800 }
33801
33802 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33803@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33804
33805 status = VIA_READ(VIA_REG_INTERRUPT);
33806 if (status & VIA_IRQ_VBLANK_PENDING) {
33807- atomic_inc(&dev_priv->vbl_received);
33808- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33809+ atomic_inc_unchecked(&dev_priv->vbl_received);
33810+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33811 do_gettimeofday(&cur_vblank);
33812 if (dev_priv->last_vblank_valid) {
33813 dev_priv->usec_per_vblank =
33814@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33815 dev_priv->last_vblank = cur_vblank;
33816 dev_priv->last_vblank_valid = 1;
33817 }
33818- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33819+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33820 DRM_DEBUG("US per vblank is: %u\n",
33821 dev_priv->usec_per_vblank);
33822 }
33823@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33824
33825 for (i = 0; i < dev_priv->num_irqs; ++i) {
33826 if (status & cur_irq->pending_mask) {
33827- atomic_inc(&cur_irq->irq_received);
33828+ atomic_inc_unchecked(&cur_irq->irq_received);
33829 DRM_WAKEUP(&cur_irq->irq_queue);
33830 handled = 1;
33831 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
33832@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
33833 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33834 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33835 masks[irq][4]));
33836- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33837+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33838 } else {
33839 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33840 (((cur_irq_sequence =
33841- atomic_read(&cur_irq->irq_received)) -
33842+ atomic_read_unchecked(&cur_irq->irq_received)) -
33843 *sequence) <= (1 << 23)));
33844 }
33845 *sequence = cur_irq_sequence;
33846@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
33847 }
33848
33849 for (i = 0; i < dev_priv->num_irqs; ++i) {
33850- atomic_set(&cur_irq->irq_received, 0);
33851+ atomic_set_unchecked(&cur_irq->irq_received, 0);
33852 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33853 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33854 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33855@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33856 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33857 case VIA_IRQ_RELATIVE:
33858 irqwait->request.sequence +=
33859- atomic_read(&cur_irq->irq_received);
33860+ atomic_read_unchecked(&cur_irq->irq_received);
33861 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33862 case VIA_IRQ_ABSOLUTE:
33863 break;
33864diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
33865index aa8688d..6a0140c 100644
33866--- a/drivers/gpu/vga/vgaarb.c
33867+++ b/drivers/gpu/vga/vgaarb.c
33868@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
33869 uc = &priv->cards[i];
33870 }
33871
33872- if (!uc)
33873- return -EINVAL;
33874+ if (!uc) {
33875+ ret_val = -EINVAL;
33876+ goto done;
33877+ }
33878
33879- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
33880- return -EINVAL;
33881+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
33882+ ret_val = -EINVAL;
33883+ goto done;
33884+ }
33885
33886- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
33887- return -EINVAL;
33888+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
33889+ ret_val = -EINVAL;
33890+ goto done;
33891+ }
33892
33893 vga_put(pdev, io_state);
33894
33895diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33896index 11f8069..4783396 100644
33897--- a/drivers/hid/hid-core.c
33898+++ b/drivers/hid/hid-core.c
33899@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
33900
33901 int hid_add_device(struct hid_device *hdev)
33902 {
33903- static atomic_t id = ATOMIC_INIT(0);
33904+ static atomic_unchecked_t id = ATOMIC_INIT(0);
33905 int ret;
33906
33907 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33908@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
33909 /* XXX hack, any other cleaner solution after the driver core
33910 * is converted to allow more than 20 bytes as the device name? */
33911 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33912- hdev->vendor, hdev->product, atomic_inc_return(&id));
33913+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33914
33915 ret = device_add(&hdev->dev);
33916 if (!ret)
33917diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33918index 8b6ee24..70f657d 100644
33919--- a/drivers/hid/usbhid/hiddev.c
33920+++ b/drivers/hid/usbhid/hiddev.c
33921@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33922 return put_user(HID_VERSION, (int __user *)arg);
33923
33924 case HIDIOCAPPLICATION:
33925- if (arg < 0 || arg >= hid->maxapplication)
33926+ if (arg >= hid->maxapplication)
33927 return -EINVAL;
33928
33929 for (i = 0; i < hid->maxcollection; i++)
33930diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
33931index 5d5ed69..f40533e 100644
33932--- a/drivers/hwmon/lis3lv02d.c
33933+++ b/drivers/hwmon/lis3lv02d.c
33934@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
33935 * the lid is closed. This leads to interrupts as soon as a little move
33936 * is done.
33937 */
33938- atomic_inc(&lis3_dev.count);
33939+ atomic_inc_unchecked(&lis3_dev.count);
33940
33941 wake_up_interruptible(&lis3_dev.misc_wait);
33942 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
33943@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33944 if (test_and_set_bit(0, &lis3_dev.misc_opened))
33945 return -EBUSY; /* already open */
33946
33947- atomic_set(&lis3_dev.count, 0);
33948+ atomic_set_unchecked(&lis3_dev.count, 0);
33949
33950 /*
33951 * The sensor can generate interrupts for free-fall and direction
33952@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33953 add_wait_queue(&lis3_dev.misc_wait, &wait);
33954 while (true) {
33955 set_current_state(TASK_INTERRUPTIBLE);
33956- data = atomic_xchg(&lis3_dev.count, 0);
33957+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
33958 if (data)
33959 break;
33960
33961@@ -244,7 +244,7 @@ out:
33962 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33963 {
33964 poll_wait(file, &lis3_dev.misc_wait, wait);
33965- if (atomic_read(&lis3_dev.count))
33966+ if (atomic_read_unchecked(&lis3_dev.count))
33967 return POLLIN | POLLRDNORM;
33968 return 0;
33969 }
33970diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
33971index 7cdd76f..fe0efdf 100644
33972--- a/drivers/hwmon/lis3lv02d.h
33973+++ b/drivers/hwmon/lis3lv02d.h
33974@@ -201,7 +201,7 @@ struct lis3lv02d {
33975
33976 struct input_polled_dev *idev; /* input device */
33977 struct platform_device *pdev; /* platform device */
33978- atomic_t count; /* interrupt count after last read */
33979+ atomic_unchecked_t count; /* interrupt count after last read */
33980 int xcalib; /* calibrated null value for x */
33981 int ycalib; /* calibrated null value for y */
33982 int zcalib; /* calibrated null value for z */
33983diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33984index 740785e..5a5c6c6 100644
33985--- a/drivers/hwmon/sht15.c
33986+++ b/drivers/hwmon/sht15.c
33987@@ -112,7 +112,7 @@ struct sht15_data {
33988 int supply_uV;
33989 int supply_uV_valid;
33990 struct work_struct update_supply_work;
33991- atomic_t interrupt_handled;
33992+ atomic_unchecked_t interrupt_handled;
33993 };
33994
33995 /**
33996@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
33997 return ret;
33998
33999 gpio_direction_input(data->pdata->gpio_data);
34000- atomic_set(&data->interrupt_handled, 0);
34001+ atomic_set_unchecked(&data->interrupt_handled, 0);
34002
34003 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34004 if (gpio_get_value(data->pdata->gpio_data) == 0) {
34005 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
34006 /* Only relevant if the interrupt hasn't occured. */
34007- if (!atomic_read(&data->interrupt_handled))
34008+ if (!atomic_read_unchecked(&data->interrupt_handled))
34009 schedule_work(&data->read_work);
34010 }
34011 ret = wait_event_timeout(data->wait_queue,
34012@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34013 struct sht15_data *data = d;
34014 /* First disable the interrupt */
34015 disable_irq_nosync(irq);
34016- atomic_inc(&data->interrupt_handled);
34017+ atomic_inc_unchecked(&data->interrupt_handled);
34018 /* Then schedule a reading work struct */
34019 if (data->flag != SHT15_READING_NOTHING)
34020 schedule_work(&data->read_work);
34021@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34022 here as could have gone low in meantime so verify
34023 it hasn't!
34024 */
34025- atomic_set(&data->interrupt_handled, 0);
34026+ atomic_set_unchecked(&data->interrupt_handled, 0);
34027 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34028 /* If still not occured or another handler has been scheduled */
34029 if (gpio_get_value(data->pdata->gpio_data)
34030- || atomic_read(&data->interrupt_handled))
34031+ || atomic_read_unchecked(&data->interrupt_handled))
34032 return;
34033 }
34034 /* Read the data back from the device */
34035diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34036index 97851c5..cb40626 100644
34037--- a/drivers/hwmon/w83791d.c
34038+++ b/drivers/hwmon/w83791d.c
34039@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34040 struct i2c_board_info *info);
34041 static int w83791d_remove(struct i2c_client *client);
34042
34043-static int w83791d_read(struct i2c_client *client, u8 register);
34044-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34045+static int w83791d_read(struct i2c_client *client, u8 reg);
34046+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34047 static struct w83791d_data *w83791d_update_device(struct device *dev);
34048
34049 #ifdef DEBUG
34050diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34051index 378fcb5..5e91fa8 100644
34052--- a/drivers/i2c/busses/i2c-amd756-s4882.c
34053+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34054@@ -43,7 +43,7 @@
34055 extern struct i2c_adapter amd756_smbus;
34056
34057 static struct i2c_adapter *s4882_adapter;
34058-static struct i2c_algorithm *s4882_algo;
34059+static i2c_algorithm_no_const *s4882_algo;
34060
34061 /* Wrapper access functions for multiplexed SMBus */
34062 static DEFINE_MUTEX(amd756_lock);
34063diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34064index 29015eb..af2d8e9 100644
34065--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34066+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34067@@ -41,7 +41,7 @@
34068 extern struct i2c_adapter *nforce2_smbus;
34069
34070 static struct i2c_adapter *s4985_adapter;
34071-static struct i2c_algorithm *s4985_algo;
34072+static i2c_algorithm_no_const *s4985_algo;
34073
34074 /* Wrapper access functions for multiplexed SMBus */
34075 static DEFINE_MUTEX(nforce2_lock);
34076diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34077index 878f8ec..12376fc 100644
34078--- a/drivers/ide/aec62xx.c
34079+++ b/drivers/ide/aec62xx.c
34080@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34081 .cable_detect = atp86x_cable_detect,
34082 };
34083
34084-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34085+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34086 { /* 0: AEC6210 */
34087 .name = DRV_NAME,
34088 .init_chipset = init_chipset_aec62xx,
34089diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34090index e59b6de..4b4fc65 100644
34091--- a/drivers/ide/alim15x3.c
34092+++ b/drivers/ide/alim15x3.c
34093@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34094 .dma_sff_read_status = ide_dma_sff_read_status,
34095 };
34096
34097-static const struct ide_port_info ali15x3_chipset __devinitdata = {
34098+static const struct ide_port_info ali15x3_chipset __devinitconst = {
34099 .name = DRV_NAME,
34100 .init_chipset = init_chipset_ali15x3,
34101 .init_hwif = init_hwif_ali15x3,
34102diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34103index 628cd2e..087a414 100644
34104--- a/drivers/ide/amd74xx.c
34105+++ b/drivers/ide/amd74xx.c
34106@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34107 .udma_mask = udma, \
34108 }
34109
34110-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34111+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34112 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34113 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34114 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34115diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34116index 837322b..837fd71 100644
34117--- a/drivers/ide/atiixp.c
34118+++ b/drivers/ide/atiixp.c
34119@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34120 .cable_detect = atiixp_cable_detect,
34121 };
34122
34123-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34124+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34125 { /* 0: IXP200/300/400/700 */
34126 .name = DRV_NAME,
34127 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34128diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34129index ca0c46f..d55318a 100644
34130--- a/drivers/ide/cmd64x.c
34131+++ b/drivers/ide/cmd64x.c
34132@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34133 .dma_sff_read_status = ide_dma_sff_read_status,
34134 };
34135
34136-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34137+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34138 { /* 0: CMD643 */
34139 .name = DRV_NAME,
34140 .init_chipset = init_chipset_cmd64x,
34141diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34142index 09f98ed..cebc5bc 100644
34143--- a/drivers/ide/cs5520.c
34144+++ b/drivers/ide/cs5520.c
34145@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34146 .set_dma_mode = cs5520_set_dma_mode,
34147 };
34148
34149-static const struct ide_port_info cyrix_chipset __devinitdata = {
34150+static const struct ide_port_info cyrix_chipset __devinitconst = {
34151 .name = DRV_NAME,
34152 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34153 .port_ops = &cs5520_port_ops,
34154diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34155index 40bf05e..7d58ca0 100644
34156--- a/drivers/ide/cs5530.c
34157+++ b/drivers/ide/cs5530.c
34158@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34159 .udma_filter = cs5530_udma_filter,
34160 };
34161
34162-static const struct ide_port_info cs5530_chipset __devinitdata = {
34163+static const struct ide_port_info cs5530_chipset __devinitconst = {
34164 .name = DRV_NAME,
34165 .init_chipset = init_chipset_cs5530,
34166 .init_hwif = init_hwif_cs5530,
34167diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34168index 983d957..53e6172 100644
34169--- a/drivers/ide/cs5535.c
34170+++ b/drivers/ide/cs5535.c
34171@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34172 .cable_detect = cs5535_cable_detect,
34173 };
34174
34175-static const struct ide_port_info cs5535_chipset __devinitdata = {
34176+static const struct ide_port_info cs5535_chipset __devinitconst = {
34177 .name = DRV_NAME,
34178 .port_ops = &cs5535_port_ops,
34179 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34180diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34181index 74fc540..8e933d8 100644
34182--- a/drivers/ide/cy82c693.c
34183+++ b/drivers/ide/cy82c693.c
34184@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34185 .set_dma_mode = cy82c693_set_dma_mode,
34186 };
34187
34188-static const struct ide_port_info cy82c693_chipset __devinitdata = {
34189+static const struct ide_port_info cy82c693_chipset __devinitconst = {
34190 .name = DRV_NAME,
34191 .init_iops = init_iops_cy82c693,
34192 .port_ops = &cy82c693_port_ops,
34193diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34194index 7ce68ef..e78197d 100644
34195--- a/drivers/ide/hpt366.c
34196+++ b/drivers/ide/hpt366.c
34197@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34198 }
34199 };
34200
34201-static const struct hpt_info hpt36x __devinitdata = {
34202+static const struct hpt_info hpt36x __devinitconst = {
34203 .chip_name = "HPT36x",
34204 .chip_type = HPT36x,
34205 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34206@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34207 .timings = &hpt36x_timings
34208 };
34209
34210-static const struct hpt_info hpt370 __devinitdata = {
34211+static const struct hpt_info hpt370 __devinitconst = {
34212 .chip_name = "HPT370",
34213 .chip_type = HPT370,
34214 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34215@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34216 .timings = &hpt37x_timings
34217 };
34218
34219-static const struct hpt_info hpt370a __devinitdata = {
34220+static const struct hpt_info hpt370a __devinitconst = {
34221 .chip_name = "HPT370A",
34222 .chip_type = HPT370A,
34223 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34224@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34225 .timings = &hpt37x_timings
34226 };
34227
34228-static const struct hpt_info hpt374 __devinitdata = {
34229+static const struct hpt_info hpt374 __devinitconst = {
34230 .chip_name = "HPT374",
34231 .chip_type = HPT374,
34232 .udma_mask = ATA_UDMA5,
34233@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34234 .timings = &hpt37x_timings
34235 };
34236
34237-static const struct hpt_info hpt372 __devinitdata = {
34238+static const struct hpt_info hpt372 __devinitconst = {
34239 .chip_name = "HPT372",
34240 .chip_type = HPT372,
34241 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34242@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34243 .timings = &hpt37x_timings
34244 };
34245
34246-static const struct hpt_info hpt372a __devinitdata = {
34247+static const struct hpt_info hpt372a __devinitconst = {
34248 .chip_name = "HPT372A",
34249 .chip_type = HPT372A,
34250 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34251@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34252 .timings = &hpt37x_timings
34253 };
34254
34255-static const struct hpt_info hpt302 __devinitdata = {
34256+static const struct hpt_info hpt302 __devinitconst = {
34257 .chip_name = "HPT302",
34258 .chip_type = HPT302,
34259 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34260@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34261 .timings = &hpt37x_timings
34262 };
34263
34264-static const struct hpt_info hpt371 __devinitdata = {
34265+static const struct hpt_info hpt371 __devinitconst = {
34266 .chip_name = "HPT371",
34267 .chip_type = HPT371,
34268 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34269@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34270 .timings = &hpt37x_timings
34271 };
34272
34273-static const struct hpt_info hpt372n __devinitdata = {
34274+static const struct hpt_info hpt372n __devinitconst = {
34275 .chip_name = "HPT372N",
34276 .chip_type = HPT372N,
34277 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34278@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34279 .timings = &hpt37x_timings
34280 };
34281
34282-static const struct hpt_info hpt302n __devinitdata = {
34283+static const struct hpt_info hpt302n __devinitconst = {
34284 .chip_name = "HPT302N",
34285 .chip_type = HPT302N,
34286 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34287@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34288 .timings = &hpt37x_timings
34289 };
34290
34291-static const struct hpt_info hpt371n __devinitdata = {
34292+static const struct hpt_info hpt371n __devinitconst = {
34293 .chip_name = "HPT371N",
34294 .chip_type = HPT371N,
34295 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34296@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34297 .dma_sff_read_status = ide_dma_sff_read_status,
34298 };
34299
34300-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34301+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34302 { /* 0: HPT36x */
34303 .name = DRV_NAME,
34304 .init_chipset = init_chipset_hpt366,
34305diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34306index 2de76cc..74186a1 100644
34307--- a/drivers/ide/ide-cd.c
34308+++ b/drivers/ide/ide-cd.c
34309@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34310 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34311 if ((unsigned long)buf & alignment
34312 || blk_rq_bytes(rq) & q->dma_pad_mask
34313- || object_is_on_stack(buf))
34314+ || object_starts_on_stack(buf))
34315 drive->dma = 0;
34316 }
34317 }
34318diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34319index fefbdfc..62ff465 100644
34320--- a/drivers/ide/ide-floppy.c
34321+++ b/drivers/ide/ide-floppy.c
34322@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34323 u8 pc_buf[256], header_len, desc_cnt;
34324 int i, rc = 1, blocks, length;
34325
34326+ pax_track_stack();
34327+
34328 ide_debug_log(IDE_DBG_FUNC, "enter");
34329
34330 drive->bios_cyl = 0;
34331diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34332index 39d4e01..11538ce 100644
34333--- a/drivers/ide/ide-pci-generic.c
34334+++ b/drivers/ide/ide-pci-generic.c
34335@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34336 .udma_mask = ATA_UDMA6, \
34337 }
34338
34339-static const struct ide_port_info generic_chipsets[] __devinitdata = {
34340+static const struct ide_port_info generic_chipsets[] __devinitconst = {
34341 /* 0: Unknown */
34342 DECLARE_GENERIC_PCI_DEV(0),
34343
34344diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34345index 0d266a5..aaca790 100644
34346--- a/drivers/ide/it8172.c
34347+++ b/drivers/ide/it8172.c
34348@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34349 .set_dma_mode = it8172_set_dma_mode,
34350 };
34351
34352-static const struct ide_port_info it8172_port_info __devinitdata = {
34353+static const struct ide_port_info it8172_port_info __devinitconst = {
34354 .name = DRV_NAME,
34355 .port_ops = &it8172_port_ops,
34356 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34357diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34358index 4797616..4be488a 100644
34359--- a/drivers/ide/it8213.c
34360+++ b/drivers/ide/it8213.c
34361@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34362 .cable_detect = it8213_cable_detect,
34363 };
34364
34365-static const struct ide_port_info it8213_chipset __devinitdata = {
34366+static const struct ide_port_info it8213_chipset __devinitconst = {
34367 .name = DRV_NAME,
34368 .enablebits = { {0x41, 0x80, 0x80} },
34369 .port_ops = &it8213_port_ops,
34370diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34371index 51aa745..146ee60 100644
34372--- a/drivers/ide/it821x.c
34373+++ b/drivers/ide/it821x.c
34374@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34375 .cable_detect = it821x_cable_detect,
34376 };
34377
34378-static const struct ide_port_info it821x_chipset __devinitdata = {
34379+static const struct ide_port_info it821x_chipset __devinitconst = {
34380 .name = DRV_NAME,
34381 .init_chipset = init_chipset_it821x,
34382 .init_hwif = init_hwif_it821x,
34383diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34384index bf2be64..9270098 100644
34385--- a/drivers/ide/jmicron.c
34386+++ b/drivers/ide/jmicron.c
34387@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34388 .cable_detect = jmicron_cable_detect,
34389 };
34390
34391-static const struct ide_port_info jmicron_chipset __devinitdata = {
34392+static const struct ide_port_info jmicron_chipset __devinitconst = {
34393 .name = DRV_NAME,
34394 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34395 .port_ops = &jmicron_port_ops,
34396diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34397index 95327a2..73f78d8 100644
34398--- a/drivers/ide/ns87415.c
34399+++ b/drivers/ide/ns87415.c
34400@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34401 .dma_sff_read_status = superio_dma_sff_read_status,
34402 };
34403
34404-static const struct ide_port_info ns87415_chipset __devinitdata = {
34405+static const struct ide_port_info ns87415_chipset __devinitconst = {
34406 .name = DRV_NAME,
34407 .init_hwif = init_hwif_ns87415,
34408 .tp_ops = &ns87415_tp_ops,
34409diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34410index f1d70d6..e1de05b 100644
34411--- a/drivers/ide/opti621.c
34412+++ b/drivers/ide/opti621.c
34413@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34414 .set_pio_mode = opti621_set_pio_mode,
34415 };
34416
34417-static const struct ide_port_info opti621_chipset __devinitdata = {
34418+static const struct ide_port_info opti621_chipset __devinitconst = {
34419 .name = DRV_NAME,
34420 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34421 .port_ops = &opti621_port_ops,
34422diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34423index 65ba823..7311f4d 100644
34424--- a/drivers/ide/pdc202xx_new.c
34425+++ b/drivers/ide/pdc202xx_new.c
34426@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34427 .udma_mask = udma, \
34428 }
34429
34430-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34431+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34432 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34433 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34434 };
34435diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34436index cb812f3..af816ef 100644
34437--- a/drivers/ide/pdc202xx_old.c
34438+++ b/drivers/ide/pdc202xx_old.c
34439@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34440 .max_sectors = sectors, \
34441 }
34442
34443-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34444+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34445 { /* 0: PDC20246 */
34446 .name = DRV_NAME,
34447 .init_chipset = init_chipset_pdc202xx,
34448diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34449index bf14f39..15c4b98 100644
34450--- a/drivers/ide/piix.c
34451+++ b/drivers/ide/piix.c
34452@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34453 .udma_mask = udma, \
34454 }
34455
34456-static const struct ide_port_info piix_pci_info[] __devinitdata = {
34457+static const struct ide_port_info piix_pci_info[] __devinitconst = {
34458 /* 0: MPIIX */
34459 { /*
34460 * MPIIX actually has only a single IDE channel mapped to
34461diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34462index a6414a8..c04173e 100644
34463--- a/drivers/ide/rz1000.c
34464+++ b/drivers/ide/rz1000.c
34465@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34466 }
34467 }
34468
34469-static const struct ide_port_info rz1000_chipset __devinitdata = {
34470+static const struct ide_port_info rz1000_chipset __devinitconst = {
34471 .name = DRV_NAME,
34472 .host_flags = IDE_HFLAG_NO_DMA,
34473 };
34474diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34475index d467478..9203942 100644
34476--- a/drivers/ide/sc1200.c
34477+++ b/drivers/ide/sc1200.c
34478@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34479 .dma_sff_read_status = ide_dma_sff_read_status,
34480 };
34481
34482-static const struct ide_port_info sc1200_chipset __devinitdata = {
34483+static const struct ide_port_info sc1200_chipset __devinitconst = {
34484 .name = DRV_NAME,
34485 .port_ops = &sc1200_port_ops,
34486 .dma_ops = &sc1200_dma_ops,
34487diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34488index 1104bb3..59c5194 100644
34489--- a/drivers/ide/scc_pata.c
34490+++ b/drivers/ide/scc_pata.c
34491@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34492 .dma_sff_read_status = scc_dma_sff_read_status,
34493 };
34494
34495-static const struct ide_port_info scc_chipset __devinitdata = {
34496+static const struct ide_port_info scc_chipset __devinitconst = {
34497 .name = "sccIDE",
34498 .init_iops = init_iops_scc,
34499 .init_dma = scc_init_dma,
34500diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34501index b6554ef..6cc2cc3 100644
34502--- a/drivers/ide/serverworks.c
34503+++ b/drivers/ide/serverworks.c
34504@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34505 .cable_detect = svwks_cable_detect,
34506 };
34507
34508-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34509+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34510 { /* 0: OSB4 */
34511 .name = DRV_NAME,
34512 .init_chipset = init_chipset_svwks,
34513diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34514index ab3db61..afed580 100644
34515--- a/drivers/ide/setup-pci.c
34516+++ b/drivers/ide/setup-pci.c
34517@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34518 int ret, i, n_ports = dev2 ? 4 : 2;
34519 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34520
34521+ pax_track_stack();
34522+
34523 for (i = 0; i < n_ports / 2; i++) {
34524 ret = ide_setup_pci_controller(pdev[i], d, !i);
34525 if (ret < 0)
34526diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34527index d95df52..0b03a39 100644
34528--- a/drivers/ide/siimage.c
34529+++ b/drivers/ide/siimage.c
34530@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34531 .udma_mask = ATA_UDMA6, \
34532 }
34533
34534-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34535+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34536 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34537 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34538 };
34539diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34540index 3b88eba..ca8699d 100644
34541--- a/drivers/ide/sis5513.c
34542+++ b/drivers/ide/sis5513.c
34543@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34544 .cable_detect = sis_cable_detect,
34545 };
34546
34547-static const struct ide_port_info sis5513_chipset __devinitdata = {
34548+static const struct ide_port_info sis5513_chipset __devinitconst = {
34549 .name = DRV_NAME,
34550 .init_chipset = init_chipset_sis5513,
34551 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34552diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34553index d698da4..fca42a4 100644
34554--- a/drivers/ide/sl82c105.c
34555+++ b/drivers/ide/sl82c105.c
34556@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34557 .dma_sff_read_status = ide_dma_sff_read_status,
34558 };
34559
34560-static const struct ide_port_info sl82c105_chipset __devinitdata = {
34561+static const struct ide_port_info sl82c105_chipset __devinitconst = {
34562 .name = DRV_NAME,
34563 .init_chipset = init_chipset_sl82c105,
34564 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34565diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34566index 1ccfb40..83d5779 100644
34567--- a/drivers/ide/slc90e66.c
34568+++ b/drivers/ide/slc90e66.c
34569@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34570 .cable_detect = slc90e66_cable_detect,
34571 };
34572
34573-static const struct ide_port_info slc90e66_chipset __devinitdata = {
34574+static const struct ide_port_info slc90e66_chipset __devinitconst = {
34575 .name = DRV_NAME,
34576 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34577 .port_ops = &slc90e66_port_ops,
34578diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34579index 05a93d6..5f9e325 100644
34580--- a/drivers/ide/tc86c001.c
34581+++ b/drivers/ide/tc86c001.c
34582@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34583 .dma_sff_read_status = ide_dma_sff_read_status,
34584 };
34585
34586-static const struct ide_port_info tc86c001_chipset __devinitdata = {
34587+static const struct ide_port_info tc86c001_chipset __devinitconst = {
34588 .name = DRV_NAME,
34589 .init_hwif = init_hwif_tc86c001,
34590 .port_ops = &tc86c001_port_ops,
34591diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34592index 8773c3b..7907d6c 100644
34593--- a/drivers/ide/triflex.c
34594+++ b/drivers/ide/triflex.c
34595@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34596 .set_dma_mode = triflex_set_mode,
34597 };
34598
34599-static const struct ide_port_info triflex_device __devinitdata = {
34600+static const struct ide_port_info triflex_device __devinitconst = {
34601 .name = DRV_NAME,
34602 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34603 .port_ops = &triflex_port_ops,
34604diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34605index 4b42ca0..e494a98 100644
34606--- a/drivers/ide/trm290.c
34607+++ b/drivers/ide/trm290.c
34608@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34609 .dma_check = trm290_dma_check,
34610 };
34611
34612-static const struct ide_port_info trm290_chipset __devinitdata = {
34613+static const struct ide_port_info trm290_chipset __devinitconst = {
34614 .name = DRV_NAME,
34615 .init_hwif = init_hwif_trm290,
34616 .tp_ops = &trm290_tp_ops,
34617diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34618index 028de26..520d5d5 100644
34619--- a/drivers/ide/via82cxxx.c
34620+++ b/drivers/ide/via82cxxx.c
34621@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34622 .cable_detect = via82cxxx_cable_detect,
34623 };
34624
34625-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34626+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34627 .name = DRV_NAME,
34628 .init_chipset = init_chipset_via82cxxx,
34629 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34630diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34631index 2cd00b5..14de699 100644
34632--- a/drivers/ieee1394/dv1394.c
34633+++ b/drivers/ieee1394/dv1394.c
34634@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34635 based upon DIF section and sequence
34636 */
34637
34638-static void inline
34639+static inline void
34640 frame_put_packet (struct frame *f, struct packet *p)
34641 {
34642 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34643diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34644index e947d8f..6a966b9 100644
34645--- a/drivers/ieee1394/hosts.c
34646+++ b/drivers/ieee1394/hosts.c
34647@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34648 }
34649
34650 static struct hpsb_host_driver dummy_driver = {
34651+ .name = "dummy",
34652 .transmit_packet = dummy_transmit_packet,
34653 .devctl = dummy_devctl,
34654 .isoctl = dummy_isoctl
34655diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34656index ddaab6e..8d37435 100644
34657--- a/drivers/ieee1394/init_ohci1394_dma.c
34658+++ b/drivers/ieee1394/init_ohci1394_dma.c
34659@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34660 for (func = 0; func < 8; func++) {
34661 u32 class = read_pci_config(num,slot,func,
34662 PCI_CLASS_REVISION);
34663- if ((class == 0xffffffff))
34664+ if (class == 0xffffffff)
34665 continue; /* No device at this func */
34666
34667 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34668diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34669index 65c1429..5d8c11f 100644
34670--- a/drivers/ieee1394/ohci1394.c
34671+++ b/drivers/ieee1394/ohci1394.c
34672@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34673 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34674
34675 /* Module Parameters */
34676-static int phys_dma = 1;
34677+static int phys_dma;
34678 module_param(phys_dma, int, 0444);
34679-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34680+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34681
34682 static void dma_trm_tasklet(unsigned long data);
34683 static void dma_trm_reset(struct dma_trm_ctx *d);
34684diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34685index f199896..78c9fc8 100644
34686--- a/drivers/ieee1394/sbp2.c
34687+++ b/drivers/ieee1394/sbp2.c
34688@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34689 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34690 MODULE_LICENSE("GPL");
34691
34692-static int sbp2_module_init(void)
34693+static int __init sbp2_module_init(void)
34694 {
34695 int ret;
34696
34697diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34698index a5dea6b..0cefe8f 100644
34699--- a/drivers/infiniband/core/cm.c
34700+++ b/drivers/infiniband/core/cm.c
34701@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34702
34703 struct cm_counter_group {
34704 struct kobject obj;
34705- atomic_long_t counter[CM_ATTR_COUNT];
34706+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34707 };
34708
34709 struct cm_counter_attribute {
34710@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34711 struct ib_mad_send_buf *msg = NULL;
34712 int ret;
34713
34714- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34715+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34716 counter[CM_REQ_COUNTER]);
34717
34718 /* Quick state check to discard duplicate REQs. */
34719@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34720 if (!cm_id_priv)
34721 return;
34722
34723- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34724+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34725 counter[CM_REP_COUNTER]);
34726 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34727 if (ret)
34728@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34729 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34730 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34731 spin_unlock_irq(&cm_id_priv->lock);
34732- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34733+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34734 counter[CM_RTU_COUNTER]);
34735 goto out;
34736 }
34737@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34738 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34739 dreq_msg->local_comm_id);
34740 if (!cm_id_priv) {
34741- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34742+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34743 counter[CM_DREQ_COUNTER]);
34744 cm_issue_drep(work->port, work->mad_recv_wc);
34745 return -EINVAL;
34746@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34747 case IB_CM_MRA_REP_RCVD:
34748 break;
34749 case IB_CM_TIMEWAIT:
34750- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34751+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34752 counter[CM_DREQ_COUNTER]);
34753 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34754 goto unlock;
34755@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34756 cm_free_msg(msg);
34757 goto deref;
34758 case IB_CM_DREQ_RCVD:
34759- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34760+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34761 counter[CM_DREQ_COUNTER]);
34762 goto unlock;
34763 default:
34764@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34765 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34766 cm_id_priv->msg, timeout)) {
34767 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34768- atomic_long_inc(&work->port->
34769+ atomic_long_inc_unchecked(&work->port->
34770 counter_group[CM_RECV_DUPLICATES].
34771 counter[CM_MRA_COUNTER]);
34772 goto out;
34773@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34774 break;
34775 case IB_CM_MRA_REQ_RCVD:
34776 case IB_CM_MRA_REP_RCVD:
34777- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34778+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34779 counter[CM_MRA_COUNTER]);
34780 /* fall through */
34781 default:
34782@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
34783 case IB_CM_LAP_IDLE:
34784 break;
34785 case IB_CM_MRA_LAP_SENT:
34786- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34787+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34788 counter[CM_LAP_COUNTER]);
34789 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34790 goto unlock;
34791@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
34792 cm_free_msg(msg);
34793 goto deref;
34794 case IB_CM_LAP_RCVD:
34795- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34796+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34797 counter[CM_LAP_COUNTER]);
34798 goto unlock;
34799 default:
34800@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34801 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34802 if (cur_cm_id_priv) {
34803 spin_unlock_irq(&cm.lock);
34804- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34805+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34806 counter[CM_SIDR_REQ_COUNTER]);
34807 goto out; /* Duplicate message. */
34808 }
34809@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34810 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34811 msg->retries = 1;
34812
34813- atomic_long_add(1 + msg->retries,
34814+ atomic_long_add_unchecked(1 + msg->retries,
34815 &port->counter_group[CM_XMIT].counter[attr_index]);
34816 if (msg->retries)
34817- atomic_long_add(msg->retries,
34818+ atomic_long_add_unchecked(msg->retries,
34819 &port->counter_group[CM_XMIT_RETRIES].
34820 counter[attr_index]);
34821
34822@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34823 }
34824
34825 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34826- atomic_long_inc(&port->counter_group[CM_RECV].
34827+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34828 counter[attr_id - CM_ATTR_ID_OFFSET]);
34829
34830 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34831@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34832 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34833
34834 return sprintf(buf, "%ld\n",
34835- atomic_long_read(&group->counter[cm_attr->index]));
34836+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34837 }
34838
34839-static struct sysfs_ops cm_counter_ops = {
34840+static const struct sysfs_ops cm_counter_ops = {
34841 .show = cm_show_counter
34842 };
34843
34844diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
34845index 8fd3a6f..61d8075 100644
34846--- a/drivers/infiniband/core/cma.c
34847+++ b/drivers/infiniband/core/cma.c
34848@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
34849
34850 req.private_data_len = sizeof(struct cma_hdr) +
34851 conn_param->private_data_len;
34852+ if (req.private_data_len < conn_param->private_data_len)
34853+ return -EINVAL;
34854+
34855 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34856 if (!req.private_data)
34857 return -ENOMEM;
34858@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
34859 memset(&req, 0, sizeof req);
34860 offset = cma_user_data_offset(id_priv->id.ps);
34861 req.private_data_len = offset + conn_param->private_data_len;
34862+ if (req.private_data_len < conn_param->private_data_len)
34863+ return -EINVAL;
34864+
34865 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34866 if (!private_data)
34867 return -ENOMEM;
34868diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34869index 4507043..14ad522 100644
34870--- a/drivers/infiniband/core/fmr_pool.c
34871+++ b/drivers/infiniband/core/fmr_pool.c
34872@@ -97,8 +97,8 @@ struct ib_fmr_pool {
34873
34874 struct task_struct *thread;
34875
34876- atomic_t req_ser;
34877- atomic_t flush_ser;
34878+ atomic_unchecked_t req_ser;
34879+ atomic_unchecked_t flush_ser;
34880
34881 wait_queue_head_t force_wait;
34882 };
34883@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34884 struct ib_fmr_pool *pool = pool_ptr;
34885
34886 do {
34887- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
34888+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
34889 ib_fmr_batch_release(pool);
34890
34891- atomic_inc(&pool->flush_ser);
34892+ atomic_inc_unchecked(&pool->flush_ser);
34893 wake_up_interruptible(&pool->force_wait);
34894
34895 if (pool->flush_function)
34896@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34897 }
34898
34899 set_current_state(TASK_INTERRUPTIBLE);
34900- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
34901+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
34902 !kthread_should_stop())
34903 schedule();
34904 __set_current_state(TASK_RUNNING);
34905@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
34906 pool->dirty_watermark = params->dirty_watermark;
34907 pool->dirty_len = 0;
34908 spin_lock_init(&pool->pool_lock);
34909- atomic_set(&pool->req_ser, 0);
34910- atomic_set(&pool->flush_ser, 0);
34911+ atomic_set_unchecked(&pool->req_ser, 0);
34912+ atomic_set_unchecked(&pool->flush_ser, 0);
34913 init_waitqueue_head(&pool->force_wait);
34914
34915 pool->thread = kthread_run(ib_fmr_cleanup_thread,
34916@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
34917 }
34918 spin_unlock_irq(&pool->pool_lock);
34919
34920- serial = atomic_inc_return(&pool->req_ser);
34921+ serial = atomic_inc_return_unchecked(&pool->req_ser);
34922 wake_up_process(pool->thread);
34923
34924 if (wait_event_interruptible(pool->force_wait,
34925- atomic_read(&pool->flush_ser) - serial >= 0))
34926+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
34927 return -EINTR;
34928
34929 return 0;
34930@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
34931 } else {
34932 list_add_tail(&fmr->list, &pool->dirty_list);
34933 if (++pool->dirty_len >= pool->dirty_watermark) {
34934- atomic_inc(&pool->req_ser);
34935+ atomic_inc_unchecked(&pool->req_ser);
34936 wake_up_process(pool->thread);
34937 }
34938 }
34939diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
34940index 158a214..1558bb7 100644
34941--- a/drivers/infiniband/core/sysfs.c
34942+++ b/drivers/infiniband/core/sysfs.c
34943@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
34944 return port_attr->show(p, port_attr, buf);
34945 }
34946
34947-static struct sysfs_ops port_sysfs_ops = {
34948+static const struct sysfs_ops port_sysfs_ops = {
34949 .show = port_attr_show
34950 };
34951
34952diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
34953index 5440da0..1194ecb 100644
34954--- a/drivers/infiniband/core/uverbs_marshall.c
34955+++ b/drivers/infiniband/core/uverbs_marshall.c
34956@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
34957 dst->grh.sgid_index = src->grh.sgid_index;
34958 dst->grh.hop_limit = src->grh.hop_limit;
34959 dst->grh.traffic_class = src->grh.traffic_class;
34960+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
34961 dst->dlid = src->dlid;
34962 dst->sl = src->sl;
34963 dst->src_path_bits = src->src_path_bits;
34964 dst->static_rate = src->static_rate;
34965 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
34966 dst->port_num = src->port_num;
34967+ dst->reserved = 0;
34968 }
34969 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
34970
34971 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34972 struct ib_qp_attr *src)
34973 {
34974+ dst->qp_state = src->qp_state;
34975 dst->cur_qp_state = src->cur_qp_state;
34976 dst->path_mtu = src->path_mtu;
34977 dst->path_mig_state = src->path_mig_state;
34978@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34979 dst->rnr_retry = src->rnr_retry;
34980 dst->alt_port_num = src->alt_port_num;
34981 dst->alt_timeout = src->alt_timeout;
34982+ memset(dst->reserved, 0, sizeof(dst->reserved));
34983 }
34984 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
34985
34986diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
34987index 100da85..62e6b88 100644
34988--- a/drivers/infiniband/hw/ipath/ipath_fs.c
34989+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
34990@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
34991 struct infinipath_counters counters;
34992 struct ipath_devdata *dd;
34993
34994+ pax_track_stack();
34995+
34996 dd = file->f_path.dentry->d_inode->i_private;
34997 dd->ipath_f_read_counters(dd, &counters);
34998
34999diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
35000index cbde0cf..afaf55c 100644
35001--- a/drivers/infiniband/hw/nes/nes.c
35002+++ b/drivers/infiniband/hw/nes/nes.c
35003@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
35004 LIST_HEAD(nes_adapter_list);
35005 static LIST_HEAD(nes_dev_list);
35006
35007-atomic_t qps_destroyed;
35008+atomic_unchecked_t qps_destroyed;
35009
35010 static unsigned int ee_flsh_adapter;
35011 static unsigned int sysfs_nonidx_addr;
35012@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35013 struct nes_adapter *nesadapter = nesdev->nesadapter;
35014 u32 qp_id;
35015
35016- atomic_inc(&qps_destroyed);
35017+ atomic_inc_unchecked(&qps_destroyed);
35018
35019 /* Free the control structures */
35020
35021diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35022index bcc6abc..9c76b2f 100644
35023--- a/drivers/infiniband/hw/nes/nes.h
35024+++ b/drivers/infiniband/hw/nes/nes.h
35025@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35026 extern unsigned int wqm_quanta;
35027 extern struct list_head nes_adapter_list;
35028
35029-extern atomic_t cm_connects;
35030-extern atomic_t cm_accepts;
35031-extern atomic_t cm_disconnects;
35032-extern atomic_t cm_closes;
35033-extern atomic_t cm_connecteds;
35034-extern atomic_t cm_connect_reqs;
35035-extern atomic_t cm_rejects;
35036-extern atomic_t mod_qp_timouts;
35037-extern atomic_t qps_created;
35038-extern atomic_t qps_destroyed;
35039-extern atomic_t sw_qps_destroyed;
35040+extern atomic_unchecked_t cm_connects;
35041+extern atomic_unchecked_t cm_accepts;
35042+extern atomic_unchecked_t cm_disconnects;
35043+extern atomic_unchecked_t cm_closes;
35044+extern atomic_unchecked_t cm_connecteds;
35045+extern atomic_unchecked_t cm_connect_reqs;
35046+extern atomic_unchecked_t cm_rejects;
35047+extern atomic_unchecked_t mod_qp_timouts;
35048+extern atomic_unchecked_t qps_created;
35049+extern atomic_unchecked_t qps_destroyed;
35050+extern atomic_unchecked_t sw_qps_destroyed;
35051 extern u32 mh_detected;
35052 extern u32 mh_pauses_sent;
35053 extern u32 cm_packets_sent;
35054@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35055 extern u32 cm_listens_created;
35056 extern u32 cm_listens_destroyed;
35057 extern u32 cm_backlog_drops;
35058-extern atomic_t cm_loopbacks;
35059-extern atomic_t cm_nodes_created;
35060-extern atomic_t cm_nodes_destroyed;
35061-extern atomic_t cm_accel_dropped_pkts;
35062-extern atomic_t cm_resets_recvd;
35063+extern atomic_unchecked_t cm_loopbacks;
35064+extern atomic_unchecked_t cm_nodes_created;
35065+extern atomic_unchecked_t cm_nodes_destroyed;
35066+extern atomic_unchecked_t cm_accel_dropped_pkts;
35067+extern atomic_unchecked_t cm_resets_recvd;
35068
35069 extern u32 int_mod_timer_init;
35070 extern u32 int_mod_cq_depth_256;
35071diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35072index 73473db..5ed06e8 100644
35073--- a/drivers/infiniband/hw/nes/nes_cm.c
35074+++ b/drivers/infiniband/hw/nes/nes_cm.c
35075@@ -69,11 +69,11 @@ u32 cm_packets_received;
35076 u32 cm_listens_created;
35077 u32 cm_listens_destroyed;
35078 u32 cm_backlog_drops;
35079-atomic_t cm_loopbacks;
35080-atomic_t cm_nodes_created;
35081-atomic_t cm_nodes_destroyed;
35082-atomic_t cm_accel_dropped_pkts;
35083-atomic_t cm_resets_recvd;
35084+atomic_unchecked_t cm_loopbacks;
35085+atomic_unchecked_t cm_nodes_created;
35086+atomic_unchecked_t cm_nodes_destroyed;
35087+atomic_unchecked_t cm_accel_dropped_pkts;
35088+atomic_unchecked_t cm_resets_recvd;
35089
35090 static inline int mini_cm_accelerated(struct nes_cm_core *,
35091 struct nes_cm_node *);
35092@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35093
35094 static struct nes_cm_core *g_cm_core;
35095
35096-atomic_t cm_connects;
35097-atomic_t cm_accepts;
35098-atomic_t cm_disconnects;
35099-atomic_t cm_closes;
35100-atomic_t cm_connecteds;
35101-atomic_t cm_connect_reqs;
35102-atomic_t cm_rejects;
35103+atomic_unchecked_t cm_connects;
35104+atomic_unchecked_t cm_accepts;
35105+atomic_unchecked_t cm_disconnects;
35106+atomic_unchecked_t cm_closes;
35107+atomic_unchecked_t cm_connecteds;
35108+atomic_unchecked_t cm_connect_reqs;
35109+atomic_unchecked_t cm_rejects;
35110
35111
35112 /**
35113@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35114 cm_node->rem_mac);
35115
35116 add_hte_node(cm_core, cm_node);
35117- atomic_inc(&cm_nodes_created);
35118+ atomic_inc_unchecked(&cm_nodes_created);
35119
35120 return cm_node;
35121 }
35122@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35123 }
35124
35125 atomic_dec(&cm_core->node_cnt);
35126- atomic_inc(&cm_nodes_destroyed);
35127+ atomic_inc_unchecked(&cm_nodes_destroyed);
35128 nesqp = cm_node->nesqp;
35129 if (nesqp) {
35130 nesqp->cm_node = NULL;
35131@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35132
35133 static void drop_packet(struct sk_buff *skb)
35134 {
35135- atomic_inc(&cm_accel_dropped_pkts);
35136+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35137 dev_kfree_skb_any(skb);
35138 }
35139
35140@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35141
35142 int reset = 0; /* whether to send reset in case of err.. */
35143 int passive_state;
35144- atomic_inc(&cm_resets_recvd);
35145+ atomic_inc_unchecked(&cm_resets_recvd);
35146 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35147 " refcnt=%d\n", cm_node, cm_node->state,
35148 atomic_read(&cm_node->ref_count));
35149@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35150 rem_ref_cm_node(cm_node->cm_core, cm_node);
35151 return NULL;
35152 }
35153- atomic_inc(&cm_loopbacks);
35154+ atomic_inc_unchecked(&cm_loopbacks);
35155 loopbackremotenode->loopbackpartner = cm_node;
35156 loopbackremotenode->tcp_cntxt.rcv_wscale =
35157 NES_CM_DEFAULT_RCV_WND_SCALE;
35158@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35159 add_ref_cm_node(cm_node);
35160 } else if (cm_node->state == NES_CM_STATE_TSA) {
35161 rem_ref_cm_node(cm_core, cm_node);
35162- atomic_inc(&cm_accel_dropped_pkts);
35163+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35164 dev_kfree_skb_any(skb);
35165 break;
35166 }
35167@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35168
35169 if ((cm_id) && (cm_id->event_handler)) {
35170 if (issue_disconn) {
35171- atomic_inc(&cm_disconnects);
35172+ atomic_inc_unchecked(&cm_disconnects);
35173 cm_event.event = IW_CM_EVENT_DISCONNECT;
35174 cm_event.status = disconn_status;
35175 cm_event.local_addr = cm_id->local_addr;
35176@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35177 }
35178
35179 if (issue_close) {
35180- atomic_inc(&cm_closes);
35181+ atomic_inc_unchecked(&cm_closes);
35182 nes_disconnect(nesqp, 1);
35183
35184 cm_id->provider_data = nesqp;
35185@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35186
35187 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35188 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35189- atomic_inc(&cm_accepts);
35190+ atomic_inc_unchecked(&cm_accepts);
35191
35192 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35193 atomic_read(&nesvnic->netdev->refcnt));
35194@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35195
35196 struct nes_cm_core *cm_core;
35197
35198- atomic_inc(&cm_rejects);
35199+ atomic_inc_unchecked(&cm_rejects);
35200 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35201 loopback = cm_node->loopbackpartner;
35202 cm_core = cm_node->cm_core;
35203@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35204 ntohl(cm_id->local_addr.sin_addr.s_addr),
35205 ntohs(cm_id->local_addr.sin_port));
35206
35207- atomic_inc(&cm_connects);
35208+ atomic_inc_unchecked(&cm_connects);
35209 nesqp->active_conn = 1;
35210
35211 /* cache the cm_id in the qp */
35212@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35213 if (nesqp->destroyed) {
35214 return;
35215 }
35216- atomic_inc(&cm_connecteds);
35217+ atomic_inc_unchecked(&cm_connecteds);
35218 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35219 " local port 0x%04X. jiffies = %lu.\n",
35220 nesqp->hwqp.qp_id,
35221@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35222
35223 ret = cm_id->event_handler(cm_id, &cm_event);
35224 cm_id->add_ref(cm_id);
35225- atomic_inc(&cm_closes);
35226+ atomic_inc_unchecked(&cm_closes);
35227 cm_event.event = IW_CM_EVENT_CLOSE;
35228 cm_event.status = IW_CM_EVENT_STATUS_OK;
35229 cm_event.provider_data = cm_id->provider_data;
35230@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35231 return;
35232 cm_id = cm_node->cm_id;
35233
35234- atomic_inc(&cm_connect_reqs);
35235+ atomic_inc_unchecked(&cm_connect_reqs);
35236 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35237 cm_node, cm_id, jiffies);
35238
35239@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35240 return;
35241 cm_id = cm_node->cm_id;
35242
35243- atomic_inc(&cm_connect_reqs);
35244+ atomic_inc_unchecked(&cm_connect_reqs);
35245 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35246 cm_node, cm_id, jiffies);
35247
35248diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35249index e593af3..870694a 100644
35250--- a/drivers/infiniband/hw/nes/nes_nic.c
35251+++ b/drivers/infiniband/hw/nes/nes_nic.c
35252@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35253 target_stat_values[++index] = mh_detected;
35254 target_stat_values[++index] = mh_pauses_sent;
35255 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35256- target_stat_values[++index] = atomic_read(&cm_connects);
35257- target_stat_values[++index] = atomic_read(&cm_accepts);
35258- target_stat_values[++index] = atomic_read(&cm_disconnects);
35259- target_stat_values[++index] = atomic_read(&cm_connecteds);
35260- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35261- target_stat_values[++index] = atomic_read(&cm_rejects);
35262- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35263- target_stat_values[++index] = atomic_read(&qps_created);
35264- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35265- target_stat_values[++index] = atomic_read(&qps_destroyed);
35266- target_stat_values[++index] = atomic_read(&cm_closes);
35267+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35268+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35269+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35270+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35271+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35272+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35273+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35274+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35275+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35276+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35277+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35278 target_stat_values[++index] = cm_packets_sent;
35279 target_stat_values[++index] = cm_packets_bounced;
35280 target_stat_values[++index] = cm_packets_created;
35281@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35282 target_stat_values[++index] = cm_listens_created;
35283 target_stat_values[++index] = cm_listens_destroyed;
35284 target_stat_values[++index] = cm_backlog_drops;
35285- target_stat_values[++index] = atomic_read(&cm_loopbacks);
35286- target_stat_values[++index] = atomic_read(&cm_nodes_created);
35287- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35288- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35289- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35290+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35291+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35292+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35293+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35294+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35295 target_stat_values[++index] = int_mod_timer_init;
35296 target_stat_values[++index] = int_mod_cq_depth_1;
35297 target_stat_values[++index] = int_mod_cq_depth_4;
35298diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35299index a680c42..f914deb 100644
35300--- a/drivers/infiniband/hw/nes/nes_verbs.c
35301+++ b/drivers/infiniband/hw/nes/nes_verbs.c
35302@@ -45,9 +45,9 @@
35303
35304 #include <rdma/ib_umem.h>
35305
35306-atomic_t mod_qp_timouts;
35307-atomic_t qps_created;
35308-atomic_t sw_qps_destroyed;
35309+atomic_unchecked_t mod_qp_timouts;
35310+atomic_unchecked_t qps_created;
35311+atomic_unchecked_t sw_qps_destroyed;
35312
35313 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35314
35315@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35316 if (init_attr->create_flags)
35317 return ERR_PTR(-EINVAL);
35318
35319- atomic_inc(&qps_created);
35320+ atomic_inc_unchecked(&qps_created);
35321 switch (init_attr->qp_type) {
35322 case IB_QPT_RC:
35323 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35324@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35325 struct iw_cm_event cm_event;
35326 int ret;
35327
35328- atomic_inc(&sw_qps_destroyed);
35329+ atomic_inc_unchecked(&sw_qps_destroyed);
35330 nesqp->destroyed = 1;
35331
35332 /* Blow away the connection if it exists. */
35333diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35334index ac11be0..3883c04 100644
35335--- a/drivers/input/gameport/gameport.c
35336+++ b/drivers/input/gameport/gameport.c
35337@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35338 */
35339 static void gameport_init_port(struct gameport *gameport)
35340 {
35341- static atomic_t gameport_no = ATOMIC_INIT(0);
35342+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35343
35344 __module_get(THIS_MODULE);
35345
35346 mutex_init(&gameport->drv_mutex);
35347 device_initialize(&gameport->dev);
35348- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35349+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35350 gameport->dev.bus = &gameport_bus;
35351 gameport->dev.release = gameport_release_port;
35352 if (gameport->parent)
35353diff --git a/drivers/input/input.c b/drivers/input/input.c
35354index c82ae82..8cfb9cb 100644
35355--- a/drivers/input/input.c
35356+++ b/drivers/input/input.c
35357@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35358 */
35359 int input_register_device(struct input_dev *dev)
35360 {
35361- static atomic_t input_no = ATOMIC_INIT(0);
35362+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35363 struct input_handler *handler;
35364 const char *path;
35365 int error;
35366@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35367 dev->setkeycode = input_default_setkeycode;
35368
35369 dev_set_name(&dev->dev, "input%ld",
35370- (unsigned long) atomic_inc_return(&input_no) - 1);
35371+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35372
35373 error = device_add(&dev->dev);
35374 if (error)
35375diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35376index ca13a6b..b032b0c 100644
35377--- a/drivers/input/joystick/sidewinder.c
35378+++ b/drivers/input/joystick/sidewinder.c
35379@@ -30,6 +30,7 @@
35380 #include <linux/kernel.h>
35381 #include <linux/module.h>
35382 #include <linux/slab.h>
35383+#include <linux/sched.h>
35384 #include <linux/init.h>
35385 #include <linux/input.h>
35386 #include <linux/gameport.h>
35387@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35388 unsigned char buf[SW_LENGTH];
35389 int i;
35390
35391+ pax_track_stack();
35392+
35393 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35394
35395 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35396diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35397index 79e3edc..01412b9 100644
35398--- a/drivers/input/joystick/xpad.c
35399+++ b/drivers/input/joystick/xpad.c
35400@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35401
35402 static int xpad_led_probe(struct usb_xpad *xpad)
35403 {
35404- static atomic_t led_seq = ATOMIC_INIT(0);
35405+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35406 long led_no;
35407 struct xpad_led *led;
35408 struct led_classdev *led_cdev;
35409@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35410 if (!led)
35411 return -ENOMEM;
35412
35413- led_no = (long)atomic_inc_return(&led_seq) - 1;
35414+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35415
35416 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35417 led->xpad = xpad;
35418diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35419index 0236f0d..c7327f1 100644
35420--- a/drivers/input/serio/serio.c
35421+++ b/drivers/input/serio/serio.c
35422@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35423 */
35424 static void serio_init_port(struct serio *serio)
35425 {
35426- static atomic_t serio_no = ATOMIC_INIT(0);
35427+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35428
35429 __module_get(THIS_MODULE);
35430
35431@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35432 mutex_init(&serio->drv_mutex);
35433 device_initialize(&serio->dev);
35434 dev_set_name(&serio->dev, "serio%ld",
35435- (long)atomic_inc_return(&serio_no) - 1);
35436+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
35437 serio->dev.bus = &serio_bus;
35438 serio->dev.release = serio_release_port;
35439 if (serio->parent) {
35440diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35441index 33dcd8d..2783d25 100644
35442--- a/drivers/isdn/gigaset/common.c
35443+++ b/drivers/isdn/gigaset/common.c
35444@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35445 cs->commands_pending = 0;
35446 cs->cur_at_seq = 0;
35447 cs->gotfwver = -1;
35448- cs->open_count = 0;
35449+ local_set(&cs->open_count, 0);
35450 cs->dev = NULL;
35451 cs->tty = NULL;
35452 cs->tty_dev = NULL;
35453diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35454index a2f6125..6a70677 100644
35455--- a/drivers/isdn/gigaset/gigaset.h
35456+++ b/drivers/isdn/gigaset/gigaset.h
35457@@ -34,6 +34,7 @@
35458 #include <linux/tty_driver.h>
35459 #include <linux/list.h>
35460 #include <asm/atomic.h>
35461+#include <asm/local.h>
35462
35463 #define GIG_VERSION {0,5,0,0}
35464 #define GIG_COMPAT {0,4,0,0}
35465@@ -446,7 +447,7 @@ struct cardstate {
35466 spinlock_t cmdlock;
35467 unsigned curlen, cmdbytes;
35468
35469- unsigned open_count;
35470+ local_t open_count;
35471 struct tty_struct *tty;
35472 struct tasklet_struct if_wake_tasklet;
35473 unsigned control_state;
35474diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35475index b3065b8..c7e8cc9 100644
35476--- a/drivers/isdn/gigaset/interface.c
35477+++ b/drivers/isdn/gigaset/interface.c
35478@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35479 return -ERESTARTSYS; // FIXME -EINTR?
35480 tty->driver_data = cs;
35481
35482- ++cs->open_count;
35483-
35484- if (cs->open_count == 1) {
35485+ if (local_inc_return(&cs->open_count) == 1) {
35486 spin_lock_irqsave(&cs->lock, flags);
35487 cs->tty = tty;
35488 spin_unlock_irqrestore(&cs->lock, flags);
35489@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35490
35491 if (!cs->connected)
35492 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35493- else if (!cs->open_count)
35494+ else if (!local_read(&cs->open_count))
35495 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35496 else {
35497- if (!--cs->open_count) {
35498+ if (!local_dec_return(&cs->open_count)) {
35499 spin_lock_irqsave(&cs->lock, flags);
35500 cs->tty = NULL;
35501 spin_unlock_irqrestore(&cs->lock, flags);
35502@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35503 if (!cs->connected) {
35504 gig_dbg(DEBUG_IF, "not connected");
35505 retval = -ENODEV;
35506- } else if (!cs->open_count)
35507+ } else if (!local_read(&cs->open_count))
35508 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35509 else {
35510 retval = 0;
35511@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35512 if (!cs->connected) {
35513 gig_dbg(DEBUG_IF, "not connected");
35514 retval = -ENODEV;
35515- } else if (!cs->open_count)
35516+ } else if (!local_read(&cs->open_count))
35517 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35518 else if (cs->mstate != MS_LOCKED) {
35519 dev_warn(cs->dev, "can't write to unlocked device\n");
35520@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35521 if (!cs->connected) {
35522 gig_dbg(DEBUG_IF, "not connected");
35523 retval = -ENODEV;
35524- } else if (!cs->open_count)
35525+ } else if (!local_read(&cs->open_count))
35526 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35527 else if (cs->mstate != MS_LOCKED) {
35528 dev_warn(cs->dev, "can't write to unlocked device\n");
35529@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35530
35531 if (!cs->connected)
35532 gig_dbg(DEBUG_IF, "not connected");
35533- else if (!cs->open_count)
35534+ else if (!local_read(&cs->open_count))
35535 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35536 else if (cs->mstate != MS_LOCKED)
35537 dev_warn(cs->dev, "can't write to unlocked device\n");
35538@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35539
35540 if (!cs->connected)
35541 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35542- else if (!cs->open_count)
35543+ else if (!local_read(&cs->open_count))
35544 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35545 else {
35546 //FIXME
35547@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35548
35549 if (!cs->connected)
35550 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35551- else if (!cs->open_count)
35552+ else if (!local_read(&cs->open_count))
35553 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35554 else {
35555 //FIXME
35556@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35557 goto out;
35558 }
35559
35560- if (!cs->open_count) {
35561+ if (!local_read(&cs->open_count)) {
35562 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35563 goto out;
35564 }
35565diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35566index a7c0083..62a7cb6 100644
35567--- a/drivers/isdn/hardware/avm/b1.c
35568+++ b/drivers/isdn/hardware/avm/b1.c
35569@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35570 }
35571 if (left) {
35572 if (t4file->user) {
35573- if (copy_from_user(buf, dp, left))
35574+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35575 return -EFAULT;
35576 } else {
35577 memcpy(buf, dp, left);
35578@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35579 }
35580 if (left) {
35581 if (config->user) {
35582- if (copy_from_user(buf, dp, left))
35583+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35584 return -EFAULT;
35585 } else {
35586 memcpy(buf, dp, left);
35587diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35588index f130724..c373c68 100644
35589--- a/drivers/isdn/hardware/eicon/capidtmf.c
35590+++ b/drivers/isdn/hardware/eicon/capidtmf.c
35591@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35592 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35593 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35594
35595+ pax_track_stack();
35596
35597 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35598 {
35599diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35600index 4d425c6..a9be6c4 100644
35601--- a/drivers/isdn/hardware/eicon/capifunc.c
35602+++ b/drivers/isdn/hardware/eicon/capifunc.c
35603@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35604 IDI_SYNC_REQ req;
35605 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35606
35607+ pax_track_stack();
35608+
35609 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35610
35611 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35612diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35613index 3029234..ef0d9e2 100644
35614--- a/drivers/isdn/hardware/eicon/diddfunc.c
35615+++ b/drivers/isdn/hardware/eicon/diddfunc.c
35616@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35617 IDI_SYNC_REQ req;
35618 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35619
35620+ pax_track_stack();
35621+
35622 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35623
35624 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35625diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35626index d36a4c0..11e7d1a 100644
35627--- a/drivers/isdn/hardware/eicon/divasfunc.c
35628+++ b/drivers/isdn/hardware/eicon/divasfunc.c
35629@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35630 IDI_SYNC_REQ req;
35631 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35632
35633+ pax_track_stack();
35634+
35635 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35636
35637 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35638diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35639index 85784a7..a19ca98 100644
35640--- a/drivers/isdn/hardware/eicon/divasync.h
35641+++ b/drivers/isdn/hardware/eicon/divasync.h
35642@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35643 } diva_didd_add_adapter_t;
35644 typedef struct _diva_didd_remove_adapter {
35645 IDI_CALL p_request;
35646-} diva_didd_remove_adapter_t;
35647+} __no_const diva_didd_remove_adapter_t;
35648 typedef struct _diva_didd_read_adapter_array {
35649 void * buffer;
35650 dword length;
35651diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35652index db87d51..7d09acf 100644
35653--- a/drivers/isdn/hardware/eicon/idifunc.c
35654+++ b/drivers/isdn/hardware/eicon/idifunc.c
35655@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35656 IDI_SYNC_REQ req;
35657 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35658
35659+ pax_track_stack();
35660+
35661 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35662
35663 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35664diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35665index ae89fb8..0fab299 100644
35666--- a/drivers/isdn/hardware/eicon/message.c
35667+++ b/drivers/isdn/hardware/eicon/message.c
35668@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35669 dword d;
35670 word w;
35671
35672+ pax_track_stack();
35673+
35674 a = plci->adapter;
35675 Id = ((word)plci->Id<<8)|a->Id;
35676 PUT_WORD(&SS_Ind[4],0x0000);
35677@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35678 word j, n, w;
35679 dword d;
35680
35681+ pax_track_stack();
35682+
35683
35684 for(i=0;i<8;i++) bp_parms[i].length = 0;
35685 for(i=0;i<2;i++) global_config[i].length = 0;
35686@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35687 const byte llc3[] = {4,3,2,2,6,6,0};
35688 const byte header[] = {0,2,3,3,0,0,0};
35689
35690+ pax_track_stack();
35691+
35692 for(i=0;i<8;i++) bp_parms[i].length = 0;
35693 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35694 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35695@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35696 word appl_number_group_type[MAX_APPL];
35697 PLCI *auxplci;
35698
35699+ pax_track_stack();
35700+
35701 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35702
35703 if(!a->group_optimization_enabled)
35704diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35705index a564b75..f3cf8b5 100644
35706--- a/drivers/isdn/hardware/eicon/mntfunc.c
35707+++ b/drivers/isdn/hardware/eicon/mntfunc.c
35708@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35709 IDI_SYNC_REQ req;
35710 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35711
35712+ pax_track_stack();
35713+
35714 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35715
35716 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35717diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35718index a3bd163..8956575 100644
35719--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35720+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35721@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35722 typedef struct _diva_os_idi_adapter_interface {
35723 diva_init_card_proc_t cleanup_adapter_proc;
35724 diva_cmd_card_proc_t cmd_proc;
35725-} diva_os_idi_adapter_interface_t;
35726+} __no_const diva_os_idi_adapter_interface_t;
35727
35728 typedef struct _diva_os_xdi_adapter {
35729 struct list_head link;
35730diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35731index adb1e8c..21b590b 100644
35732--- a/drivers/isdn/i4l/isdn_common.c
35733+++ b/drivers/isdn/i4l/isdn_common.c
35734@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35735 } iocpar;
35736 void __user *argp = (void __user *)arg;
35737
35738+ pax_track_stack();
35739+
35740 #define name iocpar.name
35741 #define bname iocpar.bname
35742 #define iocts iocpar.iocts
35743diff --git a/drivers/isdn/i4l/isdn_net.c b/drivers/isdn/i4l/isdn_net.c
35744index 90b56ed..5ed3305 100644
35745--- a/drivers/isdn/i4l/isdn_net.c
35746+++ b/drivers/isdn/i4l/isdn_net.c
35747@@ -1902,7 +1902,7 @@ static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
35748 {
35749 isdn_net_local *lp = netdev_priv(dev);
35750 unsigned char *p;
35751- ushort len = 0;
35752+ int len = 0;
35753
35754 switch (lp->p_encap) {
35755 case ISDN_NET_ENCAP_ETHER:
35756diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35757index bf7997a..cf091db 100644
35758--- a/drivers/isdn/icn/icn.c
35759+++ b/drivers/isdn/icn/icn.c
35760@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35761 if (count > len)
35762 count = len;
35763 if (user) {
35764- if (copy_from_user(msg, buf, count))
35765+ if (count > sizeof msg || copy_from_user(msg, buf, count))
35766 return -EFAULT;
35767 } else
35768 memcpy(msg, buf, count);
35769diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35770index feb0fa4..f76f830 100644
35771--- a/drivers/isdn/mISDN/socket.c
35772+++ b/drivers/isdn/mISDN/socket.c
35773@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35774 if (dev) {
35775 struct mISDN_devinfo di;
35776
35777+ memset(&di, 0, sizeof(di));
35778 di.id = dev->id;
35779 di.Dprotocols = dev->Dprotocols;
35780 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35781@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35782 if (dev) {
35783 struct mISDN_devinfo di;
35784
35785+ memset(&di, 0, sizeof(di));
35786 di.id = dev->id;
35787 di.Dprotocols = dev->Dprotocols;
35788 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35789diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
35790index 485be8b..f0225bc 100644
35791--- a/drivers/isdn/sc/interrupt.c
35792+++ b/drivers/isdn/sc/interrupt.c
35793@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35794 }
35795 else if(callid>=0x0000 && callid<=0x7FFF)
35796 {
35797+ int len;
35798+
35799 pr_debug("%s: Got Incoming Call\n",
35800 sc_adapter[card]->devicename);
35801- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
35802- strcpy(setup.eazmsn,
35803- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
35804+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
35805+ sizeof(setup.phone));
35806+ if (len >= sizeof(setup.phone))
35807+ continue;
35808+ len = strlcpy(setup.eazmsn,
35809+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35810+ sizeof(setup.eazmsn));
35811+ if (len >= sizeof(setup.eazmsn))
35812+ continue;
35813 setup.si1 = 7;
35814 setup.si2 = 0;
35815 setup.plan = 0;
35816@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35817 * Handle a GetMyNumber Rsp
35818 */
35819 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
35820- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
35821+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35822+ rcvmsg.msg_data.byte_array,
35823+ sizeof(rcvmsg.msg_data.byte_array));
35824 continue;
35825 }
35826
35827diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35828index 8744d24..d1f9a9a 100644
35829--- a/drivers/lguest/core.c
35830+++ b/drivers/lguest/core.c
35831@@ -91,9 +91,17 @@ static __init int map_switcher(void)
35832 * it's worked so far. The end address needs +1 because __get_vm_area
35833 * allocates an extra guard page, so we need space for that.
35834 */
35835+
35836+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35837+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35838+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35839+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35840+#else
35841 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35842 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35843 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35844+#endif
35845+
35846 if (!switcher_vma) {
35847 err = -ENOMEM;
35848 printk("lguest: could not map switcher pages high\n");
35849@@ -118,7 +126,7 @@ static __init int map_switcher(void)
35850 * Now the Switcher is mapped at the right address, we can't fail!
35851 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
35852 */
35853- memcpy(switcher_vma->addr, start_switcher_text,
35854+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35855 end_switcher_text - start_switcher_text);
35856
35857 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35858diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35859index 6ae3888..8b38145 100644
35860--- a/drivers/lguest/x86/core.c
35861+++ b/drivers/lguest/x86/core.c
35862@@ -59,7 +59,7 @@ static struct {
35863 /* Offset from where switcher.S was compiled to where we've copied it */
35864 static unsigned long switcher_offset(void)
35865 {
35866- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35867+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35868 }
35869
35870 /* This cpu's struct lguest_pages. */
35871@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35872 * These copies are pretty cheap, so we do them unconditionally: */
35873 /* Save the current Host top-level page directory.
35874 */
35875+
35876+#ifdef CONFIG_PAX_PER_CPU_PGD
35877+ pages->state.host_cr3 = read_cr3();
35878+#else
35879 pages->state.host_cr3 = __pa(current->mm->pgd);
35880+#endif
35881+
35882 /*
35883 * Set up the Guest's page tables to see this CPU's pages (and no
35884 * other CPU's pages).
35885@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
35886 * compiled-in switcher code and the high-mapped copy we just made.
35887 */
35888 for (i = 0; i < IDT_ENTRIES; i++)
35889- default_idt_entries[i] += switcher_offset();
35890+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
35891
35892 /*
35893 * Set up the Switcher's per-cpu areas.
35894@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
35895 * it will be undisturbed when we switch. To change %cs and jump we
35896 * need this structure to feed to Intel's "lcall" instruction.
35897 */
35898- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
35899+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
35900 lguest_entry.segment = LGUEST_CS;
35901
35902 /*
35903diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
35904index 40634b0..4f5855e 100644
35905--- a/drivers/lguest/x86/switcher_32.S
35906+++ b/drivers/lguest/x86/switcher_32.S
35907@@ -87,6 +87,7 @@
35908 #include <asm/page.h>
35909 #include <asm/segment.h>
35910 #include <asm/lguest.h>
35911+#include <asm/processor-flags.h>
35912
35913 // We mark the start of the code to copy
35914 // It's placed in .text tho it's never run here
35915@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
35916 // Changes type when we load it: damn Intel!
35917 // For after we switch over our page tables
35918 // That entry will be read-only: we'd crash.
35919+
35920+#ifdef CONFIG_PAX_KERNEXEC
35921+ mov %cr0, %edx
35922+ xor $X86_CR0_WP, %edx
35923+ mov %edx, %cr0
35924+#endif
35925+
35926 movl $(GDT_ENTRY_TSS*8), %edx
35927 ltr %dx
35928
35929@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
35930 // Let's clear it again for our return.
35931 // The GDT descriptor of the Host
35932 // Points to the table after two "size" bytes
35933- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
35934+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
35935 // Clear "used" from type field (byte 5, bit 2)
35936- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
35937+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
35938+
35939+#ifdef CONFIG_PAX_KERNEXEC
35940+ mov %cr0, %eax
35941+ xor $X86_CR0_WP, %eax
35942+ mov %eax, %cr0
35943+#endif
35944
35945 // Once our page table's switched, the Guest is live!
35946 // The Host fades as we run this final step.
35947@@ -295,13 +309,12 @@ deliver_to_host:
35948 // I consulted gcc, and it gave
35949 // These instructions, which I gladly credit:
35950 leal (%edx,%ebx,8), %eax
35951- movzwl (%eax),%edx
35952- movl 4(%eax), %eax
35953- xorw %ax, %ax
35954- orl %eax, %edx
35955+ movl 4(%eax), %edx
35956+ movw (%eax), %dx
35957 // Now the address of the handler's in %edx
35958 // We call it now: its "iret" drops us home.
35959- jmp *%edx
35960+ ljmp $__KERNEL_CS, $1f
35961+1: jmp *%edx
35962
35963 // Every interrupt can come to us here
35964 // But we must truly tell each apart.
35965diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
35966index 588a5b0..b71db89 100644
35967--- a/drivers/macintosh/macio_asic.c
35968+++ b/drivers/macintosh/macio_asic.c
35969@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
35970 * MacIO is matched against any Apple ID, it's probe() function
35971 * will then decide wether it applies or not
35972 */
35973-static const struct pci_device_id __devinitdata pci_ids [] = { {
35974+static const struct pci_device_id __devinitconst pci_ids [] = { {
35975 .vendor = PCI_VENDOR_ID_APPLE,
35976 .device = PCI_ANY_ID,
35977 .subvendor = PCI_ANY_ID,
35978diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
35979index a348bb0..ecd9b3f 100644
35980--- a/drivers/macintosh/via-pmu-backlight.c
35981+++ b/drivers/macintosh/via-pmu-backlight.c
35982@@ -15,7 +15,7 @@
35983
35984 #define MAX_PMU_LEVEL 0xFF
35985
35986-static struct backlight_ops pmu_backlight_data;
35987+static const struct backlight_ops pmu_backlight_data;
35988 static DEFINE_SPINLOCK(pmu_backlight_lock);
35989 static int sleeping, uses_pmu_bl;
35990 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
35991@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
35992 return bd->props.brightness;
35993 }
35994
35995-static struct backlight_ops pmu_backlight_data = {
35996+static const struct backlight_ops pmu_backlight_data = {
35997 .get_brightness = pmu_backlight_get_brightness,
35998 .update_status = pmu_backlight_update_status,
35999
36000diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
36001index 6f308a4..b5f7ff7 100644
36002--- a/drivers/macintosh/via-pmu.c
36003+++ b/drivers/macintosh/via-pmu.c
36004@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
36005 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
36006 }
36007
36008-static struct platform_suspend_ops pmu_pm_ops = {
36009+static const struct platform_suspend_ops pmu_pm_ops = {
36010 .enter = powerbook_sleep,
36011 .valid = pmu_sleep_valid,
36012 };
36013diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
36014index 818b617..4656e38 100644
36015--- a/drivers/md/dm-ioctl.c
36016+++ b/drivers/md/dm-ioctl.c
36017@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
36018 cmd == DM_LIST_VERSIONS_CMD)
36019 return 0;
36020
36021- if ((cmd == DM_DEV_CREATE_CMD)) {
36022+ if (cmd == DM_DEV_CREATE_CMD) {
36023 if (!*param->name) {
36024 DMWARN("name not supplied when creating device");
36025 return -EINVAL;
36026diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36027index 6021d0a..a878643 100644
36028--- a/drivers/md/dm-raid1.c
36029+++ b/drivers/md/dm-raid1.c
36030@@ -41,7 +41,7 @@ enum dm_raid1_error {
36031
36032 struct mirror {
36033 struct mirror_set *ms;
36034- atomic_t error_count;
36035+ atomic_unchecked_t error_count;
36036 unsigned long error_type;
36037 struct dm_dev *dev;
36038 sector_t offset;
36039@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36040 * simple way to tell if a device has encountered
36041 * errors.
36042 */
36043- atomic_inc(&m->error_count);
36044+ atomic_inc_unchecked(&m->error_count);
36045
36046 if (test_and_set_bit(error_type, &m->error_type))
36047 return;
36048@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36049 }
36050
36051 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36052- if (!atomic_read(&new->error_count)) {
36053+ if (!atomic_read_unchecked(&new->error_count)) {
36054 set_default_mirror(new);
36055 break;
36056 }
36057@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36058 struct mirror *m = get_default_mirror(ms);
36059
36060 do {
36061- if (likely(!atomic_read(&m->error_count)))
36062+ if (likely(!atomic_read_unchecked(&m->error_count)))
36063 return m;
36064
36065 if (m-- == ms->mirror)
36066@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36067 {
36068 struct mirror *default_mirror = get_default_mirror(m->ms);
36069
36070- return !atomic_read(&default_mirror->error_count);
36071+ return !atomic_read_unchecked(&default_mirror->error_count);
36072 }
36073
36074 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36075@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36076 */
36077 if (likely(region_in_sync(ms, region, 1)))
36078 m = choose_mirror(ms, bio->bi_sector);
36079- else if (m && atomic_read(&m->error_count))
36080+ else if (m && atomic_read_unchecked(&m->error_count))
36081 m = NULL;
36082
36083 if (likely(m))
36084@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36085 }
36086
36087 ms->mirror[mirror].ms = ms;
36088- atomic_set(&(ms->mirror[mirror].error_count), 0);
36089+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36090 ms->mirror[mirror].error_type = 0;
36091 ms->mirror[mirror].offset = offset;
36092
36093@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36094 */
36095 static char device_status_char(struct mirror *m)
36096 {
36097- if (!atomic_read(&(m->error_count)))
36098+ if (!atomic_read_unchecked(&(m->error_count)))
36099 return 'A';
36100
36101 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36102diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36103index bd58703..9f26571 100644
36104--- a/drivers/md/dm-stripe.c
36105+++ b/drivers/md/dm-stripe.c
36106@@ -20,7 +20,7 @@ struct stripe {
36107 struct dm_dev *dev;
36108 sector_t physical_start;
36109
36110- atomic_t error_count;
36111+ atomic_unchecked_t error_count;
36112 };
36113
36114 struct stripe_c {
36115@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36116 kfree(sc);
36117 return r;
36118 }
36119- atomic_set(&(sc->stripe[i].error_count), 0);
36120+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36121 }
36122
36123 ti->private = sc;
36124@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36125 DMEMIT("%d ", sc->stripes);
36126 for (i = 0; i < sc->stripes; i++) {
36127 DMEMIT("%s ", sc->stripe[i].dev->name);
36128- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36129+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36130 'D' : 'A';
36131 }
36132 buffer[i] = '\0';
36133@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36134 */
36135 for (i = 0; i < sc->stripes; i++)
36136 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36137- atomic_inc(&(sc->stripe[i].error_count));
36138- if (atomic_read(&(sc->stripe[i].error_count)) <
36139+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
36140+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36141 DM_IO_ERROR_THRESHOLD)
36142 queue_work(kstriped, &sc->kstriped_ws);
36143 }
36144diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36145index 4b04590..13a77b2 100644
36146--- a/drivers/md/dm-sysfs.c
36147+++ b/drivers/md/dm-sysfs.c
36148@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36149 NULL,
36150 };
36151
36152-static struct sysfs_ops dm_sysfs_ops = {
36153+static const struct sysfs_ops dm_sysfs_ops = {
36154 .show = dm_attr_show,
36155 };
36156
36157diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36158index 03345bb..332250d 100644
36159--- a/drivers/md/dm-table.c
36160+++ b/drivers/md/dm-table.c
36161@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36162 if (!dev_size)
36163 return 0;
36164
36165- if ((start >= dev_size) || (start + len > dev_size)) {
36166+ if ((start >= dev_size) || (len > dev_size - start)) {
36167 DMWARN("%s: %s too small for target: "
36168 "start=%llu, len=%llu, dev_size=%llu",
36169 dm_device_name(ti->table->md), bdevname(bdev, b),
36170diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36171index c988ac2..c418141 100644
36172--- a/drivers/md/dm.c
36173+++ b/drivers/md/dm.c
36174@@ -165,9 +165,9 @@ struct mapped_device {
36175 /*
36176 * Event handling.
36177 */
36178- atomic_t event_nr;
36179+ atomic_unchecked_t event_nr;
36180 wait_queue_head_t eventq;
36181- atomic_t uevent_seq;
36182+ atomic_unchecked_t uevent_seq;
36183 struct list_head uevent_list;
36184 spinlock_t uevent_lock; /* Protect access to uevent_list */
36185
36186@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36187 rwlock_init(&md->map_lock);
36188 atomic_set(&md->holders, 1);
36189 atomic_set(&md->open_count, 0);
36190- atomic_set(&md->event_nr, 0);
36191- atomic_set(&md->uevent_seq, 0);
36192+ atomic_set_unchecked(&md->event_nr, 0);
36193+ atomic_set_unchecked(&md->uevent_seq, 0);
36194 INIT_LIST_HEAD(&md->uevent_list);
36195 spin_lock_init(&md->uevent_lock);
36196
36197@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36198
36199 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36200
36201- atomic_inc(&md->event_nr);
36202+ atomic_inc_unchecked(&md->event_nr);
36203 wake_up(&md->eventq);
36204 }
36205
36206@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36207
36208 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36209 {
36210- return atomic_add_return(1, &md->uevent_seq);
36211+ return atomic_add_return_unchecked(1, &md->uevent_seq);
36212 }
36213
36214 uint32_t dm_get_event_nr(struct mapped_device *md)
36215 {
36216- return atomic_read(&md->event_nr);
36217+ return atomic_read_unchecked(&md->event_nr);
36218 }
36219
36220 int dm_wait_event(struct mapped_device *md, int event_nr)
36221 {
36222 return wait_event_interruptible(md->eventq,
36223- (event_nr != atomic_read(&md->event_nr)));
36224+ (event_nr != atomic_read_unchecked(&md->event_nr)));
36225 }
36226
36227 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36228diff --git a/drivers/md/md.c b/drivers/md/md.c
36229index 4ce6e2f..7a9530a 100644
36230--- a/drivers/md/md.c
36231+++ b/drivers/md/md.c
36232@@ -153,10 +153,10 @@ static int start_readonly;
36233 * start build, activate spare
36234 */
36235 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36236-static atomic_t md_event_count;
36237+static atomic_unchecked_t md_event_count;
36238 void md_new_event(mddev_t *mddev)
36239 {
36240- atomic_inc(&md_event_count);
36241+ atomic_inc_unchecked(&md_event_count);
36242 wake_up(&md_event_waiters);
36243 }
36244 EXPORT_SYMBOL_GPL(md_new_event);
36245@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36246 */
36247 static void md_new_event_inintr(mddev_t *mddev)
36248 {
36249- atomic_inc(&md_event_count);
36250+ atomic_inc_unchecked(&md_event_count);
36251 wake_up(&md_event_waiters);
36252 }
36253
36254@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36255
36256 rdev->preferred_minor = 0xffff;
36257 rdev->data_offset = le64_to_cpu(sb->data_offset);
36258- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36259+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36260
36261 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36262 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36263@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36264 else
36265 sb->resync_offset = cpu_to_le64(0);
36266
36267- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36268+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36269
36270 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36271 sb->size = cpu_to_le64(mddev->dev_sectors);
36272@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36273 static ssize_t
36274 errors_show(mdk_rdev_t *rdev, char *page)
36275 {
36276- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36277+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36278 }
36279
36280 static ssize_t
36281@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36282 char *e;
36283 unsigned long n = simple_strtoul(buf, &e, 10);
36284 if (*buf && (*e == 0 || *e == '\n')) {
36285- atomic_set(&rdev->corrected_errors, n);
36286+ atomic_set_unchecked(&rdev->corrected_errors, n);
36287 return len;
36288 }
36289 return -EINVAL;
36290@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36291 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36292 kfree(rdev);
36293 }
36294-static struct sysfs_ops rdev_sysfs_ops = {
36295+static const struct sysfs_ops rdev_sysfs_ops = {
36296 .show = rdev_attr_show,
36297 .store = rdev_attr_store,
36298 };
36299@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36300 rdev->data_offset = 0;
36301 rdev->sb_events = 0;
36302 atomic_set(&rdev->nr_pending, 0);
36303- atomic_set(&rdev->read_errors, 0);
36304- atomic_set(&rdev->corrected_errors, 0);
36305+ atomic_set_unchecked(&rdev->read_errors, 0);
36306+ atomic_set_unchecked(&rdev->corrected_errors, 0);
36307
36308 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36309 if (!size) {
36310@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36311 kfree(mddev);
36312 }
36313
36314-static struct sysfs_ops md_sysfs_ops = {
36315+static const struct sysfs_ops md_sysfs_ops = {
36316 .show = md_attr_show,
36317 .store = md_attr_store,
36318 };
36319@@ -4482,7 +4482,8 @@ out:
36320 err = 0;
36321 blk_integrity_unregister(disk);
36322 md_new_event(mddev);
36323- sysfs_notify_dirent(mddev->sysfs_state);
36324+ if (mddev->sysfs_state)
36325+ sysfs_notify_dirent(mddev->sysfs_state);
36326 return err;
36327 }
36328
36329@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36330
36331 spin_unlock(&pers_lock);
36332 seq_printf(seq, "\n");
36333- mi->event = atomic_read(&md_event_count);
36334+ mi->event = atomic_read_unchecked(&md_event_count);
36335 return 0;
36336 }
36337 if (v == (void*)2) {
36338@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36339 chunk_kb ? "KB" : "B");
36340 if (bitmap->file) {
36341 seq_printf(seq, ", file: ");
36342- seq_path(seq, &bitmap->file->f_path, " \t\n");
36343+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36344 }
36345
36346 seq_printf(seq, "\n");
36347@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36348 else {
36349 struct seq_file *p = file->private_data;
36350 p->private = mi;
36351- mi->event = atomic_read(&md_event_count);
36352+ mi->event = atomic_read_unchecked(&md_event_count);
36353 }
36354 return error;
36355 }
36356@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36357 /* always allow read */
36358 mask = POLLIN | POLLRDNORM;
36359
36360- if (mi->event != atomic_read(&md_event_count))
36361+ if (mi->event != atomic_read_unchecked(&md_event_count))
36362 mask |= POLLERR | POLLPRI;
36363 return mask;
36364 }
36365@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36366 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36367 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36368 (int)part_stat_read(&disk->part0, sectors[1]) -
36369- atomic_read(&disk->sync_io);
36370+ atomic_read_unchecked(&disk->sync_io);
36371 /* sync IO will cause sync_io to increase before the disk_stats
36372 * as sync_io is counted when a request starts, and
36373 * disk_stats is counted when it completes.
36374diff --git a/drivers/md/md.h b/drivers/md/md.h
36375index 87430fe..0024a4c 100644
36376--- a/drivers/md/md.h
36377+++ b/drivers/md/md.h
36378@@ -94,10 +94,10 @@ struct mdk_rdev_s
36379 * only maintained for arrays that
36380 * support hot removal
36381 */
36382- atomic_t read_errors; /* number of consecutive read errors that
36383+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
36384 * we have tried to ignore.
36385 */
36386- atomic_t corrected_errors; /* number of corrected read errors,
36387+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36388 * for reporting to userspace and storing
36389 * in superblock.
36390 */
36391@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36392
36393 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36394 {
36395- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36396+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36397 }
36398
36399 struct mdk_personality
36400diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36401index 968cb14..f0ad2e4 100644
36402--- a/drivers/md/raid1.c
36403+++ b/drivers/md/raid1.c
36404@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36405 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36406 continue;
36407 rdev = conf->mirrors[d].rdev;
36408- atomic_add(s, &rdev->corrected_errors);
36409+ atomic_add_unchecked(s, &rdev->corrected_errors);
36410 if (sync_page_io(rdev->bdev,
36411 sect + rdev->data_offset,
36412 s<<9,
36413@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36414 /* Well, this device is dead */
36415 md_error(mddev, rdev);
36416 else {
36417- atomic_add(s, &rdev->corrected_errors);
36418+ atomic_add_unchecked(s, &rdev->corrected_errors);
36419 printk(KERN_INFO
36420 "raid1:%s: read error corrected "
36421 "(%d sectors at %llu on %s)\n",
36422diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36423index 1b4e232..cf0f534 100644
36424--- a/drivers/md/raid10.c
36425+++ b/drivers/md/raid10.c
36426@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36427 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36428 set_bit(R10BIO_Uptodate, &r10_bio->state);
36429 else {
36430- atomic_add(r10_bio->sectors,
36431+ atomic_add_unchecked(r10_bio->sectors,
36432 &conf->mirrors[d].rdev->corrected_errors);
36433 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36434 md_error(r10_bio->mddev,
36435@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36436 test_bit(In_sync, &rdev->flags)) {
36437 atomic_inc(&rdev->nr_pending);
36438 rcu_read_unlock();
36439- atomic_add(s, &rdev->corrected_errors);
36440+ atomic_add_unchecked(s, &rdev->corrected_errors);
36441 if (sync_page_io(rdev->bdev,
36442 r10_bio->devs[sl].addr +
36443 sect + rdev->data_offset,
36444diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36445index 883215d..675bf47 100644
36446--- a/drivers/md/raid5.c
36447+++ b/drivers/md/raid5.c
36448@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36449 bi->bi_next = NULL;
36450 if ((rw & WRITE) &&
36451 test_bit(R5_ReWrite, &sh->dev[i].flags))
36452- atomic_add(STRIPE_SECTORS,
36453+ atomic_add_unchecked(STRIPE_SECTORS,
36454 &rdev->corrected_errors);
36455 generic_make_request(bi);
36456 } else {
36457@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36458 clear_bit(R5_ReadError, &sh->dev[i].flags);
36459 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36460 }
36461- if (atomic_read(&conf->disks[i].rdev->read_errors))
36462- atomic_set(&conf->disks[i].rdev->read_errors, 0);
36463+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36464+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36465 } else {
36466 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36467 int retry = 0;
36468 rdev = conf->disks[i].rdev;
36469
36470 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36471- atomic_inc(&rdev->read_errors);
36472+ atomic_inc_unchecked(&rdev->read_errors);
36473 if (conf->mddev->degraded >= conf->max_degraded)
36474 printk_rl(KERN_WARNING
36475 "raid5:%s: read error not correctable "
36476@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36477 (unsigned long long)(sh->sector
36478 + rdev->data_offset),
36479 bdn);
36480- else if (atomic_read(&rdev->read_errors)
36481+ else if (atomic_read_unchecked(&rdev->read_errors)
36482 > conf->max_nr_stripes)
36483 printk(KERN_WARNING
36484 "raid5:%s: Too many read errors, failing device %s.\n",
36485@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36486 sector_t r_sector;
36487 struct stripe_head sh2;
36488
36489+ pax_track_stack();
36490
36491 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36492 stripe = new_sector;
36493diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36494index 05bde9c..2f31d40 100644
36495--- a/drivers/media/common/saa7146_hlp.c
36496+++ b/drivers/media/common/saa7146_hlp.c
36497@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36498
36499 int x[32], y[32], w[32], h[32];
36500
36501+ pax_track_stack();
36502+
36503 /* clear out memory */
36504 memset(&line_list[0], 0x00, sizeof(u32)*32);
36505 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36506diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36507index cb22da5..82b686e 100644
36508--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36509+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36510@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36511 u8 buf[HOST_LINK_BUF_SIZE];
36512 int i;
36513
36514+ pax_track_stack();
36515+
36516 dprintk("%s\n", __func__);
36517
36518 /* check if we have space for a link buf in the rx_buffer */
36519@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36520 unsigned long timeout;
36521 int written;
36522
36523+ pax_track_stack();
36524+
36525 dprintk("%s\n", __func__);
36526
36527 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36528diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36529index 2fe05d0..a3289c4 100644
36530--- a/drivers/media/dvb/dvb-core/dvb_demux.h
36531+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36532@@ -71,7 +71,7 @@ struct dvb_demux_feed {
36533 union {
36534 dmx_ts_cb ts;
36535 dmx_section_cb sec;
36536- } cb;
36537+ } __no_const cb;
36538
36539 struct dvb_demux *demux;
36540 void *priv;
36541diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36542index 94159b9..376bd8e 100644
36543--- a/drivers/media/dvb/dvb-core/dvbdev.c
36544+++ b/drivers/media/dvb/dvb-core/dvbdev.c
36545@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36546 const struct dvb_device *template, void *priv, int type)
36547 {
36548 struct dvb_device *dvbdev;
36549- struct file_operations *dvbdevfops;
36550+ file_operations_no_const *dvbdevfops;
36551 struct device *clsdev;
36552 int minor;
36553 int id;
36554diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36555index 2a53dd0..db8c07a 100644
36556--- a/drivers/media/dvb/dvb-usb/cxusb.c
36557+++ b/drivers/media/dvb/dvb-usb/cxusb.c
36558@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36559 struct dib0700_adapter_state {
36560 int (*set_param_save) (struct dvb_frontend *,
36561 struct dvb_frontend_parameters *);
36562-};
36563+} __no_const;
36564
36565 static int dib7070_set_param_override(struct dvb_frontend *fe,
36566 struct dvb_frontend_parameters *fep)
36567diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36568index db7f7f7..f55e96f 100644
36569--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36570+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36571@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36572
36573 u8 buf[260];
36574
36575+ pax_track_stack();
36576+
36577 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36578 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36579
36580diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36581index 524acf5..5ffc403 100644
36582--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36583+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36584@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36585
36586 struct dib0700_adapter_state {
36587 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36588-};
36589+} __no_const;
36590
36591 /* Hauppauge Nova-T 500 (aka Bristol)
36592 * has a LNA on GPIO0 which is enabled by setting 1 */
36593diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36594index ba91735..4261d84 100644
36595--- a/drivers/media/dvb/frontends/dib3000.h
36596+++ b/drivers/media/dvb/frontends/dib3000.h
36597@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36598 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36599 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36600 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36601-};
36602+} __no_const;
36603
36604 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36605 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36606diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36607index c709ce6..b3fe620 100644
36608--- a/drivers/media/dvb/frontends/or51211.c
36609+++ b/drivers/media/dvb/frontends/or51211.c
36610@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36611 u8 tudata[585];
36612 int i;
36613
36614+ pax_track_stack();
36615+
36616 dprintk("Firmware is %zd bytes\n",fw->size);
36617
36618 /* Get eprom data */
36619diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36620index 482d0f3..ee1e202 100644
36621--- a/drivers/media/radio/radio-cadet.c
36622+++ b/drivers/media/radio/radio-cadet.c
36623@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36624 while (i < count && dev->rdsin != dev->rdsout)
36625 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36626
36627- if (copy_to_user(data, readbuf, i))
36628+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36629 return -EFAULT;
36630 return i;
36631 }
36632diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36633index 6dd51e2..0359b92 100644
36634--- a/drivers/media/video/cx18/cx18-driver.c
36635+++ b/drivers/media/video/cx18/cx18-driver.c
36636@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36637
36638 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36639
36640-static atomic_t cx18_instance = ATOMIC_INIT(0);
36641+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36642
36643 /* Parameter declarations */
36644 static int cardtype[CX18_MAX_CARDS];
36645@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36646 struct i2c_client c;
36647 u8 eedata[256];
36648
36649+ pax_track_stack();
36650+
36651 memset(&c, 0, sizeof(c));
36652 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36653 c.adapter = &cx->i2c_adap[0];
36654@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36655 struct cx18 *cx;
36656
36657 /* FIXME - module parameter arrays constrain max instances */
36658- i = atomic_inc_return(&cx18_instance) - 1;
36659+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36660 if (i >= CX18_MAX_CARDS) {
36661 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36662 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36663diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36664index 463ec34..2f4625a 100644
36665--- a/drivers/media/video/ivtv/ivtv-driver.c
36666+++ b/drivers/media/video/ivtv/ivtv-driver.c
36667@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36668 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36669
36670 /* ivtv instance counter */
36671-static atomic_t ivtv_instance = ATOMIC_INIT(0);
36672+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36673
36674 /* Parameter declarations */
36675 static int cardtype[IVTV_MAX_CARDS];
36676diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36677index 5fc4ac0..652a54a 100644
36678--- a/drivers/media/video/omap24xxcam.c
36679+++ b/drivers/media/video/omap24xxcam.c
36680@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36681 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36682
36683 do_gettimeofday(&vb->ts);
36684- vb->field_count = atomic_add_return(2, &fh->field_count);
36685+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36686 if (csr & csr_error) {
36687 vb->state = VIDEOBUF_ERROR;
36688 if (!atomic_read(&fh->cam->in_reset)) {
36689diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36690index 2ce67f5..cf26a5b 100644
36691--- a/drivers/media/video/omap24xxcam.h
36692+++ b/drivers/media/video/omap24xxcam.h
36693@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36694 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36695 struct videobuf_queue vbq;
36696 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36697- atomic_t field_count; /* field counter for videobuf_buffer */
36698+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36699 /* accessing cam here doesn't need serialisation: it's constant */
36700 struct omap24xxcam_device *cam;
36701 };
36702diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36703index 299afa4..eb47459 100644
36704--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36705+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36706@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36707 u8 *eeprom;
36708 struct tveeprom tvdata;
36709
36710+ pax_track_stack();
36711+
36712 memset(&tvdata,0,sizeof(tvdata));
36713
36714 eeprom = pvr2_eeprom_fetch(hdw);
36715diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36716index 5b152ff..3320638 100644
36717--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36718+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36719@@ -195,7 +195,7 @@ struct pvr2_hdw {
36720
36721 /* I2C stuff */
36722 struct i2c_adapter i2c_adap;
36723- struct i2c_algorithm i2c_algo;
36724+ i2c_algorithm_no_const i2c_algo;
36725 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36726 int i2c_cx25840_hack_state;
36727 int i2c_linked;
36728diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36729index 1eabff6..8e2313a 100644
36730--- a/drivers/media/video/saa7134/saa6752hs.c
36731+++ b/drivers/media/video/saa7134/saa6752hs.c
36732@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36733 unsigned char localPAT[256];
36734 unsigned char localPMT[256];
36735
36736+ pax_track_stack();
36737+
36738 /* Set video format - must be done first as it resets other settings */
36739 set_reg8(client, 0x41, h->video_format);
36740
36741diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36742index 9c1d3ac..b1b49e9 100644
36743--- a/drivers/media/video/saa7164/saa7164-cmd.c
36744+++ b/drivers/media/video/saa7164/saa7164-cmd.c
36745@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36746 wait_queue_head_t *q = 0;
36747 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36748
36749+ pax_track_stack();
36750+
36751 /* While any outstand message on the bus exists... */
36752 do {
36753
36754@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36755 u8 tmp[512];
36756 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36757
36758+ pax_track_stack();
36759+
36760 while (loop) {
36761
36762 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36763diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36764index b085496..cde0270 100644
36765--- a/drivers/media/video/usbvideo/ibmcam.c
36766+++ b/drivers/media/video/usbvideo/ibmcam.c
36767@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36768 static int __init ibmcam_init(void)
36769 {
36770 struct usbvideo_cb cbTbl;
36771- memset(&cbTbl, 0, sizeof(cbTbl));
36772- cbTbl.probe = ibmcam_probe;
36773- cbTbl.setupOnOpen = ibmcam_setup_on_open;
36774- cbTbl.videoStart = ibmcam_video_start;
36775- cbTbl.videoStop = ibmcam_video_stop;
36776- cbTbl.processData = ibmcam_ProcessIsocData;
36777- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36778- cbTbl.adjustPicture = ibmcam_adjust_picture;
36779- cbTbl.getFPS = ibmcam_calculate_fps;
36780+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
36781+ *(void **)&cbTbl.probe = ibmcam_probe;
36782+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
36783+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
36784+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
36785+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
36786+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36787+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
36788+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
36789 return usbvideo_register(
36790 &cams,
36791 MAX_IBMCAM,
36792diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
36793index 31d57f2..600b735 100644
36794--- a/drivers/media/video/usbvideo/konicawc.c
36795+++ b/drivers/media/video/usbvideo/konicawc.c
36796@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
36797 int error;
36798
36799 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36800- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36801+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36802
36803 cam->input = input_dev = input_allocate_device();
36804 if (!input_dev) {
36805@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
36806 struct usbvideo_cb cbTbl;
36807 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
36808 DRIVER_DESC "\n");
36809- memset(&cbTbl, 0, sizeof(cbTbl));
36810- cbTbl.probe = konicawc_probe;
36811- cbTbl.setupOnOpen = konicawc_setup_on_open;
36812- cbTbl.processData = konicawc_process_isoc;
36813- cbTbl.getFPS = konicawc_calculate_fps;
36814- cbTbl.setVideoMode = konicawc_set_video_mode;
36815- cbTbl.startDataPump = konicawc_start_data;
36816- cbTbl.stopDataPump = konicawc_stop_data;
36817- cbTbl.adjustPicture = konicawc_adjust_picture;
36818- cbTbl.userFree = konicawc_free_uvd;
36819+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
36820+ *(void **)&cbTbl.probe = konicawc_probe;
36821+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
36822+ *(void **)&cbTbl.processData = konicawc_process_isoc;
36823+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
36824+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
36825+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
36826+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
36827+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
36828+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
36829 return usbvideo_register(
36830 &cams,
36831 MAX_CAMERAS,
36832diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
36833index 803d3e4..c4d1b96 100644
36834--- a/drivers/media/video/usbvideo/quickcam_messenger.c
36835+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
36836@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
36837 int error;
36838
36839 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36840- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36841+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36842
36843 cam->input = input_dev = input_allocate_device();
36844 if (!input_dev) {
36845diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
36846index fbd1b63..292f9f0 100644
36847--- a/drivers/media/video/usbvideo/ultracam.c
36848+++ b/drivers/media/video/usbvideo/ultracam.c
36849@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
36850 {
36851 struct usbvideo_cb cbTbl;
36852 memset(&cbTbl, 0, sizeof(cbTbl));
36853- cbTbl.probe = ultracam_probe;
36854- cbTbl.setupOnOpen = ultracam_setup_on_open;
36855- cbTbl.videoStart = ultracam_video_start;
36856- cbTbl.videoStop = ultracam_video_stop;
36857- cbTbl.processData = ultracam_ProcessIsocData;
36858- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36859- cbTbl.adjustPicture = ultracam_adjust_picture;
36860- cbTbl.getFPS = ultracam_calculate_fps;
36861+ *(void **)&cbTbl.probe = ultracam_probe;
36862+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
36863+ *(void **)&cbTbl.videoStart = ultracam_video_start;
36864+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
36865+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
36866+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36867+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
36868+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
36869 return usbvideo_register(
36870 &cams,
36871 MAX_CAMERAS,
36872diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
36873index dea8b32..34f6878 100644
36874--- a/drivers/media/video/usbvideo/usbvideo.c
36875+++ b/drivers/media/video/usbvideo/usbvideo.c
36876@@ -697,15 +697,15 @@ int usbvideo_register(
36877 __func__, cams, base_size, num_cams);
36878
36879 /* Copy callbacks, apply defaults for those that are not set */
36880- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
36881+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
36882 if (cams->cb.getFrame == NULL)
36883- cams->cb.getFrame = usbvideo_GetFrame;
36884+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
36885 if (cams->cb.disconnect == NULL)
36886- cams->cb.disconnect = usbvideo_Disconnect;
36887+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
36888 if (cams->cb.startDataPump == NULL)
36889- cams->cb.startDataPump = usbvideo_StartDataPump;
36890+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
36891 if (cams->cb.stopDataPump == NULL)
36892- cams->cb.stopDataPump = usbvideo_StopDataPump;
36893+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
36894
36895 cams->num_cameras = num_cams;
36896 cams->cam = (struct uvd *) &cams[1];
36897diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
36898index c66985b..7fa143a 100644
36899--- a/drivers/media/video/usbvideo/usbvideo.h
36900+++ b/drivers/media/video/usbvideo/usbvideo.h
36901@@ -268,7 +268,7 @@ struct usbvideo_cb {
36902 int (*startDataPump)(struct uvd *uvd);
36903 void (*stopDataPump)(struct uvd *uvd);
36904 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
36905-};
36906+} __no_const;
36907
36908 struct usbvideo {
36909 int num_cameras; /* As allocated */
36910diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
36911index e0f91e4..37554ea 100644
36912--- a/drivers/media/video/usbvision/usbvision-core.c
36913+++ b/drivers/media/video/usbvision/usbvision-core.c
36914@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
36915 unsigned char rv, gv, bv;
36916 static unsigned char *Y, *U, *V;
36917
36918+ pax_track_stack();
36919+
36920 frame = usbvision->curFrame;
36921 imageSize = frame->frmwidth * frame->frmheight;
36922 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
36923diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
36924index 0d06e7c..3d17d24 100644
36925--- a/drivers/media/video/v4l2-device.c
36926+++ b/drivers/media/video/v4l2-device.c
36927@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
36928 EXPORT_SYMBOL_GPL(v4l2_device_register);
36929
36930 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
36931- atomic_t *instance)
36932+ atomic_unchecked_t *instance)
36933 {
36934- int num = atomic_inc_return(instance) - 1;
36935+ int num = atomic_inc_return_unchecked(instance) - 1;
36936 int len = strlen(basename);
36937
36938 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
36939diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
36940index 032ebae..6a3532c 100644
36941--- a/drivers/media/video/videobuf-dma-sg.c
36942+++ b/drivers/media/video/videobuf-dma-sg.c
36943@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
36944 {
36945 struct videobuf_queue q;
36946
36947+ pax_track_stack();
36948+
36949 /* Required to make generic handler to call __videobuf_alloc */
36950 q.int_ops = &sg_ops;
36951
36952diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
36953index b6992b7..9fa7547 100644
36954--- a/drivers/message/fusion/mptbase.c
36955+++ b/drivers/message/fusion/mptbase.c
36956@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
36957 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
36958 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
36959
36960+#ifdef CONFIG_GRKERNSEC_HIDESYM
36961+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36962+ NULL, NULL);
36963+#else
36964 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36965 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
36966+#endif
36967+
36968 /*
36969 * Rounding UP to nearest 4-kB boundary here...
36970 */
36971diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
36972index 83873e3..e360e9a 100644
36973--- a/drivers/message/fusion/mptsas.c
36974+++ b/drivers/message/fusion/mptsas.c
36975@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
36976 return 0;
36977 }
36978
36979+static inline void
36980+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36981+{
36982+ if (phy_info->port_details) {
36983+ phy_info->port_details->rphy = rphy;
36984+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36985+ ioc->name, rphy));
36986+ }
36987+
36988+ if (rphy) {
36989+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36990+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36991+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36992+ ioc->name, rphy, rphy->dev.release));
36993+ }
36994+}
36995+
36996 /* no mutex */
36997 static void
36998 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
36999@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
37000 return NULL;
37001 }
37002
37003-static inline void
37004-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
37005-{
37006- if (phy_info->port_details) {
37007- phy_info->port_details->rphy = rphy;
37008- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
37009- ioc->name, rphy));
37010- }
37011-
37012- if (rphy) {
37013- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
37014- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
37015- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
37016- ioc->name, rphy, rphy->dev.release));
37017- }
37018-}
37019-
37020 static inline struct sas_port *
37021 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37022 {
37023diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37024index bd096ca..332cf76 100644
37025--- a/drivers/message/fusion/mptscsih.c
37026+++ b/drivers/message/fusion/mptscsih.c
37027@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37028
37029 h = shost_priv(SChost);
37030
37031- if (h) {
37032- if (h->info_kbuf == NULL)
37033- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37034- return h->info_kbuf;
37035- h->info_kbuf[0] = '\0';
37036+ if (!h)
37037+ return NULL;
37038
37039- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37040- h->info_kbuf[size-1] = '\0';
37041- }
37042+ if (h->info_kbuf == NULL)
37043+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37044+ return h->info_kbuf;
37045+ h->info_kbuf[0] = '\0';
37046+
37047+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37048+ h->info_kbuf[size-1] = '\0';
37049
37050 return h->info_kbuf;
37051 }
37052diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37053index efba702..59b2c0f 100644
37054--- a/drivers/message/i2o/i2o_config.c
37055+++ b/drivers/message/i2o/i2o_config.c
37056@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37057 struct i2o_message *msg;
37058 unsigned int iop;
37059
37060+ pax_track_stack();
37061+
37062 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37063 return -EFAULT;
37064
37065diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37066index 7045c45..c07b170 100644
37067--- a/drivers/message/i2o/i2o_proc.c
37068+++ b/drivers/message/i2o/i2o_proc.c
37069@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37070 "Array Controller Device"
37071 };
37072
37073-static char *chtostr(u8 * chars, int n)
37074-{
37075- char tmp[256];
37076- tmp[0] = 0;
37077- return strncat(tmp, (char *)chars, n);
37078-}
37079-
37080 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37081 char *group)
37082 {
37083@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37084
37085 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37086 seq_printf(seq, "%-#8x", ddm_table.module_id);
37087- seq_printf(seq, "%-29s",
37088- chtostr(ddm_table.module_name_version, 28));
37089+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37090 seq_printf(seq, "%9d ", ddm_table.data_size);
37091 seq_printf(seq, "%8d", ddm_table.code_size);
37092
37093@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37094
37095 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37096 seq_printf(seq, "%-#8x", dst->module_id);
37097- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37098- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37099+ seq_printf(seq, "%-.28s", dst->module_name_version);
37100+ seq_printf(seq, "%-.8s", dst->date);
37101 seq_printf(seq, "%8d ", dst->module_size);
37102 seq_printf(seq, "%8d ", dst->mpb_size);
37103 seq_printf(seq, "0x%04x", dst->module_flags);
37104@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37105 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37106 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37107 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37108- seq_printf(seq, "Vendor info : %s\n",
37109- chtostr((u8 *) (work32 + 2), 16));
37110- seq_printf(seq, "Product info : %s\n",
37111- chtostr((u8 *) (work32 + 6), 16));
37112- seq_printf(seq, "Description : %s\n",
37113- chtostr((u8 *) (work32 + 10), 16));
37114- seq_printf(seq, "Product rev. : %s\n",
37115- chtostr((u8 *) (work32 + 14), 8));
37116+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37117+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37118+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37119+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37120
37121 seq_printf(seq, "Serial number : ");
37122 print_serial_number(seq, (u8 *) (work32 + 16),
37123@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37124 }
37125
37126 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37127- seq_printf(seq, "Module name : %s\n",
37128- chtostr(result.module_name, 24));
37129- seq_printf(seq, "Module revision : %s\n",
37130- chtostr(result.module_rev, 8));
37131+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
37132+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37133
37134 seq_printf(seq, "Serial number : ");
37135 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37136@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37137 return 0;
37138 }
37139
37140- seq_printf(seq, "Device name : %s\n",
37141- chtostr(result.device_name, 64));
37142- seq_printf(seq, "Service name : %s\n",
37143- chtostr(result.service_name, 64));
37144- seq_printf(seq, "Physical name : %s\n",
37145- chtostr(result.physical_location, 64));
37146- seq_printf(seq, "Instance number : %s\n",
37147- chtostr(result.instance_number, 4));
37148+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
37149+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
37150+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37151+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37152
37153 return 0;
37154 }
37155diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37156index 27cf4af..b1205b8 100644
37157--- a/drivers/message/i2o/iop.c
37158+++ b/drivers/message/i2o/iop.c
37159@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37160
37161 spin_lock_irqsave(&c->context_list_lock, flags);
37162
37163- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37164- atomic_inc(&c->context_list_counter);
37165+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37166+ atomic_inc_unchecked(&c->context_list_counter);
37167
37168- entry->context = atomic_read(&c->context_list_counter);
37169+ entry->context = atomic_read_unchecked(&c->context_list_counter);
37170
37171 list_add(&entry->list, &c->context_list);
37172
37173@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37174
37175 #if BITS_PER_LONG == 64
37176 spin_lock_init(&c->context_list_lock);
37177- atomic_set(&c->context_list_counter, 0);
37178+ atomic_set_unchecked(&c->context_list_counter, 0);
37179 INIT_LIST_HEAD(&c->context_list);
37180 #endif
37181
37182diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37183index 78e3e85..66c9a0d 100644
37184--- a/drivers/mfd/ab3100-core.c
37185+++ b/drivers/mfd/ab3100-core.c
37186@@ -777,7 +777,7 @@ struct ab_family_id {
37187 char *name;
37188 };
37189
37190-static const struct ab_family_id ids[] __initdata = {
37191+static const struct ab_family_id ids[] __initconst = {
37192 /* AB3100 */
37193 {
37194 .id = 0xc0,
37195diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37196index 8d8c932..8104515 100644
37197--- a/drivers/mfd/wm8350-i2c.c
37198+++ b/drivers/mfd/wm8350-i2c.c
37199@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37200 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37201 int ret;
37202
37203+ pax_track_stack();
37204+
37205 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37206 return -EINVAL;
37207
37208diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37209index e4ff50b..4cc3f04 100644
37210--- a/drivers/misc/kgdbts.c
37211+++ b/drivers/misc/kgdbts.c
37212@@ -118,7 +118,7 @@
37213 } while (0)
37214 #define MAX_CONFIG_LEN 40
37215
37216-static struct kgdb_io kgdbts_io_ops;
37217+static const struct kgdb_io kgdbts_io_ops;
37218 static char get_buf[BUFMAX];
37219 static int get_buf_cnt;
37220 static char put_buf[BUFMAX];
37221@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37222 module_put(THIS_MODULE);
37223 }
37224
37225-static struct kgdb_io kgdbts_io_ops = {
37226+static const struct kgdb_io kgdbts_io_ops = {
37227 .name = "kgdbts",
37228 .read_char = kgdbts_get_char,
37229 .write_char = kgdbts_put_char,
37230diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37231index 37e7cfc..67cfb76 100644
37232--- a/drivers/misc/sgi-gru/gruhandles.c
37233+++ b/drivers/misc/sgi-gru/gruhandles.c
37234@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37235
37236 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37237 {
37238- atomic_long_inc(&mcs_op_statistics[op].count);
37239- atomic_long_add(clks, &mcs_op_statistics[op].total);
37240+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37241+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37242 if (mcs_op_statistics[op].max < clks)
37243 mcs_op_statistics[op].max = clks;
37244 }
37245diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37246index 3f2375c..467c6e6 100644
37247--- a/drivers/misc/sgi-gru/gruprocfs.c
37248+++ b/drivers/misc/sgi-gru/gruprocfs.c
37249@@ -32,9 +32,9 @@
37250
37251 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37252
37253-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37254+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37255 {
37256- unsigned long val = atomic_long_read(v);
37257+ unsigned long val = atomic_long_read_unchecked(v);
37258
37259 if (val)
37260 seq_printf(s, "%16lu %s\n", val, id);
37261@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37262 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37263
37264 for (op = 0; op < mcsop_last; op++) {
37265- count = atomic_long_read(&mcs_op_statistics[op].count);
37266- total = atomic_long_read(&mcs_op_statistics[op].total);
37267+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37268+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37269 max = mcs_op_statistics[op].max;
37270 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37271 count ? total / count : 0, max);
37272diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37273index 46990bc..4a251b5 100644
37274--- a/drivers/misc/sgi-gru/grutables.h
37275+++ b/drivers/misc/sgi-gru/grutables.h
37276@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37277 * GRU statistics.
37278 */
37279 struct gru_stats_s {
37280- atomic_long_t vdata_alloc;
37281- atomic_long_t vdata_free;
37282- atomic_long_t gts_alloc;
37283- atomic_long_t gts_free;
37284- atomic_long_t vdata_double_alloc;
37285- atomic_long_t gts_double_allocate;
37286- atomic_long_t assign_context;
37287- atomic_long_t assign_context_failed;
37288- atomic_long_t free_context;
37289- atomic_long_t load_user_context;
37290- atomic_long_t load_kernel_context;
37291- atomic_long_t lock_kernel_context;
37292- atomic_long_t unlock_kernel_context;
37293- atomic_long_t steal_user_context;
37294- atomic_long_t steal_kernel_context;
37295- atomic_long_t steal_context_failed;
37296- atomic_long_t nopfn;
37297- atomic_long_t break_cow;
37298- atomic_long_t asid_new;
37299- atomic_long_t asid_next;
37300- atomic_long_t asid_wrap;
37301- atomic_long_t asid_reuse;
37302- atomic_long_t intr;
37303- atomic_long_t intr_mm_lock_failed;
37304- atomic_long_t call_os;
37305- atomic_long_t call_os_offnode_reference;
37306- atomic_long_t call_os_check_for_bug;
37307- atomic_long_t call_os_wait_queue;
37308- atomic_long_t user_flush_tlb;
37309- atomic_long_t user_unload_context;
37310- atomic_long_t user_exception;
37311- atomic_long_t set_context_option;
37312- atomic_long_t migrate_check;
37313- atomic_long_t migrated_retarget;
37314- atomic_long_t migrated_unload;
37315- atomic_long_t migrated_unload_delay;
37316- atomic_long_t migrated_nopfn_retarget;
37317- atomic_long_t migrated_nopfn_unload;
37318- atomic_long_t tlb_dropin;
37319- atomic_long_t tlb_dropin_fail_no_asid;
37320- atomic_long_t tlb_dropin_fail_upm;
37321- atomic_long_t tlb_dropin_fail_invalid;
37322- atomic_long_t tlb_dropin_fail_range_active;
37323- atomic_long_t tlb_dropin_fail_idle;
37324- atomic_long_t tlb_dropin_fail_fmm;
37325- atomic_long_t tlb_dropin_fail_no_exception;
37326- atomic_long_t tlb_dropin_fail_no_exception_war;
37327- atomic_long_t tfh_stale_on_fault;
37328- atomic_long_t mmu_invalidate_range;
37329- atomic_long_t mmu_invalidate_page;
37330- atomic_long_t mmu_clear_flush_young;
37331- atomic_long_t flush_tlb;
37332- atomic_long_t flush_tlb_gru;
37333- atomic_long_t flush_tlb_gru_tgh;
37334- atomic_long_t flush_tlb_gru_zero_asid;
37335+ atomic_long_unchecked_t vdata_alloc;
37336+ atomic_long_unchecked_t vdata_free;
37337+ atomic_long_unchecked_t gts_alloc;
37338+ atomic_long_unchecked_t gts_free;
37339+ atomic_long_unchecked_t vdata_double_alloc;
37340+ atomic_long_unchecked_t gts_double_allocate;
37341+ atomic_long_unchecked_t assign_context;
37342+ atomic_long_unchecked_t assign_context_failed;
37343+ atomic_long_unchecked_t free_context;
37344+ atomic_long_unchecked_t load_user_context;
37345+ atomic_long_unchecked_t load_kernel_context;
37346+ atomic_long_unchecked_t lock_kernel_context;
37347+ atomic_long_unchecked_t unlock_kernel_context;
37348+ atomic_long_unchecked_t steal_user_context;
37349+ atomic_long_unchecked_t steal_kernel_context;
37350+ atomic_long_unchecked_t steal_context_failed;
37351+ atomic_long_unchecked_t nopfn;
37352+ atomic_long_unchecked_t break_cow;
37353+ atomic_long_unchecked_t asid_new;
37354+ atomic_long_unchecked_t asid_next;
37355+ atomic_long_unchecked_t asid_wrap;
37356+ atomic_long_unchecked_t asid_reuse;
37357+ atomic_long_unchecked_t intr;
37358+ atomic_long_unchecked_t intr_mm_lock_failed;
37359+ atomic_long_unchecked_t call_os;
37360+ atomic_long_unchecked_t call_os_offnode_reference;
37361+ atomic_long_unchecked_t call_os_check_for_bug;
37362+ atomic_long_unchecked_t call_os_wait_queue;
37363+ atomic_long_unchecked_t user_flush_tlb;
37364+ atomic_long_unchecked_t user_unload_context;
37365+ atomic_long_unchecked_t user_exception;
37366+ atomic_long_unchecked_t set_context_option;
37367+ atomic_long_unchecked_t migrate_check;
37368+ atomic_long_unchecked_t migrated_retarget;
37369+ atomic_long_unchecked_t migrated_unload;
37370+ atomic_long_unchecked_t migrated_unload_delay;
37371+ atomic_long_unchecked_t migrated_nopfn_retarget;
37372+ atomic_long_unchecked_t migrated_nopfn_unload;
37373+ atomic_long_unchecked_t tlb_dropin;
37374+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37375+ atomic_long_unchecked_t tlb_dropin_fail_upm;
37376+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
37377+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
37378+ atomic_long_unchecked_t tlb_dropin_fail_idle;
37379+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
37380+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37381+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37382+ atomic_long_unchecked_t tfh_stale_on_fault;
37383+ atomic_long_unchecked_t mmu_invalidate_range;
37384+ atomic_long_unchecked_t mmu_invalidate_page;
37385+ atomic_long_unchecked_t mmu_clear_flush_young;
37386+ atomic_long_unchecked_t flush_tlb;
37387+ atomic_long_unchecked_t flush_tlb_gru;
37388+ atomic_long_unchecked_t flush_tlb_gru_tgh;
37389+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37390
37391- atomic_long_t copy_gpa;
37392+ atomic_long_unchecked_t copy_gpa;
37393
37394- atomic_long_t mesq_receive;
37395- atomic_long_t mesq_receive_none;
37396- atomic_long_t mesq_send;
37397- atomic_long_t mesq_send_failed;
37398- atomic_long_t mesq_noop;
37399- atomic_long_t mesq_send_unexpected_error;
37400- atomic_long_t mesq_send_lb_overflow;
37401- atomic_long_t mesq_send_qlimit_reached;
37402- atomic_long_t mesq_send_amo_nacked;
37403- atomic_long_t mesq_send_put_nacked;
37404- atomic_long_t mesq_qf_not_full;
37405- atomic_long_t mesq_qf_locked;
37406- atomic_long_t mesq_qf_noop_not_full;
37407- atomic_long_t mesq_qf_switch_head_failed;
37408- atomic_long_t mesq_qf_unexpected_error;
37409- atomic_long_t mesq_noop_unexpected_error;
37410- atomic_long_t mesq_noop_lb_overflow;
37411- atomic_long_t mesq_noop_qlimit_reached;
37412- atomic_long_t mesq_noop_amo_nacked;
37413- atomic_long_t mesq_noop_put_nacked;
37414+ atomic_long_unchecked_t mesq_receive;
37415+ atomic_long_unchecked_t mesq_receive_none;
37416+ atomic_long_unchecked_t mesq_send;
37417+ atomic_long_unchecked_t mesq_send_failed;
37418+ atomic_long_unchecked_t mesq_noop;
37419+ atomic_long_unchecked_t mesq_send_unexpected_error;
37420+ atomic_long_unchecked_t mesq_send_lb_overflow;
37421+ atomic_long_unchecked_t mesq_send_qlimit_reached;
37422+ atomic_long_unchecked_t mesq_send_amo_nacked;
37423+ atomic_long_unchecked_t mesq_send_put_nacked;
37424+ atomic_long_unchecked_t mesq_qf_not_full;
37425+ atomic_long_unchecked_t mesq_qf_locked;
37426+ atomic_long_unchecked_t mesq_qf_noop_not_full;
37427+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
37428+ atomic_long_unchecked_t mesq_qf_unexpected_error;
37429+ atomic_long_unchecked_t mesq_noop_unexpected_error;
37430+ atomic_long_unchecked_t mesq_noop_lb_overflow;
37431+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
37432+ atomic_long_unchecked_t mesq_noop_amo_nacked;
37433+ atomic_long_unchecked_t mesq_noop_put_nacked;
37434
37435 };
37436
37437@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37438 cchop_deallocate, tghop_invalidate, mcsop_last};
37439
37440 struct mcs_op_statistic {
37441- atomic_long_t count;
37442- atomic_long_t total;
37443+ atomic_long_unchecked_t count;
37444+ atomic_long_unchecked_t total;
37445 unsigned long max;
37446 };
37447
37448@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37449
37450 #define STAT(id) do { \
37451 if (gru_options & OPT_STATS) \
37452- atomic_long_inc(&gru_stats.id); \
37453+ atomic_long_inc_unchecked(&gru_stats.id); \
37454 } while (0)
37455
37456 #ifdef CONFIG_SGI_GRU_DEBUG
37457diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37458index 2275126..12a9dbfb 100644
37459--- a/drivers/misc/sgi-xp/xp.h
37460+++ b/drivers/misc/sgi-xp/xp.h
37461@@ -289,7 +289,7 @@ struct xpc_interface {
37462 xpc_notify_func, void *);
37463 void (*received) (short, int, void *);
37464 enum xp_retval (*partid_to_nasids) (short, void *);
37465-};
37466+} __no_const;
37467
37468 extern struct xpc_interface xpc_interface;
37469
37470diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37471index b94d5f7..7f494c5 100644
37472--- a/drivers/misc/sgi-xp/xpc.h
37473+++ b/drivers/misc/sgi-xp/xpc.h
37474@@ -835,6 +835,7 @@ struct xpc_arch_operations {
37475 void (*received_payload) (struct xpc_channel *, void *);
37476 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37477 };
37478+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37479
37480 /* struct xpc_partition act_state values (for XPC HB) */
37481
37482@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37483 /* found in xpc_main.c */
37484 extern struct device *xpc_part;
37485 extern struct device *xpc_chan;
37486-extern struct xpc_arch_operations xpc_arch_ops;
37487+extern xpc_arch_operations_no_const xpc_arch_ops;
37488 extern int xpc_disengage_timelimit;
37489 extern int xpc_disengage_timedout;
37490 extern int xpc_activate_IRQ_rcvd;
37491diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37492index fd3688a..7e211a4 100644
37493--- a/drivers/misc/sgi-xp/xpc_main.c
37494+++ b/drivers/misc/sgi-xp/xpc_main.c
37495@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37496 .notifier_call = xpc_system_die,
37497 };
37498
37499-struct xpc_arch_operations xpc_arch_ops;
37500+xpc_arch_operations_no_const xpc_arch_ops;
37501
37502 /*
37503 * Timer function to enforce the timelimit on the partition disengage.
37504diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37505index 8b70e03..700bda6 100644
37506--- a/drivers/misc/sgi-xp/xpc_sn2.c
37507+++ b/drivers/misc/sgi-xp/xpc_sn2.c
37508@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37509 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37510 }
37511
37512-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37513+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37514 .setup_partitions = xpc_setup_partitions_sn2,
37515 .teardown_partitions = xpc_teardown_partitions_sn2,
37516 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37517@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37518 int ret;
37519 size_t buf_size;
37520
37521- xpc_arch_ops = xpc_arch_ops_sn2;
37522+ pax_open_kernel();
37523+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37524+ pax_close_kernel();
37525
37526 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37527 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37528diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37529index 8e08d71..7cb8c9b 100644
37530--- a/drivers/misc/sgi-xp/xpc_uv.c
37531+++ b/drivers/misc/sgi-xp/xpc_uv.c
37532@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37533 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37534 }
37535
37536-static struct xpc_arch_operations xpc_arch_ops_uv = {
37537+static const struct xpc_arch_operations xpc_arch_ops_uv = {
37538 .setup_partitions = xpc_setup_partitions_uv,
37539 .teardown_partitions = xpc_teardown_partitions_uv,
37540 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37541@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37542 int
37543 xpc_init_uv(void)
37544 {
37545- xpc_arch_ops = xpc_arch_ops_uv;
37546+ pax_open_kernel();
37547+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37548+ pax_close_kernel();
37549
37550 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37551 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37552diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37553index 6fd20b42..650efe3 100644
37554--- a/drivers/mmc/host/sdhci-pci.c
37555+++ b/drivers/mmc/host/sdhci-pci.c
37556@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37557 .probe = via_probe,
37558 };
37559
37560-static const struct pci_device_id pci_ids[] __devinitdata = {
37561+static const struct pci_device_id pci_ids[] __devinitconst = {
37562 {
37563 .vendor = PCI_VENDOR_ID_RICOH,
37564 .device = PCI_DEVICE_ID_RICOH_R5C822,
37565diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37566index e7563a9..5f90ce5 100644
37567--- a/drivers/mtd/chips/cfi_cmdset_0001.c
37568+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37569@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37570 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37571 unsigned long timeo = jiffies + HZ;
37572
37573+ pax_track_stack();
37574+
37575 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37576 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37577 goto sleep;
37578@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37579 unsigned long initial_adr;
37580 int initial_len = len;
37581
37582+ pax_track_stack();
37583+
37584 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37585 adr += chip->start;
37586 initial_adr = adr;
37587@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37588 int retries = 3;
37589 int ret;
37590
37591+ pax_track_stack();
37592+
37593 adr += chip->start;
37594
37595 retry:
37596diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37597index 0667a67..3ab97ed 100644
37598--- a/drivers/mtd/chips/cfi_cmdset_0020.c
37599+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37600@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37601 unsigned long cmd_addr;
37602 struct cfi_private *cfi = map->fldrv_priv;
37603
37604+ pax_track_stack();
37605+
37606 adr += chip->start;
37607
37608 /* Ensure cmd read/writes are aligned. */
37609@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37610 DECLARE_WAITQUEUE(wait, current);
37611 int wbufsize, z;
37612
37613+ pax_track_stack();
37614+
37615 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37616 if (adr & (map_bankwidth(map)-1))
37617 return -EINVAL;
37618@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37619 DECLARE_WAITQUEUE(wait, current);
37620 int ret = 0;
37621
37622+ pax_track_stack();
37623+
37624 adr += chip->start;
37625
37626 /* Let's determine this according to the interleave only once */
37627@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37628 unsigned long timeo = jiffies + HZ;
37629 DECLARE_WAITQUEUE(wait, current);
37630
37631+ pax_track_stack();
37632+
37633 adr += chip->start;
37634
37635 /* Let's determine this according to the interleave only once */
37636@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37637 unsigned long timeo = jiffies + HZ;
37638 DECLARE_WAITQUEUE(wait, current);
37639
37640+ pax_track_stack();
37641+
37642 adr += chip->start;
37643
37644 /* Let's determine this according to the interleave only once */
37645diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37646index 5bf5f46..c5de373 100644
37647--- a/drivers/mtd/devices/doc2000.c
37648+++ b/drivers/mtd/devices/doc2000.c
37649@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37650
37651 /* The ECC will not be calculated correctly if less than 512 is written */
37652 /* DBB-
37653- if (len != 0x200 && eccbuf)
37654+ if (len != 0x200)
37655 printk(KERN_WARNING
37656 "ECC needs a full sector write (adr: %lx size %lx)\n",
37657 (long) to, (long) len);
37658diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37659index 0990f78..bb4e8a4 100644
37660--- a/drivers/mtd/devices/doc2001.c
37661+++ b/drivers/mtd/devices/doc2001.c
37662@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37663 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37664
37665 /* Don't allow read past end of device */
37666- if (from >= this->totlen)
37667+ if (from >= this->totlen || !len)
37668 return -EINVAL;
37669
37670 /* Don't allow a single read to cross a 512-byte block boundary */
37671diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37672index e56d6b4..f07e6cf 100644
37673--- a/drivers/mtd/ftl.c
37674+++ b/drivers/mtd/ftl.c
37675@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37676 loff_t offset;
37677 uint16_t srcunitswap = cpu_to_le16(srcunit);
37678
37679+ pax_track_stack();
37680+
37681 eun = &part->EUNInfo[srcunit];
37682 xfer = &part->XferInfo[xferunit];
37683 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37684diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37685index 8aca552..146446e 100755
37686--- a/drivers/mtd/inftlcore.c
37687+++ b/drivers/mtd/inftlcore.c
37688@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37689 struct inftl_oob oob;
37690 size_t retlen;
37691
37692+ pax_track_stack();
37693+
37694 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37695 "pending=%d)\n", inftl, thisVUC, pendingblock);
37696
37697diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37698index 32e82ae..ed50953 100644
37699--- a/drivers/mtd/inftlmount.c
37700+++ b/drivers/mtd/inftlmount.c
37701@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37702 struct INFTLPartition *ip;
37703 size_t retlen;
37704
37705+ pax_track_stack();
37706+
37707 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37708
37709 /*
37710diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37711index 79bf40f..fe5f8fd 100644
37712--- a/drivers/mtd/lpddr/qinfo_probe.c
37713+++ b/drivers/mtd/lpddr/qinfo_probe.c
37714@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37715 {
37716 map_word pfow_val[4];
37717
37718+ pax_track_stack();
37719+
37720 /* Check identification string */
37721 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37722 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37723diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37724index 726a1b8..f46b460 100644
37725--- a/drivers/mtd/mtdchar.c
37726+++ b/drivers/mtd/mtdchar.c
37727@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37728 u_long size;
37729 struct mtd_info_user info;
37730
37731+ pax_track_stack();
37732+
37733 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37734
37735 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37736diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37737index 1002e18..26d82d5 100644
37738--- a/drivers/mtd/nftlcore.c
37739+++ b/drivers/mtd/nftlcore.c
37740@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37741 int inplace = 1;
37742 size_t retlen;
37743
37744+ pax_track_stack();
37745+
37746 memset(BlockMap, 0xff, sizeof(BlockMap));
37747 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37748
37749diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37750index 8b22b18..6fada85 100644
37751--- a/drivers/mtd/nftlmount.c
37752+++ b/drivers/mtd/nftlmount.c
37753@@ -23,6 +23,7 @@
37754 #include <asm/errno.h>
37755 #include <linux/delay.h>
37756 #include <linux/slab.h>
37757+#include <linux/sched.h>
37758 #include <linux/mtd/mtd.h>
37759 #include <linux/mtd/nand.h>
37760 #include <linux/mtd/nftl.h>
37761@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37762 struct mtd_info *mtd = nftl->mbd.mtd;
37763 unsigned int i;
37764
37765+ pax_track_stack();
37766+
37767 /* Assume logical EraseSize == physical erasesize for starting the scan.
37768 We'll sort it out later if we find a MediaHeader which says otherwise */
37769 /* Actually, we won't. The new DiskOnChip driver has already scanned
37770diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37771index 14cec04..d775b87 100644
37772--- a/drivers/mtd/ubi/build.c
37773+++ b/drivers/mtd/ubi/build.c
37774@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
37775 static int __init bytes_str_to_int(const char *str)
37776 {
37777 char *endp;
37778- unsigned long result;
37779+ unsigned long result, scale = 1;
37780
37781 result = simple_strtoul(str, &endp, 0);
37782 if (str == endp || result >= INT_MAX) {
37783@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
37784
37785 switch (*endp) {
37786 case 'G':
37787- result *= 1024;
37788+ scale *= 1024;
37789 case 'M':
37790- result *= 1024;
37791+ scale *= 1024;
37792 case 'K':
37793- result *= 1024;
37794+ scale *= 1024;
37795 if (endp[1] == 'i' && endp[2] == 'B')
37796 endp += 2;
37797 case '\0':
37798@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
37799 return -EINVAL;
37800 }
37801
37802- return result;
37803+ if ((intoverflow_t)result*scale >= INT_MAX) {
37804+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
37805+ str);
37806+ return -EINVAL;
37807+ }
37808+
37809+ return result*scale;
37810 }
37811
37812 /**
37813diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
37814index ab68886..ca405e8 100644
37815--- a/drivers/net/atlx/atl2.c
37816+++ b/drivers/net/atlx/atl2.c
37817@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
37818 */
37819
37820 #define ATL2_PARAM(X, desc) \
37821- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37822+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37823 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
37824 MODULE_PARM_DESC(X, desc);
37825 #else
37826diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
37827index 4874b2b..67f8526 100644
37828--- a/drivers/net/bnx2.c
37829+++ b/drivers/net/bnx2.c
37830@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
37831 int rc = 0;
37832 u32 magic, csum;
37833
37834+ pax_track_stack();
37835+
37836 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
37837 goto test_nvram_done;
37838
37839diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
37840index fd3eb07..8a6978d 100644
37841--- a/drivers/net/cxgb3/l2t.h
37842+++ b/drivers/net/cxgb3/l2t.h
37843@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37844 */
37845 struct l2t_skb_cb {
37846 arp_failure_handler_func arp_failure_handler;
37847-};
37848+} __no_const;
37849
37850 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37851
37852diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
37853index 032cfe0..411af379 100644
37854--- a/drivers/net/cxgb3/t3_hw.c
37855+++ b/drivers/net/cxgb3/t3_hw.c
37856@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
37857 int i, addr, ret;
37858 struct t3_vpd vpd;
37859
37860+ pax_track_stack();
37861+
37862 /*
37863 * Card information is normally at VPD_BASE but some early cards had
37864 * it at 0.
37865diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
37866index d1e0563..b9e129c 100644
37867--- a/drivers/net/e1000e/82571.c
37868+++ b/drivers/net/e1000e/82571.c
37869@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
37870 {
37871 struct e1000_hw *hw = &adapter->hw;
37872 struct e1000_mac_info *mac = &hw->mac;
37873- struct e1000_mac_operations *func = &mac->ops;
37874+ e1000_mac_operations_no_const *func = &mac->ops;
37875 u32 swsm = 0;
37876 u32 swsm2 = 0;
37877 bool force_clear_smbi = false;
37878@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
37879 temp = er32(ICRXDMTC);
37880 }
37881
37882-static struct e1000_mac_operations e82571_mac_ops = {
37883+static const struct e1000_mac_operations e82571_mac_ops = {
37884 /* .check_mng_mode: mac type dependent */
37885 /* .check_for_link: media type dependent */
37886 .id_led_init = e1000e_id_led_init,
37887@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
37888 .setup_led = e1000e_setup_led_generic,
37889 };
37890
37891-static struct e1000_phy_operations e82_phy_ops_igp = {
37892+static const struct e1000_phy_operations e82_phy_ops_igp = {
37893 .acquire_phy = e1000_get_hw_semaphore_82571,
37894 .check_reset_block = e1000e_check_reset_block_generic,
37895 .commit_phy = NULL,
37896@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
37897 .cfg_on_link_up = NULL,
37898 };
37899
37900-static struct e1000_phy_operations e82_phy_ops_m88 = {
37901+static const struct e1000_phy_operations e82_phy_ops_m88 = {
37902 .acquire_phy = e1000_get_hw_semaphore_82571,
37903 .check_reset_block = e1000e_check_reset_block_generic,
37904 .commit_phy = e1000e_phy_sw_reset,
37905@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
37906 .cfg_on_link_up = NULL,
37907 };
37908
37909-static struct e1000_phy_operations e82_phy_ops_bm = {
37910+static const struct e1000_phy_operations e82_phy_ops_bm = {
37911 .acquire_phy = e1000_get_hw_semaphore_82571,
37912 .check_reset_block = e1000e_check_reset_block_generic,
37913 .commit_phy = e1000e_phy_sw_reset,
37914@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
37915 .cfg_on_link_up = NULL,
37916 };
37917
37918-static struct e1000_nvm_operations e82571_nvm_ops = {
37919+static const struct e1000_nvm_operations e82571_nvm_ops = {
37920 .acquire_nvm = e1000_acquire_nvm_82571,
37921 .read_nvm = e1000e_read_nvm_eerd,
37922 .release_nvm = e1000_release_nvm_82571,
37923diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
37924index 47db9bd..fa58ccd 100644
37925--- a/drivers/net/e1000e/e1000.h
37926+++ b/drivers/net/e1000e/e1000.h
37927@@ -375,9 +375,9 @@ struct e1000_info {
37928 u32 pba;
37929 u32 max_hw_frame_size;
37930 s32 (*get_variants)(struct e1000_adapter *);
37931- struct e1000_mac_operations *mac_ops;
37932- struct e1000_phy_operations *phy_ops;
37933- struct e1000_nvm_operations *nvm_ops;
37934+ const struct e1000_mac_operations *mac_ops;
37935+ const struct e1000_phy_operations *phy_ops;
37936+ const struct e1000_nvm_operations *nvm_ops;
37937 };
37938
37939 /* hardware capability, feature, and workaround flags */
37940diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
37941index ae5d736..e9a93a1 100644
37942--- a/drivers/net/e1000e/es2lan.c
37943+++ b/drivers/net/e1000e/es2lan.c
37944@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
37945 {
37946 struct e1000_hw *hw = &adapter->hw;
37947 struct e1000_mac_info *mac = &hw->mac;
37948- struct e1000_mac_operations *func = &mac->ops;
37949+ e1000_mac_operations_no_const *func = &mac->ops;
37950
37951 /* Set media type */
37952 switch (adapter->pdev->device) {
37953@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
37954 temp = er32(ICRXDMTC);
37955 }
37956
37957-static struct e1000_mac_operations es2_mac_ops = {
37958+static const struct e1000_mac_operations es2_mac_ops = {
37959 .id_led_init = e1000e_id_led_init,
37960 .check_mng_mode = e1000e_check_mng_mode_generic,
37961 /* check_for_link dependent on media type */
37962@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
37963 .setup_led = e1000e_setup_led_generic,
37964 };
37965
37966-static struct e1000_phy_operations es2_phy_ops = {
37967+static const struct e1000_phy_operations es2_phy_ops = {
37968 .acquire_phy = e1000_acquire_phy_80003es2lan,
37969 .check_reset_block = e1000e_check_reset_block_generic,
37970 .commit_phy = e1000e_phy_sw_reset,
37971@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
37972 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
37973 };
37974
37975-static struct e1000_nvm_operations es2_nvm_ops = {
37976+static const struct e1000_nvm_operations es2_nvm_ops = {
37977 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
37978 .read_nvm = e1000e_read_nvm_eerd,
37979 .release_nvm = e1000_release_nvm_80003es2lan,
37980diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
37981index 11f3b7c..6381887 100644
37982--- a/drivers/net/e1000e/hw.h
37983+++ b/drivers/net/e1000e/hw.h
37984@@ -753,6 +753,7 @@ struct e1000_mac_operations {
37985 s32 (*setup_physical_interface)(struct e1000_hw *);
37986 s32 (*setup_led)(struct e1000_hw *);
37987 };
37988+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37989
37990 /* Function pointers for the PHY. */
37991 struct e1000_phy_operations {
37992@@ -774,6 +775,7 @@ struct e1000_phy_operations {
37993 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
37994 s32 (*cfg_on_link_up)(struct e1000_hw *);
37995 };
37996+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37997
37998 /* Function pointers for the NVM. */
37999 struct e1000_nvm_operations {
38000@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
38001 s32 (*validate_nvm)(struct e1000_hw *);
38002 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
38003 };
38004+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38005
38006 struct e1000_mac_info {
38007- struct e1000_mac_operations ops;
38008+ e1000_mac_operations_no_const ops;
38009
38010 u8 addr[6];
38011 u8 perm_addr[6];
38012@@ -823,7 +826,7 @@ struct e1000_mac_info {
38013 };
38014
38015 struct e1000_phy_info {
38016- struct e1000_phy_operations ops;
38017+ e1000_phy_operations_no_const ops;
38018
38019 enum e1000_phy_type type;
38020
38021@@ -857,7 +860,7 @@ struct e1000_phy_info {
38022 };
38023
38024 struct e1000_nvm_info {
38025- struct e1000_nvm_operations ops;
38026+ e1000_nvm_operations_no_const ops;
38027
38028 enum e1000_nvm_type type;
38029 enum e1000_nvm_override override;
38030diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38031index de39f9a..e28d3e0 100644
38032--- a/drivers/net/e1000e/ich8lan.c
38033+++ b/drivers/net/e1000e/ich8lan.c
38034@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38035 }
38036 }
38037
38038-static struct e1000_mac_operations ich8_mac_ops = {
38039+static const struct e1000_mac_operations ich8_mac_ops = {
38040 .id_led_init = e1000e_id_led_init,
38041 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38042 .check_for_link = e1000_check_for_copper_link_ich8lan,
38043@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38044 /* id_led_init dependent on mac type */
38045 };
38046
38047-static struct e1000_phy_operations ich8_phy_ops = {
38048+static const struct e1000_phy_operations ich8_phy_ops = {
38049 .acquire_phy = e1000_acquire_swflag_ich8lan,
38050 .check_reset_block = e1000_check_reset_block_ich8lan,
38051 .commit_phy = NULL,
38052@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38053 .write_phy_reg = e1000e_write_phy_reg_igp,
38054 };
38055
38056-static struct e1000_nvm_operations ich8_nvm_ops = {
38057+static const struct e1000_nvm_operations ich8_nvm_ops = {
38058 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38059 .read_nvm = e1000_read_nvm_ich8lan,
38060 .release_nvm = e1000_release_nvm_ich8lan,
38061diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38062index 18d5fbb..542d96d 100644
38063--- a/drivers/net/fealnx.c
38064+++ b/drivers/net/fealnx.c
38065@@ -151,7 +151,7 @@ struct chip_info {
38066 int flags;
38067 };
38068
38069-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38070+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38071 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38072 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38073 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38074diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38075index 0e5b54b..b503f82 100644
38076--- a/drivers/net/hamradio/6pack.c
38077+++ b/drivers/net/hamradio/6pack.c
38078@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38079 unsigned char buf[512];
38080 int count1;
38081
38082+ pax_track_stack();
38083+
38084 if (!count)
38085 return;
38086
38087diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38088index 5862282..7cce8cb 100644
38089--- a/drivers/net/ibmveth.c
38090+++ b/drivers/net/ibmveth.c
38091@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38092 NULL,
38093 };
38094
38095-static struct sysfs_ops veth_pool_ops = {
38096+static const struct sysfs_ops veth_pool_ops = {
38097 .show = veth_pool_show,
38098 .store = veth_pool_store,
38099 };
38100diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38101index d617f2d..57b5309 100644
38102--- a/drivers/net/igb/e1000_82575.c
38103+++ b/drivers/net/igb/e1000_82575.c
38104@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38105 wr32(E1000_VT_CTL, vt_ctl);
38106 }
38107
38108-static struct e1000_mac_operations e1000_mac_ops_82575 = {
38109+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38110 .reset_hw = igb_reset_hw_82575,
38111 .init_hw = igb_init_hw_82575,
38112 .check_for_link = igb_check_for_link_82575,
38113@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38114 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38115 };
38116
38117-static struct e1000_phy_operations e1000_phy_ops_82575 = {
38118+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38119 .acquire = igb_acquire_phy_82575,
38120 .get_cfg_done = igb_get_cfg_done_82575,
38121 .release = igb_release_phy_82575,
38122 };
38123
38124-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38125+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38126 .acquire = igb_acquire_nvm_82575,
38127 .read = igb_read_nvm_eerd,
38128 .release = igb_release_nvm_82575,
38129diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38130index 72081df..d855cf5 100644
38131--- a/drivers/net/igb/e1000_hw.h
38132+++ b/drivers/net/igb/e1000_hw.h
38133@@ -288,6 +288,7 @@ struct e1000_mac_operations {
38134 s32 (*read_mac_addr)(struct e1000_hw *);
38135 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38136 };
38137+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38138
38139 struct e1000_phy_operations {
38140 s32 (*acquire)(struct e1000_hw *);
38141@@ -303,6 +304,7 @@ struct e1000_phy_operations {
38142 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38143 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38144 };
38145+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38146
38147 struct e1000_nvm_operations {
38148 s32 (*acquire)(struct e1000_hw *);
38149@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38150 void (*release)(struct e1000_hw *);
38151 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38152 };
38153+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38154
38155 struct e1000_info {
38156 s32 (*get_invariants)(struct e1000_hw *);
38157@@ -321,7 +324,7 @@ struct e1000_info {
38158 extern const struct e1000_info e1000_82575_info;
38159
38160 struct e1000_mac_info {
38161- struct e1000_mac_operations ops;
38162+ e1000_mac_operations_no_const ops;
38163
38164 u8 addr[6];
38165 u8 perm_addr[6];
38166@@ -365,7 +368,7 @@ struct e1000_mac_info {
38167 };
38168
38169 struct e1000_phy_info {
38170- struct e1000_phy_operations ops;
38171+ e1000_phy_operations_no_const ops;
38172
38173 enum e1000_phy_type type;
38174
38175@@ -400,7 +403,7 @@ struct e1000_phy_info {
38176 };
38177
38178 struct e1000_nvm_info {
38179- struct e1000_nvm_operations ops;
38180+ e1000_nvm_operations_no_const ops;
38181
38182 enum e1000_nvm_type type;
38183 enum e1000_nvm_override override;
38184@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38185 s32 (*check_for_ack)(struct e1000_hw *, u16);
38186 s32 (*check_for_rst)(struct e1000_hw *, u16);
38187 };
38188+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38189
38190 struct e1000_mbx_stats {
38191 u32 msgs_tx;
38192@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38193 };
38194
38195 struct e1000_mbx_info {
38196- struct e1000_mbx_operations ops;
38197+ e1000_mbx_operations_no_const ops;
38198 struct e1000_mbx_stats stats;
38199 u32 timeout;
38200 u32 usec_delay;
38201diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38202index 1e8ce37..549c453 100644
38203--- a/drivers/net/igbvf/vf.h
38204+++ b/drivers/net/igbvf/vf.h
38205@@ -187,9 +187,10 @@ struct e1000_mac_operations {
38206 s32 (*read_mac_addr)(struct e1000_hw *);
38207 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38208 };
38209+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38210
38211 struct e1000_mac_info {
38212- struct e1000_mac_operations ops;
38213+ e1000_mac_operations_no_const ops;
38214 u8 addr[6];
38215 u8 perm_addr[6];
38216
38217@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38218 s32 (*check_for_ack)(struct e1000_hw *);
38219 s32 (*check_for_rst)(struct e1000_hw *);
38220 };
38221+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38222
38223 struct e1000_mbx_stats {
38224 u32 msgs_tx;
38225@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38226 };
38227
38228 struct e1000_mbx_info {
38229- struct e1000_mbx_operations ops;
38230+ e1000_mbx_operations_no_const ops;
38231 struct e1000_mbx_stats stats;
38232 u32 timeout;
38233 u32 usec_delay;
38234diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38235index aa7286b..a61394f 100644
38236--- a/drivers/net/iseries_veth.c
38237+++ b/drivers/net/iseries_veth.c
38238@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38239 NULL
38240 };
38241
38242-static struct sysfs_ops veth_cnx_sysfs_ops = {
38243+static const struct sysfs_ops veth_cnx_sysfs_ops = {
38244 .show = veth_cnx_attribute_show
38245 };
38246
38247@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38248 NULL
38249 };
38250
38251-static struct sysfs_ops veth_port_sysfs_ops = {
38252+static const struct sysfs_ops veth_port_sysfs_ops = {
38253 .show = veth_port_attribute_show
38254 };
38255
38256diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38257index 8aa44dc..fa1e797 100644
38258--- a/drivers/net/ixgb/ixgb_main.c
38259+++ b/drivers/net/ixgb/ixgb_main.c
38260@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38261 u32 rctl;
38262 int i;
38263
38264+ pax_track_stack();
38265+
38266 /* Check for Promiscuous and All Multicast modes */
38267
38268 rctl = IXGB_READ_REG(hw, RCTL);
38269diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38270index af35e1d..8781785 100644
38271--- a/drivers/net/ixgb/ixgb_param.c
38272+++ b/drivers/net/ixgb/ixgb_param.c
38273@@ -260,6 +260,9 @@ void __devinit
38274 ixgb_check_options(struct ixgb_adapter *adapter)
38275 {
38276 int bd = adapter->bd_number;
38277+
38278+ pax_track_stack();
38279+
38280 if (bd >= IXGB_MAX_NIC) {
38281 printk(KERN_NOTICE
38282 "Warning: no configuration for board #%i\n", bd);
38283diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38284index b17aa73..ed74540 100644
38285--- a/drivers/net/ixgbe/ixgbe_type.h
38286+++ b/drivers/net/ixgbe/ixgbe_type.h
38287@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38288 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38289 s32 (*update_checksum)(struct ixgbe_hw *);
38290 };
38291+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38292
38293 struct ixgbe_mac_operations {
38294 s32 (*init_hw)(struct ixgbe_hw *);
38295@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38296 /* Flow Control */
38297 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38298 };
38299+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38300
38301 struct ixgbe_phy_operations {
38302 s32 (*identify)(struct ixgbe_hw *);
38303@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38304 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38305 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38306 };
38307+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38308
38309 struct ixgbe_eeprom_info {
38310- struct ixgbe_eeprom_operations ops;
38311+ ixgbe_eeprom_operations_no_const ops;
38312 enum ixgbe_eeprom_type type;
38313 u32 semaphore_delay;
38314 u16 word_size;
38315@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38316 };
38317
38318 struct ixgbe_mac_info {
38319- struct ixgbe_mac_operations ops;
38320+ ixgbe_mac_operations_no_const ops;
38321 enum ixgbe_mac_type type;
38322 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38323 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38324@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38325 };
38326
38327 struct ixgbe_phy_info {
38328- struct ixgbe_phy_operations ops;
38329+ ixgbe_phy_operations_no_const ops;
38330 struct mdio_if_info mdio;
38331 enum ixgbe_phy_type type;
38332 u32 id;
38333diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38334index 291a505..2543756 100644
38335--- a/drivers/net/mlx4/main.c
38336+++ b/drivers/net/mlx4/main.c
38337@@ -38,6 +38,7 @@
38338 #include <linux/errno.h>
38339 #include <linux/pci.h>
38340 #include <linux/dma-mapping.h>
38341+#include <linux/sched.h>
38342
38343 #include <linux/mlx4/device.h>
38344 #include <linux/mlx4/doorbell.h>
38345@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38346 u64 icm_size;
38347 int err;
38348
38349+ pax_track_stack();
38350+
38351 err = mlx4_QUERY_FW(dev);
38352 if (err) {
38353 if (err == -EACCES)
38354diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38355index 2dce134..fa5ce75 100644
38356--- a/drivers/net/niu.c
38357+++ b/drivers/net/niu.c
38358@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38359 int i, num_irqs, err;
38360 u8 first_ldg;
38361
38362+ pax_track_stack();
38363+
38364 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38365 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38366 ldg_num_map[i] = first_ldg + i;
38367diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38368index c1b3f09..97cd8c4 100644
38369--- a/drivers/net/pcnet32.c
38370+++ b/drivers/net/pcnet32.c
38371@@ -79,7 +79,7 @@ static int cards_found;
38372 /*
38373 * VLB I/O addresses
38374 */
38375-static unsigned int pcnet32_portlist[] __initdata =
38376+static unsigned int pcnet32_portlist[] __devinitdata =
38377 { 0x300, 0x320, 0x340, 0x360, 0 };
38378
38379 static int pcnet32_debug = 0;
38380@@ -267,7 +267,7 @@ struct pcnet32_private {
38381 struct sk_buff **rx_skbuff;
38382 dma_addr_t *tx_dma_addr;
38383 dma_addr_t *rx_dma_addr;
38384- struct pcnet32_access a;
38385+ struct pcnet32_access *a;
38386 spinlock_t lock; /* Guard lock */
38387 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38388 unsigned int rx_ring_size; /* current rx ring size */
38389@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38390 u16 val;
38391
38392 netif_wake_queue(dev);
38393- val = lp->a.read_csr(ioaddr, CSR3);
38394+ val = lp->a->read_csr(ioaddr, CSR3);
38395 val &= 0x00ff;
38396- lp->a.write_csr(ioaddr, CSR3, val);
38397+ lp->a->write_csr(ioaddr, CSR3, val);
38398 napi_enable(&lp->napi);
38399 }
38400
38401@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38402 r = mii_link_ok(&lp->mii_if);
38403 } else if (lp->chip_version >= PCNET32_79C970A) {
38404 ulong ioaddr = dev->base_addr; /* card base I/O address */
38405- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38406+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38407 } else { /* can not detect link on really old chips */
38408 r = 1;
38409 }
38410@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38411 pcnet32_netif_stop(dev);
38412
38413 spin_lock_irqsave(&lp->lock, flags);
38414- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38415+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38416
38417 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38418
38419@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38420 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38421 {
38422 struct pcnet32_private *lp = netdev_priv(dev);
38423- struct pcnet32_access *a = &lp->a; /* access to registers */
38424+ struct pcnet32_access *a = lp->a; /* access to registers */
38425 ulong ioaddr = dev->base_addr; /* card base I/O address */
38426 struct sk_buff *skb; /* sk buff */
38427 int x, i; /* counters */
38428@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38429 pcnet32_netif_stop(dev);
38430
38431 spin_lock_irqsave(&lp->lock, flags);
38432- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38433+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38434
38435 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38436
38437 /* Reset the PCNET32 */
38438- lp->a.reset(ioaddr);
38439- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38440+ lp->a->reset(ioaddr);
38441+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38442
38443 /* switch pcnet32 to 32bit mode */
38444- lp->a.write_bcr(ioaddr, 20, 2);
38445+ lp->a->write_bcr(ioaddr, 20, 2);
38446
38447 /* purge & init rings but don't actually restart */
38448 pcnet32_restart(dev, 0x0000);
38449
38450- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38451+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38452
38453 /* Initialize Transmit buffers. */
38454 size = data_len + 15;
38455@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38456
38457 /* set int loopback in CSR15 */
38458 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38459- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38460+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38461
38462 teststatus = cpu_to_le16(0x8000);
38463- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38464+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38465
38466 /* Check status of descriptors */
38467 for (x = 0; x < numbuffs; x++) {
38468@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38469 }
38470 }
38471
38472- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38473+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38474 wmb();
38475 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38476 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38477@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38478 pcnet32_restart(dev, CSR0_NORMAL);
38479 } else {
38480 pcnet32_purge_rx_ring(dev);
38481- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38482+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38483 }
38484 spin_unlock_irqrestore(&lp->lock, flags);
38485
38486@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38487 static void pcnet32_led_blink_callback(struct net_device *dev)
38488 {
38489 struct pcnet32_private *lp = netdev_priv(dev);
38490- struct pcnet32_access *a = &lp->a;
38491+ struct pcnet32_access *a = lp->a;
38492 ulong ioaddr = dev->base_addr;
38493 unsigned long flags;
38494 int i;
38495@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38496 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38497 {
38498 struct pcnet32_private *lp = netdev_priv(dev);
38499- struct pcnet32_access *a = &lp->a;
38500+ struct pcnet32_access *a = lp->a;
38501 ulong ioaddr = dev->base_addr;
38502 unsigned long flags;
38503 int i, regs[4];
38504@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38505 {
38506 int csr5;
38507 struct pcnet32_private *lp = netdev_priv(dev);
38508- struct pcnet32_access *a = &lp->a;
38509+ struct pcnet32_access *a = lp->a;
38510 ulong ioaddr = dev->base_addr;
38511 int ticks;
38512
38513@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38514 spin_lock_irqsave(&lp->lock, flags);
38515 if (pcnet32_tx(dev)) {
38516 /* reset the chip to clear the error condition, then restart */
38517- lp->a.reset(ioaddr);
38518- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38519+ lp->a->reset(ioaddr);
38520+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38521 pcnet32_restart(dev, CSR0_START);
38522 netif_wake_queue(dev);
38523 }
38524@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38525 __napi_complete(napi);
38526
38527 /* clear interrupt masks */
38528- val = lp->a.read_csr(ioaddr, CSR3);
38529+ val = lp->a->read_csr(ioaddr, CSR3);
38530 val &= 0x00ff;
38531- lp->a.write_csr(ioaddr, CSR3, val);
38532+ lp->a->write_csr(ioaddr, CSR3, val);
38533
38534 /* Set interrupt enable. */
38535- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38536+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38537
38538 spin_unlock_irqrestore(&lp->lock, flags);
38539 }
38540@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38541 int i, csr0;
38542 u16 *buff = ptr;
38543 struct pcnet32_private *lp = netdev_priv(dev);
38544- struct pcnet32_access *a = &lp->a;
38545+ struct pcnet32_access *a = lp->a;
38546 ulong ioaddr = dev->base_addr;
38547 unsigned long flags;
38548
38549@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38550 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38551 if (lp->phymask & (1 << j)) {
38552 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38553- lp->a.write_bcr(ioaddr, 33,
38554+ lp->a->write_bcr(ioaddr, 33,
38555 (j << 5) | i);
38556- *buff++ = lp->a.read_bcr(ioaddr, 34);
38557+ *buff++ = lp->a->read_bcr(ioaddr, 34);
38558 }
38559 }
38560 }
38561@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38562 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38563 lp->options |= PCNET32_PORT_FD;
38564
38565- lp->a = *a;
38566+ lp->a = a;
38567
38568 /* prior to register_netdev, dev->name is not yet correct */
38569 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38570@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38571 if (lp->mii) {
38572 /* lp->phycount and lp->phymask are set to 0 by memset above */
38573
38574- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38575+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38576 /* scan for PHYs */
38577 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38578 unsigned short id1, id2;
38579@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38580 "Found PHY %04x:%04x at address %d.\n",
38581 id1, id2, i);
38582 }
38583- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38584+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38585 if (lp->phycount > 1) {
38586 lp->options |= PCNET32_PORT_MII;
38587 }
38588@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38589 }
38590
38591 /* Reset the PCNET32 */
38592- lp->a.reset(ioaddr);
38593+ lp->a->reset(ioaddr);
38594
38595 /* switch pcnet32 to 32bit mode */
38596- lp->a.write_bcr(ioaddr, 20, 2);
38597+ lp->a->write_bcr(ioaddr, 20, 2);
38598
38599 if (netif_msg_ifup(lp))
38600 printk(KERN_DEBUG
38601@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38602 (u32) (lp->init_dma_addr));
38603
38604 /* set/reset autoselect bit */
38605- val = lp->a.read_bcr(ioaddr, 2) & ~2;
38606+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
38607 if (lp->options & PCNET32_PORT_ASEL)
38608 val |= 2;
38609- lp->a.write_bcr(ioaddr, 2, val);
38610+ lp->a->write_bcr(ioaddr, 2, val);
38611
38612 /* handle full duplex setting */
38613 if (lp->mii_if.full_duplex) {
38614- val = lp->a.read_bcr(ioaddr, 9) & ~3;
38615+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
38616 if (lp->options & PCNET32_PORT_FD) {
38617 val |= 1;
38618 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38619@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38620 if (lp->chip_version == 0x2627)
38621 val |= 3;
38622 }
38623- lp->a.write_bcr(ioaddr, 9, val);
38624+ lp->a->write_bcr(ioaddr, 9, val);
38625 }
38626
38627 /* set/reset GPSI bit in test register */
38628- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38629+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38630 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38631 val |= 0x10;
38632- lp->a.write_csr(ioaddr, 124, val);
38633+ lp->a->write_csr(ioaddr, 124, val);
38634
38635 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38636 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38637@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38638 * duplex, and/or enable auto negotiation, and clear DANAS
38639 */
38640 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38641- lp->a.write_bcr(ioaddr, 32,
38642- lp->a.read_bcr(ioaddr, 32) | 0x0080);
38643+ lp->a->write_bcr(ioaddr, 32,
38644+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
38645 /* disable Auto Negotiation, set 10Mpbs, HD */
38646- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38647+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38648 if (lp->options & PCNET32_PORT_FD)
38649 val |= 0x10;
38650 if (lp->options & PCNET32_PORT_100)
38651 val |= 0x08;
38652- lp->a.write_bcr(ioaddr, 32, val);
38653+ lp->a->write_bcr(ioaddr, 32, val);
38654 } else {
38655 if (lp->options & PCNET32_PORT_ASEL) {
38656- lp->a.write_bcr(ioaddr, 32,
38657- lp->a.read_bcr(ioaddr,
38658+ lp->a->write_bcr(ioaddr, 32,
38659+ lp->a->read_bcr(ioaddr,
38660 32) | 0x0080);
38661 /* enable auto negotiate, setup, disable fd */
38662- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38663+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38664 val |= 0x20;
38665- lp->a.write_bcr(ioaddr, 32, val);
38666+ lp->a->write_bcr(ioaddr, 32, val);
38667 }
38668 }
38669 } else {
38670@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38671 * There is really no good other way to handle multiple PHYs
38672 * other than turning off all automatics
38673 */
38674- val = lp->a.read_bcr(ioaddr, 2);
38675- lp->a.write_bcr(ioaddr, 2, val & ~2);
38676- val = lp->a.read_bcr(ioaddr, 32);
38677- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38678+ val = lp->a->read_bcr(ioaddr, 2);
38679+ lp->a->write_bcr(ioaddr, 2, val & ~2);
38680+ val = lp->a->read_bcr(ioaddr, 32);
38681+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38682
38683 if (!(lp->options & PCNET32_PORT_ASEL)) {
38684 /* setup ecmd */
38685@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38686 ecmd.speed =
38687 lp->
38688 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38689- bcr9 = lp->a.read_bcr(ioaddr, 9);
38690+ bcr9 = lp->a->read_bcr(ioaddr, 9);
38691
38692 if (lp->options & PCNET32_PORT_FD) {
38693 ecmd.duplex = DUPLEX_FULL;
38694@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38695 ecmd.duplex = DUPLEX_HALF;
38696 bcr9 |= ~(1 << 0);
38697 }
38698- lp->a.write_bcr(ioaddr, 9, bcr9);
38699+ lp->a->write_bcr(ioaddr, 9, bcr9);
38700 }
38701
38702 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38703@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38704
38705 #ifdef DO_DXSUFLO
38706 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38707- val = lp->a.read_csr(ioaddr, CSR3);
38708+ val = lp->a->read_csr(ioaddr, CSR3);
38709 val |= 0x40;
38710- lp->a.write_csr(ioaddr, CSR3, val);
38711+ lp->a->write_csr(ioaddr, CSR3, val);
38712 }
38713 #endif
38714
38715@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38716 napi_enable(&lp->napi);
38717
38718 /* Re-initialize the PCNET32, and start it when done. */
38719- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38720- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38721+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38722+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38723
38724- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38725- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38726+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38727+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38728
38729 netif_start_queue(dev);
38730
38731@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38732
38733 i = 0;
38734 while (i++ < 100)
38735- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38736+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38737 break;
38738 /*
38739 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38740 * reports that doing so triggers a bug in the '974.
38741 */
38742- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38743+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38744
38745 if (netif_msg_ifup(lp))
38746 printk(KERN_DEBUG
38747 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38748 dev->name, i,
38749 (u32) (lp->init_dma_addr),
38750- lp->a.read_csr(ioaddr, CSR0));
38751+ lp->a->read_csr(ioaddr, CSR0));
38752
38753 spin_unlock_irqrestore(&lp->lock, flags);
38754
38755@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38756 * Switch back to 16bit mode to avoid problems with dumb
38757 * DOS packet driver after a warm reboot
38758 */
38759- lp->a.write_bcr(ioaddr, 20, 4);
38760+ lp->a->write_bcr(ioaddr, 20, 4);
38761
38762 err_free_irq:
38763 spin_unlock_irqrestore(&lp->lock, flags);
38764@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38765
38766 /* wait for stop */
38767 for (i = 0; i < 100; i++)
38768- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38769+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38770 break;
38771
38772 if (i >= 100 && netif_msg_drv(lp))
38773@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38774 return;
38775
38776 /* ReInit Ring */
38777- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38778+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38779 i = 0;
38780 while (i++ < 1000)
38781- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38782+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38783 break;
38784
38785- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
38786+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
38787 }
38788
38789 static void pcnet32_tx_timeout(struct net_device *dev)
38790@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
38791 if (pcnet32_debug & NETIF_MSG_DRV)
38792 printk(KERN_ERR
38793 "%s: transmit timed out, status %4.4x, resetting.\n",
38794- dev->name, lp->a.read_csr(ioaddr, CSR0));
38795- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38796+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38797+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38798 dev->stats.tx_errors++;
38799 if (netif_msg_tx_err(lp)) {
38800 int i;
38801@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38802 if (netif_msg_tx_queued(lp)) {
38803 printk(KERN_DEBUG
38804 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
38805- dev->name, lp->a.read_csr(ioaddr, CSR0));
38806+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38807 }
38808
38809 /* Default status -- will not enable Successful-TxDone
38810@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38811 dev->stats.tx_bytes += skb->len;
38812
38813 /* Trigger an immediate send poll. */
38814- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38815+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38816
38817 dev->trans_start = jiffies;
38818
38819@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
38820
38821 spin_lock(&lp->lock);
38822
38823- csr0 = lp->a.read_csr(ioaddr, CSR0);
38824+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38825 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
38826 if (csr0 == 0xffff) {
38827 break; /* PCMCIA remove happened */
38828 }
38829 /* Acknowledge all of the current interrupt sources ASAP. */
38830- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38831+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38832
38833 if (netif_msg_intr(lp))
38834 printk(KERN_DEBUG
38835 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
38836- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
38837+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
38838
38839 /* Log misc errors. */
38840 if (csr0 & 0x4000)
38841@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
38842 if (napi_schedule_prep(&lp->napi)) {
38843 u16 val;
38844 /* set interrupt masks */
38845- val = lp->a.read_csr(ioaddr, CSR3);
38846+ val = lp->a->read_csr(ioaddr, CSR3);
38847 val |= 0x5f00;
38848- lp->a.write_csr(ioaddr, CSR3, val);
38849+ lp->a->write_csr(ioaddr, CSR3, val);
38850
38851 __napi_schedule(&lp->napi);
38852 break;
38853 }
38854- csr0 = lp->a.read_csr(ioaddr, CSR0);
38855+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38856 }
38857
38858 if (netif_msg_intr(lp))
38859 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
38860- dev->name, lp->a.read_csr(ioaddr, CSR0));
38861+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38862
38863 spin_unlock(&lp->lock);
38864
38865@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
38866
38867 spin_lock_irqsave(&lp->lock, flags);
38868
38869- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38870+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38871
38872 if (netif_msg_ifdown(lp))
38873 printk(KERN_DEBUG
38874 "%s: Shutting down ethercard, status was %2.2x.\n",
38875- dev->name, lp->a.read_csr(ioaddr, CSR0));
38876+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38877
38878 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
38879- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38880+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38881
38882 /*
38883 * Switch back to 16bit mode to avoid problems with dumb
38884 * DOS packet driver after a warm reboot
38885 */
38886- lp->a.write_bcr(ioaddr, 20, 4);
38887+ lp->a->write_bcr(ioaddr, 20, 4);
38888
38889 spin_unlock_irqrestore(&lp->lock, flags);
38890
38891@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
38892 unsigned long flags;
38893
38894 spin_lock_irqsave(&lp->lock, flags);
38895- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38896+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38897 spin_unlock_irqrestore(&lp->lock, flags);
38898
38899 return &dev->stats;
38900@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
38901 if (dev->flags & IFF_ALLMULTI) {
38902 ib->filter[0] = cpu_to_le32(~0U);
38903 ib->filter[1] = cpu_to_le32(~0U);
38904- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38905- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38906- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38907- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38908+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38909+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38910+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38911+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38912 return;
38913 }
38914 /* clear the multicast filter */
38915@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
38916 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
38917 }
38918 for (i = 0; i < 4; i++)
38919- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
38920+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
38921 le16_to_cpu(mcast_table[i]));
38922 return;
38923 }
38924@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38925
38926 spin_lock_irqsave(&lp->lock, flags);
38927 suspended = pcnet32_suspend(dev, &flags, 0);
38928- csr15 = lp->a.read_csr(ioaddr, CSR15);
38929+ csr15 = lp->a->read_csr(ioaddr, CSR15);
38930 if (dev->flags & IFF_PROMISC) {
38931 /* Log any net taps. */
38932 if (netif_msg_hw(lp))
38933@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38934 lp->init_block->mode =
38935 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
38936 7);
38937- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
38938+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
38939 } else {
38940 lp->init_block->mode =
38941 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
38942- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38943+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38944 pcnet32_load_multicast(dev);
38945 }
38946
38947 if (suspended) {
38948 int csr5;
38949 /* clear SUSPEND (SPND) - CSR5 bit 0 */
38950- csr5 = lp->a.read_csr(ioaddr, CSR5);
38951- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38952+ csr5 = lp->a->read_csr(ioaddr, CSR5);
38953+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38954 } else {
38955- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38956+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38957 pcnet32_restart(dev, CSR0_NORMAL);
38958 netif_wake_queue(dev);
38959 }
38960@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
38961 if (!lp->mii)
38962 return 0;
38963
38964- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38965- val_out = lp->a.read_bcr(ioaddr, 34);
38966+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38967+ val_out = lp->a->read_bcr(ioaddr, 34);
38968
38969 return val_out;
38970 }
38971@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
38972 if (!lp->mii)
38973 return;
38974
38975- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38976- lp->a.write_bcr(ioaddr, 34, val);
38977+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38978+ lp->a->write_bcr(ioaddr, 34, val);
38979 }
38980
38981 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38982@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38983 curr_link = mii_link_ok(&lp->mii_if);
38984 } else {
38985 ulong ioaddr = dev->base_addr; /* card base I/O address */
38986- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38987+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38988 }
38989 if (!curr_link) {
38990 if (prev_link || verbose) {
38991@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38992 (ecmd.duplex ==
38993 DUPLEX_FULL) ? "full" : "half");
38994 }
38995- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
38996+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
38997 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
38998 if (lp->mii_if.full_duplex)
38999 bcr9 |= (1 << 0);
39000 else
39001 bcr9 &= ~(1 << 0);
39002- lp->a.write_bcr(dev->base_addr, 9, bcr9);
39003+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
39004 }
39005 } else {
39006 if (netif_msg_link(lp))
39007diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
39008index 7cc9898..6eb50d3 100644
39009--- a/drivers/net/sis190.c
39010+++ b/drivers/net/sis190.c
39011@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
39012 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
39013 struct net_device *dev)
39014 {
39015- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
39016+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
39017 struct sis190_private *tp = netdev_priv(dev);
39018 struct pci_dev *isa_bridge;
39019 u8 reg, tmp8;
39020diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39021index e13685a..60c948c 100644
39022--- a/drivers/net/sundance.c
39023+++ b/drivers/net/sundance.c
39024@@ -225,7 +225,7 @@ enum {
39025 struct pci_id_info {
39026 const char *name;
39027 };
39028-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39029+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39030 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39031 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39032 {"D-Link DFE-580TX 4 port Server Adapter"},
39033diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39034index 529f55a..cccaa18 100644
39035--- a/drivers/net/tg3.h
39036+++ b/drivers/net/tg3.h
39037@@ -95,6 +95,7 @@
39038 #define CHIPREV_ID_5750_A0 0x4000
39039 #define CHIPREV_ID_5750_A1 0x4001
39040 #define CHIPREV_ID_5750_A3 0x4003
39041+#define CHIPREV_ID_5750_C1 0x4201
39042 #define CHIPREV_ID_5750_C2 0x4202
39043 #define CHIPREV_ID_5752_A0_HW 0x5000
39044 #define CHIPREV_ID_5752_A0 0x6000
39045diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39046index b9db1b5..720f9ce 100644
39047--- a/drivers/net/tokenring/abyss.c
39048+++ b/drivers/net/tokenring/abyss.c
39049@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39050
39051 static int __init abyss_init (void)
39052 {
39053- abyss_netdev_ops = tms380tr_netdev_ops;
39054+ pax_open_kernel();
39055+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39056
39057- abyss_netdev_ops.ndo_open = abyss_open;
39058- abyss_netdev_ops.ndo_stop = abyss_close;
39059+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39060+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39061+ pax_close_kernel();
39062
39063 return pci_register_driver(&abyss_driver);
39064 }
39065diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39066index 456f8bf..373e56d 100644
39067--- a/drivers/net/tokenring/madgemc.c
39068+++ b/drivers/net/tokenring/madgemc.c
39069@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39070
39071 static int __init madgemc_init (void)
39072 {
39073- madgemc_netdev_ops = tms380tr_netdev_ops;
39074- madgemc_netdev_ops.ndo_open = madgemc_open;
39075- madgemc_netdev_ops.ndo_stop = madgemc_close;
39076+ pax_open_kernel();
39077+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39078+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39079+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39080+ pax_close_kernel();
39081
39082 return mca_register_driver (&madgemc_driver);
39083 }
39084diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39085index 16e8783..925bd49 100644
39086--- a/drivers/net/tokenring/proteon.c
39087+++ b/drivers/net/tokenring/proteon.c
39088@@ -353,9 +353,11 @@ static int __init proteon_init(void)
39089 struct platform_device *pdev;
39090 int i, num = 0, err = 0;
39091
39092- proteon_netdev_ops = tms380tr_netdev_ops;
39093- proteon_netdev_ops.ndo_open = proteon_open;
39094- proteon_netdev_ops.ndo_stop = tms380tr_close;
39095+ pax_open_kernel();
39096+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39097+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39098+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39099+ pax_close_kernel();
39100
39101 err = platform_driver_register(&proteon_driver);
39102 if (err)
39103diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39104index 46db5c5..37c1536 100644
39105--- a/drivers/net/tokenring/skisa.c
39106+++ b/drivers/net/tokenring/skisa.c
39107@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39108 struct platform_device *pdev;
39109 int i, num = 0, err = 0;
39110
39111- sk_isa_netdev_ops = tms380tr_netdev_ops;
39112- sk_isa_netdev_ops.ndo_open = sk_isa_open;
39113- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39114+ pax_open_kernel();
39115+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39116+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39117+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39118+ pax_close_kernel();
39119
39120 err = platform_driver_register(&sk_isa_driver);
39121 if (err)
39122diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39123index 74e5ba4..5cf6bc9 100644
39124--- a/drivers/net/tulip/de2104x.c
39125+++ b/drivers/net/tulip/de2104x.c
39126@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39127 struct de_srom_info_leaf *il;
39128 void *bufp;
39129
39130+ pax_track_stack();
39131+
39132 /* download entire eeprom */
39133 for (i = 0; i < DE_EEPROM_WORDS; i++)
39134 ((__le16 *)ee_data)[i] =
39135diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39136index a8349b7..90f9dfe 100644
39137--- a/drivers/net/tulip/de4x5.c
39138+++ b/drivers/net/tulip/de4x5.c
39139@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39140 for (i=0; i<ETH_ALEN; i++) {
39141 tmp.addr[i] = dev->dev_addr[i];
39142 }
39143- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39144+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39145 break;
39146
39147 case DE4X5_SET_HWADDR: /* Set the hardware address */
39148@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39149 spin_lock_irqsave(&lp->lock, flags);
39150 memcpy(&statbuf, &lp->pktStats, ioc->len);
39151 spin_unlock_irqrestore(&lp->lock, flags);
39152- if (copy_to_user(ioc->data, &statbuf, ioc->len))
39153+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39154 return -EFAULT;
39155 break;
39156 }
39157diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39158index 391acd3..56d11cd 100644
39159--- a/drivers/net/tulip/eeprom.c
39160+++ b/drivers/net/tulip/eeprom.c
39161@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39162 {NULL}};
39163
39164
39165-static const char *block_name[] __devinitdata = {
39166+static const char *block_name[] __devinitconst = {
39167 "21140 non-MII",
39168 "21140 MII PHY",
39169 "21142 Serial PHY",
39170diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39171index b38d3b7..b1cff23 100644
39172--- a/drivers/net/tulip/winbond-840.c
39173+++ b/drivers/net/tulip/winbond-840.c
39174@@ -235,7 +235,7 @@ struct pci_id_info {
39175 int drv_flags; /* Driver use, intended as capability flags. */
39176 };
39177
39178-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39179+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39180 { /* Sometime a Level-One switch card. */
39181 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39182 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39183diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39184index f450bc9..2b747c8 100644
39185--- a/drivers/net/usb/hso.c
39186+++ b/drivers/net/usb/hso.c
39187@@ -71,7 +71,7 @@
39188 #include <asm/byteorder.h>
39189 #include <linux/serial_core.h>
39190 #include <linux/serial.h>
39191-
39192+#include <asm/local.h>
39193
39194 #define DRIVER_VERSION "1.2"
39195 #define MOD_AUTHOR "Option Wireless"
39196@@ -258,7 +258,7 @@ struct hso_serial {
39197
39198 /* from usb_serial_port */
39199 struct tty_struct *tty;
39200- int open_count;
39201+ local_t open_count;
39202 spinlock_t serial_lock;
39203
39204 int (*write_data) (struct hso_serial *serial);
39205@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39206 struct urb *urb;
39207
39208 urb = serial->rx_urb[0];
39209- if (serial->open_count > 0) {
39210+ if (local_read(&serial->open_count) > 0) {
39211 count = put_rxbuf_data(urb, serial);
39212 if (count == -1)
39213 return;
39214@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39215 DUMP1(urb->transfer_buffer, urb->actual_length);
39216
39217 /* Anyone listening? */
39218- if (serial->open_count == 0)
39219+ if (local_read(&serial->open_count) == 0)
39220 return;
39221
39222 if (status == 0) {
39223@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39224 spin_unlock_irq(&serial->serial_lock);
39225
39226 /* check for port already opened, if not set the termios */
39227- serial->open_count++;
39228- if (serial->open_count == 1) {
39229+ if (local_inc_return(&serial->open_count) == 1) {
39230 tty->low_latency = 1;
39231 serial->rx_state = RX_IDLE;
39232 /* Force default termio settings */
39233@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39234 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39235 if (result) {
39236 hso_stop_serial_device(serial->parent);
39237- serial->open_count--;
39238+ local_dec(&serial->open_count);
39239 kref_put(&serial->parent->ref, hso_serial_ref_free);
39240 }
39241 } else {
39242@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39243
39244 /* reset the rts and dtr */
39245 /* do the actual close */
39246- serial->open_count--;
39247+ local_dec(&serial->open_count);
39248
39249- if (serial->open_count <= 0) {
39250- serial->open_count = 0;
39251+ if (local_read(&serial->open_count) <= 0) {
39252+ local_set(&serial->open_count, 0);
39253 spin_lock_irq(&serial->serial_lock);
39254 if (serial->tty == tty) {
39255 serial->tty->driver_data = NULL;
39256@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39257
39258 /* the actual setup */
39259 spin_lock_irqsave(&serial->serial_lock, flags);
39260- if (serial->open_count)
39261+ if (local_read(&serial->open_count))
39262 _hso_serial_set_termios(tty, old);
39263 else
39264 tty->termios = old;
39265@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39266 /* Start all serial ports */
39267 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39268 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39269- if (dev2ser(serial_table[i])->open_count) {
39270+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
39271 result =
39272 hso_start_serial_device(serial_table[i], GFP_NOIO);
39273 hso_kick_transmit(dev2ser(serial_table[i]));
39274diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39275index 3e94f0c..ffdd926 100644
39276--- a/drivers/net/vxge/vxge-config.h
39277+++ b/drivers/net/vxge/vxge-config.h
39278@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39279 void (*link_down)(struct __vxge_hw_device *devh);
39280 void (*crit_err)(struct __vxge_hw_device *devh,
39281 enum vxge_hw_event type, u64 ext_data);
39282-};
39283+} __no_const;
39284
39285 /*
39286 * struct __vxge_hw_blockpool_entry - Block private data structure
39287diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39288index 068d7a9..35293de 100644
39289--- a/drivers/net/vxge/vxge-main.c
39290+++ b/drivers/net/vxge/vxge-main.c
39291@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39292 struct sk_buff *completed[NR_SKB_COMPLETED];
39293 int more;
39294
39295+ pax_track_stack();
39296+
39297 do {
39298 more = 0;
39299 skb_ptr = completed;
39300@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39301 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39302 int index;
39303
39304+ pax_track_stack();
39305+
39306 /*
39307 * Filling
39308 * - itable with bucket numbers
39309diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39310index 461742b..81be42e 100644
39311--- a/drivers/net/vxge/vxge-traffic.h
39312+++ b/drivers/net/vxge/vxge-traffic.h
39313@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39314 struct vxge_hw_mempool_dma *dma_object,
39315 u32 index,
39316 u32 is_last);
39317-};
39318+} __no_const;
39319
39320 void
39321 __vxge_hw_mempool_destroy(
39322diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39323index cd8cb95..4153b79 100644
39324--- a/drivers/net/wan/cycx_x25.c
39325+++ b/drivers/net/wan/cycx_x25.c
39326@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39327 unsigned char hex[1024],
39328 * phex = hex;
39329
39330+ pax_track_stack();
39331+
39332 if (len >= (sizeof(hex) / 2))
39333 len = (sizeof(hex) / 2) - 1;
39334
39335diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39336index aa9248f..a4e3c3b 100644
39337--- a/drivers/net/wan/hdlc_x25.c
39338+++ b/drivers/net/wan/hdlc_x25.c
39339@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39340
39341 static int x25_open(struct net_device *dev)
39342 {
39343- struct lapb_register_struct cb;
39344+ static struct lapb_register_struct cb = {
39345+ .connect_confirmation = x25_connected,
39346+ .connect_indication = x25_connected,
39347+ .disconnect_confirmation = x25_disconnected,
39348+ .disconnect_indication = x25_disconnected,
39349+ .data_indication = x25_data_indication,
39350+ .data_transmit = x25_data_transmit
39351+ };
39352 int result;
39353
39354- cb.connect_confirmation = x25_connected;
39355- cb.connect_indication = x25_connected;
39356- cb.disconnect_confirmation = x25_disconnected;
39357- cb.disconnect_indication = x25_disconnected;
39358- cb.data_indication = x25_data_indication;
39359- cb.data_transmit = x25_data_transmit;
39360-
39361 result = lapb_register(dev, &cb);
39362 if (result != LAPB_OK)
39363 return result;
39364diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39365index 5ad287c..783b020 100644
39366--- a/drivers/net/wimax/i2400m/usb-fw.c
39367+++ b/drivers/net/wimax/i2400m/usb-fw.c
39368@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39369 int do_autopm = 1;
39370 DECLARE_COMPLETION_ONSTACK(notif_completion);
39371
39372+ pax_track_stack();
39373+
39374 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39375 i2400m, ack, ack_size);
39376 BUG_ON(_ack == i2400m->bm_ack_buf);
39377diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39378index 6c26840..62c97c3 100644
39379--- a/drivers/net/wireless/airo.c
39380+++ b/drivers/net/wireless/airo.c
39381@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39382 BSSListElement * loop_net;
39383 BSSListElement * tmp_net;
39384
39385+ pax_track_stack();
39386+
39387 /* Blow away current list of scan results */
39388 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39389 list_move_tail (&loop_net->list, &ai->network_free_list);
39390@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39391 WepKeyRid wkr;
39392 int rc;
39393
39394+ pax_track_stack();
39395+
39396 memset( &mySsid, 0, sizeof( mySsid ) );
39397 kfree (ai->flash);
39398 ai->flash = NULL;
39399@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39400 __le32 *vals = stats.vals;
39401 int len;
39402
39403+ pax_track_stack();
39404+
39405 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39406 return -ENOMEM;
39407 data = (struct proc_data *)file->private_data;
39408@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39409 /* If doLoseSync is not 1, we won't do a Lose Sync */
39410 int doLoseSync = -1;
39411
39412+ pax_track_stack();
39413+
39414 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39415 return -ENOMEM;
39416 data = (struct proc_data *)file->private_data;
39417@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39418 int i;
39419 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39420
39421+ pax_track_stack();
39422+
39423 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39424 if (!qual)
39425 return -ENOMEM;
39426@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39427 CapabilityRid cap_rid;
39428 __le32 *vals = stats_rid.vals;
39429
39430+ pax_track_stack();
39431+
39432 /* Get stats out of the card */
39433 clear_bit(JOB_WSTATS, &local->jobs);
39434 if (local->power.event) {
39435diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39436index 747508c..82e965d 100644
39437--- a/drivers/net/wireless/ath/ath5k/debug.c
39438+++ b/drivers/net/wireless/ath/ath5k/debug.c
39439@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39440 unsigned int v;
39441 u64 tsf;
39442
39443+ pax_track_stack();
39444+
39445 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39446 len += snprintf(buf+len, sizeof(buf)-len,
39447 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39448@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39449 unsigned int len = 0;
39450 unsigned int i;
39451
39452+ pax_track_stack();
39453+
39454 len += snprintf(buf+len, sizeof(buf)-len,
39455 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39456
39457diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39458index 2be4c22..593b1eb 100644
39459--- a/drivers/net/wireless/ath/ath9k/debug.c
39460+++ b/drivers/net/wireless/ath/ath9k/debug.c
39461@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39462 char buf[512];
39463 unsigned int len = 0;
39464
39465+ pax_track_stack();
39466+
39467 len += snprintf(buf + len, sizeof(buf) - len,
39468 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39469 len += snprintf(buf + len, sizeof(buf) - len,
39470@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39471 int i;
39472 u8 addr[ETH_ALEN];
39473
39474+ pax_track_stack();
39475+
39476 len += snprintf(buf + len, sizeof(buf) - len,
39477 "primary: %s (%s chan=%d ht=%d)\n",
39478 wiphy_name(sc->pri_wiphy->hw->wiphy),
39479diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39480index 80b19a4..dab3a45 100644
39481--- a/drivers/net/wireless/b43/debugfs.c
39482+++ b/drivers/net/wireless/b43/debugfs.c
39483@@ -43,7 +43,7 @@ static struct dentry *rootdir;
39484 struct b43_debugfs_fops {
39485 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39486 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39487- struct file_operations fops;
39488+ const struct file_operations fops;
39489 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39490 size_t file_struct_offset;
39491 };
39492diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39493index 1f85ac5..c99b4b4 100644
39494--- a/drivers/net/wireless/b43legacy/debugfs.c
39495+++ b/drivers/net/wireless/b43legacy/debugfs.c
39496@@ -44,7 +44,7 @@ static struct dentry *rootdir;
39497 struct b43legacy_debugfs_fops {
39498 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39499 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39500- struct file_operations fops;
39501+ const struct file_operations fops;
39502 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39503 size_t file_struct_offset;
39504 /* Take wl->irq_lock before calling read/write? */
39505diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39506index 43102bf..3b569c3 100644
39507--- a/drivers/net/wireless/ipw2x00/ipw2100.c
39508+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39509@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39510 int err;
39511 DECLARE_SSID_BUF(ssid);
39512
39513+ pax_track_stack();
39514+
39515 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39516
39517 if (ssid_len)
39518@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39519 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39520 int err;
39521
39522+ pax_track_stack();
39523+
39524 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39525 idx, keylen, len);
39526
39527diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39528index 282b1f7..169f0cf 100644
39529--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39530+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39531@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39532 unsigned long flags;
39533 DECLARE_SSID_BUF(ssid);
39534
39535+ pax_track_stack();
39536+
39537 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39538 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39539 print_ssid(ssid, info_element->data, info_element->len),
39540diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39541index 950267a..80d5fd2 100644
39542--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39543+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39544@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39545 },
39546 };
39547
39548-static struct iwl_ops iwl1000_ops = {
39549+static const struct iwl_ops iwl1000_ops = {
39550 .ucode = &iwl5000_ucode,
39551 .lib = &iwl1000_lib,
39552 .hcmd = &iwl5000_hcmd,
39553diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39554index 56bfcc3..b348020 100644
39555--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39556+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39557@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39558 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39559 };
39560
39561-static struct iwl_ops iwl3945_ops = {
39562+static const struct iwl_ops iwl3945_ops = {
39563 .ucode = &iwl3945_ucode,
39564 .lib = &iwl3945_lib,
39565 .hcmd = &iwl3945_hcmd,
39566diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39567index 585b8d4..e142963 100644
39568--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39569+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39570@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39571 },
39572 };
39573
39574-static struct iwl_ops iwl4965_ops = {
39575+static const struct iwl_ops iwl4965_ops = {
39576 .ucode = &iwl4965_ucode,
39577 .lib = &iwl4965_lib,
39578 .hcmd = &iwl4965_hcmd,
39579diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39580index 1f423f2..e37c192 100644
39581--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39582+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39583@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39584 },
39585 };
39586
39587-struct iwl_ops iwl5000_ops = {
39588+const struct iwl_ops iwl5000_ops = {
39589 .ucode = &iwl5000_ucode,
39590 .lib = &iwl5000_lib,
39591 .hcmd = &iwl5000_hcmd,
39592 .utils = &iwl5000_hcmd_utils,
39593 };
39594
39595-static struct iwl_ops iwl5150_ops = {
39596+static const struct iwl_ops iwl5150_ops = {
39597 .ucode = &iwl5000_ucode,
39598 .lib = &iwl5150_lib,
39599 .hcmd = &iwl5000_hcmd,
39600diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39601index 1473452..f07d5e1 100644
39602--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39603+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39604@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39605 .calc_rssi = iwl5000_calc_rssi,
39606 };
39607
39608-static struct iwl_ops iwl6000_ops = {
39609+static const struct iwl_ops iwl6000_ops = {
39610 .ucode = &iwl5000_ucode,
39611 .lib = &iwl6000_lib,
39612 .hcmd = &iwl5000_hcmd,
39613diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39614index 1a3dfa2..b3e0a61 100644
39615--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39616+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39617@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39618 u8 active_index = 0;
39619 s32 tpt = 0;
39620
39621+ pax_track_stack();
39622+
39623 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39624
39625 if (!ieee80211_is_data(hdr->frame_control) ||
39626@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39627 u8 valid_tx_ant = 0;
39628 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39629
39630+ pax_track_stack();
39631+
39632 /* Override starting rate (index 0) if needed for debug purposes */
39633 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39634
39635diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39636index 0e56d78..6a3c107 100644
39637--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39638+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39639@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39640 if (iwl_debug_level & IWL_DL_INFO)
39641 dev_printk(KERN_DEBUG, &(pdev->dev),
39642 "Disabling hw_scan\n");
39643- iwl_hw_ops.hw_scan = NULL;
39644+ pax_open_kernel();
39645+ *(void **)&iwl_hw_ops.hw_scan = NULL;
39646+ pax_close_kernel();
39647 }
39648
39649 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39650diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39651index cbc6290..eb323d7 100644
39652--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39653+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39654@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39655 #endif
39656
39657 #else
39658-#define IWL_DEBUG(__priv, level, fmt, args...)
39659-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39660+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39661+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39662 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39663 void *p, u32 len)
39664 {}
39665diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39666index a198bcf..8e68233 100644
39667--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39668+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39669@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39670 int pos = 0;
39671 const size_t bufsz = sizeof(buf);
39672
39673+ pax_track_stack();
39674+
39675 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39676 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39677 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39678@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39679 const size_t bufsz = sizeof(buf);
39680 ssize_t ret;
39681
39682+ pax_track_stack();
39683+
39684 for (i = 0; i < AC_NUM; i++) {
39685 pos += scnprintf(buf + pos, bufsz - pos,
39686 "\tcw_min\tcw_max\taifsn\ttxop\n");
39687diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39688index 3539ea4..b174bfa 100644
39689--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39690+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39691@@ -68,7 +68,7 @@ struct iwl_tx_queue;
39692
39693 /* shared structures from iwl-5000.c */
39694 extern struct iwl_mod_params iwl50_mod_params;
39695-extern struct iwl_ops iwl5000_ops;
39696+extern const struct iwl_ops iwl5000_ops;
39697 extern struct iwl_ucode_ops iwl5000_ucode;
39698 extern struct iwl_lib_ops iwl5000_lib;
39699 extern struct iwl_hcmd_ops iwl5000_hcmd;
39700diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39701index 619590d..69235ee 100644
39702--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39703+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39704@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39705 */
39706 if (iwl3945_mod_params.disable_hw_scan) {
39707 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39708- iwl3945_hw_ops.hw_scan = NULL;
39709+ pax_open_kernel();
39710+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39711+ pax_close_kernel();
39712 }
39713
39714
39715diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39716index 1465379..fe4d78b 100644
39717--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39718+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39719@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39720 int buf_len = 512;
39721 size_t len = 0;
39722
39723+ pax_track_stack();
39724+
39725 if (*ppos != 0)
39726 return 0;
39727 if (count < sizeof(buf))
39728diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39729index 893a55c..7f66a50 100644
39730--- a/drivers/net/wireless/libertas/debugfs.c
39731+++ b/drivers/net/wireless/libertas/debugfs.c
39732@@ -708,7 +708,7 @@ out_unlock:
39733 struct lbs_debugfs_files {
39734 const char *name;
39735 int perm;
39736- struct file_operations fops;
39737+ const struct file_operations fops;
39738 };
39739
39740 static const struct lbs_debugfs_files debugfs_files[] = {
39741diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39742index 2ecbedb..42704f0 100644
39743--- a/drivers/net/wireless/rndis_wlan.c
39744+++ b/drivers/net/wireless/rndis_wlan.c
39745@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39746
39747 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39748
39749- if (rts_threshold < 0 || rts_threshold > 2347)
39750+ if (rts_threshold > 2347)
39751 rts_threshold = 2347;
39752
39753 tmp = cpu_to_le32(rts_threshold);
39754diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39755index 334ccd6..47f8944 100644
39756--- a/drivers/oprofile/buffer_sync.c
39757+++ b/drivers/oprofile/buffer_sync.c
39758@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39759 if (cookie == NO_COOKIE)
39760 offset = pc;
39761 if (cookie == INVALID_COOKIE) {
39762- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39763+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39764 offset = pc;
39765 }
39766 if (cookie != last_cookie) {
39767@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39768 /* add userspace sample */
39769
39770 if (!mm) {
39771- atomic_inc(&oprofile_stats.sample_lost_no_mm);
39772+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39773 return 0;
39774 }
39775
39776 cookie = lookup_dcookie(mm, s->eip, &offset);
39777
39778 if (cookie == INVALID_COOKIE) {
39779- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39780+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39781 return 0;
39782 }
39783
39784@@ -562,7 +562,7 @@ void sync_buffer(int cpu)
39785 /* ignore backtraces if failed to add a sample */
39786 if (state == sb_bt_start) {
39787 state = sb_bt_ignore;
39788- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39789+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39790 }
39791 }
39792 release_mm(mm);
39793diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39794index 5df60a6..72f5c1c 100644
39795--- a/drivers/oprofile/event_buffer.c
39796+++ b/drivers/oprofile/event_buffer.c
39797@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39798 }
39799
39800 if (buffer_pos == buffer_size) {
39801- atomic_inc(&oprofile_stats.event_lost_overflow);
39802+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39803 return;
39804 }
39805
39806diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39807index dc8a042..fe5f315 100644
39808--- a/drivers/oprofile/oprof.c
39809+++ b/drivers/oprofile/oprof.c
39810@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39811 if (oprofile_ops.switch_events())
39812 return;
39813
39814- atomic_inc(&oprofile_stats.multiplex_counter);
39815+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39816 start_switch_worker();
39817 }
39818
39819diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39820index 61689e8..387f7f8 100644
39821--- a/drivers/oprofile/oprofile_stats.c
39822+++ b/drivers/oprofile/oprofile_stats.c
39823@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39824 cpu_buf->sample_invalid_eip = 0;
39825 }
39826
39827- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39828- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39829- atomic_set(&oprofile_stats.event_lost_overflow, 0);
39830- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39831- atomic_set(&oprofile_stats.multiplex_counter, 0);
39832+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39833+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39834+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39835+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39836+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39837 }
39838
39839
39840diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39841index 0b54e46..a37c527 100644
39842--- a/drivers/oprofile/oprofile_stats.h
39843+++ b/drivers/oprofile/oprofile_stats.h
39844@@ -13,11 +13,11 @@
39845 #include <asm/atomic.h>
39846
39847 struct oprofile_stat_struct {
39848- atomic_t sample_lost_no_mm;
39849- atomic_t sample_lost_no_mapping;
39850- atomic_t bt_lost_no_mapping;
39851- atomic_t event_lost_overflow;
39852- atomic_t multiplex_counter;
39853+ atomic_unchecked_t sample_lost_no_mm;
39854+ atomic_unchecked_t sample_lost_no_mapping;
39855+ atomic_unchecked_t bt_lost_no_mapping;
39856+ atomic_unchecked_t event_lost_overflow;
39857+ atomic_unchecked_t multiplex_counter;
39858 };
39859
39860 extern struct oprofile_stat_struct oprofile_stats;
39861diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39862index 2766a6d..80c77e2 100644
39863--- a/drivers/oprofile/oprofilefs.c
39864+++ b/drivers/oprofile/oprofilefs.c
39865@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
39866
39867
39868 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39869- char const *name, atomic_t *val)
39870+ char const *name, atomic_unchecked_t *val)
39871 {
39872 struct dentry *d = __oprofilefs_create_file(sb, root, name,
39873 &atomic_ro_fops, 0444);
39874diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
39875index 13a64bc..ad62835 100644
39876--- a/drivers/parisc/pdc_stable.c
39877+++ b/drivers/parisc/pdc_stable.c
39878@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
39879 return ret;
39880 }
39881
39882-static struct sysfs_ops pdcspath_attr_ops = {
39883+static const struct sysfs_ops pdcspath_attr_ops = {
39884 .show = pdcspath_attr_show,
39885 .store = pdcspath_attr_store,
39886 };
39887diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
39888index 8eefe56..40751a7 100644
39889--- a/drivers/parport/procfs.c
39890+++ b/drivers/parport/procfs.c
39891@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
39892
39893 *ppos += len;
39894
39895- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
39896+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
39897 }
39898
39899 #ifdef CONFIG_PARPORT_1284
39900@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
39901
39902 *ppos += len;
39903
39904- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
39905+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
39906 }
39907 #endif /* IEEE1284.3 support. */
39908
39909diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
39910index 73e7d8e..c80f3d2 100644
39911--- a/drivers/pci/hotplug/acpiphp_glue.c
39912+++ b/drivers/pci/hotplug/acpiphp_glue.c
39913@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
39914 }
39915
39916
39917-static struct acpi_dock_ops acpiphp_dock_ops = {
39918+static const struct acpi_dock_ops acpiphp_dock_ops = {
39919 .handler = handle_hotplug_event_func,
39920 };
39921
39922diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
39923index 9fff878..ad0ad53 100644
39924--- a/drivers/pci/hotplug/cpci_hotplug.h
39925+++ b/drivers/pci/hotplug/cpci_hotplug.h
39926@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
39927 int (*hardware_test) (struct slot* slot, u32 value);
39928 u8 (*get_power) (struct slot* slot);
39929 int (*set_power) (struct slot* slot, int value);
39930-};
39931+} __no_const;
39932
39933 struct cpci_hp_controller {
39934 unsigned int irq;
39935diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
39936index 76ba8a1..20ca857 100644
39937--- a/drivers/pci/hotplug/cpqphp_nvram.c
39938+++ b/drivers/pci/hotplug/cpqphp_nvram.c
39939@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
39940
39941 void compaq_nvram_init (void __iomem *rom_start)
39942 {
39943+
39944+#ifndef CONFIG_PAX_KERNEXEC
39945 if (rom_start) {
39946 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
39947 }
39948+#endif
39949+
39950 dbg("int15 entry = %p\n", compaq_int15_entry_point);
39951
39952 /* initialize our int15 lock */
39953diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
39954index 6151389..0a894ef 100644
39955--- a/drivers/pci/hotplug/fakephp.c
39956+++ b/drivers/pci/hotplug/fakephp.c
39957@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
39958 }
39959
39960 static struct kobj_type legacy_ktype = {
39961- .sysfs_ops = &(struct sysfs_ops){
39962+ .sysfs_ops = &(const struct sysfs_ops){
39963 .store = legacy_store, .show = legacy_show
39964 },
39965 .release = &legacy_release,
39966diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
39967index 5b680df..fe05b7e 100644
39968--- a/drivers/pci/intel-iommu.c
39969+++ b/drivers/pci/intel-iommu.c
39970@@ -2643,7 +2643,7 @@ error:
39971 return 0;
39972 }
39973
39974-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
39975+dma_addr_t intel_map_page(struct device *dev, struct page *page,
39976 unsigned long offset, size_t size,
39977 enum dma_data_direction dir,
39978 struct dma_attrs *attrs)
39979@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
39980 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
39981 }
39982
39983-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39984+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39985 size_t size, enum dma_data_direction dir,
39986 struct dma_attrs *attrs)
39987 {
39988@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39989 }
39990 }
39991
39992-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39993+void *intel_alloc_coherent(struct device *hwdev, size_t size,
39994 dma_addr_t *dma_handle, gfp_t flags)
39995 {
39996 void *vaddr;
39997@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39998 return NULL;
39999 }
40000
40001-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40002+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40003 dma_addr_t dma_handle)
40004 {
40005 int order;
40006@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
40007 free_pages((unsigned long)vaddr, order);
40008 }
40009
40010-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40011+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
40012 int nelems, enum dma_data_direction dir,
40013 struct dma_attrs *attrs)
40014 {
40015@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
40016 return nelems;
40017 }
40018
40019-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40020+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40021 enum dma_data_direction dir, struct dma_attrs *attrs)
40022 {
40023 int i;
40024@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40025 return nelems;
40026 }
40027
40028-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40029+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40030 {
40031 return !dma_addr;
40032 }
40033
40034-struct dma_map_ops intel_dma_ops = {
40035+const struct dma_map_ops intel_dma_ops = {
40036 .alloc_coherent = intel_alloc_coherent,
40037 .free_coherent = intel_free_coherent,
40038 .map_sg = intel_map_sg,
40039diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40040index 5b7056c..607bc94 100644
40041--- a/drivers/pci/pcie/aspm.c
40042+++ b/drivers/pci/pcie/aspm.c
40043@@ -27,9 +27,9 @@
40044 #define MODULE_PARAM_PREFIX "pcie_aspm."
40045
40046 /* Note: those are not register definitions */
40047-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40048-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40049-#define ASPM_STATE_L1 (4) /* L1 state */
40050+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40051+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40052+#define ASPM_STATE_L1 (4U) /* L1 state */
40053 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40054 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40055
40056diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40057index 8105e32..ca10419 100644
40058--- a/drivers/pci/probe.c
40059+++ b/drivers/pci/probe.c
40060@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40061 return ret;
40062 }
40063
40064-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40065+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40066 struct device_attribute *attr,
40067 char *buf)
40068 {
40069 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40070 }
40071
40072-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40073+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40074 struct device_attribute *attr,
40075 char *buf)
40076 {
40077diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40078index a03ad8c..024b0da 100644
40079--- a/drivers/pci/proc.c
40080+++ b/drivers/pci/proc.c
40081@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40082 static int __init pci_proc_init(void)
40083 {
40084 struct pci_dev *dev = NULL;
40085+
40086+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40087+#ifdef CONFIG_GRKERNSEC_PROC_USER
40088+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40089+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40090+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40091+#endif
40092+#else
40093 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40094+#endif
40095 proc_create("devices", 0, proc_bus_pci_dir,
40096 &proc_bus_pci_dev_operations);
40097 proc_initialized = 1;
40098diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40099index 8c02b6c..5584d8e 100644
40100--- a/drivers/pci/slot.c
40101+++ b/drivers/pci/slot.c
40102@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40103 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40104 }
40105
40106-static struct sysfs_ops pci_slot_sysfs_ops = {
40107+static const struct sysfs_ops pci_slot_sysfs_ops = {
40108 .show = pci_slot_attr_show,
40109 .store = pci_slot_attr_store,
40110 };
40111diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40112index 30cf71d2..50938f1 100644
40113--- a/drivers/pcmcia/pcmcia_ioctl.c
40114+++ b/drivers/pcmcia/pcmcia_ioctl.c
40115@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40116 return -EFAULT;
40117 }
40118 }
40119- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40120+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40121 if (!buf)
40122 return -ENOMEM;
40123
40124diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40125index 52183c4..b224c69 100644
40126--- a/drivers/platform/x86/acer-wmi.c
40127+++ b/drivers/platform/x86/acer-wmi.c
40128@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40129 return 0;
40130 }
40131
40132-static struct backlight_ops acer_bl_ops = {
40133+static const struct backlight_ops acer_bl_ops = {
40134 .get_brightness = read_brightness,
40135 .update_status = update_bl_status,
40136 };
40137diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40138index 767cb61..a87380b 100644
40139--- a/drivers/platform/x86/asus-laptop.c
40140+++ b/drivers/platform/x86/asus-laptop.c
40141@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40142 */
40143 static int read_brightness(struct backlight_device *bd);
40144 static int update_bl_status(struct backlight_device *bd);
40145-static struct backlight_ops asusbl_ops = {
40146+static const struct backlight_ops asusbl_ops = {
40147 .get_brightness = read_brightness,
40148 .update_status = update_bl_status,
40149 };
40150diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40151index d66c07a..a4abaac 100644
40152--- a/drivers/platform/x86/asus_acpi.c
40153+++ b/drivers/platform/x86/asus_acpi.c
40154@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40155 return 0;
40156 }
40157
40158-static struct backlight_ops asus_backlight_data = {
40159+static const struct backlight_ops asus_backlight_data = {
40160 .get_brightness = read_brightness,
40161 .update_status = set_brightness_status,
40162 };
40163diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40164index 11003bb..550ff1b 100644
40165--- a/drivers/platform/x86/compal-laptop.c
40166+++ b/drivers/platform/x86/compal-laptop.c
40167@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40168 return set_lcd_level(b->props.brightness);
40169 }
40170
40171-static struct backlight_ops compalbl_ops = {
40172+static const struct backlight_ops compalbl_ops = {
40173 .get_brightness = bl_get_brightness,
40174 .update_status = bl_update_status,
40175 };
40176diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40177index 07a74da..9dc99fa 100644
40178--- a/drivers/platform/x86/dell-laptop.c
40179+++ b/drivers/platform/x86/dell-laptop.c
40180@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40181 return buffer.output[1];
40182 }
40183
40184-static struct backlight_ops dell_ops = {
40185+static const struct backlight_ops dell_ops = {
40186 .get_brightness = dell_get_intensity,
40187 .update_status = dell_send_intensity,
40188 };
40189diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40190index c533b1c..5c81f22 100644
40191--- a/drivers/platform/x86/eeepc-laptop.c
40192+++ b/drivers/platform/x86/eeepc-laptop.c
40193@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40194 */
40195 static int read_brightness(struct backlight_device *bd);
40196 static int update_bl_status(struct backlight_device *bd);
40197-static struct backlight_ops eeepcbl_ops = {
40198+static const struct backlight_ops eeepcbl_ops = {
40199 .get_brightness = read_brightness,
40200 .update_status = update_bl_status,
40201 };
40202diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40203index bcd4ba8..a249b35 100644
40204--- a/drivers/platform/x86/fujitsu-laptop.c
40205+++ b/drivers/platform/x86/fujitsu-laptop.c
40206@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40207 return ret;
40208 }
40209
40210-static struct backlight_ops fujitsubl_ops = {
40211+static const struct backlight_ops fujitsubl_ops = {
40212 .get_brightness = bl_get_brightness,
40213 .update_status = bl_update_status,
40214 };
40215diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40216index 759763d..1093ba2 100644
40217--- a/drivers/platform/x86/msi-laptop.c
40218+++ b/drivers/platform/x86/msi-laptop.c
40219@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40220 return set_lcd_level(b->props.brightness);
40221 }
40222
40223-static struct backlight_ops msibl_ops = {
40224+static const struct backlight_ops msibl_ops = {
40225 .get_brightness = bl_get_brightness,
40226 .update_status = bl_update_status,
40227 };
40228diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40229index fe7cf01..9012d8d 100644
40230--- a/drivers/platform/x86/panasonic-laptop.c
40231+++ b/drivers/platform/x86/panasonic-laptop.c
40232@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40233 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40234 }
40235
40236-static struct backlight_ops pcc_backlight_ops = {
40237+static const struct backlight_ops pcc_backlight_ops = {
40238 .get_brightness = bl_get,
40239 .update_status = bl_set_status,
40240 };
40241diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40242index a2a742c..b37e25e 100644
40243--- a/drivers/platform/x86/sony-laptop.c
40244+++ b/drivers/platform/x86/sony-laptop.c
40245@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40246 }
40247
40248 static struct backlight_device *sony_backlight_device;
40249-static struct backlight_ops sony_backlight_ops = {
40250+static const struct backlight_ops sony_backlight_ops = {
40251 .update_status = sony_backlight_update_status,
40252 .get_brightness = sony_backlight_get_brightness,
40253 };
40254diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40255index 68271ae..5e8fb10 100644
40256--- a/drivers/platform/x86/thinkpad_acpi.c
40257+++ b/drivers/platform/x86/thinkpad_acpi.c
40258@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40259 return 0;
40260 }
40261
40262-void static hotkey_mask_warn_incomplete_mask(void)
40263+static void hotkey_mask_warn_incomplete_mask(void)
40264 {
40265 /* log only what the user can fix... */
40266 const u32 wantedmask = hotkey_driver_mask &
40267@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40268 BACKLIGHT_UPDATE_HOTKEY);
40269 }
40270
40271-static struct backlight_ops ibm_backlight_data = {
40272+static const struct backlight_ops ibm_backlight_data = {
40273 .get_brightness = brightness_get,
40274 .update_status = brightness_update_status,
40275 };
40276diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40277index 51c0a8b..0786629 100644
40278--- a/drivers/platform/x86/toshiba_acpi.c
40279+++ b/drivers/platform/x86/toshiba_acpi.c
40280@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40281 return AE_OK;
40282 }
40283
40284-static struct backlight_ops toshiba_backlight_data = {
40285+static const struct backlight_ops toshiba_backlight_data = {
40286 .get_brightness = get_lcd,
40287 .update_status = set_lcd_status,
40288 };
40289diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40290index fc83783c..cf370d7 100644
40291--- a/drivers/pnp/pnpbios/bioscalls.c
40292+++ b/drivers/pnp/pnpbios/bioscalls.c
40293@@ -60,7 +60,7 @@ do { \
40294 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40295 } while(0)
40296
40297-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40298+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40299 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40300
40301 /*
40302@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40303
40304 cpu = get_cpu();
40305 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40306+
40307+ pax_open_kernel();
40308 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40309+ pax_close_kernel();
40310
40311 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40312 spin_lock_irqsave(&pnp_bios_lock, flags);
40313@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40314 :"memory");
40315 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40316
40317+ pax_open_kernel();
40318 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40319+ pax_close_kernel();
40320+
40321 put_cpu();
40322
40323 /* If we get here and this is set then the PnP BIOS faulted on us. */
40324@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40325 return status;
40326 }
40327
40328-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40329+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40330 {
40331 int i;
40332
40333@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40334 pnp_bios_callpoint.offset = header->fields.pm16offset;
40335 pnp_bios_callpoint.segment = PNP_CS16;
40336
40337+ pax_open_kernel();
40338+
40339 for_each_possible_cpu(i) {
40340 struct desc_struct *gdt = get_cpu_gdt_table(i);
40341 if (!gdt)
40342@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40343 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40344 (unsigned long)__va(header->fields.pm16dseg));
40345 }
40346+
40347+ pax_close_kernel();
40348 }
40349diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40350index ba97654..66b99d4 100644
40351--- a/drivers/pnp/resource.c
40352+++ b/drivers/pnp/resource.c
40353@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40354 return 1;
40355
40356 /* check if the resource is valid */
40357- if (*irq < 0 || *irq > 15)
40358+ if (*irq > 15)
40359 return 0;
40360
40361 /* check if the resource is reserved */
40362@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40363 return 1;
40364
40365 /* check if the resource is valid */
40366- if (*dma < 0 || *dma == 4 || *dma > 7)
40367+ if (*dma == 4 || *dma > 7)
40368 return 0;
40369
40370 /* check if the resource is reserved */
40371diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40372index 62bb981..24a2dc9 100644
40373--- a/drivers/power/bq27x00_battery.c
40374+++ b/drivers/power/bq27x00_battery.c
40375@@ -44,7 +44,7 @@ struct bq27x00_device_info;
40376 struct bq27x00_access_methods {
40377 int (*read)(u8 reg, int *rt_value, int b_single,
40378 struct bq27x00_device_info *di);
40379-};
40380+} __no_const;
40381
40382 struct bq27x00_device_info {
40383 struct device *dev;
40384diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40385index 62227cd..b5b538b 100644
40386--- a/drivers/rtc/rtc-dev.c
40387+++ b/drivers/rtc/rtc-dev.c
40388@@ -14,6 +14,7 @@
40389 #include <linux/module.h>
40390 #include <linux/rtc.h>
40391 #include <linux/sched.h>
40392+#include <linux/grsecurity.h>
40393 #include "rtc-core.h"
40394
40395 static dev_t rtc_devt;
40396@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40397 if (copy_from_user(&tm, uarg, sizeof(tm)))
40398 return -EFAULT;
40399
40400+ gr_log_timechange();
40401+
40402 return rtc_set_time(rtc, &tm);
40403
40404 case RTC_PIE_ON:
40405diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40406index 968e3c7..fbc637a 100644
40407--- a/drivers/s390/cio/qdio_perf.c
40408+++ b/drivers/s390/cio/qdio_perf.c
40409@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40410 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40411 {
40412 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40413- (long)atomic_long_read(&perf_stats.qdio_int));
40414+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40415 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40416- (long)atomic_long_read(&perf_stats.pci_int));
40417+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40418 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40419- (long)atomic_long_read(&perf_stats.thin_int));
40420+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40421 seq_printf(m, "\n");
40422 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40423- (long)atomic_long_read(&perf_stats.tasklet_inbound));
40424+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40425 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40426- (long)atomic_long_read(&perf_stats.tasklet_outbound));
40427+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40428 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40429- (long)atomic_long_read(&perf_stats.tasklet_thinint),
40430- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40431+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40432+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40433 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40434- (long)atomic_long_read(&perf_stats.thinint_inbound),
40435- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40436+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40437+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40438 seq_printf(m, "\n");
40439 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40440- (long)atomic_long_read(&perf_stats.siga_in));
40441+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40442 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40443- (long)atomic_long_read(&perf_stats.siga_out));
40444+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40445 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40446- (long)atomic_long_read(&perf_stats.siga_sync));
40447+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40448 seq_printf(m, "\n");
40449 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40450- (long)atomic_long_read(&perf_stats.inbound_handler));
40451+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40452 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40453- (long)atomic_long_read(&perf_stats.outbound_handler));
40454+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40455 seq_printf(m, "\n");
40456 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40457- (long)atomic_long_read(&perf_stats.fast_requeue));
40458+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40459 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40460- (long)atomic_long_read(&perf_stats.outbound_target_full));
40461+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40462 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40463- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40464+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40465 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40466- (long)atomic_long_read(&perf_stats.debug_stop_polling));
40467+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40468 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40469- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40470+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40471 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40472- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40473- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40474+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40475+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40476 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40477- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40478- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40479+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40480+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40481 seq_printf(m, "\n");
40482 return 0;
40483 }
40484diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40485index ff4504c..b3604c3 100644
40486--- a/drivers/s390/cio/qdio_perf.h
40487+++ b/drivers/s390/cio/qdio_perf.h
40488@@ -13,46 +13,46 @@
40489
40490 struct qdio_perf_stats {
40491 /* interrupt handler calls */
40492- atomic_long_t qdio_int;
40493- atomic_long_t pci_int;
40494- atomic_long_t thin_int;
40495+ atomic_long_unchecked_t qdio_int;
40496+ atomic_long_unchecked_t pci_int;
40497+ atomic_long_unchecked_t thin_int;
40498
40499 /* tasklet runs */
40500- atomic_long_t tasklet_inbound;
40501- atomic_long_t tasklet_outbound;
40502- atomic_long_t tasklet_thinint;
40503- atomic_long_t tasklet_thinint_loop;
40504- atomic_long_t thinint_inbound;
40505- atomic_long_t thinint_inbound_loop;
40506- atomic_long_t thinint_inbound_loop2;
40507+ atomic_long_unchecked_t tasklet_inbound;
40508+ atomic_long_unchecked_t tasklet_outbound;
40509+ atomic_long_unchecked_t tasklet_thinint;
40510+ atomic_long_unchecked_t tasklet_thinint_loop;
40511+ atomic_long_unchecked_t thinint_inbound;
40512+ atomic_long_unchecked_t thinint_inbound_loop;
40513+ atomic_long_unchecked_t thinint_inbound_loop2;
40514
40515 /* signal adapter calls */
40516- atomic_long_t siga_out;
40517- atomic_long_t siga_in;
40518- atomic_long_t siga_sync;
40519+ atomic_long_unchecked_t siga_out;
40520+ atomic_long_unchecked_t siga_in;
40521+ atomic_long_unchecked_t siga_sync;
40522
40523 /* misc */
40524- atomic_long_t inbound_handler;
40525- atomic_long_t outbound_handler;
40526- atomic_long_t fast_requeue;
40527- atomic_long_t outbound_target_full;
40528+ atomic_long_unchecked_t inbound_handler;
40529+ atomic_long_unchecked_t outbound_handler;
40530+ atomic_long_unchecked_t fast_requeue;
40531+ atomic_long_unchecked_t outbound_target_full;
40532
40533 /* for debugging */
40534- atomic_long_t debug_tl_out_timer;
40535- atomic_long_t debug_stop_polling;
40536- atomic_long_t debug_eqbs_all;
40537- atomic_long_t debug_eqbs_incomplete;
40538- atomic_long_t debug_sqbs_all;
40539- atomic_long_t debug_sqbs_incomplete;
40540+ atomic_long_unchecked_t debug_tl_out_timer;
40541+ atomic_long_unchecked_t debug_stop_polling;
40542+ atomic_long_unchecked_t debug_eqbs_all;
40543+ atomic_long_unchecked_t debug_eqbs_incomplete;
40544+ atomic_long_unchecked_t debug_sqbs_all;
40545+ atomic_long_unchecked_t debug_sqbs_incomplete;
40546 };
40547
40548 extern struct qdio_perf_stats perf_stats;
40549 extern int qdio_performance_stats;
40550
40551-static inline void qdio_perf_stat_inc(atomic_long_t *count)
40552+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40553 {
40554 if (qdio_performance_stats)
40555- atomic_long_inc(count);
40556+ atomic_long_inc_unchecked(count);
40557 }
40558
40559 int qdio_setup_perf_stats(void);
40560diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40561index 1ddcf40..a85f062 100644
40562--- a/drivers/scsi/BusLogic.c
40563+++ b/drivers/scsi/BusLogic.c
40564@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40565 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40566 *PrototypeHostAdapter)
40567 {
40568+ pax_track_stack();
40569+
40570 /*
40571 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40572 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40573diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40574index cdbdec9..b7d560b 100644
40575--- a/drivers/scsi/aacraid/aacraid.h
40576+++ b/drivers/scsi/aacraid/aacraid.h
40577@@ -471,7 +471,7 @@ struct adapter_ops
40578 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40579 /* Administrative operations */
40580 int (*adapter_comm)(struct aac_dev * dev, int comm);
40581-};
40582+} __no_const;
40583
40584 /*
40585 * Define which interrupt handler needs to be installed
40586diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40587index a5b8e7b..a6a0e43 100644
40588--- a/drivers/scsi/aacraid/commctrl.c
40589+++ b/drivers/scsi/aacraid/commctrl.c
40590@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40591 u32 actual_fibsize64, actual_fibsize = 0;
40592 int i;
40593
40594+ pax_track_stack();
40595
40596 if (dev->in_reset) {
40597 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40598diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40599index 9b97c3e..f099725 100644
40600--- a/drivers/scsi/aacraid/linit.c
40601+++ b/drivers/scsi/aacraid/linit.c
40602@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40603 #elif defined(__devinitconst)
40604 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40605 #else
40606-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40607+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40608 #endif
40609 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40610 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40611diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40612index 996f722..9127845 100644
40613--- a/drivers/scsi/aic94xx/aic94xx_init.c
40614+++ b/drivers/scsi/aic94xx/aic94xx_init.c
40615@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40616 flash_error_table[i].reason);
40617 }
40618
40619-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40620+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40621 asd_show_update_bios, asd_store_update_bios);
40622
40623 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40624@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40625 .lldd_control_phy = asd_control_phy,
40626 };
40627
40628-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40629+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40630 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40631 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40632 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40633diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40634index 58efd4b..cb48dc7 100644
40635--- a/drivers/scsi/bfa/bfa_ioc.h
40636+++ b/drivers/scsi/bfa/bfa_ioc.h
40637@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40638 bfa_ioc_disable_cbfn_t disable_cbfn;
40639 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40640 bfa_ioc_reset_cbfn_t reset_cbfn;
40641-};
40642+} __no_const;
40643
40644 /**
40645 * Heartbeat failure notification queue element.
40646diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40647index 7ad177e..5503586 100644
40648--- a/drivers/scsi/bfa/bfa_iocfc.h
40649+++ b/drivers/scsi/bfa/bfa_iocfc.h
40650@@ -61,7 +61,7 @@ struct bfa_hwif_s {
40651 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40652 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40653 u32 *nvecs, u32 *maxvec);
40654-};
40655+} __no_const;
40656 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40657
40658 struct bfa_iocfc_s {
40659diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40660index 4967643..cbec06b 100644
40661--- a/drivers/scsi/dpt_i2o.c
40662+++ b/drivers/scsi/dpt_i2o.c
40663@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40664 dma_addr_t addr;
40665 ulong flags = 0;
40666
40667+ pax_track_stack();
40668+
40669 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40670 // get user msg size in u32s
40671 if(get_user(size, &user_msg[0])){
40672@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40673 s32 rcode;
40674 dma_addr_t addr;
40675
40676+ pax_track_stack();
40677+
40678 memset(msg, 0 , sizeof(msg));
40679 len = scsi_bufflen(cmd);
40680 direction = 0x00000000;
40681diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40682index c7076ce..e20c67c 100644
40683--- a/drivers/scsi/eata.c
40684+++ b/drivers/scsi/eata.c
40685@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40686 struct hostdata *ha;
40687 char name[16];
40688
40689+ pax_track_stack();
40690+
40691 sprintf(name, "%s%d", driver_name, j);
40692
40693 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40694diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40695index 11ae5c9..891daec 100644
40696--- a/drivers/scsi/fcoe/libfcoe.c
40697+++ b/drivers/scsi/fcoe/libfcoe.c
40698@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40699 size_t rlen;
40700 size_t dlen;
40701
40702+ pax_track_stack();
40703+
40704 fiph = (struct fip_header *)skb->data;
40705 sub = fiph->fip_subcode;
40706 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40707diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40708index 71c7bbe..e93088a 100644
40709--- a/drivers/scsi/fnic/fnic_main.c
40710+++ b/drivers/scsi/fnic/fnic_main.c
40711@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40712 /* Start local port initiatialization */
40713
40714 lp->link_up = 0;
40715- lp->tt = fnic_transport_template;
40716+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40717
40718 lp->max_retry_count = fnic->config.flogi_retries;
40719 lp->max_rport_retry_count = fnic->config.plogi_retries;
40720diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40721index bb96d74..9ec3ce4 100644
40722--- a/drivers/scsi/gdth.c
40723+++ b/drivers/scsi/gdth.c
40724@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40725 ulong flags;
40726 gdth_ha_str *ha;
40727
40728+ pax_track_stack();
40729+
40730 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40731 return -EFAULT;
40732 ha = gdth_find_ha(ldrv.ionode);
40733@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40734 gdth_ha_str *ha;
40735 int rval;
40736
40737+ pax_track_stack();
40738+
40739 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40740 res.number >= MAX_HDRIVES)
40741 return -EFAULT;
40742@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40743 gdth_ha_str *ha;
40744 int rval;
40745
40746+ pax_track_stack();
40747+
40748 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40749 return -EFAULT;
40750 ha = gdth_find_ha(gen.ionode);
40751@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40752 int i;
40753 gdth_cmd_str gdtcmd;
40754 char cmnd[MAX_COMMAND_SIZE];
40755+
40756+ pax_track_stack();
40757+
40758 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40759
40760 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40761diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40762index 1258da3..20d8ae6 100644
40763--- a/drivers/scsi/gdth_proc.c
40764+++ b/drivers/scsi/gdth_proc.c
40765@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40766 ulong64 paddr;
40767
40768 char cmnd[MAX_COMMAND_SIZE];
40769+
40770+ pax_track_stack();
40771+
40772 memset(cmnd, 0xff, 12);
40773 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40774
40775@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
40776 gdth_hget_str *phg;
40777 char cmnd[MAX_COMMAND_SIZE];
40778
40779+ pax_track_stack();
40780+
40781 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
40782 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
40783 if (!gdtcmd || !estr)
40784diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40785index d03a926..f324286 100644
40786--- a/drivers/scsi/hosts.c
40787+++ b/drivers/scsi/hosts.c
40788@@ -40,7 +40,7 @@
40789 #include "scsi_logging.h"
40790
40791
40792-static atomic_t scsi_host_next_hn; /* host_no for next new host */
40793+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
40794
40795
40796 static void scsi_host_cls_release(struct device *dev)
40797@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40798 * subtract one because we increment first then return, but we need to
40799 * know what the next host number was before increment
40800 */
40801- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40802+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40803 shost->dma_channel = 0xff;
40804
40805 /* These three are default values which can be overridden */
40806diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
40807index a601159..55e19d2 100644
40808--- a/drivers/scsi/ipr.c
40809+++ b/drivers/scsi/ipr.c
40810@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
40811 return true;
40812 }
40813
40814-static struct ata_port_operations ipr_sata_ops = {
40815+static const struct ata_port_operations ipr_sata_ops = {
40816 .phy_reset = ipr_ata_phy_reset,
40817 .hardreset = ipr_sata_reset,
40818 .post_internal_cmd = ipr_ata_post_internal,
40819diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
40820index 4e49fbc..97907ff 100644
40821--- a/drivers/scsi/ips.h
40822+++ b/drivers/scsi/ips.h
40823@@ -1027,7 +1027,7 @@ typedef struct {
40824 int (*intr)(struct ips_ha *);
40825 void (*enableint)(struct ips_ha *);
40826 uint32_t (*statupd)(struct ips_ha *);
40827-} ips_hw_func_t;
40828+} __no_const ips_hw_func_t;
40829
40830 typedef struct ips_ha {
40831 uint8_t ha_id[IPS_MAX_CHANNELS+1];
40832diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40833index c1c1574..a9c9348 100644
40834--- a/drivers/scsi/libfc/fc_exch.c
40835+++ b/drivers/scsi/libfc/fc_exch.c
40836@@ -86,12 +86,12 @@ struct fc_exch_mgr {
40837 * all together if not used XXX
40838 */
40839 struct {
40840- atomic_t no_free_exch;
40841- atomic_t no_free_exch_xid;
40842- atomic_t xid_not_found;
40843- atomic_t xid_busy;
40844- atomic_t seq_not_found;
40845- atomic_t non_bls_resp;
40846+ atomic_unchecked_t no_free_exch;
40847+ atomic_unchecked_t no_free_exch_xid;
40848+ atomic_unchecked_t xid_not_found;
40849+ atomic_unchecked_t xid_busy;
40850+ atomic_unchecked_t seq_not_found;
40851+ atomic_unchecked_t non_bls_resp;
40852 } stats;
40853 };
40854 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
40855@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40856 /* allocate memory for exchange */
40857 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40858 if (!ep) {
40859- atomic_inc(&mp->stats.no_free_exch);
40860+ atomic_inc_unchecked(&mp->stats.no_free_exch);
40861 goto out;
40862 }
40863 memset(ep, 0, sizeof(*ep));
40864@@ -557,7 +557,7 @@ out:
40865 return ep;
40866 err:
40867 spin_unlock_bh(&pool->lock);
40868- atomic_inc(&mp->stats.no_free_exch_xid);
40869+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40870 mempool_free(ep, mp->ep_pool);
40871 return NULL;
40872 }
40873@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40874 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40875 ep = fc_exch_find(mp, xid);
40876 if (!ep) {
40877- atomic_inc(&mp->stats.xid_not_found);
40878+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40879 reject = FC_RJT_OX_ID;
40880 goto out;
40881 }
40882@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40883 ep = fc_exch_find(mp, xid);
40884 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40885 if (ep) {
40886- atomic_inc(&mp->stats.xid_busy);
40887+ atomic_inc_unchecked(&mp->stats.xid_busy);
40888 reject = FC_RJT_RX_ID;
40889 goto rel;
40890 }
40891@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40892 }
40893 xid = ep->xid; /* get our XID */
40894 } else if (!ep) {
40895- atomic_inc(&mp->stats.xid_not_found);
40896+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40897 reject = FC_RJT_RX_ID; /* XID not found */
40898 goto out;
40899 }
40900@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40901 } else {
40902 sp = &ep->seq;
40903 if (sp->id != fh->fh_seq_id) {
40904- atomic_inc(&mp->stats.seq_not_found);
40905+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40906 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
40907 goto rel;
40908 }
40909@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40910
40911 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
40912 if (!ep) {
40913- atomic_inc(&mp->stats.xid_not_found);
40914+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40915 goto out;
40916 }
40917 if (ep->esb_stat & ESB_ST_COMPLETE) {
40918- atomic_inc(&mp->stats.xid_not_found);
40919+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40920 goto out;
40921 }
40922 if (ep->rxid == FC_XID_UNKNOWN)
40923 ep->rxid = ntohs(fh->fh_rx_id);
40924 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
40925- atomic_inc(&mp->stats.xid_not_found);
40926+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40927 goto rel;
40928 }
40929 if (ep->did != ntoh24(fh->fh_s_id) &&
40930 ep->did != FC_FID_FLOGI) {
40931- atomic_inc(&mp->stats.xid_not_found);
40932+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40933 goto rel;
40934 }
40935 sof = fr_sof(fp);
40936@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40937 } else {
40938 sp = &ep->seq;
40939 if (sp->id != fh->fh_seq_id) {
40940- atomic_inc(&mp->stats.seq_not_found);
40941+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40942 goto rel;
40943 }
40944 }
40945@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40946 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
40947
40948 if (!sp)
40949- atomic_inc(&mp->stats.xid_not_found);
40950+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40951 else
40952- atomic_inc(&mp->stats.non_bls_resp);
40953+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
40954
40955 fc_frame_free(fp);
40956 }
40957diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
40958index 0ee989f..a582241 100644
40959--- a/drivers/scsi/libsas/sas_ata.c
40960+++ b/drivers/scsi/libsas/sas_ata.c
40961@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
40962 }
40963 }
40964
40965-static struct ata_port_operations sas_sata_ops = {
40966+static const struct ata_port_operations sas_sata_ops = {
40967 .phy_reset = sas_ata_phy_reset,
40968 .post_internal_cmd = sas_ata_post_internal,
40969 .qc_defer = ata_std_qc_defer,
40970diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
40971index aa10f79..5cc79e4 100644
40972--- a/drivers/scsi/lpfc/lpfc.h
40973+++ b/drivers/scsi/lpfc/lpfc.h
40974@@ -400,7 +400,7 @@ struct lpfc_vport {
40975 struct dentry *debug_nodelist;
40976 struct dentry *vport_debugfs_root;
40977 struct lpfc_debugfs_trc *disc_trc;
40978- atomic_t disc_trc_cnt;
40979+ atomic_unchecked_t disc_trc_cnt;
40980 #endif
40981 uint8_t stat_data_enabled;
40982 uint8_t stat_data_blocked;
40983@@ -725,8 +725,8 @@ struct lpfc_hba {
40984 struct timer_list fabric_block_timer;
40985 unsigned long bit_flags;
40986 #define FABRIC_COMANDS_BLOCKED 0
40987- atomic_t num_rsrc_err;
40988- atomic_t num_cmd_success;
40989+ atomic_unchecked_t num_rsrc_err;
40990+ atomic_unchecked_t num_cmd_success;
40991 unsigned long last_rsrc_error_time;
40992 unsigned long last_ramp_down_time;
40993 unsigned long last_ramp_up_time;
40994@@ -740,7 +740,7 @@ struct lpfc_hba {
40995 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
40996 struct dentry *debug_slow_ring_trc;
40997 struct lpfc_debugfs_trc *slow_ring_trc;
40998- atomic_t slow_ring_trc_cnt;
40999+ atomic_unchecked_t slow_ring_trc_cnt;
41000 #endif
41001
41002 /* Used for deferred freeing of ELS data buffers */
41003diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
41004index 8d0f0de..7c77a62 100644
41005--- a/drivers/scsi/lpfc/lpfc_debugfs.c
41006+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
41007@@ -124,7 +124,7 @@ struct lpfc_debug {
41008 int len;
41009 };
41010
41011-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41012+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
41013 static unsigned long lpfc_debugfs_start_time = 0L;
41014
41015 /**
41016@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
41017 lpfc_debugfs_enable = 0;
41018
41019 len = 0;
41020- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41021+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41022 (lpfc_debugfs_max_disc_trc - 1);
41023 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41024 dtp = vport->disc_trc + i;
41025@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41026 lpfc_debugfs_enable = 0;
41027
41028 len = 0;
41029- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41030+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41031 (lpfc_debugfs_max_slow_ring_trc - 1);
41032 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41033 dtp = phba->slow_ring_trc + i;
41034@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41035 uint32_t *ptr;
41036 char buffer[1024];
41037
41038+ pax_track_stack();
41039+
41040 off = 0;
41041 spin_lock_irq(&phba->hbalock);
41042
41043@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41044 !vport || !vport->disc_trc)
41045 return;
41046
41047- index = atomic_inc_return(&vport->disc_trc_cnt) &
41048+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41049 (lpfc_debugfs_max_disc_trc - 1);
41050 dtp = vport->disc_trc + index;
41051 dtp->fmt = fmt;
41052 dtp->data1 = data1;
41053 dtp->data2 = data2;
41054 dtp->data3 = data3;
41055- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41056+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41057 dtp->jif = jiffies;
41058 #endif
41059 return;
41060@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41061 !phba || !phba->slow_ring_trc)
41062 return;
41063
41064- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41065+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41066 (lpfc_debugfs_max_slow_ring_trc - 1);
41067 dtp = phba->slow_ring_trc + index;
41068 dtp->fmt = fmt;
41069 dtp->data1 = data1;
41070 dtp->data2 = data2;
41071 dtp->data3 = data3;
41072- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41073+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41074 dtp->jif = jiffies;
41075 #endif
41076 return;
41077@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41078 "slow_ring buffer\n");
41079 goto debug_failed;
41080 }
41081- atomic_set(&phba->slow_ring_trc_cnt, 0);
41082+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41083 memset(phba->slow_ring_trc, 0,
41084 (sizeof(struct lpfc_debugfs_trc) *
41085 lpfc_debugfs_max_slow_ring_trc));
41086@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41087 "buffer\n");
41088 goto debug_failed;
41089 }
41090- atomic_set(&vport->disc_trc_cnt, 0);
41091+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41092
41093 snprintf(name, sizeof(name), "discovery_trace");
41094 vport->debug_disc_trc =
41095diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41096index 549bc7d..8189dbb 100644
41097--- a/drivers/scsi/lpfc/lpfc_init.c
41098+++ b/drivers/scsi/lpfc/lpfc_init.c
41099@@ -8021,8 +8021,10 @@ lpfc_init(void)
41100 printk(LPFC_COPYRIGHT "\n");
41101
41102 if (lpfc_enable_npiv) {
41103- lpfc_transport_functions.vport_create = lpfc_vport_create;
41104- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41105+ pax_open_kernel();
41106+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41107+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41108+ pax_close_kernel();
41109 }
41110 lpfc_transport_template =
41111 fc_attach_transport(&lpfc_transport_functions);
41112diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41113index c88f59f..ff2a42f 100644
41114--- a/drivers/scsi/lpfc/lpfc_scsi.c
41115+++ b/drivers/scsi/lpfc/lpfc_scsi.c
41116@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41117 uint32_t evt_posted;
41118
41119 spin_lock_irqsave(&phba->hbalock, flags);
41120- atomic_inc(&phba->num_rsrc_err);
41121+ atomic_inc_unchecked(&phba->num_rsrc_err);
41122 phba->last_rsrc_error_time = jiffies;
41123
41124 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41125@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41126 unsigned long flags;
41127 struct lpfc_hba *phba = vport->phba;
41128 uint32_t evt_posted;
41129- atomic_inc(&phba->num_cmd_success);
41130+ atomic_inc_unchecked(&phba->num_cmd_success);
41131
41132 if (vport->cfg_lun_queue_depth <= queue_depth)
41133 return;
41134@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41135 int i;
41136 struct lpfc_rport_data *rdata;
41137
41138- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41139- num_cmd_success = atomic_read(&phba->num_cmd_success);
41140+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41141+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41142
41143 vports = lpfc_create_vport_work_array(phba);
41144 if (vports != NULL)
41145@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41146 }
41147 }
41148 lpfc_destroy_vport_work_array(phba, vports);
41149- atomic_set(&phba->num_rsrc_err, 0);
41150- atomic_set(&phba->num_cmd_success, 0);
41151+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41152+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41153 }
41154
41155 /**
41156@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41157 }
41158 }
41159 lpfc_destroy_vport_work_array(phba, vports);
41160- atomic_set(&phba->num_rsrc_err, 0);
41161- atomic_set(&phba->num_cmd_success, 0);
41162+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41163+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41164 }
41165
41166 /**
41167diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41168index 234f0b7..3020aea 100644
41169--- a/drivers/scsi/megaraid/megaraid_mbox.c
41170+++ b/drivers/scsi/megaraid/megaraid_mbox.c
41171@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41172 int rval;
41173 int i;
41174
41175+ pax_track_stack();
41176+
41177 // Allocate memory for the base list of scb for management module.
41178 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41179
41180diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41181index 7a117c1..ee01e9e 100644
41182--- a/drivers/scsi/osd/osd_initiator.c
41183+++ b/drivers/scsi/osd/osd_initiator.c
41184@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41185 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41186 int ret;
41187
41188+ pax_track_stack();
41189+
41190 or = osd_start_request(od, GFP_KERNEL);
41191 if (!or)
41192 return -ENOMEM;
41193diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41194index 9ab8c86..9425ad3 100644
41195--- a/drivers/scsi/pmcraid.c
41196+++ b/drivers/scsi/pmcraid.c
41197@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41198 res->scsi_dev = scsi_dev;
41199 scsi_dev->hostdata = res;
41200 res->change_detected = 0;
41201- atomic_set(&res->read_failures, 0);
41202- atomic_set(&res->write_failures, 0);
41203+ atomic_set_unchecked(&res->read_failures, 0);
41204+ atomic_set_unchecked(&res->write_failures, 0);
41205 rc = 0;
41206 }
41207 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41208@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41209
41210 /* If this was a SCSI read/write command keep count of errors */
41211 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41212- atomic_inc(&res->read_failures);
41213+ atomic_inc_unchecked(&res->read_failures);
41214 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41215- atomic_inc(&res->write_failures);
41216+ atomic_inc_unchecked(&res->write_failures);
41217
41218 if (!RES_IS_GSCSI(res->cfg_entry) &&
41219 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41220@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41221
41222 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41223 /* add resources only after host is added into system */
41224- if (!atomic_read(&pinstance->expose_resources))
41225+ if (!atomic_read_unchecked(&pinstance->expose_resources))
41226 return;
41227
41228 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41229@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41230 init_waitqueue_head(&pinstance->reset_wait_q);
41231
41232 atomic_set(&pinstance->outstanding_cmds, 0);
41233- atomic_set(&pinstance->expose_resources, 0);
41234+ atomic_set_unchecked(&pinstance->expose_resources, 0);
41235
41236 INIT_LIST_HEAD(&pinstance->free_res_q);
41237 INIT_LIST_HEAD(&pinstance->used_res_q);
41238@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41239 /* Schedule worker thread to handle CCN and take care of adding and
41240 * removing devices to OS
41241 */
41242- atomic_set(&pinstance->expose_resources, 1);
41243+ atomic_set_unchecked(&pinstance->expose_resources, 1);
41244 schedule_work(&pinstance->worker_q);
41245 return rc;
41246
41247diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41248index 3441b3f..6cbe8f7 100644
41249--- a/drivers/scsi/pmcraid.h
41250+++ b/drivers/scsi/pmcraid.h
41251@@ -690,7 +690,7 @@ struct pmcraid_instance {
41252 atomic_t outstanding_cmds;
41253
41254 /* should add/delete resources to mid-layer now ?*/
41255- atomic_t expose_resources;
41256+ atomic_unchecked_t expose_resources;
41257
41258 /* Tasklet to handle deferred processing */
41259 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41260@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41261 struct list_head queue; /* link to "to be exposed" resources */
41262 struct pmcraid_config_table_entry cfg_entry;
41263 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41264- atomic_t read_failures; /* count of failed READ commands */
41265- atomic_t write_failures; /* count of failed WRITE commands */
41266+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41267+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41268
41269 /* To indicate add/delete/modify during CCN */
41270 u8 change_detected;
41271diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41272index 2150618..7034215 100644
41273--- a/drivers/scsi/qla2xxx/qla_def.h
41274+++ b/drivers/scsi/qla2xxx/qla_def.h
41275@@ -2089,7 +2089,7 @@ struct isp_operations {
41276
41277 int (*get_flash_version) (struct scsi_qla_host *, void *);
41278 int (*start_scsi) (srb_t *);
41279-};
41280+} __no_const;
41281
41282 /* MSI-X Support *************************************************************/
41283
41284diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41285index 81b5f29..2ae1fad 100644
41286--- a/drivers/scsi/qla4xxx/ql4_def.h
41287+++ b/drivers/scsi/qla4xxx/ql4_def.h
41288@@ -240,7 +240,7 @@ struct ddb_entry {
41289 atomic_t retry_relogin_timer; /* Min Time between relogins
41290 * (4000 only) */
41291 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41292- atomic_t relogin_retry_count; /* Num of times relogin has been
41293+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41294 * retried */
41295
41296 uint16_t port;
41297diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41298index af8c323..515dd51 100644
41299--- a/drivers/scsi/qla4xxx/ql4_init.c
41300+++ b/drivers/scsi/qla4xxx/ql4_init.c
41301@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41302 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41303 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41304 atomic_set(&ddb_entry->relogin_timer, 0);
41305- atomic_set(&ddb_entry->relogin_retry_count, 0);
41306+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41307 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41308 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41309 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41310@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41311 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41312 atomic_set(&ddb_entry->port_down_timer,
41313 ha->port_down_retry_count);
41314- atomic_set(&ddb_entry->relogin_retry_count, 0);
41315+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41316 atomic_set(&ddb_entry->relogin_timer, 0);
41317 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41318 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41319diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41320index 83c8b5e..a82b348 100644
41321--- a/drivers/scsi/qla4xxx/ql4_os.c
41322+++ b/drivers/scsi/qla4xxx/ql4_os.c
41323@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41324 ddb_entry->fw_ddb_device_state ==
41325 DDB_DS_SESSION_FAILED) {
41326 /* Reset retry relogin timer */
41327- atomic_inc(&ddb_entry->relogin_retry_count);
41328+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41329 DEBUG2(printk("scsi%ld: index[%d] relogin"
41330 " timed out-retrying"
41331 " relogin (%d)\n",
41332 ha->host_no,
41333 ddb_entry->fw_ddb_index,
41334- atomic_read(&ddb_entry->
41335+ atomic_read_unchecked(&ddb_entry->
41336 relogin_retry_count))
41337 );
41338 start_dpc++;
41339diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41340index dd098ca..686ce01 100644
41341--- a/drivers/scsi/scsi.c
41342+++ b/drivers/scsi/scsi.c
41343@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41344 unsigned long timeout;
41345 int rtn = 0;
41346
41347- atomic_inc(&cmd->device->iorequest_cnt);
41348+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41349
41350 /* check if the device is still usable */
41351 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41352diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41353index bc3e363..e1a8e50 100644
41354--- a/drivers/scsi/scsi_debug.c
41355+++ b/drivers/scsi/scsi_debug.c
41356@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41357 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41358 unsigned char *cmd = (unsigned char *)scp->cmnd;
41359
41360+ pax_track_stack();
41361+
41362 if ((errsts = check_readiness(scp, 1, devip)))
41363 return errsts;
41364 memset(arr, 0, sizeof(arr));
41365@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41366 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41367 unsigned char *cmd = (unsigned char *)scp->cmnd;
41368
41369+ pax_track_stack();
41370+
41371 if ((errsts = check_readiness(scp, 1, devip)))
41372 return errsts;
41373 memset(arr, 0, sizeof(arr));
41374diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41375index 8df12522..c4c1472 100644
41376--- a/drivers/scsi/scsi_lib.c
41377+++ b/drivers/scsi/scsi_lib.c
41378@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41379 shost = sdev->host;
41380 scsi_init_cmd_errh(cmd);
41381 cmd->result = DID_NO_CONNECT << 16;
41382- atomic_inc(&cmd->device->iorequest_cnt);
41383+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41384
41385 /*
41386 * SCSI request completion path will do scsi_device_unbusy(),
41387@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41388 */
41389 cmd->serial_number = 0;
41390
41391- atomic_inc(&cmd->device->iodone_cnt);
41392+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41393 if (cmd->result)
41394- atomic_inc(&cmd->device->ioerr_cnt);
41395+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41396
41397 disposition = scsi_decide_disposition(cmd);
41398 if (disposition != SUCCESS &&
41399diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41400index 91a93e0..eae0fe3 100644
41401--- a/drivers/scsi/scsi_sysfs.c
41402+++ b/drivers/scsi/scsi_sysfs.c
41403@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41404 char *buf) \
41405 { \
41406 struct scsi_device *sdev = to_scsi_device(dev); \
41407- unsigned long long count = atomic_read(&sdev->field); \
41408+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41409 return snprintf(buf, 20, "0x%llx\n", count); \
41410 } \
41411 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41412diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41413index 1030327..f91fd30 100644
41414--- a/drivers/scsi/scsi_tgt_lib.c
41415+++ b/drivers/scsi/scsi_tgt_lib.c
41416@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41417 int err;
41418
41419 dprintk("%lx %u\n", uaddr, len);
41420- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41421+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41422 if (err) {
41423 /*
41424 * TODO: need to fixup sg_tablesize, max_segment_size,
41425diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41426index db02e31..1b42ea9 100644
41427--- a/drivers/scsi/scsi_transport_fc.c
41428+++ b/drivers/scsi/scsi_transport_fc.c
41429@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41430 * Netlink Infrastructure
41431 */
41432
41433-static atomic_t fc_event_seq;
41434+static atomic_unchecked_t fc_event_seq;
41435
41436 /**
41437 * fc_get_event_number - Obtain the next sequential FC event number
41438@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41439 u32
41440 fc_get_event_number(void)
41441 {
41442- return atomic_add_return(1, &fc_event_seq);
41443+ return atomic_add_return_unchecked(1, &fc_event_seq);
41444 }
41445 EXPORT_SYMBOL(fc_get_event_number);
41446
41447@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41448 {
41449 int error;
41450
41451- atomic_set(&fc_event_seq, 0);
41452+ atomic_set_unchecked(&fc_event_seq, 0);
41453
41454 error = transport_class_register(&fc_host_class);
41455 if (error)
41456diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41457index de2f8c4..63c5278 100644
41458--- a/drivers/scsi/scsi_transport_iscsi.c
41459+++ b/drivers/scsi/scsi_transport_iscsi.c
41460@@ -81,7 +81,7 @@ struct iscsi_internal {
41461 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41462 };
41463
41464-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41465+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41466 static struct workqueue_struct *iscsi_eh_timer_workq;
41467
41468 /*
41469@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41470 int err;
41471
41472 ihost = shost->shost_data;
41473- session->sid = atomic_add_return(1, &iscsi_session_nr);
41474+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41475
41476 if (id == ISCSI_MAX_TARGET) {
41477 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41478@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41479 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41480 ISCSI_TRANSPORT_VERSION);
41481
41482- atomic_set(&iscsi_session_nr, 0);
41483+ atomic_set_unchecked(&iscsi_session_nr, 0);
41484
41485 err = class_register(&iscsi_transport_class);
41486 if (err)
41487diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41488index 21a045e..ec89e03 100644
41489--- a/drivers/scsi/scsi_transport_srp.c
41490+++ b/drivers/scsi/scsi_transport_srp.c
41491@@ -33,7 +33,7 @@
41492 #include "scsi_transport_srp_internal.h"
41493
41494 struct srp_host_attrs {
41495- atomic_t next_port_id;
41496+ atomic_unchecked_t next_port_id;
41497 };
41498 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41499
41500@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41501 struct Scsi_Host *shost = dev_to_shost(dev);
41502 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41503
41504- atomic_set(&srp_host->next_port_id, 0);
41505+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41506 return 0;
41507 }
41508
41509@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41510 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41511 rport->roles = ids->roles;
41512
41513- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41514+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41515 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41516
41517 transport_setup_device(&rport->dev);
41518diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41519index 040f751..98a5ed2 100644
41520--- a/drivers/scsi/sg.c
41521+++ b/drivers/scsi/sg.c
41522@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41523 sdp->disk->disk_name,
41524 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41525 NULL,
41526- (char *)arg);
41527+ (char __user *)arg);
41528 case BLKTRACESTART:
41529 return blk_trace_startstop(sdp->device->request_queue, 1);
41530 case BLKTRACESTOP:
41531@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41532 const struct file_operations * fops;
41533 };
41534
41535-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41536+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41537 {"allow_dio", &adio_fops},
41538 {"debug", &debug_fops},
41539 {"def_reserved_size", &dressz_fops},
41540@@ -2307,7 +2307,7 @@ sg_proc_init(void)
41541 {
41542 int k, mask;
41543 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41544- struct sg_proc_leaf * leaf;
41545+ const struct sg_proc_leaf * leaf;
41546
41547 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41548 if (!sg_proc_sgp)
41549diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41550index c19ca5e..3eb5959 100644
41551--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41552+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41553@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41554 int do_iounmap = 0;
41555 int do_disable_device = 1;
41556
41557+ pax_track_stack();
41558+
41559 memset(&sym_dev, 0, sizeof(sym_dev));
41560 memset(&nvram, 0, sizeof(nvram));
41561 sym_dev.pdev = pdev;
41562diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41563index eadc1ab..2d81457 100644
41564--- a/drivers/serial/kgdboc.c
41565+++ b/drivers/serial/kgdboc.c
41566@@ -18,7 +18,7 @@
41567
41568 #define MAX_CONFIG_LEN 40
41569
41570-static struct kgdb_io kgdboc_io_ops;
41571+static const struct kgdb_io kgdboc_io_ops;
41572
41573 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41574 static int configured = -1;
41575@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41576 module_put(THIS_MODULE);
41577 }
41578
41579-static struct kgdb_io kgdboc_io_ops = {
41580+static const struct kgdb_io kgdboc_io_ops = {
41581 .name = "kgdboc",
41582 .read_char = kgdboc_get_char,
41583 .write_char = kgdboc_put_char,
41584diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41585index b76f246..7f41af7 100644
41586--- a/drivers/spi/spi.c
41587+++ b/drivers/spi/spi.c
41588@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41589 EXPORT_SYMBOL_GPL(spi_sync);
41590
41591 /* portable code must never pass more than 32 bytes */
41592-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41593+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41594
41595 static u8 *buf;
41596
41597diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41598index b9b37ff..19dfa23 100644
41599--- a/drivers/staging/android/binder.c
41600+++ b/drivers/staging/android/binder.c
41601@@ -2761,7 +2761,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41602 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41603 }
41604
41605-static struct vm_operations_struct binder_vm_ops = {
41606+static const struct vm_operations_struct binder_vm_ops = {
41607 .open = binder_vma_open,
41608 .close = binder_vma_close,
41609 };
41610diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41611index cda26bb..39fed3f 100644
41612--- a/drivers/staging/b3dfg/b3dfg.c
41613+++ b/drivers/staging/b3dfg/b3dfg.c
41614@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41615 return VM_FAULT_NOPAGE;
41616 }
41617
41618-static struct vm_operations_struct b3dfg_vm_ops = {
41619+static const struct vm_operations_struct b3dfg_vm_ops = {
41620 .fault = b3dfg_vma_fault,
41621 };
41622
41623@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41624 return r;
41625 }
41626
41627-static struct file_operations b3dfg_fops = {
41628+static const struct file_operations b3dfg_fops = {
41629 .owner = THIS_MODULE,
41630 .open = b3dfg_open,
41631 .release = b3dfg_release,
41632diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41633index 908f25a..c9a579b 100644
41634--- a/drivers/staging/comedi/comedi_fops.c
41635+++ b/drivers/staging/comedi/comedi_fops.c
41636@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41637 mutex_unlock(&dev->mutex);
41638 }
41639
41640-static struct vm_operations_struct comedi_vm_ops = {
41641+static const struct vm_operations_struct comedi_vm_ops = {
41642 .close = comedi_unmap,
41643 };
41644
41645diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41646index e55a0db..577b776 100644
41647--- a/drivers/staging/dream/qdsp5/adsp_driver.c
41648+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41649@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41650 static dev_t adsp_devno;
41651 static struct class *adsp_class;
41652
41653-static struct file_operations adsp_fops = {
41654+static const struct file_operations adsp_fops = {
41655 .owner = THIS_MODULE,
41656 .open = adsp_open,
41657 .unlocked_ioctl = adsp_ioctl,
41658diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41659index ad2390f..4116ee8 100644
41660--- a/drivers/staging/dream/qdsp5/audio_aac.c
41661+++ b/drivers/staging/dream/qdsp5/audio_aac.c
41662@@ -1022,7 +1022,7 @@ done:
41663 return rc;
41664 }
41665
41666-static struct file_operations audio_aac_fops = {
41667+static const struct file_operations audio_aac_fops = {
41668 .owner = THIS_MODULE,
41669 .open = audio_open,
41670 .release = audio_release,
41671diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41672index cd818a5..870b37b 100644
41673--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41674+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41675@@ -833,7 +833,7 @@ done:
41676 return rc;
41677 }
41678
41679-static struct file_operations audio_amrnb_fops = {
41680+static const struct file_operations audio_amrnb_fops = {
41681 .owner = THIS_MODULE,
41682 .open = audamrnb_open,
41683 .release = audamrnb_release,
41684diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41685index 4b43e18..cedafda 100644
41686--- a/drivers/staging/dream/qdsp5/audio_evrc.c
41687+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41688@@ -805,7 +805,7 @@ dma_fail:
41689 return rc;
41690 }
41691
41692-static struct file_operations audio_evrc_fops = {
41693+static const struct file_operations audio_evrc_fops = {
41694 .owner = THIS_MODULE,
41695 .open = audevrc_open,
41696 .release = audevrc_release,
41697diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41698index 3d950a2..9431118 100644
41699--- a/drivers/staging/dream/qdsp5/audio_in.c
41700+++ b/drivers/staging/dream/qdsp5/audio_in.c
41701@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41702 return 0;
41703 }
41704
41705-static struct file_operations audio_fops = {
41706+static const struct file_operations audio_fops = {
41707 .owner = THIS_MODULE,
41708 .open = audio_in_open,
41709 .release = audio_in_release,
41710@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41711 .unlocked_ioctl = audio_in_ioctl,
41712 };
41713
41714-static struct file_operations audpre_fops = {
41715+static const struct file_operations audpre_fops = {
41716 .owner = THIS_MODULE,
41717 .open = audpre_open,
41718 .unlocked_ioctl = audpre_ioctl,
41719diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41720index b95574f..286c2f4 100644
41721--- a/drivers/staging/dream/qdsp5/audio_mp3.c
41722+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41723@@ -941,7 +941,7 @@ done:
41724 return rc;
41725 }
41726
41727-static struct file_operations audio_mp3_fops = {
41728+static const struct file_operations audio_mp3_fops = {
41729 .owner = THIS_MODULE,
41730 .open = audio_open,
41731 .release = audio_release,
41732diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41733index d1adcf6..f8f9833 100644
41734--- a/drivers/staging/dream/qdsp5/audio_out.c
41735+++ b/drivers/staging/dream/qdsp5/audio_out.c
41736@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41737 return 0;
41738 }
41739
41740-static struct file_operations audio_fops = {
41741+static const struct file_operations audio_fops = {
41742 .owner = THIS_MODULE,
41743 .open = audio_open,
41744 .release = audio_release,
41745@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41746 .unlocked_ioctl = audio_ioctl,
41747 };
41748
41749-static struct file_operations audpp_fops = {
41750+static const struct file_operations audpp_fops = {
41751 .owner = THIS_MODULE,
41752 .open = audpp_open,
41753 .unlocked_ioctl = audpp_ioctl,
41754diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41755index f0f50e3..f6b9dbc 100644
41756--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41757+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41758@@ -816,7 +816,7 @@ err:
41759 return rc;
41760 }
41761
41762-static struct file_operations audio_qcelp_fops = {
41763+static const struct file_operations audio_qcelp_fops = {
41764 .owner = THIS_MODULE,
41765 .open = audqcelp_open,
41766 .release = audqcelp_release,
41767diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41768index 037d7ff..5469ec3 100644
41769--- a/drivers/staging/dream/qdsp5/snd.c
41770+++ b/drivers/staging/dream/qdsp5/snd.c
41771@@ -242,7 +242,7 @@ err:
41772 return rc;
41773 }
41774
41775-static struct file_operations snd_fops = {
41776+static const struct file_operations snd_fops = {
41777 .owner = THIS_MODULE,
41778 .open = snd_open,
41779 .release = snd_release,
41780diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
41781index d4e7d88..0ea632a 100644
41782--- a/drivers/staging/dream/smd/smd_qmi.c
41783+++ b/drivers/staging/dream/smd/smd_qmi.c
41784@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
41785 return 0;
41786 }
41787
41788-static struct file_operations qmi_fops = {
41789+static const struct file_operations qmi_fops = {
41790 .owner = THIS_MODULE,
41791 .read = qmi_read,
41792 .write = qmi_write,
41793diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41794index cd3910b..ff053d3 100644
41795--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
41796+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41797@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
41798 return rc;
41799 }
41800
41801-static struct file_operations rpcrouter_server_fops = {
41802+static const struct file_operations rpcrouter_server_fops = {
41803 .owner = THIS_MODULE,
41804 .open = rpcrouter_open,
41805 .release = rpcrouter_release,
41806@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
41807 .unlocked_ioctl = rpcrouter_ioctl,
41808 };
41809
41810-static struct file_operations rpcrouter_router_fops = {
41811+static const struct file_operations rpcrouter_router_fops = {
41812 .owner = THIS_MODULE,
41813 .open = rpcrouter_open,
41814 .release = rpcrouter_release,
41815diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
41816index c24e4e0..07665be 100644
41817--- a/drivers/staging/dst/dcore.c
41818+++ b/drivers/staging/dst/dcore.c
41819@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
41820 return 0;
41821 }
41822
41823-static struct block_device_operations dst_blk_ops = {
41824+static const struct block_device_operations dst_blk_ops = {
41825 .open = dst_bdev_open,
41826 .release = dst_bdev_release,
41827 .owner = THIS_MODULE,
41828@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
41829 n->size = ctl->size;
41830
41831 atomic_set(&n->refcnt, 1);
41832- atomic_long_set(&n->gen, 0);
41833+ atomic_long_set_unchecked(&n->gen, 0);
41834 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
41835
41836 err = dst_node_sysfs_init(n);
41837diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
41838index 557d372..8d84422 100644
41839--- a/drivers/staging/dst/trans.c
41840+++ b/drivers/staging/dst/trans.c
41841@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
41842 t->error = 0;
41843 t->retries = 0;
41844 atomic_set(&t->refcnt, 1);
41845- t->gen = atomic_long_inc_return(&n->gen);
41846+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
41847
41848 t->enc = bio_data_dir(bio);
41849 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
41850diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
41851index 94f7752..d051514 100644
41852--- a/drivers/staging/et131x/et1310_tx.c
41853+++ b/drivers/staging/et131x/et1310_tx.c
41854@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
41855 struct net_device_stats *stats = &etdev->net_stats;
41856
41857 if (pMpTcb->Flags & fMP_DEST_BROAD)
41858- atomic_inc(&etdev->Stats.brdcstxmt);
41859+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
41860 else if (pMpTcb->Flags & fMP_DEST_MULTI)
41861- atomic_inc(&etdev->Stats.multixmt);
41862+ atomic_inc_unchecked(&etdev->Stats.multixmt);
41863 else
41864- atomic_inc(&etdev->Stats.unixmt);
41865+ atomic_inc_unchecked(&etdev->Stats.unixmt);
41866
41867 if (pMpTcb->Packet) {
41868 stats->tx_bytes += pMpTcb->Packet->len;
41869diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
41870index 1dfe06f..f469b4d 100644
41871--- a/drivers/staging/et131x/et131x_adapter.h
41872+++ b/drivers/staging/et131x/et131x_adapter.h
41873@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
41874 * operations
41875 */
41876 u32 unircv; /* # multicast packets received */
41877- atomic_t unixmt; /* # multicast packets for Tx */
41878+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
41879 u32 multircv; /* # multicast packets received */
41880- atomic_t multixmt; /* # multicast packets for Tx */
41881+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
41882 u32 brdcstrcv; /* # broadcast packets received */
41883- atomic_t brdcstxmt; /* # broadcast packets for Tx */
41884+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
41885 u32 norcvbuf; /* # Rx packets discarded */
41886 u32 noxmtbuf; /* # Tx packets discarded */
41887
41888diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
41889index 4bd353a..e28f455 100644
41890--- a/drivers/staging/go7007/go7007-v4l2.c
41891+++ b/drivers/staging/go7007/go7007-v4l2.c
41892@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41893 return 0;
41894 }
41895
41896-static struct vm_operations_struct go7007_vm_ops = {
41897+static const struct vm_operations_struct go7007_vm_ops = {
41898 .open = go7007_vm_open,
41899 .close = go7007_vm_close,
41900 .fault = go7007_vm_fault,
41901diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
41902index 366dc95..b974d87 100644
41903--- a/drivers/staging/hv/Channel.c
41904+++ b/drivers/staging/hv/Channel.c
41905@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
41906
41907 DPRINT_ENTER(VMBUS);
41908
41909- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
41910- atomic_inc(&gVmbusConnection.NextGpadlHandle);
41911+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
41912+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
41913
41914 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
41915 ASSERT(msgInfo != NULL);
41916diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
41917index b12237f..01ae28a 100644
41918--- a/drivers/staging/hv/Hv.c
41919+++ b/drivers/staging/hv/Hv.c
41920@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
41921 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
41922 u32 outputAddressHi = outputAddress >> 32;
41923 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
41924- volatile void *hypercallPage = gHvContext.HypercallPage;
41925+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
41926
41927 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
41928 Control, Input, Output);
41929diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
41930index d089bb1..2ebc158 100644
41931--- a/drivers/staging/hv/VmbusApi.h
41932+++ b/drivers/staging/hv/VmbusApi.h
41933@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
41934 u32 *GpadlHandle);
41935 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
41936 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
41937-};
41938+} __no_const;
41939
41940 /* Base driver object */
41941 struct hv_driver {
41942diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
41943index 5a37cce..6ecc88c 100644
41944--- a/drivers/staging/hv/VmbusPrivate.h
41945+++ b/drivers/staging/hv/VmbusPrivate.h
41946@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
41947 struct VMBUS_CONNECTION {
41948 enum VMBUS_CONNECT_STATE ConnectState;
41949
41950- atomic_t NextGpadlHandle;
41951+ atomic_unchecked_t NextGpadlHandle;
41952
41953 /*
41954 * Represents channel interrupts. Each bit position represents a
41955diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
41956index 871a202..ca50ddf 100644
41957--- a/drivers/staging/hv/blkvsc_drv.c
41958+++ b/drivers/staging/hv/blkvsc_drv.c
41959@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
41960 /* The one and only one */
41961 static struct blkvsc_driver_context g_blkvsc_drv;
41962
41963-static struct block_device_operations block_ops = {
41964+static const struct block_device_operations block_ops = {
41965 .owner = THIS_MODULE,
41966 .open = blkvsc_open,
41967 .release = blkvsc_release,
41968diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
41969index 6acc49a..fbc8d46 100644
41970--- a/drivers/staging/hv/vmbus_drv.c
41971+++ b/drivers/staging/hv/vmbus_drv.c
41972@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41973 to_device_context(root_device_obj);
41974 struct device_context *child_device_ctx =
41975 to_device_context(child_device_obj);
41976- static atomic_t device_num = ATOMIC_INIT(0);
41977+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41978
41979 DPRINT_ENTER(VMBUS_DRV);
41980
41981@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41982
41983 /* Set the device name. Otherwise, device_register() will fail. */
41984 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
41985- atomic_inc_return(&device_num));
41986+ atomic_inc_return_unchecked(&device_num));
41987
41988 /* The new device belongs to this bus */
41989 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
41990diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
41991index d926189..17b19fd 100644
41992--- a/drivers/staging/iio/ring_generic.h
41993+++ b/drivers/staging/iio/ring_generic.h
41994@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
41995
41996 int (*is_enabled)(struct iio_ring_buffer *ring);
41997 int (*enable)(struct iio_ring_buffer *ring);
41998-};
41999+} __no_const;
42000
42001 /**
42002 * struct iio_ring_buffer - general ring buffer structure
42003diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
42004index 1b237b7..88c624e 100644
42005--- a/drivers/staging/octeon/ethernet-rx.c
42006+++ b/drivers/staging/octeon/ethernet-rx.c
42007@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42008 /* Increment RX stats for virtual ports */
42009 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
42010 #ifdef CONFIG_64BIT
42011- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
42012- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
42013+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
42014+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
42015 #else
42016- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
42017- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
42018+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
42019+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42020 #endif
42021 }
42022 netif_receive_skb(skb);
42023@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42024 dev->name);
42025 */
42026 #ifdef CONFIG_64BIT
42027- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42028+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42029 #else
42030- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42031+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42032 #endif
42033 dev_kfree_skb_irq(skb);
42034 }
42035diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42036index 492c502..d9909f1 100644
42037--- a/drivers/staging/octeon/ethernet.c
42038+++ b/drivers/staging/octeon/ethernet.c
42039@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42040 * since the RX tasklet also increments it.
42041 */
42042 #ifdef CONFIG_64BIT
42043- atomic64_add(rx_status.dropped_packets,
42044- (atomic64_t *)&priv->stats.rx_dropped);
42045+ atomic64_add_unchecked(rx_status.dropped_packets,
42046+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42047 #else
42048- atomic_add(rx_status.dropped_packets,
42049- (atomic_t *)&priv->stats.rx_dropped);
42050+ atomic_add_unchecked(rx_status.dropped_packets,
42051+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
42052 #endif
42053 }
42054
42055diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42056index a35bd5d..28fff45 100644
42057--- a/drivers/staging/otus/80211core/pub_zfi.h
42058+++ b/drivers/staging/otus/80211core/pub_zfi.h
42059@@ -531,7 +531,7 @@ struct zsCbFuncTbl
42060 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42061
42062 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42063-};
42064+} __no_const;
42065
42066 extern void zfZeroMemory(u8_t* va, u16_t length);
42067 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42068diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42069index c39a25f..696f5aa 100644
42070--- a/drivers/staging/panel/panel.c
42071+++ b/drivers/staging/panel/panel.c
42072@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42073 return 0;
42074 }
42075
42076-static struct file_operations lcd_fops = {
42077+static const struct file_operations lcd_fops = {
42078 .write = lcd_write,
42079 .open = lcd_open,
42080 .release = lcd_release,
42081@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42082 return 0;
42083 }
42084
42085-static struct file_operations keypad_fops = {
42086+static const struct file_operations keypad_fops = {
42087 .read = keypad_read, /* read */
42088 .open = keypad_open, /* open */
42089 .release = keypad_release, /* close */
42090diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42091index 270ebcb..37e46af 100644
42092--- a/drivers/staging/phison/phison.c
42093+++ b/drivers/staging/phison/phison.c
42094@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42095 ATA_BMDMA_SHT(DRV_NAME),
42096 };
42097
42098-static struct ata_port_operations phison_ops = {
42099+static const struct ata_port_operations phison_ops = {
42100 .inherits = &ata_bmdma_port_ops,
42101 .prereset = phison_pre_reset,
42102 };
42103diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42104index 2eb8e3d..57616a7 100644
42105--- a/drivers/staging/poch/poch.c
42106+++ b/drivers/staging/poch/poch.c
42107@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42108 return 0;
42109 }
42110
42111-static struct file_operations poch_fops = {
42112+static const struct file_operations poch_fops = {
42113 .owner = THIS_MODULE,
42114 .open = poch_open,
42115 .release = poch_release,
42116diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42117index c94de31..19402bc 100644
42118--- a/drivers/staging/pohmelfs/inode.c
42119+++ b/drivers/staging/pohmelfs/inode.c
42120@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42121 mutex_init(&psb->mcache_lock);
42122 psb->mcache_root = RB_ROOT;
42123 psb->mcache_timeout = msecs_to_jiffies(5000);
42124- atomic_long_set(&psb->mcache_gen, 0);
42125+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
42126
42127 psb->trans_max_pages = 100;
42128
42129@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42130 INIT_LIST_HEAD(&psb->crypto_ready_list);
42131 INIT_LIST_HEAD(&psb->crypto_active_list);
42132
42133- atomic_set(&psb->trans_gen, 1);
42134+ atomic_set_unchecked(&psb->trans_gen, 1);
42135 atomic_long_set(&psb->total_inodes, 0);
42136
42137 mutex_init(&psb->state_lock);
42138diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42139index e22665c..a2a9390 100644
42140--- a/drivers/staging/pohmelfs/mcache.c
42141+++ b/drivers/staging/pohmelfs/mcache.c
42142@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42143 m->data = data;
42144 m->start = start;
42145 m->size = size;
42146- m->gen = atomic_long_inc_return(&psb->mcache_gen);
42147+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42148
42149 mutex_lock(&psb->mcache_lock);
42150 err = pohmelfs_mcache_insert(psb, m);
42151diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42152index 623a07d..4035c19 100644
42153--- a/drivers/staging/pohmelfs/netfs.h
42154+++ b/drivers/staging/pohmelfs/netfs.h
42155@@ -570,14 +570,14 @@ struct pohmelfs_config;
42156 struct pohmelfs_sb {
42157 struct rb_root mcache_root;
42158 struct mutex mcache_lock;
42159- atomic_long_t mcache_gen;
42160+ atomic_long_unchecked_t mcache_gen;
42161 unsigned long mcache_timeout;
42162
42163 unsigned int idx;
42164
42165 unsigned int trans_retries;
42166
42167- atomic_t trans_gen;
42168+ atomic_unchecked_t trans_gen;
42169
42170 unsigned int crypto_attached_size;
42171 unsigned int crypto_align_size;
42172diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42173index 36a2535..0591bf4 100644
42174--- a/drivers/staging/pohmelfs/trans.c
42175+++ b/drivers/staging/pohmelfs/trans.c
42176@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42177 int err;
42178 struct netfs_cmd *cmd = t->iovec.iov_base;
42179
42180- t->gen = atomic_inc_return(&psb->trans_gen);
42181+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42182
42183 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42184 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42185diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42186index f890a16..509ece8 100644
42187--- a/drivers/staging/sep/sep_driver.c
42188+++ b/drivers/staging/sep/sep_driver.c
42189@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42190 static dev_t sep_devno;
42191
42192 /* the files operations structure of the driver */
42193-static struct file_operations sep_file_operations = {
42194+static const struct file_operations sep_file_operations = {
42195 .owner = THIS_MODULE,
42196 .ioctl = sep_ioctl,
42197 .poll = sep_poll,
42198diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42199index 5e16bc3..7655b10 100644
42200--- a/drivers/staging/usbip/usbip_common.h
42201+++ b/drivers/staging/usbip/usbip_common.h
42202@@ -374,7 +374,7 @@ struct usbip_device {
42203 void (*shutdown)(struct usbip_device *);
42204 void (*reset)(struct usbip_device *);
42205 void (*unusable)(struct usbip_device *);
42206- } eh_ops;
42207+ } __no_const eh_ops;
42208 };
42209
42210
42211diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42212index 57f7946..d9df23d 100644
42213--- a/drivers/staging/usbip/vhci.h
42214+++ b/drivers/staging/usbip/vhci.h
42215@@ -92,7 +92,7 @@ struct vhci_hcd {
42216 unsigned resuming:1;
42217 unsigned long re_timeout;
42218
42219- atomic_t seqnum;
42220+ atomic_unchecked_t seqnum;
42221
42222 /*
42223 * NOTE:
42224diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42225index 20cd7db..c2693ff 100644
42226--- a/drivers/staging/usbip/vhci_hcd.c
42227+++ b/drivers/staging/usbip/vhci_hcd.c
42228@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42229 return;
42230 }
42231
42232- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42233+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42234 if (priv->seqnum == 0xffff)
42235 usbip_uinfo("seqnum max\n");
42236
42237@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42238 return -ENOMEM;
42239 }
42240
42241- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42242+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42243 if (unlink->seqnum == 0xffff)
42244 usbip_uinfo("seqnum max\n");
42245
42246@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42247 vdev->rhport = rhport;
42248 }
42249
42250- atomic_set(&vhci->seqnum, 0);
42251+ atomic_set_unchecked(&vhci->seqnum, 0);
42252 spin_lock_init(&vhci->lock);
42253
42254
42255diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42256index 7fd76fe..673695a 100644
42257--- a/drivers/staging/usbip/vhci_rx.c
42258+++ b/drivers/staging/usbip/vhci_rx.c
42259@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42260 usbip_uerr("cannot find a urb of seqnum %u\n",
42261 pdu->base.seqnum);
42262 usbip_uinfo("max seqnum %d\n",
42263- atomic_read(&the_controller->seqnum));
42264+ atomic_read_unchecked(&the_controller->seqnum));
42265 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42266 return;
42267 }
42268diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42269index 7891288..8e31300 100644
42270--- a/drivers/staging/vme/devices/vme_user.c
42271+++ b/drivers/staging/vme/devices/vme_user.c
42272@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42273 static int __init vme_user_probe(struct device *, int, int);
42274 static int __exit vme_user_remove(struct device *, int, int);
42275
42276-static struct file_operations vme_user_fops = {
42277+static const struct file_operations vme_user_fops = {
42278 .open = vme_user_open,
42279 .release = vme_user_release,
42280 .read = vme_user_read,
42281diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42282index 58abf44..00c1fc8 100644
42283--- a/drivers/staging/vt6655/hostap.c
42284+++ b/drivers/staging/vt6655/hostap.c
42285@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42286 PSDevice apdev_priv;
42287 struct net_device *dev = pDevice->dev;
42288 int ret;
42289- const struct net_device_ops apdev_netdev_ops = {
42290+ net_device_ops_no_const apdev_netdev_ops = {
42291 .ndo_start_xmit = pDevice->tx_80211,
42292 };
42293
42294diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42295index 0c8267a..db1f363 100644
42296--- a/drivers/staging/vt6656/hostap.c
42297+++ b/drivers/staging/vt6656/hostap.c
42298@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42299 PSDevice apdev_priv;
42300 struct net_device *dev = pDevice->dev;
42301 int ret;
42302- const struct net_device_ops apdev_netdev_ops = {
42303+ net_device_ops_no_const apdev_netdev_ops = {
42304 .ndo_start_xmit = pDevice->tx_80211,
42305 };
42306
42307diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42308index 925678b..da7f5ed 100644
42309--- a/drivers/staging/wlan-ng/hfa384x_usb.c
42310+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42311@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42312
42313 struct usbctlx_completor {
42314 int (*complete) (struct usbctlx_completor *);
42315-};
42316+} __no_const;
42317 typedef struct usbctlx_completor usbctlx_completor_t;
42318
42319 static int
42320diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42321index 40de151..924f268 100644
42322--- a/drivers/telephony/ixj.c
42323+++ b/drivers/telephony/ixj.c
42324@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42325 bool mContinue;
42326 char *pIn, *pOut;
42327
42328+ pax_track_stack();
42329+
42330 if (!SCI_Prepare(j))
42331 return 0;
42332
42333diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42334index e941367..b631f5a 100644
42335--- a/drivers/uio/uio.c
42336+++ b/drivers/uio/uio.c
42337@@ -23,6 +23,7 @@
42338 #include <linux/string.h>
42339 #include <linux/kobject.h>
42340 #include <linux/uio_driver.h>
42341+#include <asm/local.h>
42342
42343 #define UIO_MAX_DEVICES 255
42344
42345@@ -30,10 +31,10 @@ struct uio_device {
42346 struct module *owner;
42347 struct device *dev;
42348 int minor;
42349- atomic_t event;
42350+ atomic_unchecked_t event;
42351 struct fasync_struct *async_queue;
42352 wait_queue_head_t wait;
42353- int vma_count;
42354+ local_t vma_count;
42355 struct uio_info *info;
42356 struct kobject *map_dir;
42357 struct kobject *portio_dir;
42358@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42359 return entry->show(mem, buf);
42360 }
42361
42362-static struct sysfs_ops map_sysfs_ops = {
42363+static const struct sysfs_ops map_sysfs_ops = {
42364 .show = map_type_show,
42365 };
42366
42367@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42368 return entry->show(port, buf);
42369 }
42370
42371-static struct sysfs_ops portio_sysfs_ops = {
42372+static const struct sysfs_ops portio_sysfs_ops = {
42373 .show = portio_type_show,
42374 };
42375
42376@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42377 struct uio_device *idev = dev_get_drvdata(dev);
42378 if (idev)
42379 return sprintf(buf, "%u\n",
42380- (unsigned int)atomic_read(&idev->event));
42381+ (unsigned int)atomic_read_unchecked(&idev->event));
42382 else
42383 return -ENODEV;
42384 }
42385@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42386 {
42387 struct uio_device *idev = info->uio_dev;
42388
42389- atomic_inc(&idev->event);
42390+ atomic_inc_unchecked(&idev->event);
42391 wake_up_interruptible(&idev->wait);
42392 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42393 }
42394@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42395 }
42396
42397 listener->dev = idev;
42398- listener->event_count = atomic_read(&idev->event);
42399+ listener->event_count = atomic_read_unchecked(&idev->event);
42400 filep->private_data = listener;
42401
42402 if (idev->info->open) {
42403@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42404 return -EIO;
42405
42406 poll_wait(filep, &idev->wait, wait);
42407- if (listener->event_count != atomic_read(&idev->event))
42408+ if (listener->event_count != atomic_read_unchecked(&idev->event))
42409 return POLLIN | POLLRDNORM;
42410 return 0;
42411 }
42412@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42413 do {
42414 set_current_state(TASK_INTERRUPTIBLE);
42415
42416- event_count = atomic_read(&idev->event);
42417+ event_count = atomic_read_unchecked(&idev->event);
42418 if (event_count != listener->event_count) {
42419 if (copy_to_user(buf, &event_count, count))
42420 retval = -EFAULT;
42421@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42422 static void uio_vma_open(struct vm_area_struct *vma)
42423 {
42424 struct uio_device *idev = vma->vm_private_data;
42425- idev->vma_count++;
42426+ local_inc(&idev->vma_count);
42427 }
42428
42429 static void uio_vma_close(struct vm_area_struct *vma)
42430 {
42431 struct uio_device *idev = vma->vm_private_data;
42432- idev->vma_count--;
42433+ local_dec(&idev->vma_count);
42434 }
42435
42436 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42437@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42438 idev->owner = owner;
42439 idev->info = info;
42440 init_waitqueue_head(&idev->wait);
42441- atomic_set(&idev->event, 0);
42442+ atomic_set_unchecked(&idev->event, 0);
42443
42444 ret = uio_get_minor(idev);
42445 if (ret)
42446diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42447index fbea856..06efea6 100644
42448--- a/drivers/usb/atm/usbatm.c
42449+++ b/drivers/usb/atm/usbatm.c
42450@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42451 if (printk_ratelimit())
42452 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42453 __func__, vpi, vci);
42454- atomic_inc(&vcc->stats->rx_err);
42455+ atomic_inc_unchecked(&vcc->stats->rx_err);
42456 return;
42457 }
42458
42459@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42460 if (length > ATM_MAX_AAL5_PDU) {
42461 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42462 __func__, length, vcc);
42463- atomic_inc(&vcc->stats->rx_err);
42464+ atomic_inc_unchecked(&vcc->stats->rx_err);
42465 goto out;
42466 }
42467
42468@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42469 if (sarb->len < pdu_length) {
42470 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42471 __func__, pdu_length, sarb->len, vcc);
42472- atomic_inc(&vcc->stats->rx_err);
42473+ atomic_inc_unchecked(&vcc->stats->rx_err);
42474 goto out;
42475 }
42476
42477 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42478 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42479 __func__, vcc);
42480- atomic_inc(&vcc->stats->rx_err);
42481+ atomic_inc_unchecked(&vcc->stats->rx_err);
42482 goto out;
42483 }
42484
42485@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42486 if (printk_ratelimit())
42487 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42488 __func__, length);
42489- atomic_inc(&vcc->stats->rx_drop);
42490+ atomic_inc_unchecked(&vcc->stats->rx_drop);
42491 goto out;
42492 }
42493
42494@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42495
42496 vcc->push(vcc, skb);
42497
42498- atomic_inc(&vcc->stats->rx);
42499+ atomic_inc_unchecked(&vcc->stats->rx);
42500 out:
42501 skb_trim(sarb, 0);
42502 }
42503@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42504 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42505
42506 usbatm_pop(vcc, skb);
42507- atomic_inc(&vcc->stats->tx);
42508+ atomic_inc_unchecked(&vcc->stats->tx);
42509
42510 skb = skb_dequeue(&instance->sndqueue);
42511 }
42512@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42513 if (!left--)
42514 return sprintf(page,
42515 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42516- atomic_read(&atm_dev->stats.aal5.tx),
42517- atomic_read(&atm_dev->stats.aal5.tx_err),
42518- atomic_read(&atm_dev->stats.aal5.rx),
42519- atomic_read(&atm_dev->stats.aal5.rx_err),
42520- atomic_read(&atm_dev->stats.aal5.rx_drop));
42521+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42522+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42523+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42524+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42525+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42526
42527 if (!left--) {
42528 if (instance->disconnected)
42529diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42530index 24e6205..fe5a5d4 100644
42531--- a/drivers/usb/core/hcd.c
42532+++ b/drivers/usb/core/hcd.c
42533@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42534
42535 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42536
42537-struct usb_mon_operations *mon_ops;
42538+const struct usb_mon_operations *mon_ops;
42539
42540 /*
42541 * The registration is unlocked.
42542@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42543 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42544 */
42545
42546-int usb_mon_register (struct usb_mon_operations *ops)
42547+int usb_mon_register (const struct usb_mon_operations *ops)
42548 {
42549
42550 if (mon_ops)
42551diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42552index bcbe104..9cfd1c6 100644
42553--- a/drivers/usb/core/hcd.h
42554+++ b/drivers/usb/core/hcd.h
42555@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42556 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42557
42558 struct usb_mon_operations {
42559- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42560- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42561- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42562+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42563+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42564+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42565 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42566 };
42567
42568-extern struct usb_mon_operations *mon_ops;
42569+extern const struct usb_mon_operations *mon_ops;
42570
42571 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42572 {
42573@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42574 (*mon_ops->urb_complete)(bus, urb, status);
42575 }
42576
42577-int usb_mon_register(struct usb_mon_operations *ops);
42578+int usb_mon_register(const struct usb_mon_operations *ops);
42579 void usb_mon_deregister(void);
42580
42581 #else
42582diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42583index 409cc94..a673bad 100644
42584--- a/drivers/usb/core/message.c
42585+++ b/drivers/usb/core/message.c
42586@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42587 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42588 if (buf) {
42589 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42590- if (len > 0) {
42591- smallbuf = kmalloc(++len, GFP_NOIO);
42592+ if (len++ > 0) {
42593+ smallbuf = kmalloc(len, GFP_NOIO);
42594 if (!smallbuf)
42595 return buf;
42596 memcpy(smallbuf, buf, len);
42597diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42598index 62ff5e7..530b74e 100644
42599--- a/drivers/usb/misc/appledisplay.c
42600+++ b/drivers/usb/misc/appledisplay.c
42601@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42602 return pdata->msgdata[1];
42603 }
42604
42605-static struct backlight_ops appledisplay_bl_data = {
42606+static const struct backlight_ops appledisplay_bl_data = {
42607 .get_brightness = appledisplay_bl_get_brightness,
42608 .update_status = appledisplay_bl_update_status,
42609 };
42610diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42611index e0c2db3..bd8cb66 100644
42612--- a/drivers/usb/mon/mon_main.c
42613+++ b/drivers/usb/mon/mon_main.c
42614@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42615 /*
42616 * Ops
42617 */
42618-static struct usb_mon_operations mon_ops_0 = {
42619+static const struct usb_mon_operations mon_ops_0 = {
42620 .urb_submit = mon_submit,
42621 .urb_submit_error = mon_submit_error,
42622 .urb_complete = mon_complete,
42623diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42624index d6bea3e..60b250e 100644
42625--- a/drivers/usb/wusbcore/wa-hc.h
42626+++ b/drivers/usb/wusbcore/wa-hc.h
42627@@ -192,7 +192,7 @@ struct wahc {
42628 struct list_head xfer_delayed_list;
42629 spinlock_t xfer_list_lock;
42630 struct work_struct xfer_work;
42631- atomic_t xfer_id_count;
42632+ atomic_unchecked_t xfer_id_count;
42633 };
42634
42635
42636@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42637 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42638 spin_lock_init(&wa->xfer_list_lock);
42639 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42640- atomic_set(&wa->xfer_id_count, 1);
42641+ atomic_set_unchecked(&wa->xfer_id_count, 1);
42642 }
42643
42644 /**
42645diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42646index 613a5fc..3174865 100644
42647--- a/drivers/usb/wusbcore/wa-xfer.c
42648+++ b/drivers/usb/wusbcore/wa-xfer.c
42649@@ -293,7 +293,7 @@ out:
42650 */
42651 static void wa_xfer_id_init(struct wa_xfer *xfer)
42652 {
42653- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42654+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42655 }
42656
42657 /*
42658diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42659index aa42fce..f8a828c 100644
42660--- a/drivers/uwb/wlp/messages.c
42661+++ b/drivers/uwb/wlp/messages.c
42662@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42663 size_t len = skb->len;
42664 size_t used;
42665 ssize_t result;
42666- struct wlp_nonce enonce, rnonce;
42667+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42668 enum wlp_assc_error assc_err;
42669 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42670 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42671diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42672index 0370399..6627c94 100644
42673--- a/drivers/uwb/wlp/sysfs.c
42674+++ b/drivers/uwb/wlp/sysfs.c
42675@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42676 return ret;
42677 }
42678
42679-static
42680-struct sysfs_ops wss_sysfs_ops = {
42681+static const struct sysfs_ops wss_sysfs_ops = {
42682 .show = wlp_wss_attr_show,
42683 .store = wlp_wss_attr_store,
42684 };
42685diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42686index 8c5e432..5ee90ea 100644
42687--- a/drivers/video/atmel_lcdfb.c
42688+++ b/drivers/video/atmel_lcdfb.c
42689@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42690 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42691 }
42692
42693-static struct backlight_ops atmel_lcdc_bl_ops = {
42694+static const struct backlight_ops atmel_lcdc_bl_ops = {
42695 .update_status = atmel_bl_update_status,
42696 .get_brightness = atmel_bl_get_brightness,
42697 };
42698diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42699index e4e4d43..66bcbcc 100644
42700--- a/drivers/video/aty/aty128fb.c
42701+++ b/drivers/video/aty/aty128fb.c
42702@@ -149,7 +149,7 @@ enum {
42703 };
42704
42705 /* Must match above enum */
42706-static const char *r128_family[] __devinitdata = {
42707+static const char *r128_family[] __devinitconst = {
42708 "AGP",
42709 "PCI",
42710 "PRO AGP",
42711@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42712 return bd->props.brightness;
42713 }
42714
42715-static struct backlight_ops aty128_bl_data = {
42716+static const struct backlight_ops aty128_bl_data = {
42717 .get_brightness = aty128_bl_get_brightness,
42718 .update_status = aty128_bl_update_status,
42719 };
42720diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42721index 913b4a4..9295a38 100644
42722--- a/drivers/video/aty/atyfb_base.c
42723+++ b/drivers/video/aty/atyfb_base.c
42724@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42725 return bd->props.brightness;
42726 }
42727
42728-static struct backlight_ops aty_bl_data = {
42729+static const struct backlight_ops aty_bl_data = {
42730 .get_brightness = aty_bl_get_brightness,
42731 .update_status = aty_bl_update_status,
42732 };
42733diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42734index 1a056ad..221bd6a 100644
42735--- a/drivers/video/aty/radeon_backlight.c
42736+++ b/drivers/video/aty/radeon_backlight.c
42737@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42738 return bd->props.brightness;
42739 }
42740
42741-static struct backlight_ops radeon_bl_data = {
42742+static const struct backlight_ops radeon_bl_data = {
42743 .get_brightness = radeon_bl_get_brightness,
42744 .update_status = radeon_bl_update_status,
42745 };
42746diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42747index ad05da5..3cb2cb9 100644
42748--- a/drivers/video/backlight/adp5520_bl.c
42749+++ b/drivers/video/backlight/adp5520_bl.c
42750@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42751 return error ? data->current_brightness : reg_val;
42752 }
42753
42754-static struct backlight_ops adp5520_bl_ops = {
42755+static const struct backlight_ops adp5520_bl_ops = {
42756 .update_status = adp5520_bl_update_status,
42757 .get_brightness = adp5520_bl_get_brightness,
42758 };
42759diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42760index 2c3bdfc..d769b0b 100644
42761--- a/drivers/video/backlight/adx_bl.c
42762+++ b/drivers/video/backlight/adx_bl.c
42763@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42764 return 1;
42765 }
42766
42767-static struct backlight_ops adx_backlight_ops = {
42768+static const struct backlight_ops adx_backlight_ops = {
42769 .options = 0,
42770 .update_status = adx_backlight_update_status,
42771 .get_brightness = adx_backlight_get_brightness,
42772diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42773index 505c082..6b6b3cc 100644
42774--- a/drivers/video/backlight/atmel-pwm-bl.c
42775+++ b/drivers/video/backlight/atmel-pwm-bl.c
42776@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
42777 return pwm_channel_enable(&pwmbl->pwmc);
42778 }
42779
42780-static struct backlight_ops atmel_pwm_bl_ops = {
42781+static const struct backlight_ops atmel_pwm_bl_ops = {
42782 .get_brightness = atmel_pwm_bl_get_intensity,
42783 .update_status = atmel_pwm_bl_set_intensity,
42784 };
42785diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
42786index 5e20e6e..89025e6 100644
42787--- a/drivers/video/backlight/backlight.c
42788+++ b/drivers/video/backlight/backlight.c
42789@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
42790 * ERR_PTR() or a pointer to the newly allocated device.
42791 */
42792 struct backlight_device *backlight_device_register(const char *name,
42793- struct device *parent, void *devdata, struct backlight_ops *ops)
42794+ struct device *parent, void *devdata, const struct backlight_ops *ops)
42795 {
42796 struct backlight_device *new_bd;
42797 int rc;
42798diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
42799index 9677494..b4bcf80 100644
42800--- a/drivers/video/backlight/corgi_lcd.c
42801+++ b/drivers/video/backlight/corgi_lcd.c
42802@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
42803 }
42804 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
42805
42806-static struct backlight_ops corgi_bl_ops = {
42807+static const struct backlight_ops corgi_bl_ops = {
42808 .get_brightness = corgi_bl_get_intensity,
42809 .update_status = corgi_bl_update_status,
42810 };
42811diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
42812index b9fe62b..2914bf1 100644
42813--- a/drivers/video/backlight/cr_bllcd.c
42814+++ b/drivers/video/backlight/cr_bllcd.c
42815@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
42816 return intensity;
42817 }
42818
42819-static struct backlight_ops cr_backlight_ops = {
42820+static const struct backlight_ops cr_backlight_ops = {
42821 .get_brightness = cr_backlight_get_intensity,
42822 .update_status = cr_backlight_set_intensity,
42823 };
42824diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
42825index 701a108..feacfd5 100644
42826--- a/drivers/video/backlight/da903x_bl.c
42827+++ b/drivers/video/backlight/da903x_bl.c
42828@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
42829 return data->current_brightness;
42830 }
42831
42832-static struct backlight_ops da903x_backlight_ops = {
42833+static const struct backlight_ops da903x_backlight_ops = {
42834 .update_status = da903x_backlight_update_status,
42835 .get_brightness = da903x_backlight_get_brightness,
42836 };
42837diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
42838index 6d27f62..e6d348e 100644
42839--- a/drivers/video/backlight/generic_bl.c
42840+++ b/drivers/video/backlight/generic_bl.c
42841@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
42842 }
42843 EXPORT_SYMBOL(corgibl_limit_intensity);
42844
42845-static struct backlight_ops genericbl_ops = {
42846+static const struct backlight_ops genericbl_ops = {
42847 .options = BL_CORE_SUSPENDRESUME,
42848 .get_brightness = genericbl_get_intensity,
42849 .update_status = genericbl_send_intensity,
42850diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
42851index 7fb4eef..f7cc528 100644
42852--- a/drivers/video/backlight/hp680_bl.c
42853+++ b/drivers/video/backlight/hp680_bl.c
42854@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
42855 return current_intensity;
42856 }
42857
42858-static struct backlight_ops hp680bl_ops = {
42859+static const struct backlight_ops hp680bl_ops = {
42860 .get_brightness = hp680bl_get_intensity,
42861 .update_status = hp680bl_set_intensity,
42862 };
42863diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
42864index 7aed256..db9071f 100644
42865--- a/drivers/video/backlight/jornada720_bl.c
42866+++ b/drivers/video/backlight/jornada720_bl.c
42867@@ -93,7 +93,7 @@ out:
42868 return ret;
42869 }
42870
42871-static struct backlight_ops jornada_bl_ops = {
42872+static const struct backlight_ops jornada_bl_ops = {
42873 .get_brightness = jornada_bl_get_brightness,
42874 .update_status = jornada_bl_update_status,
42875 .options = BL_CORE_SUSPENDRESUME,
42876diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
42877index a38fda1..939e7b8 100644
42878--- a/drivers/video/backlight/kb3886_bl.c
42879+++ b/drivers/video/backlight/kb3886_bl.c
42880@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
42881 return kb3886bl_intensity;
42882 }
42883
42884-static struct backlight_ops kb3886bl_ops = {
42885+static const struct backlight_ops kb3886bl_ops = {
42886 .get_brightness = kb3886bl_get_intensity,
42887 .update_status = kb3886bl_send_intensity,
42888 };
42889diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
42890index 6b488b8..00a9591 100644
42891--- a/drivers/video/backlight/locomolcd.c
42892+++ b/drivers/video/backlight/locomolcd.c
42893@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
42894 return current_intensity;
42895 }
42896
42897-static struct backlight_ops locomobl_data = {
42898+static const struct backlight_ops locomobl_data = {
42899 .get_brightness = locomolcd_get_intensity,
42900 .update_status = locomolcd_set_intensity,
42901 };
42902diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
42903index 99bdfa8..3dac448 100644
42904--- a/drivers/video/backlight/mbp_nvidia_bl.c
42905+++ b/drivers/video/backlight/mbp_nvidia_bl.c
42906@@ -33,7 +33,7 @@ struct dmi_match_data {
42907 unsigned long iostart;
42908 unsigned long iolen;
42909 /* Backlight operations structure. */
42910- struct backlight_ops backlight_ops;
42911+ const struct backlight_ops backlight_ops;
42912 };
42913
42914 /* Module parameters. */
42915diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
42916index cbad67e..3cf900e 100644
42917--- a/drivers/video/backlight/omap1_bl.c
42918+++ b/drivers/video/backlight/omap1_bl.c
42919@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
42920 return bl->current_intensity;
42921 }
42922
42923-static struct backlight_ops omapbl_ops = {
42924+static const struct backlight_ops omapbl_ops = {
42925 .get_brightness = omapbl_get_intensity,
42926 .update_status = omapbl_update_status,
42927 };
42928diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
42929index 9edaf24..075786e 100644
42930--- a/drivers/video/backlight/progear_bl.c
42931+++ b/drivers/video/backlight/progear_bl.c
42932@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
42933 return intensity - HW_LEVEL_MIN;
42934 }
42935
42936-static struct backlight_ops progearbl_ops = {
42937+static const struct backlight_ops progearbl_ops = {
42938 .get_brightness = progearbl_get_intensity,
42939 .update_status = progearbl_set_intensity,
42940 };
42941diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
42942index 8871662..df9e0b3 100644
42943--- a/drivers/video/backlight/pwm_bl.c
42944+++ b/drivers/video/backlight/pwm_bl.c
42945@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
42946 return bl->props.brightness;
42947 }
42948
42949-static struct backlight_ops pwm_backlight_ops = {
42950+static const struct backlight_ops pwm_backlight_ops = {
42951 .update_status = pwm_backlight_update_status,
42952 .get_brightness = pwm_backlight_get_brightness,
42953 };
42954diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
42955index 43edbad..e14ce4d 100644
42956--- a/drivers/video/backlight/tosa_bl.c
42957+++ b/drivers/video/backlight/tosa_bl.c
42958@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
42959 return props->brightness;
42960 }
42961
42962-static struct backlight_ops bl_ops = {
42963+static const struct backlight_ops bl_ops = {
42964 .get_brightness = tosa_bl_get_brightness,
42965 .update_status = tosa_bl_update_status,
42966 };
42967diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
42968index 467bdb7..e32add3 100644
42969--- a/drivers/video/backlight/wm831x_bl.c
42970+++ b/drivers/video/backlight/wm831x_bl.c
42971@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
42972 return data->current_brightness;
42973 }
42974
42975-static struct backlight_ops wm831x_backlight_ops = {
42976+static const struct backlight_ops wm831x_backlight_ops = {
42977 .options = BL_CORE_SUSPENDRESUME,
42978 .update_status = wm831x_backlight_update_status,
42979 .get_brightness = wm831x_backlight_get_brightness,
42980diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
42981index e49ae5e..db4e6f7 100644
42982--- a/drivers/video/bf54x-lq043fb.c
42983+++ b/drivers/video/bf54x-lq043fb.c
42984@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42985 return 0;
42986 }
42987
42988-static struct backlight_ops bfin_lq043fb_bl_ops = {
42989+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42990 .get_brightness = bl_get_brightness,
42991 };
42992
42993diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
42994index 2c72a7c..d523e52 100644
42995--- a/drivers/video/bfin-t350mcqb-fb.c
42996+++ b/drivers/video/bfin-t350mcqb-fb.c
42997@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42998 return 0;
42999 }
43000
43001-static struct backlight_ops bfin_lq043fb_bl_ops = {
43002+static const struct backlight_ops bfin_lq043fb_bl_ops = {
43003 .get_brightness = bl_get_brightness,
43004 };
43005
43006diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
43007index f53b9f1..958bf4e 100644
43008--- a/drivers/video/fbcmap.c
43009+++ b/drivers/video/fbcmap.c
43010@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
43011 rc = -ENODEV;
43012 goto out;
43013 }
43014- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
43015- !info->fbops->fb_setcmap)) {
43016+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
43017 rc = -EINVAL;
43018 goto out1;
43019 }
43020diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43021index 99bbd28..ad3829e 100644
43022--- a/drivers/video/fbmem.c
43023+++ b/drivers/video/fbmem.c
43024@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43025 image->dx += image->width + 8;
43026 }
43027 } else if (rotate == FB_ROTATE_UD) {
43028- for (x = 0; x < num && image->dx >= 0; x++) {
43029+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43030 info->fbops->fb_imageblit(info, image);
43031 image->dx -= image->width + 8;
43032 }
43033@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43034 image->dy += image->height + 8;
43035 }
43036 } else if (rotate == FB_ROTATE_CCW) {
43037- for (x = 0; x < num && image->dy >= 0; x++) {
43038+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43039 info->fbops->fb_imageblit(info, image);
43040 image->dy -= image->height + 8;
43041 }
43042@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43043 int flags = info->flags;
43044 int ret = 0;
43045
43046+ pax_track_stack();
43047+
43048 if (var->activate & FB_ACTIVATE_INV_MODE) {
43049 struct fb_videomode mode1, mode2;
43050
43051@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43052 void __user *argp = (void __user *)arg;
43053 long ret = 0;
43054
43055+ pax_track_stack();
43056+
43057 switch (cmd) {
43058 case FBIOGET_VSCREENINFO:
43059 if (!lock_fb_info(info))
43060@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43061 return -EFAULT;
43062 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43063 return -EINVAL;
43064- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43065+ if (con2fb.framebuffer >= FB_MAX)
43066 return -EINVAL;
43067 if (!registered_fb[con2fb.framebuffer])
43068 request_module("fb%d", con2fb.framebuffer);
43069diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43070index f20eff8..3e4f622 100644
43071--- a/drivers/video/geode/gx1fb_core.c
43072+++ b/drivers/video/geode/gx1fb_core.c
43073@@ -30,7 +30,7 @@ static int crt_option = 1;
43074 static char panel_option[32] = "";
43075
43076 /* Modes relevant to the GX1 (taken from modedb.c) */
43077-static const struct fb_videomode __initdata gx1_modedb[] = {
43078+static const struct fb_videomode __initconst gx1_modedb[] = {
43079 /* 640x480-60 VESA */
43080 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43081 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43082diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43083index 896e53d..4d87d0b 100644
43084--- a/drivers/video/gxt4500.c
43085+++ b/drivers/video/gxt4500.c
43086@@ -156,7 +156,7 @@ struct gxt4500_par {
43087 static char *mode_option;
43088
43089 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43090-static const struct fb_videomode defaultmode __devinitdata = {
43091+static const struct fb_videomode defaultmode __devinitconst = {
43092 .refresh = 60,
43093 .xres = 1280,
43094 .yres = 1024,
43095@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43096 return 0;
43097 }
43098
43099-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43100+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43101 .id = "IBM GXT4500P",
43102 .type = FB_TYPE_PACKED_PIXELS,
43103 .visual = FB_VISUAL_PSEUDOCOLOR,
43104diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43105index f5bedee..28c6028 100644
43106--- a/drivers/video/i810/i810_accel.c
43107+++ b/drivers/video/i810/i810_accel.c
43108@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43109 }
43110 }
43111 printk("ringbuffer lockup!!!\n");
43112+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43113 i810_report_error(mmio);
43114 par->dev_flags |= LOCKUP;
43115 info->pixmap.scan_align = 1;
43116diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43117index 5743ea2..457f82c 100644
43118--- a/drivers/video/i810/i810_main.c
43119+++ b/drivers/video/i810/i810_main.c
43120@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43121 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43122
43123 /* PCI */
43124-static const char *i810_pci_list[] __devinitdata = {
43125+static const char *i810_pci_list[] __devinitconst = {
43126 "Intel(R) 810 Framebuffer Device" ,
43127 "Intel(R) 810-DC100 Framebuffer Device" ,
43128 "Intel(R) 810E Framebuffer Device" ,
43129diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43130index 3c14e43..eafa544 100644
43131--- a/drivers/video/logo/logo_linux_clut224.ppm
43132+++ b/drivers/video/logo/logo_linux_clut224.ppm
43133@@ -1,1604 +1,1123 @@
43134 P3
43135-# Standard 224-color Linux logo
43136 80 80
43137 255
43138- 0 0 0 0 0 0 0 0 0 0 0 0
43139- 0 0 0 0 0 0 0 0 0 0 0 0
43140- 0 0 0 0 0 0 0 0 0 0 0 0
43141- 0 0 0 0 0 0 0 0 0 0 0 0
43142- 0 0 0 0 0 0 0 0 0 0 0 0
43143- 0 0 0 0 0 0 0 0 0 0 0 0
43144- 0 0 0 0 0 0 0 0 0 0 0 0
43145- 0 0 0 0 0 0 0 0 0 0 0 0
43146- 0 0 0 0 0 0 0 0 0 0 0 0
43147- 6 6 6 6 6 6 10 10 10 10 10 10
43148- 10 10 10 6 6 6 6 6 6 6 6 6
43149- 0 0 0 0 0 0 0 0 0 0 0 0
43150- 0 0 0 0 0 0 0 0 0 0 0 0
43151- 0 0 0 0 0 0 0 0 0 0 0 0
43152- 0 0 0 0 0 0 0 0 0 0 0 0
43153- 0 0 0 0 0 0 0 0 0 0 0 0
43154- 0 0 0 0 0 0 0 0 0 0 0 0
43155- 0 0 0 0 0 0 0 0 0 0 0 0
43156- 0 0 0 0 0 0 0 0 0 0 0 0
43157- 0 0 0 0 0 0 0 0 0 0 0 0
43158- 0 0 0 0 0 0 0 0 0 0 0 0
43159- 0 0 0 0 0 0 0 0 0 0 0 0
43160- 0 0 0 0 0 0 0 0 0 0 0 0
43161- 0 0 0 0 0 0 0 0 0 0 0 0
43162- 0 0 0 0 0 0 0 0 0 0 0 0
43163- 0 0 0 0 0 0 0 0 0 0 0 0
43164- 0 0 0 0 0 0 0 0 0 0 0 0
43165- 0 0 0 0 0 0 0 0 0 0 0 0
43166- 0 0 0 6 6 6 10 10 10 14 14 14
43167- 22 22 22 26 26 26 30 30 30 34 34 34
43168- 30 30 30 30 30 30 26 26 26 18 18 18
43169- 14 14 14 10 10 10 6 6 6 0 0 0
43170- 0 0 0 0 0 0 0 0 0 0 0 0
43171- 0 0 0 0 0 0 0 0 0 0 0 0
43172- 0 0 0 0 0 0 0 0 0 0 0 0
43173- 0 0 0 0 0 0 0 0 0 0 0 0
43174- 0 0 0 0 0 0 0 0 0 0 0 0
43175- 0 0 0 0 0 0 0 0 0 0 0 0
43176- 0 0 0 0 0 0 0 0 0 0 0 0
43177- 0 0 0 0 0 0 0 0 0 0 0 0
43178- 0 0 0 0 0 0 0 0 0 0 0 0
43179- 0 0 0 0 0 1 0 0 1 0 0 0
43180- 0 0 0 0 0 0 0 0 0 0 0 0
43181- 0 0 0 0 0 0 0 0 0 0 0 0
43182- 0 0 0 0 0 0 0 0 0 0 0 0
43183- 0 0 0 0 0 0 0 0 0 0 0 0
43184- 0 0 0 0 0 0 0 0 0 0 0 0
43185- 0 0 0 0 0 0 0 0 0 0 0 0
43186- 6 6 6 14 14 14 26 26 26 42 42 42
43187- 54 54 54 66 66 66 78 78 78 78 78 78
43188- 78 78 78 74 74 74 66 66 66 54 54 54
43189- 42 42 42 26 26 26 18 18 18 10 10 10
43190- 6 6 6 0 0 0 0 0 0 0 0 0
43191- 0 0 0 0 0 0 0 0 0 0 0 0
43192- 0 0 0 0 0 0 0 0 0 0 0 0
43193- 0 0 0 0 0 0 0 0 0 0 0 0
43194- 0 0 0 0 0 0 0 0 0 0 0 0
43195- 0 0 0 0 0 0 0 0 0 0 0 0
43196- 0 0 0 0 0 0 0 0 0 0 0 0
43197- 0 0 0 0 0 0 0 0 0 0 0 0
43198- 0 0 0 0 0 0 0 0 0 0 0 0
43199- 0 0 1 0 0 0 0 0 0 0 0 0
43200- 0 0 0 0 0 0 0 0 0 0 0 0
43201- 0 0 0 0 0 0 0 0 0 0 0 0
43202- 0 0 0 0 0 0 0 0 0 0 0 0
43203- 0 0 0 0 0 0 0 0 0 0 0 0
43204- 0 0 0 0 0 0 0 0 0 0 0 0
43205- 0 0 0 0 0 0 0 0 0 10 10 10
43206- 22 22 22 42 42 42 66 66 66 86 86 86
43207- 66 66 66 38 38 38 38 38 38 22 22 22
43208- 26 26 26 34 34 34 54 54 54 66 66 66
43209- 86 86 86 70 70 70 46 46 46 26 26 26
43210- 14 14 14 6 6 6 0 0 0 0 0 0
43211- 0 0 0 0 0 0 0 0 0 0 0 0
43212- 0 0 0 0 0 0 0 0 0 0 0 0
43213- 0 0 0 0 0 0 0 0 0 0 0 0
43214- 0 0 0 0 0 0 0 0 0 0 0 0
43215- 0 0 0 0 0 0 0 0 0 0 0 0
43216- 0 0 0 0 0 0 0 0 0 0 0 0
43217- 0 0 0 0 0 0 0 0 0 0 0 0
43218- 0 0 0 0 0 0 0 0 0 0 0 0
43219- 0 0 1 0 0 1 0 0 1 0 0 0
43220- 0 0 0 0 0 0 0 0 0 0 0 0
43221- 0 0 0 0 0 0 0 0 0 0 0 0
43222- 0 0 0 0 0 0 0 0 0 0 0 0
43223- 0 0 0 0 0 0 0 0 0 0 0 0
43224- 0 0 0 0 0 0 0 0 0 0 0 0
43225- 0 0 0 0 0 0 10 10 10 26 26 26
43226- 50 50 50 82 82 82 58 58 58 6 6 6
43227- 2 2 6 2 2 6 2 2 6 2 2 6
43228- 2 2 6 2 2 6 2 2 6 2 2 6
43229- 6 6 6 54 54 54 86 86 86 66 66 66
43230- 38 38 38 18 18 18 6 6 6 0 0 0
43231- 0 0 0 0 0 0 0 0 0 0 0 0
43232- 0 0 0 0 0 0 0 0 0 0 0 0
43233- 0 0 0 0 0 0 0 0 0 0 0 0
43234- 0 0 0 0 0 0 0 0 0 0 0 0
43235- 0 0 0 0 0 0 0 0 0 0 0 0
43236- 0 0 0 0 0 0 0 0 0 0 0 0
43237- 0 0 0 0 0 0 0 0 0 0 0 0
43238- 0 0 0 0 0 0 0 0 0 0 0 0
43239- 0 0 0 0 0 0 0 0 0 0 0 0
43240- 0 0 0 0 0 0 0 0 0 0 0 0
43241- 0 0 0 0 0 0 0 0 0 0 0 0
43242- 0 0 0 0 0 0 0 0 0 0 0 0
43243- 0 0 0 0 0 0 0 0 0 0 0 0
43244- 0 0 0 0 0 0 0 0 0 0 0 0
43245- 0 0 0 6 6 6 22 22 22 50 50 50
43246- 78 78 78 34 34 34 2 2 6 2 2 6
43247- 2 2 6 2 2 6 2 2 6 2 2 6
43248- 2 2 6 2 2 6 2 2 6 2 2 6
43249- 2 2 6 2 2 6 6 6 6 70 70 70
43250- 78 78 78 46 46 46 22 22 22 6 6 6
43251- 0 0 0 0 0 0 0 0 0 0 0 0
43252- 0 0 0 0 0 0 0 0 0 0 0 0
43253- 0 0 0 0 0 0 0 0 0 0 0 0
43254- 0 0 0 0 0 0 0 0 0 0 0 0
43255- 0 0 0 0 0 0 0 0 0 0 0 0
43256- 0 0 0 0 0 0 0 0 0 0 0 0
43257- 0 0 0 0 0 0 0 0 0 0 0 0
43258- 0 0 0 0 0 0 0 0 0 0 0 0
43259- 0 0 1 0 0 1 0 0 1 0 0 0
43260- 0 0 0 0 0 0 0 0 0 0 0 0
43261- 0 0 0 0 0 0 0 0 0 0 0 0
43262- 0 0 0 0 0 0 0 0 0 0 0 0
43263- 0 0 0 0 0 0 0 0 0 0 0 0
43264- 0 0 0 0 0 0 0 0 0 0 0 0
43265- 6 6 6 18 18 18 42 42 42 82 82 82
43266- 26 26 26 2 2 6 2 2 6 2 2 6
43267- 2 2 6 2 2 6 2 2 6 2 2 6
43268- 2 2 6 2 2 6 2 2 6 14 14 14
43269- 46 46 46 34 34 34 6 6 6 2 2 6
43270- 42 42 42 78 78 78 42 42 42 18 18 18
43271- 6 6 6 0 0 0 0 0 0 0 0 0
43272- 0 0 0 0 0 0 0 0 0 0 0 0
43273- 0 0 0 0 0 0 0 0 0 0 0 0
43274- 0 0 0 0 0 0 0 0 0 0 0 0
43275- 0 0 0 0 0 0 0 0 0 0 0 0
43276- 0 0 0 0 0 0 0 0 0 0 0 0
43277- 0 0 0 0 0 0 0 0 0 0 0 0
43278- 0 0 0 0 0 0 0 0 0 0 0 0
43279- 0 0 1 0 0 0 0 0 1 0 0 0
43280- 0 0 0 0 0 0 0 0 0 0 0 0
43281- 0 0 0 0 0 0 0 0 0 0 0 0
43282- 0 0 0 0 0 0 0 0 0 0 0 0
43283- 0 0 0 0 0 0 0 0 0 0 0 0
43284- 0 0 0 0 0 0 0 0 0 0 0 0
43285- 10 10 10 30 30 30 66 66 66 58 58 58
43286- 2 2 6 2 2 6 2 2 6 2 2 6
43287- 2 2 6 2 2 6 2 2 6 2 2 6
43288- 2 2 6 2 2 6 2 2 6 26 26 26
43289- 86 86 86 101 101 101 46 46 46 10 10 10
43290- 2 2 6 58 58 58 70 70 70 34 34 34
43291- 10 10 10 0 0 0 0 0 0 0 0 0
43292- 0 0 0 0 0 0 0 0 0 0 0 0
43293- 0 0 0 0 0 0 0 0 0 0 0 0
43294- 0 0 0 0 0 0 0 0 0 0 0 0
43295- 0 0 0 0 0 0 0 0 0 0 0 0
43296- 0 0 0 0 0 0 0 0 0 0 0 0
43297- 0 0 0 0 0 0 0 0 0 0 0 0
43298- 0 0 0 0 0 0 0 0 0 0 0 0
43299- 0 0 1 0 0 1 0 0 1 0 0 0
43300- 0 0 0 0 0 0 0 0 0 0 0 0
43301- 0 0 0 0 0 0 0 0 0 0 0 0
43302- 0 0 0 0 0 0 0 0 0 0 0 0
43303- 0 0 0 0 0 0 0 0 0 0 0 0
43304- 0 0 0 0 0 0 0 0 0 0 0 0
43305- 14 14 14 42 42 42 86 86 86 10 10 10
43306- 2 2 6 2 2 6 2 2 6 2 2 6
43307- 2 2 6 2 2 6 2 2 6 2 2 6
43308- 2 2 6 2 2 6 2 2 6 30 30 30
43309- 94 94 94 94 94 94 58 58 58 26 26 26
43310- 2 2 6 6 6 6 78 78 78 54 54 54
43311- 22 22 22 6 6 6 0 0 0 0 0 0
43312- 0 0 0 0 0 0 0 0 0 0 0 0
43313- 0 0 0 0 0 0 0 0 0 0 0 0
43314- 0 0 0 0 0 0 0 0 0 0 0 0
43315- 0 0 0 0 0 0 0 0 0 0 0 0
43316- 0 0 0 0 0 0 0 0 0 0 0 0
43317- 0 0 0 0 0 0 0 0 0 0 0 0
43318- 0 0 0 0 0 0 0 0 0 0 0 0
43319- 0 0 0 0 0 0 0 0 0 0 0 0
43320- 0 0 0 0 0 0 0 0 0 0 0 0
43321- 0 0 0 0 0 0 0 0 0 0 0 0
43322- 0 0 0 0 0 0 0 0 0 0 0 0
43323- 0 0 0 0 0 0 0 0 0 0 0 0
43324- 0 0 0 0 0 0 0 0 0 6 6 6
43325- 22 22 22 62 62 62 62 62 62 2 2 6
43326- 2 2 6 2 2 6 2 2 6 2 2 6
43327- 2 2 6 2 2 6 2 2 6 2 2 6
43328- 2 2 6 2 2 6 2 2 6 26 26 26
43329- 54 54 54 38 38 38 18 18 18 10 10 10
43330- 2 2 6 2 2 6 34 34 34 82 82 82
43331- 38 38 38 14 14 14 0 0 0 0 0 0
43332- 0 0 0 0 0 0 0 0 0 0 0 0
43333- 0 0 0 0 0 0 0 0 0 0 0 0
43334- 0 0 0 0 0 0 0 0 0 0 0 0
43335- 0 0 0 0 0 0 0 0 0 0 0 0
43336- 0 0 0 0 0 0 0 0 0 0 0 0
43337- 0 0 0 0 0 0 0 0 0 0 0 0
43338- 0 0 0 0 0 0 0 0 0 0 0 0
43339- 0 0 0 0 0 1 0 0 1 0 0 0
43340- 0 0 0 0 0 0 0 0 0 0 0 0
43341- 0 0 0 0 0 0 0 0 0 0 0 0
43342- 0 0 0 0 0 0 0 0 0 0 0 0
43343- 0 0 0 0 0 0 0 0 0 0 0 0
43344- 0 0 0 0 0 0 0 0 0 6 6 6
43345- 30 30 30 78 78 78 30 30 30 2 2 6
43346- 2 2 6 2 2 6 2 2 6 2 2 6
43347- 2 2 6 2 2 6 2 2 6 2 2 6
43348- 2 2 6 2 2 6 2 2 6 10 10 10
43349- 10 10 10 2 2 6 2 2 6 2 2 6
43350- 2 2 6 2 2 6 2 2 6 78 78 78
43351- 50 50 50 18 18 18 6 6 6 0 0 0
43352- 0 0 0 0 0 0 0 0 0 0 0 0
43353- 0 0 0 0 0 0 0 0 0 0 0 0
43354- 0 0 0 0 0 0 0 0 0 0 0 0
43355- 0 0 0 0 0 0 0 0 0 0 0 0
43356- 0 0 0 0 0 0 0 0 0 0 0 0
43357- 0 0 0 0 0 0 0 0 0 0 0 0
43358- 0 0 0 0 0 0 0 0 0 0 0 0
43359- 0 0 1 0 0 0 0 0 0 0 0 0
43360- 0 0 0 0 0 0 0 0 0 0 0 0
43361- 0 0 0 0 0 0 0 0 0 0 0 0
43362- 0 0 0 0 0 0 0 0 0 0 0 0
43363- 0 0 0 0 0 0 0 0 0 0 0 0
43364- 0 0 0 0 0 0 0 0 0 10 10 10
43365- 38 38 38 86 86 86 14 14 14 2 2 6
43366- 2 2 6 2 2 6 2 2 6 2 2 6
43367- 2 2 6 2 2 6 2 2 6 2 2 6
43368- 2 2 6 2 2 6 2 2 6 2 2 6
43369- 2 2 6 2 2 6 2 2 6 2 2 6
43370- 2 2 6 2 2 6 2 2 6 54 54 54
43371- 66 66 66 26 26 26 6 6 6 0 0 0
43372- 0 0 0 0 0 0 0 0 0 0 0 0
43373- 0 0 0 0 0 0 0 0 0 0 0 0
43374- 0 0 0 0 0 0 0 0 0 0 0 0
43375- 0 0 0 0 0 0 0 0 0 0 0 0
43376- 0 0 0 0 0 0 0 0 0 0 0 0
43377- 0 0 0 0 0 0 0 0 0 0 0 0
43378- 0 0 0 0 0 0 0 0 0 0 0 0
43379- 0 0 0 0 0 1 0 0 1 0 0 0
43380- 0 0 0 0 0 0 0 0 0 0 0 0
43381- 0 0 0 0 0 0 0 0 0 0 0 0
43382- 0 0 0 0 0 0 0 0 0 0 0 0
43383- 0 0 0 0 0 0 0 0 0 0 0 0
43384- 0 0 0 0 0 0 0 0 0 14 14 14
43385- 42 42 42 82 82 82 2 2 6 2 2 6
43386- 2 2 6 6 6 6 10 10 10 2 2 6
43387- 2 2 6 2 2 6 2 2 6 2 2 6
43388- 2 2 6 2 2 6 2 2 6 6 6 6
43389- 14 14 14 10 10 10 2 2 6 2 2 6
43390- 2 2 6 2 2 6 2 2 6 18 18 18
43391- 82 82 82 34 34 34 10 10 10 0 0 0
43392- 0 0 0 0 0 0 0 0 0 0 0 0
43393- 0 0 0 0 0 0 0 0 0 0 0 0
43394- 0 0 0 0 0 0 0 0 0 0 0 0
43395- 0 0 0 0 0 0 0 0 0 0 0 0
43396- 0 0 0 0 0 0 0 0 0 0 0 0
43397- 0 0 0 0 0 0 0 0 0 0 0 0
43398- 0 0 0 0 0 0 0 0 0 0 0 0
43399- 0 0 1 0 0 0 0 0 0 0 0 0
43400- 0 0 0 0 0 0 0 0 0 0 0 0
43401- 0 0 0 0 0 0 0 0 0 0 0 0
43402- 0 0 0 0 0 0 0 0 0 0 0 0
43403- 0 0 0 0 0 0 0 0 0 0 0 0
43404- 0 0 0 0 0 0 0 0 0 14 14 14
43405- 46 46 46 86 86 86 2 2 6 2 2 6
43406- 6 6 6 6 6 6 22 22 22 34 34 34
43407- 6 6 6 2 2 6 2 2 6 2 2 6
43408- 2 2 6 2 2 6 18 18 18 34 34 34
43409- 10 10 10 50 50 50 22 22 22 2 2 6
43410- 2 2 6 2 2 6 2 2 6 10 10 10
43411- 86 86 86 42 42 42 14 14 14 0 0 0
43412- 0 0 0 0 0 0 0 0 0 0 0 0
43413- 0 0 0 0 0 0 0 0 0 0 0 0
43414- 0 0 0 0 0 0 0 0 0 0 0 0
43415- 0 0 0 0 0 0 0 0 0 0 0 0
43416- 0 0 0 0 0 0 0 0 0 0 0 0
43417- 0 0 0 0 0 0 0 0 0 0 0 0
43418- 0 0 0 0 0 0 0 0 0 0 0 0
43419- 0 0 1 0 0 1 0 0 1 0 0 0
43420- 0 0 0 0 0 0 0 0 0 0 0 0
43421- 0 0 0 0 0 0 0 0 0 0 0 0
43422- 0 0 0 0 0 0 0 0 0 0 0 0
43423- 0 0 0 0 0 0 0 0 0 0 0 0
43424- 0 0 0 0 0 0 0 0 0 14 14 14
43425- 46 46 46 86 86 86 2 2 6 2 2 6
43426- 38 38 38 116 116 116 94 94 94 22 22 22
43427- 22 22 22 2 2 6 2 2 6 2 2 6
43428- 14 14 14 86 86 86 138 138 138 162 162 162
43429-154 154 154 38 38 38 26 26 26 6 6 6
43430- 2 2 6 2 2 6 2 2 6 2 2 6
43431- 86 86 86 46 46 46 14 14 14 0 0 0
43432- 0 0 0 0 0 0 0 0 0 0 0 0
43433- 0 0 0 0 0 0 0 0 0 0 0 0
43434- 0 0 0 0 0 0 0 0 0 0 0 0
43435- 0 0 0 0 0 0 0 0 0 0 0 0
43436- 0 0 0 0 0 0 0 0 0 0 0 0
43437- 0 0 0 0 0 0 0 0 0 0 0 0
43438- 0 0 0 0 0 0 0 0 0 0 0 0
43439- 0 0 0 0 0 0 0 0 0 0 0 0
43440- 0 0 0 0 0 0 0 0 0 0 0 0
43441- 0 0 0 0 0 0 0 0 0 0 0 0
43442- 0 0 0 0 0 0 0 0 0 0 0 0
43443- 0 0 0 0 0 0 0 0 0 0 0 0
43444- 0 0 0 0 0 0 0 0 0 14 14 14
43445- 46 46 46 86 86 86 2 2 6 14 14 14
43446-134 134 134 198 198 198 195 195 195 116 116 116
43447- 10 10 10 2 2 6 2 2 6 6 6 6
43448-101 98 89 187 187 187 210 210 210 218 218 218
43449-214 214 214 134 134 134 14 14 14 6 6 6
43450- 2 2 6 2 2 6 2 2 6 2 2 6
43451- 86 86 86 50 50 50 18 18 18 6 6 6
43452- 0 0 0 0 0 0 0 0 0 0 0 0
43453- 0 0 0 0 0 0 0 0 0 0 0 0
43454- 0 0 0 0 0 0 0 0 0 0 0 0
43455- 0 0 0 0 0 0 0 0 0 0 0 0
43456- 0 0 0 0 0 0 0 0 0 0 0 0
43457- 0 0 0 0 0 0 0 0 0 0 0 0
43458- 0 0 0 0 0 0 0 0 1 0 0 0
43459- 0 0 1 0 0 1 0 0 1 0 0 0
43460- 0 0 0 0 0 0 0 0 0 0 0 0
43461- 0 0 0 0 0 0 0 0 0 0 0 0
43462- 0 0 0 0 0 0 0 0 0 0 0 0
43463- 0 0 0 0 0 0 0 0 0 0 0 0
43464- 0 0 0 0 0 0 0 0 0 14 14 14
43465- 46 46 46 86 86 86 2 2 6 54 54 54
43466-218 218 218 195 195 195 226 226 226 246 246 246
43467- 58 58 58 2 2 6 2 2 6 30 30 30
43468-210 210 210 253 253 253 174 174 174 123 123 123
43469-221 221 221 234 234 234 74 74 74 2 2 6
43470- 2 2 6 2 2 6 2 2 6 2 2 6
43471- 70 70 70 58 58 58 22 22 22 6 6 6
43472- 0 0 0 0 0 0 0 0 0 0 0 0
43473- 0 0 0 0 0 0 0 0 0 0 0 0
43474- 0 0 0 0 0 0 0 0 0 0 0 0
43475- 0 0 0 0 0 0 0 0 0 0 0 0
43476- 0 0 0 0 0 0 0 0 0 0 0 0
43477- 0 0 0 0 0 0 0 0 0 0 0 0
43478- 0 0 0 0 0 0 0 0 0 0 0 0
43479- 0 0 0 0 0 0 0 0 0 0 0 0
43480- 0 0 0 0 0 0 0 0 0 0 0 0
43481- 0 0 0 0 0 0 0 0 0 0 0 0
43482- 0 0 0 0 0 0 0 0 0 0 0 0
43483- 0 0 0 0 0 0 0 0 0 0 0 0
43484- 0 0 0 0 0 0 0 0 0 14 14 14
43485- 46 46 46 82 82 82 2 2 6 106 106 106
43486-170 170 170 26 26 26 86 86 86 226 226 226
43487-123 123 123 10 10 10 14 14 14 46 46 46
43488-231 231 231 190 190 190 6 6 6 70 70 70
43489- 90 90 90 238 238 238 158 158 158 2 2 6
43490- 2 2 6 2 2 6 2 2 6 2 2 6
43491- 70 70 70 58 58 58 22 22 22 6 6 6
43492- 0 0 0 0 0 0 0 0 0 0 0 0
43493- 0 0 0 0 0 0 0 0 0 0 0 0
43494- 0 0 0 0 0 0 0 0 0 0 0 0
43495- 0 0 0 0 0 0 0 0 0 0 0 0
43496- 0 0 0 0 0 0 0 0 0 0 0 0
43497- 0 0 0 0 0 0 0 0 0 0 0 0
43498- 0 0 0 0 0 0 0 0 1 0 0 0
43499- 0 0 1 0 0 1 0 0 1 0 0 0
43500- 0 0 0 0 0 0 0 0 0 0 0 0
43501- 0 0 0 0 0 0 0 0 0 0 0 0
43502- 0 0 0 0 0 0 0 0 0 0 0 0
43503- 0 0 0 0 0 0 0 0 0 0 0 0
43504- 0 0 0 0 0 0 0 0 0 14 14 14
43505- 42 42 42 86 86 86 6 6 6 116 116 116
43506-106 106 106 6 6 6 70 70 70 149 149 149
43507-128 128 128 18 18 18 38 38 38 54 54 54
43508-221 221 221 106 106 106 2 2 6 14 14 14
43509- 46 46 46 190 190 190 198 198 198 2 2 6
43510- 2 2 6 2 2 6 2 2 6 2 2 6
43511- 74 74 74 62 62 62 22 22 22 6 6 6
43512- 0 0 0 0 0 0 0 0 0 0 0 0
43513- 0 0 0 0 0 0 0 0 0 0 0 0
43514- 0 0 0 0 0 0 0 0 0 0 0 0
43515- 0 0 0 0 0 0 0 0 0 0 0 0
43516- 0 0 0 0 0 0 0 0 0 0 0 0
43517- 0 0 0 0 0 0 0 0 0 0 0 0
43518- 0 0 0 0 0 0 0 0 1 0 0 0
43519- 0 0 1 0 0 0 0 0 1 0 0 0
43520- 0 0 0 0 0 0 0 0 0 0 0 0
43521- 0 0 0 0 0 0 0 0 0 0 0 0
43522- 0 0 0 0 0 0 0 0 0 0 0 0
43523- 0 0 0 0 0 0 0 0 0 0 0 0
43524- 0 0 0 0 0 0 0 0 0 14 14 14
43525- 42 42 42 94 94 94 14 14 14 101 101 101
43526-128 128 128 2 2 6 18 18 18 116 116 116
43527-118 98 46 121 92 8 121 92 8 98 78 10
43528-162 162 162 106 106 106 2 2 6 2 2 6
43529- 2 2 6 195 195 195 195 195 195 6 6 6
43530- 2 2 6 2 2 6 2 2 6 2 2 6
43531- 74 74 74 62 62 62 22 22 22 6 6 6
43532- 0 0 0 0 0 0 0 0 0 0 0 0
43533- 0 0 0 0 0 0 0 0 0 0 0 0
43534- 0 0 0 0 0 0 0 0 0 0 0 0
43535- 0 0 0 0 0 0 0 0 0 0 0 0
43536- 0 0 0 0 0 0 0 0 0 0 0 0
43537- 0 0 0 0 0 0 0 0 0 0 0 0
43538- 0 0 0 0 0 0 0 0 1 0 0 1
43539- 0 0 1 0 0 0 0 0 1 0 0 0
43540- 0 0 0 0 0 0 0 0 0 0 0 0
43541- 0 0 0 0 0 0 0 0 0 0 0 0
43542- 0 0 0 0 0 0 0 0 0 0 0 0
43543- 0 0 0 0 0 0 0 0 0 0 0 0
43544- 0 0 0 0 0 0 0 0 0 10 10 10
43545- 38 38 38 90 90 90 14 14 14 58 58 58
43546-210 210 210 26 26 26 54 38 6 154 114 10
43547-226 170 11 236 186 11 225 175 15 184 144 12
43548-215 174 15 175 146 61 37 26 9 2 2 6
43549- 70 70 70 246 246 246 138 138 138 2 2 6
43550- 2 2 6 2 2 6 2 2 6 2 2 6
43551- 70 70 70 66 66 66 26 26 26 6 6 6
43552- 0 0 0 0 0 0 0 0 0 0 0 0
43553- 0 0 0 0 0 0 0 0 0 0 0 0
43554- 0 0 0 0 0 0 0 0 0 0 0 0
43555- 0 0 0 0 0 0 0 0 0 0 0 0
43556- 0 0 0 0 0 0 0 0 0 0 0 0
43557- 0 0 0 0 0 0 0 0 0 0 0 0
43558- 0 0 0 0 0 0 0 0 0 0 0 0
43559- 0 0 0 0 0 0 0 0 0 0 0 0
43560- 0 0 0 0 0 0 0 0 0 0 0 0
43561- 0 0 0 0 0 0 0 0 0 0 0 0
43562- 0 0 0 0 0 0 0 0 0 0 0 0
43563- 0 0 0 0 0 0 0 0 0 0 0 0
43564- 0 0 0 0 0 0 0 0 0 10 10 10
43565- 38 38 38 86 86 86 14 14 14 10 10 10
43566-195 195 195 188 164 115 192 133 9 225 175 15
43567-239 182 13 234 190 10 232 195 16 232 200 30
43568-245 207 45 241 208 19 232 195 16 184 144 12
43569-218 194 134 211 206 186 42 42 42 2 2 6
43570- 2 2 6 2 2 6 2 2 6 2 2 6
43571- 50 50 50 74 74 74 30 30 30 6 6 6
43572- 0 0 0 0 0 0 0 0 0 0 0 0
43573- 0 0 0 0 0 0 0 0 0 0 0 0
43574- 0 0 0 0 0 0 0 0 0 0 0 0
43575- 0 0 0 0 0 0 0 0 0 0 0 0
43576- 0 0 0 0 0 0 0 0 0 0 0 0
43577- 0 0 0 0 0 0 0 0 0 0 0 0
43578- 0 0 0 0 0 0 0 0 0 0 0 0
43579- 0 0 0 0 0 0 0 0 0 0 0 0
43580- 0 0 0 0 0 0 0 0 0 0 0 0
43581- 0 0 0 0 0 0 0 0 0 0 0 0
43582- 0 0 0 0 0 0 0 0 0 0 0 0
43583- 0 0 0 0 0 0 0 0 0 0 0 0
43584- 0 0 0 0 0 0 0 0 0 10 10 10
43585- 34 34 34 86 86 86 14 14 14 2 2 6
43586-121 87 25 192 133 9 219 162 10 239 182 13
43587-236 186 11 232 195 16 241 208 19 244 214 54
43588-246 218 60 246 218 38 246 215 20 241 208 19
43589-241 208 19 226 184 13 121 87 25 2 2 6
43590- 2 2 6 2 2 6 2 2 6 2 2 6
43591- 50 50 50 82 82 82 34 34 34 10 10 10
43592- 0 0 0 0 0 0 0 0 0 0 0 0
43593- 0 0 0 0 0 0 0 0 0 0 0 0
43594- 0 0 0 0 0 0 0 0 0 0 0 0
43595- 0 0 0 0 0 0 0 0 0 0 0 0
43596- 0 0 0 0 0 0 0 0 0 0 0 0
43597- 0 0 0 0 0 0 0 0 0 0 0 0
43598- 0 0 0 0 0 0 0 0 0 0 0 0
43599- 0 0 0 0 0 0 0 0 0 0 0 0
43600- 0 0 0 0 0 0 0 0 0 0 0 0
43601- 0 0 0 0 0 0 0 0 0 0 0 0
43602- 0 0 0 0 0 0 0 0 0 0 0 0
43603- 0 0 0 0 0 0 0 0 0 0 0 0
43604- 0 0 0 0 0 0 0 0 0 10 10 10
43605- 34 34 34 82 82 82 30 30 30 61 42 6
43606-180 123 7 206 145 10 230 174 11 239 182 13
43607-234 190 10 238 202 15 241 208 19 246 218 74
43608-246 218 38 246 215 20 246 215 20 246 215 20
43609-226 184 13 215 174 15 184 144 12 6 6 6
43610- 2 2 6 2 2 6 2 2 6 2 2 6
43611- 26 26 26 94 94 94 42 42 42 14 14 14
43612- 0 0 0 0 0 0 0 0 0 0 0 0
43613- 0 0 0 0 0 0 0 0 0 0 0 0
43614- 0 0 0 0 0 0 0 0 0 0 0 0
43615- 0 0 0 0 0 0 0 0 0 0 0 0
43616- 0 0 0 0 0 0 0 0 0 0 0 0
43617- 0 0 0 0 0 0 0 0 0 0 0 0
43618- 0 0 0 0 0 0 0 0 0 0 0 0
43619- 0 0 0 0 0 0 0 0 0 0 0 0
43620- 0 0 0 0 0 0 0 0 0 0 0 0
43621- 0 0 0 0 0 0 0 0 0 0 0 0
43622- 0 0 0 0 0 0 0 0 0 0 0 0
43623- 0 0 0 0 0 0 0 0 0 0 0 0
43624- 0 0 0 0 0 0 0 0 0 10 10 10
43625- 30 30 30 78 78 78 50 50 50 104 69 6
43626-192 133 9 216 158 10 236 178 12 236 186 11
43627-232 195 16 241 208 19 244 214 54 245 215 43
43628-246 215 20 246 215 20 241 208 19 198 155 10
43629-200 144 11 216 158 10 156 118 10 2 2 6
43630- 2 2 6 2 2 6 2 2 6 2 2 6
43631- 6 6 6 90 90 90 54 54 54 18 18 18
43632- 6 6 6 0 0 0 0 0 0 0 0 0
43633- 0 0 0 0 0 0 0 0 0 0 0 0
43634- 0 0 0 0 0 0 0 0 0 0 0 0
43635- 0 0 0 0 0 0 0 0 0 0 0 0
43636- 0 0 0 0 0 0 0 0 0 0 0 0
43637- 0 0 0 0 0 0 0 0 0 0 0 0
43638- 0 0 0 0 0 0 0 0 0 0 0 0
43639- 0 0 0 0 0 0 0 0 0 0 0 0
43640- 0 0 0 0 0 0 0 0 0 0 0 0
43641- 0 0 0 0 0 0 0 0 0 0 0 0
43642- 0 0 0 0 0 0 0 0 0 0 0 0
43643- 0 0 0 0 0 0 0 0 0 0 0 0
43644- 0 0 0 0 0 0 0 0 0 10 10 10
43645- 30 30 30 78 78 78 46 46 46 22 22 22
43646-137 92 6 210 162 10 239 182 13 238 190 10
43647-238 202 15 241 208 19 246 215 20 246 215 20
43648-241 208 19 203 166 17 185 133 11 210 150 10
43649-216 158 10 210 150 10 102 78 10 2 2 6
43650- 6 6 6 54 54 54 14 14 14 2 2 6
43651- 2 2 6 62 62 62 74 74 74 30 30 30
43652- 10 10 10 0 0 0 0 0 0 0 0 0
43653- 0 0 0 0 0 0 0 0 0 0 0 0
43654- 0 0 0 0 0 0 0 0 0 0 0 0
43655- 0 0 0 0 0 0 0 0 0 0 0 0
43656- 0 0 0 0 0 0 0 0 0 0 0 0
43657- 0 0 0 0 0 0 0 0 0 0 0 0
43658- 0 0 0 0 0 0 0 0 0 0 0 0
43659- 0 0 0 0 0 0 0 0 0 0 0 0
43660- 0 0 0 0 0 0 0 0 0 0 0 0
43661- 0 0 0 0 0 0 0 0 0 0 0 0
43662- 0 0 0 0 0 0 0 0 0 0 0 0
43663- 0 0 0 0 0 0 0 0 0 0 0 0
43664- 0 0 0 0 0 0 0 0 0 10 10 10
43665- 34 34 34 78 78 78 50 50 50 6 6 6
43666- 94 70 30 139 102 15 190 146 13 226 184 13
43667-232 200 30 232 195 16 215 174 15 190 146 13
43668-168 122 10 192 133 9 210 150 10 213 154 11
43669-202 150 34 182 157 106 101 98 89 2 2 6
43670- 2 2 6 78 78 78 116 116 116 58 58 58
43671- 2 2 6 22 22 22 90 90 90 46 46 46
43672- 18 18 18 6 6 6 0 0 0 0 0 0
43673- 0 0 0 0 0 0 0 0 0 0 0 0
43674- 0 0 0 0 0 0 0 0 0 0 0 0
43675- 0 0 0 0 0 0 0 0 0 0 0 0
43676- 0 0 0 0 0 0 0 0 0 0 0 0
43677- 0 0 0 0 0 0 0 0 0 0 0 0
43678- 0 0 0 0 0 0 0 0 0 0 0 0
43679- 0 0 0 0 0 0 0 0 0 0 0 0
43680- 0 0 0 0 0 0 0 0 0 0 0 0
43681- 0 0 0 0 0 0 0 0 0 0 0 0
43682- 0 0 0 0 0 0 0 0 0 0 0 0
43683- 0 0 0 0 0 0 0 0 0 0 0 0
43684- 0 0 0 0 0 0 0 0 0 10 10 10
43685- 38 38 38 86 86 86 50 50 50 6 6 6
43686-128 128 128 174 154 114 156 107 11 168 122 10
43687-198 155 10 184 144 12 197 138 11 200 144 11
43688-206 145 10 206 145 10 197 138 11 188 164 115
43689-195 195 195 198 198 198 174 174 174 14 14 14
43690- 2 2 6 22 22 22 116 116 116 116 116 116
43691- 22 22 22 2 2 6 74 74 74 70 70 70
43692- 30 30 30 10 10 10 0 0 0 0 0 0
43693- 0 0 0 0 0 0 0 0 0 0 0 0
43694- 0 0 0 0 0 0 0 0 0 0 0 0
43695- 0 0 0 0 0 0 0 0 0 0 0 0
43696- 0 0 0 0 0 0 0 0 0 0 0 0
43697- 0 0 0 0 0 0 0 0 0 0 0 0
43698- 0 0 0 0 0 0 0 0 0 0 0 0
43699- 0 0 0 0 0 0 0 0 0 0 0 0
43700- 0 0 0 0 0 0 0 0 0 0 0 0
43701- 0 0 0 0 0 0 0 0 0 0 0 0
43702- 0 0 0 0 0 0 0 0 0 0 0 0
43703- 0 0 0 0 0 0 0 0 0 0 0 0
43704- 0 0 0 0 0 0 6 6 6 18 18 18
43705- 50 50 50 101 101 101 26 26 26 10 10 10
43706-138 138 138 190 190 190 174 154 114 156 107 11
43707-197 138 11 200 144 11 197 138 11 192 133 9
43708-180 123 7 190 142 34 190 178 144 187 187 187
43709-202 202 202 221 221 221 214 214 214 66 66 66
43710- 2 2 6 2 2 6 50 50 50 62 62 62
43711- 6 6 6 2 2 6 10 10 10 90 90 90
43712- 50 50 50 18 18 18 6 6 6 0 0 0
43713- 0 0 0 0 0 0 0 0 0 0 0 0
43714- 0 0 0 0 0 0 0 0 0 0 0 0
43715- 0 0 0 0 0 0 0 0 0 0 0 0
43716- 0 0 0 0 0 0 0 0 0 0 0 0
43717- 0 0 0 0 0 0 0 0 0 0 0 0
43718- 0 0 0 0 0 0 0 0 0 0 0 0
43719- 0 0 0 0 0 0 0 0 0 0 0 0
43720- 0 0 0 0 0 0 0 0 0 0 0 0
43721- 0 0 0 0 0 0 0 0 0 0 0 0
43722- 0 0 0 0 0 0 0 0 0 0 0 0
43723- 0 0 0 0 0 0 0 0 0 0 0 0
43724- 0 0 0 0 0 0 10 10 10 34 34 34
43725- 74 74 74 74 74 74 2 2 6 6 6 6
43726-144 144 144 198 198 198 190 190 190 178 166 146
43727-154 121 60 156 107 11 156 107 11 168 124 44
43728-174 154 114 187 187 187 190 190 190 210 210 210
43729-246 246 246 253 253 253 253 253 253 182 182 182
43730- 6 6 6 2 2 6 2 2 6 2 2 6
43731- 2 2 6 2 2 6 2 2 6 62 62 62
43732- 74 74 74 34 34 34 14 14 14 0 0 0
43733- 0 0 0 0 0 0 0 0 0 0 0 0
43734- 0 0 0 0 0 0 0 0 0 0 0 0
43735- 0 0 0 0 0 0 0 0 0 0 0 0
43736- 0 0 0 0 0 0 0 0 0 0 0 0
43737- 0 0 0 0 0 0 0 0 0 0 0 0
43738- 0 0 0 0 0 0 0 0 0 0 0 0
43739- 0 0 0 0 0 0 0 0 0 0 0 0
43740- 0 0 0 0 0 0 0 0 0 0 0 0
43741- 0 0 0 0 0 0 0 0 0 0 0 0
43742- 0 0 0 0 0 0 0 0 0 0 0 0
43743- 0 0 0 0 0 0 0 0 0 0 0 0
43744- 0 0 0 10 10 10 22 22 22 54 54 54
43745- 94 94 94 18 18 18 2 2 6 46 46 46
43746-234 234 234 221 221 221 190 190 190 190 190 190
43747-190 190 190 187 187 187 187 187 187 190 190 190
43748-190 190 190 195 195 195 214 214 214 242 242 242
43749-253 253 253 253 253 253 253 253 253 253 253 253
43750- 82 82 82 2 2 6 2 2 6 2 2 6
43751- 2 2 6 2 2 6 2 2 6 14 14 14
43752- 86 86 86 54 54 54 22 22 22 6 6 6
43753- 0 0 0 0 0 0 0 0 0 0 0 0
43754- 0 0 0 0 0 0 0 0 0 0 0 0
43755- 0 0 0 0 0 0 0 0 0 0 0 0
43756- 0 0 0 0 0 0 0 0 0 0 0 0
43757- 0 0 0 0 0 0 0 0 0 0 0 0
43758- 0 0 0 0 0 0 0 0 0 0 0 0
43759- 0 0 0 0 0 0 0 0 0 0 0 0
43760- 0 0 0 0 0 0 0 0 0 0 0 0
43761- 0 0 0 0 0 0 0 0 0 0 0 0
43762- 0 0 0 0 0 0 0 0 0 0 0 0
43763- 0 0 0 0 0 0 0 0 0 0 0 0
43764- 6 6 6 18 18 18 46 46 46 90 90 90
43765- 46 46 46 18 18 18 6 6 6 182 182 182
43766-253 253 253 246 246 246 206 206 206 190 190 190
43767-190 190 190 190 190 190 190 190 190 190 190 190
43768-206 206 206 231 231 231 250 250 250 253 253 253
43769-253 253 253 253 253 253 253 253 253 253 253 253
43770-202 202 202 14 14 14 2 2 6 2 2 6
43771- 2 2 6 2 2 6 2 2 6 2 2 6
43772- 42 42 42 86 86 86 42 42 42 18 18 18
43773- 6 6 6 0 0 0 0 0 0 0 0 0
43774- 0 0 0 0 0 0 0 0 0 0 0 0
43775- 0 0 0 0 0 0 0 0 0 0 0 0
43776- 0 0 0 0 0 0 0 0 0 0 0 0
43777- 0 0 0 0 0 0 0 0 0 0 0 0
43778- 0 0 0 0 0 0 0 0 0 0 0 0
43779- 0 0 0 0 0 0 0 0 0 0 0 0
43780- 0 0 0 0 0 0 0 0 0 0 0 0
43781- 0 0 0 0 0 0 0 0 0 0 0 0
43782- 0 0 0 0 0 0 0 0 0 0 0 0
43783- 0 0 0 0 0 0 0 0 0 6 6 6
43784- 14 14 14 38 38 38 74 74 74 66 66 66
43785- 2 2 6 6 6 6 90 90 90 250 250 250
43786-253 253 253 253 253 253 238 238 238 198 198 198
43787-190 190 190 190 190 190 195 195 195 221 221 221
43788-246 246 246 253 253 253 253 253 253 253 253 253
43789-253 253 253 253 253 253 253 253 253 253 253 253
43790-253 253 253 82 82 82 2 2 6 2 2 6
43791- 2 2 6 2 2 6 2 2 6 2 2 6
43792- 2 2 6 78 78 78 70 70 70 34 34 34
43793- 14 14 14 6 6 6 0 0 0 0 0 0
43794- 0 0 0 0 0 0 0 0 0 0 0 0
43795- 0 0 0 0 0 0 0 0 0 0 0 0
43796- 0 0 0 0 0 0 0 0 0 0 0 0
43797- 0 0 0 0 0 0 0 0 0 0 0 0
43798- 0 0 0 0 0 0 0 0 0 0 0 0
43799- 0 0 0 0 0 0 0 0 0 0 0 0
43800- 0 0 0 0 0 0 0 0 0 0 0 0
43801- 0 0 0 0 0 0 0 0 0 0 0 0
43802- 0 0 0 0 0 0 0 0 0 0 0 0
43803- 0 0 0 0 0 0 0 0 0 14 14 14
43804- 34 34 34 66 66 66 78 78 78 6 6 6
43805- 2 2 6 18 18 18 218 218 218 253 253 253
43806-253 253 253 253 253 253 253 253 253 246 246 246
43807-226 226 226 231 231 231 246 246 246 253 253 253
43808-253 253 253 253 253 253 253 253 253 253 253 253
43809-253 253 253 253 253 253 253 253 253 253 253 253
43810-253 253 253 178 178 178 2 2 6 2 2 6
43811- 2 2 6 2 2 6 2 2 6 2 2 6
43812- 2 2 6 18 18 18 90 90 90 62 62 62
43813- 30 30 30 10 10 10 0 0 0 0 0 0
43814- 0 0 0 0 0 0 0 0 0 0 0 0
43815- 0 0 0 0 0 0 0 0 0 0 0 0
43816- 0 0 0 0 0 0 0 0 0 0 0 0
43817- 0 0 0 0 0 0 0 0 0 0 0 0
43818- 0 0 0 0 0 0 0 0 0 0 0 0
43819- 0 0 0 0 0 0 0 0 0 0 0 0
43820- 0 0 0 0 0 0 0 0 0 0 0 0
43821- 0 0 0 0 0 0 0 0 0 0 0 0
43822- 0 0 0 0 0 0 0 0 0 0 0 0
43823- 0 0 0 0 0 0 10 10 10 26 26 26
43824- 58 58 58 90 90 90 18 18 18 2 2 6
43825- 2 2 6 110 110 110 253 253 253 253 253 253
43826-253 253 253 253 253 253 253 253 253 253 253 253
43827-250 250 250 253 253 253 253 253 253 253 253 253
43828-253 253 253 253 253 253 253 253 253 253 253 253
43829-253 253 253 253 253 253 253 253 253 253 253 253
43830-253 253 253 231 231 231 18 18 18 2 2 6
43831- 2 2 6 2 2 6 2 2 6 2 2 6
43832- 2 2 6 2 2 6 18 18 18 94 94 94
43833- 54 54 54 26 26 26 10 10 10 0 0 0
43834- 0 0 0 0 0 0 0 0 0 0 0 0
43835- 0 0 0 0 0 0 0 0 0 0 0 0
43836- 0 0 0 0 0 0 0 0 0 0 0 0
43837- 0 0 0 0 0 0 0 0 0 0 0 0
43838- 0 0 0 0 0 0 0 0 0 0 0 0
43839- 0 0 0 0 0 0 0 0 0 0 0 0
43840- 0 0 0 0 0 0 0 0 0 0 0 0
43841- 0 0 0 0 0 0 0 0 0 0 0 0
43842- 0 0 0 0 0 0 0 0 0 0 0 0
43843- 0 0 0 6 6 6 22 22 22 50 50 50
43844- 90 90 90 26 26 26 2 2 6 2 2 6
43845- 14 14 14 195 195 195 250 250 250 253 253 253
43846-253 253 253 253 253 253 253 253 253 253 253 253
43847-253 253 253 253 253 253 253 253 253 253 253 253
43848-253 253 253 253 253 253 253 253 253 253 253 253
43849-253 253 253 253 253 253 253 253 253 253 253 253
43850-250 250 250 242 242 242 54 54 54 2 2 6
43851- 2 2 6 2 2 6 2 2 6 2 2 6
43852- 2 2 6 2 2 6 2 2 6 38 38 38
43853- 86 86 86 50 50 50 22 22 22 6 6 6
43854- 0 0 0 0 0 0 0 0 0 0 0 0
43855- 0 0 0 0 0 0 0 0 0 0 0 0
43856- 0 0 0 0 0 0 0 0 0 0 0 0
43857- 0 0 0 0 0 0 0 0 0 0 0 0
43858- 0 0 0 0 0 0 0 0 0 0 0 0
43859- 0 0 0 0 0 0 0 0 0 0 0 0
43860- 0 0 0 0 0 0 0 0 0 0 0 0
43861- 0 0 0 0 0 0 0 0 0 0 0 0
43862- 0 0 0 0 0 0 0 0 0 0 0 0
43863- 6 6 6 14 14 14 38 38 38 82 82 82
43864- 34 34 34 2 2 6 2 2 6 2 2 6
43865- 42 42 42 195 195 195 246 246 246 253 253 253
43866-253 253 253 253 253 253 253 253 253 250 250 250
43867-242 242 242 242 242 242 250 250 250 253 253 253
43868-253 253 253 253 253 253 253 253 253 253 253 253
43869-253 253 253 250 250 250 246 246 246 238 238 238
43870-226 226 226 231 231 231 101 101 101 6 6 6
43871- 2 2 6 2 2 6 2 2 6 2 2 6
43872- 2 2 6 2 2 6 2 2 6 2 2 6
43873- 38 38 38 82 82 82 42 42 42 14 14 14
43874- 6 6 6 0 0 0 0 0 0 0 0 0
43875- 0 0 0 0 0 0 0 0 0 0 0 0
43876- 0 0 0 0 0 0 0 0 0 0 0 0
43877- 0 0 0 0 0 0 0 0 0 0 0 0
43878- 0 0 0 0 0 0 0 0 0 0 0 0
43879- 0 0 0 0 0 0 0 0 0 0 0 0
43880- 0 0 0 0 0 0 0 0 0 0 0 0
43881- 0 0 0 0 0 0 0 0 0 0 0 0
43882- 0 0 0 0 0 0 0 0 0 0 0 0
43883- 10 10 10 26 26 26 62 62 62 66 66 66
43884- 2 2 6 2 2 6 2 2 6 6 6 6
43885- 70 70 70 170 170 170 206 206 206 234 234 234
43886-246 246 246 250 250 250 250 250 250 238 238 238
43887-226 226 226 231 231 231 238 238 238 250 250 250
43888-250 250 250 250 250 250 246 246 246 231 231 231
43889-214 214 214 206 206 206 202 202 202 202 202 202
43890-198 198 198 202 202 202 182 182 182 18 18 18
43891- 2 2 6 2 2 6 2 2 6 2 2 6
43892- 2 2 6 2 2 6 2 2 6 2 2 6
43893- 2 2 6 62 62 62 66 66 66 30 30 30
43894- 10 10 10 0 0 0 0 0 0 0 0 0
43895- 0 0 0 0 0 0 0 0 0 0 0 0
43896- 0 0 0 0 0 0 0 0 0 0 0 0
43897- 0 0 0 0 0 0 0 0 0 0 0 0
43898- 0 0 0 0 0 0 0 0 0 0 0 0
43899- 0 0 0 0 0 0 0 0 0 0 0 0
43900- 0 0 0 0 0 0 0 0 0 0 0 0
43901- 0 0 0 0 0 0 0 0 0 0 0 0
43902- 0 0 0 0 0 0 0 0 0 0 0 0
43903- 14 14 14 42 42 42 82 82 82 18 18 18
43904- 2 2 6 2 2 6 2 2 6 10 10 10
43905- 94 94 94 182 182 182 218 218 218 242 242 242
43906-250 250 250 253 253 253 253 253 253 250 250 250
43907-234 234 234 253 253 253 253 253 253 253 253 253
43908-253 253 253 253 253 253 253 253 253 246 246 246
43909-238 238 238 226 226 226 210 210 210 202 202 202
43910-195 195 195 195 195 195 210 210 210 158 158 158
43911- 6 6 6 14 14 14 50 50 50 14 14 14
43912- 2 2 6 2 2 6 2 2 6 2 2 6
43913- 2 2 6 6 6 6 86 86 86 46 46 46
43914- 18 18 18 6 6 6 0 0 0 0 0 0
43915- 0 0 0 0 0 0 0 0 0 0 0 0
43916- 0 0 0 0 0 0 0 0 0 0 0 0
43917- 0 0 0 0 0 0 0 0 0 0 0 0
43918- 0 0 0 0 0 0 0 0 0 0 0 0
43919- 0 0 0 0 0 0 0 0 0 0 0 0
43920- 0 0 0 0 0 0 0 0 0 0 0 0
43921- 0 0 0 0 0 0 0 0 0 0 0 0
43922- 0 0 0 0 0 0 0 0 0 6 6 6
43923- 22 22 22 54 54 54 70 70 70 2 2 6
43924- 2 2 6 10 10 10 2 2 6 22 22 22
43925-166 166 166 231 231 231 250 250 250 253 253 253
43926-253 253 253 253 253 253 253 253 253 250 250 250
43927-242 242 242 253 253 253 253 253 253 253 253 253
43928-253 253 253 253 253 253 253 253 253 253 253 253
43929-253 253 253 253 253 253 253 253 253 246 246 246
43930-231 231 231 206 206 206 198 198 198 226 226 226
43931- 94 94 94 2 2 6 6 6 6 38 38 38
43932- 30 30 30 2 2 6 2 2 6 2 2 6
43933- 2 2 6 2 2 6 62 62 62 66 66 66
43934- 26 26 26 10 10 10 0 0 0 0 0 0
43935- 0 0 0 0 0 0 0 0 0 0 0 0
43936- 0 0 0 0 0 0 0 0 0 0 0 0
43937- 0 0 0 0 0 0 0 0 0 0 0 0
43938- 0 0 0 0 0 0 0 0 0 0 0 0
43939- 0 0 0 0 0 0 0 0 0 0 0 0
43940- 0 0 0 0 0 0 0 0 0 0 0 0
43941- 0 0 0 0 0 0 0 0 0 0 0 0
43942- 0 0 0 0 0 0 0 0 0 10 10 10
43943- 30 30 30 74 74 74 50 50 50 2 2 6
43944- 26 26 26 26 26 26 2 2 6 106 106 106
43945-238 238 238 253 253 253 253 253 253 253 253 253
43946-253 253 253 253 253 253 253 253 253 253 253 253
43947-253 253 253 253 253 253 253 253 253 253 253 253
43948-253 253 253 253 253 253 253 253 253 253 253 253
43949-253 253 253 253 253 253 253 253 253 253 253 253
43950-253 253 253 246 246 246 218 218 218 202 202 202
43951-210 210 210 14 14 14 2 2 6 2 2 6
43952- 30 30 30 22 22 22 2 2 6 2 2 6
43953- 2 2 6 2 2 6 18 18 18 86 86 86
43954- 42 42 42 14 14 14 0 0 0 0 0 0
43955- 0 0 0 0 0 0 0 0 0 0 0 0
43956- 0 0 0 0 0 0 0 0 0 0 0 0
43957- 0 0 0 0 0 0 0 0 0 0 0 0
43958- 0 0 0 0 0 0 0 0 0 0 0 0
43959- 0 0 0 0 0 0 0 0 0 0 0 0
43960- 0 0 0 0 0 0 0 0 0 0 0 0
43961- 0 0 0 0 0 0 0 0 0 0 0 0
43962- 0 0 0 0 0 0 0 0 0 14 14 14
43963- 42 42 42 90 90 90 22 22 22 2 2 6
43964- 42 42 42 2 2 6 18 18 18 218 218 218
43965-253 253 253 253 253 253 253 253 253 253 253 253
43966-253 253 253 253 253 253 253 253 253 253 253 253
43967-253 253 253 253 253 253 253 253 253 253 253 253
43968-253 253 253 253 253 253 253 253 253 253 253 253
43969-253 253 253 253 253 253 253 253 253 253 253 253
43970-253 253 253 253 253 253 250 250 250 221 221 221
43971-218 218 218 101 101 101 2 2 6 14 14 14
43972- 18 18 18 38 38 38 10 10 10 2 2 6
43973- 2 2 6 2 2 6 2 2 6 78 78 78
43974- 58 58 58 22 22 22 6 6 6 0 0 0
43975- 0 0 0 0 0 0 0 0 0 0 0 0
43976- 0 0 0 0 0 0 0 0 0 0 0 0
43977- 0 0 0 0 0 0 0 0 0 0 0 0
43978- 0 0 0 0 0 0 0 0 0 0 0 0
43979- 0 0 0 0 0 0 0 0 0 0 0 0
43980- 0 0 0 0 0 0 0 0 0 0 0 0
43981- 0 0 0 0 0 0 0 0 0 0 0 0
43982- 0 0 0 0 0 0 6 6 6 18 18 18
43983- 54 54 54 82 82 82 2 2 6 26 26 26
43984- 22 22 22 2 2 6 123 123 123 253 253 253
43985-253 253 253 253 253 253 253 253 253 253 253 253
43986-253 253 253 253 253 253 253 253 253 253 253 253
43987-253 253 253 253 253 253 253 253 253 253 253 253
43988-253 253 253 253 253 253 253 253 253 253 253 253
43989-253 253 253 253 253 253 253 253 253 253 253 253
43990-253 253 253 253 253 253 253 253 253 250 250 250
43991-238 238 238 198 198 198 6 6 6 38 38 38
43992- 58 58 58 26 26 26 38 38 38 2 2 6
43993- 2 2 6 2 2 6 2 2 6 46 46 46
43994- 78 78 78 30 30 30 10 10 10 0 0 0
43995- 0 0 0 0 0 0 0 0 0 0 0 0
43996- 0 0 0 0 0 0 0 0 0 0 0 0
43997- 0 0 0 0 0 0 0 0 0 0 0 0
43998- 0 0 0 0 0 0 0 0 0 0 0 0
43999- 0 0 0 0 0 0 0 0 0 0 0 0
44000- 0 0 0 0 0 0 0 0 0 0 0 0
44001- 0 0 0 0 0 0 0 0 0 0 0 0
44002- 0 0 0 0 0 0 10 10 10 30 30 30
44003- 74 74 74 58 58 58 2 2 6 42 42 42
44004- 2 2 6 22 22 22 231 231 231 253 253 253
44005-253 253 253 253 253 253 253 253 253 253 253 253
44006-253 253 253 253 253 253 253 253 253 250 250 250
44007-253 253 253 253 253 253 253 253 253 253 253 253
44008-253 253 253 253 253 253 253 253 253 253 253 253
44009-253 253 253 253 253 253 253 253 253 253 253 253
44010-253 253 253 253 253 253 253 253 253 253 253 253
44011-253 253 253 246 246 246 46 46 46 38 38 38
44012- 42 42 42 14 14 14 38 38 38 14 14 14
44013- 2 2 6 2 2 6 2 2 6 6 6 6
44014- 86 86 86 46 46 46 14 14 14 0 0 0
44015- 0 0 0 0 0 0 0 0 0 0 0 0
44016- 0 0 0 0 0 0 0 0 0 0 0 0
44017- 0 0 0 0 0 0 0 0 0 0 0 0
44018- 0 0 0 0 0 0 0 0 0 0 0 0
44019- 0 0 0 0 0 0 0 0 0 0 0 0
44020- 0 0 0 0 0 0 0 0 0 0 0 0
44021- 0 0 0 0 0 0 0 0 0 0 0 0
44022- 0 0 0 6 6 6 14 14 14 42 42 42
44023- 90 90 90 18 18 18 18 18 18 26 26 26
44024- 2 2 6 116 116 116 253 253 253 253 253 253
44025-253 253 253 253 253 253 253 253 253 253 253 253
44026-253 253 253 253 253 253 250 250 250 238 238 238
44027-253 253 253 253 253 253 253 253 253 253 253 253
44028-253 253 253 253 253 253 253 253 253 253 253 253
44029-253 253 253 253 253 253 253 253 253 253 253 253
44030-253 253 253 253 253 253 253 253 253 253 253 253
44031-253 253 253 253 253 253 94 94 94 6 6 6
44032- 2 2 6 2 2 6 10 10 10 34 34 34
44033- 2 2 6 2 2 6 2 2 6 2 2 6
44034- 74 74 74 58 58 58 22 22 22 6 6 6
44035- 0 0 0 0 0 0 0 0 0 0 0 0
44036- 0 0 0 0 0 0 0 0 0 0 0 0
44037- 0 0 0 0 0 0 0 0 0 0 0 0
44038- 0 0 0 0 0 0 0 0 0 0 0 0
44039- 0 0 0 0 0 0 0 0 0 0 0 0
44040- 0 0 0 0 0 0 0 0 0 0 0 0
44041- 0 0 0 0 0 0 0 0 0 0 0 0
44042- 0 0 0 10 10 10 26 26 26 66 66 66
44043- 82 82 82 2 2 6 38 38 38 6 6 6
44044- 14 14 14 210 210 210 253 253 253 253 253 253
44045-253 253 253 253 253 253 253 253 253 253 253 253
44046-253 253 253 253 253 253 246 246 246 242 242 242
44047-253 253 253 253 253 253 253 253 253 253 253 253
44048-253 253 253 253 253 253 253 253 253 253 253 253
44049-253 253 253 253 253 253 253 253 253 253 253 253
44050-253 253 253 253 253 253 253 253 253 253 253 253
44051-253 253 253 253 253 253 144 144 144 2 2 6
44052- 2 2 6 2 2 6 2 2 6 46 46 46
44053- 2 2 6 2 2 6 2 2 6 2 2 6
44054- 42 42 42 74 74 74 30 30 30 10 10 10
44055- 0 0 0 0 0 0 0 0 0 0 0 0
44056- 0 0 0 0 0 0 0 0 0 0 0 0
44057- 0 0 0 0 0 0 0 0 0 0 0 0
44058- 0 0 0 0 0 0 0 0 0 0 0 0
44059- 0 0 0 0 0 0 0 0 0 0 0 0
44060- 0 0 0 0 0 0 0 0 0 0 0 0
44061- 0 0 0 0 0 0 0 0 0 0 0 0
44062- 6 6 6 14 14 14 42 42 42 90 90 90
44063- 26 26 26 6 6 6 42 42 42 2 2 6
44064- 74 74 74 250 250 250 253 253 253 253 253 253
44065-253 253 253 253 253 253 253 253 253 253 253 253
44066-253 253 253 253 253 253 242 242 242 242 242 242
44067-253 253 253 253 253 253 253 253 253 253 253 253
44068-253 253 253 253 253 253 253 253 253 253 253 253
44069-253 253 253 253 253 253 253 253 253 253 253 253
44070-253 253 253 253 253 253 253 253 253 253 253 253
44071-253 253 253 253 253 253 182 182 182 2 2 6
44072- 2 2 6 2 2 6 2 2 6 46 46 46
44073- 2 2 6 2 2 6 2 2 6 2 2 6
44074- 10 10 10 86 86 86 38 38 38 10 10 10
44075- 0 0 0 0 0 0 0 0 0 0 0 0
44076- 0 0 0 0 0 0 0 0 0 0 0 0
44077- 0 0 0 0 0 0 0 0 0 0 0 0
44078- 0 0 0 0 0 0 0 0 0 0 0 0
44079- 0 0 0 0 0 0 0 0 0 0 0 0
44080- 0 0 0 0 0 0 0 0 0 0 0 0
44081- 0 0 0 0 0 0 0 0 0 0 0 0
44082- 10 10 10 26 26 26 66 66 66 82 82 82
44083- 2 2 6 22 22 22 18 18 18 2 2 6
44084-149 149 149 253 253 253 253 253 253 253 253 253
44085-253 253 253 253 253 253 253 253 253 253 253 253
44086-253 253 253 253 253 253 234 234 234 242 242 242
44087-253 253 253 253 253 253 253 253 253 253 253 253
44088-253 253 253 253 253 253 253 253 253 253 253 253
44089-253 253 253 253 253 253 253 253 253 253 253 253
44090-253 253 253 253 253 253 253 253 253 253 253 253
44091-253 253 253 253 253 253 206 206 206 2 2 6
44092- 2 2 6 2 2 6 2 2 6 38 38 38
44093- 2 2 6 2 2 6 2 2 6 2 2 6
44094- 6 6 6 86 86 86 46 46 46 14 14 14
44095- 0 0 0 0 0 0 0 0 0 0 0 0
44096- 0 0 0 0 0 0 0 0 0 0 0 0
44097- 0 0 0 0 0 0 0 0 0 0 0 0
44098- 0 0 0 0 0 0 0 0 0 0 0 0
44099- 0 0 0 0 0 0 0 0 0 0 0 0
44100- 0 0 0 0 0 0 0 0 0 0 0 0
44101- 0 0 0 0 0 0 0 0 0 6 6 6
44102- 18 18 18 46 46 46 86 86 86 18 18 18
44103- 2 2 6 34 34 34 10 10 10 6 6 6
44104-210 210 210 253 253 253 253 253 253 253 253 253
44105-253 253 253 253 253 253 253 253 253 253 253 253
44106-253 253 253 253 253 253 234 234 234 242 242 242
44107-253 253 253 253 253 253 253 253 253 253 253 253
44108-253 253 253 253 253 253 253 253 253 253 253 253
44109-253 253 253 253 253 253 253 253 253 253 253 253
44110-253 253 253 253 253 253 253 253 253 253 253 253
44111-253 253 253 253 253 253 221 221 221 6 6 6
44112- 2 2 6 2 2 6 6 6 6 30 30 30
44113- 2 2 6 2 2 6 2 2 6 2 2 6
44114- 2 2 6 82 82 82 54 54 54 18 18 18
44115- 6 6 6 0 0 0 0 0 0 0 0 0
44116- 0 0 0 0 0 0 0 0 0 0 0 0
44117- 0 0 0 0 0 0 0 0 0 0 0 0
44118- 0 0 0 0 0 0 0 0 0 0 0 0
44119- 0 0 0 0 0 0 0 0 0 0 0 0
44120- 0 0 0 0 0 0 0 0 0 0 0 0
44121- 0 0 0 0 0 0 0 0 0 10 10 10
44122- 26 26 26 66 66 66 62 62 62 2 2 6
44123- 2 2 6 38 38 38 10 10 10 26 26 26
44124-238 238 238 253 253 253 253 253 253 253 253 253
44125-253 253 253 253 253 253 253 253 253 253 253 253
44126-253 253 253 253 253 253 231 231 231 238 238 238
44127-253 253 253 253 253 253 253 253 253 253 253 253
44128-253 253 253 253 253 253 253 253 253 253 253 253
44129-253 253 253 253 253 253 253 253 253 253 253 253
44130-253 253 253 253 253 253 253 253 253 253 253 253
44131-253 253 253 253 253 253 231 231 231 6 6 6
44132- 2 2 6 2 2 6 10 10 10 30 30 30
44133- 2 2 6 2 2 6 2 2 6 2 2 6
44134- 2 2 6 66 66 66 58 58 58 22 22 22
44135- 6 6 6 0 0 0 0 0 0 0 0 0
44136- 0 0 0 0 0 0 0 0 0 0 0 0
44137- 0 0 0 0 0 0 0 0 0 0 0 0
44138- 0 0 0 0 0 0 0 0 0 0 0 0
44139- 0 0 0 0 0 0 0 0 0 0 0 0
44140- 0 0 0 0 0 0 0 0 0 0 0 0
44141- 0 0 0 0 0 0 0 0 0 10 10 10
44142- 38 38 38 78 78 78 6 6 6 2 2 6
44143- 2 2 6 46 46 46 14 14 14 42 42 42
44144-246 246 246 253 253 253 253 253 253 253 253 253
44145-253 253 253 253 253 253 253 253 253 253 253 253
44146-253 253 253 253 253 253 231 231 231 242 242 242
44147-253 253 253 253 253 253 253 253 253 253 253 253
44148-253 253 253 253 253 253 253 253 253 253 253 253
44149-253 253 253 253 253 253 253 253 253 253 253 253
44150-253 253 253 253 253 253 253 253 253 253 253 253
44151-253 253 253 253 253 253 234 234 234 10 10 10
44152- 2 2 6 2 2 6 22 22 22 14 14 14
44153- 2 2 6 2 2 6 2 2 6 2 2 6
44154- 2 2 6 66 66 66 62 62 62 22 22 22
44155- 6 6 6 0 0 0 0 0 0 0 0 0
44156- 0 0 0 0 0 0 0 0 0 0 0 0
44157- 0 0 0 0 0 0 0 0 0 0 0 0
44158- 0 0 0 0 0 0 0 0 0 0 0 0
44159- 0 0 0 0 0 0 0 0 0 0 0 0
44160- 0 0 0 0 0 0 0 0 0 0 0 0
44161- 0 0 0 0 0 0 6 6 6 18 18 18
44162- 50 50 50 74 74 74 2 2 6 2 2 6
44163- 14 14 14 70 70 70 34 34 34 62 62 62
44164-250 250 250 253 253 253 253 253 253 253 253 253
44165-253 253 253 253 253 253 253 253 253 253 253 253
44166-253 253 253 253 253 253 231 231 231 246 246 246
44167-253 253 253 253 253 253 253 253 253 253 253 253
44168-253 253 253 253 253 253 253 253 253 253 253 253
44169-253 253 253 253 253 253 253 253 253 253 253 253
44170-253 253 253 253 253 253 253 253 253 253 253 253
44171-253 253 253 253 253 253 234 234 234 14 14 14
44172- 2 2 6 2 2 6 30 30 30 2 2 6
44173- 2 2 6 2 2 6 2 2 6 2 2 6
44174- 2 2 6 66 66 66 62 62 62 22 22 22
44175- 6 6 6 0 0 0 0 0 0 0 0 0
44176- 0 0 0 0 0 0 0 0 0 0 0 0
44177- 0 0 0 0 0 0 0 0 0 0 0 0
44178- 0 0 0 0 0 0 0 0 0 0 0 0
44179- 0 0 0 0 0 0 0 0 0 0 0 0
44180- 0 0 0 0 0 0 0 0 0 0 0 0
44181- 0 0 0 0 0 0 6 6 6 18 18 18
44182- 54 54 54 62 62 62 2 2 6 2 2 6
44183- 2 2 6 30 30 30 46 46 46 70 70 70
44184-250 250 250 253 253 253 253 253 253 253 253 253
44185-253 253 253 253 253 253 253 253 253 253 253 253
44186-253 253 253 253 253 253 231 231 231 246 246 246
44187-253 253 253 253 253 253 253 253 253 253 253 253
44188-253 253 253 253 253 253 253 253 253 253 253 253
44189-253 253 253 253 253 253 253 253 253 253 253 253
44190-253 253 253 253 253 253 253 253 253 253 253 253
44191-253 253 253 253 253 253 226 226 226 10 10 10
44192- 2 2 6 6 6 6 30 30 30 2 2 6
44193- 2 2 6 2 2 6 2 2 6 2 2 6
44194- 2 2 6 66 66 66 58 58 58 22 22 22
44195- 6 6 6 0 0 0 0 0 0 0 0 0
44196- 0 0 0 0 0 0 0 0 0 0 0 0
44197- 0 0 0 0 0 0 0 0 0 0 0 0
44198- 0 0 0 0 0 0 0 0 0 0 0 0
44199- 0 0 0 0 0 0 0 0 0 0 0 0
44200- 0 0 0 0 0 0 0 0 0 0 0 0
44201- 0 0 0 0 0 0 6 6 6 22 22 22
44202- 58 58 58 62 62 62 2 2 6 2 2 6
44203- 2 2 6 2 2 6 30 30 30 78 78 78
44204-250 250 250 253 253 253 253 253 253 253 253 253
44205-253 253 253 253 253 253 253 253 253 253 253 253
44206-253 253 253 253 253 253 231 231 231 246 246 246
44207-253 253 253 253 253 253 253 253 253 253 253 253
44208-253 253 253 253 253 253 253 253 253 253 253 253
44209-253 253 253 253 253 253 253 253 253 253 253 253
44210-253 253 253 253 253 253 253 253 253 253 253 253
44211-253 253 253 253 253 253 206 206 206 2 2 6
44212- 22 22 22 34 34 34 18 14 6 22 22 22
44213- 26 26 26 18 18 18 6 6 6 2 2 6
44214- 2 2 6 82 82 82 54 54 54 18 18 18
44215- 6 6 6 0 0 0 0 0 0 0 0 0
44216- 0 0 0 0 0 0 0 0 0 0 0 0
44217- 0 0 0 0 0 0 0 0 0 0 0 0
44218- 0 0 0 0 0 0 0 0 0 0 0 0
44219- 0 0 0 0 0 0 0 0 0 0 0 0
44220- 0 0 0 0 0 0 0 0 0 0 0 0
44221- 0 0 0 0 0 0 6 6 6 26 26 26
44222- 62 62 62 106 106 106 74 54 14 185 133 11
44223-210 162 10 121 92 8 6 6 6 62 62 62
44224-238 238 238 253 253 253 253 253 253 253 253 253
44225-253 253 253 253 253 253 253 253 253 253 253 253
44226-253 253 253 253 253 253 231 231 231 246 246 246
44227-253 253 253 253 253 253 253 253 253 253 253 253
44228-253 253 253 253 253 253 253 253 253 253 253 253
44229-253 253 253 253 253 253 253 253 253 253 253 253
44230-253 253 253 253 253 253 253 253 253 253 253 253
44231-253 253 253 253 253 253 158 158 158 18 18 18
44232- 14 14 14 2 2 6 2 2 6 2 2 6
44233- 6 6 6 18 18 18 66 66 66 38 38 38
44234- 6 6 6 94 94 94 50 50 50 18 18 18
44235- 6 6 6 0 0 0 0 0 0 0 0 0
44236- 0 0 0 0 0 0 0 0 0 0 0 0
44237- 0 0 0 0 0 0 0 0 0 0 0 0
44238- 0 0 0 0 0 0 0 0 0 0 0 0
44239- 0 0 0 0 0 0 0 0 0 0 0 0
44240- 0 0 0 0 0 0 0 0 0 6 6 6
44241- 10 10 10 10 10 10 18 18 18 38 38 38
44242- 78 78 78 142 134 106 216 158 10 242 186 14
44243-246 190 14 246 190 14 156 118 10 10 10 10
44244- 90 90 90 238 238 238 253 253 253 253 253 253
44245-253 253 253 253 253 253 253 253 253 253 253 253
44246-253 253 253 253 253 253 231 231 231 250 250 250
44247-253 253 253 253 253 253 253 253 253 253 253 253
44248-253 253 253 253 253 253 253 253 253 253 253 253
44249-253 253 253 253 253 253 253 253 253 253 253 253
44250-253 253 253 253 253 253 253 253 253 246 230 190
44251-238 204 91 238 204 91 181 142 44 37 26 9
44252- 2 2 6 2 2 6 2 2 6 2 2 6
44253- 2 2 6 2 2 6 38 38 38 46 46 46
44254- 26 26 26 106 106 106 54 54 54 18 18 18
44255- 6 6 6 0 0 0 0 0 0 0 0 0
44256- 0 0 0 0 0 0 0 0 0 0 0 0
44257- 0 0 0 0 0 0 0 0 0 0 0 0
44258- 0 0 0 0 0 0 0 0 0 0 0 0
44259- 0 0 0 0 0 0 0 0 0 0 0 0
44260- 0 0 0 6 6 6 14 14 14 22 22 22
44261- 30 30 30 38 38 38 50 50 50 70 70 70
44262-106 106 106 190 142 34 226 170 11 242 186 14
44263-246 190 14 246 190 14 246 190 14 154 114 10
44264- 6 6 6 74 74 74 226 226 226 253 253 253
44265-253 253 253 253 253 253 253 253 253 253 253 253
44266-253 253 253 253 253 253 231 231 231 250 250 250
44267-253 253 253 253 253 253 253 253 253 253 253 253
44268-253 253 253 253 253 253 253 253 253 253 253 253
44269-253 253 253 253 253 253 253 253 253 253 253 253
44270-253 253 253 253 253 253 253 253 253 228 184 62
44271-241 196 14 241 208 19 232 195 16 38 30 10
44272- 2 2 6 2 2 6 2 2 6 2 2 6
44273- 2 2 6 6 6 6 30 30 30 26 26 26
44274-203 166 17 154 142 90 66 66 66 26 26 26
44275- 6 6 6 0 0 0 0 0 0 0 0 0
44276- 0 0 0 0 0 0 0 0 0 0 0 0
44277- 0 0 0 0 0 0 0 0 0 0 0 0
44278- 0 0 0 0 0 0 0 0 0 0 0 0
44279- 0 0 0 0 0 0 0 0 0 0 0 0
44280- 6 6 6 18 18 18 38 38 38 58 58 58
44281- 78 78 78 86 86 86 101 101 101 123 123 123
44282-175 146 61 210 150 10 234 174 13 246 186 14
44283-246 190 14 246 190 14 246 190 14 238 190 10
44284-102 78 10 2 2 6 46 46 46 198 198 198
44285-253 253 253 253 253 253 253 253 253 253 253 253
44286-253 253 253 253 253 253 234 234 234 242 242 242
44287-253 253 253 253 253 253 253 253 253 253 253 253
44288-253 253 253 253 253 253 253 253 253 253 253 253
44289-253 253 253 253 253 253 253 253 253 253 253 253
44290-253 253 253 253 253 253 253 253 253 224 178 62
44291-242 186 14 241 196 14 210 166 10 22 18 6
44292- 2 2 6 2 2 6 2 2 6 2 2 6
44293- 2 2 6 2 2 6 6 6 6 121 92 8
44294-238 202 15 232 195 16 82 82 82 34 34 34
44295- 10 10 10 0 0 0 0 0 0 0 0 0
44296- 0 0 0 0 0 0 0 0 0 0 0 0
44297- 0 0 0 0 0 0 0 0 0 0 0 0
44298- 0 0 0 0 0 0 0 0 0 0 0 0
44299- 0 0 0 0 0 0 0 0 0 0 0 0
44300- 14 14 14 38 38 38 70 70 70 154 122 46
44301-190 142 34 200 144 11 197 138 11 197 138 11
44302-213 154 11 226 170 11 242 186 14 246 190 14
44303-246 190 14 246 190 14 246 190 14 246 190 14
44304-225 175 15 46 32 6 2 2 6 22 22 22
44305-158 158 158 250 250 250 253 253 253 253 253 253
44306-253 253 253 253 253 253 253 253 253 253 253 253
44307-253 253 253 253 253 253 253 253 253 253 253 253
44308-253 253 253 253 253 253 253 253 253 253 253 253
44309-253 253 253 253 253 253 253 253 253 253 253 253
44310-253 253 253 250 250 250 242 242 242 224 178 62
44311-239 182 13 236 186 11 213 154 11 46 32 6
44312- 2 2 6 2 2 6 2 2 6 2 2 6
44313- 2 2 6 2 2 6 61 42 6 225 175 15
44314-238 190 10 236 186 11 112 100 78 42 42 42
44315- 14 14 14 0 0 0 0 0 0 0 0 0
44316- 0 0 0 0 0 0 0 0 0 0 0 0
44317- 0 0 0 0 0 0 0 0 0 0 0 0
44318- 0 0 0 0 0 0 0 0 0 0 0 0
44319- 0 0 0 0 0 0 0 0 0 6 6 6
44320- 22 22 22 54 54 54 154 122 46 213 154 11
44321-226 170 11 230 174 11 226 170 11 226 170 11
44322-236 178 12 242 186 14 246 190 14 246 190 14
44323-246 190 14 246 190 14 246 190 14 246 190 14
44324-241 196 14 184 144 12 10 10 10 2 2 6
44325- 6 6 6 116 116 116 242 242 242 253 253 253
44326-253 253 253 253 253 253 253 253 253 253 253 253
44327-253 253 253 253 253 253 253 253 253 253 253 253
44328-253 253 253 253 253 253 253 253 253 253 253 253
44329-253 253 253 253 253 253 253 253 253 253 253 253
44330-253 253 253 231 231 231 198 198 198 214 170 54
44331-236 178 12 236 178 12 210 150 10 137 92 6
44332- 18 14 6 2 2 6 2 2 6 2 2 6
44333- 6 6 6 70 47 6 200 144 11 236 178 12
44334-239 182 13 239 182 13 124 112 88 58 58 58
44335- 22 22 22 6 6 6 0 0 0 0 0 0
44336- 0 0 0 0 0 0 0 0 0 0 0 0
44337- 0 0 0 0 0 0 0 0 0 0 0 0
44338- 0 0 0 0 0 0 0 0 0 0 0 0
44339- 0 0 0 0 0 0 0 0 0 10 10 10
44340- 30 30 30 70 70 70 180 133 36 226 170 11
44341-239 182 13 242 186 14 242 186 14 246 186 14
44342-246 190 14 246 190 14 246 190 14 246 190 14
44343-246 190 14 246 190 14 246 190 14 246 190 14
44344-246 190 14 232 195 16 98 70 6 2 2 6
44345- 2 2 6 2 2 6 66 66 66 221 221 221
44346-253 253 253 253 253 253 253 253 253 253 253 253
44347-253 253 253 253 253 253 253 253 253 253 253 253
44348-253 253 253 253 253 253 253 253 253 253 253 253
44349-253 253 253 253 253 253 253 253 253 253 253 253
44350-253 253 253 206 206 206 198 198 198 214 166 58
44351-230 174 11 230 174 11 216 158 10 192 133 9
44352-163 110 8 116 81 8 102 78 10 116 81 8
44353-167 114 7 197 138 11 226 170 11 239 182 13
44354-242 186 14 242 186 14 162 146 94 78 78 78
44355- 34 34 34 14 14 14 6 6 6 0 0 0
44356- 0 0 0 0 0 0 0 0 0 0 0 0
44357- 0 0 0 0 0 0 0 0 0 0 0 0
44358- 0 0 0 0 0 0 0 0 0 0 0 0
44359- 0 0 0 0 0 0 0 0 0 6 6 6
44360- 30 30 30 78 78 78 190 142 34 226 170 11
44361-239 182 13 246 190 14 246 190 14 246 190 14
44362-246 190 14 246 190 14 246 190 14 246 190 14
44363-246 190 14 246 190 14 246 190 14 246 190 14
44364-246 190 14 241 196 14 203 166 17 22 18 6
44365- 2 2 6 2 2 6 2 2 6 38 38 38
44366-218 218 218 253 253 253 253 253 253 253 253 253
44367-253 253 253 253 253 253 253 253 253 253 253 253
44368-253 253 253 253 253 253 253 253 253 253 253 253
44369-253 253 253 253 253 253 253 253 253 253 253 253
44370-250 250 250 206 206 206 198 198 198 202 162 69
44371-226 170 11 236 178 12 224 166 10 210 150 10
44372-200 144 11 197 138 11 192 133 9 197 138 11
44373-210 150 10 226 170 11 242 186 14 246 190 14
44374-246 190 14 246 186 14 225 175 15 124 112 88
44375- 62 62 62 30 30 30 14 14 14 6 6 6
44376- 0 0 0 0 0 0 0 0 0 0 0 0
44377- 0 0 0 0 0 0 0 0 0 0 0 0
44378- 0 0 0 0 0 0 0 0 0 0 0 0
44379- 0 0 0 0 0 0 0 0 0 10 10 10
44380- 30 30 30 78 78 78 174 135 50 224 166 10
44381-239 182 13 246 190 14 246 190 14 246 190 14
44382-246 190 14 246 190 14 246 190 14 246 190 14
44383-246 190 14 246 190 14 246 190 14 246 190 14
44384-246 190 14 246 190 14 241 196 14 139 102 15
44385- 2 2 6 2 2 6 2 2 6 2 2 6
44386- 78 78 78 250 250 250 253 253 253 253 253 253
44387-253 253 253 253 253 253 253 253 253 253 253 253
44388-253 253 253 253 253 253 253 253 253 253 253 253
44389-253 253 253 253 253 253 253 253 253 253 253 253
44390-250 250 250 214 214 214 198 198 198 190 150 46
44391-219 162 10 236 178 12 234 174 13 224 166 10
44392-216 158 10 213 154 11 213 154 11 216 158 10
44393-226 170 11 239 182 13 246 190 14 246 190 14
44394-246 190 14 246 190 14 242 186 14 206 162 42
44395-101 101 101 58 58 58 30 30 30 14 14 14
44396- 6 6 6 0 0 0 0 0 0 0 0 0
44397- 0 0 0 0 0 0 0 0 0 0 0 0
44398- 0 0 0 0 0 0 0 0 0 0 0 0
44399- 0 0 0 0 0 0 0 0 0 10 10 10
44400- 30 30 30 74 74 74 174 135 50 216 158 10
44401-236 178 12 246 190 14 246 190 14 246 190 14
44402-246 190 14 246 190 14 246 190 14 246 190 14
44403-246 190 14 246 190 14 246 190 14 246 190 14
44404-246 190 14 246 190 14 241 196 14 226 184 13
44405- 61 42 6 2 2 6 2 2 6 2 2 6
44406- 22 22 22 238 238 238 253 253 253 253 253 253
44407-253 253 253 253 253 253 253 253 253 253 253 253
44408-253 253 253 253 253 253 253 253 253 253 253 253
44409-253 253 253 253 253 253 253 253 253 253 253 253
44410-253 253 253 226 226 226 187 187 187 180 133 36
44411-216 158 10 236 178 12 239 182 13 236 178 12
44412-230 174 11 226 170 11 226 170 11 230 174 11
44413-236 178 12 242 186 14 246 190 14 246 190 14
44414-246 190 14 246 190 14 246 186 14 239 182 13
44415-206 162 42 106 106 106 66 66 66 34 34 34
44416- 14 14 14 6 6 6 0 0 0 0 0 0
44417- 0 0 0 0 0 0 0 0 0 0 0 0
44418- 0 0 0 0 0 0 0 0 0 0 0 0
44419- 0 0 0 0 0 0 0 0 0 6 6 6
44420- 26 26 26 70 70 70 163 133 67 213 154 11
44421-236 178 12 246 190 14 246 190 14 246 190 14
44422-246 190 14 246 190 14 246 190 14 246 190 14
44423-246 190 14 246 190 14 246 190 14 246 190 14
44424-246 190 14 246 190 14 246 190 14 241 196 14
44425-190 146 13 18 14 6 2 2 6 2 2 6
44426- 46 46 46 246 246 246 253 253 253 253 253 253
44427-253 253 253 253 253 253 253 253 253 253 253 253
44428-253 253 253 253 253 253 253 253 253 253 253 253
44429-253 253 253 253 253 253 253 253 253 253 253 253
44430-253 253 253 221 221 221 86 86 86 156 107 11
44431-216 158 10 236 178 12 242 186 14 246 186 14
44432-242 186 14 239 182 13 239 182 13 242 186 14
44433-242 186 14 246 186 14 246 190 14 246 190 14
44434-246 190 14 246 190 14 246 190 14 246 190 14
44435-242 186 14 225 175 15 142 122 72 66 66 66
44436- 30 30 30 10 10 10 0 0 0 0 0 0
44437- 0 0 0 0 0 0 0 0 0 0 0 0
44438- 0 0 0 0 0 0 0 0 0 0 0 0
44439- 0 0 0 0 0 0 0 0 0 6 6 6
44440- 26 26 26 70 70 70 163 133 67 210 150 10
44441-236 178 12 246 190 14 246 190 14 246 190 14
44442-246 190 14 246 190 14 246 190 14 246 190 14
44443-246 190 14 246 190 14 246 190 14 246 190 14
44444-246 190 14 246 190 14 246 190 14 246 190 14
44445-232 195 16 121 92 8 34 34 34 106 106 106
44446-221 221 221 253 253 253 253 253 253 253 253 253
44447-253 253 253 253 253 253 253 253 253 253 253 253
44448-253 253 253 253 253 253 253 253 253 253 253 253
44449-253 253 253 253 253 253 253 253 253 253 253 253
44450-242 242 242 82 82 82 18 14 6 163 110 8
44451-216 158 10 236 178 12 242 186 14 246 190 14
44452-246 190 14 246 190 14 246 190 14 246 190 14
44453-246 190 14 246 190 14 246 190 14 246 190 14
44454-246 190 14 246 190 14 246 190 14 246 190 14
44455-246 190 14 246 190 14 242 186 14 163 133 67
44456- 46 46 46 18 18 18 6 6 6 0 0 0
44457- 0 0 0 0 0 0 0 0 0 0 0 0
44458- 0 0 0 0 0 0 0 0 0 0 0 0
44459- 0 0 0 0 0 0 0 0 0 10 10 10
44460- 30 30 30 78 78 78 163 133 67 210 150 10
44461-236 178 12 246 186 14 246 190 14 246 190 14
44462-246 190 14 246 190 14 246 190 14 246 190 14
44463-246 190 14 246 190 14 246 190 14 246 190 14
44464-246 190 14 246 190 14 246 190 14 246 190 14
44465-241 196 14 215 174 15 190 178 144 253 253 253
44466-253 253 253 253 253 253 253 253 253 253 253 253
44467-253 253 253 253 253 253 253 253 253 253 253 253
44468-253 253 253 253 253 253 253 253 253 253 253 253
44469-253 253 253 253 253 253 253 253 253 218 218 218
44470- 58 58 58 2 2 6 22 18 6 167 114 7
44471-216 158 10 236 178 12 246 186 14 246 190 14
44472-246 190 14 246 190 14 246 190 14 246 190 14
44473-246 190 14 246 190 14 246 190 14 246 190 14
44474-246 190 14 246 190 14 246 190 14 246 190 14
44475-246 190 14 246 186 14 242 186 14 190 150 46
44476- 54 54 54 22 22 22 6 6 6 0 0 0
44477- 0 0 0 0 0 0 0 0 0 0 0 0
44478- 0 0 0 0 0 0 0 0 0 0 0 0
44479- 0 0 0 0 0 0 0 0 0 14 14 14
44480- 38 38 38 86 86 86 180 133 36 213 154 11
44481-236 178 12 246 186 14 246 190 14 246 190 14
44482-246 190 14 246 190 14 246 190 14 246 190 14
44483-246 190 14 246 190 14 246 190 14 246 190 14
44484-246 190 14 246 190 14 246 190 14 246 190 14
44485-246 190 14 232 195 16 190 146 13 214 214 214
44486-253 253 253 253 253 253 253 253 253 253 253 253
44487-253 253 253 253 253 253 253 253 253 253 253 253
44488-253 253 253 253 253 253 253 253 253 253 253 253
44489-253 253 253 250 250 250 170 170 170 26 26 26
44490- 2 2 6 2 2 6 37 26 9 163 110 8
44491-219 162 10 239 182 13 246 186 14 246 190 14
44492-246 190 14 246 190 14 246 190 14 246 190 14
44493-246 190 14 246 190 14 246 190 14 246 190 14
44494-246 190 14 246 190 14 246 190 14 246 190 14
44495-246 186 14 236 178 12 224 166 10 142 122 72
44496- 46 46 46 18 18 18 6 6 6 0 0 0
44497- 0 0 0 0 0 0 0 0 0 0 0 0
44498- 0 0 0 0 0 0 0 0 0 0 0 0
44499- 0 0 0 0 0 0 6 6 6 18 18 18
44500- 50 50 50 109 106 95 192 133 9 224 166 10
44501-242 186 14 246 190 14 246 190 14 246 190 14
44502-246 190 14 246 190 14 246 190 14 246 190 14
44503-246 190 14 246 190 14 246 190 14 246 190 14
44504-246 190 14 246 190 14 246 190 14 246 190 14
44505-242 186 14 226 184 13 210 162 10 142 110 46
44506-226 226 226 253 253 253 253 253 253 253 253 253
44507-253 253 253 253 253 253 253 253 253 253 253 253
44508-253 253 253 253 253 253 253 253 253 253 253 253
44509-198 198 198 66 66 66 2 2 6 2 2 6
44510- 2 2 6 2 2 6 50 34 6 156 107 11
44511-219 162 10 239 182 13 246 186 14 246 190 14
44512-246 190 14 246 190 14 246 190 14 246 190 14
44513-246 190 14 246 190 14 246 190 14 246 190 14
44514-246 190 14 246 190 14 246 190 14 242 186 14
44515-234 174 13 213 154 11 154 122 46 66 66 66
44516- 30 30 30 10 10 10 0 0 0 0 0 0
44517- 0 0 0 0 0 0 0 0 0 0 0 0
44518- 0 0 0 0 0 0 0 0 0 0 0 0
44519- 0 0 0 0 0 0 6 6 6 22 22 22
44520- 58 58 58 154 121 60 206 145 10 234 174 13
44521-242 186 14 246 186 14 246 190 14 246 190 14
44522-246 190 14 246 190 14 246 190 14 246 190 14
44523-246 190 14 246 190 14 246 190 14 246 190 14
44524-246 190 14 246 190 14 246 190 14 246 190 14
44525-246 186 14 236 178 12 210 162 10 163 110 8
44526- 61 42 6 138 138 138 218 218 218 250 250 250
44527-253 253 253 253 253 253 253 253 253 250 250 250
44528-242 242 242 210 210 210 144 144 144 66 66 66
44529- 6 6 6 2 2 6 2 2 6 2 2 6
44530- 2 2 6 2 2 6 61 42 6 163 110 8
44531-216 158 10 236 178 12 246 190 14 246 190 14
44532-246 190 14 246 190 14 246 190 14 246 190 14
44533-246 190 14 246 190 14 246 190 14 246 190 14
44534-246 190 14 239 182 13 230 174 11 216 158 10
44535-190 142 34 124 112 88 70 70 70 38 38 38
44536- 18 18 18 6 6 6 0 0 0 0 0 0
44537- 0 0 0 0 0 0 0 0 0 0 0 0
44538- 0 0 0 0 0 0 0 0 0 0 0 0
44539- 0 0 0 0 0 0 6 6 6 22 22 22
44540- 62 62 62 168 124 44 206 145 10 224 166 10
44541-236 178 12 239 182 13 242 186 14 242 186 14
44542-246 186 14 246 190 14 246 190 14 246 190 14
44543-246 190 14 246 190 14 246 190 14 246 190 14
44544-246 190 14 246 190 14 246 190 14 246 190 14
44545-246 190 14 236 178 12 216 158 10 175 118 6
44546- 80 54 7 2 2 6 6 6 6 30 30 30
44547- 54 54 54 62 62 62 50 50 50 38 38 38
44548- 14 14 14 2 2 6 2 2 6 2 2 6
44549- 2 2 6 2 2 6 2 2 6 2 2 6
44550- 2 2 6 6 6 6 80 54 7 167 114 7
44551-213 154 11 236 178 12 246 190 14 246 190 14
44552-246 190 14 246 190 14 246 190 14 246 190 14
44553-246 190 14 242 186 14 239 182 13 239 182 13
44554-230 174 11 210 150 10 174 135 50 124 112 88
44555- 82 82 82 54 54 54 34 34 34 18 18 18
44556- 6 6 6 0 0 0 0 0 0 0 0 0
44557- 0 0 0 0 0 0 0 0 0 0 0 0
44558- 0 0 0 0 0 0 0 0 0 0 0 0
44559- 0 0 0 0 0 0 6 6 6 18 18 18
44560- 50 50 50 158 118 36 192 133 9 200 144 11
44561-216 158 10 219 162 10 224 166 10 226 170 11
44562-230 174 11 236 178 12 239 182 13 239 182 13
44563-242 186 14 246 186 14 246 190 14 246 190 14
44564-246 190 14 246 190 14 246 190 14 246 190 14
44565-246 186 14 230 174 11 210 150 10 163 110 8
44566-104 69 6 10 10 10 2 2 6 2 2 6
44567- 2 2 6 2 2 6 2 2 6 2 2 6
44568- 2 2 6 2 2 6 2 2 6 2 2 6
44569- 2 2 6 2 2 6 2 2 6 2 2 6
44570- 2 2 6 6 6 6 91 60 6 167 114 7
44571-206 145 10 230 174 11 242 186 14 246 190 14
44572-246 190 14 246 190 14 246 186 14 242 186 14
44573-239 182 13 230 174 11 224 166 10 213 154 11
44574-180 133 36 124 112 88 86 86 86 58 58 58
44575- 38 38 38 22 22 22 10 10 10 6 6 6
44576- 0 0 0 0 0 0 0 0 0 0 0 0
44577- 0 0 0 0 0 0 0 0 0 0 0 0
44578- 0 0 0 0 0 0 0 0 0 0 0 0
44579- 0 0 0 0 0 0 0 0 0 14 14 14
44580- 34 34 34 70 70 70 138 110 50 158 118 36
44581-167 114 7 180 123 7 192 133 9 197 138 11
44582-200 144 11 206 145 10 213 154 11 219 162 10
44583-224 166 10 230 174 11 239 182 13 242 186 14
44584-246 186 14 246 186 14 246 186 14 246 186 14
44585-239 182 13 216 158 10 185 133 11 152 99 6
44586-104 69 6 18 14 6 2 2 6 2 2 6
44587- 2 2 6 2 2 6 2 2 6 2 2 6
44588- 2 2 6 2 2 6 2 2 6 2 2 6
44589- 2 2 6 2 2 6 2 2 6 2 2 6
44590- 2 2 6 6 6 6 80 54 7 152 99 6
44591-192 133 9 219 162 10 236 178 12 239 182 13
44592-246 186 14 242 186 14 239 182 13 236 178 12
44593-224 166 10 206 145 10 192 133 9 154 121 60
44594- 94 94 94 62 62 62 42 42 42 22 22 22
44595- 14 14 14 6 6 6 0 0 0 0 0 0
44596- 0 0 0 0 0 0 0 0 0 0 0 0
44597- 0 0 0 0 0 0 0 0 0 0 0 0
44598- 0 0 0 0 0 0 0 0 0 0 0 0
44599- 0 0 0 0 0 0 0 0 0 6 6 6
44600- 18 18 18 34 34 34 58 58 58 78 78 78
44601-101 98 89 124 112 88 142 110 46 156 107 11
44602-163 110 8 167 114 7 175 118 6 180 123 7
44603-185 133 11 197 138 11 210 150 10 219 162 10
44604-226 170 11 236 178 12 236 178 12 234 174 13
44605-219 162 10 197 138 11 163 110 8 130 83 6
44606- 91 60 6 10 10 10 2 2 6 2 2 6
44607- 18 18 18 38 38 38 38 38 38 38 38 38
44608- 38 38 38 38 38 38 38 38 38 38 38 38
44609- 38 38 38 38 38 38 26 26 26 2 2 6
44610- 2 2 6 6 6 6 70 47 6 137 92 6
44611-175 118 6 200 144 11 219 162 10 230 174 11
44612-234 174 13 230 174 11 219 162 10 210 150 10
44613-192 133 9 163 110 8 124 112 88 82 82 82
44614- 50 50 50 30 30 30 14 14 14 6 6 6
44615- 0 0 0 0 0 0 0 0 0 0 0 0
44616- 0 0 0 0 0 0 0 0 0 0 0 0
44617- 0 0 0 0 0 0 0 0 0 0 0 0
44618- 0 0 0 0 0 0 0 0 0 0 0 0
44619- 0 0 0 0 0 0 0 0 0 0 0 0
44620- 6 6 6 14 14 14 22 22 22 34 34 34
44621- 42 42 42 58 58 58 74 74 74 86 86 86
44622-101 98 89 122 102 70 130 98 46 121 87 25
44623-137 92 6 152 99 6 163 110 8 180 123 7
44624-185 133 11 197 138 11 206 145 10 200 144 11
44625-180 123 7 156 107 11 130 83 6 104 69 6
44626- 50 34 6 54 54 54 110 110 110 101 98 89
44627- 86 86 86 82 82 82 78 78 78 78 78 78
44628- 78 78 78 78 78 78 78 78 78 78 78 78
44629- 78 78 78 82 82 82 86 86 86 94 94 94
44630-106 106 106 101 101 101 86 66 34 124 80 6
44631-156 107 11 180 123 7 192 133 9 200 144 11
44632-206 145 10 200 144 11 192 133 9 175 118 6
44633-139 102 15 109 106 95 70 70 70 42 42 42
44634- 22 22 22 10 10 10 0 0 0 0 0 0
44635- 0 0 0 0 0 0 0 0 0 0 0 0
44636- 0 0 0 0 0 0 0 0 0 0 0 0
44637- 0 0 0 0 0 0 0 0 0 0 0 0
44638- 0 0 0 0 0 0 0 0 0 0 0 0
44639- 0 0 0 0 0 0 0 0 0 0 0 0
44640- 0 0 0 0 0 0 6 6 6 10 10 10
44641- 14 14 14 22 22 22 30 30 30 38 38 38
44642- 50 50 50 62 62 62 74 74 74 90 90 90
44643-101 98 89 112 100 78 121 87 25 124 80 6
44644-137 92 6 152 99 6 152 99 6 152 99 6
44645-138 86 6 124 80 6 98 70 6 86 66 30
44646-101 98 89 82 82 82 58 58 58 46 46 46
44647- 38 38 38 34 34 34 34 34 34 34 34 34
44648- 34 34 34 34 34 34 34 34 34 34 34 34
44649- 34 34 34 34 34 34 38 38 38 42 42 42
44650- 54 54 54 82 82 82 94 86 76 91 60 6
44651-134 86 6 156 107 11 167 114 7 175 118 6
44652-175 118 6 167 114 7 152 99 6 121 87 25
44653-101 98 89 62 62 62 34 34 34 18 18 18
44654- 6 6 6 0 0 0 0 0 0 0 0 0
44655- 0 0 0 0 0 0 0 0 0 0 0 0
44656- 0 0 0 0 0 0 0 0 0 0 0 0
44657- 0 0 0 0 0 0 0 0 0 0 0 0
44658- 0 0 0 0 0 0 0 0 0 0 0 0
44659- 0 0 0 0 0 0 0 0 0 0 0 0
44660- 0 0 0 0 0 0 0 0 0 0 0 0
44661- 0 0 0 6 6 6 6 6 6 10 10 10
44662- 18 18 18 22 22 22 30 30 30 42 42 42
44663- 50 50 50 66 66 66 86 86 86 101 98 89
44664-106 86 58 98 70 6 104 69 6 104 69 6
44665-104 69 6 91 60 6 82 62 34 90 90 90
44666- 62 62 62 38 38 38 22 22 22 14 14 14
44667- 10 10 10 10 10 10 10 10 10 10 10 10
44668- 10 10 10 10 10 10 6 6 6 10 10 10
44669- 10 10 10 10 10 10 10 10 10 14 14 14
44670- 22 22 22 42 42 42 70 70 70 89 81 66
44671- 80 54 7 104 69 6 124 80 6 137 92 6
44672-134 86 6 116 81 8 100 82 52 86 86 86
44673- 58 58 58 30 30 30 14 14 14 6 6 6
44674- 0 0 0 0 0 0 0 0 0 0 0 0
44675- 0 0 0 0 0 0 0 0 0 0 0 0
44676- 0 0 0 0 0 0 0 0 0 0 0 0
44677- 0 0 0 0 0 0 0 0 0 0 0 0
44678- 0 0 0 0 0 0 0 0 0 0 0 0
44679- 0 0 0 0 0 0 0 0 0 0 0 0
44680- 0 0 0 0 0 0 0 0 0 0 0 0
44681- 0 0 0 0 0 0 0 0 0 0 0 0
44682- 0 0 0 6 6 6 10 10 10 14 14 14
44683- 18 18 18 26 26 26 38 38 38 54 54 54
44684- 70 70 70 86 86 86 94 86 76 89 81 66
44685- 89 81 66 86 86 86 74 74 74 50 50 50
44686- 30 30 30 14 14 14 6 6 6 0 0 0
44687- 0 0 0 0 0 0 0 0 0 0 0 0
44688- 0 0 0 0 0 0 0 0 0 0 0 0
44689- 0 0 0 0 0 0 0 0 0 0 0 0
44690- 6 6 6 18 18 18 34 34 34 58 58 58
44691- 82 82 82 89 81 66 89 81 66 89 81 66
44692- 94 86 66 94 86 76 74 74 74 50 50 50
44693- 26 26 26 14 14 14 6 6 6 0 0 0
44694- 0 0 0 0 0 0 0 0 0 0 0 0
44695- 0 0 0 0 0 0 0 0 0 0 0 0
44696- 0 0 0 0 0 0 0 0 0 0 0 0
44697- 0 0 0 0 0 0 0 0 0 0 0 0
44698- 0 0 0 0 0 0 0 0 0 0 0 0
44699- 0 0 0 0 0 0 0 0 0 0 0 0
44700- 0 0 0 0 0 0 0 0 0 0 0 0
44701- 0 0 0 0 0 0 0 0 0 0 0 0
44702- 0 0 0 0 0 0 0 0 0 0 0 0
44703- 6 6 6 6 6 6 14 14 14 18 18 18
44704- 30 30 30 38 38 38 46 46 46 54 54 54
44705- 50 50 50 42 42 42 30 30 30 18 18 18
44706- 10 10 10 0 0 0 0 0 0 0 0 0
44707- 0 0 0 0 0 0 0 0 0 0 0 0
44708- 0 0 0 0 0 0 0 0 0 0 0 0
44709- 0 0 0 0 0 0 0 0 0 0 0 0
44710- 0 0 0 6 6 6 14 14 14 26 26 26
44711- 38 38 38 50 50 50 58 58 58 58 58 58
44712- 54 54 54 42 42 42 30 30 30 18 18 18
44713- 10 10 10 0 0 0 0 0 0 0 0 0
44714- 0 0 0 0 0 0 0 0 0 0 0 0
44715- 0 0 0 0 0 0 0 0 0 0 0 0
44716- 0 0 0 0 0 0 0 0 0 0 0 0
44717- 0 0 0 0 0 0 0 0 0 0 0 0
44718- 0 0 0 0 0 0 0 0 0 0 0 0
44719- 0 0 0 0 0 0 0 0 0 0 0 0
44720- 0 0 0 0 0 0 0 0 0 0 0 0
44721- 0 0 0 0 0 0 0 0 0 0 0 0
44722- 0 0 0 0 0 0 0 0 0 0 0 0
44723- 0 0 0 0 0 0 0 0 0 6 6 6
44724- 6 6 6 10 10 10 14 14 14 18 18 18
44725- 18 18 18 14 14 14 10 10 10 6 6 6
44726- 0 0 0 0 0 0 0 0 0 0 0 0
44727- 0 0 0 0 0 0 0 0 0 0 0 0
44728- 0 0 0 0 0 0 0 0 0 0 0 0
44729- 0 0 0 0 0 0 0 0 0 0 0 0
44730- 0 0 0 0 0 0 0 0 0 6 6 6
44731- 14 14 14 18 18 18 22 22 22 22 22 22
44732- 18 18 18 14 14 14 10 10 10 6 6 6
44733- 0 0 0 0 0 0 0 0 0 0 0 0
44734- 0 0 0 0 0 0 0 0 0 0 0 0
44735- 0 0 0 0 0 0 0 0 0 0 0 0
44736- 0 0 0 0 0 0 0 0 0 0 0 0
44737- 0 0 0 0 0 0 0 0 0 0 0 0
44738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44751+4 4 4 4 4 4
44752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44765+4 4 4 4 4 4
44766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44779+4 4 4 4 4 4
44780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44793+4 4 4 4 4 4
44794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44807+4 4 4 4 4 4
44808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44821+4 4 4 4 4 4
44822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44826+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
44827+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
44828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44831+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
44832+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44833+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
44834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44835+4 4 4 4 4 4
44836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44840+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
44841+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
44842+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44845+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
44846+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
44847+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
44848+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44849+4 4 4 4 4 4
44850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44854+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
44855+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
44856+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44859+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
44860+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
44861+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
44862+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
44863+4 4 4 4 4 4
44864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44866+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44867+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
44868+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
44869+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
44870+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
44871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44872+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44873+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
44874+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
44875+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
44876+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
44877+4 4 4 4 4 4
44878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44880+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44881+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
44882+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
44883+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
44884+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
44885+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44886+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
44887+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
44888+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
44889+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
44890+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
44891+4 4 4 4 4 4
44892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44894+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44895+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
44896+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
44897+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
44898+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
44899+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
44900+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
44901+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
44902+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
44903+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
44904+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
44905+4 4 4 4 4 4
44906+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44907+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44908+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
44909+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
44910+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
44911+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
44912+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
44913+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
44914+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
44915+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
44916+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
44917+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
44918+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
44919+4 4 4 4 4 4
44920+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44921+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44922+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
44923+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
44924+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
44925+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
44926+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
44927+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
44928+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
44929+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
44930+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
44931+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
44932+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
44933+4 4 4 4 4 4
44934+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44935+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44936+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
44937+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
44938+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
44939+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
44940+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
44941+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
44942+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
44943+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
44944+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
44945+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
44946+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44947+4 4 4 4 4 4
44948+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44949+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44950+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
44951+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
44952+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
44953+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
44954+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
44955+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
44956+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
44957+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
44958+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
44959+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
44960+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
44961+4 4 4 4 4 4
44962+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44963+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
44964+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
44965+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
44966+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
44967+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
44968+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
44969+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
44970+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
44971+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
44972+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
44973+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
44974+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
44975+4 4 4 4 4 4
44976+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44977+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
44978+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
44979+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
44980+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
44981+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
44982+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
44983+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
44984+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
44985+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
44986+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
44987+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
44988+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
44989+0 0 0 4 4 4
44990+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44991+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
44992+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
44993+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
44994+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
44995+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
44996+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
44997+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
44998+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
44999+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
45000+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
45001+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
45002+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
45003+2 0 0 0 0 0
45004+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
45005+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
45006+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
45007+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
45008+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
45009+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
45010+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
45011+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
45012+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
45013+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
45014+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
45015+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
45016+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
45017+37 38 37 0 0 0
45018+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45019+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45020+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45021+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45022+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45023+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45024+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45025+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45026+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45027+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45028+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45029+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45030+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45031+85 115 134 4 0 0
45032+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45033+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45034+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45035+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45036+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45037+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45038+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45039+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45040+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45041+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45042+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45043+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45044+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45045+60 73 81 4 0 0
45046+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45047+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45048+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45049+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45050+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45051+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45052+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45053+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45054+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45055+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45056+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45057+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45058+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45059+16 19 21 4 0 0
45060+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45061+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45062+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45063+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45064+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45065+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45066+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45067+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45068+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45069+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45070+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45071+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45072+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45073+4 0 0 4 3 3
45074+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45075+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45076+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45077+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45078+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45079+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45080+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45081+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45082+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45083+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45084+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45085+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45086+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45087+3 2 2 4 4 4
45088+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45089+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45090+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45091+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45092+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45093+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45094+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45095+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45096+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45097+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45098+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45099+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45100+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45101+4 4 4 4 4 4
45102+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45103+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45104+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45105+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45106+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45107+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45108+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45109+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45110+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45111+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45112+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45113+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45114+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45115+4 4 4 4 4 4
45116+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45117+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45118+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45119+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45120+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45121+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45122+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45123+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45124+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45125+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45126+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45127+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45128+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45129+5 5 5 5 5 5
45130+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45131+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45132+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45133+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45134+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45135+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45136+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45137+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45138+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45139+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45140+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45141+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45142+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45143+5 5 5 4 4 4
45144+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45145+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45146+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45147+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45148+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45149+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45150+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45151+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45152+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45153+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45154+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45155+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45156+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45157+4 4 4 4 4 4
45158+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45159+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45160+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45161+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45162+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45163+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45164+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45165+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45166+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45167+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45168+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45169+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45170+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45171+4 4 4 4 4 4
45172+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45173+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45174+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45175+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45176+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45177+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45178+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45179+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45180+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45181+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45182+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45183+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45184+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45185+4 4 4 4 4 4
45186+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45187+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45188+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45189+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45190+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45191+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45192+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45193+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45194+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45195+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45196+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45197+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45198+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45199+4 4 4 4 4 4
45200+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45201+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45202+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45203+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45204+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45205+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45206+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45207+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45208+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45209+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45210+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45211+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45212+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45213+4 4 4 4 4 4
45214+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45215+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45216+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45217+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45218+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45219+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45220+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45221+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45222+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45223+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45224+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45225+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45226+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45227+4 4 4 4 4 4
45228+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45229+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45230+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45231+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45232+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45233+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45234+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45235+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45236+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45237+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45238+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45239+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45240+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45241+4 4 4 4 4 4
45242+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45243+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45244+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45245+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45246+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45247+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45248+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45249+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45250+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45251+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45252+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45253+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45254+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45255+4 4 4 4 4 4
45256+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45257+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45258+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45259+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45260+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45261+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45262+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45263+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45264+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45265+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45266+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45267+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45268+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45269+4 4 4 4 4 4
45270+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45271+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45272+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45273+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45274+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45275+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45276+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45277+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45278+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45279+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45280+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45281+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45282+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45283+4 4 4 4 4 4
45284+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45285+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45286+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45287+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45288+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45289+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45290+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45291+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45292+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45293+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45294+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45295+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45296+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45297+4 4 4 4 4 4
45298+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45299+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45300+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45301+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45302+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45303+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45304+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45305+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45306+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45307+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45308+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45309+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45310+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45311+4 4 4 4 4 4
45312+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45313+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45314+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45315+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45316+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45317+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45318+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45319+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45320+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45321+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45322+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45323+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45324+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45325+4 4 4 4 4 4
45326+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45327+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45328+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45329+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45330+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45331+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45332+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45333+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45334+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45335+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45336+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45337+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45338+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45339+4 4 4 4 4 4
45340+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45341+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45342+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45343+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45344+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45345+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45346+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45347+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45348+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45349+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45350+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45351+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45352+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45353+4 4 4 4 4 4
45354+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45355+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45356+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45357+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45358+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45359+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45360+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45361+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45362+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45363+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45364+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45365+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45366+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45367+4 4 4 4 4 4
45368+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45369+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45370+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45371+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45372+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45373+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45374+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45375+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45376+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45377+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45378+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45379+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45380+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45381+4 4 4 4 4 4
45382+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45383+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45384+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45385+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45386+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45387+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45388+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45389+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45390+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45391+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45392+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45393+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45394+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45395+4 4 4 4 4 4
45396+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45397+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45398+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45399+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45400+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45401+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45402+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45403+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45404+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45405+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45406+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45407+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45408+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45409+4 4 4 4 4 4
45410+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45411+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45412+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45413+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45414+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45415+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45416+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45417+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45418+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45419+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45420+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45421+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45422+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45423+4 4 4 4 4 4
45424+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45425+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45426+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45427+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45428+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45429+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45430+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45431+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45432+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45433+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45434+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45435+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45436+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45437+4 4 4 4 4 4
45438+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45439+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45440+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45441+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45442+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45443+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45444+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45445+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45446+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45447+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45448+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45449+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45450+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45451+4 4 4 4 4 4
45452+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45453+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45454+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45455+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45456+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45457+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45458+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45459+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45460+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45461+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45462+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45463+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45464+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45465+4 4 4 4 4 4
45466+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45467+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45468+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45469+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45470+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45471+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45472+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45473+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45474+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45475+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45476+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45477+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45478+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45479+4 4 4 4 4 4
45480+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45481+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45482+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45483+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45484+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45485+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45486+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45487+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45488+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45489+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45490+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45491+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45492+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45493+4 4 4 4 4 4
45494+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45495+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45496+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45497+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45498+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45499+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45500+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45501+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45502+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45503+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45504+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45505+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45506+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45507+4 4 4 4 4 4
45508+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45509+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45510+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45511+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45512+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45513+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45514+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45515+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45516+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45517+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45518+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45519+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45520+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45521+4 4 4 4 4 4
45522+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45523+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45524+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45525+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45526+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45527+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45528+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45529+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45530+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45531+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45532+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45533+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45534+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45535+4 4 4 4 4 4
45536+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45537+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45538+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45539+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45540+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45541+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45542+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45543+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45544+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45545+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45546+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45547+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45548+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45549+4 4 4 4 4 4
45550+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45551+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45552+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45553+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45554+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45555+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45556+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45557+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45558+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45559+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45560+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45561+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45562+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45563+4 4 4 4 4 4
45564+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45565+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45566+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45567+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45568+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45569+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45570+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45571+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45572+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45573+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45574+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45575+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45576+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45577+4 4 4 4 4 4
45578+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45579+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45580+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45581+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45582+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45583+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45584+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45585+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45586+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45587+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45588+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45589+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45590+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45591+4 4 4 4 4 4
45592+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45593+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45594+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45595+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45596+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45597+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45598+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45599+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45600+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45601+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45602+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45603+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45604+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45605+4 4 4 4 4 4
45606+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45607+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45608+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45609+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45610+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45611+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45612+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45613+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45614+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45615+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45616+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45617+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45618+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45619+4 4 4 4 4 4
45620+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45621+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45622+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45623+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45624+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45625+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45626+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45627+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45628+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45629+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45630+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45631+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45632+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45633+4 4 4 4 4 4
45634+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45635+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45636+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45637+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45638+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45639+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45640+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45641+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45642+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45643+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45644+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45645+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45646+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45647+4 4 4 4 4 4
45648+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45649+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45650+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45651+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45652+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45653+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45654+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45655+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45657+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45658+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45659+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45660+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45661+4 4 4 4 4 4
45662+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45663+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45664+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45665+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45666+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45667+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45668+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45669+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45671+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45672+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45673+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45674+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45675+4 4 4 4 4 4
45676+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45677+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45678+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45679+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45680+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45681+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45682+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45683+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45685+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45686+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45687+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45688+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45689+4 4 4 4 4 4
45690+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45691+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45692+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45693+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45694+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45695+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45696+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45697+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45700+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45701+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45702+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45703+4 4 4 4 4 4
45704+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45705+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45706+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45707+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45708+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45709+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45710+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45711+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45715+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45716+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45717+4 4 4 4 4 4
45718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45720+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45721+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45722+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45723+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45724+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45725+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45731+4 4 4 4 4 4
45732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45735+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45736+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45737+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45738+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45745+4 4 4 4 4 4
45746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45749+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45750+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45751+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45752+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45759+4 4 4 4 4 4
45760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45763+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45764+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45765+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45766+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45773+4 4 4 4 4 4
45774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45777+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
45778+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
45779+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
45780+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
45781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45787+4 4 4 4 4 4
45788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45792+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
45793+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45794+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45801+4 4 4 4 4 4
45802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45806+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
45807+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
45808+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45815+4 4 4 4 4 4
45816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45820+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
45821+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
45822+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45829+4 4 4 4 4 4
45830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45834+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
45835+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
45836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45843+4 4 4 4 4 4
45844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45848+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45849+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
45850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45857+4 4 4 4 4 4
45858diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
45859index 443e3c8..c443d6a 100644
45860--- a/drivers/video/nvidia/nv_backlight.c
45861+++ b/drivers/video/nvidia/nv_backlight.c
45862@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
45863 return bd->props.brightness;
45864 }
45865
45866-static struct backlight_ops nvidia_bl_ops = {
45867+static const struct backlight_ops nvidia_bl_ops = {
45868 .get_brightness = nvidia_bl_get_brightness,
45869 .update_status = nvidia_bl_update_status,
45870 };
45871diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
45872index d94c57f..912984c 100644
45873--- a/drivers/video/riva/fbdev.c
45874+++ b/drivers/video/riva/fbdev.c
45875@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
45876 return bd->props.brightness;
45877 }
45878
45879-static struct backlight_ops riva_bl_ops = {
45880+static const struct backlight_ops riva_bl_ops = {
45881 .get_brightness = riva_bl_get_brightness,
45882 .update_status = riva_bl_update_status,
45883 };
45884diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
45885index 54fbb29..2c108fc 100644
45886--- a/drivers/video/uvesafb.c
45887+++ b/drivers/video/uvesafb.c
45888@@ -18,6 +18,7 @@
45889 #include <linux/fb.h>
45890 #include <linux/io.h>
45891 #include <linux/mutex.h>
45892+#include <linux/moduleloader.h>
45893 #include <video/edid.h>
45894 #include <video/uvesafb.h>
45895 #ifdef CONFIG_X86
45896@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
45897 NULL,
45898 };
45899
45900- return call_usermodehelper(v86d_path, argv, envp, 1);
45901+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
45902 }
45903
45904 /*
45905@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
45906 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
45907 par->pmi_setpal = par->ypan = 0;
45908 } else {
45909+
45910+#ifdef CONFIG_PAX_KERNEXEC
45911+#ifdef CONFIG_MODULES
45912+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
45913+#endif
45914+ if (!par->pmi_code) {
45915+ par->pmi_setpal = par->ypan = 0;
45916+ return 0;
45917+ }
45918+#endif
45919+
45920 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
45921 + task->t.regs.edi);
45922+
45923+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45924+ pax_open_kernel();
45925+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
45926+ pax_close_kernel();
45927+
45928+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
45929+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
45930+#else
45931 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
45932 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
45933+#endif
45934+
45935 printk(KERN_INFO "uvesafb: protected mode interface info at "
45936 "%04x:%04x\n",
45937 (u16)task->t.regs.es, (u16)task->t.regs.edi);
45938@@ -1799,6 +1822,11 @@ out:
45939 if (par->vbe_modes)
45940 kfree(par->vbe_modes);
45941
45942+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45943+ if (par->pmi_code)
45944+ module_free_exec(NULL, par->pmi_code);
45945+#endif
45946+
45947 framebuffer_release(info);
45948 return err;
45949 }
45950@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
45951 kfree(par->vbe_state_orig);
45952 if (par->vbe_state_saved)
45953 kfree(par->vbe_state_saved);
45954+
45955+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45956+ if (par->pmi_code)
45957+ module_free_exec(NULL, par->pmi_code);
45958+#endif
45959+
45960 }
45961
45962 framebuffer_release(info);
45963diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
45964index bd37ee1..cb827e8 100644
45965--- a/drivers/video/vesafb.c
45966+++ b/drivers/video/vesafb.c
45967@@ -9,6 +9,7 @@
45968 */
45969
45970 #include <linux/module.h>
45971+#include <linux/moduleloader.h>
45972 #include <linux/kernel.h>
45973 #include <linux/errno.h>
45974 #include <linux/string.h>
45975@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
45976 static int vram_total __initdata; /* Set total amount of memory */
45977 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
45978 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
45979-static void (*pmi_start)(void) __read_mostly;
45980-static void (*pmi_pal) (void) __read_mostly;
45981+static void (*pmi_start)(void) __read_only;
45982+static void (*pmi_pal) (void) __read_only;
45983 static int depth __read_mostly;
45984 static int vga_compat __read_mostly;
45985 /* --------------------------------------------------------------------- */
45986@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
45987 unsigned int size_vmode;
45988 unsigned int size_remap;
45989 unsigned int size_total;
45990+ void *pmi_code = NULL;
45991
45992 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
45993 return -ENODEV;
45994@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
45995 size_remap = size_total;
45996 vesafb_fix.smem_len = size_remap;
45997
45998-#ifndef __i386__
45999- screen_info.vesapm_seg = 0;
46000-#endif
46001-
46002 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
46003 printk(KERN_WARNING
46004 "vesafb: cannot reserve video memory at 0x%lx\n",
46005@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
46006 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
46007 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
46008
46009+#ifdef __i386__
46010+
46011+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46012+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
46013+ if (!pmi_code)
46014+#elif !defined(CONFIG_PAX_KERNEXEC)
46015+ if (0)
46016+#endif
46017+
46018+#endif
46019+ screen_info.vesapm_seg = 0;
46020+
46021 if (screen_info.vesapm_seg) {
46022- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46023- screen_info.vesapm_seg,screen_info.vesapm_off);
46024+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46025+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46026 }
46027
46028 if (screen_info.vesapm_seg < 0xc000)
46029@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46030
46031 if (ypan || pmi_setpal) {
46032 unsigned short *pmi_base;
46033+
46034 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46035- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46036- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46037+
46038+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46039+ pax_open_kernel();
46040+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46041+#else
46042+ pmi_code = pmi_base;
46043+#endif
46044+
46045+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46046+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46047+
46048+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46049+ pmi_start = ktva_ktla(pmi_start);
46050+ pmi_pal = ktva_ktla(pmi_pal);
46051+ pax_close_kernel();
46052+#endif
46053+
46054 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46055 if (pmi_base[3]) {
46056 printk(KERN_INFO "vesafb: pmi: ports = ");
46057@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46058 info->node, info->fix.id);
46059 return 0;
46060 err:
46061+
46062+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46063+ module_free_exec(NULL, pmi_code);
46064+#endif
46065+
46066 if (info->screen_base)
46067 iounmap(info->screen_base);
46068 framebuffer_release(info);
46069diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46070index 88a60e0..6783cc2 100644
46071--- a/drivers/xen/sys-hypervisor.c
46072+++ b/drivers/xen/sys-hypervisor.c
46073@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46074 return 0;
46075 }
46076
46077-static struct sysfs_ops hyp_sysfs_ops = {
46078+static const struct sysfs_ops hyp_sysfs_ops = {
46079 .show = hyp_sysfs_show,
46080 .store = hyp_sysfs_store,
46081 };
46082diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46083index 18f74ec..3227009 100644
46084--- a/fs/9p/vfs_inode.c
46085+++ b/fs/9p/vfs_inode.c
46086@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46087 static void
46088 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46089 {
46090- char *s = nd_get_link(nd);
46091+ const char *s = nd_get_link(nd);
46092
46093 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46094 IS_ERR(s) ? "<error>" : s);
46095diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46096index bb4cc5b..df5eaa0 100644
46097--- a/fs/Kconfig.binfmt
46098+++ b/fs/Kconfig.binfmt
46099@@ -86,7 +86,7 @@ config HAVE_AOUT
46100
46101 config BINFMT_AOUT
46102 tristate "Kernel support for a.out and ECOFF binaries"
46103- depends on HAVE_AOUT
46104+ depends on HAVE_AOUT && BROKEN
46105 ---help---
46106 A.out (Assembler.OUTput) is a set of formats for libraries and
46107 executables used in the earliest versions of UNIX. Linux used
46108diff --git a/fs/aio.c b/fs/aio.c
46109index 22a19ad..d484e5b 100644
46110--- a/fs/aio.c
46111+++ b/fs/aio.c
46112@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46113 size += sizeof(struct io_event) * nr_events;
46114 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46115
46116- if (nr_pages < 0)
46117+ if (nr_pages <= 0)
46118 return -EINVAL;
46119
46120 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46121@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46122 struct aio_timeout to;
46123 int retry = 0;
46124
46125+ pax_track_stack();
46126+
46127 /* needed to zero any padding within an entry (there shouldn't be
46128 * any, but C is fun!
46129 */
46130@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46131 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46132 {
46133 ssize_t ret;
46134+ struct iovec iovstack;
46135
46136 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46137 kiocb->ki_nbytes, 1,
46138- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46139+ &iovstack, &kiocb->ki_iovec);
46140 if (ret < 0)
46141 goto out;
46142
46143+ if (kiocb->ki_iovec == &iovstack) {
46144+ kiocb->ki_inline_vec = iovstack;
46145+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
46146+ }
46147 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46148 kiocb->ki_cur_seg = 0;
46149 /* ki_nbytes/left now reflect bytes instead of segs */
46150diff --git a/fs/attr.c b/fs/attr.c
46151index 96d394b..33cf5b4 100644
46152--- a/fs/attr.c
46153+++ b/fs/attr.c
46154@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46155 unsigned long limit;
46156
46157 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46158+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46159 if (limit != RLIM_INFINITY && offset > limit)
46160 goto out_sig;
46161 if (offset > inode->i_sb->s_maxbytes)
46162diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46163index 4a1401c..05eb5ca 100644
46164--- a/fs/autofs/root.c
46165+++ b/fs/autofs/root.c
46166@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46167 set_bit(n,sbi->symlink_bitmap);
46168 sl = &sbi->symlink[n];
46169 sl->len = strlen(symname);
46170- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46171+ slsize = sl->len+1;
46172+ sl->data = kmalloc(slsize, GFP_KERNEL);
46173 if (!sl->data) {
46174 clear_bit(n,sbi->symlink_bitmap);
46175 unlock_kernel();
46176diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46177index b4ea829..e63ef18 100644
46178--- a/fs/autofs4/symlink.c
46179+++ b/fs/autofs4/symlink.c
46180@@ -15,7 +15,7 @@
46181 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46182 {
46183 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46184- nd_set_link(nd, (char *)ino->u.symlink);
46185+ nd_set_link(nd, ino->u.symlink);
46186 return NULL;
46187 }
46188
46189diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46190index 2341375..df9d1c2 100644
46191--- a/fs/autofs4/waitq.c
46192+++ b/fs/autofs4/waitq.c
46193@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46194 {
46195 unsigned long sigpipe, flags;
46196 mm_segment_t fs;
46197- const char *data = (const char *)addr;
46198+ const char __user *data = (const char __force_user *)addr;
46199 ssize_t wr = 0;
46200
46201 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46202diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46203index 9158c07..3f06659 100644
46204--- a/fs/befs/linuxvfs.c
46205+++ b/fs/befs/linuxvfs.c
46206@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46207 {
46208 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46209 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46210- char *link = nd_get_link(nd);
46211+ const char *link = nd_get_link(nd);
46212 if (!IS_ERR(link))
46213 kfree(link);
46214 }
46215diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46216index 0133b5a..b3baa9f 100644
46217--- a/fs/binfmt_aout.c
46218+++ b/fs/binfmt_aout.c
46219@@ -16,6 +16,7 @@
46220 #include <linux/string.h>
46221 #include <linux/fs.h>
46222 #include <linux/file.h>
46223+#include <linux/security.h>
46224 #include <linux/stat.h>
46225 #include <linux/fcntl.h>
46226 #include <linux/ptrace.h>
46227@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46228 #endif
46229 # define START_STACK(u) (u.start_stack)
46230
46231+ memset(&dump, 0, sizeof(dump));
46232+
46233 fs = get_fs();
46234 set_fs(KERNEL_DS);
46235 has_dumped = 1;
46236@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46237
46238 /* If the size of the dump file exceeds the rlimit, then see what would happen
46239 if we wrote the stack, but not the data area. */
46240+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46241 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46242 dump.u_dsize = 0;
46243
46244 /* Make sure we have enough room to write the stack and data areas. */
46245+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46246 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46247 dump.u_ssize = 0;
46248
46249@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46250 dump_size = dump.u_ssize << PAGE_SHIFT;
46251 DUMP_WRITE(dump_start,dump_size);
46252 }
46253-/* Finally dump the task struct. Not be used by gdb, but could be useful */
46254- set_fs(KERNEL_DS);
46255- DUMP_WRITE(current,sizeof(*current));
46256+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46257 end_coredump:
46258 set_fs(fs);
46259 return has_dumped;
46260@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46261 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46262 if (rlim >= RLIM_INFINITY)
46263 rlim = ~0;
46264+
46265+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46266 if (ex.a_data + ex.a_bss > rlim)
46267 return -ENOMEM;
46268
46269@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46270 install_exec_creds(bprm);
46271 current->flags &= ~PF_FORKNOEXEC;
46272
46273+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46274+ current->mm->pax_flags = 0UL;
46275+#endif
46276+
46277+#ifdef CONFIG_PAX_PAGEEXEC
46278+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46279+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46280+
46281+#ifdef CONFIG_PAX_EMUTRAMP
46282+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46283+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46284+#endif
46285+
46286+#ifdef CONFIG_PAX_MPROTECT
46287+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46288+ current->mm->pax_flags |= MF_PAX_MPROTECT;
46289+#endif
46290+
46291+ }
46292+#endif
46293+
46294 if (N_MAGIC(ex) == OMAGIC) {
46295 unsigned long text_addr, map_size;
46296 loff_t pos;
46297@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46298
46299 down_write(&current->mm->mmap_sem);
46300 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46301- PROT_READ | PROT_WRITE | PROT_EXEC,
46302+ PROT_READ | PROT_WRITE,
46303 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46304 fd_offset + ex.a_text);
46305 up_write(&current->mm->mmap_sem);
46306diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46307index 1ed37ba..de82ab7 100644
46308--- a/fs/binfmt_elf.c
46309+++ b/fs/binfmt_elf.c
46310@@ -31,6 +31,7 @@
46311 #include <linux/random.h>
46312 #include <linux/elf.h>
46313 #include <linux/utsname.h>
46314+#include <linux/xattr.h>
46315 #include <asm/uaccess.h>
46316 #include <asm/param.h>
46317 #include <asm/page.h>
46318@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46319 #define elf_core_dump NULL
46320 #endif
46321
46322+#ifdef CONFIG_PAX_MPROTECT
46323+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46324+#endif
46325+
46326 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46327 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46328 #else
46329@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46330 .load_binary = load_elf_binary,
46331 .load_shlib = load_elf_library,
46332 .core_dump = elf_core_dump,
46333+
46334+#ifdef CONFIG_PAX_MPROTECT
46335+ .handle_mprotect= elf_handle_mprotect,
46336+#endif
46337+
46338 .min_coredump = ELF_EXEC_PAGESIZE,
46339 .hasvdso = 1
46340 };
46341@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46342
46343 static int set_brk(unsigned long start, unsigned long end)
46344 {
46345+ unsigned long e = end;
46346+
46347 start = ELF_PAGEALIGN(start);
46348 end = ELF_PAGEALIGN(end);
46349 if (end > start) {
46350@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46351 if (BAD_ADDR(addr))
46352 return addr;
46353 }
46354- current->mm->start_brk = current->mm->brk = end;
46355+ current->mm->start_brk = current->mm->brk = e;
46356 return 0;
46357 }
46358
46359@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46360 elf_addr_t __user *u_rand_bytes;
46361 const char *k_platform = ELF_PLATFORM;
46362 const char *k_base_platform = ELF_BASE_PLATFORM;
46363- unsigned char k_rand_bytes[16];
46364+ u32 k_rand_bytes[4];
46365 int items;
46366 elf_addr_t *elf_info;
46367 int ei_index = 0;
46368 const struct cred *cred = current_cred();
46369 struct vm_area_struct *vma;
46370+ unsigned long saved_auxv[AT_VECTOR_SIZE];
46371+
46372+ pax_track_stack();
46373
46374 /*
46375 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46376@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46377 * Generate 16 random bytes for userspace PRNG seeding.
46378 */
46379 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46380- u_rand_bytes = (elf_addr_t __user *)
46381- STACK_ALLOC(p, sizeof(k_rand_bytes));
46382+ srandom32(k_rand_bytes[0] ^ random32());
46383+ srandom32(k_rand_bytes[1] ^ random32());
46384+ srandom32(k_rand_bytes[2] ^ random32());
46385+ srandom32(k_rand_bytes[3] ^ random32());
46386+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
46387+ u_rand_bytes = (elf_addr_t __user *) p;
46388 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46389 return -EFAULT;
46390
46391@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46392 return -EFAULT;
46393 current->mm->env_end = p;
46394
46395+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46396+
46397 /* Put the elf_info on the stack in the right place. */
46398 sp = (elf_addr_t __user *)envp + 1;
46399- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46400+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46401 return -EFAULT;
46402 return 0;
46403 }
46404@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46405 {
46406 struct elf_phdr *elf_phdata;
46407 struct elf_phdr *eppnt;
46408- unsigned long load_addr = 0;
46409+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46410 int load_addr_set = 0;
46411 unsigned long last_bss = 0, elf_bss = 0;
46412- unsigned long error = ~0UL;
46413+ unsigned long error = -EINVAL;
46414 unsigned long total_size;
46415 int retval, i, size;
46416
46417@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46418 goto out_close;
46419 }
46420
46421+#ifdef CONFIG_PAX_SEGMEXEC
46422+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46423+ pax_task_size = SEGMEXEC_TASK_SIZE;
46424+#endif
46425+
46426 eppnt = elf_phdata;
46427 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46428 if (eppnt->p_type == PT_LOAD) {
46429@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46430 k = load_addr + eppnt->p_vaddr;
46431 if (BAD_ADDR(k) ||
46432 eppnt->p_filesz > eppnt->p_memsz ||
46433- eppnt->p_memsz > TASK_SIZE ||
46434- TASK_SIZE - eppnt->p_memsz < k) {
46435+ eppnt->p_memsz > pax_task_size ||
46436+ pax_task_size - eppnt->p_memsz < k) {
46437 error = -ENOMEM;
46438 goto out_close;
46439 }
46440@@ -532,6 +558,351 @@ out:
46441 return error;
46442 }
46443
46444+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46445+{
46446+ unsigned long pax_flags = 0UL;
46447+
46448+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46449+
46450+#ifdef CONFIG_PAX_PAGEEXEC
46451+ if (elf_phdata->p_flags & PF_PAGEEXEC)
46452+ pax_flags |= MF_PAX_PAGEEXEC;
46453+#endif
46454+
46455+#ifdef CONFIG_PAX_SEGMEXEC
46456+ if (elf_phdata->p_flags & PF_SEGMEXEC)
46457+ pax_flags |= MF_PAX_SEGMEXEC;
46458+#endif
46459+
46460+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46461+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46462+ if (nx_enabled)
46463+ pax_flags &= ~MF_PAX_SEGMEXEC;
46464+ else
46465+ pax_flags &= ~MF_PAX_PAGEEXEC;
46466+ }
46467+#endif
46468+
46469+#ifdef CONFIG_PAX_EMUTRAMP
46470+ if (elf_phdata->p_flags & PF_EMUTRAMP)
46471+ pax_flags |= MF_PAX_EMUTRAMP;
46472+#endif
46473+
46474+#ifdef CONFIG_PAX_MPROTECT
46475+ if (elf_phdata->p_flags & PF_MPROTECT)
46476+ pax_flags |= MF_PAX_MPROTECT;
46477+#endif
46478+
46479+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46480+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46481+ pax_flags |= MF_PAX_RANDMMAP;
46482+#endif
46483+
46484+#endif
46485+
46486+ return pax_flags;
46487+}
46488+
46489+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46490+{
46491+ unsigned long pax_flags = 0UL;
46492+
46493+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46494+
46495+#ifdef CONFIG_PAX_PAGEEXEC
46496+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46497+ pax_flags |= MF_PAX_PAGEEXEC;
46498+#endif
46499+
46500+#ifdef CONFIG_PAX_SEGMEXEC
46501+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46502+ pax_flags |= MF_PAX_SEGMEXEC;
46503+#endif
46504+
46505+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46506+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46507+ if (nx_enabled)
46508+ pax_flags &= ~MF_PAX_SEGMEXEC;
46509+ else
46510+ pax_flags &= ~MF_PAX_PAGEEXEC;
46511+ }
46512+#endif
46513+
46514+#ifdef CONFIG_PAX_EMUTRAMP
46515+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46516+ pax_flags |= MF_PAX_EMUTRAMP;
46517+#endif
46518+
46519+#ifdef CONFIG_PAX_MPROTECT
46520+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46521+ pax_flags |= MF_PAX_MPROTECT;
46522+#endif
46523+
46524+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46525+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46526+ pax_flags |= MF_PAX_RANDMMAP;
46527+#endif
46528+
46529+#endif
46530+
46531+ return pax_flags;
46532+}
46533+
46534+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46535+{
46536+ unsigned long pax_flags = 0UL;
46537+
46538+#ifdef CONFIG_PAX_EI_PAX
46539+
46540+#ifdef CONFIG_PAX_PAGEEXEC
46541+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46542+ pax_flags |= MF_PAX_PAGEEXEC;
46543+#endif
46544+
46545+#ifdef CONFIG_PAX_SEGMEXEC
46546+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46547+ pax_flags |= MF_PAX_SEGMEXEC;
46548+#endif
46549+
46550+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46551+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46552+ if (nx_enabled)
46553+ pax_flags &= ~MF_PAX_SEGMEXEC;
46554+ else
46555+ pax_flags &= ~MF_PAX_PAGEEXEC;
46556+ }
46557+#endif
46558+
46559+#ifdef CONFIG_PAX_EMUTRAMP
46560+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46561+ pax_flags |= MF_PAX_EMUTRAMP;
46562+#endif
46563+
46564+#ifdef CONFIG_PAX_MPROTECT
46565+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46566+ pax_flags |= MF_PAX_MPROTECT;
46567+#endif
46568+
46569+#ifdef CONFIG_PAX_ASLR
46570+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46571+ pax_flags |= MF_PAX_RANDMMAP;
46572+#endif
46573+
46574+#else
46575+
46576+#ifdef CONFIG_PAX_PAGEEXEC
46577+ pax_flags |= MF_PAX_PAGEEXEC;
46578+#endif
46579+
46580+#ifdef CONFIG_PAX_MPROTECT
46581+ pax_flags |= MF_PAX_MPROTECT;
46582+#endif
46583+
46584+#ifdef CONFIG_PAX_RANDMMAP
46585+ pax_flags |= MF_PAX_RANDMMAP;
46586+#endif
46587+
46588+#ifdef CONFIG_PAX_SEGMEXEC
46589+ if (!(__supported_pte_mask & _PAGE_NX)) {
46590+ pax_flags &= ~MF_PAX_PAGEEXEC;
46591+ pax_flags |= MF_PAX_SEGMEXEC;
46592+ }
46593+#endif
46594+
46595+#endif
46596+
46597+ return pax_flags;
46598+}
46599+
46600+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46601+{
46602+
46603+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46604+ unsigned long i;
46605+
46606+ for (i = 0UL; i < elf_ex->e_phnum; i++)
46607+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46608+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46609+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46610+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46611+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46612+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46613+ return ~0UL;
46614+
46615+#ifdef CONFIG_PAX_SOFTMODE
46616+ if (pax_softmode)
46617+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46618+ else
46619+#endif
46620+
46621+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46622+ break;
46623+ }
46624+#endif
46625+
46626+ return ~0UL;
46627+}
46628+
46629+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46630+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46631+{
46632+ unsigned long pax_flags = 0UL;
46633+
46634+#ifdef CONFIG_PAX_PAGEEXEC
46635+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46636+ pax_flags |= MF_PAX_PAGEEXEC;
46637+#endif
46638+
46639+#ifdef CONFIG_PAX_SEGMEXEC
46640+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46641+ pax_flags |= MF_PAX_SEGMEXEC;
46642+#endif
46643+
46644+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46645+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46646+ if ((__supported_pte_mask & _PAGE_NX))
46647+ pax_flags &= ~MF_PAX_SEGMEXEC;
46648+ else
46649+ pax_flags &= ~MF_PAX_PAGEEXEC;
46650+ }
46651+#endif
46652+
46653+#ifdef CONFIG_PAX_EMUTRAMP
46654+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46655+ pax_flags |= MF_PAX_EMUTRAMP;
46656+#endif
46657+
46658+#ifdef CONFIG_PAX_MPROTECT
46659+ if (pax_flags_softmode & MF_PAX_MPROTECT)
46660+ pax_flags |= MF_PAX_MPROTECT;
46661+#endif
46662+
46663+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46664+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46665+ pax_flags |= MF_PAX_RANDMMAP;
46666+#endif
46667+
46668+ return pax_flags;
46669+}
46670+
46671+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46672+{
46673+ unsigned long pax_flags = 0UL;
46674+
46675+#ifdef CONFIG_PAX_PAGEEXEC
46676+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46677+ pax_flags |= MF_PAX_PAGEEXEC;
46678+#endif
46679+
46680+#ifdef CONFIG_PAX_SEGMEXEC
46681+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46682+ pax_flags |= MF_PAX_SEGMEXEC;
46683+#endif
46684+
46685+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46686+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46687+ if ((__supported_pte_mask & _PAGE_NX))
46688+ pax_flags &= ~MF_PAX_SEGMEXEC;
46689+ else
46690+ pax_flags &= ~MF_PAX_PAGEEXEC;
46691+ }
46692+#endif
46693+
46694+#ifdef CONFIG_PAX_EMUTRAMP
46695+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46696+ pax_flags |= MF_PAX_EMUTRAMP;
46697+#endif
46698+
46699+#ifdef CONFIG_PAX_MPROTECT
46700+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46701+ pax_flags |= MF_PAX_MPROTECT;
46702+#endif
46703+
46704+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46705+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46706+ pax_flags |= MF_PAX_RANDMMAP;
46707+#endif
46708+
46709+ return pax_flags;
46710+}
46711+#endif
46712+
46713+static unsigned long pax_parse_xattr_pax(struct file * const file)
46714+{
46715+
46716+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46717+ ssize_t xattr_size, i;
46718+ unsigned char xattr_value[5];
46719+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46720+
46721+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46722+ if (xattr_size <= 0)
46723+ return ~0UL;
46724+
46725+ for (i = 0; i < xattr_size; i++)
46726+ switch (xattr_value[i]) {
46727+ default:
46728+ return ~0UL;
46729+
46730+#define parse_flag(option1, option2, flag) \
46731+ case option1: \
46732+ pax_flags_hardmode |= MF_PAX_##flag; \
46733+ break; \
46734+ case option2: \
46735+ pax_flags_softmode |= MF_PAX_##flag; \
46736+ break;
46737+
46738+ parse_flag('p', 'P', PAGEEXEC);
46739+ parse_flag('e', 'E', EMUTRAMP);
46740+ parse_flag('m', 'M', MPROTECT);
46741+ parse_flag('r', 'R', RANDMMAP);
46742+ parse_flag('s', 'S', SEGMEXEC);
46743+
46744+#undef parse_flag
46745+ }
46746+
46747+ if (pax_flags_hardmode & pax_flags_softmode)
46748+ return ~0UL;
46749+
46750+#ifdef CONFIG_PAX_SOFTMODE
46751+ if (pax_softmode)
46752+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
46753+ else
46754+#endif
46755+
46756+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
46757+#else
46758+ return ~0UL;
46759+#endif
46760+
46761+}
46762+
46763+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46764+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
46765+{
46766+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
46767+
46768+ pax_flags = pax_parse_ei_pax(elf_ex);
46769+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
46770+ xattr_pax_flags = pax_parse_xattr_pax(file);
46771+
46772+ if (pt_pax_flags == ~0UL)
46773+ pt_pax_flags = xattr_pax_flags;
46774+ else if (xattr_pax_flags == ~0UL)
46775+ xattr_pax_flags = pt_pax_flags;
46776+ if (pt_pax_flags != xattr_pax_flags)
46777+ return -EINVAL;
46778+ if (pt_pax_flags != ~0UL)
46779+ pax_flags = pt_pax_flags;
46780+
46781+ if (0 > pax_check_flags(&pax_flags))
46782+ return -EINVAL;
46783+
46784+ current->mm->pax_flags = pax_flags;
46785+ return 0;
46786+}
46787+#endif
46788+
46789 /*
46790 * These are the functions used to load ELF style executables and shared
46791 * libraries. There is no binary dependent code anywhere else.
46792@@ -548,6 +919,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
46793 {
46794 unsigned int random_variable = 0;
46795
46796+#ifdef CONFIG_PAX_RANDUSTACK
46797+ if (randomize_va_space)
46798+ return stack_top - current->mm->delta_stack;
46799+#endif
46800+
46801 if ((current->flags & PF_RANDOMIZE) &&
46802 !(current->personality & ADDR_NO_RANDOMIZE)) {
46803 random_variable = get_random_int() & STACK_RND_MASK;
46804@@ -566,7 +942,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46805 unsigned long load_addr = 0, load_bias = 0;
46806 int load_addr_set = 0;
46807 char * elf_interpreter = NULL;
46808- unsigned long error;
46809+ unsigned long error = 0;
46810 struct elf_phdr *elf_ppnt, *elf_phdata;
46811 unsigned long elf_bss, elf_brk;
46812 int retval, i;
46813@@ -576,11 +952,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46814 unsigned long start_code, end_code, start_data, end_data;
46815 unsigned long reloc_func_desc = 0;
46816 int executable_stack = EXSTACK_DEFAULT;
46817- unsigned long def_flags = 0;
46818 struct {
46819 struct elfhdr elf_ex;
46820 struct elfhdr interp_elf_ex;
46821 } *loc;
46822+ unsigned long pax_task_size = TASK_SIZE;
46823
46824 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
46825 if (!loc) {
46826@@ -718,11 +1094,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46827
46828 /* OK, This is the point of no return */
46829 current->flags &= ~PF_FORKNOEXEC;
46830- current->mm->def_flags = def_flags;
46831+
46832+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46833+ current->mm->pax_flags = 0UL;
46834+#endif
46835+
46836+#ifdef CONFIG_PAX_DLRESOLVE
46837+ current->mm->call_dl_resolve = 0UL;
46838+#endif
46839+
46840+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
46841+ current->mm->call_syscall = 0UL;
46842+#endif
46843+
46844+#ifdef CONFIG_PAX_ASLR
46845+ current->mm->delta_mmap = 0UL;
46846+ current->mm->delta_stack = 0UL;
46847+#endif
46848+
46849+ current->mm->def_flags = 0;
46850+
46851+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46852+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
46853+ send_sig(SIGKILL, current, 0);
46854+ goto out_free_dentry;
46855+ }
46856+#endif
46857+
46858+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46859+ pax_set_initial_flags(bprm);
46860+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
46861+ if (pax_set_initial_flags_func)
46862+ (pax_set_initial_flags_func)(bprm);
46863+#endif
46864+
46865+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46866+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
46867+ current->mm->context.user_cs_limit = PAGE_SIZE;
46868+ current->mm->def_flags |= VM_PAGEEXEC;
46869+ }
46870+#endif
46871+
46872+#ifdef CONFIG_PAX_SEGMEXEC
46873+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
46874+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
46875+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
46876+ pax_task_size = SEGMEXEC_TASK_SIZE;
46877+ }
46878+#endif
46879+
46880+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
46881+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46882+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
46883+ put_cpu();
46884+ }
46885+#endif
46886
46887 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
46888 may depend on the personality. */
46889 SET_PERSONALITY(loc->elf_ex);
46890+
46891+#ifdef CONFIG_PAX_ASLR
46892+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
46893+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
46894+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
46895+ }
46896+#endif
46897+
46898+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46899+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46900+ executable_stack = EXSTACK_DISABLE_X;
46901+ current->personality &= ~READ_IMPLIES_EXEC;
46902+ } else
46903+#endif
46904+
46905 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
46906 current->personality |= READ_IMPLIES_EXEC;
46907
46908@@ -800,10 +1245,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46909 * might try to exec. This is because the brk will
46910 * follow the loader, and is not movable. */
46911 #ifdef CONFIG_X86
46912- load_bias = 0;
46913+ if (current->flags & PF_RANDOMIZE)
46914+ load_bias = 0;
46915+ else
46916+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46917 #else
46918 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46919 #endif
46920+
46921+#ifdef CONFIG_PAX_RANDMMAP
46922+ /* PaX: randomize base address at the default exe base if requested */
46923+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
46924+#ifdef CONFIG_SPARC64
46925+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
46926+#else
46927+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
46928+#endif
46929+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
46930+ elf_flags |= MAP_FIXED;
46931+ }
46932+#endif
46933+
46934 }
46935
46936 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
46937@@ -836,9 +1298,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46938 * allowed task size. Note that p_filesz must always be
46939 * <= p_memsz so it is only necessary to check p_memsz.
46940 */
46941- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46942- elf_ppnt->p_memsz > TASK_SIZE ||
46943- TASK_SIZE - elf_ppnt->p_memsz < k) {
46944+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46945+ elf_ppnt->p_memsz > pax_task_size ||
46946+ pax_task_size - elf_ppnt->p_memsz < k) {
46947 /* set_brk can never work. Avoid overflows. */
46948 send_sig(SIGKILL, current, 0);
46949 retval = -EINVAL;
46950@@ -866,6 +1328,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46951 start_data += load_bias;
46952 end_data += load_bias;
46953
46954+#ifdef CONFIG_PAX_RANDMMAP
46955+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
46956+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
46957+#endif
46958+
46959 /* Calling set_brk effectively mmaps the pages that we need
46960 * for the bss and break sections. We must do this before
46961 * mapping in the interpreter, to make sure it doesn't wind
46962@@ -877,9 +1344,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46963 goto out_free_dentry;
46964 }
46965 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
46966- send_sig(SIGSEGV, current, 0);
46967- retval = -EFAULT; /* Nobody gets to see this, but.. */
46968- goto out_free_dentry;
46969+ /*
46970+ * This bss-zeroing can fail if the ELF
46971+ * file specifies odd protections. So
46972+ * we don't check the return value
46973+ */
46974 }
46975
46976 if (elf_interpreter) {
46977@@ -1112,8 +1581,10 @@ static int dump_seek(struct file *file, loff_t off)
46978 unsigned long n = off;
46979 if (n > PAGE_SIZE)
46980 n = PAGE_SIZE;
46981- if (!dump_write(file, buf, n))
46982+ if (!dump_write(file, buf, n)) {
46983+ free_page((unsigned long)buf);
46984 return 0;
46985+ }
46986 off -= n;
46987 }
46988 free_page((unsigned long)buf);
46989@@ -1125,7 +1596,7 @@ static int dump_seek(struct file *file, loff_t off)
46990 * Decide what to dump of a segment, part, all or none.
46991 */
46992 static unsigned long vma_dump_size(struct vm_area_struct *vma,
46993- unsigned long mm_flags)
46994+ unsigned long mm_flags, long signr)
46995 {
46996 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
46997
46998@@ -1159,7 +1630,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
46999 if (vma->vm_file == NULL)
47000 return 0;
47001
47002- if (FILTER(MAPPED_PRIVATE))
47003+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
47004 goto whole;
47005
47006 /*
47007@@ -1255,8 +1726,11 @@ static int writenote(struct memelfnote *men, struct file *file,
47008 #undef DUMP_WRITE
47009
47010 #define DUMP_WRITE(addr, nr) \
47011+ do { \
47012+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
47013 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
47014- goto end_coredump;
47015+ goto end_coredump; \
47016+ } while (0);
47017
47018 static void fill_elf_header(struct elfhdr *elf, int segs,
47019 u16 machine, u32 flags, u8 osabi)
47020@@ -1385,9 +1859,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
47021 {
47022 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47023 int i = 0;
47024- do
47025+ do {
47026 i += 2;
47027- while (auxv[i - 2] != AT_NULL);
47028+ } while (auxv[i - 2] != AT_NULL);
47029 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47030 }
47031
47032@@ -1973,7 +2447,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47033 phdr.p_offset = offset;
47034 phdr.p_vaddr = vma->vm_start;
47035 phdr.p_paddr = 0;
47036- phdr.p_filesz = vma_dump_size(vma, mm_flags);
47037+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47038 phdr.p_memsz = vma->vm_end - vma->vm_start;
47039 offset += phdr.p_filesz;
47040 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47041@@ -2006,7 +2480,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47042 unsigned long addr;
47043 unsigned long end;
47044
47045- end = vma->vm_start + vma_dump_size(vma, mm_flags);
47046+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47047
47048 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47049 struct page *page;
47050@@ -2015,6 +2489,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47051 page = get_dump_page(addr);
47052 if (page) {
47053 void *kaddr = kmap(page);
47054+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47055 stop = ((size += PAGE_SIZE) > limit) ||
47056 !dump_write(file, kaddr, PAGE_SIZE);
47057 kunmap(page);
47058@@ -2042,6 +2517,97 @@ out:
47059
47060 #endif /* USE_ELF_CORE_DUMP */
47061
47062+#ifdef CONFIG_PAX_MPROTECT
47063+/* PaX: non-PIC ELF libraries need relocations on their executable segments
47064+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47065+ * we'll remove VM_MAYWRITE for good on RELRO segments.
47066+ *
47067+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47068+ * basis because we want to allow the common case and not the special ones.
47069+ */
47070+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47071+{
47072+ struct elfhdr elf_h;
47073+ struct elf_phdr elf_p;
47074+ unsigned long i;
47075+ unsigned long oldflags;
47076+ bool is_textrel_rw, is_textrel_rx, is_relro;
47077+
47078+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47079+ return;
47080+
47081+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47082+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47083+
47084+#ifdef CONFIG_PAX_ELFRELOCS
47085+ /* possible TEXTREL */
47086+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47087+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47088+#else
47089+ is_textrel_rw = false;
47090+ is_textrel_rx = false;
47091+#endif
47092+
47093+ /* possible RELRO */
47094+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47095+
47096+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47097+ return;
47098+
47099+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47100+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47101+
47102+#ifdef CONFIG_PAX_ETEXECRELOCS
47103+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47104+#else
47105+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47106+#endif
47107+
47108+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47109+ !elf_check_arch(&elf_h) ||
47110+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47111+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47112+ return;
47113+
47114+ for (i = 0UL; i < elf_h.e_phnum; i++) {
47115+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47116+ return;
47117+ switch (elf_p.p_type) {
47118+ case PT_DYNAMIC:
47119+ if (!is_textrel_rw && !is_textrel_rx)
47120+ continue;
47121+ i = 0UL;
47122+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47123+ elf_dyn dyn;
47124+
47125+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47126+ return;
47127+ if (dyn.d_tag == DT_NULL)
47128+ return;
47129+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47130+ gr_log_textrel(vma);
47131+ if (is_textrel_rw)
47132+ vma->vm_flags |= VM_MAYWRITE;
47133+ else
47134+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47135+ vma->vm_flags &= ~VM_MAYWRITE;
47136+ return;
47137+ }
47138+ i++;
47139+ }
47140+ return;
47141+
47142+ case PT_GNU_RELRO:
47143+ if (!is_relro)
47144+ continue;
47145+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47146+ vma->vm_flags &= ~VM_MAYWRITE;
47147+ return;
47148+ }
47149+ }
47150+}
47151+#endif
47152+
47153 static int __init init_elf_binfmt(void)
47154 {
47155 return register_binfmt(&elf_format);
47156diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47157index ca88c46..f155a60 100644
47158--- a/fs/binfmt_flat.c
47159+++ b/fs/binfmt_flat.c
47160@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47161 realdatastart = (unsigned long) -ENOMEM;
47162 printk("Unable to allocate RAM for process data, errno %d\n",
47163 (int)-realdatastart);
47164+ down_write(&current->mm->mmap_sem);
47165 do_munmap(current->mm, textpos, text_len);
47166+ up_write(&current->mm->mmap_sem);
47167 ret = realdatastart;
47168 goto err;
47169 }
47170@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47171 }
47172 if (IS_ERR_VALUE(result)) {
47173 printk("Unable to read data+bss, errno %d\n", (int)-result);
47174+ down_write(&current->mm->mmap_sem);
47175 do_munmap(current->mm, textpos, text_len);
47176 do_munmap(current->mm, realdatastart, data_len + extra);
47177+ up_write(&current->mm->mmap_sem);
47178 ret = result;
47179 goto err;
47180 }
47181@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47182 }
47183 if (IS_ERR_VALUE(result)) {
47184 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47185+ down_write(&current->mm->mmap_sem);
47186 do_munmap(current->mm, textpos, text_len + data_len + extra +
47187 MAX_SHARED_LIBS * sizeof(unsigned long));
47188+ up_write(&current->mm->mmap_sem);
47189 ret = result;
47190 goto err;
47191 }
47192diff --git a/fs/bio.c b/fs/bio.c
47193index e696713..83de133 100644
47194--- a/fs/bio.c
47195+++ b/fs/bio.c
47196@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47197
47198 i = 0;
47199 while (i < bio_slab_nr) {
47200- struct bio_slab *bslab = &bio_slabs[i];
47201+ bslab = &bio_slabs[i];
47202
47203 if (!bslab->slab && entry == -1)
47204 entry = i;
47205@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47206 const int read = bio_data_dir(bio) == READ;
47207 struct bio_map_data *bmd = bio->bi_private;
47208 int i;
47209- char *p = bmd->sgvecs[0].iov_base;
47210+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47211
47212 __bio_for_each_segment(bvec, bio, i, 0) {
47213 char *addr = page_address(bvec->bv_page);
47214diff --git a/fs/block_dev.c b/fs/block_dev.c
47215index e65efa2..04fae57 100644
47216--- a/fs/block_dev.c
47217+++ b/fs/block_dev.c
47218@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47219 else if (bdev->bd_contains == bdev)
47220 res = 0; /* is a whole device which isn't held */
47221
47222- else if (bdev->bd_contains->bd_holder == bd_claim)
47223+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47224 res = 0; /* is a partition of a device that is being partitioned */
47225 else if (bdev->bd_contains->bd_holder != NULL)
47226 res = -EBUSY; /* is a partition of a held device */
47227diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47228index c4bc570..42acd8d 100644
47229--- a/fs/btrfs/ctree.c
47230+++ b/fs/btrfs/ctree.c
47231@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47232 free_extent_buffer(buf);
47233 add_root_to_dirty_list(root);
47234 } else {
47235- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47236- parent_start = parent->start;
47237- else
47238+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47239+ if (parent)
47240+ parent_start = parent->start;
47241+ else
47242+ parent_start = 0;
47243+ } else
47244 parent_start = 0;
47245
47246 WARN_ON(trans->transid != btrfs_header_generation(parent));
47247@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47248
47249 ret = 0;
47250 if (slot == 0) {
47251- struct btrfs_disk_key disk_key;
47252 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47253 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47254 }
47255diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47256index f447188..59c17c5 100644
47257--- a/fs/btrfs/disk-io.c
47258+++ b/fs/btrfs/disk-io.c
47259@@ -39,7 +39,7 @@
47260 #include "tree-log.h"
47261 #include "free-space-cache.h"
47262
47263-static struct extent_io_ops btree_extent_io_ops;
47264+static const struct extent_io_ops btree_extent_io_ops;
47265 static void end_workqueue_fn(struct btrfs_work *work);
47266 static void free_fs_root(struct btrfs_root *root);
47267
47268@@ -2607,7 +2607,7 @@ out:
47269 return 0;
47270 }
47271
47272-static struct extent_io_ops btree_extent_io_ops = {
47273+static const struct extent_io_ops btree_extent_io_ops = {
47274 .write_cache_pages_lock_hook = btree_lock_page_hook,
47275 .readpage_end_io_hook = btree_readpage_end_io_hook,
47276 .submit_bio_hook = btree_submit_bio_hook,
47277diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47278index 559f724..a026171 100644
47279--- a/fs/btrfs/extent-tree.c
47280+++ b/fs/btrfs/extent-tree.c
47281@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47282 u64 group_start = group->key.objectid;
47283 new_extents = kmalloc(sizeof(*new_extents),
47284 GFP_NOFS);
47285+ if (!new_extents) {
47286+ ret = -ENOMEM;
47287+ goto out;
47288+ }
47289 nr_extents = 1;
47290 ret = get_new_locations(reloc_inode,
47291 extent_key,
47292diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47293index 36de250..7ec75c7 100644
47294--- a/fs/btrfs/extent_io.h
47295+++ b/fs/btrfs/extent_io.h
47296@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47297 struct bio *bio, int mirror_num,
47298 unsigned long bio_flags);
47299 struct extent_io_ops {
47300- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47301+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47302 u64 start, u64 end, int *page_started,
47303 unsigned long *nr_written);
47304- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47305- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47306+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47307+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47308 extent_submit_bio_hook_t *submit_bio_hook;
47309- int (*merge_bio_hook)(struct page *page, unsigned long offset,
47310+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47311 size_t size, struct bio *bio,
47312 unsigned long bio_flags);
47313- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47314- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47315+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47316+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47317 u64 start, u64 end,
47318 struct extent_state *state);
47319- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47320+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47321 u64 start, u64 end,
47322 struct extent_state *state);
47323- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47324+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47325 struct extent_state *state);
47326- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47327+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47328 struct extent_state *state, int uptodate);
47329- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47330+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47331 unsigned long old, unsigned long bits);
47332- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47333+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47334 unsigned long bits);
47335- int (*merge_extent_hook)(struct inode *inode,
47336+ int (* const merge_extent_hook)(struct inode *inode,
47337 struct extent_state *new,
47338 struct extent_state *other);
47339- int (*split_extent_hook)(struct inode *inode,
47340+ int (* const split_extent_hook)(struct inode *inode,
47341 struct extent_state *orig, u64 split);
47342- int (*write_cache_pages_lock_hook)(struct page *page);
47343+ int (* const write_cache_pages_lock_hook)(struct page *page);
47344 };
47345
47346 struct extent_io_tree {
47347@@ -88,7 +88,7 @@ struct extent_io_tree {
47348 u64 dirty_bytes;
47349 spinlock_t lock;
47350 spinlock_t buffer_lock;
47351- struct extent_io_ops *ops;
47352+ const struct extent_io_ops *ops;
47353 };
47354
47355 struct extent_state {
47356diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47357index cb2849f..3718fb4 100644
47358--- a/fs/btrfs/free-space-cache.c
47359+++ b/fs/btrfs/free-space-cache.c
47360@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47361
47362 while(1) {
47363 if (entry->bytes < bytes || entry->offset < min_start) {
47364- struct rb_node *node;
47365-
47366 node = rb_next(&entry->offset_index);
47367 if (!node)
47368 break;
47369@@ -1226,7 +1224,7 @@ again:
47370 */
47371 while (entry->bitmap || found_bitmap ||
47372 (!entry->bitmap && entry->bytes < min_bytes)) {
47373- struct rb_node *node = rb_next(&entry->offset_index);
47374+ node = rb_next(&entry->offset_index);
47375
47376 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47377 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47378diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47379index e03a836..323837e 100644
47380--- a/fs/btrfs/inode.c
47381+++ b/fs/btrfs/inode.c
47382@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47383 static const struct address_space_operations btrfs_aops;
47384 static const struct address_space_operations btrfs_symlink_aops;
47385 static const struct file_operations btrfs_dir_file_operations;
47386-static struct extent_io_ops btrfs_extent_io_ops;
47387+static const struct extent_io_ops btrfs_extent_io_ops;
47388
47389 static struct kmem_cache *btrfs_inode_cachep;
47390 struct kmem_cache *btrfs_trans_handle_cachep;
47391@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47392 1, 0, NULL, GFP_NOFS);
47393 while (start < end) {
47394 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47395+ BUG_ON(!async_cow);
47396 async_cow->inode = inode;
47397 async_cow->root = root;
47398 async_cow->locked_page = locked_page;
47399@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47400 inline_size = btrfs_file_extent_inline_item_len(leaf,
47401 btrfs_item_nr(leaf, path->slots[0]));
47402 tmp = kmalloc(inline_size, GFP_NOFS);
47403+ if (!tmp)
47404+ return -ENOMEM;
47405 ptr = btrfs_file_extent_inline_start(item);
47406
47407 read_extent_buffer(leaf, tmp, ptr, inline_size);
47408@@ -5410,7 +5413,7 @@ fail:
47409 return -ENOMEM;
47410 }
47411
47412-static int btrfs_getattr(struct vfsmount *mnt,
47413+int btrfs_getattr(struct vfsmount *mnt,
47414 struct dentry *dentry, struct kstat *stat)
47415 {
47416 struct inode *inode = dentry->d_inode;
47417@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47418 return 0;
47419 }
47420
47421+EXPORT_SYMBOL(btrfs_getattr);
47422+
47423+dev_t get_btrfs_dev_from_inode(struct inode *inode)
47424+{
47425+ return BTRFS_I(inode)->root->anon_super.s_dev;
47426+}
47427+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47428+
47429 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47430 struct inode *new_dir, struct dentry *new_dentry)
47431 {
47432@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47433 .fsync = btrfs_sync_file,
47434 };
47435
47436-static struct extent_io_ops btrfs_extent_io_ops = {
47437+static const struct extent_io_ops btrfs_extent_io_ops = {
47438 .fill_delalloc = run_delalloc_range,
47439 .submit_bio_hook = btrfs_submit_bio_hook,
47440 .merge_bio_hook = btrfs_merge_bio_hook,
47441diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47442index ab7ab53..94e0781 100644
47443--- a/fs/btrfs/relocation.c
47444+++ b/fs/btrfs/relocation.c
47445@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47446 }
47447 spin_unlock(&rc->reloc_root_tree.lock);
47448
47449- BUG_ON((struct btrfs_root *)node->data != root);
47450+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
47451
47452 if (!del) {
47453 spin_lock(&rc->reloc_root_tree.lock);
47454diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47455index a240b6f..4ce16ef 100644
47456--- a/fs/btrfs/sysfs.c
47457+++ b/fs/btrfs/sysfs.c
47458@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47459 complete(&root->kobj_unregister);
47460 }
47461
47462-static struct sysfs_ops btrfs_super_attr_ops = {
47463+static const struct sysfs_ops btrfs_super_attr_ops = {
47464 .show = btrfs_super_attr_show,
47465 .store = btrfs_super_attr_store,
47466 };
47467
47468-static struct sysfs_ops btrfs_root_attr_ops = {
47469+static const struct sysfs_ops btrfs_root_attr_ops = {
47470 .show = btrfs_root_attr_show,
47471 .store = btrfs_root_attr_store,
47472 };
47473diff --git a/fs/buffer.c b/fs/buffer.c
47474index 6fa5302..395d9f6 100644
47475--- a/fs/buffer.c
47476+++ b/fs/buffer.c
47477@@ -25,6 +25,7 @@
47478 #include <linux/percpu.h>
47479 #include <linux/slab.h>
47480 #include <linux/capability.h>
47481+#include <linux/security.h>
47482 #include <linux/blkdev.h>
47483 #include <linux/file.h>
47484 #include <linux/quotaops.h>
47485diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47486index 3797e00..ce776f6 100644
47487--- a/fs/cachefiles/bind.c
47488+++ b/fs/cachefiles/bind.c
47489@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47490 args);
47491
47492 /* start by checking things over */
47493- ASSERT(cache->fstop_percent >= 0 &&
47494- cache->fstop_percent < cache->fcull_percent &&
47495+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
47496 cache->fcull_percent < cache->frun_percent &&
47497 cache->frun_percent < 100);
47498
47499- ASSERT(cache->bstop_percent >= 0 &&
47500- cache->bstop_percent < cache->bcull_percent &&
47501+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
47502 cache->bcull_percent < cache->brun_percent &&
47503 cache->brun_percent < 100);
47504
47505diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47506index 4618516..bb30d01 100644
47507--- a/fs/cachefiles/daemon.c
47508+++ b/fs/cachefiles/daemon.c
47509@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47510 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47511 return -EIO;
47512
47513- if (datalen < 0 || datalen > PAGE_SIZE - 1)
47514+ if (datalen > PAGE_SIZE - 1)
47515 return -EOPNOTSUPP;
47516
47517 /* drag the command string into the kernel so we can parse it */
47518@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47519 if (args[0] != '%' || args[1] != '\0')
47520 return -EINVAL;
47521
47522- if (fstop < 0 || fstop >= cache->fcull_percent)
47523+ if (fstop >= cache->fcull_percent)
47524 return cachefiles_daemon_range_error(cache, args);
47525
47526 cache->fstop_percent = fstop;
47527@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47528 if (args[0] != '%' || args[1] != '\0')
47529 return -EINVAL;
47530
47531- if (bstop < 0 || bstop >= cache->bcull_percent)
47532+ if (bstop >= cache->bcull_percent)
47533 return cachefiles_daemon_range_error(cache, args);
47534
47535 cache->bstop_percent = bstop;
47536diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47537index f7c255f..fcd61de 100644
47538--- a/fs/cachefiles/internal.h
47539+++ b/fs/cachefiles/internal.h
47540@@ -56,7 +56,7 @@ struct cachefiles_cache {
47541 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47542 struct rb_root active_nodes; /* active nodes (can't be culled) */
47543 rwlock_t active_lock; /* lock for active_nodes */
47544- atomic_t gravecounter; /* graveyard uniquifier */
47545+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47546 unsigned frun_percent; /* when to stop culling (% files) */
47547 unsigned fcull_percent; /* when to start culling (% files) */
47548 unsigned fstop_percent; /* when to stop allocating (% files) */
47549@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47550 * proc.c
47551 */
47552 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47553-extern atomic_t cachefiles_lookup_histogram[HZ];
47554-extern atomic_t cachefiles_mkdir_histogram[HZ];
47555-extern atomic_t cachefiles_create_histogram[HZ];
47556+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47557+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47558+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47559
47560 extern int __init cachefiles_proc_init(void);
47561 extern void cachefiles_proc_cleanup(void);
47562 static inline
47563-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47564+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47565 {
47566 unsigned long jif = jiffies - start_jif;
47567 if (jif >= HZ)
47568 jif = HZ - 1;
47569- atomic_inc(&histogram[jif]);
47570+ atomic_inc_unchecked(&histogram[jif]);
47571 }
47572
47573 #else
47574diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47575index 14ac480..a62766c 100644
47576--- a/fs/cachefiles/namei.c
47577+++ b/fs/cachefiles/namei.c
47578@@ -250,7 +250,7 @@ try_again:
47579 /* first step is to make up a grave dentry in the graveyard */
47580 sprintf(nbuffer, "%08x%08x",
47581 (uint32_t) get_seconds(),
47582- (uint32_t) atomic_inc_return(&cache->gravecounter));
47583+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47584
47585 /* do the multiway lock magic */
47586 trap = lock_rename(cache->graveyard, dir);
47587diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47588index eccd339..4c1d995 100644
47589--- a/fs/cachefiles/proc.c
47590+++ b/fs/cachefiles/proc.c
47591@@ -14,9 +14,9 @@
47592 #include <linux/seq_file.h>
47593 #include "internal.h"
47594
47595-atomic_t cachefiles_lookup_histogram[HZ];
47596-atomic_t cachefiles_mkdir_histogram[HZ];
47597-atomic_t cachefiles_create_histogram[HZ];
47598+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47599+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47600+atomic_unchecked_t cachefiles_create_histogram[HZ];
47601
47602 /*
47603 * display the latency histogram
47604@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47605 return 0;
47606 default:
47607 index = (unsigned long) v - 3;
47608- x = atomic_read(&cachefiles_lookup_histogram[index]);
47609- y = atomic_read(&cachefiles_mkdir_histogram[index]);
47610- z = atomic_read(&cachefiles_create_histogram[index]);
47611+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47612+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47613+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47614 if (x == 0 && y == 0 && z == 0)
47615 return 0;
47616
47617diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47618index a6c8c6f..5cf8517 100644
47619--- a/fs/cachefiles/rdwr.c
47620+++ b/fs/cachefiles/rdwr.c
47621@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47622 old_fs = get_fs();
47623 set_fs(KERNEL_DS);
47624 ret = file->f_op->write(
47625- file, (const void __user *) data, len, &pos);
47626+ file, (const void __force_user *) data, len, &pos);
47627 set_fs(old_fs);
47628 kunmap(page);
47629 if (ret != len)
47630diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47631index 42cec2a..2aba466 100644
47632--- a/fs/cifs/cifs_debug.c
47633+++ b/fs/cifs/cifs_debug.c
47634@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47635 tcon = list_entry(tmp3,
47636 struct cifsTconInfo,
47637 tcon_list);
47638- atomic_set(&tcon->num_smbs_sent, 0);
47639- atomic_set(&tcon->num_writes, 0);
47640- atomic_set(&tcon->num_reads, 0);
47641- atomic_set(&tcon->num_oplock_brks, 0);
47642- atomic_set(&tcon->num_opens, 0);
47643- atomic_set(&tcon->num_posixopens, 0);
47644- atomic_set(&tcon->num_posixmkdirs, 0);
47645- atomic_set(&tcon->num_closes, 0);
47646- atomic_set(&tcon->num_deletes, 0);
47647- atomic_set(&tcon->num_mkdirs, 0);
47648- atomic_set(&tcon->num_rmdirs, 0);
47649- atomic_set(&tcon->num_renames, 0);
47650- atomic_set(&tcon->num_t2renames, 0);
47651- atomic_set(&tcon->num_ffirst, 0);
47652- atomic_set(&tcon->num_fnext, 0);
47653- atomic_set(&tcon->num_fclose, 0);
47654- atomic_set(&tcon->num_hardlinks, 0);
47655- atomic_set(&tcon->num_symlinks, 0);
47656- atomic_set(&tcon->num_locks, 0);
47657+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47658+ atomic_set_unchecked(&tcon->num_writes, 0);
47659+ atomic_set_unchecked(&tcon->num_reads, 0);
47660+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47661+ atomic_set_unchecked(&tcon->num_opens, 0);
47662+ atomic_set_unchecked(&tcon->num_posixopens, 0);
47663+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47664+ atomic_set_unchecked(&tcon->num_closes, 0);
47665+ atomic_set_unchecked(&tcon->num_deletes, 0);
47666+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
47667+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
47668+ atomic_set_unchecked(&tcon->num_renames, 0);
47669+ atomic_set_unchecked(&tcon->num_t2renames, 0);
47670+ atomic_set_unchecked(&tcon->num_ffirst, 0);
47671+ atomic_set_unchecked(&tcon->num_fnext, 0);
47672+ atomic_set_unchecked(&tcon->num_fclose, 0);
47673+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
47674+ atomic_set_unchecked(&tcon->num_symlinks, 0);
47675+ atomic_set_unchecked(&tcon->num_locks, 0);
47676 }
47677 }
47678 }
47679@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47680 if (tcon->need_reconnect)
47681 seq_puts(m, "\tDISCONNECTED ");
47682 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47683- atomic_read(&tcon->num_smbs_sent),
47684- atomic_read(&tcon->num_oplock_brks));
47685+ atomic_read_unchecked(&tcon->num_smbs_sent),
47686+ atomic_read_unchecked(&tcon->num_oplock_brks));
47687 seq_printf(m, "\nReads: %d Bytes: %lld",
47688- atomic_read(&tcon->num_reads),
47689+ atomic_read_unchecked(&tcon->num_reads),
47690 (long long)(tcon->bytes_read));
47691 seq_printf(m, "\nWrites: %d Bytes: %lld",
47692- atomic_read(&tcon->num_writes),
47693+ atomic_read_unchecked(&tcon->num_writes),
47694 (long long)(tcon->bytes_written));
47695 seq_printf(m, "\nFlushes: %d",
47696- atomic_read(&tcon->num_flushes));
47697+ atomic_read_unchecked(&tcon->num_flushes));
47698 seq_printf(m, "\nLocks: %d HardLinks: %d "
47699 "Symlinks: %d",
47700- atomic_read(&tcon->num_locks),
47701- atomic_read(&tcon->num_hardlinks),
47702- atomic_read(&tcon->num_symlinks));
47703+ atomic_read_unchecked(&tcon->num_locks),
47704+ atomic_read_unchecked(&tcon->num_hardlinks),
47705+ atomic_read_unchecked(&tcon->num_symlinks));
47706 seq_printf(m, "\nOpens: %d Closes: %d "
47707 "Deletes: %d",
47708- atomic_read(&tcon->num_opens),
47709- atomic_read(&tcon->num_closes),
47710- atomic_read(&tcon->num_deletes));
47711+ atomic_read_unchecked(&tcon->num_opens),
47712+ atomic_read_unchecked(&tcon->num_closes),
47713+ atomic_read_unchecked(&tcon->num_deletes));
47714 seq_printf(m, "\nPosix Opens: %d "
47715 "Posix Mkdirs: %d",
47716- atomic_read(&tcon->num_posixopens),
47717- atomic_read(&tcon->num_posixmkdirs));
47718+ atomic_read_unchecked(&tcon->num_posixopens),
47719+ atomic_read_unchecked(&tcon->num_posixmkdirs));
47720 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47721- atomic_read(&tcon->num_mkdirs),
47722- atomic_read(&tcon->num_rmdirs));
47723+ atomic_read_unchecked(&tcon->num_mkdirs),
47724+ atomic_read_unchecked(&tcon->num_rmdirs));
47725 seq_printf(m, "\nRenames: %d T2 Renames %d",
47726- atomic_read(&tcon->num_renames),
47727- atomic_read(&tcon->num_t2renames));
47728+ atomic_read_unchecked(&tcon->num_renames),
47729+ atomic_read_unchecked(&tcon->num_t2renames));
47730 seq_printf(m, "\nFindFirst: %d FNext %d "
47731 "FClose %d",
47732- atomic_read(&tcon->num_ffirst),
47733- atomic_read(&tcon->num_fnext),
47734- atomic_read(&tcon->num_fclose));
47735+ atomic_read_unchecked(&tcon->num_ffirst),
47736+ atomic_read_unchecked(&tcon->num_fnext),
47737+ atomic_read_unchecked(&tcon->num_fclose));
47738 }
47739 }
47740 }
47741diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47742index 1445407..68cb0dc 100644
47743--- a/fs/cifs/cifsfs.c
47744+++ b/fs/cifs/cifsfs.c
47745@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47746 cifs_req_cachep = kmem_cache_create("cifs_request",
47747 CIFSMaxBufSize +
47748 MAX_CIFS_HDR_SIZE, 0,
47749- SLAB_HWCACHE_ALIGN, NULL);
47750+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47751 if (cifs_req_cachep == NULL)
47752 return -ENOMEM;
47753
47754@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47755 efficient to alloc 1 per page off the slab compared to 17K (5page)
47756 alloc of large cifs buffers even when page debugging is on */
47757 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47758- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47759+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47760 NULL);
47761 if (cifs_sm_req_cachep == NULL) {
47762 mempool_destroy(cifs_req_poolp);
47763@@ -991,8 +991,8 @@ init_cifs(void)
47764 atomic_set(&bufAllocCount, 0);
47765 atomic_set(&smBufAllocCount, 0);
47766 #ifdef CONFIG_CIFS_STATS2
47767- atomic_set(&totBufAllocCount, 0);
47768- atomic_set(&totSmBufAllocCount, 0);
47769+ atomic_set_unchecked(&totBufAllocCount, 0);
47770+ atomic_set_unchecked(&totSmBufAllocCount, 0);
47771 #endif /* CONFIG_CIFS_STATS2 */
47772
47773 atomic_set(&midCount, 0);
47774diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
47775index e29581e..1c22bab 100644
47776--- a/fs/cifs/cifsglob.h
47777+++ b/fs/cifs/cifsglob.h
47778@@ -252,28 +252,28 @@ struct cifsTconInfo {
47779 __u16 Flags; /* optional support bits */
47780 enum statusEnum tidStatus;
47781 #ifdef CONFIG_CIFS_STATS
47782- atomic_t num_smbs_sent;
47783- atomic_t num_writes;
47784- atomic_t num_reads;
47785- atomic_t num_flushes;
47786- atomic_t num_oplock_brks;
47787- atomic_t num_opens;
47788- atomic_t num_closes;
47789- atomic_t num_deletes;
47790- atomic_t num_mkdirs;
47791- atomic_t num_posixopens;
47792- atomic_t num_posixmkdirs;
47793- atomic_t num_rmdirs;
47794- atomic_t num_renames;
47795- atomic_t num_t2renames;
47796- atomic_t num_ffirst;
47797- atomic_t num_fnext;
47798- atomic_t num_fclose;
47799- atomic_t num_hardlinks;
47800- atomic_t num_symlinks;
47801- atomic_t num_locks;
47802- atomic_t num_acl_get;
47803- atomic_t num_acl_set;
47804+ atomic_unchecked_t num_smbs_sent;
47805+ atomic_unchecked_t num_writes;
47806+ atomic_unchecked_t num_reads;
47807+ atomic_unchecked_t num_flushes;
47808+ atomic_unchecked_t num_oplock_brks;
47809+ atomic_unchecked_t num_opens;
47810+ atomic_unchecked_t num_closes;
47811+ atomic_unchecked_t num_deletes;
47812+ atomic_unchecked_t num_mkdirs;
47813+ atomic_unchecked_t num_posixopens;
47814+ atomic_unchecked_t num_posixmkdirs;
47815+ atomic_unchecked_t num_rmdirs;
47816+ atomic_unchecked_t num_renames;
47817+ atomic_unchecked_t num_t2renames;
47818+ atomic_unchecked_t num_ffirst;
47819+ atomic_unchecked_t num_fnext;
47820+ atomic_unchecked_t num_fclose;
47821+ atomic_unchecked_t num_hardlinks;
47822+ atomic_unchecked_t num_symlinks;
47823+ atomic_unchecked_t num_locks;
47824+ atomic_unchecked_t num_acl_get;
47825+ atomic_unchecked_t num_acl_set;
47826 #ifdef CONFIG_CIFS_STATS2
47827 unsigned long long time_writes;
47828 unsigned long long time_reads;
47829@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
47830 }
47831
47832 #ifdef CONFIG_CIFS_STATS
47833-#define cifs_stats_inc atomic_inc
47834+#define cifs_stats_inc atomic_inc_unchecked
47835
47836 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
47837 unsigned int bytes)
47838@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
47839 /* Various Debug counters */
47840 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
47841 #ifdef CONFIG_CIFS_STATS2
47842-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
47843-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
47844+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
47845+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
47846 #endif
47847 GLOBAL_EXTERN atomic_t smBufAllocCount;
47848 GLOBAL_EXTERN atomic_t midCount;
47849diff --git a/fs/cifs/link.c b/fs/cifs/link.c
47850index fc1e048..28b3441 100644
47851--- a/fs/cifs/link.c
47852+++ b/fs/cifs/link.c
47853@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
47854
47855 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
47856 {
47857- char *p = nd_get_link(nd);
47858+ const char *p = nd_get_link(nd);
47859 if (!IS_ERR(p))
47860 kfree(p);
47861 }
47862diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
47863index 95b82e8..12a538d 100644
47864--- a/fs/cifs/misc.c
47865+++ b/fs/cifs/misc.c
47866@@ -155,7 +155,7 @@ cifs_buf_get(void)
47867 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
47868 atomic_inc(&bufAllocCount);
47869 #ifdef CONFIG_CIFS_STATS2
47870- atomic_inc(&totBufAllocCount);
47871+ atomic_inc_unchecked(&totBufAllocCount);
47872 #endif /* CONFIG_CIFS_STATS2 */
47873 }
47874
47875@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
47876 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
47877 atomic_inc(&smBufAllocCount);
47878 #ifdef CONFIG_CIFS_STATS2
47879- atomic_inc(&totSmBufAllocCount);
47880+ atomic_inc_unchecked(&totSmBufAllocCount);
47881 #endif /* CONFIG_CIFS_STATS2 */
47882
47883 }
47884diff --git a/fs/coda/cache.c b/fs/coda/cache.c
47885index a5bf577..6d19845 100644
47886--- a/fs/coda/cache.c
47887+++ b/fs/coda/cache.c
47888@@ -24,14 +24,14 @@
47889 #include <linux/coda_fs_i.h>
47890 #include <linux/coda_cache.h>
47891
47892-static atomic_t permission_epoch = ATOMIC_INIT(0);
47893+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
47894
47895 /* replace or extend an acl cache hit */
47896 void coda_cache_enter(struct inode *inode, int mask)
47897 {
47898 struct coda_inode_info *cii = ITOC(inode);
47899
47900- cii->c_cached_epoch = atomic_read(&permission_epoch);
47901+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
47902 if (cii->c_uid != current_fsuid()) {
47903 cii->c_uid = current_fsuid();
47904 cii->c_cached_perm = mask;
47905@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
47906 void coda_cache_clear_inode(struct inode *inode)
47907 {
47908 struct coda_inode_info *cii = ITOC(inode);
47909- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
47910+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
47911 }
47912
47913 /* remove all acl caches */
47914 void coda_cache_clear_all(struct super_block *sb)
47915 {
47916- atomic_inc(&permission_epoch);
47917+ atomic_inc_unchecked(&permission_epoch);
47918 }
47919
47920
47921@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
47922
47923 hit = (mask & cii->c_cached_perm) == mask &&
47924 cii->c_uid == current_fsuid() &&
47925- cii->c_cached_epoch == atomic_read(&permission_epoch);
47926+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
47927
47928 return hit;
47929 }
47930diff --git a/fs/compat.c b/fs/compat.c
47931index d1e2411..b1eda5d 100644
47932--- a/fs/compat.c
47933+++ b/fs/compat.c
47934@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
47935 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
47936 {
47937 compat_ino_t ino = stat->ino;
47938- typeof(ubuf->st_uid) uid = 0;
47939- typeof(ubuf->st_gid) gid = 0;
47940+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
47941+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
47942 int err;
47943
47944 SET_UID(uid, stat->uid);
47945@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
47946
47947 set_fs(KERNEL_DS);
47948 /* The __user pointer cast is valid because of the set_fs() */
47949- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
47950+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
47951 set_fs(oldfs);
47952 /* truncating is ok because it's a user address */
47953 if (!ret)
47954@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
47955
47956 struct compat_readdir_callback {
47957 struct compat_old_linux_dirent __user *dirent;
47958+ struct file * file;
47959 int result;
47960 };
47961
47962@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
47963 buf->result = -EOVERFLOW;
47964 return -EOVERFLOW;
47965 }
47966+
47967+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47968+ return 0;
47969+
47970 buf->result++;
47971 dirent = buf->dirent;
47972 if (!access_ok(VERIFY_WRITE, dirent,
47973@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
47974
47975 buf.result = 0;
47976 buf.dirent = dirent;
47977+ buf.file = file;
47978
47979 error = vfs_readdir(file, compat_fillonedir, &buf);
47980 if (buf.result)
47981@@ -899,6 +905,7 @@ struct compat_linux_dirent {
47982 struct compat_getdents_callback {
47983 struct compat_linux_dirent __user *current_dir;
47984 struct compat_linux_dirent __user *previous;
47985+ struct file * file;
47986 int count;
47987 int error;
47988 };
47989@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
47990 buf->error = -EOVERFLOW;
47991 return -EOVERFLOW;
47992 }
47993+
47994+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47995+ return 0;
47996+
47997 dirent = buf->previous;
47998 if (dirent) {
47999 if (__put_user(offset, &dirent->d_off))
48000@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
48001 buf.previous = NULL;
48002 buf.count = count;
48003 buf.error = 0;
48004+ buf.file = file;
48005
48006 error = vfs_readdir(file, compat_filldir, &buf);
48007 if (error >= 0)
48008@@ -987,6 +999,7 @@ out:
48009 struct compat_getdents_callback64 {
48010 struct linux_dirent64 __user *current_dir;
48011 struct linux_dirent64 __user *previous;
48012+ struct file * file;
48013 int count;
48014 int error;
48015 };
48016@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
48017 buf->error = -EINVAL; /* only used if we fail.. */
48018 if (reclen > buf->count)
48019 return -EINVAL;
48020+
48021+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
48022+ return 0;
48023+
48024 dirent = buf->previous;
48025
48026 if (dirent) {
48027@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48028 buf.previous = NULL;
48029 buf.count = count;
48030 buf.error = 0;
48031+ buf.file = file;
48032
48033 error = vfs_readdir(file, compat_filldir64, &buf);
48034 if (error >= 0)
48035 error = buf.error;
48036 lastdirent = buf.previous;
48037 if (lastdirent) {
48038- typeof(lastdirent->d_off) d_off = file->f_pos;
48039+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48040 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48041 error = -EFAULT;
48042 else
48043@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48044 * verify all the pointers
48045 */
48046 ret = -EINVAL;
48047- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48048+ if (nr_segs > UIO_MAXIOV)
48049 goto out;
48050 if (!file->f_op)
48051 goto out;
48052@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
48053 compat_uptr_t __user *envp,
48054 struct pt_regs * regs)
48055 {
48056+#ifdef CONFIG_GRKERNSEC
48057+ struct file *old_exec_file;
48058+ struct acl_subject_label *old_acl;
48059+ struct rlimit old_rlim[RLIM_NLIMITS];
48060+#endif
48061 struct linux_binprm *bprm;
48062 struct file *file;
48063 struct files_struct *displaced;
48064 bool clear_in_exec;
48065 int retval;
48066+ const struct cred *cred = current_cred();
48067+
48068+ /*
48069+ * We move the actual failure in case of RLIMIT_NPROC excess from
48070+ * set*uid() to execve() because too many poorly written programs
48071+ * don't check setuid() return code. Here we additionally recheck
48072+ * whether NPROC limit is still exceeded.
48073+ */
48074+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48075+
48076+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48077+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48078+ retval = -EAGAIN;
48079+ goto out_ret;
48080+ }
48081+
48082+ /* We're below the limit (still or again), so we don't want to make
48083+ * further execve() calls fail. */
48084+ current->flags &= ~PF_NPROC_EXCEEDED;
48085
48086 retval = unshare_files(&displaced);
48087 if (retval)
48088@@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
48089 if (IS_ERR(file))
48090 goto out_unmark;
48091
48092+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
48093+ retval = -EPERM;
48094+ goto out_file;
48095+ }
48096+
48097 sched_exec();
48098
48099 bprm->file = file;
48100 bprm->filename = filename;
48101 bprm->interp = filename;
48102
48103+ if (gr_process_user_ban()) {
48104+ retval = -EPERM;
48105+ goto out_file;
48106+ }
48107+
48108+ retval = -EACCES;
48109+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48110+ goto out_file;
48111+
48112 retval = bprm_mm_init(bprm);
48113 if (retval)
48114 goto out_file;
48115@@ -1528,9 +1584,40 @@ int compat_do_execve(char * filename,
48116 if (retval < 0)
48117 goto out;
48118
48119+ if (!gr_tpe_allow(file)) {
48120+ retval = -EACCES;
48121+ goto out;
48122+ }
48123+
48124+ if (gr_check_crash_exec(file)) {
48125+ retval = -EACCES;
48126+ goto out;
48127+ }
48128+
48129+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48130+
48131+ gr_handle_exec_args_compat(bprm, argv);
48132+
48133+#ifdef CONFIG_GRKERNSEC
48134+ old_acl = current->acl;
48135+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48136+ old_exec_file = current->exec_file;
48137+ get_file(file);
48138+ current->exec_file = file;
48139+#endif
48140+
48141+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48142+ bprm->unsafe);
48143+ if (retval < 0)
48144+ goto out_fail;
48145+
48146 retval = search_binary_handler(bprm, regs);
48147 if (retval < 0)
48148- goto out;
48149+ goto out_fail;
48150+#ifdef CONFIG_GRKERNSEC
48151+ if (old_exec_file)
48152+ fput(old_exec_file);
48153+#endif
48154
48155 /* execve succeeded */
48156 current->fs->in_exec = 0;
48157@@ -1541,6 +1628,14 @@ int compat_do_execve(char * filename,
48158 put_files_struct(displaced);
48159 return retval;
48160
48161+out_fail:
48162+#ifdef CONFIG_GRKERNSEC
48163+ current->acl = old_acl;
48164+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48165+ fput(current->exec_file);
48166+ current->exec_file = old_exec_file;
48167+#endif
48168+
48169 out:
48170 if (bprm->mm) {
48171 acct_arg_size(bprm, 0);
48172@@ -1711,6 +1806,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48173 struct fdtable *fdt;
48174 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48175
48176+ pax_track_stack();
48177+
48178 if (n < 0)
48179 goto out_nofds;
48180
48181@@ -2151,7 +2248,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48182 oldfs = get_fs();
48183 set_fs(KERNEL_DS);
48184 /* The __user pointer casts are valid because of the set_fs() */
48185- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48186+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48187 set_fs(oldfs);
48188
48189 if (err)
48190diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48191index 0adced2..bbb1b0d 100644
48192--- a/fs/compat_binfmt_elf.c
48193+++ b/fs/compat_binfmt_elf.c
48194@@ -29,10 +29,12 @@
48195 #undef elfhdr
48196 #undef elf_phdr
48197 #undef elf_note
48198+#undef elf_dyn
48199 #undef elf_addr_t
48200 #define elfhdr elf32_hdr
48201 #define elf_phdr elf32_phdr
48202 #define elf_note elf32_note
48203+#define elf_dyn Elf32_Dyn
48204 #define elf_addr_t Elf32_Addr
48205
48206 /*
48207diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48208index d84e705..d8c364c 100644
48209--- a/fs/compat_ioctl.c
48210+++ b/fs/compat_ioctl.c
48211@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48212 up = (struct compat_video_spu_palette __user *) arg;
48213 err = get_user(palp, &up->palette);
48214 err |= get_user(length, &up->length);
48215+ if (err)
48216+ return -EFAULT;
48217
48218 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48219 err = put_user(compat_ptr(palp), &up_native->palette);
48220@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48221 return -EFAULT;
48222 if (__get_user(udata, &ss32->iomem_base))
48223 return -EFAULT;
48224- ss.iomem_base = compat_ptr(udata);
48225+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48226 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48227 __get_user(ss.port_high, &ss32->port_high))
48228 return -EFAULT;
48229@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48230 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48231 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48232 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48233- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48234+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48235 return -EFAULT;
48236
48237 return ioctl_preallocate(file, p);
48238diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48239index 8e48b52..f01ed91 100644
48240--- a/fs/configfs/dir.c
48241+++ b/fs/configfs/dir.c
48242@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48243 }
48244 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48245 struct configfs_dirent *next;
48246- const char * name;
48247+ const unsigned char * name;
48248+ char d_name[sizeof(next->s_dentry->d_iname)];
48249 int len;
48250
48251 next = list_entry(p, struct configfs_dirent,
48252@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48253 continue;
48254
48255 name = configfs_get_name(next);
48256- len = strlen(name);
48257+ if (next->s_dentry && name == next->s_dentry->d_iname) {
48258+ len = next->s_dentry->d_name.len;
48259+ memcpy(d_name, name, len);
48260+ name = d_name;
48261+ } else
48262+ len = strlen(name);
48263 if (next->s_dentry)
48264 ino = next->s_dentry->d_inode->i_ino;
48265 else
48266diff --git a/fs/dcache.c b/fs/dcache.c
48267index 44c0aea..2529092 100644
48268--- a/fs/dcache.c
48269+++ b/fs/dcache.c
48270@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48271
48272 static struct kmem_cache *dentry_cache __read_mostly;
48273
48274-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48275-
48276 /*
48277 * This is the single most critical data structure when it comes
48278 * to the dcache: the hashtable for lookups. Somebody should try
48279@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48280 mempages -= reserve;
48281
48282 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48283- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48284+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48285
48286 dcache_init();
48287 inode_init();
48288diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
48289index 39c6ee8..dcee0f1 100644
48290--- a/fs/debugfs/inode.c
48291+++ b/fs/debugfs/inode.c
48292@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
48293 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
48294 {
48295 return debugfs_create_file(name,
48296+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48297+ S_IFDIR | S_IRWXU,
48298+#else
48299 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
48300+#endif
48301 parent, NULL, NULL);
48302 }
48303 EXPORT_SYMBOL_GPL(debugfs_create_dir);
48304diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48305index c010ecf..a8d8c59 100644
48306--- a/fs/dlm/lockspace.c
48307+++ b/fs/dlm/lockspace.c
48308@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48309 kfree(ls);
48310 }
48311
48312-static struct sysfs_ops dlm_attr_ops = {
48313+static const struct sysfs_ops dlm_attr_ops = {
48314 .show = dlm_attr_show,
48315 .store = dlm_attr_store,
48316 };
48317diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48318index 7a5f1ac..205b034 100644
48319--- a/fs/ecryptfs/crypto.c
48320+++ b/fs/ecryptfs/crypto.c
48321@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48322 rc);
48323 goto out;
48324 }
48325- if (unlikely(ecryptfs_verbosity > 0)) {
48326- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48327- "with iv:\n");
48328- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48329- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48330- "encryption:\n");
48331- ecryptfs_dump_hex((char *)
48332- (page_address(page)
48333- + (extent_offset * crypt_stat->extent_size)),
48334- 8);
48335- }
48336 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48337 page, (extent_offset
48338 * crypt_stat->extent_size),
48339@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48340 goto out;
48341 }
48342 rc = 0;
48343- if (unlikely(ecryptfs_verbosity > 0)) {
48344- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48345- "rc = [%d]\n", (extent_base + extent_offset),
48346- rc);
48347- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48348- "encryption:\n");
48349- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48350- }
48351 out:
48352 return rc;
48353 }
48354@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48355 rc);
48356 goto out;
48357 }
48358- if (unlikely(ecryptfs_verbosity > 0)) {
48359- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48360- "with iv:\n");
48361- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48362- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48363- "decryption:\n");
48364- ecryptfs_dump_hex((char *)
48365- (page_address(enc_extent_page)
48366- + (extent_offset * crypt_stat->extent_size)),
48367- 8);
48368- }
48369 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48370 (extent_offset
48371 * crypt_stat->extent_size),
48372@@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48373 goto out;
48374 }
48375 rc = 0;
48376- if (unlikely(ecryptfs_verbosity > 0)) {
48377- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48378- "rc = [%d]\n", (extent_base + extent_offset),
48379- rc);
48380- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48381- "decryption:\n");
48382- ecryptfs_dump_hex((char *)(page_address(page)
48383- + (extent_offset
48384- * crypt_stat->extent_size)), 8);
48385- }
48386 out:
48387 return rc;
48388 }
48389diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48390index 88ba4d4..073f003 100644
48391--- a/fs/ecryptfs/inode.c
48392+++ b/fs/ecryptfs/inode.c
48393@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48394 old_fs = get_fs();
48395 set_fs(get_ds());
48396 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48397- (char __user *)lower_buf,
48398+ (char __force_user *)lower_buf,
48399 lower_bufsiz);
48400 set_fs(old_fs);
48401 if (rc < 0)
48402@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48403 }
48404 old_fs = get_fs();
48405 set_fs(get_ds());
48406- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48407+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48408 set_fs(old_fs);
48409 if (rc < 0)
48410 goto out_free;
48411diff --git a/fs/exec.c b/fs/exec.c
48412index 86fafc6..6272c0e 100644
48413--- a/fs/exec.c
48414+++ b/fs/exec.c
48415@@ -56,12 +56,28 @@
48416 #include <linux/fsnotify.h>
48417 #include <linux/fs_struct.h>
48418 #include <linux/pipe_fs_i.h>
48419+#include <linux/random.h>
48420+#include <linux/seq_file.h>
48421+
48422+#ifdef CONFIG_PAX_REFCOUNT
48423+#include <linux/kallsyms.h>
48424+#include <linux/kdebug.h>
48425+#endif
48426
48427 #include <asm/uaccess.h>
48428 #include <asm/mmu_context.h>
48429 #include <asm/tlb.h>
48430 #include "internal.h"
48431
48432+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48433+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48434+#endif
48435+
48436+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48437+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48438+EXPORT_SYMBOL(pax_set_initial_flags_func);
48439+#endif
48440+
48441 int core_uses_pid;
48442 char core_pattern[CORENAME_MAX_SIZE] = "core";
48443 unsigned int core_pipe_limit;
48444@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48445 int write)
48446 {
48447 struct page *page;
48448- int ret;
48449
48450-#ifdef CONFIG_STACK_GROWSUP
48451- if (write) {
48452- ret = expand_stack_downwards(bprm->vma, pos);
48453- if (ret < 0)
48454- return NULL;
48455- }
48456-#endif
48457- ret = get_user_pages(current, bprm->mm, pos,
48458- 1, write, 1, &page, NULL);
48459- if (ret <= 0)
48460+ if (0 > expand_stack_downwards(bprm->vma, pos))
48461+ return NULL;
48462+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48463 return NULL;
48464
48465 if (write) {
48466@@ -205,6 +213,17 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48467 if (size <= ARG_MAX)
48468 return page;
48469
48470+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48471+ // only allow 1MB for argv+env on suid/sgid binaries
48472+ // to prevent easy ASLR exhaustion
48473+ if (((bprm->cred->euid != current_euid()) ||
48474+ (bprm->cred->egid != current_egid())) &&
48475+ (size > (1024 * 1024))) {
48476+ put_page(page);
48477+ return NULL;
48478+ }
48479+#endif
48480+
48481 /*
48482 * Limit to 1/4-th the stack size for the argv+env strings.
48483 * This ensures that:
48484@@ -263,6 +282,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48485 vma->vm_end = STACK_TOP_MAX;
48486 vma->vm_start = vma->vm_end - PAGE_SIZE;
48487 vma->vm_flags = VM_STACK_FLAGS;
48488+
48489+#ifdef CONFIG_PAX_SEGMEXEC
48490+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48491+#endif
48492+
48493 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48494
48495 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48496@@ -276,6 +300,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48497 mm->stack_vm = mm->total_vm = 1;
48498 up_write(&mm->mmap_sem);
48499 bprm->p = vma->vm_end - sizeof(void *);
48500+
48501+#ifdef CONFIG_PAX_RANDUSTACK
48502+ if (randomize_va_space)
48503+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48504+#endif
48505+
48506 return 0;
48507 err:
48508 up_write(&mm->mmap_sem);
48509@@ -510,7 +540,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48510 int r;
48511 mm_segment_t oldfs = get_fs();
48512 set_fs(KERNEL_DS);
48513- r = copy_strings(argc, (char __user * __user *)argv, bprm);
48514+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48515 set_fs(oldfs);
48516 return r;
48517 }
48518@@ -540,7 +570,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48519 unsigned long new_end = old_end - shift;
48520 struct mmu_gather *tlb;
48521
48522- BUG_ON(new_start > new_end);
48523+ if (new_start >= new_end || new_start < mmap_min_addr)
48524+ return -ENOMEM;
48525
48526 /*
48527 * ensure there are no vmas between where we want to go
48528@@ -549,6 +580,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48529 if (vma != find_vma(mm, new_start))
48530 return -EFAULT;
48531
48532+#ifdef CONFIG_PAX_SEGMEXEC
48533+ BUG_ON(pax_find_mirror_vma(vma));
48534+#endif
48535+
48536 /*
48537 * cover the whole range: [new_start, old_end)
48538 */
48539@@ -630,10 +665,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48540 stack_top = arch_align_stack(stack_top);
48541 stack_top = PAGE_ALIGN(stack_top);
48542
48543- if (unlikely(stack_top < mmap_min_addr) ||
48544- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48545- return -ENOMEM;
48546-
48547 stack_shift = vma->vm_end - stack_top;
48548
48549 bprm->p -= stack_shift;
48550@@ -645,6 +676,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48551 bprm->exec -= stack_shift;
48552
48553 down_write(&mm->mmap_sem);
48554+
48555+ /* Move stack pages down in memory. */
48556+ if (stack_shift) {
48557+ ret = shift_arg_pages(vma, stack_shift);
48558+ if (ret)
48559+ goto out_unlock;
48560+ }
48561+
48562 vm_flags = VM_STACK_FLAGS;
48563
48564 /*
48565@@ -658,19 +697,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48566 vm_flags &= ~VM_EXEC;
48567 vm_flags |= mm->def_flags;
48568
48569+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48570+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48571+ vm_flags &= ~VM_EXEC;
48572+
48573+#ifdef CONFIG_PAX_MPROTECT
48574+ if (mm->pax_flags & MF_PAX_MPROTECT)
48575+ vm_flags &= ~VM_MAYEXEC;
48576+#endif
48577+
48578+ }
48579+#endif
48580+
48581 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48582 vm_flags);
48583 if (ret)
48584 goto out_unlock;
48585 BUG_ON(prev != vma);
48586
48587- /* Move stack pages down in memory. */
48588- if (stack_shift) {
48589- ret = shift_arg_pages(vma, stack_shift);
48590- if (ret)
48591- goto out_unlock;
48592- }
48593-
48594 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48595 stack_size = vma->vm_end - vma->vm_start;
48596 /*
48597@@ -744,7 +788,7 @@ int kernel_read(struct file *file, loff_t offset,
48598 old_fs = get_fs();
48599 set_fs(get_ds());
48600 /* The cast to a user pointer is valid due to the set_fs() */
48601- result = vfs_read(file, (void __user *)addr, count, &pos);
48602+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
48603 set_fs(old_fs);
48604 return result;
48605 }
48606@@ -985,6 +1029,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
48607 perf_event_comm(tsk);
48608 }
48609
48610+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
48611+{
48612+ int i, ch;
48613+
48614+ /* Copies the binary name from after last slash */
48615+ for (i = 0; (ch = *(fn++)) != '\0';) {
48616+ if (ch == '/')
48617+ i = 0; /* overwrite what we wrote */
48618+ else
48619+ if (i < len - 1)
48620+ tcomm[i++] = ch;
48621+ }
48622+ tcomm[i] = '\0';
48623+}
48624+
48625 int flush_old_exec(struct linux_binprm * bprm)
48626 {
48627 int retval;
48628@@ -999,6 +1058,7 @@ int flush_old_exec(struct linux_binprm * bprm)
48629
48630 set_mm_exe_file(bprm->mm, bprm->file);
48631
48632+ filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
48633 /*
48634 * Release all of the old mmap stuff
48635 */
48636@@ -1023,10 +1083,6 @@ EXPORT_SYMBOL(flush_old_exec);
48637
48638 void setup_new_exec(struct linux_binprm * bprm)
48639 {
48640- int i, ch;
48641- char * name;
48642- char tcomm[sizeof(current->comm)];
48643-
48644 arch_pick_mmap_layout(current->mm);
48645
48646 /* This is the point of no return */
48647@@ -1037,18 +1093,7 @@ void setup_new_exec(struct linux_binprm * bprm)
48648 else
48649 set_dumpable(current->mm, suid_dumpable);
48650
48651- name = bprm->filename;
48652-
48653- /* Copies the binary name from after last slash */
48654- for (i=0; (ch = *(name++)) != '\0';) {
48655- if (ch == '/')
48656- i = 0; /* overwrite what we wrote */
48657- else
48658- if (i < (sizeof(tcomm) - 1))
48659- tcomm[i++] = ch;
48660- }
48661- tcomm[i] = '\0';
48662- set_task_comm(current, tcomm);
48663+ set_task_comm(current, bprm->tcomm);
48664
48665 /* Set the new mm task size. We have to do that late because it may
48666 * depend on TIF_32BIT which is only updated in flush_thread() on
48667@@ -1152,7 +1197,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48668 }
48669 rcu_read_unlock();
48670
48671- if (p->fs->users > n_fs) {
48672+ if (atomic_read(&p->fs->users) > n_fs) {
48673 bprm->unsafe |= LSM_UNSAFE_SHARE;
48674 } else {
48675 res = -EAGAIN;
48676@@ -1339,6 +1384,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
48677
48678 EXPORT_SYMBOL(search_binary_handler);
48679
48680+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48681+atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
48682+#endif
48683+
48684 /*
48685 * sys_execve() executes a new program.
48686 */
48687@@ -1347,11 +1396,35 @@ int do_execve(char * filename,
48688 char __user *__user *envp,
48689 struct pt_regs * regs)
48690 {
48691+#ifdef CONFIG_GRKERNSEC
48692+ struct file *old_exec_file;
48693+ struct acl_subject_label *old_acl;
48694+ struct rlimit old_rlim[RLIM_NLIMITS];
48695+#endif
48696 struct linux_binprm *bprm;
48697 struct file *file;
48698 struct files_struct *displaced;
48699 bool clear_in_exec;
48700 int retval;
48701+ const struct cred *cred = current_cred();
48702+
48703+ /*
48704+ * We move the actual failure in case of RLIMIT_NPROC excess from
48705+ * set*uid() to execve() because too many poorly written programs
48706+ * don't check setuid() return code. Here we additionally recheck
48707+ * whether NPROC limit is still exceeded.
48708+ */
48709+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48710+
48711+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48712+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48713+ retval = -EAGAIN;
48714+ goto out_ret;
48715+ }
48716+
48717+ /* We're below the limit (still or again), so we don't want to make
48718+ * further execve() calls fail. */
48719+ current->flags &= ~PF_NPROC_EXCEEDED;
48720
48721 retval = unshare_files(&displaced);
48722 if (retval)
48723@@ -1377,12 +1450,27 @@ int do_execve(char * filename,
48724 if (IS_ERR(file))
48725 goto out_unmark;
48726
48727+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
48728+ retval = -EPERM;
48729+ goto out_file;
48730+ }
48731+
48732 sched_exec();
48733
48734 bprm->file = file;
48735 bprm->filename = filename;
48736 bprm->interp = filename;
48737
48738+ if (gr_process_user_ban()) {
48739+ retval = -EPERM;
48740+ goto out_file;
48741+ }
48742+
48743+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48744+ retval = -EACCES;
48745+ goto out_file;
48746+ }
48747+
48748 retval = bprm_mm_init(bprm);
48749 if (retval)
48750 goto out_file;
48751@@ -1412,12 +1500,47 @@ int do_execve(char * filename,
48752 if (retval < 0)
48753 goto out;
48754
48755+ if (!gr_tpe_allow(file)) {
48756+ retval = -EACCES;
48757+ goto out;
48758+ }
48759+
48760+ if (gr_check_crash_exec(file)) {
48761+ retval = -EACCES;
48762+ goto out;
48763+ }
48764+
48765+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48766+
48767+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
48768+
48769+#ifdef CONFIG_GRKERNSEC
48770+ old_acl = current->acl;
48771+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48772+ old_exec_file = current->exec_file;
48773+ get_file(file);
48774+ current->exec_file = file;
48775+#endif
48776+
48777+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48778+ bprm->unsafe);
48779+ if (retval < 0)
48780+ goto out_fail;
48781+
48782 current->flags &= ~PF_KTHREAD;
48783 retval = search_binary_handler(bprm,regs);
48784 if (retval < 0)
48785- goto out;
48786+ goto out_fail;
48787+#ifdef CONFIG_GRKERNSEC
48788+ if (old_exec_file)
48789+ fput(old_exec_file);
48790+#endif
48791
48792 /* execve succeeded */
48793+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48794+ current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
48795+#endif
48796+
48797 current->fs->in_exec = 0;
48798 current->in_execve = 0;
48799 acct_update_integrals(current);
48800@@ -1426,6 +1549,14 @@ int do_execve(char * filename,
48801 put_files_struct(displaced);
48802 return retval;
48803
48804+out_fail:
48805+#ifdef CONFIG_GRKERNSEC
48806+ current->acl = old_acl;
48807+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48808+ fput(current->exec_file);
48809+ current->exec_file = old_exec_file;
48810+#endif
48811+
48812 out:
48813 if (bprm->mm) {
48814 acct_arg_size(bprm, 0);
48815@@ -1591,6 +1722,220 @@ out:
48816 return ispipe;
48817 }
48818
48819+int pax_check_flags(unsigned long *flags)
48820+{
48821+ int retval = 0;
48822+
48823+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
48824+ if (*flags & MF_PAX_SEGMEXEC)
48825+ {
48826+ *flags &= ~MF_PAX_SEGMEXEC;
48827+ retval = -EINVAL;
48828+ }
48829+#endif
48830+
48831+ if ((*flags & MF_PAX_PAGEEXEC)
48832+
48833+#ifdef CONFIG_PAX_PAGEEXEC
48834+ && (*flags & MF_PAX_SEGMEXEC)
48835+#endif
48836+
48837+ )
48838+ {
48839+ *flags &= ~MF_PAX_PAGEEXEC;
48840+ retval = -EINVAL;
48841+ }
48842+
48843+ if ((*flags & MF_PAX_MPROTECT)
48844+
48845+#ifdef CONFIG_PAX_MPROTECT
48846+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48847+#endif
48848+
48849+ )
48850+ {
48851+ *flags &= ~MF_PAX_MPROTECT;
48852+ retval = -EINVAL;
48853+ }
48854+
48855+ if ((*flags & MF_PAX_EMUTRAMP)
48856+
48857+#ifdef CONFIG_PAX_EMUTRAMP
48858+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48859+#endif
48860+
48861+ )
48862+ {
48863+ *flags &= ~MF_PAX_EMUTRAMP;
48864+ retval = -EINVAL;
48865+ }
48866+
48867+ return retval;
48868+}
48869+
48870+EXPORT_SYMBOL(pax_check_flags);
48871+
48872+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48873+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
48874+{
48875+ struct task_struct *tsk = current;
48876+ struct mm_struct *mm = current->mm;
48877+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
48878+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
48879+ char *path_exec = NULL;
48880+ char *path_fault = NULL;
48881+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
48882+
48883+ if (buffer_exec && buffer_fault) {
48884+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
48885+
48886+ down_read(&mm->mmap_sem);
48887+ vma = mm->mmap;
48888+ while (vma && (!vma_exec || !vma_fault)) {
48889+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
48890+ vma_exec = vma;
48891+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
48892+ vma_fault = vma;
48893+ vma = vma->vm_next;
48894+ }
48895+ if (vma_exec) {
48896+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
48897+ if (IS_ERR(path_exec))
48898+ path_exec = "<path too long>";
48899+ else {
48900+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
48901+ if (path_exec) {
48902+ *path_exec = 0;
48903+ path_exec = buffer_exec;
48904+ } else
48905+ path_exec = "<path too long>";
48906+ }
48907+ }
48908+ if (vma_fault) {
48909+ start = vma_fault->vm_start;
48910+ end = vma_fault->vm_end;
48911+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
48912+ if (vma_fault->vm_file) {
48913+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
48914+ if (IS_ERR(path_fault))
48915+ path_fault = "<path too long>";
48916+ else {
48917+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
48918+ if (path_fault) {
48919+ *path_fault = 0;
48920+ path_fault = buffer_fault;
48921+ } else
48922+ path_fault = "<path too long>";
48923+ }
48924+ } else
48925+ path_fault = "<anonymous mapping>";
48926+ }
48927+ up_read(&mm->mmap_sem);
48928+ }
48929+ if (tsk->signal->curr_ip)
48930+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
48931+ else
48932+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
48933+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
48934+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
48935+ task_uid(tsk), task_euid(tsk), pc, sp);
48936+ free_page((unsigned long)buffer_exec);
48937+ free_page((unsigned long)buffer_fault);
48938+ pax_report_insns(regs, pc, sp);
48939+ do_coredump(SIGKILL, SIGKILL, regs);
48940+}
48941+#endif
48942+
48943+#ifdef CONFIG_PAX_REFCOUNT
48944+void pax_report_refcount_overflow(struct pt_regs *regs)
48945+{
48946+ if (current->signal->curr_ip)
48947+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48948+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
48949+ else
48950+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48951+ current->comm, task_pid_nr(current), current_uid(), current_euid());
48952+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
48953+ show_regs(regs);
48954+ force_sig_specific(SIGKILL, current);
48955+}
48956+#endif
48957+
48958+#ifdef CONFIG_PAX_USERCOPY
48959+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
48960+int object_is_on_stack(const void *obj, unsigned long len)
48961+{
48962+ const void * const stack = task_stack_page(current);
48963+ const void * const stackend = stack + THREAD_SIZE;
48964+
48965+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48966+ const void *frame = NULL;
48967+ const void *oldframe;
48968+#endif
48969+
48970+ if (obj + len < obj)
48971+ return -1;
48972+
48973+ if (obj + len <= stack || stackend <= obj)
48974+ return 0;
48975+
48976+ if (obj < stack || stackend < obj + len)
48977+ return -1;
48978+
48979+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48980+ oldframe = __builtin_frame_address(1);
48981+ if (oldframe)
48982+ frame = __builtin_frame_address(2);
48983+ /*
48984+ low ----------------------------------------------> high
48985+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
48986+ ^----------------^
48987+ allow copies only within here
48988+ */
48989+ while (stack <= frame && frame < stackend) {
48990+ /* if obj + len extends past the last frame, this
48991+ check won't pass and the next frame will be 0,
48992+ causing us to bail out and correctly report
48993+ the copy as invalid
48994+ */
48995+ if (obj + len <= frame)
48996+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
48997+ oldframe = frame;
48998+ frame = *(const void * const *)frame;
48999+ }
49000+ return -1;
49001+#else
49002+ return 1;
49003+#endif
49004+}
49005+
49006+
49007+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
49008+{
49009+ if (current->signal->curr_ip)
49010+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49011+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49012+ else
49013+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
49014+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
49015+
49016+ dump_stack();
49017+ gr_handle_kernel_exploit();
49018+ do_group_exit(SIGKILL);
49019+}
49020+#endif
49021+
49022+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
49023+void pax_track_stack(void)
49024+{
49025+ unsigned long sp = (unsigned long)&sp;
49026+ if (sp < current_thread_info()->lowest_stack &&
49027+ sp > (unsigned long)task_stack_page(current))
49028+ current_thread_info()->lowest_stack = sp;
49029+}
49030+EXPORT_SYMBOL(pax_track_stack);
49031+#endif
49032+
49033 static int zap_process(struct task_struct *start)
49034 {
49035 struct task_struct *t;
49036@@ -1793,17 +2138,17 @@ static void wait_for_dump_helpers(struct file *file)
49037 pipe = file->f_path.dentry->d_inode->i_pipe;
49038
49039 pipe_lock(pipe);
49040- pipe->readers++;
49041- pipe->writers--;
49042+ atomic_inc(&pipe->readers);
49043+ atomic_dec(&pipe->writers);
49044
49045- while ((pipe->readers > 1) && (!signal_pending(current))) {
49046+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
49047 wake_up_interruptible_sync(&pipe->wait);
49048 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
49049 pipe_wait(pipe);
49050 }
49051
49052- pipe->readers--;
49053- pipe->writers++;
49054+ atomic_dec(&pipe->readers);
49055+ atomic_inc(&pipe->writers);
49056 pipe_unlock(pipe);
49057
49058 }
49059@@ -1826,10 +2171,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49060 char **helper_argv = NULL;
49061 int helper_argc = 0;
49062 int dump_count = 0;
49063- static atomic_t core_dump_count = ATOMIC_INIT(0);
49064+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
49065
49066 audit_core_dumps(signr);
49067
49068+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
49069+ gr_handle_brute_attach(current, mm->flags);
49070+
49071 binfmt = mm->binfmt;
49072 if (!binfmt || !binfmt->core_dump)
49073 goto fail;
49074@@ -1874,6 +2222,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49075 */
49076 clear_thread_flag(TIF_SIGPENDING);
49077
49078+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
49079+
49080 /*
49081 * lock_kernel() because format_corename() is controlled by sysctl, which
49082 * uses lock_kernel()
49083@@ -1908,7 +2258,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49084 goto fail_unlock;
49085 }
49086
49087- dump_count = atomic_inc_return(&core_dump_count);
49088+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
49089 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49090 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49091 task_tgid_vnr(current), current->comm);
49092@@ -1972,7 +2322,7 @@ close_fail:
49093 filp_close(file, NULL);
49094 fail_dropcount:
49095 if (dump_count)
49096- atomic_dec(&core_dump_count);
49097+ atomic_dec_unchecked(&core_dump_count);
49098 fail_unlock:
49099 if (helper_argv)
49100 argv_free(helper_argv);
49101diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49102index 7f8d2e5..a1abdbb 100644
49103--- a/fs/ext2/balloc.c
49104+++ b/fs/ext2/balloc.c
49105@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49106
49107 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49108 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49109- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49110+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49111 sbi->s_resuid != current_fsuid() &&
49112 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49113 return 0;
49114diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49115index 27967f9..9f2a5fb 100644
49116--- a/fs/ext3/balloc.c
49117+++ b/fs/ext3/balloc.c
49118@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49119
49120 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49121 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49122- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49123+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49124 sbi->s_resuid != current_fsuid() &&
49125 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49126 return 0;
49127diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49128index e85b63c..80398e6 100644
49129--- a/fs/ext4/balloc.c
49130+++ b/fs/ext4/balloc.c
49131@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49132 /* Hm, nope. Are (enough) root reserved blocks available? */
49133 if (sbi->s_resuid == current_fsuid() ||
49134 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49135- capable(CAP_SYS_RESOURCE)) {
49136+ capable_nolog(CAP_SYS_RESOURCE)) {
49137 if (free_blocks >= (nblocks + dirty_blocks))
49138 return 1;
49139 }
49140diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49141index 67c46ed..1f237e5 100644
49142--- a/fs/ext4/ext4.h
49143+++ b/fs/ext4/ext4.h
49144@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49145
49146 /* stats for buddy allocator */
49147 spinlock_t s_mb_pa_lock;
49148- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49149- atomic_t s_bal_success; /* we found long enough chunks */
49150- atomic_t s_bal_allocated; /* in blocks */
49151- atomic_t s_bal_ex_scanned; /* total extents scanned */
49152- atomic_t s_bal_goals; /* goal hits */
49153- atomic_t s_bal_breaks; /* too long searches */
49154- atomic_t s_bal_2orders; /* 2^order hits */
49155+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49156+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49157+ atomic_unchecked_t s_bal_allocated; /* in blocks */
49158+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49159+ atomic_unchecked_t s_bal_goals; /* goal hits */
49160+ atomic_unchecked_t s_bal_breaks; /* too long searches */
49161+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49162 spinlock_t s_bal_lock;
49163 unsigned long s_mb_buddies_generated;
49164 unsigned long long s_mb_generation_time;
49165- atomic_t s_mb_lost_chunks;
49166- atomic_t s_mb_preallocated;
49167- atomic_t s_mb_discarded;
49168+ atomic_unchecked_t s_mb_lost_chunks;
49169+ atomic_unchecked_t s_mb_preallocated;
49170+ atomic_unchecked_t s_mb_discarded;
49171 atomic_t s_lock_busy;
49172
49173 /* locality groups */
49174diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49175index 2a60541..7439d61 100644
49176--- a/fs/ext4/file.c
49177+++ b/fs/ext4/file.c
49178@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49179 cp = d_path(&path, buf, sizeof(buf));
49180 path_put(&path);
49181 if (!IS_ERR(cp)) {
49182- memcpy(sbi->s_es->s_last_mounted, cp,
49183- sizeof(sbi->s_es->s_last_mounted));
49184+ strlcpy(sbi->s_es->s_last_mounted, cp,
49185+ sizeof(sbi->s_es->s_last_mounted));
49186 sb->s_dirt = 1;
49187 }
49188 }
49189diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49190index 42bac1b..0aab9d8 100644
49191--- a/fs/ext4/mballoc.c
49192+++ b/fs/ext4/mballoc.c
49193@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49194 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49195
49196 if (EXT4_SB(sb)->s_mb_stats)
49197- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49198+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49199
49200 break;
49201 }
49202@@ -2131,7 +2131,7 @@ repeat:
49203 ac->ac_status = AC_STATUS_CONTINUE;
49204 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49205 cr = 3;
49206- atomic_inc(&sbi->s_mb_lost_chunks);
49207+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49208 goto repeat;
49209 }
49210 }
49211@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49212 ext4_grpblk_t counters[16];
49213 } sg;
49214
49215+ pax_track_stack();
49216+
49217 group--;
49218 if (group == 0)
49219 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49220@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49221 if (sbi->s_mb_stats) {
49222 printk(KERN_INFO
49223 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49224- atomic_read(&sbi->s_bal_allocated),
49225- atomic_read(&sbi->s_bal_reqs),
49226- atomic_read(&sbi->s_bal_success));
49227+ atomic_read_unchecked(&sbi->s_bal_allocated),
49228+ atomic_read_unchecked(&sbi->s_bal_reqs),
49229+ atomic_read_unchecked(&sbi->s_bal_success));
49230 printk(KERN_INFO
49231 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49232 "%u 2^N hits, %u breaks, %u lost\n",
49233- atomic_read(&sbi->s_bal_ex_scanned),
49234- atomic_read(&sbi->s_bal_goals),
49235- atomic_read(&sbi->s_bal_2orders),
49236- atomic_read(&sbi->s_bal_breaks),
49237- atomic_read(&sbi->s_mb_lost_chunks));
49238+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49239+ atomic_read_unchecked(&sbi->s_bal_goals),
49240+ atomic_read_unchecked(&sbi->s_bal_2orders),
49241+ atomic_read_unchecked(&sbi->s_bal_breaks),
49242+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49243 printk(KERN_INFO
49244 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49245 sbi->s_mb_buddies_generated++,
49246 sbi->s_mb_generation_time);
49247 printk(KERN_INFO
49248 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49249- atomic_read(&sbi->s_mb_preallocated),
49250- atomic_read(&sbi->s_mb_discarded));
49251+ atomic_read_unchecked(&sbi->s_mb_preallocated),
49252+ atomic_read_unchecked(&sbi->s_mb_discarded));
49253 }
49254
49255 free_percpu(sbi->s_locality_groups);
49256@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49257 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49258
49259 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49260- atomic_inc(&sbi->s_bal_reqs);
49261- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49262+ atomic_inc_unchecked(&sbi->s_bal_reqs);
49263+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49264 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49265- atomic_inc(&sbi->s_bal_success);
49266- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49267+ atomic_inc_unchecked(&sbi->s_bal_success);
49268+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49269 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49270 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49271- atomic_inc(&sbi->s_bal_goals);
49272+ atomic_inc_unchecked(&sbi->s_bal_goals);
49273 if (ac->ac_found > sbi->s_mb_max_to_scan)
49274- atomic_inc(&sbi->s_bal_breaks);
49275+ atomic_inc_unchecked(&sbi->s_bal_breaks);
49276 }
49277
49278 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49279@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49280 trace_ext4_mb_new_inode_pa(ac, pa);
49281
49282 ext4_mb_use_inode_pa(ac, pa);
49283- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49284+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49285
49286 ei = EXT4_I(ac->ac_inode);
49287 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49288@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49289 trace_ext4_mb_new_group_pa(ac, pa);
49290
49291 ext4_mb_use_group_pa(ac, pa);
49292- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49293+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49294
49295 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49296 lg = ac->ac_lg;
49297@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49298 * from the bitmap and continue.
49299 */
49300 }
49301- atomic_add(free, &sbi->s_mb_discarded);
49302+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
49303
49304 return err;
49305 }
49306@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49307 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49308 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49309 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49310- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49311+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49312
49313 if (ac) {
49314 ac->ac_sb = sb;
49315diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49316index f1e7077..edd86b2 100644
49317--- a/fs/ext4/super.c
49318+++ b/fs/ext4/super.c
49319@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49320 }
49321
49322
49323-static struct sysfs_ops ext4_attr_ops = {
49324+static const struct sysfs_ops ext4_attr_ops = {
49325 .show = ext4_attr_show,
49326 .store = ext4_attr_store,
49327 };
49328diff --git a/fs/fcntl.c b/fs/fcntl.c
49329index 97e01dc..e9aab2d 100644
49330--- a/fs/fcntl.c
49331+++ b/fs/fcntl.c
49332@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49333 if (err)
49334 return err;
49335
49336+ if (gr_handle_chroot_fowner(pid, type))
49337+ return -ENOENT;
49338+ if (gr_check_protected_task_fowner(pid, type))
49339+ return -EACCES;
49340+
49341 f_modown(filp, pid, type, force);
49342 return 0;
49343 }
49344@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49345
49346 static int f_setown_ex(struct file *filp, unsigned long arg)
49347 {
49348- struct f_owner_ex * __user owner_p = (void * __user)arg;
49349+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49350 struct f_owner_ex owner;
49351 struct pid *pid;
49352 int type;
49353@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49354
49355 static int f_getown_ex(struct file *filp, unsigned long arg)
49356 {
49357- struct f_owner_ex * __user owner_p = (void * __user)arg;
49358+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49359 struct f_owner_ex owner;
49360 int ret = 0;
49361
49362@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49363 switch (cmd) {
49364 case F_DUPFD:
49365 case F_DUPFD_CLOEXEC:
49366+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49367 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49368 break;
49369 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49370diff --git a/fs/fifo.c b/fs/fifo.c
49371index f8f97b8..b1f2259 100644
49372--- a/fs/fifo.c
49373+++ b/fs/fifo.c
49374@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49375 */
49376 filp->f_op = &read_pipefifo_fops;
49377 pipe->r_counter++;
49378- if (pipe->readers++ == 0)
49379+ if (atomic_inc_return(&pipe->readers) == 1)
49380 wake_up_partner(inode);
49381
49382- if (!pipe->writers) {
49383+ if (!atomic_read(&pipe->writers)) {
49384 if ((filp->f_flags & O_NONBLOCK)) {
49385 /* suppress POLLHUP until we have
49386 * seen a writer */
49387@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49388 * errno=ENXIO when there is no process reading the FIFO.
49389 */
49390 ret = -ENXIO;
49391- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49392+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49393 goto err;
49394
49395 filp->f_op = &write_pipefifo_fops;
49396 pipe->w_counter++;
49397- if (!pipe->writers++)
49398+ if (atomic_inc_return(&pipe->writers) == 1)
49399 wake_up_partner(inode);
49400
49401- if (!pipe->readers) {
49402+ if (!atomic_read(&pipe->readers)) {
49403 wait_for_partner(inode, &pipe->r_counter);
49404 if (signal_pending(current))
49405 goto err_wr;
49406@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49407 */
49408 filp->f_op = &rdwr_pipefifo_fops;
49409
49410- pipe->readers++;
49411- pipe->writers++;
49412+ atomic_inc(&pipe->readers);
49413+ atomic_inc(&pipe->writers);
49414 pipe->r_counter++;
49415 pipe->w_counter++;
49416- if (pipe->readers == 1 || pipe->writers == 1)
49417+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49418 wake_up_partner(inode);
49419 break;
49420
49421@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49422 return 0;
49423
49424 err_rd:
49425- if (!--pipe->readers)
49426+ if (atomic_dec_and_test(&pipe->readers))
49427 wake_up_interruptible(&pipe->wait);
49428 ret = -ERESTARTSYS;
49429 goto err;
49430
49431 err_wr:
49432- if (!--pipe->writers)
49433+ if (atomic_dec_and_test(&pipe->writers))
49434 wake_up_interruptible(&pipe->wait);
49435 ret = -ERESTARTSYS;
49436 goto err;
49437
49438 err:
49439- if (!pipe->readers && !pipe->writers)
49440+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49441 free_pipe_info(inode);
49442
49443 err_nocleanup:
49444diff --git a/fs/file.c b/fs/file.c
49445index 87e1290..a930cc4 100644
49446--- a/fs/file.c
49447+++ b/fs/file.c
49448@@ -14,6 +14,7 @@
49449 #include <linux/slab.h>
49450 #include <linux/vmalloc.h>
49451 #include <linux/file.h>
49452+#include <linux/security.h>
49453 #include <linux/fdtable.h>
49454 #include <linux/bitops.h>
49455 #include <linux/interrupt.h>
49456@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49457 * N.B. For clone tasks sharing a files structure, this test
49458 * will limit the total number of files that can be opened.
49459 */
49460+
49461+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49462 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49463 return -EMFILE;
49464
49465diff --git a/fs/filesystems.c b/fs/filesystems.c
49466index a24c58e..53f91ee 100644
49467--- a/fs/filesystems.c
49468+++ b/fs/filesystems.c
49469@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49470 int len = dot ? dot - name : strlen(name);
49471
49472 fs = __get_fs_type(name, len);
49473+
49474+#ifdef CONFIG_GRKERNSEC_MODHARDEN
49475+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49476+#else
49477 if (!fs && (request_module("%.*s", len, name) == 0))
49478+#endif
49479 fs = __get_fs_type(name, len);
49480
49481 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49482diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49483index eee0590..ef5bc0e 100644
49484--- a/fs/fs_struct.c
49485+++ b/fs/fs_struct.c
49486@@ -4,6 +4,7 @@
49487 #include <linux/path.h>
49488 #include <linux/slab.h>
49489 #include <linux/fs_struct.h>
49490+#include <linux/grsecurity.h>
49491
49492 /*
49493 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49494@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49495 old_root = fs->root;
49496 fs->root = *path;
49497 path_get(path);
49498+ gr_set_chroot_entries(current, path);
49499 write_unlock(&fs->lock);
49500 if (old_root.dentry)
49501 path_put(&old_root);
49502@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49503 && fs->root.mnt == old_root->mnt) {
49504 path_get(new_root);
49505 fs->root = *new_root;
49506+ gr_set_chroot_entries(p, new_root);
49507 count++;
49508 }
49509 if (fs->pwd.dentry == old_root->dentry
49510@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49511 task_lock(tsk);
49512 write_lock(&fs->lock);
49513 tsk->fs = NULL;
49514- kill = !--fs->users;
49515+ gr_clear_chroot_entries(tsk);
49516+ kill = !atomic_dec_return(&fs->users);
49517 write_unlock(&fs->lock);
49518 task_unlock(tsk);
49519 if (kill)
49520@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49521 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49522 /* We don't need to lock fs - think why ;-) */
49523 if (fs) {
49524- fs->users = 1;
49525+ atomic_set(&fs->users, 1);
49526 fs->in_exec = 0;
49527 rwlock_init(&fs->lock);
49528 fs->umask = old->umask;
49529@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49530
49531 task_lock(current);
49532 write_lock(&fs->lock);
49533- kill = !--fs->users;
49534+ kill = !atomic_dec_return(&fs->users);
49535 current->fs = new_fs;
49536+ gr_set_chroot_entries(current, &new_fs->root);
49537 write_unlock(&fs->lock);
49538 task_unlock(current);
49539
49540@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
49541
49542 /* to be mentioned only in INIT_TASK */
49543 struct fs_struct init_fs = {
49544- .users = 1,
49545+ .users = ATOMIC_INIT(1),
49546 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49547 .umask = 0022,
49548 };
49549@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49550 task_lock(current);
49551
49552 write_lock(&init_fs.lock);
49553- init_fs.users++;
49554+ atomic_inc(&init_fs.users);
49555 write_unlock(&init_fs.lock);
49556
49557 write_lock(&fs->lock);
49558 current->fs = &init_fs;
49559- kill = !--fs->users;
49560+ gr_set_chroot_entries(current, &current->fs->root);
49561+ kill = !atomic_dec_return(&fs->users);
49562 write_unlock(&fs->lock);
49563
49564 task_unlock(current);
49565diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49566index 9905350..02eaec4 100644
49567--- a/fs/fscache/cookie.c
49568+++ b/fs/fscache/cookie.c
49569@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49570 parent ? (char *) parent->def->name : "<no-parent>",
49571 def->name, netfs_data);
49572
49573- fscache_stat(&fscache_n_acquires);
49574+ fscache_stat_unchecked(&fscache_n_acquires);
49575
49576 /* if there's no parent cookie, then we don't create one here either */
49577 if (!parent) {
49578- fscache_stat(&fscache_n_acquires_null);
49579+ fscache_stat_unchecked(&fscache_n_acquires_null);
49580 _leave(" [no parent]");
49581 return NULL;
49582 }
49583@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49584 /* allocate and initialise a cookie */
49585 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49586 if (!cookie) {
49587- fscache_stat(&fscache_n_acquires_oom);
49588+ fscache_stat_unchecked(&fscache_n_acquires_oom);
49589 _leave(" [ENOMEM]");
49590 return NULL;
49591 }
49592@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49593
49594 switch (cookie->def->type) {
49595 case FSCACHE_COOKIE_TYPE_INDEX:
49596- fscache_stat(&fscache_n_cookie_index);
49597+ fscache_stat_unchecked(&fscache_n_cookie_index);
49598 break;
49599 case FSCACHE_COOKIE_TYPE_DATAFILE:
49600- fscache_stat(&fscache_n_cookie_data);
49601+ fscache_stat_unchecked(&fscache_n_cookie_data);
49602 break;
49603 default:
49604- fscache_stat(&fscache_n_cookie_special);
49605+ fscache_stat_unchecked(&fscache_n_cookie_special);
49606 break;
49607 }
49608
49609@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49610 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49611 atomic_dec(&parent->n_children);
49612 __fscache_cookie_put(cookie);
49613- fscache_stat(&fscache_n_acquires_nobufs);
49614+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49615 _leave(" = NULL");
49616 return NULL;
49617 }
49618 }
49619
49620- fscache_stat(&fscache_n_acquires_ok);
49621+ fscache_stat_unchecked(&fscache_n_acquires_ok);
49622 _leave(" = %p", cookie);
49623 return cookie;
49624 }
49625@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49626 cache = fscache_select_cache_for_object(cookie->parent);
49627 if (!cache) {
49628 up_read(&fscache_addremove_sem);
49629- fscache_stat(&fscache_n_acquires_no_cache);
49630+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49631 _leave(" = -ENOMEDIUM [no cache]");
49632 return -ENOMEDIUM;
49633 }
49634@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49635 object = cache->ops->alloc_object(cache, cookie);
49636 fscache_stat_d(&fscache_n_cop_alloc_object);
49637 if (IS_ERR(object)) {
49638- fscache_stat(&fscache_n_object_no_alloc);
49639+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
49640 ret = PTR_ERR(object);
49641 goto error;
49642 }
49643
49644- fscache_stat(&fscache_n_object_alloc);
49645+ fscache_stat_unchecked(&fscache_n_object_alloc);
49646
49647 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49648
49649@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49650 struct fscache_object *object;
49651 struct hlist_node *_p;
49652
49653- fscache_stat(&fscache_n_updates);
49654+ fscache_stat_unchecked(&fscache_n_updates);
49655
49656 if (!cookie) {
49657- fscache_stat(&fscache_n_updates_null);
49658+ fscache_stat_unchecked(&fscache_n_updates_null);
49659 _leave(" [no cookie]");
49660 return;
49661 }
49662@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49663 struct fscache_object *object;
49664 unsigned long event;
49665
49666- fscache_stat(&fscache_n_relinquishes);
49667+ fscache_stat_unchecked(&fscache_n_relinquishes);
49668 if (retire)
49669- fscache_stat(&fscache_n_relinquishes_retire);
49670+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49671
49672 if (!cookie) {
49673- fscache_stat(&fscache_n_relinquishes_null);
49674+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
49675 _leave(" [no cookie]");
49676 return;
49677 }
49678@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49679
49680 /* wait for the cookie to finish being instantiated (or to fail) */
49681 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49682- fscache_stat(&fscache_n_relinquishes_waitcrt);
49683+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49684 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49685 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49686 }
49687diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49688index edd7434..0725e66 100644
49689--- a/fs/fscache/internal.h
49690+++ b/fs/fscache/internal.h
49691@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49692 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49693 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49694
49695-extern atomic_t fscache_n_op_pend;
49696-extern atomic_t fscache_n_op_run;
49697-extern atomic_t fscache_n_op_enqueue;
49698-extern atomic_t fscache_n_op_deferred_release;
49699-extern atomic_t fscache_n_op_release;
49700-extern atomic_t fscache_n_op_gc;
49701-extern atomic_t fscache_n_op_cancelled;
49702-extern atomic_t fscache_n_op_rejected;
49703+extern atomic_unchecked_t fscache_n_op_pend;
49704+extern atomic_unchecked_t fscache_n_op_run;
49705+extern atomic_unchecked_t fscache_n_op_enqueue;
49706+extern atomic_unchecked_t fscache_n_op_deferred_release;
49707+extern atomic_unchecked_t fscache_n_op_release;
49708+extern atomic_unchecked_t fscache_n_op_gc;
49709+extern atomic_unchecked_t fscache_n_op_cancelled;
49710+extern atomic_unchecked_t fscache_n_op_rejected;
49711
49712-extern atomic_t fscache_n_attr_changed;
49713-extern atomic_t fscache_n_attr_changed_ok;
49714-extern atomic_t fscache_n_attr_changed_nobufs;
49715-extern atomic_t fscache_n_attr_changed_nomem;
49716-extern atomic_t fscache_n_attr_changed_calls;
49717+extern atomic_unchecked_t fscache_n_attr_changed;
49718+extern atomic_unchecked_t fscache_n_attr_changed_ok;
49719+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49720+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49721+extern atomic_unchecked_t fscache_n_attr_changed_calls;
49722
49723-extern atomic_t fscache_n_allocs;
49724-extern atomic_t fscache_n_allocs_ok;
49725-extern atomic_t fscache_n_allocs_wait;
49726-extern atomic_t fscache_n_allocs_nobufs;
49727-extern atomic_t fscache_n_allocs_intr;
49728-extern atomic_t fscache_n_allocs_object_dead;
49729-extern atomic_t fscache_n_alloc_ops;
49730-extern atomic_t fscache_n_alloc_op_waits;
49731+extern atomic_unchecked_t fscache_n_allocs;
49732+extern atomic_unchecked_t fscache_n_allocs_ok;
49733+extern atomic_unchecked_t fscache_n_allocs_wait;
49734+extern atomic_unchecked_t fscache_n_allocs_nobufs;
49735+extern atomic_unchecked_t fscache_n_allocs_intr;
49736+extern atomic_unchecked_t fscache_n_allocs_object_dead;
49737+extern atomic_unchecked_t fscache_n_alloc_ops;
49738+extern atomic_unchecked_t fscache_n_alloc_op_waits;
49739
49740-extern atomic_t fscache_n_retrievals;
49741-extern atomic_t fscache_n_retrievals_ok;
49742-extern atomic_t fscache_n_retrievals_wait;
49743-extern atomic_t fscache_n_retrievals_nodata;
49744-extern atomic_t fscache_n_retrievals_nobufs;
49745-extern atomic_t fscache_n_retrievals_intr;
49746-extern atomic_t fscache_n_retrievals_nomem;
49747-extern atomic_t fscache_n_retrievals_object_dead;
49748-extern atomic_t fscache_n_retrieval_ops;
49749-extern atomic_t fscache_n_retrieval_op_waits;
49750+extern atomic_unchecked_t fscache_n_retrievals;
49751+extern atomic_unchecked_t fscache_n_retrievals_ok;
49752+extern atomic_unchecked_t fscache_n_retrievals_wait;
49753+extern atomic_unchecked_t fscache_n_retrievals_nodata;
49754+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
49755+extern atomic_unchecked_t fscache_n_retrievals_intr;
49756+extern atomic_unchecked_t fscache_n_retrievals_nomem;
49757+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
49758+extern atomic_unchecked_t fscache_n_retrieval_ops;
49759+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
49760
49761-extern atomic_t fscache_n_stores;
49762-extern atomic_t fscache_n_stores_ok;
49763-extern atomic_t fscache_n_stores_again;
49764-extern atomic_t fscache_n_stores_nobufs;
49765-extern atomic_t fscache_n_stores_oom;
49766-extern atomic_t fscache_n_store_ops;
49767-extern atomic_t fscache_n_store_calls;
49768-extern atomic_t fscache_n_store_pages;
49769-extern atomic_t fscache_n_store_radix_deletes;
49770-extern atomic_t fscache_n_store_pages_over_limit;
49771+extern atomic_unchecked_t fscache_n_stores;
49772+extern atomic_unchecked_t fscache_n_stores_ok;
49773+extern atomic_unchecked_t fscache_n_stores_again;
49774+extern atomic_unchecked_t fscache_n_stores_nobufs;
49775+extern atomic_unchecked_t fscache_n_stores_oom;
49776+extern atomic_unchecked_t fscache_n_store_ops;
49777+extern atomic_unchecked_t fscache_n_store_calls;
49778+extern atomic_unchecked_t fscache_n_store_pages;
49779+extern atomic_unchecked_t fscache_n_store_radix_deletes;
49780+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
49781
49782-extern atomic_t fscache_n_store_vmscan_not_storing;
49783-extern atomic_t fscache_n_store_vmscan_gone;
49784-extern atomic_t fscache_n_store_vmscan_busy;
49785-extern atomic_t fscache_n_store_vmscan_cancelled;
49786+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49787+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
49788+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
49789+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49790
49791-extern atomic_t fscache_n_marks;
49792-extern atomic_t fscache_n_uncaches;
49793+extern atomic_unchecked_t fscache_n_marks;
49794+extern atomic_unchecked_t fscache_n_uncaches;
49795
49796-extern atomic_t fscache_n_acquires;
49797-extern atomic_t fscache_n_acquires_null;
49798-extern atomic_t fscache_n_acquires_no_cache;
49799-extern atomic_t fscache_n_acquires_ok;
49800-extern atomic_t fscache_n_acquires_nobufs;
49801-extern atomic_t fscache_n_acquires_oom;
49802+extern atomic_unchecked_t fscache_n_acquires;
49803+extern atomic_unchecked_t fscache_n_acquires_null;
49804+extern atomic_unchecked_t fscache_n_acquires_no_cache;
49805+extern atomic_unchecked_t fscache_n_acquires_ok;
49806+extern atomic_unchecked_t fscache_n_acquires_nobufs;
49807+extern atomic_unchecked_t fscache_n_acquires_oom;
49808
49809-extern atomic_t fscache_n_updates;
49810-extern atomic_t fscache_n_updates_null;
49811-extern atomic_t fscache_n_updates_run;
49812+extern atomic_unchecked_t fscache_n_updates;
49813+extern atomic_unchecked_t fscache_n_updates_null;
49814+extern atomic_unchecked_t fscache_n_updates_run;
49815
49816-extern atomic_t fscache_n_relinquishes;
49817-extern atomic_t fscache_n_relinquishes_null;
49818-extern atomic_t fscache_n_relinquishes_waitcrt;
49819-extern atomic_t fscache_n_relinquishes_retire;
49820+extern atomic_unchecked_t fscache_n_relinquishes;
49821+extern atomic_unchecked_t fscache_n_relinquishes_null;
49822+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49823+extern atomic_unchecked_t fscache_n_relinquishes_retire;
49824
49825-extern atomic_t fscache_n_cookie_index;
49826-extern atomic_t fscache_n_cookie_data;
49827-extern atomic_t fscache_n_cookie_special;
49828+extern atomic_unchecked_t fscache_n_cookie_index;
49829+extern atomic_unchecked_t fscache_n_cookie_data;
49830+extern atomic_unchecked_t fscache_n_cookie_special;
49831
49832-extern atomic_t fscache_n_object_alloc;
49833-extern atomic_t fscache_n_object_no_alloc;
49834-extern atomic_t fscache_n_object_lookups;
49835-extern atomic_t fscache_n_object_lookups_negative;
49836-extern atomic_t fscache_n_object_lookups_positive;
49837-extern atomic_t fscache_n_object_lookups_timed_out;
49838-extern atomic_t fscache_n_object_created;
49839-extern atomic_t fscache_n_object_avail;
49840-extern atomic_t fscache_n_object_dead;
49841+extern atomic_unchecked_t fscache_n_object_alloc;
49842+extern atomic_unchecked_t fscache_n_object_no_alloc;
49843+extern atomic_unchecked_t fscache_n_object_lookups;
49844+extern atomic_unchecked_t fscache_n_object_lookups_negative;
49845+extern atomic_unchecked_t fscache_n_object_lookups_positive;
49846+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
49847+extern atomic_unchecked_t fscache_n_object_created;
49848+extern atomic_unchecked_t fscache_n_object_avail;
49849+extern atomic_unchecked_t fscache_n_object_dead;
49850
49851-extern atomic_t fscache_n_checkaux_none;
49852-extern atomic_t fscache_n_checkaux_okay;
49853-extern atomic_t fscache_n_checkaux_update;
49854-extern atomic_t fscache_n_checkaux_obsolete;
49855+extern atomic_unchecked_t fscache_n_checkaux_none;
49856+extern atomic_unchecked_t fscache_n_checkaux_okay;
49857+extern atomic_unchecked_t fscache_n_checkaux_update;
49858+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
49859
49860 extern atomic_t fscache_n_cop_alloc_object;
49861 extern atomic_t fscache_n_cop_lookup_object;
49862@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
49863 atomic_inc(stat);
49864 }
49865
49866+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
49867+{
49868+ atomic_inc_unchecked(stat);
49869+}
49870+
49871 static inline void fscache_stat_d(atomic_t *stat)
49872 {
49873 atomic_dec(stat);
49874@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
49875
49876 #define __fscache_stat(stat) (NULL)
49877 #define fscache_stat(stat) do {} while (0)
49878+#define fscache_stat_unchecked(stat) do {} while (0)
49879 #define fscache_stat_d(stat) do {} while (0)
49880 #endif
49881
49882diff --git a/fs/fscache/object.c b/fs/fscache/object.c
49883index e513ac5..e888d34 100644
49884--- a/fs/fscache/object.c
49885+++ b/fs/fscache/object.c
49886@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49887 /* update the object metadata on disk */
49888 case FSCACHE_OBJECT_UPDATING:
49889 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
49890- fscache_stat(&fscache_n_updates_run);
49891+ fscache_stat_unchecked(&fscache_n_updates_run);
49892 fscache_stat(&fscache_n_cop_update_object);
49893 object->cache->ops->update_object(object);
49894 fscache_stat_d(&fscache_n_cop_update_object);
49895@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49896 spin_lock(&object->lock);
49897 object->state = FSCACHE_OBJECT_DEAD;
49898 spin_unlock(&object->lock);
49899- fscache_stat(&fscache_n_object_dead);
49900+ fscache_stat_unchecked(&fscache_n_object_dead);
49901 goto terminal_transit;
49902
49903 /* handle the parent cache of this object being withdrawn from
49904@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49905 spin_lock(&object->lock);
49906 object->state = FSCACHE_OBJECT_DEAD;
49907 spin_unlock(&object->lock);
49908- fscache_stat(&fscache_n_object_dead);
49909+ fscache_stat_unchecked(&fscache_n_object_dead);
49910 goto terminal_transit;
49911
49912 /* complain about the object being woken up once it is
49913@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49914 parent->cookie->def->name, cookie->def->name,
49915 object->cache->tag->name);
49916
49917- fscache_stat(&fscache_n_object_lookups);
49918+ fscache_stat_unchecked(&fscache_n_object_lookups);
49919 fscache_stat(&fscache_n_cop_lookup_object);
49920 ret = object->cache->ops->lookup_object(object);
49921 fscache_stat_d(&fscache_n_cop_lookup_object);
49922@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49923 if (ret == -ETIMEDOUT) {
49924 /* probably stuck behind another object, so move this one to
49925 * the back of the queue */
49926- fscache_stat(&fscache_n_object_lookups_timed_out);
49927+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
49928 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49929 }
49930
49931@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
49932
49933 spin_lock(&object->lock);
49934 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49935- fscache_stat(&fscache_n_object_lookups_negative);
49936+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
49937
49938 /* transit here to allow write requests to begin stacking up
49939 * and read requests to begin returning ENODATA */
49940@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
49941 * result, in which case there may be data available */
49942 spin_lock(&object->lock);
49943 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49944- fscache_stat(&fscache_n_object_lookups_positive);
49945+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
49946
49947 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
49948
49949@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
49950 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49951 } else {
49952 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
49953- fscache_stat(&fscache_n_object_created);
49954+ fscache_stat_unchecked(&fscache_n_object_created);
49955
49956 object->state = FSCACHE_OBJECT_AVAILABLE;
49957 spin_unlock(&object->lock);
49958@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
49959 fscache_enqueue_dependents(object);
49960
49961 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
49962- fscache_stat(&fscache_n_object_avail);
49963+ fscache_stat_unchecked(&fscache_n_object_avail);
49964
49965 _leave("");
49966 }
49967@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49968 enum fscache_checkaux result;
49969
49970 if (!object->cookie->def->check_aux) {
49971- fscache_stat(&fscache_n_checkaux_none);
49972+ fscache_stat_unchecked(&fscache_n_checkaux_none);
49973 return FSCACHE_CHECKAUX_OKAY;
49974 }
49975
49976@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49977 switch (result) {
49978 /* entry okay as is */
49979 case FSCACHE_CHECKAUX_OKAY:
49980- fscache_stat(&fscache_n_checkaux_okay);
49981+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
49982 break;
49983
49984 /* entry requires update */
49985 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
49986- fscache_stat(&fscache_n_checkaux_update);
49987+ fscache_stat_unchecked(&fscache_n_checkaux_update);
49988 break;
49989
49990 /* entry requires deletion */
49991 case FSCACHE_CHECKAUX_OBSOLETE:
49992- fscache_stat(&fscache_n_checkaux_obsolete);
49993+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
49994 break;
49995
49996 default:
49997diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
49998index 313e79a..775240f 100644
49999--- a/fs/fscache/operation.c
50000+++ b/fs/fscache/operation.c
50001@@ -16,7 +16,7 @@
50002 #include <linux/seq_file.h>
50003 #include "internal.h"
50004
50005-atomic_t fscache_op_debug_id;
50006+atomic_unchecked_t fscache_op_debug_id;
50007 EXPORT_SYMBOL(fscache_op_debug_id);
50008
50009 /**
50010@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
50011 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
50012 ASSERTCMP(atomic_read(&op->usage), >, 0);
50013
50014- fscache_stat(&fscache_n_op_enqueue);
50015+ fscache_stat_unchecked(&fscache_n_op_enqueue);
50016 switch (op->flags & FSCACHE_OP_TYPE) {
50017 case FSCACHE_OP_FAST:
50018 _debug("queue fast");
50019@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
50020 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
50021 if (op->processor)
50022 fscache_enqueue_operation(op);
50023- fscache_stat(&fscache_n_op_run);
50024+ fscache_stat_unchecked(&fscache_n_op_run);
50025 }
50026
50027 /*
50028@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50029 if (object->n_ops > 0) {
50030 atomic_inc(&op->usage);
50031 list_add_tail(&op->pend_link, &object->pending_ops);
50032- fscache_stat(&fscache_n_op_pend);
50033+ fscache_stat_unchecked(&fscache_n_op_pend);
50034 } else if (!list_empty(&object->pending_ops)) {
50035 atomic_inc(&op->usage);
50036 list_add_tail(&op->pend_link, &object->pending_ops);
50037- fscache_stat(&fscache_n_op_pend);
50038+ fscache_stat_unchecked(&fscache_n_op_pend);
50039 fscache_start_operations(object);
50040 } else {
50041 ASSERTCMP(object->n_in_progress, ==, 0);
50042@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
50043 object->n_exclusive++; /* reads and writes must wait */
50044 atomic_inc(&op->usage);
50045 list_add_tail(&op->pend_link, &object->pending_ops);
50046- fscache_stat(&fscache_n_op_pend);
50047+ fscache_stat_unchecked(&fscache_n_op_pend);
50048 ret = 0;
50049 } else {
50050 /* not allowed to submit ops in any other state */
50051@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
50052 if (object->n_exclusive > 0) {
50053 atomic_inc(&op->usage);
50054 list_add_tail(&op->pend_link, &object->pending_ops);
50055- fscache_stat(&fscache_n_op_pend);
50056+ fscache_stat_unchecked(&fscache_n_op_pend);
50057 } else if (!list_empty(&object->pending_ops)) {
50058 atomic_inc(&op->usage);
50059 list_add_tail(&op->pend_link, &object->pending_ops);
50060- fscache_stat(&fscache_n_op_pend);
50061+ fscache_stat_unchecked(&fscache_n_op_pend);
50062 fscache_start_operations(object);
50063 } else {
50064 ASSERTCMP(object->n_exclusive, ==, 0);
50065@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
50066 object->n_ops++;
50067 atomic_inc(&op->usage);
50068 list_add_tail(&op->pend_link, &object->pending_ops);
50069- fscache_stat(&fscache_n_op_pend);
50070+ fscache_stat_unchecked(&fscache_n_op_pend);
50071 ret = 0;
50072 } else if (object->state == FSCACHE_OBJECT_DYING ||
50073 object->state == FSCACHE_OBJECT_LC_DYING ||
50074 object->state == FSCACHE_OBJECT_WITHDRAWING) {
50075- fscache_stat(&fscache_n_op_rejected);
50076+ fscache_stat_unchecked(&fscache_n_op_rejected);
50077 ret = -ENOBUFS;
50078 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
50079 fscache_report_unexpected_submission(object, op, ostate);
50080@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50081
50082 ret = -EBUSY;
50083 if (!list_empty(&op->pend_link)) {
50084- fscache_stat(&fscache_n_op_cancelled);
50085+ fscache_stat_unchecked(&fscache_n_op_cancelled);
50086 list_del_init(&op->pend_link);
50087 object->n_ops--;
50088 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50089@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50090 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50091 BUG();
50092
50093- fscache_stat(&fscache_n_op_release);
50094+ fscache_stat_unchecked(&fscache_n_op_release);
50095
50096 if (op->release) {
50097 op->release(op);
50098@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50099 * lock, and defer it otherwise */
50100 if (!spin_trylock(&object->lock)) {
50101 _debug("defer put");
50102- fscache_stat(&fscache_n_op_deferred_release);
50103+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
50104
50105 cache = object->cache;
50106 spin_lock(&cache->op_gc_list_lock);
50107@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50108
50109 _debug("GC DEFERRED REL OBJ%x OP%x",
50110 object->debug_id, op->debug_id);
50111- fscache_stat(&fscache_n_op_gc);
50112+ fscache_stat_unchecked(&fscache_n_op_gc);
50113
50114 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50115
50116diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50117index c598ea4..6aac13e 100644
50118--- a/fs/fscache/page.c
50119+++ b/fs/fscache/page.c
50120@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50121 val = radix_tree_lookup(&cookie->stores, page->index);
50122 if (!val) {
50123 rcu_read_unlock();
50124- fscache_stat(&fscache_n_store_vmscan_not_storing);
50125+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50126 __fscache_uncache_page(cookie, page);
50127 return true;
50128 }
50129@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50130 spin_unlock(&cookie->stores_lock);
50131
50132 if (xpage) {
50133- fscache_stat(&fscache_n_store_vmscan_cancelled);
50134- fscache_stat(&fscache_n_store_radix_deletes);
50135+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50136+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50137 ASSERTCMP(xpage, ==, page);
50138 } else {
50139- fscache_stat(&fscache_n_store_vmscan_gone);
50140+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50141 }
50142
50143 wake_up_bit(&cookie->flags, 0);
50144@@ -106,7 +106,7 @@ page_busy:
50145 /* we might want to wait here, but that could deadlock the allocator as
50146 * the slow-work threads writing to the cache may all end up sleeping
50147 * on memory allocation */
50148- fscache_stat(&fscache_n_store_vmscan_busy);
50149+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50150 return false;
50151 }
50152 EXPORT_SYMBOL(__fscache_maybe_release_page);
50153@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50154 FSCACHE_COOKIE_STORING_TAG);
50155 if (!radix_tree_tag_get(&cookie->stores, page->index,
50156 FSCACHE_COOKIE_PENDING_TAG)) {
50157- fscache_stat(&fscache_n_store_radix_deletes);
50158+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50159 xpage = radix_tree_delete(&cookie->stores, page->index);
50160 }
50161 spin_unlock(&cookie->stores_lock);
50162@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50163
50164 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50165
50166- fscache_stat(&fscache_n_attr_changed_calls);
50167+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50168
50169 if (fscache_object_is_active(object)) {
50170 fscache_set_op_state(op, "CallFS");
50171@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50172
50173 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50174
50175- fscache_stat(&fscache_n_attr_changed);
50176+ fscache_stat_unchecked(&fscache_n_attr_changed);
50177
50178 op = kzalloc(sizeof(*op), GFP_KERNEL);
50179 if (!op) {
50180- fscache_stat(&fscache_n_attr_changed_nomem);
50181+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50182 _leave(" = -ENOMEM");
50183 return -ENOMEM;
50184 }
50185@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50186 if (fscache_submit_exclusive_op(object, op) < 0)
50187 goto nobufs;
50188 spin_unlock(&cookie->lock);
50189- fscache_stat(&fscache_n_attr_changed_ok);
50190+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50191 fscache_put_operation(op);
50192 _leave(" = 0");
50193 return 0;
50194@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50195 nobufs:
50196 spin_unlock(&cookie->lock);
50197 kfree(op);
50198- fscache_stat(&fscache_n_attr_changed_nobufs);
50199+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50200 _leave(" = %d", -ENOBUFS);
50201 return -ENOBUFS;
50202 }
50203@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50204 /* allocate a retrieval operation and attempt to submit it */
50205 op = kzalloc(sizeof(*op), GFP_NOIO);
50206 if (!op) {
50207- fscache_stat(&fscache_n_retrievals_nomem);
50208+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50209 return NULL;
50210 }
50211
50212@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50213 return 0;
50214 }
50215
50216- fscache_stat(&fscache_n_retrievals_wait);
50217+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
50218
50219 jif = jiffies;
50220 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50221 fscache_wait_bit_interruptible,
50222 TASK_INTERRUPTIBLE) != 0) {
50223- fscache_stat(&fscache_n_retrievals_intr);
50224+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50225 _leave(" = -ERESTARTSYS");
50226 return -ERESTARTSYS;
50227 }
50228@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50229 */
50230 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50231 struct fscache_retrieval *op,
50232- atomic_t *stat_op_waits,
50233- atomic_t *stat_object_dead)
50234+ atomic_unchecked_t *stat_op_waits,
50235+ atomic_unchecked_t *stat_object_dead)
50236 {
50237 int ret;
50238
50239@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50240 goto check_if_dead;
50241
50242 _debug(">>> WT");
50243- fscache_stat(stat_op_waits);
50244+ fscache_stat_unchecked(stat_op_waits);
50245 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50246 fscache_wait_bit_interruptible,
50247 TASK_INTERRUPTIBLE) < 0) {
50248@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50249
50250 check_if_dead:
50251 if (unlikely(fscache_object_is_dead(object))) {
50252- fscache_stat(stat_object_dead);
50253+ fscache_stat_unchecked(stat_object_dead);
50254 return -ENOBUFS;
50255 }
50256 return 0;
50257@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50258
50259 _enter("%p,%p,,,", cookie, page);
50260
50261- fscache_stat(&fscache_n_retrievals);
50262+ fscache_stat_unchecked(&fscache_n_retrievals);
50263
50264 if (hlist_empty(&cookie->backing_objects))
50265 goto nobufs;
50266@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50267 goto nobufs_unlock;
50268 spin_unlock(&cookie->lock);
50269
50270- fscache_stat(&fscache_n_retrieval_ops);
50271+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50272
50273 /* pin the netfs read context in case we need to do the actual netfs
50274 * read because we've encountered a cache read failure */
50275@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50276
50277 error:
50278 if (ret == -ENOMEM)
50279- fscache_stat(&fscache_n_retrievals_nomem);
50280+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50281 else if (ret == -ERESTARTSYS)
50282- fscache_stat(&fscache_n_retrievals_intr);
50283+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50284 else if (ret == -ENODATA)
50285- fscache_stat(&fscache_n_retrievals_nodata);
50286+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50287 else if (ret < 0)
50288- fscache_stat(&fscache_n_retrievals_nobufs);
50289+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50290 else
50291- fscache_stat(&fscache_n_retrievals_ok);
50292+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50293
50294 fscache_put_retrieval(op);
50295 _leave(" = %d", ret);
50296@@ -453,7 +453,7 @@ nobufs_unlock:
50297 spin_unlock(&cookie->lock);
50298 kfree(op);
50299 nobufs:
50300- fscache_stat(&fscache_n_retrievals_nobufs);
50301+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50302 _leave(" = -ENOBUFS");
50303 return -ENOBUFS;
50304 }
50305@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50306
50307 _enter("%p,,%d,,,", cookie, *nr_pages);
50308
50309- fscache_stat(&fscache_n_retrievals);
50310+ fscache_stat_unchecked(&fscache_n_retrievals);
50311
50312 if (hlist_empty(&cookie->backing_objects))
50313 goto nobufs;
50314@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50315 goto nobufs_unlock;
50316 spin_unlock(&cookie->lock);
50317
50318- fscache_stat(&fscache_n_retrieval_ops);
50319+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50320
50321 /* pin the netfs read context in case we need to do the actual netfs
50322 * read because we've encountered a cache read failure */
50323@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50324
50325 error:
50326 if (ret == -ENOMEM)
50327- fscache_stat(&fscache_n_retrievals_nomem);
50328+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50329 else if (ret == -ERESTARTSYS)
50330- fscache_stat(&fscache_n_retrievals_intr);
50331+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50332 else if (ret == -ENODATA)
50333- fscache_stat(&fscache_n_retrievals_nodata);
50334+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50335 else if (ret < 0)
50336- fscache_stat(&fscache_n_retrievals_nobufs);
50337+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50338 else
50339- fscache_stat(&fscache_n_retrievals_ok);
50340+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50341
50342 fscache_put_retrieval(op);
50343 _leave(" = %d", ret);
50344@@ -570,7 +570,7 @@ nobufs_unlock:
50345 spin_unlock(&cookie->lock);
50346 kfree(op);
50347 nobufs:
50348- fscache_stat(&fscache_n_retrievals_nobufs);
50349+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50350 _leave(" = -ENOBUFS");
50351 return -ENOBUFS;
50352 }
50353@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50354
50355 _enter("%p,%p,,,", cookie, page);
50356
50357- fscache_stat(&fscache_n_allocs);
50358+ fscache_stat_unchecked(&fscache_n_allocs);
50359
50360 if (hlist_empty(&cookie->backing_objects))
50361 goto nobufs;
50362@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50363 goto nobufs_unlock;
50364 spin_unlock(&cookie->lock);
50365
50366- fscache_stat(&fscache_n_alloc_ops);
50367+ fscache_stat_unchecked(&fscache_n_alloc_ops);
50368
50369 ret = fscache_wait_for_retrieval_activation(
50370 object, op,
50371@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50372
50373 error:
50374 if (ret == -ERESTARTSYS)
50375- fscache_stat(&fscache_n_allocs_intr);
50376+ fscache_stat_unchecked(&fscache_n_allocs_intr);
50377 else if (ret < 0)
50378- fscache_stat(&fscache_n_allocs_nobufs);
50379+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50380 else
50381- fscache_stat(&fscache_n_allocs_ok);
50382+ fscache_stat_unchecked(&fscache_n_allocs_ok);
50383
50384 fscache_put_retrieval(op);
50385 _leave(" = %d", ret);
50386@@ -651,7 +651,7 @@ nobufs_unlock:
50387 spin_unlock(&cookie->lock);
50388 kfree(op);
50389 nobufs:
50390- fscache_stat(&fscache_n_allocs_nobufs);
50391+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50392 _leave(" = -ENOBUFS");
50393 return -ENOBUFS;
50394 }
50395@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50396
50397 spin_lock(&cookie->stores_lock);
50398
50399- fscache_stat(&fscache_n_store_calls);
50400+ fscache_stat_unchecked(&fscache_n_store_calls);
50401
50402 /* find a page to store */
50403 page = NULL;
50404@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50405 page = results[0];
50406 _debug("gang %d [%lx]", n, page->index);
50407 if (page->index > op->store_limit) {
50408- fscache_stat(&fscache_n_store_pages_over_limit);
50409+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50410 goto superseded;
50411 }
50412
50413@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50414
50415 if (page) {
50416 fscache_set_op_state(&op->op, "Store");
50417- fscache_stat(&fscache_n_store_pages);
50418+ fscache_stat_unchecked(&fscache_n_store_pages);
50419 fscache_stat(&fscache_n_cop_write_page);
50420 ret = object->cache->ops->write_page(op, page);
50421 fscache_stat_d(&fscache_n_cop_write_page);
50422@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50423 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50424 ASSERT(PageFsCache(page));
50425
50426- fscache_stat(&fscache_n_stores);
50427+ fscache_stat_unchecked(&fscache_n_stores);
50428
50429 op = kzalloc(sizeof(*op), GFP_NOIO);
50430 if (!op)
50431@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50432 spin_unlock(&cookie->stores_lock);
50433 spin_unlock(&object->lock);
50434
50435- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50436+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50437 op->store_limit = object->store_limit;
50438
50439 if (fscache_submit_op(object, &op->op) < 0)
50440@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50441
50442 spin_unlock(&cookie->lock);
50443 radix_tree_preload_end();
50444- fscache_stat(&fscache_n_store_ops);
50445- fscache_stat(&fscache_n_stores_ok);
50446+ fscache_stat_unchecked(&fscache_n_store_ops);
50447+ fscache_stat_unchecked(&fscache_n_stores_ok);
50448
50449 /* the slow work queue now carries its own ref on the object */
50450 fscache_put_operation(&op->op);
50451@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50452 return 0;
50453
50454 already_queued:
50455- fscache_stat(&fscache_n_stores_again);
50456+ fscache_stat_unchecked(&fscache_n_stores_again);
50457 already_pending:
50458 spin_unlock(&cookie->stores_lock);
50459 spin_unlock(&object->lock);
50460 spin_unlock(&cookie->lock);
50461 radix_tree_preload_end();
50462 kfree(op);
50463- fscache_stat(&fscache_n_stores_ok);
50464+ fscache_stat_unchecked(&fscache_n_stores_ok);
50465 _leave(" = 0");
50466 return 0;
50467
50468@@ -886,14 +886,14 @@ nobufs:
50469 spin_unlock(&cookie->lock);
50470 radix_tree_preload_end();
50471 kfree(op);
50472- fscache_stat(&fscache_n_stores_nobufs);
50473+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
50474 _leave(" = -ENOBUFS");
50475 return -ENOBUFS;
50476
50477 nomem_free:
50478 kfree(op);
50479 nomem:
50480- fscache_stat(&fscache_n_stores_oom);
50481+ fscache_stat_unchecked(&fscache_n_stores_oom);
50482 _leave(" = -ENOMEM");
50483 return -ENOMEM;
50484 }
50485@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50486 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50487 ASSERTCMP(page, !=, NULL);
50488
50489- fscache_stat(&fscache_n_uncaches);
50490+ fscache_stat_unchecked(&fscache_n_uncaches);
50491
50492 /* cache withdrawal may beat us to it */
50493 if (!PageFsCache(page))
50494@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50495 unsigned long loop;
50496
50497 #ifdef CONFIG_FSCACHE_STATS
50498- atomic_add(pagevec->nr, &fscache_n_marks);
50499+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50500 #endif
50501
50502 for (loop = 0; loop < pagevec->nr; loop++) {
50503diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50504index 46435f3..8cddf18 100644
50505--- a/fs/fscache/stats.c
50506+++ b/fs/fscache/stats.c
50507@@ -18,95 +18,95 @@
50508 /*
50509 * operation counters
50510 */
50511-atomic_t fscache_n_op_pend;
50512-atomic_t fscache_n_op_run;
50513-atomic_t fscache_n_op_enqueue;
50514-atomic_t fscache_n_op_requeue;
50515-atomic_t fscache_n_op_deferred_release;
50516-atomic_t fscache_n_op_release;
50517-atomic_t fscache_n_op_gc;
50518-atomic_t fscache_n_op_cancelled;
50519-atomic_t fscache_n_op_rejected;
50520+atomic_unchecked_t fscache_n_op_pend;
50521+atomic_unchecked_t fscache_n_op_run;
50522+atomic_unchecked_t fscache_n_op_enqueue;
50523+atomic_unchecked_t fscache_n_op_requeue;
50524+atomic_unchecked_t fscache_n_op_deferred_release;
50525+atomic_unchecked_t fscache_n_op_release;
50526+atomic_unchecked_t fscache_n_op_gc;
50527+atomic_unchecked_t fscache_n_op_cancelled;
50528+atomic_unchecked_t fscache_n_op_rejected;
50529
50530-atomic_t fscache_n_attr_changed;
50531-atomic_t fscache_n_attr_changed_ok;
50532-atomic_t fscache_n_attr_changed_nobufs;
50533-atomic_t fscache_n_attr_changed_nomem;
50534-atomic_t fscache_n_attr_changed_calls;
50535+atomic_unchecked_t fscache_n_attr_changed;
50536+atomic_unchecked_t fscache_n_attr_changed_ok;
50537+atomic_unchecked_t fscache_n_attr_changed_nobufs;
50538+atomic_unchecked_t fscache_n_attr_changed_nomem;
50539+atomic_unchecked_t fscache_n_attr_changed_calls;
50540
50541-atomic_t fscache_n_allocs;
50542-atomic_t fscache_n_allocs_ok;
50543-atomic_t fscache_n_allocs_wait;
50544-atomic_t fscache_n_allocs_nobufs;
50545-atomic_t fscache_n_allocs_intr;
50546-atomic_t fscache_n_allocs_object_dead;
50547-atomic_t fscache_n_alloc_ops;
50548-atomic_t fscache_n_alloc_op_waits;
50549+atomic_unchecked_t fscache_n_allocs;
50550+atomic_unchecked_t fscache_n_allocs_ok;
50551+atomic_unchecked_t fscache_n_allocs_wait;
50552+atomic_unchecked_t fscache_n_allocs_nobufs;
50553+atomic_unchecked_t fscache_n_allocs_intr;
50554+atomic_unchecked_t fscache_n_allocs_object_dead;
50555+atomic_unchecked_t fscache_n_alloc_ops;
50556+atomic_unchecked_t fscache_n_alloc_op_waits;
50557
50558-atomic_t fscache_n_retrievals;
50559-atomic_t fscache_n_retrievals_ok;
50560-atomic_t fscache_n_retrievals_wait;
50561-atomic_t fscache_n_retrievals_nodata;
50562-atomic_t fscache_n_retrievals_nobufs;
50563-atomic_t fscache_n_retrievals_intr;
50564-atomic_t fscache_n_retrievals_nomem;
50565-atomic_t fscache_n_retrievals_object_dead;
50566-atomic_t fscache_n_retrieval_ops;
50567-atomic_t fscache_n_retrieval_op_waits;
50568+atomic_unchecked_t fscache_n_retrievals;
50569+atomic_unchecked_t fscache_n_retrievals_ok;
50570+atomic_unchecked_t fscache_n_retrievals_wait;
50571+atomic_unchecked_t fscache_n_retrievals_nodata;
50572+atomic_unchecked_t fscache_n_retrievals_nobufs;
50573+atomic_unchecked_t fscache_n_retrievals_intr;
50574+atomic_unchecked_t fscache_n_retrievals_nomem;
50575+atomic_unchecked_t fscache_n_retrievals_object_dead;
50576+atomic_unchecked_t fscache_n_retrieval_ops;
50577+atomic_unchecked_t fscache_n_retrieval_op_waits;
50578
50579-atomic_t fscache_n_stores;
50580-atomic_t fscache_n_stores_ok;
50581-atomic_t fscache_n_stores_again;
50582-atomic_t fscache_n_stores_nobufs;
50583-atomic_t fscache_n_stores_oom;
50584-atomic_t fscache_n_store_ops;
50585-atomic_t fscache_n_store_calls;
50586-atomic_t fscache_n_store_pages;
50587-atomic_t fscache_n_store_radix_deletes;
50588-atomic_t fscache_n_store_pages_over_limit;
50589+atomic_unchecked_t fscache_n_stores;
50590+atomic_unchecked_t fscache_n_stores_ok;
50591+atomic_unchecked_t fscache_n_stores_again;
50592+atomic_unchecked_t fscache_n_stores_nobufs;
50593+atomic_unchecked_t fscache_n_stores_oom;
50594+atomic_unchecked_t fscache_n_store_ops;
50595+atomic_unchecked_t fscache_n_store_calls;
50596+atomic_unchecked_t fscache_n_store_pages;
50597+atomic_unchecked_t fscache_n_store_radix_deletes;
50598+atomic_unchecked_t fscache_n_store_pages_over_limit;
50599
50600-atomic_t fscache_n_store_vmscan_not_storing;
50601-atomic_t fscache_n_store_vmscan_gone;
50602-atomic_t fscache_n_store_vmscan_busy;
50603-atomic_t fscache_n_store_vmscan_cancelled;
50604+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50605+atomic_unchecked_t fscache_n_store_vmscan_gone;
50606+atomic_unchecked_t fscache_n_store_vmscan_busy;
50607+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50608
50609-atomic_t fscache_n_marks;
50610-atomic_t fscache_n_uncaches;
50611+atomic_unchecked_t fscache_n_marks;
50612+atomic_unchecked_t fscache_n_uncaches;
50613
50614-atomic_t fscache_n_acquires;
50615-atomic_t fscache_n_acquires_null;
50616-atomic_t fscache_n_acquires_no_cache;
50617-atomic_t fscache_n_acquires_ok;
50618-atomic_t fscache_n_acquires_nobufs;
50619-atomic_t fscache_n_acquires_oom;
50620+atomic_unchecked_t fscache_n_acquires;
50621+atomic_unchecked_t fscache_n_acquires_null;
50622+atomic_unchecked_t fscache_n_acquires_no_cache;
50623+atomic_unchecked_t fscache_n_acquires_ok;
50624+atomic_unchecked_t fscache_n_acquires_nobufs;
50625+atomic_unchecked_t fscache_n_acquires_oom;
50626
50627-atomic_t fscache_n_updates;
50628-atomic_t fscache_n_updates_null;
50629-atomic_t fscache_n_updates_run;
50630+atomic_unchecked_t fscache_n_updates;
50631+atomic_unchecked_t fscache_n_updates_null;
50632+atomic_unchecked_t fscache_n_updates_run;
50633
50634-atomic_t fscache_n_relinquishes;
50635-atomic_t fscache_n_relinquishes_null;
50636-atomic_t fscache_n_relinquishes_waitcrt;
50637-atomic_t fscache_n_relinquishes_retire;
50638+atomic_unchecked_t fscache_n_relinquishes;
50639+atomic_unchecked_t fscache_n_relinquishes_null;
50640+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50641+atomic_unchecked_t fscache_n_relinquishes_retire;
50642
50643-atomic_t fscache_n_cookie_index;
50644-atomic_t fscache_n_cookie_data;
50645-atomic_t fscache_n_cookie_special;
50646+atomic_unchecked_t fscache_n_cookie_index;
50647+atomic_unchecked_t fscache_n_cookie_data;
50648+atomic_unchecked_t fscache_n_cookie_special;
50649
50650-atomic_t fscache_n_object_alloc;
50651-atomic_t fscache_n_object_no_alloc;
50652-atomic_t fscache_n_object_lookups;
50653-atomic_t fscache_n_object_lookups_negative;
50654-atomic_t fscache_n_object_lookups_positive;
50655-atomic_t fscache_n_object_lookups_timed_out;
50656-atomic_t fscache_n_object_created;
50657-atomic_t fscache_n_object_avail;
50658-atomic_t fscache_n_object_dead;
50659+atomic_unchecked_t fscache_n_object_alloc;
50660+atomic_unchecked_t fscache_n_object_no_alloc;
50661+atomic_unchecked_t fscache_n_object_lookups;
50662+atomic_unchecked_t fscache_n_object_lookups_negative;
50663+atomic_unchecked_t fscache_n_object_lookups_positive;
50664+atomic_unchecked_t fscache_n_object_lookups_timed_out;
50665+atomic_unchecked_t fscache_n_object_created;
50666+atomic_unchecked_t fscache_n_object_avail;
50667+atomic_unchecked_t fscache_n_object_dead;
50668
50669-atomic_t fscache_n_checkaux_none;
50670-atomic_t fscache_n_checkaux_okay;
50671-atomic_t fscache_n_checkaux_update;
50672-atomic_t fscache_n_checkaux_obsolete;
50673+atomic_unchecked_t fscache_n_checkaux_none;
50674+atomic_unchecked_t fscache_n_checkaux_okay;
50675+atomic_unchecked_t fscache_n_checkaux_update;
50676+atomic_unchecked_t fscache_n_checkaux_obsolete;
50677
50678 atomic_t fscache_n_cop_alloc_object;
50679 atomic_t fscache_n_cop_lookup_object;
50680@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50681 seq_puts(m, "FS-Cache statistics\n");
50682
50683 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50684- atomic_read(&fscache_n_cookie_index),
50685- atomic_read(&fscache_n_cookie_data),
50686- atomic_read(&fscache_n_cookie_special));
50687+ atomic_read_unchecked(&fscache_n_cookie_index),
50688+ atomic_read_unchecked(&fscache_n_cookie_data),
50689+ atomic_read_unchecked(&fscache_n_cookie_special));
50690
50691 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50692- atomic_read(&fscache_n_object_alloc),
50693- atomic_read(&fscache_n_object_no_alloc),
50694- atomic_read(&fscache_n_object_avail),
50695- atomic_read(&fscache_n_object_dead));
50696+ atomic_read_unchecked(&fscache_n_object_alloc),
50697+ atomic_read_unchecked(&fscache_n_object_no_alloc),
50698+ atomic_read_unchecked(&fscache_n_object_avail),
50699+ atomic_read_unchecked(&fscache_n_object_dead));
50700 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50701- atomic_read(&fscache_n_checkaux_none),
50702- atomic_read(&fscache_n_checkaux_okay),
50703- atomic_read(&fscache_n_checkaux_update),
50704- atomic_read(&fscache_n_checkaux_obsolete));
50705+ atomic_read_unchecked(&fscache_n_checkaux_none),
50706+ atomic_read_unchecked(&fscache_n_checkaux_okay),
50707+ atomic_read_unchecked(&fscache_n_checkaux_update),
50708+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50709
50710 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50711- atomic_read(&fscache_n_marks),
50712- atomic_read(&fscache_n_uncaches));
50713+ atomic_read_unchecked(&fscache_n_marks),
50714+ atomic_read_unchecked(&fscache_n_uncaches));
50715
50716 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50717 " oom=%u\n",
50718- atomic_read(&fscache_n_acquires),
50719- atomic_read(&fscache_n_acquires_null),
50720- atomic_read(&fscache_n_acquires_no_cache),
50721- atomic_read(&fscache_n_acquires_ok),
50722- atomic_read(&fscache_n_acquires_nobufs),
50723- atomic_read(&fscache_n_acquires_oom));
50724+ atomic_read_unchecked(&fscache_n_acquires),
50725+ atomic_read_unchecked(&fscache_n_acquires_null),
50726+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
50727+ atomic_read_unchecked(&fscache_n_acquires_ok),
50728+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
50729+ atomic_read_unchecked(&fscache_n_acquires_oom));
50730
50731 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50732- atomic_read(&fscache_n_object_lookups),
50733- atomic_read(&fscache_n_object_lookups_negative),
50734- atomic_read(&fscache_n_object_lookups_positive),
50735- atomic_read(&fscache_n_object_lookups_timed_out),
50736- atomic_read(&fscache_n_object_created));
50737+ atomic_read_unchecked(&fscache_n_object_lookups),
50738+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
50739+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
50740+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50741+ atomic_read_unchecked(&fscache_n_object_created));
50742
50743 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50744- atomic_read(&fscache_n_updates),
50745- atomic_read(&fscache_n_updates_null),
50746- atomic_read(&fscache_n_updates_run));
50747+ atomic_read_unchecked(&fscache_n_updates),
50748+ atomic_read_unchecked(&fscache_n_updates_null),
50749+ atomic_read_unchecked(&fscache_n_updates_run));
50750
50751 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
50752- atomic_read(&fscache_n_relinquishes),
50753- atomic_read(&fscache_n_relinquishes_null),
50754- atomic_read(&fscache_n_relinquishes_waitcrt),
50755- atomic_read(&fscache_n_relinquishes_retire));
50756+ atomic_read_unchecked(&fscache_n_relinquishes),
50757+ atomic_read_unchecked(&fscache_n_relinquishes_null),
50758+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
50759+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
50760
50761 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
50762- atomic_read(&fscache_n_attr_changed),
50763- atomic_read(&fscache_n_attr_changed_ok),
50764- atomic_read(&fscache_n_attr_changed_nobufs),
50765- atomic_read(&fscache_n_attr_changed_nomem),
50766- atomic_read(&fscache_n_attr_changed_calls));
50767+ atomic_read_unchecked(&fscache_n_attr_changed),
50768+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
50769+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
50770+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
50771+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
50772
50773 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
50774- atomic_read(&fscache_n_allocs),
50775- atomic_read(&fscache_n_allocs_ok),
50776- atomic_read(&fscache_n_allocs_wait),
50777- atomic_read(&fscache_n_allocs_nobufs),
50778- atomic_read(&fscache_n_allocs_intr));
50779+ atomic_read_unchecked(&fscache_n_allocs),
50780+ atomic_read_unchecked(&fscache_n_allocs_ok),
50781+ atomic_read_unchecked(&fscache_n_allocs_wait),
50782+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
50783+ atomic_read_unchecked(&fscache_n_allocs_intr));
50784 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
50785- atomic_read(&fscache_n_alloc_ops),
50786- atomic_read(&fscache_n_alloc_op_waits),
50787- atomic_read(&fscache_n_allocs_object_dead));
50788+ atomic_read_unchecked(&fscache_n_alloc_ops),
50789+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
50790+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
50791
50792 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
50793 " int=%u oom=%u\n",
50794- atomic_read(&fscache_n_retrievals),
50795- atomic_read(&fscache_n_retrievals_ok),
50796- atomic_read(&fscache_n_retrievals_wait),
50797- atomic_read(&fscache_n_retrievals_nodata),
50798- atomic_read(&fscache_n_retrievals_nobufs),
50799- atomic_read(&fscache_n_retrievals_intr),
50800- atomic_read(&fscache_n_retrievals_nomem));
50801+ atomic_read_unchecked(&fscache_n_retrievals),
50802+ atomic_read_unchecked(&fscache_n_retrievals_ok),
50803+ atomic_read_unchecked(&fscache_n_retrievals_wait),
50804+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
50805+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
50806+ atomic_read_unchecked(&fscache_n_retrievals_intr),
50807+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
50808 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
50809- atomic_read(&fscache_n_retrieval_ops),
50810- atomic_read(&fscache_n_retrieval_op_waits),
50811- atomic_read(&fscache_n_retrievals_object_dead));
50812+ atomic_read_unchecked(&fscache_n_retrieval_ops),
50813+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
50814+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
50815
50816 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
50817- atomic_read(&fscache_n_stores),
50818- atomic_read(&fscache_n_stores_ok),
50819- atomic_read(&fscache_n_stores_again),
50820- atomic_read(&fscache_n_stores_nobufs),
50821- atomic_read(&fscache_n_stores_oom));
50822+ atomic_read_unchecked(&fscache_n_stores),
50823+ atomic_read_unchecked(&fscache_n_stores_ok),
50824+ atomic_read_unchecked(&fscache_n_stores_again),
50825+ atomic_read_unchecked(&fscache_n_stores_nobufs),
50826+ atomic_read_unchecked(&fscache_n_stores_oom));
50827 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
50828- atomic_read(&fscache_n_store_ops),
50829- atomic_read(&fscache_n_store_calls),
50830- atomic_read(&fscache_n_store_pages),
50831- atomic_read(&fscache_n_store_radix_deletes),
50832- atomic_read(&fscache_n_store_pages_over_limit));
50833+ atomic_read_unchecked(&fscache_n_store_ops),
50834+ atomic_read_unchecked(&fscache_n_store_calls),
50835+ atomic_read_unchecked(&fscache_n_store_pages),
50836+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
50837+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
50838
50839 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
50840- atomic_read(&fscache_n_store_vmscan_not_storing),
50841- atomic_read(&fscache_n_store_vmscan_gone),
50842- atomic_read(&fscache_n_store_vmscan_busy),
50843- atomic_read(&fscache_n_store_vmscan_cancelled));
50844+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
50845+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
50846+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
50847+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
50848
50849 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
50850- atomic_read(&fscache_n_op_pend),
50851- atomic_read(&fscache_n_op_run),
50852- atomic_read(&fscache_n_op_enqueue),
50853- atomic_read(&fscache_n_op_cancelled),
50854- atomic_read(&fscache_n_op_rejected));
50855+ atomic_read_unchecked(&fscache_n_op_pend),
50856+ atomic_read_unchecked(&fscache_n_op_run),
50857+ atomic_read_unchecked(&fscache_n_op_enqueue),
50858+ atomic_read_unchecked(&fscache_n_op_cancelled),
50859+ atomic_read_unchecked(&fscache_n_op_rejected));
50860 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
50861- atomic_read(&fscache_n_op_deferred_release),
50862- atomic_read(&fscache_n_op_release),
50863- atomic_read(&fscache_n_op_gc));
50864+ atomic_read_unchecked(&fscache_n_op_deferred_release),
50865+ atomic_read_unchecked(&fscache_n_op_release),
50866+ atomic_read_unchecked(&fscache_n_op_gc));
50867
50868 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
50869 atomic_read(&fscache_n_cop_alloc_object),
50870diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
50871index de792dc..448b532 100644
50872--- a/fs/fuse/cuse.c
50873+++ b/fs/fuse/cuse.c
50874@@ -576,10 +576,12 @@ static int __init cuse_init(void)
50875 INIT_LIST_HEAD(&cuse_conntbl[i]);
50876
50877 /* inherit and extend fuse_dev_operations */
50878- cuse_channel_fops = fuse_dev_operations;
50879- cuse_channel_fops.owner = THIS_MODULE;
50880- cuse_channel_fops.open = cuse_channel_open;
50881- cuse_channel_fops.release = cuse_channel_release;
50882+ pax_open_kernel();
50883+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
50884+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
50885+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
50886+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
50887+ pax_close_kernel();
50888
50889 cuse_class = class_create(THIS_MODULE, "cuse");
50890 if (IS_ERR(cuse_class))
50891diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
50892index 1facb39..7f48557 100644
50893--- a/fs/fuse/dev.c
50894+++ b/fs/fuse/dev.c
50895@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50896 {
50897 struct fuse_notify_inval_entry_out outarg;
50898 int err = -EINVAL;
50899- char buf[FUSE_NAME_MAX+1];
50900+ char *buf = NULL;
50901 struct qstr name;
50902
50903 if (size < sizeof(outarg))
50904@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50905 if (outarg.namelen > FUSE_NAME_MAX)
50906 goto err;
50907
50908+ err = -ENOMEM;
50909+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
50910+ if (!buf)
50911+ goto err;
50912+
50913 err = -EINVAL;
50914 if (size != sizeof(outarg) + outarg.namelen + 1)
50915 goto err;
50916@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50917
50918 down_read(&fc->killsb);
50919 err = -ENOENT;
50920- if (!fc->sb)
50921- goto err_unlock;
50922-
50923- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50924-
50925-err_unlock:
50926+ if (fc->sb)
50927+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50928 up_read(&fc->killsb);
50929+ kfree(buf);
50930 return err;
50931
50932 err:
50933 fuse_copy_finish(cs);
50934+ kfree(buf);
50935 return err;
50936 }
50937
50938diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
50939index 4787ae6..73efff7 100644
50940--- a/fs/fuse/dir.c
50941+++ b/fs/fuse/dir.c
50942@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
50943 return link;
50944 }
50945
50946-static void free_link(char *link)
50947+static void free_link(const char *link)
50948 {
50949 if (!IS_ERR(link))
50950 free_page((unsigned long) link);
50951diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
50952index 247436c..e650ccb 100644
50953--- a/fs/gfs2/ops_inode.c
50954+++ b/fs/gfs2/ops_inode.c
50955@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
50956 unsigned int x;
50957 int error;
50958
50959+ pax_track_stack();
50960+
50961 if (ndentry->d_inode) {
50962 nip = GFS2_I(ndentry->d_inode);
50963 if (ip == nip)
50964diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
50965index 4463297..4fed53b 100644
50966--- a/fs/gfs2/sys.c
50967+++ b/fs/gfs2/sys.c
50968@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
50969 return a->store ? a->store(sdp, buf, len) : len;
50970 }
50971
50972-static struct sysfs_ops gfs2_attr_ops = {
50973+static const struct sysfs_ops gfs2_attr_ops = {
50974 .show = gfs2_attr_show,
50975 .store = gfs2_attr_store,
50976 };
50977@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
50978 return 0;
50979 }
50980
50981-static struct kset_uevent_ops gfs2_uevent_ops = {
50982+static const struct kset_uevent_ops gfs2_uevent_ops = {
50983 .uevent = gfs2_uevent,
50984 };
50985
50986diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
50987index f6874ac..7cd98a8 100644
50988--- a/fs/hfsplus/catalog.c
50989+++ b/fs/hfsplus/catalog.c
50990@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
50991 int err;
50992 u16 type;
50993
50994+ pax_track_stack();
50995+
50996 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
50997 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
50998 if (err)
50999@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
51000 int entry_size;
51001 int err;
51002
51003+ pax_track_stack();
51004+
51005 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
51006 sb = dir->i_sb;
51007 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
51008@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
51009 int entry_size, type;
51010 int err = 0;
51011
51012+ pax_track_stack();
51013+
51014 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
51015 dst_dir->i_ino, dst_name->name);
51016 sb = src_dir->i_sb;
51017diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
51018index 5f40236..dac3421 100644
51019--- a/fs/hfsplus/dir.c
51020+++ b/fs/hfsplus/dir.c
51021@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
51022 struct hfsplus_readdir_data *rd;
51023 u16 type;
51024
51025+ pax_track_stack();
51026+
51027 if (filp->f_pos >= inode->i_size)
51028 return 0;
51029
51030diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
51031index 1bcf597..905a251 100644
51032--- a/fs/hfsplus/inode.c
51033+++ b/fs/hfsplus/inode.c
51034@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
51035 int res = 0;
51036 u16 type;
51037
51038+ pax_track_stack();
51039+
51040 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
51041
51042 HFSPLUS_I(inode).dev = 0;
51043@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
51044 struct hfs_find_data fd;
51045 hfsplus_cat_entry entry;
51046
51047+ pax_track_stack();
51048+
51049 if (HFSPLUS_IS_RSRC(inode))
51050 main_inode = HFSPLUS_I(inode).rsrc_inode;
51051
51052diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
51053index f457d2c..7ef4ad5 100644
51054--- a/fs/hfsplus/ioctl.c
51055+++ b/fs/hfsplus/ioctl.c
51056@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
51057 struct hfsplus_cat_file *file;
51058 int res;
51059
51060+ pax_track_stack();
51061+
51062 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51063 return -EOPNOTSUPP;
51064
51065@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
51066 struct hfsplus_cat_file *file;
51067 ssize_t res = 0;
51068
51069+ pax_track_stack();
51070+
51071 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
51072 return -EOPNOTSUPP;
51073
51074diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
51075index 43022f3..7298079 100644
51076--- a/fs/hfsplus/super.c
51077+++ b/fs/hfsplus/super.c
51078@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
51079 struct nls_table *nls = NULL;
51080 int err = -EINVAL;
51081
51082+ pax_track_stack();
51083+
51084 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51085 if (!sbi)
51086 return -ENOMEM;
51087diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51088index 87a1258..5694d91 100644
51089--- a/fs/hugetlbfs/inode.c
51090+++ b/fs/hugetlbfs/inode.c
51091@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51092 .kill_sb = kill_litter_super,
51093 };
51094
51095-static struct vfsmount *hugetlbfs_vfsmount;
51096+struct vfsmount *hugetlbfs_vfsmount;
51097
51098 static int can_do_hugetlb_shm(void)
51099 {
51100diff --git a/fs/ioctl.c b/fs/ioctl.c
51101index 6c75110..19d2c3c 100644
51102--- a/fs/ioctl.c
51103+++ b/fs/ioctl.c
51104@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51105 u64 phys, u64 len, u32 flags)
51106 {
51107 struct fiemap_extent extent;
51108- struct fiemap_extent *dest = fieinfo->fi_extents_start;
51109+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51110
51111 /* only count the extents */
51112 if (fieinfo->fi_extents_max == 0) {
51113@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51114
51115 fieinfo.fi_flags = fiemap.fm_flags;
51116 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51117- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51118+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51119
51120 if (fiemap.fm_extent_count != 0 &&
51121 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51122@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51123 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51124 fiemap.fm_flags = fieinfo.fi_flags;
51125 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51126- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51127+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51128 error = -EFAULT;
51129
51130 return error;
51131diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51132index b0435dd..81ee0be 100644
51133--- a/fs/jbd/checkpoint.c
51134+++ b/fs/jbd/checkpoint.c
51135@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51136 tid_t this_tid;
51137 int result;
51138
51139+ pax_track_stack();
51140+
51141 jbd_debug(1, "Start checkpoint\n");
51142
51143 /*
51144diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51145index 546d153..736896c 100644
51146--- a/fs/jffs2/compr_rtime.c
51147+++ b/fs/jffs2/compr_rtime.c
51148@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51149 int outpos = 0;
51150 int pos=0;
51151
51152+ pax_track_stack();
51153+
51154 memset(positions,0,sizeof(positions));
51155
51156 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51157@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51158 int outpos = 0;
51159 int pos=0;
51160
51161+ pax_track_stack();
51162+
51163 memset(positions,0,sizeof(positions));
51164
51165 while (outpos<destlen) {
51166diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51167index 170d289..3254b98 100644
51168--- a/fs/jffs2/compr_rubin.c
51169+++ b/fs/jffs2/compr_rubin.c
51170@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51171 int ret;
51172 uint32_t mysrclen, mydstlen;
51173
51174+ pax_track_stack();
51175+
51176 mysrclen = *sourcelen;
51177 mydstlen = *dstlen - 8;
51178
51179diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51180index b47679b..00d65d3 100644
51181--- a/fs/jffs2/erase.c
51182+++ b/fs/jffs2/erase.c
51183@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51184 struct jffs2_unknown_node marker = {
51185 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51186 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51187- .totlen = cpu_to_je32(c->cleanmarker_size)
51188+ .totlen = cpu_to_je32(c->cleanmarker_size),
51189+ .hdr_crc = cpu_to_je32(0)
51190 };
51191
51192 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51193diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51194index 5ef7bac..4fd1e3c 100644
51195--- a/fs/jffs2/wbuf.c
51196+++ b/fs/jffs2/wbuf.c
51197@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51198 {
51199 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51200 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51201- .totlen = constant_cpu_to_je32(8)
51202+ .totlen = constant_cpu_to_je32(8),
51203+ .hdr_crc = constant_cpu_to_je32(0)
51204 };
51205
51206 /*
51207diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51208index 082e844..52012a1 100644
51209--- a/fs/jffs2/xattr.c
51210+++ b/fs/jffs2/xattr.c
51211@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51212
51213 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51214
51215+ pax_track_stack();
51216+
51217 /* Phase.1 : Merge same xref */
51218 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51219 xref_tmphash[i] = NULL;
51220diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51221index 2234c73..f6e6e6b 100644
51222--- a/fs/jfs/super.c
51223+++ b/fs/jfs/super.c
51224@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51225
51226 jfs_inode_cachep =
51227 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51228- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51229+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51230 init_once);
51231 if (jfs_inode_cachep == NULL)
51232 return -ENOMEM;
51233diff --git a/fs/libfs.c b/fs/libfs.c
51234index ba36e93..3153fce 100644
51235--- a/fs/libfs.c
51236+++ b/fs/libfs.c
51237@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51238
51239 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51240 struct dentry *next;
51241+ char d_name[sizeof(next->d_iname)];
51242+ const unsigned char *name;
51243+
51244 next = list_entry(p, struct dentry, d_u.d_child);
51245 if (d_unhashed(next) || !next->d_inode)
51246 continue;
51247
51248 spin_unlock(&dcache_lock);
51249- if (filldir(dirent, next->d_name.name,
51250+ name = next->d_name.name;
51251+ if (name == next->d_iname) {
51252+ memcpy(d_name, name, next->d_name.len);
51253+ name = d_name;
51254+ }
51255+ if (filldir(dirent, name,
51256 next->d_name.len, filp->f_pos,
51257 next->d_inode->i_ino,
51258 dt_type(next->d_inode)) < 0)
51259diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51260index c325a83..d15b07b 100644
51261--- a/fs/lockd/clntproc.c
51262+++ b/fs/lockd/clntproc.c
51263@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51264 /*
51265 * Cookie counter for NLM requests
51266 */
51267-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51268+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51269
51270 void nlmclnt_next_cookie(struct nlm_cookie *c)
51271 {
51272- u32 cookie = atomic_inc_return(&nlm_cookie);
51273+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51274
51275 memcpy(c->data, &cookie, 4);
51276 c->len=4;
51277@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51278 struct nlm_rqst reqst, *req;
51279 int status;
51280
51281+ pax_track_stack();
51282+
51283 req = &reqst;
51284 memset(req, 0, sizeof(*req));
51285 locks_init_lock(&req->a_args.lock.fl);
51286diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51287index 1a54ae1..6a16c27 100644
51288--- a/fs/lockd/svc.c
51289+++ b/fs/lockd/svc.c
51290@@ -43,7 +43,7 @@
51291
51292 static struct svc_program nlmsvc_program;
51293
51294-struct nlmsvc_binding * nlmsvc_ops;
51295+const struct nlmsvc_binding * nlmsvc_ops;
51296 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51297
51298 static DEFINE_MUTEX(nlmsvc_mutex);
51299diff --git a/fs/locks.c b/fs/locks.c
51300index a8794f2..4041e55 100644
51301--- a/fs/locks.c
51302+++ b/fs/locks.c
51303@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51304
51305 static struct kmem_cache *filelock_cache __read_mostly;
51306
51307+static void locks_init_lock_always(struct file_lock *fl)
51308+{
51309+ fl->fl_next = NULL;
51310+ fl->fl_fasync = NULL;
51311+ fl->fl_owner = NULL;
51312+ fl->fl_pid = 0;
51313+ fl->fl_nspid = NULL;
51314+ fl->fl_file = NULL;
51315+ fl->fl_flags = 0;
51316+ fl->fl_type = 0;
51317+ fl->fl_start = fl->fl_end = 0;
51318+}
51319+
51320 /* Allocate an empty lock structure. */
51321 static struct file_lock *locks_alloc_lock(void)
51322 {
51323- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51324+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51325+
51326+ if (fl)
51327+ locks_init_lock_always(fl);
51328+
51329+ return fl;
51330 }
51331
51332 void locks_release_private(struct file_lock *fl)
51333@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51334 INIT_LIST_HEAD(&fl->fl_link);
51335 INIT_LIST_HEAD(&fl->fl_block);
51336 init_waitqueue_head(&fl->fl_wait);
51337- fl->fl_next = NULL;
51338- fl->fl_fasync = NULL;
51339- fl->fl_owner = NULL;
51340- fl->fl_pid = 0;
51341- fl->fl_nspid = NULL;
51342- fl->fl_file = NULL;
51343- fl->fl_flags = 0;
51344- fl->fl_type = 0;
51345- fl->fl_start = fl->fl_end = 0;
51346 fl->fl_ops = NULL;
51347 fl->fl_lmops = NULL;
51348+ locks_init_lock_always(fl);
51349 }
51350
51351 EXPORT_SYMBOL(locks_init_lock);
51352@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51353 return;
51354
51355 if (filp->f_op && filp->f_op->flock) {
51356- struct file_lock fl = {
51357+ struct file_lock flock = {
51358 .fl_pid = current->tgid,
51359 .fl_file = filp,
51360 .fl_flags = FL_FLOCK,
51361 .fl_type = F_UNLCK,
51362 .fl_end = OFFSET_MAX,
51363 };
51364- filp->f_op->flock(filp, F_SETLKW, &fl);
51365- if (fl.fl_ops && fl.fl_ops->fl_release_private)
51366- fl.fl_ops->fl_release_private(&fl);
51367+ filp->f_op->flock(filp, F_SETLKW, &flock);
51368+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
51369+ flock.fl_ops->fl_release_private(&flock);
51370 }
51371
51372 lock_kernel();
51373diff --git a/fs/mbcache.c b/fs/mbcache.c
51374index ec88ff3..b843a82 100644
51375--- a/fs/mbcache.c
51376+++ b/fs/mbcache.c
51377@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51378 if (!cache)
51379 goto fail;
51380 cache->c_name = name;
51381- cache->c_op.free = NULL;
51382+ *(void **)&cache->c_op.free = NULL;
51383 if (cache_op)
51384- cache->c_op.free = cache_op->free;
51385+ *(void **)&cache->c_op.free = cache_op->free;
51386 atomic_set(&cache->c_entry_count, 0);
51387 cache->c_bucket_bits = bucket_bits;
51388 #ifdef MB_CACHE_INDEXES_COUNT
51389diff --git a/fs/namei.c b/fs/namei.c
51390index b0afbd4..8d065a1 100644
51391--- a/fs/namei.c
51392+++ b/fs/namei.c
51393@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51394 return ret;
51395
51396 /*
51397+ * Searching includes executable on directories, else just read.
51398+ */
51399+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51400+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51401+ if (capable(CAP_DAC_READ_SEARCH))
51402+ return 0;
51403+
51404+ /*
51405 * Read/write DACs are always overridable.
51406 * Executable DACs are overridable if at least one exec bit is set.
51407 */
51408@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51409 if (capable(CAP_DAC_OVERRIDE))
51410 return 0;
51411
51412- /*
51413- * Searching includes executable on directories, else just read.
51414- */
51415- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51416- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51417- if (capable(CAP_DAC_READ_SEARCH))
51418- return 0;
51419-
51420 return -EACCES;
51421 }
51422
51423@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51424 if (!ret)
51425 goto ok;
51426
51427- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51428+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51429+ capable(CAP_DAC_OVERRIDE))
51430 goto ok;
51431
51432 return ret;
51433@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51434 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51435 error = PTR_ERR(cookie);
51436 if (!IS_ERR(cookie)) {
51437- char *s = nd_get_link(nd);
51438+ const char *s = nd_get_link(nd);
51439 error = 0;
51440 if (s)
51441 error = __vfs_follow_link(nd, s);
51442@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51443 err = security_inode_follow_link(path->dentry, nd);
51444 if (err)
51445 goto loop;
51446+
51447+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51448+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51449+ err = -EACCES;
51450+ goto loop;
51451+ }
51452+
51453 current->link_count++;
51454 current->total_link_count++;
51455 nd->depth++;
51456@@ -1016,11 +1024,19 @@ return_reval:
51457 break;
51458 }
51459 return_base:
51460+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51461+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51462+ path_put(&nd->path);
51463+ return -ENOENT;
51464+ }
51465 return 0;
51466 out_dput:
51467 path_put_conditional(&next, nd);
51468 break;
51469 }
51470+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51471+ err = -ENOENT;
51472+
51473 path_put(&nd->path);
51474 return_err:
51475 return err;
51476@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51477 int retval = path_init(dfd, name, flags, nd);
51478 if (!retval)
51479 retval = path_walk(name, nd);
51480- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51481- nd->path.dentry->d_inode))
51482- audit_inode(name, nd->path.dentry);
51483+
51484+ if (likely(!retval)) {
51485+ if (nd->path.dentry && nd->path.dentry->d_inode) {
51486+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51487+ retval = -ENOENT;
51488+ if (!audit_dummy_context())
51489+ audit_inode(name, nd->path.dentry);
51490+ }
51491+ }
51492 if (nd->root.mnt) {
51493 path_put(&nd->root);
51494 nd->root.mnt = NULL;
51495 }
51496+
51497 return retval;
51498 }
51499
51500@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51501 if (error)
51502 goto err_out;
51503
51504+
51505+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51506+ error = -EPERM;
51507+ goto err_out;
51508+ }
51509+ if (gr_handle_rawio(inode)) {
51510+ error = -EPERM;
51511+ goto err_out;
51512+ }
51513+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51514+ error = -EACCES;
51515+ goto err_out;
51516+ }
51517+
51518 if (flag & O_TRUNC) {
51519 error = get_write_access(inode);
51520 if (error)
51521@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51522 {
51523 int error;
51524 struct dentry *dir = nd->path.dentry;
51525+ int acc_mode = ACC_MODE(flag);
51526+
51527+ if (flag & O_TRUNC)
51528+ acc_mode |= MAY_WRITE;
51529+ if (flag & O_APPEND)
51530+ acc_mode |= MAY_APPEND;
51531+
51532+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51533+ error = -EACCES;
51534+ goto out_unlock;
51535+ }
51536
51537 if (!IS_POSIXACL(dir->d_inode))
51538 mode &= ~current_umask();
51539@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51540 if (error)
51541 goto out_unlock;
51542 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51543+ if (!error)
51544+ gr_handle_create(path->dentry, nd->path.mnt);
51545 out_unlock:
51546 mutex_unlock(&dir->d_inode->i_mutex);
51547 dput(nd->path.dentry);
51548@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51549 &nd, flag);
51550 if (error)
51551 return ERR_PTR(error);
51552+
51553+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51554+ error = -EPERM;
51555+ goto exit;
51556+ }
51557+
51558+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51559+ error = -EPERM;
51560+ goto exit;
51561+ }
51562+
51563+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51564+ error = -EACCES;
51565+ goto exit;
51566+ }
51567+
51568 goto ok;
51569 }
51570
51571@@ -1795,6 +1861,19 @@ do_last:
51572 /*
51573 * It already exists.
51574 */
51575+
51576+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51577+ error = -ENOENT;
51578+ goto exit_mutex_unlock;
51579+ }
51580+
51581+ /* only check if O_CREAT is specified, all other checks need
51582+ to go into may_open */
51583+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51584+ error = -EACCES;
51585+ goto exit_mutex_unlock;
51586+ }
51587+
51588 mutex_unlock(&dir->d_inode->i_mutex);
51589 audit_inode(pathname, path.dentry);
51590
51591@@ -1887,6 +1966,13 @@ do_link:
51592 error = security_inode_follow_link(path.dentry, &nd);
51593 if (error)
51594 goto exit_dput;
51595+
51596+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51597+ path.dentry, nd.path.mnt)) {
51598+ error = -EACCES;
51599+ goto exit_dput;
51600+ }
51601+
51602 error = __do_follow_link(&path, &nd);
51603 if (error) {
51604 /* Does someone understand code flow here? Or it is only
51605@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51606 }
51607 return dentry;
51608 eexist:
51609+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51610+ dput(dentry);
51611+ return ERR_PTR(-ENOENT);
51612+ }
51613 dput(dentry);
51614 dentry = ERR_PTR(-EEXIST);
51615 fail:
51616@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51617 error = may_mknod(mode);
51618 if (error)
51619 goto out_dput;
51620+
51621+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51622+ error = -EPERM;
51623+ goto out_dput;
51624+ }
51625+
51626+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51627+ error = -EACCES;
51628+ goto out_dput;
51629+ }
51630+
51631 error = mnt_want_write(nd.path.mnt);
51632 if (error)
51633 goto out_dput;
51634@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51635 }
51636 out_drop_write:
51637 mnt_drop_write(nd.path.mnt);
51638+
51639+ if (!error)
51640+ gr_handle_create(dentry, nd.path.mnt);
51641 out_dput:
51642 dput(dentry);
51643 out_unlock:
51644@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51645 if (IS_ERR(dentry))
51646 goto out_unlock;
51647
51648+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51649+ error = -EACCES;
51650+ goto out_dput;
51651+ }
51652+
51653 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51654 mode &= ~current_umask();
51655 error = mnt_want_write(nd.path.mnt);
51656@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51657 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51658 out_drop_write:
51659 mnt_drop_write(nd.path.mnt);
51660+
51661+ if (!error)
51662+ gr_handle_create(dentry, nd.path.mnt);
51663+
51664 out_dput:
51665 dput(dentry);
51666 out_unlock:
51667@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51668 char * name;
51669 struct dentry *dentry;
51670 struct nameidata nd;
51671+ ino_t saved_ino = 0;
51672+ dev_t saved_dev = 0;
51673
51674 error = user_path_parent(dfd, pathname, &nd, &name);
51675 if (error)
51676@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51677 error = PTR_ERR(dentry);
51678 if (IS_ERR(dentry))
51679 goto exit2;
51680+
51681+ if (dentry->d_inode != NULL) {
51682+ saved_ino = dentry->d_inode->i_ino;
51683+ saved_dev = gr_get_dev_from_dentry(dentry);
51684+
51685+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51686+ error = -EACCES;
51687+ goto exit3;
51688+ }
51689+ }
51690+
51691 error = mnt_want_write(nd.path.mnt);
51692 if (error)
51693 goto exit3;
51694@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51695 if (error)
51696 goto exit4;
51697 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51698+ if (!error && (saved_dev || saved_ino))
51699+ gr_handle_delete(saved_ino, saved_dev);
51700 exit4:
51701 mnt_drop_write(nd.path.mnt);
51702 exit3:
51703@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51704 struct dentry *dentry;
51705 struct nameidata nd;
51706 struct inode *inode = NULL;
51707+ ino_t saved_ino = 0;
51708+ dev_t saved_dev = 0;
51709
51710 error = user_path_parent(dfd, pathname, &nd, &name);
51711 if (error)
51712@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51713 if (nd.last.name[nd.last.len])
51714 goto slashes;
51715 inode = dentry->d_inode;
51716- if (inode)
51717+ if (inode) {
51718+ if (inode->i_nlink <= 1) {
51719+ saved_ino = inode->i_ino;
51720+ saved_dev = gr_get_dev_from_dentry(dentry);
51721+ }
51722+
51723 atomic_inc(&inode->i_count);
51724+
51725+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51726+ error = -EACCES;
51727+ goto exit2;
51728+ }
51729+ }
51730 error = mnt_want_write(nd.path.mnt);
51731 if (error)
51732 goto exit2;
51733@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51734 if (error)
51735 goto exit3;
51736 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51737+ if (!error && (saved_ino || saved_dev))
51738+ gr_handle_delete(saved_ino, saved_dev);
51739 exit3:
51740 mnt_drop_write(nd.path.mnt);
51741 exit2:
51742@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51743 if (IS_ERR(dentry))
51744 goto out_unlock;
51745
51746+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51747+ error = -EACCES;
51748+ goto out_dput;
51749+ }
51750+
51751 error = mnt_want_write(nd.path.mnt);
51752 if (error)
51753 goto out_dput;
51754@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51755 if (error)
51756 goto out_drop_write;
51757 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
51758+ if (!error)
51759+ gr_handle_create(dentry, nd.path.mnt);
51760 out_drop_write:
51761 mnt_drop_write(nd.path.mnt);
51762 out_dput:
51763@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51764 error = PTR_ERR(new_dentry);
51765 if (IS_ERR(new_dentry))
51766 goto out_unlock;
51767+
51768+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
51769+ old_path.dentry->d_inode,
51770+ old_path.dentry->d_inode->i_mode, to)) {
51771+ error = -EACCES;
51772+ goto out_dput;
51773+ }
51774+
51775+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
51776+ old_path.dentry, old_path.mnt, to)) {
51777+ error = -EACCES;
51778+ goto out_dput;
51779+ }
51780+
51781 error = mnt_want_write(nd.path.mnt);
51782 if (error)
51783 goto out_dput;
51784@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51785 if (error)
51786 goto out_drop_write;
51787 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
51788+ if (!error)
51789+ gr_handle_create(new_dentry, nd.path.mnt);
51790 out_drop_write:
51791 mnt_drop_write(nd.path.mnt);
51792 out_dput:
51793@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51794 char *to;
51795 int error;
51796
51797+ pax_track_stack();
51798+
51799 error = user_path_parent(olddfd, oldname, &oldnd, &from);
51800 if (error)
51801 goto exit;
51802@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51803 if (new_dentry == trap)
51804 goto exit5;
51805
51806+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
51807+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
51808+ to);
51809+ if (error)
51810+ goto exit5;
51811+
51812 error = mnt_want_write(oldnd.path.mnt);
51813 if (error)
51814 goto exit5;
51815@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51816 goto exit6;
51817 error = vfs_rename(old_dir->d_inode, old_dentry,
51818 new_dir->d_inode, new_dentry);
51819+ if (!error)
51820+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
51821+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
51822 exit6:
51823 mnt_drop_write(oldnd.path.mnt);
51824 exit5:
51825@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
51826
51827 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
51828 {
51829+ char tmpbuf[64];
51830+ const char *newlink;
51831 int len;
51832
51833 len = PTR_ERR(link);
51834@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
51835 len = strlen(link);
51836 if (len > (unsigned) buflen)
51837 len = buflen;
51838- if (copy_to_user(buffer, link, len))
51839+
51840+ if (len < sizeof(tmpbuf)) {
51841+ memcpy(tmpbuf, link, len);
51842+ newlink = tmpbuf;
51843+ } else
51844+ newlink = link;
51845+
51846+ if (copy_to_user(buffer, newlink, len))
51847 len = -EFAULT;
51848 out:
51849 return len;
51850diff --git a/fs/namespace.c b/fs/namespace.c
51851index 2beb0fb..11a95a5 100644
51852--- a/fs/namespace.c
51853+++ b/fs/namespace.c
51854@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51855 if (!(sb->s_flags & MS_RDONLY))
51856 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
51857 up_write(&sb->s_umount);
51858+
51859+ gr_log_remount(mnt->mnt_devname, retval);
51860+
51861 return retval;
51862 }
51863
51864@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51865 security_sb_umount_busy(mnt);
51866 up_write(&namespace_sem);
51867 release_mounts(&umount_list);
51868+
51869+ gr_log_unmount(mnt->mnt_devname, retval);
51870+
51871 return retval;
51872 }
51873
51874@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51875 if (retval)
51876 goto dput_out;
51877
51878+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
51879+ retval = -EPERM;
51880+ goto dput_out;
51881+ }
51882+
51883+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
51884+ retval = -EPERM;
51885+ goto dput_out;
51886+ }
51887+
51888 if (flags & MS_REMOUNT)
51889 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
51890 data_page);
51891@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51892 dev_name, data_page);
51893 dput_out:
51894 path_put(&path);
51895+
51896+ gr_log_mount(dev_name, dir_name, retval);
51897+
51898 return retval;
51899 }
51900
51901@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
51902 goto out1;
51903 }
51904
51905+ if (gr_handle_chroot_pivot()) {
51906+ error = -EPERM;
51907+ path_put(&old);
51908+ goto out1;
51909+ }
51910+
51911 read_lock(&current->fs->lock);
51912 root = current->fs->root;
51913 path_get(&current->fs->root);
51914diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
51915index b8b5b30..2bd9ccb 100644
51916--- a/fs/ncpfs/dir.c
51917+++ b/fs/ncpfs/dir.c
51918@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
51919 int res, val = 0, len;
51920 __u8 __name[NCP_MAXPATHLEN + 1];
51921
51922+ pax_track_stack();
51923+
51924 parent = dget_parent(dentry);
51925 dir = parent->d_inode;
51926
51927@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
51928 int error, res, len;
51929 __u8 __name[NCP_MAXPATHLEN + 1];
51930
51931+ pax_track_stack();
51932+
51933 lock_kernel();
51934 error = -EIO;
51935 if (!ncp_conn_valid(server))
51936@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
51937 int error, result, len;
51938 int opmode;
51939 __u8 __name[NCP_MAXPATHLEN + 1];
51940-
51941+
51942 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
51943 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
51944
51945+ pax_track_stack();
51946+
51947 error = -EIO;
51948 lock_kernel();
51949 if (!ncp_conn_valid(server))
51950@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51951 int error, len;
51952 __u8 __name[NCP_MAXPATHLEN + 1];
51953
51954+ pax_track_stack();
51955+
51956 DPRINTK("ncp_mkdir: making %s/%s\n",
51957 dentry->d_parent->d_name.name, dentry->d_name.name);
51958
51959@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51960 if (!ncp_conn_valid(server))
51961 goto out;
51962
51963+ pax_track_stack();
51964+
51965 ncp_age_dentry(server, dentry);
51966 len = sizeof(__name);
51967 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
51968@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
51969 int old_len, new_len;
51970 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
51971
51972+ pax_track_stack();
51973+
51974 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
51975 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
51976 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
51977diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
51978index cf98da1..da890a9 100644
51979--- a/fs/ncpfs/inode.c
51980+++ b/fs/ncpfs/inode.c
51981@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
51982 #endif
51983 struct ncp_entry_info finfo;
51984
51985+ pax_track_stack();
51986+
51987 data.wdog_pid = NULL;
51988 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
51989 if (!server)
51990diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
51991index bfaef7b..e9d03ca 100644
51992--- a/fs/nfs/inode.c
51993+++ b/fs/nfs/inode.c
51994@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
51995 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
51996 nfsi->attrtimeo_timestamp = jiffies;
51997
51998- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
51999+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
52000 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
52001 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
52002 else
52003@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
52004 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
52005 }
52006
52007-static atomic_long_t nfs_attr_generation_counter;
52008+static atomic_long_unchecked_t nfs_attr_generation_counter;
52009
52010 static unsigned long nfs_read_attr_generation_counter(void)
52011 {
52012- return atomic_long_read(&nfs_attr_generation_counter);
52013+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
52014 }
52015
52016 unsigned long nfs_inc_attr_generation_counter(void)
52017 {
52018- return atomic_long_inc_return(&nfs_attr_generation_counter);
52019+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
52020 }
52021
52022 void nfs_fattr_init(struct nfs_fattr *fattr)
52023diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
52024index cc2f505..f6a236f 100644
52025--- a/fs/nfsd/lockd.c
52026+++ b/fs/nfsd/lockd.c
52027@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
52028 fput(filp);
52029 }
52030
52031-static struct nlmsvc_binding nfsd_nlm_ops = {
52032+static const struct nlmsvc_binding nfsd_nlm_ops = {
52033 .fopen = nlm_fopen, /* open file for locking */
52034 .fclose = nlm_fclose, /* close file */
52035 };
52036diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
52037index cfc3391..dcc083a 100644
52038--- a/fs/nfsd/nfs4state.c
52039+++ b/fs/nfsd/nfs4state.c
52040@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
52041 unsigned int cmd;
52042 int err;
52043
52044+ pax_track_stack();
52045+
52046 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
52047 (long long) lock->lk_offset,
52048 (long long) lock->lk_length);
52049diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
52050index 4a82a96..0d5fb49 100644
52051--- a/fs/nfsd/nfs4xdr.c
52052+++ b/fs/nfsd/nfs4xdr.c
52053@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
52054 struct nfsd4_compoundres *resp = rqstp->rq_resp;
52055 u32 minorversion = resp->cstate.minorversion;
52056
52057+ pax_track_stack();
52058+
52059 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
52060 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
52061 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
52062diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
52063index 2e09588..596421d 100644
52064--- a/fs/nfsd/vfs.c
52065+++ b/fs/nfsd/vfs.c
52066@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52067 } else {
52068 oldfs = get_fs();
52069 set_fs(KERNEL_DS);
52070- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
52071+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
52072 set_fs(oldfs);
52073 }
52074
52075@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
52076
52077 /* Write the data. */
52078 oldfs = get_fs(); set_fs(KERNEL_DS);
52079- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52080+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52081 set_fs(oldfs);
52082 if (host_err < 0)
52083 goto out_nfserr;
52084@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52085 */
52086
52087 oldfs = get_fs(); set_fs(KERNEL_DS);
52088- host_err = inode->i_op->readlink(dentry, buf, *lenp);
52089+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52090 set_fs(oldfs);
52091
52092 if (host_err < 0)
52093diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52094index f6af760..d0adf34 100644
52095--- a/fs/nilfs2/ioctl.c
52096+++ b/fs/nilfs2/ioctl.c
52097@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52098 unsigned int cmd, void __user *argp)
52099 {
52100 struct nilfs_argv argv[5];
52101- const static size_t argsz[5] = {
52102+ static const size_t argsz[5] = {
52103 sizeof(struct nilfs_vdesc),
52104 sizeof(struct nilfs_period),
52105 sizeof(__u64),
52106@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52107 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52108 goto out_free;
52109
52110+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52111+ goto out_free;
52112+
52113 len = argv[n].v_size * argv[n].v_nmembs;
52114 base = (void __user *)(unsigned long)argv[n].v_base;
52115 if (len == 0) {
52116diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52117index 7e54e52..9337248 100644
52118--- a/fs/notify/dnotify/dnotify.c
52119+++ b/fs/notify/dnotify/dnotify.c
52120@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52121 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52122 }
52123
52124-static struct fsnotify_ops dnotify_fsnotify_ops = {
52125+static const struct fsnotify_ops dnotify_fsnotify_ops = {
52126 .handle_event = dnotify_handle_event,
52127 .should_send_event = dnotify_should_send_event,
52128 .free_group_priv = NULL,
52129diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52130index b8bf53b..c518688 100644
52131--- a/fs/notify/notification.c
52132+++ b/fs/notify/notification.c
52133@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52134 * get set to 0 so it will never get 'freed'
52135 */
52136 static struct fsnotify_event q_overflow_event;
52137-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52138+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52139
52140 /**
52141 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52142@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52143 */
52144 u32 fsnotify_get_cookie(void)
52145 {
52146- return atomic_inc_return(&fsnotify_sync_cookie);
52147+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52148 }
52149 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52150
52151diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52152index 5a9e344..0f8cd28 100644
52153--- a/fs/ntfs/dir.c
52154+++ b/fs/ntfs/dir.c
52155@@ -1328,7 +1328,7 @@ find_next_index_buffer:
52156 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52157 ~(s64)(ndir->itype.index.block_size - 1)));
52158 /* Bounds checks. */
52159- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52160+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52161 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52162 "inode 0x%lx or driver bug.", vdir->i_ino);
52163 goto err_out;
52164diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52165index 663c0e3..b6868e9 100644
52166--- a/fs/ntfs/file.c
52167+++ b/fs/ntfs/file.c
52168@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52169 #endif /* NTFS_RW */
52170 };
52171
52172-const struct file_operations ntfs_empty_file_ops = {};
52173+const struct file_operations ntfs_empty_file_ops __read_only;
52174
52175-const struct inode_operations ntfs_empty_inode_ops = {};
52176+const struct inode_operations ntfs_empty_inode_ops __read_only;
52177diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52178index 1cd2934..880b5d2 100644
52179--- a/fs/ocfs2/cluster/masklog.c
52180+++ b/fs/ocfs2/cluster/masklog.c
52181@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52182 return mlog_mask_store(mlog_attr->mask, buf, count);
52183 }
52184
52185-static struct sysfs_ops mlog_attr_ops = {
52186+static const struct sysfs_ops mlog_attr_ops = {
52187 .show = mlog_show,
52188 .store = mlog_store,
52189 };
52190diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52191index ac10f83..2cd2607 100644
52192--- a/fs/ocfs2/localalloc.c
52193+++ b/fs/ocfs2/localalloc.c
52194@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52195 goto bail;
52196 }
52197
52198- atomic_inc(&osb->alloc_stats.moves);
52199+ atomic_inc_unchecked(&osb->alloc_stats.moves);
52200
52201 status = 0;
52202 bail:
52203diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52204index f010b22..9f9ed34 100644
52205--- a/fs/ocfs2/namei.c
52206+++ b/fs/ocfs2/namei.c
52207@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52208 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52209 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52210
52211+ pax_track_stack();
52212+
52213 /* At some point it might be nice to break this function up a
52214 * bit. */
52215
52216diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52217index d963d86..914cfbd 100644
52218--- a/fs/ocfs2/ocfs2.h
52219+++ b/fs/ocfs2/ocfs2.h
52220@@ -217,11 +217,11 @@ enum ocfs2_vol_state
52221
52222 struct ocfs2_alloc_stats
52223 {
52224- atomic_t moves;
52225- atomic_t local_data;
52226- atomic_t bitmap_data;
52227- atomic_t bg_allocs;
52228- atomic_t bg_extends;
52229+ atomic_unchecked_t moves;
52230+ atomic_unchecked_t local_data;
52231+ atomic_unchecked_t bitmap_data;
52232+ atomic_unchecked_t bg_allocs;
52233+ atomic_unchecked_t bg_extends;
52234 };
52235
52236 enum ocfs2_local_alloc_state
52237diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52238index 79b5dac..d322952 100644
52239--- a/fs/ocfs2/suballoc.c
52240+++ b/fs/ocfs2/suballoc.c
52241@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52242 mlog_errno(status);
52243 goto bail;
52244 }
52245- atomic_inc(&osb->alloc_stats.bg_extends);
52246+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52247
52248 /* You should never ask for this much metadata */
52249 BUG_ON(bits_wanted >
52250@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52251 mlog_errno(status);
52252 goto bail;
52253 }
52254- atomic_inc(&osb->alloc_stats.bg_allocs);
52255+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52256
52257 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52258 ac->ac_bits_given += (*num_bits);
52259@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52260 mlog_errno(status);
52261 goto bail;
52262 }
52263- atomic_inc(&osb->alloc_stats.bg_allocs);
52264+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52265
52266 BUG_ON(num_bits != 1);
52267
52268@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52269 cluster_start,
52270 num_clusters);
52271 if (!status)
52272- atomic_inc(&osb->alloc_stats.local_data);
52273+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
52274 } else {
52275 if (min_clusters > (osb->bitmap_cpg - 1)) {
52276 /* The only paths asking for contiguousness
52277@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52278 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52279 bg_blkno,
52280 bg_bit_off);
52281- atomic_inc(&osb->alloc_stats.bitmap_data);
52282+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52283 }
52284 }
52285 if (status < 0) {
52286diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52287index 9f55be4..a3f8048 100644
52288--- a/fs/ocfs2/super.c
52289+++ b/fs/ocfs2/super.c
52290@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52291 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52292 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52293 "Stats",
52294- atomic_read(&osb->alloc_stats.bitmap_data),
52295- atomic_read(&osb->alloc_stats.local_data),
52296- atomic_read(&osb->alloc_stats.bg_allocs),
52297- atomic_read(&osb->alloc_stats.moves),
52298- atomic_read(&osb->alloc_stats.bg_extends));
52299+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52300+ atomic_read_unchecked(&osb->alloc_stats.local_data),
52301+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52302+ atomic_read_unchecked(&osb->alloc_stats.moves),
52303+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52304
52305 out += snprintf(buf + out, len - out,
52306 "%10s => State: %u Descriptor: %llu Size: %u bits "
52307@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52308 spin_lock_init(&osb->osb_xattr_lock);
52309 ocfs2_init_inode_steal_slot(osb);
52310
52311- atomic_set(&osb->alloc_stats.moves, 0);
52312- atomic_set(&osb->alloc_stats.local_data, 0);
52313- atomic_set(&osb->alloc_stats.bitmap_data, 0);
52314- atomic_set(&osb->alloc_stats.bg_allocs, 0);
52315- atomic_set(&osb->alloc_stats.bg_extends, 0);
52316+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52317+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52318+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52319+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52320+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52321
52322 /* Copy the blockcheck stats from the superblock probe */
52323 osb->osb_ecc_stats = *stats;
52324diff --git a/fs/open.c b/fs/open.c
52325index 4f01e06..091f6c3 100644
52326--- a/fs/open.c
52327+++ b/fs/open.c
52328@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52329 error = locks_verify_truncate(inode, NULL, length);
52330 if (!error)
52331 error = security_path_truncate(&path, length, 0);
52332+
52333+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52334+ error = -EACCES;
52335+
52336 if (!error) {
52337 vfs_dq_init(inode);
52338 error = do_truncate(path.dentry, length, 0, NULL);
52339@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52340 if (__mnt_is_readonly(path.mnt))
52341 res = -EROFS;
52342
52343+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52344+ res = -EACCES;
52345+
52346 out_path_release:
52347 path_put(&path);
52348 out:
52349@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52350 if (error)
52351 goto dput_and_out;
52352
52353+ gr_log_chdir(path.dentry, path.mnt);
52354+
52355 set_fs_pwd(current->fs, &path);
52356
52357 dput_and_out:
52358@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52359 goto out_putf;
52360
52361 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52362+
52363+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52364+ error = -EPERM;
52365+
52366+ if (!error)
52367+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52368+
52369 if (!error)
52370 set_fs_pwd(current->fs, &file->f_path);
52371 out_putf:
52372@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52373 if (!capable(CAP_SYS_CHROOT))
52374 goto dput_and_out;
52375
52376+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52377+ goto dput_and_out;
52378+
52379 set_fs_root(current->fs, &path);
52380+
52381+ gr_handle_chroot_chdir(&path);
52382+
52383 error = 0;
52384 dput_and_out:
52385 path_put(&path);
52386@@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52387 err = mnt_want_write_file(file);
52388 if (err)
52389 goto out_putf;
52390+
52391 mutex_lock(&inode->i_mutex);
52392+
52393+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
52394+ err = -EACCES;
52395+ goto out_unlock;
52396+ }
52397+
52398 if (mode == (mode_t) -1)
52399 mode = inode->i_mode;
52400+
52401+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
52402+ err = -EPERM;
52403+ goto out_unlock;
52404+ }
52405+
52406 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52407 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52408 err = notify_change(dentry, &newattrs);
52409+
52410+out_unlock:
52411 mutex_unlock(&inode->i_mutex);
52412 mnt_drop_write(file->f_path.mnt);
52413 out_putf:
52414@@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52415 error = mnt_want_write(path.mnt);
52416 if (error)
52417 goto dput_and_out;
52418+
52419 mutex_lock(&inode->i_mutex);
52420+
52421+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
52422+ error = -EACCES;
52423+ goto out_unlock;
52424+ }
52425+
52426 if (mode == (mode_t) -1)
52427 mode = inode->i_mode;
52428+
52429+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
52430+ error = -EACCES;
52431+ goto out_unlock;
52432+ }
52433+
52434 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52435 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52436 error = notify_change(path.dentry, &newattrs);
52437+
52438+out_unlock:
52439 mutex_unlock(&inode->i_mutex);
52440 mnt_drop_write(path.mnt);
52441 dput_and_out:
52442@@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52443 return sys_fchmodat(AT_FDCWD, filename, mode);
52444 }
52445
52446-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52447+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52448 {
52449 struct inode *inode = dentry->d_inode;
52450 int error;
52451 struct iattr newattrs;
52452
52453+ if (!gr_acl_handle_chown(dentry, mnt))
52454+ return -EACCES;
52455+
52456 newattrs.ia_valid = ATTR_CTIME;
52457 if (user != (uid_t) -1) {
52458 newattrs.ia_valid |= ATTR_UID;
52459@@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52460 error = mnt_want_write(path.mnt);
52461 if (error)
52462 goto out_release;
52463- error = chown_common(path.dentry, user, group);
52464+ error = chown_common(path.dentry, user, group, path.mnt);
52465 mnt_drop_write(path.mnt);
52466 out_release:
52467 path_put(&path);
52468@@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52469 error = mnt_want_write(path.mnt);
52470 if (error)
52471 goto out_release;
52472- error = chown_common(path.dentry, user, group);
52473+ error = chown_common(path.dentry, user, group, path.mnt);
52474 mnt_drop_write(path.mnt);
52475 out_release:
52476 path_put(&path);
52477@@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52478 error = mnt_want_write(path.mnt);
52479 if (error)
52480 goto out_release;
52481- error = chown_common(path.dentry, user, group);
52482+ error = chown_common(path.dentry, user, group, path.mnt);
52483 mnt_drop_write(path.mnt);
52484 out_release:
52485 path_put(&path);
52486@@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52487 goto out_fput;
52488 dentry = file->f_path.dentry;
52489 audit_inode(NULL, dentry);
52490- error = chown_common(dentry, user, group);
52491+ error = chown_common(dentry, user, group, file->f_path.mnt);
52492 mnt_drop_write(file->f_path.mnt);
52493 out_fput:
52494 fput(file);
52495@@ -1036,7 +1091,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52496 if (!IS_ERR(tmp)) {
52497 fd = get_unused_fd_flags(flags);
52498 if (fd >= 0) {
52499- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52500+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52501 if (IS_ERR(f)) {
52502 put_unused_fd(fd);
52503 fd = PTR_ERR(f);
52504diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52505index 6ab70f4..f4103d1 100644
52506--- a/fs/partitions/efi.c
52507+++ b/fs/partitions/efi.c
52508@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52509 if (!bdev || !gpt)
52510 return NULL;
52511
52512+ if (!le32_to_cpu(gpt->num_partition_entries))
52513+ return NULL;
52514+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52515+ if (!pte)
52516+ return NULL;
52517+
52518 count = le32_to_cpu(gpt->num_partition_entries) *
52519 le32_to_cpu(gpt->sizeof_partition_entry);
52520- if (!count)
52521- return NULL;
52522- pte = kzalloc(count, GFP_KERNEL);
52523- if (!pte)
52524- return NULL;
52525-
52526 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52527 (u8 *) pte,
52528 count) < count) {
52529diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52530index dd6efdb..3babc6c 100644
52531--- a/fs/partitions/ldm.c
52532+++ b/fs/partitions/ldm.c
52533@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52534 ldm_error ("A VBLK claims to have %d parts.", num);
52535 return false;
52536 }
52537+
52538 if (rec >= num) {
52539 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52540 return false;
52541@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52542 goto found;
52543 }
52544
52545- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52546+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52547 if (!f) {
52548 ldm_crit ("Out of memory.");
52549 return false;
52550diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52551index 5765198..7f8e9e0 100644
52552--- a/fs/partitions/mac.c
52553+++ b/fs/partitions/mac.c
52554@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52555 return 0; /* not a MacOS disk */
52556 }
52557 blocks_in_map = be32_to_cpu(part->map_count);
52558- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52559- put_dev_sector(sect);
52560- return 0;
52561- }
52562 printk(" [mac]");
52563+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52564+ put_dev_sector(sect);
52565+ return 0;
52566+ }
52567 for (slot = 1; slot <= blocks_in_map; ++slot) {
52568 int pos = slot * secsize;
52569 put_dev_sector(sect);
52570diff --git a/fs/pipe.c b/fs/pipe.c
52571index d0cc080..8a6f211 100644
52572--- a/fs/pipe.c
52573+++ b/fs/pipe.c
52574@@ -401,9 +401,9 @@ redo:
52575 }
52576 if (bufs) /* More to do? */
52577 continue;
52578- if (!pipe->writers)
52579+ if (!atomic_read(&pipe->writers))
52580 break;
52581- if (!pipe->waiting_writers) {
52582+ if (!atomic_read(&pipe->waiting_writers)) {
52583 /* syscall merging: Usually we must not sleep
52584 * if O_NONBLOCK is set, or if we got some data.
52585 * But if a writer sleeps in kernel space, then
52586@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52587 mutex_lock(&inode->i_mutex);
52588 pipe = inode->i_pipe;
52589
52590- if (!pipe->readers) {
52591+ if (!atomic_read(&pipe->readers)) {
52592 send_sig(SIGPIPE, current, 0);
52593 ret = -EPIPE;
52594 goto out;
52595@@ -511,7 +511,7 @@ redo1:
52596 for (;;) {
52597 int bufs;
52598
52599- if (!pipe->readers) {
52600+ if (!atomic_read(&pipe->readers)) {
52601 send_sig(SIGPIPE, current, 0);
52602 if (!ret)
52603 ret = -EPIPE;
52604@@ -597,9 +597,9 @@ redo2:
52605 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52606 do_wakeup = 0;
52607 }
52608- pipe->waiting_writers++;
52609+ atomic_inc(&pipe->waiting_writers);
52610 pipe_wait(pipe);
52611- pipe->waiting_writers--;
52612+ atomic_dec(&pipe->waiting_writers);
52613 }
52614 out:
52615 mutex_unlock(&inode->i_mutex);
52616@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52617 mask = 0;
52618 if (filp->f_mode & FMODE_READ) {
52619 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52620- if (!pipe->writers && filp->f_version != pipe->w_counter)
52621+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52622 mask |= POLLHUP;
52623 }
52624
52625@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52626 * Most Unices do not set POLLERR for FIFOs but on Linux they
52627 * behave exactly like pipes for poll().
52628 */
52629- if (!pipe->readers)
52630+ if (!atomic_read(&pipe->readers))
52631 mask |= POLLERR;
52632 }
52633
52634@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52635
52636 mutex_lock(&inode->i_mutex);
52637 pipe = inode->i_pipe;
52638- pipe->readers -= decr;
52639- pipe->writers -= decw;
52640+ atomic_sub(decr, &pipe->readers);
52641+ atomic_sub(decw, &pipe->writers);
52642
52643- if (!pipe->readers && !pipe->writers) {
52644+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52645 free_pipe_info(inode);
52646 } else {
52647 wake_up_interruptible_sync(&pipe->wait);
52648@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52649
52650 if (inode->i_pipe) {
52651 ret = 0;
52652- inode->i_pipe->readers++;
52653+ atomic_inc(&inode->i_pipe->readers);
52654 }
52655
52656 mutex_unlock(&inode->i_mutex);
52657@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52658
52659 if (inode->i_pipe) {
52660 ret = 0;
52661- inode->i_pipe->writers++;
52662+ atomic_inc(&inode->i_pipe->writers);
52663 }
52664
52665 mutex_unlock(&inode->i_mutex);
52666@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52667 if (inode->i_pipe) {
52668 ret = 0;
52669 if (filp->f_mode & FMODE_READ)
52670- inode->i_pipe->readers++;
52671+ atomic_inc(&inode->i_pipe->readers);
52672 if (filp->f_mode & FMODE_WRITE)
52673- inode->i_pipe->writers++;
52674+ atomic_inc(&inode->i_pipe->writers);
52675 }
52676
52677 mutex_unlock(&inode->i_mutex);
52678@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52679 inode->i_pipe = NULL;
52680 }
52681
52682-static struct vfsmount *pipe_mnt __read_mostly;
52683+struct vfsmount *pipe_mnt __read_mostly;
52684 static int pipefs_delete_dentry(struct dentry *dentry)
52685 {
52686 /*
52687@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52688 goto fail_iput;
52689 inode->i_pipe = pipe;
52690
52691- pipe->readers = pipe->writers = 1;
52692+ atomic_set(&pipe->readers, 1);
52693+ atomic_set(&pipe->writers, 1);
52694 inode->i_fop = &rdwr_pipefifo_fops;
52695
52696 /*
52697diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52698index 50f8f06..c5755df 100644
52699--- a/fs/proc/Kconfig
52700+++ b/fs/proc/Kconfig
52701@@ -30,12 +30,12 @@ config PROC_FS
52702
52703 config PROC_KCORE
52704 bool "/proc/kcore support" if !ARM
52705- depends on PROC_FS && MMU
52706+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
52707
52708 config PROC_VMCORE
52709 bool "/proc/vmcore support (EXPERIMENTAL)"
52710- depends on PROC_FS && CRASH_DUMP
52711- default y
52712+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
52713+ default n
52714 help
52715 Exports the dump image of crashed kernel in ELF format.
52716
52717@@ -59,8 +59,8 @@ config PROC_SYSCTL
52718 limited in memory.
52719
52720 config PROC_PAGE_MONITOR
52721- default y
52722- depends on PROC_FS && MMU
52723+ default n
52724+ depends on PROC_FS && MMU && !GRKERNSEC
52725 bool "Enable /proc page monitoring" if EMBEDDED
52726 help
52727 Various /proc files exist to monitor process memory utilization:
52728diff --git a/fs/proc/array.c b/fs/proc/array.c
52729index c5ef152..24a1b87 100644
52730--- a/fs/proc/array.c
52731+++ b/fs/proc/array.c
52732@@ -60,6 +60,7 @@
52733 #include <linux/tty.h>
52734 #include <linux/string.h>
52735 #include <linux/mman.h>
52736+#include <linux/grsecurity.h>
52737 #include <linux/proc_fs.h>
52738 #include <linux/ioport.h>
52739 #include <linux/uaccess.h>
52740@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
52741 p->nivcsw);
52742 }
52743
52744+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52745+static inline void task_pax(struct seq_file *m, struct task_struct *p)
52746+{
52747+ if (p->mm)
52748+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
52749+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
52750+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
52751+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
52752+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
52753+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
52754+ else
52755+ seq_printf(m, "PaX:\t-----\n");
52756+}
52757+#endif
52758+
52759 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52760 struct pid *pid, struct task_struct *task)
52761 {
52762@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52763 task_cap(m, task);
52764 cpuset_task_status_allowed(m, task);
52765 task_context_switch_counts(m, task);
52766+
52767+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52768+ task_pax(m, task);
52769+#endif
52770+
52771+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
52772+ task_grsec_rbac(m, task);
52773+#endif
52774+
52775 return 0;
52776 }
52777
52778+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52779+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52780+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52781+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52782+#endif
52783+
52784 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52785 struct pid *pid, struct task_struct *task, int whole)
52786 {
52787@@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52788 cputime_t cutime, cstime, utime, stime;
52789 cputime_t cgtime, gtime;
52790 unsigned long rsslim = 0;
52791- char tcomm[sizeof(task->comm)];
52792+ char tcomm[sizeof(task->comm)] = { 0 };
52793 unsigned long flags;
52794
52795+ pax_track_stack();
52796+
52797+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52798+ if (current->exec_id != m->exec_id) {
52799+ gr_log_badprocpid("stat");
52800+ return 0;
52801+ }
52802+#endif
52803+
52804 state = *get_task_state(task);
52805 vsize = eip = esp = 0;
52806 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
52807@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52808 gtime = task_gtime(task);
52809 }
52810
52811+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52812+ if (PAX_RAND_FLAGS(mm)) {
52813+ eip = 0;
52814+ esp = 0;
52815+ wchan = 0;
52816+ }
52817+#endif
52818+#ifdef CONFIG_GRKERNSEC_HIDESYM
52819+ wchan = 0;
52820+ eip =0;
52821+ esp =0;
52822+#endif
52823+
52824 /* scale priority and nice values from timeslices to -20..20 */
52825 /* to make it look like a "normal" Unix priority/nice value */
52826 priority = task_prio(task);
52827@@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52828 vsize,
52829 mm ? get_mm_rss(mm) : 0,
52830 rsslim,
52831+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52832+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
52833+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
52834+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
52835+#else
52836 mm ? (permitted ? mm->start_code : 1) : 0,
52837 mm ? (permitted ? mm->end_code : 1) : 0,
52838 (permitted && mm) ? mm->start_stack : 0,
52839+#endif
52840 esp,
52841 eip,
52842 /* The signal information here is obsolete.
52843@@ -519,6 +578,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52844 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
52845 struct mm_struct *mm = get_task_mm(task);
52846
52847+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52848+ if (current->exec_id != m->exec_id) {
52849+ gr_log_badprocpid("statm");
52850+ return 0;
52851+ }
52852+#endif
52853+
52854 if (mm) {
52855 size = task_statm(mm, &shared, &text, &data, &resident);
52856 mmput(mm);
52857@@ -528,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52858
52859 return 0;
52860 }
52861+
52862+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52863+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
52864+{
52865+ u32 curr_ip = 0;
52866+ unsigned long flags;
52867+
52868+ if (lock_task_sighand(task, &flags)) {
52869+ curr_ip = task->signal->curr_ip;
52870+ unlock_task_sighand(task, &flags);
52871+ }
52872+
52873+ return sprintf(buffer, "%pI4\n", &curr_ip);
52874+}
52875+#endif
52876diff --git a/fs/proc/base.c b/fs/proc/base.c
52877index 67f7dc0..67ab883 100644
52878--- a/fs/proc/base.c
52879+++ b/fs/proc/base.c
52880@@ -102,6 +102,22 @@ struct pid_entry {
52881 union proc_op op;
52882 };
52883
52884+struct getdents_callback {
52885+ struct linux_dirent __user * current_dir;
52886+ struct linux_dirent __user * previous;
52887+ struct file * file;
52888+ int count;
52889+ int error;
52890+};
52891+
52892+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
52893+ loff_t offset, u64 ino, unsigned int d_type)
52894+{
52895+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
52896+ buf->error = -EINVAL;
52897+ return 0;
52898+}
52899+
52900 #define NOD(NAME, MODE, IOP, FOP, OP) { \
52901 .name = (NAME), \
52902 .len = sizeof(NAME) - 1, \
52903@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
52904 if (task == current)
52905 return 0;
52906
52907+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
52908+ return -EPERM;
52909+
52910 /*
52911 * If current is actively ptrace'ing, and would also be
52912 * permitted to freshly attach with ptrace now, permit it.
52913@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
52914 if (!mm->arg_end)
52915 goto out_mm; /* Shh! No looking before we're done */
52916
52917+ if (gr_acl_handle_procpidmem(task))
52918+ goto out_mm;
52919+
52920 len = mm->arg_end - mm->arg_start;
52921
52922 if (len > PAGE_SIZE)
52923@@ -287,12 +309,28 @@ out:
52924 return res;
52925 }
52926
52927+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52928+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52929+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52930+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52931+#endif
52932+
52933 static int proc_pid_auxv(struct task_struct *task, char *buffer)
52934 {
52935 int res = 0;
52936 struct mm_struct *mm = get_task_mm(task);
52937 if (mm) {
52938 unsigned int nwords = 0;
52939+
52940+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52941+ /* allow if we're currently ptracing this task */
52942+ if (PAX_RAND_FLAGS(mm) &&
52943+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
52944+ mmput(mm);
52945+ return 0;
52946+ }
52947+#endif
52948+
52949 do {
52950 nwords += 2;
52951 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
52952@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
52953 }
52954
52955
52956-#ifdef CONFIG_KALLSYMS
52957+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52958 /*
52959 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
52960 * Returns the resolved symbol. If that fails, simply return the address.
52961@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
52962 mutex_unlock(&task->cred_guard_mutex);
52963 }
52964
52965-#ifdef CONFIG_STACKTRACE
52966+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52967
52968 #define MAX_STACK_TRACE_DEPTH 64
52969
52970@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
52971 return count;
52972 }
52973
52974-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52975+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52976 static int proc_pid_syscall(struct task_struct *task, char *buffer)
52977 {
52978 long nr;
52979@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
52980 /************************************************************************/
52981
52982 /* permission checks */
52983-static int proc_fd_access_allowed(struct inode *inode)
52984+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
52985 {
52986 struct task_struct *task;
52987 int allowed = 0;
52988@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
52989 */
52990 task = get_proc_task(inode);
52991 if (task) {
52992- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52993+ if (log)
52994+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
52995+ else
52996+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52997 put_task_struct(task);
52998 }
52999 return allowed;
53000@@ -809,6 +850,8 @@ static int mem_open(struct inode* inode, struct file* file)
53001 return 0;
53002 }
53003
53004+static int task_dumpable(struct task_struct *task);
53005+
53006 static ssize_t mem_read(struct file * file, char __user * buf,
53007 size_t count, loff_t *ppos)
53008 {
53009@@ -824,6 +867,12 @@ static ssize_t mem_read(struct file * file, char __user * buf,
53010 if (check_mem_permission(task))
53011 goto out;
53012
53013+ // XXX: temporary workaround
53014+ if (!task_dumpable(task) && task == current) {
53015+ ret = -EACCES;
53016+ goto out;
53017+ }
53018+
53019 ret = -ENOMEM;
53020 page = (char *)__get_free_page(GFP_TEMPORARY);
53021 if (!page)
53022@@ -963,6 +1012,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
53023 if (!task)
53024 goto out_no_task;
53025
53026+ if (gr_acl_handle_procpidmem(task))
53027+ goto out;
53028+
53029 if (!ptrace_may_access(task, PTRACE_MODE_READ))
53030 goto out;
53031
53032@@ -1377,7 +1429,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
53033 path_put(&nd->path);
53034
53035 /* Are we allowed to snoop on the tasks file descriptors? */
53036- if (!proc_fd_access_allowed(inode))
53037+ if (!proc_fd_access_allowed(inode,0))
53038 goto out;
53039
53040 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
53041@@ -1417,8 +1469,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
53042 struct path path;
53043
53044 /* Are we allowed to snoop on the tasks file descriptors? */
53045- if (!proc_fd_access_allowed(inode))
53046- goto out;
53047+ /* logging this is needed for learning on chromium to work properly,
53048+ but we don't want to flood the logs from 'ps' which does a readlink
53049+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
53050+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
53051+ */
53052+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
53053+ if (!proc_fd_access_allowed(inode,0))
53054+ goto out;
53055+ } else {
53056+ if (!proc_fd_access_allowed(inode,1))
53057+ goto out;
53058+ }
53059
53060 error = PROC_I(inode)->op.proc_get_link(inode, &path);
53061 if (error)
53062@@ -1483,7 +1545,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
53063 rcu_read_lock();
53064 cred = __task_cred(task);
53065 inode->i_uid = cred->euid;
53066+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53067+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53068+#else
53069 inode->i_gid = cred->egid;
53070+#endif
53071 rcu_read_unlock();
53072 }
53073 security_task_to_inode(task, inode);
53074@@ -1501,6 +1567,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53075 struct inode *inode = dentry->d_inode;
53076 struct task_struct *task;
53077 const struct cred *cred;
53078+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53079+ const struct cred *tmpcred = current_cred();
53080+#endif
53081
53082 generic_fillattr(inode, stat);
53083
53084@@ -1508,13 +1577,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53085 stat->uid = 0;
53086 stat->gid = 0;
53087 task = pid_task(proc_pid(inode), PIDTYPE_PID);
53088+
53089+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
53090+ rcu_read_unlock();
53091+ return -ENOENT;
53092+ }
53093+
53094 if (task) {
53095+ cred = __task_cred(task);
53096+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53097+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53098+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53099+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53100+#endif
53101+ ) {
53102+#endif
53103 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53104+#ifdef CONFIG_GRKERNSEC_PROC_USER
53105+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53106+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53107+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53108+#endif
53109 task_dumpable(task)) {
53110- cred = __task_cred(task);
53111 stat->uid = cred->euid;
53112+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53113+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53114+#else
53115 stat->gid = cred->egid;
53116+#endif
53117 }
53118+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53119+ } else {
53120+ rcu_read_unlock();
53121+ return -ENOENT;
53122+ }
53123+#endif
53124 }
53125 rcu_read_unlock();
53126 return 0;
53127@@ -1545,11 +1642,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53128
53129 if (task) {
53130 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53131+#ifdef CONFIG_GRKERNSEC_PROC_USER
53132+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53133+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53134+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53135+#endif
53136 task_dumpable(task)) {
53137 rcu_read_lock();
53138 cred = __task_cred(task);
53139 inode->i_uid = cred->euid;
53140+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53141+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53142+#else
53143 inode->i_gid = cred->egid;
53144+#endif
53145 rcu_read_unlock();
53146 } else {
53147 inode->i_uid = 0;
53148@@ -1670,7 +1776,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53149 int fd = proc_fd(inode);
53150
53151 if (task) {
53152- files = get_files_struct(task);
53153+ if (!gr_acl_handle_procpidmem(task))
53154+ files = get_files_struct(task);
53155 put_task_struct(task);
53156 }
53157 if (files) {
53158@@ -1922,12 +2029,22 @@ static const struct file_operations proc_fd_operations = {
53159 static int proc_fd_permission(struct inode *inode, int mask)
53160 {
53161 int rv;
53162+ struct task_struct *task;
53163
53164 rv = generic_permission(inode, mask, NULL);
53165- if (rv == 0)
53166- return 0;
53167+
53168 if (task_pid(current) == proc_pid(inode))
53169 rv = 0;
53170+
53171+ task = get_proc_task(inode);
53172+ if (task == NULL)
53173+ return rv;
53174+
53175+ if (gr_acl_handle_procpidmem(task))
53176+ rv = -EACCES;
53177+
53178+ put_task_struct(task);
53179+
53180 return rv;
53181 }
53182
53183@@ -2036,6 +2153,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53184 if (!task)
53185 goto out_no_task;
53186
53187+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53188+ goto out;
53189+
53190 /*
53191 * Yes, it does not scale. And it should not. Don't add
53192 * new entries into /proc/<tgid>/ without very good reasons.
53193@@ -2080,6 +2200,9 @@ static int proc_pident_readdir(struct file *filp,
53194 if (!task)
53195 goto out_no_task;
53196
53197+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53198+ goto out;
53199+
53200 ret = 0;
53201 i = filp->f_pos;
53202 switch (i) {
53203@@ -2347,7 +2470,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53204 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53205 void *cookie)
53206 {
53207- char *s = nd_get_link(nd);
53208+ const char *s = nd_get_link(nd);
53209 if (!IS_ERR(s))
53210 __putname(s);
53211 }
53212@@ -2553,7 +2676,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53213 #ifdef CONFIG_SCHED_DEBUG
53214 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53215 #endif
53216-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53217+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53218 INF("syscall", S_IRUGO, proc_pid_syscall),
53219 #endif
53220 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53221@@ -2578,10 +2701,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53222 #ifdef CONFIG_SECURITY
53223 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53224 #endif
53225-#ifdef CONFIG_KALLSYMS
53226+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53227 INF("wchan", S_IRUGO, proc_pid_wchan),
53228 #endif
53229-#ifdef CONFIG_STACKTRACE
53230+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53231 ONE("stack", S_IRUGO, proc_pid_stack),
53232 #endif
53233 #ifdef CONFIG_SCHEDSTATS
53234@@ -2611,6 +2734,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53235 #ifdef CONFIG_TASK_IO_ACCOUNTING
53236 INF("io", S_IRUSR, proc_tgid_io_accounting),
53237 #endif
53238+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53239+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53240+#endif
53241 };
53242
53243 static int proc_tgid_base_readdir(struct file * filp,
53244@@ -2735,7 +2861,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53245 if (!inode)
53246 goto out;
53247
53248+#ifdef CONFIG_GRKERNSEC_PROC_USER
53249+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53250+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53251+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53252+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53253+#else
53254 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53255+#endif
53256 inode->i_op = &proc_tgid_base_inode_operations;
53257 inode->i_fop = &proc_tgid_base_operations;
53258 inode->i_flags|=S_IMMUTABLE;
53259@@ -2777,7 +2910,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53260 if (!task)
53261 goto out;
53262
53263+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53264+ goto out_put_task;
53265+
53266 result = proc_pid_instantiate(dir, dentry, task, NULL);
53267+out_put_task:
53268 put_task_struct(task);
53269 out:
53270 return result;
53271@@ -2842,6 +2979,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53272 {
53273 unsigned int nr;
53274 struct task_struct *reaper;
53275+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53276+ const struct cred *tmpcred = current_cred();
53277+ const struct cred *itercred;
53278+#endif
53279+ filldir_t __filldir = filldir;
53280 struct tgid_iter iter;
53281 struct pid_namespace *ns;
53282
53283@@ -2865,8 +3007,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53284 for (iter = next_tgid(ns, iter);
53285 iter.task;
53286 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53287+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53288+ rcu_read_lock();
53289+ itercred = __task_cred(iter.task);
53290+#endif
53291+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53292+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53293+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53294+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53295+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53296+#endif
53297+ )
53298+#endif
53299+ )
53300+ __filldir = &gr_fake_filldir;
53301+ else
53302+ __filldir = filldir;
53303+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53304+ rcu_read_unlock();
53305+#endif
53306 filp->f_pos = iter.tgid + TGID_OFFSET;
53307- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53308+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53309 put_task_struct(iter.task);
53310 goto out;
53311 }
53312@@ -2892,7 +3053,7 @@ static const struct pid_entry tid_base_stuff[] = {
53313 #ifdef CONFIG_SCHED_DEBUG
53314 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53315 #endif
53316-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53317+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53318 INF("syscall", S_IRUGO, proc_pid_syscall),
53319 #endif
53320 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53321@@ -2916,10 +3077,10 @@ static const struct pid_entry tid_base_stuff[] = {
53322 #ifdef CONFIG_SECURITY
53323 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53324 #endif
53325-#ifdef CONFIG_KALLSYMS
53326+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53327 INF("wchan", S_IRUGO, proc_pid_wchan),
53328 #endif
53329-#ifdef CONFIG_STACKTRACE
53330+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53331 ONE("stack", S_IRUGO, proc_pid_stack),
53332 #endif
53333 #ifdef CONFIG_SCHEDSTATS
53334diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53335index 82676e3..5f8518a 100644
53336--- a/fs/proc/cmdline.c
53337+++ b/fs/proc/cmdline.c
53338@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53339
53340 static int __init proc_cmdline_init(void)
53341 {
53342+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53343+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53344+#else
53345 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53346+#endif
53347 return 0;
53348 }
53349 module_init(proc_cmdline_init);
53350diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53351index 59ee7da..469b4b6 100644
53352--- a/fs/proc/devices.c
53353+++ b/fs/proc/devices.c
53354@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53355
53356 static int __init proc_devices_init(void)
53357 {
53358+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53359+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53360+#else
53361 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53362+#endif
53363 return 0;
53364 }
53365 module_init(proc_devices_init);
53366diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53367index d78ade3..81767f9 100644
53368--- a/fs/proc/inode.c
53369+++ b/fs/proc/inode.c
53370@@ -18,12 +18,19 @@
53371 #include <linux/module.h>
53372 #include <linux/smp_lock.h>
53373 #include <linux/sysctl.h>
53374+#include <linux/grsecurity.h>
53375
53376 #include <asm/system.h>
53377 #include <asm/uaccess.h>
53378
53379 #include "internal.h"
53380
53381+#ifdef CONFIG_PROC_SYSCTL
53382+extern const struct inode_operations proc_sys_inode_operations;
53383+extern const struct inode_operations proc_sys_dir_operations;
53384+#endif
53385+
53386+
53387 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53388 {
53389 atomic_inc(&de->count);
53390@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53391 de_put(de);
53392 if (PROC_I(inode)->sysctl)
53393 sysctl_head_put(PROC_I(inode)->sysctl);
53394+
53395+#ifdef CONFIG_PROC_SYSCTL
53396+ if (inode->i_op == &proc_sys_inode_operations ||
53397+ inode->i_op == &proc_sys_dir_operations)
53398+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53399+#endif
53400+
53401 clear_inode(inode);
53402 }
53403
53404@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53405 if (de->mode) {
53406 inode->i_mode = de->mode;
53407 inode->i_uid = de->uid;
53408+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53409+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53410+#else
53411 inode->i_gid = de->gid;
53412+#endif
53413 }
53414 if (de->size)
53415 inode->i_size = de->size;
53416diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53417index 753ca37..26bcf3b 100644
53418--- a/fs/proc/internal.h
53419+++ b/fs/proc/internal.h
53420@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53421 struct pid *pid, struct task_struct *task);
53422 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53423 struct pid *pid, struct task_struct *task);
53424+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53425+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53426+#endif
53427 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53428
53429 extern const struct file_operations proc_maps_operations;
53430diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53431index b442dac..aab29cb 100644
53432--- a/fs/proc/kcore.c
53433+++ b/fs/proc/kcore.c
53434@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53435 off_t offset = 0;
53436 struct kcore_list *m;
53437
53438+ pax_track_stack();
53439+
53440 /* setup ELF header */
53441 elf = (struct elfhdr *) bufp;
53442 bufp += sizeof(struct elfhdr);
53443@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53444 * the addresses in the elf_phdr on our list.
53445 */
53446 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53447- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53448+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53449+ if (tsz > buflen)
53450 tsz = buflen;
53451-
53452+
53453 while (buflen) {
53454 struct kcore_list *m;
53455
53456@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53457 kfree(elf_buf);
53458 } else {
53459 if (kern_addr_valid(start)) {
53460- unsigned long n;
53461+ char *elf_buf;
53462+ mm_segment_t oldfs;
53463
53464- n = copy_to_user(buffer, (char *)start, tsz);
53465- /*
53466- * We cannot distingush between fault on source
53467- * and fault on destination. When this happens
53468- * we clear too and hope it will trigger the
53469- * EFAULT again.
53470- */
53471- if (n) {
53472- if (clear_user(buffer + tsz - n,
53473- n))
53474+ elf_buf = kmalloc(tsz, GFP_KERNEL);
53475+ if (!elf_buf)
53476+ return -ENOMEM;
53477+ oldfs = get_fs();
53478+ set_fs(KERNEL_DS);
53479+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53480+ set_fs(oldfs);
53481+ if (copy_to_user(buffer, elf_buf, tsz)) {
53482+ kfree(elf_buf);
53483 return -EFAULT;
53484+ }
53485 }
53486+ set_fs(oldfs);
53487+ kfree(elf_buf);
53488 } else {
53489 if (clear_user(buffer, tsz))
53490 return -EFAULT;
53491@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53492
53493 static int open_kcore(struct inode *inode, struct file *filp)
53494 {
53495+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53496+ return -EPERM;
53497+#endif
53498 if (!capable(CAP_SYS_RAWIO))
53499 return -EPERM;
53500 if (kcore_need_update)
53501diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53502index 7ca7834..cfe90a4 100644
53503--- a/fs/proc/kmsg.c
53504+++ b/fs/proc/kmsg.c
53505@@ -12,37 +12,37 @@
53506 #include <linux/poll.h>
53507 #include <linux/proc_fs.h>
53508 #include <linux/fs.h>
53509+#include <linux/syslog.h>
53510
53511 #include <asm/uaccess.h>
53512 #include <asm/io.h>
53513
53514 extern wait_queue_head_t log_wait;
53515
53516-extern int do_syslog(int type, char __user *bug, int count);
53517-
53518 static int kmsg_open(struct inode * inode, struct file * file)
53519 {
53520- return do_syslog(1,NULL,0);
53521+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53522 }
53523
53524 static int kmsg_release(struct inode * inode, struct file * file)
53525 {
53526- (void) do_syslog(0,NULL,0);
53527+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53528 return 0;
53529 }
53530
53531 static ssize_t kmsg_read(struct file *file, char __user *buf,
53532 size_t count, loff_t *ppos)
53533 {
53534- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
53535+ if ((file->f_flags & O_NONBLOCK) &&
53536+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53537 return -EAGAIN;
53538- return do_syslog(2, buf, count);
53539+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
53540 }
53541
53542 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
53543 {
53544 poll_wait(file, &log_wait, wait);
53545- if (do_syslog(9, NULL, 0))
53546+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53547 return POLLIN | POLLRDNORM;
53548 return 0;
53549 }
53550diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
53551index a65239c..ad1182a 100644
53552--- a/fs/proc/meminfo.c
53553+++ b/fs/proc/meminfo.c
53554@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53555 unsigned long pages[NR_LRU_LISTS];
53556 int lru;
53557
53558+ pax_track_stack();
53559+
53560 /*
53561 * display in kilobytes.
53562 */
53563@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53564 vmi.used >> 10,
53565 vmi.largest_chunk >> 10
53566 #ifdef CONFIG_MEMORY_FAILURE
53567- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
53568+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
53569 #endif
53570 );
53571
53572diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
53573index 9fe7d7e..cdb62c9 100644
53574--- a/fs/proc/nommu.c
53575+++ b/fs/proc/nommu.c
53576@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53577 if (len < 1)
53578 len = 1;
53579 seq_printf(m, "%*c", len, ' ');
53580- seq_path(m, &file->f_path, "");
53581+ seq_path(m, &file->f_path, "\n\\");
53582 }
53583
53584 seq_putc(m, '\n');
53585diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53586index 04d1270..25e1173 100644
53587--- a/fs/proc/proc_net.c
53588+++ b/fs/proc/proc_net.c
53589@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53590 struct task_struct *task;
53591 struct nsproxy *ns;
53592 struct net *net = NULL;
53593+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53594+ const struct cred *cred = current_cred();
53595+#endif
53596+
53597+#ifdef CONFIG_GRKERNSEC_PROC_USER
53598+ if (cred->fsuid)
53599+ return net;
53600+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53601+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53602+ return net;
53603+#endif
53604
53605 rcu_read_lock();
53606 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53607diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53608index f667e8a..55f4d96 100644
53609--- a/fs/proc/proc_sysctl.c
53610+++ b/fs/proc/proc_sysctl.c
53611@@ -7,11 +7,13 @@
53612 #include <linux/security.h>
53613 #include "internal.h"
53614
53615+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53616+
53617 static const struct dentry_operations proc_sys_dentry_operations;
53618 static const struct file_operations proc_sys_file_operations;
53619-static const struct inode_operations proc_sys_inode_operations;
53620+const struct inode_operations proc_sys_inode_operations;
53621 static const struct file_operations proc_sys_dir_file_operations;
53622-static const struct inode_operations proc_sys_dir_operations;
53623+const struct inode_operations proc_sys_dir_operations;
53624
53625 static struct inode *proc_sys_make_inode(struct super_block *sb,
53626 struct ctl_table_header *head, struct ctl_table *table)
53627@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53628 if (!p)
53629 goto out;
53630
53631+ if (gr_handle_sysctl(p, MAY_EXEC))
53632+ goto out;
53633+
53634 err = ERR_PTR(-ENOMEM);
53635 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53636 if (h)
53637@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53638
53639 err = NULL;
53640 dentry->d_op = &proc_sys_dentry_operations;
53641+
53642+ gr_handle_proc_create(dentry, inode);
53643+
53644 d_add(dentry, inode);
53645
53646 out:
53647@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53648 return -ENOMEM;
53649 } else {
53650 child->d_op = &proc_sys_dentry_operations;
53651+
53652+ gr_handle_proc_create(child, inode);
53653+
53654 d_add(child, inode);
53655 }
53656 } else {
53657@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
53658 if (*pos < file->f_pos)
53659 continue;
53660
53661+ if (gr_handle_sysctl(table, 0))
53662+ continue;
53663+
53664 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
53665 if (res)
53666 return res;
53667@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
53668 if (IS_ERR(head))
53669 return PTR_ERR(head);
53670
53671+ if (table && gr_handle_sysctl(table, MAY_EXEC))
53672+ return -ENOENT;
53673+
53674 generic_fillattr(inode, stat);
53675 if (table)
53676 stat->mode = (stat->mode & S_IFMT) | table->mode;
53677@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
53678 };
53679
53680 static const struct file_operations proc_sys_dir_file_operations = {
53681+ .read = generic_read_dir,
53682 .readdir = proc_sys_readdir,
53683 .llseek = generic_file_llseek,
53684 };
53685
53686-static const struct inode_operations proc_sys_inode_operations = {
53687+const struct inode_operations proc_sys_inode_operations = {
53688 .permission = proc_sys_permission,
53689 .setattr = proc_sys_setattr,
53690 .getattr = proc_sys_getattr,
53691 };
53692
53693-static const struct inode_operations proc_sys_dir_operations = {
53694+const struct inode_operations proc_sys_dir_operations = {
53695 .lookup = proc_sys_lookup,
53696 .permission = proc_sys_permission,
53697 .setattr = proc_sys_setattr,
53698diff --git a/fs/proc/root.c b/fs/proc/root.c
53699index b080b79..d957e63 100644
53700--- a/fs/proc/root.c
53701+++ b/fs/proc/root.c
53702@@ -134,7 +134,15 @@ void __init proc_root_init(void)
53703 #ifdef CONFIG_PROC_DEVICETREE
53704 proc_device_tree_init();
53705 #endif
53706+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53707+#ifdef CONFIG_GRKERNSEC_PROC_USER
53708+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
53709+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53710+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53711+#endif
53712+#else
53713 proc_mkdir("bus", NULL);
53714+#endif
53715 proc_sys_init();
53716 }
53717
53718diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
53719index 3b7b82a..4b420b0 100644
53720--- a/fs/proc/task_mmu.c
53721+++ b/fs/proc/task_mmu.c
53722@@ -8,6 +8,7 @@
53723 #include <linux/mempolicy.h>
53724 #include <linux/swap.h>
53725 #include <linux/swapops.h>
53726+#include <linux/grsecurity.h>
53727
53728 #include <asm/elf.h>
53729 #include <asm/uaccess.h>
53730@@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53731 "VmStk:\t%8lu kB\n"
53732 "VmExe:\t%8lu kB\n"
53733 "VmLib:\t%8lu kB\n"
53734- "VmPTE:\t%8lu kB\n",
53735- hiwater_vm << (PAGE_SHIFT-10),
53736+ "VmPTE:\t%8lu kB\n"
53737+
53738+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53739+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
53740+#endif
53741+
53742+ ,hiwater_vm << (PAGE_SHIFT-10),
53743 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
53744 mm->locked_vm << (PAGE_SHIFT-10),
53745 hiwater_rss << (PAGE_SHIFT-10),
53746 total_rss << (PAGE_SHIFT-10),
53747 data << (PAGE_SHIFT-10),
53748 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
53749- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
53750+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
53751+
53752+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53753+ , mm->context.user_cs_base, mm->context.user_cs_limit
53754+#endif
53755+
53756+ );
53757 }
53758
53759 unsigned long task_vsize(struct mm_struct *mm)
53760@@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
53761 struct proc_maps_private *priv = m->private;
53762 struct vm_area_struct *vma = v;
53763
53764- vma_stop(priv, vma);
53765+ if (!IS_ERR(vma))
53766+ vma_stop(priv, vma);
53767 if (priv->task)
53768 put_task_struct(priv->task);
53769 }
53770@@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
53771 return ret;
53772 }
53773
53774+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53775+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53776+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53777+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53778+#endif
53779+
53780 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53781 {
53782 struct mm_struct *mm = vma->vm_mm;
53783@@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53784 int flags = vma->vm_flags;
53785 unsigned long ino = 0;
53786 unsigned long long pgoff = 0;
53787- unsigned long start;
53788 dev_t dev = 0;
53789 int len;
53790
53791@@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53792 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
53793 }
53794
53795- /* We don't show the stack guard page in /proc/maps */
53796- start = vma->vm_start;
53797- if (vma->vm_flags & VM_GROWSDOWN)
53798- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
53799- start += PAGE_SIZE;
53800-
53801 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
53802- start,
53803+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53804+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
53805+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
53806+#else
53807+ vma->vm_start,
53808 vma->vm_end,
53809+#endif
53810 flags & VM_READ ? 'r' : '-',
53811 flags & VM_WRITE ? 'w' : '-',
53812 flags & VM_EXEC ? 'x' : '-',
53813 flags & VM_MAYSHARE ? 's' : 'p',
53814+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53815+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
53816+#else
53817 pgoff,
53818+#endif
53819 MAJOR(dev), MINOR(dev), ino, &len);
53820
53821 /*
53822@@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53823 */
53824 if (file) {
53825 pad_len_spaces(m, len);
53826- seq_path(m, &file->f_path, "\n");
53827+ seq_path(m, &file->f_path, "\n\\");
53828 } else {
53829 const char *name = arch_vma_name(vma);
53830 if (!name) {
53831@@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53832 if (vma->vm_start <= mm->brk &&
53833 vma->vm_end >= mm->start_brk) {
53834 name = "[heap]";
53835- } else if (vma->vm_start <= mm->start_stack &&
53836- vma->vm_end >= mm->start_stack) {
53837+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
53838+ (vma->vm_start <= mm->start_stack &&
53839+ vma->vm_end >= mm->start_stack)) {
53840 name = "[stack]";
53841 }
53842 } else {
53843@@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
53844 struct proc_maps_private *priv = m->private;
53845 struct task_struct *task = priv->task;
53846
53847+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53848+ if (current->exec_id != m->exec_id) {
53849+ gr_log_badprocpid("maps");
53850+ return 0;
53851+ }
53852+#endif
53853+
53854 show_map_vma(m, vma);
53855
53856 if (m->count < m->size) /* vma is copied successfully */
53857@@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
53858 .private = &mss,
53859 };
53860
53861+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53862+ if (current->exec_id != m->exec_id) {
53863+ gr_log_badprocpid("smaps");
53864+ return 0;
53865+ }
53866+#endif
53867 memset(&mss, 0, sizeof mss);
53868- mss.vma = vma;
53869- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53870- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53871+
53872+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53873+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
53874+#endif
53875+ mss.vma = vma;
53876+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53877+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53878+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53879+ }
53880+#endif
53881
53882 show_map_vma(m, vma);
53883
53884@@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
53885 "Swap: %8lu kB\n"
53886 "KernelPageSize: %8lu kB\n"
53887 "MMUPageSize: %8lu kB\n",
53888+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53889+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
53890+#else
53891 (vma->vm_end - vma->vm_start) >> 10,
53892+#endif
53893 mss.resident >> 10,
53894 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
53895 mss.shared_clean >> 10,
53896diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
53897index 8f5c05d..c99c76d 100644
53898--- a/fs/proc/task_nommu.c
53899+++ b/fs/proc/task_nommu.c
53900@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53901 else
53902 bytes += kobjsize(mm);
53903
53904- if (current->fs && current->fs->users > 1)
53905+ if (current->fs && atomic_read(&current->fs->users) > 1)
53906 sbytes += kobjsize(current->fs);
53907 else
53908 bytes += kobjsize(current->fs);
53909@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
53910 if (len < 1)
53911 len = 1;
53912 seq_printf(m, "%*c", len, ' ');
53913- seq_path(m, &file->f_path, "");
53914+ seq_path(m, &file->f_path, "\n\\");
53915 }
53916
53917 seq_putc(m, '\n');
53918diff --git a/fs/readdir.c b/fs/readdir.c
53919index 7723401..30059a6 100644
53920--- a/fs/readdir.c
53921+++ b/fs/readdir.c
53922@@ -16,6 +16,7 @@
53923 #include <linux/security.h>
53924 #include <linux/syscalls.h>
53925 #include <linux/unistd.h>
53926+#include <linux/namei.h>
53927
53928 #include <asm/uaccess.h>
53929
53930@@ -67,6 +68,7 @@ struct old_linux_dirent {
53931
53932 struct readdir_callback {
53933 struct old_linux_dirent __user * dirent;
53934+ struct file * file;
53935 int result;
53936 };
53937
53938@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
53939 buf->result = -EOVERFLOW;
53940 return -EOVERFLOW;
53941 }
53942+
53943+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53944+ return 0;
53945+
53946 buf->result++;
53947 dirent = buf->dirent;
53948 if (!access_ok(VERIFY_WRITE, dirent,
53949@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
53950
53951 buf.result = 0;
53952 buf.dirent = dirent;
53953+ buf.file = file;
53954
53955 error = vfs_readdir(file, fillonedir, &buf);
53956 if (buf.result)
53957@@ -142,6 +149,7 @@ struct linux_dirent {
53958 struct getdents_callback {
53959 struct linux_dirent __user * current_dir;
53960 struct linux_dirent __user * previous;
53961+ struct file * file;
53962 int count;
53963 int error;
53964 };
53965@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
53966 buf->error = -EOVERFLOW;
53967 return -EOVERFLOW;
53968 }
53969+
53970+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53971+ return 0;
53972+
53973 dirent = buf->previous;
53974 if (dirent) {
53975 if (__put_user(offset, &dirent->d_off))
53976@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
53977 buf.previous = NULL;
53978 buf.count = count;
53979 buf.error = 0;
53980+ buf.file = file;
53981
53982 error = vfs_readdir(file, filldir, &buf);
53983 if (error >= 0)
53984@@ -228,6 +241,7 @@ out:
53985 struct getdents_callback64 {
53986 struct linux_dirent64 __user * current_dir;
53987 struct linux_dirent64 __user * previous;
53988+ struct file *file;
53989 int count;
53990 int error;
53991 };
53992@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
53993 buf->error = -EINVAL; /* only used if we fail.. */
53994 if (reclen > buf->count)
53995 return -EINVAL;
53996+
53997+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53998+ return 0;
53999+
54000 dirent = buf->previous;
54001 if (dirent) {
54002 if (__put_user(offset, &dirent->d_off))
54003@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54004
54005 buf.current_dir = dirent;
54006 buf.previous = NULL;
54007+ buf.file = file;
54008 buf.count = count;
54009 buf.error = 0;
54010
54011@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
54012 error = buf.error;
54013 lastdirent = buf.previous;
54014 if (lastdirent) {
54015- typeof(lastdirent->d_off) d_off = file->f_pos;
54016+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
54017 if (__put_user(d_off, &lastdirent->d_off))
54018 error = -EFAULT;
54019 else
54020diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
54021index d42c30c..4fd8718 100644
54022--- a/fs/reiserfs/dir.c
54023+++ b/fs/reiserfs/dir.c
54024@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
54025 struct reiserfs_dir_entry de;
54026 int ret = 0;
54027
54028+ pax_track_stack();
54029+
54030 reiserfs_write_lock(inode->i_sb);
54031
54032 reiserfs_check_lock_depth(inode->i_sb, "readdir");
54033diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
54034index 128d3f7..8840d44 100644
54035--- a/fs/reiserfs/do_balan.c
54036+++ b/fs/reiserfs/do_balan.c
54037@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
54038 return;
54039 }
54040
54041- atomic_inc(&(fs_generation(tb->tb_sb)));
54042+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
54043 do_balance_starts(tb);
54044
54045 /* balance leaf returns 0 except if combining L R and S into
54046diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
54047index 72cb1cc..d0e3181 100644
54048--- a/fs/reiserfs/item_ops.c
54049+++ b/fs/reiserfs/item_ops.c
54050@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
54051 vi->vi_index, vi->vi_type, vi->vi_ih);
54052 }
54053
54054-static struct item_operations stat_data_ops = {
54055+static const struct item_operations stat_data_ops = {
54056 .bytes_number = sd_bytes_number,
54057 .decrement_key = sd_decrement_key,
54058 .is_left_mergeable = sd_is_left_mergeable,
54059@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
54060 vi->vi_index, vi->vi_type, vi->vi_ih);
54061 }
54062
54063-static struct item_operations direct_ops = {
54064+static const struct item_operations direct_ops = {
54065 .bytes_number = direct_bytes_number,
54066 .decrement_key = direct_decrement_key,
54067 .is_left_mergeable = direct_is_left_mergeable,
54068@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
54069 vi->vi_index, vi->vi_type, vi->vi_ih);
54070 }
54071
54072-static struct item_operations indirect_ops = {
54073+static const struct item_operations indirect_ops = {
54074 .bytes_number = indirect_bytes_number,
54075 .decrement_key = indirect_decrement_key,
54076 .is_left_mergeable = indirect_is_left_mergeable,
54077@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
54078 printk("\n");
54079 }
54080
54081-static struct item_operations direntry_ops = {
54082+static const struct item_operations direntry_ops = {
54083 .bytes_number = direntry_bytes_number,
54084 .decrement_key = direntry_decrement_key,
54085 .is_left_mergeable = direntry_is_left_mergeable,
54086@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
54087 "Invalid item type observed, run fsck ASAP");
54088 }
54089
54090-static struct item_operations errcatch_ops = {
54091+static const struct item_operations errcatch_ops = {
54092 errcatch_bytes_number,
54093 errcatch_decrement_key,
54094 errcatch_is_left_mergeable,
54095@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
54096 #error Item types must use disk-format assigned values.
54097 #endif
54098
54099-struct item_operations *item_ops[TYPE_ANY + 1] = {
54100+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
54101 &stat_data_ops,
54102 &indirect_ops,
54103 &direct_ops,
54104diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
54105index b5fe0aa..e0e25c4 100644
54106--- a/fs/reiserfs/journal.c
54107+++ b/fs/reiserfs/journal.c
54108@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
54109 struct buffer_head *bh;
54110 int i, j;
54111
54112+ pax_track_stack();
54113+
54114 bh = __getblk(dev, block, bufsize);
54115 if (buffer_uptodate(bh))
54116 return (bh);
54117diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
54118index 2715791..b8996db 100644
54119--- a/fs/reiserfs/namei.c
54120+++ b/fs/reiserfs/namei.c
54121@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
54122 unsigned long savelink = 1;
54123 struct timespec ctime;
54124
54125+ pax_track_stack();
54126+
54127 /* three balancings: (1) old name removal, (2) new name insertion
54128 and (3) maybe "save" link insertion
54129 stat data updates: (1) old directory,
54130diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54131index 9229e55..3d2e3b7 100644
54132--- a/fs/reiserfs/procfs.c
54133+++ b/fs/reiserfs/procfs.c
54134@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54135 "SMALL_TAILS " : "NO_TAILS ",
54136 replay_only(sb) ? "REPLAY_ONLY " : "",
54137 convert_reiserfs(sb) ? "CONV " : "",
54138- atomic_read(&r->s_generation_counter),
54139+ atomic_read_unchecked(&r->s_generation_counter),
54140 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54141 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54142 SF(s_good_search_by_key_reada), SF(s_bmaps),
54143@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54144 struct journal_params *jp = &rs->s_v1.s_journal;
54145 char b[BDEVNAME_SIZE];
54146
54147+ pax_track_stack();
54148+
54149 seq_printf(m, /* on-disk fields */
54150 "jp_journal_1st_block: \t%i\n"
54151 "jp_journal_dev: \t%s[%x]\n"
54152diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54153index d036ee5..4c7dca1 100644
54154--- a/fs/reiserfs/stree.c
54155+++ b/fs/reiserfs/stree.c
54156@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54157 int iter = 0;
54158 #endif
54159
54160+ pax_track_stack();
54161+
54162 BUG_ON(!th->t_trans_id);
54163
54164 init_tb_struct(th, &s_del_balance, sb, path,
54165@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54166 int retval;
54167 int quota_cut_bytes = 0;
54168
54169+ pax_track_stack();
54170+
54171 BUG_ON(!th->t_trans_id);
54172
54173 le_key2cpu_key(&cpu_key, key);
54174@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54175 int quota_cut_bytes;
54176 loff_t tail_pos = 0;
54177
54178+ pax_track_stack();
54179+
54180 BUG_ON(!th->t_trans_id);
54181
54182 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54183@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54184 int retval;
54185 int fs_gen;
54186
54187+ pax_track_stack();
54188+
54189 BUG_ON(!th->t_trans_id);
54190
54191 fs_gen = get_generation(inode->i_sb);
54192@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54193 int fs_gen = 0;
54194 int quota_bytes = 0;
54195
54196+ pax_track_stack();
54197+
54198 BUG_ON(!th->t_trans_id);
54199
54200 if (inode) { /* Do we count quotas for item? */
54201diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54202index 7cb1285..c726cd0 100644
54203--- a/fs/reiserfs/super.c
54204+++ b/fs/reiserfs/super.c
54205@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54206 {.option_name = NULL}
54207 };
54208
54209+ pax_track_stack();
54210+
54211 *blocks = 0;
54212 if (!options || !*options)
54213 /* use default configuration: create tails, journaling on, no
54214diff --git a/fs/select.c b/fs/select.c
54215index fd38ce2..f5381b8 100644
54216--- a/fs/select.c
54217+++ b/fs/select.c
54218@@ -20,6 +20,7 @@
54219 #include <linux/module.h>
54220 #include <linux/slab.h>
54221 #include <linux/poll.h>
54222+#include <linux/security.h>
54223 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54224 #include <linux/file.h>
54225 #include <linux/fdtable.h>
54226@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54227 int retval, i, timed_out = 0;
54228 unsigned long slack = 0;
54229
54230+ pax_track_stack();
54231+
54232 rcu_read_lock();
54233 retval = max_select_fd(n, fds);
54234 rcu_read_unlock();
54235@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54236 /* Allocate small arguments on the stack to save memory and be faster */
54237 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54238
54239+ pax_track_stack();
54240+
54241 ret = -EINVAL;
54242 if (n < 0)
54243 goto out_nofds;
54244@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54245 struct poll_list *walk = head;
54246 unsigned long todo = nfds;
54247
54248+ pax_track_stack();
54249+
54250+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54251 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54252 return -EINVAL;
54253
54254diff --git a/fs/seq_file.c b/fs/seq_file.c
54255index eae7d9d..12c71e3 100644
54256--- a/fs/seq_file.c
54257+++ b/fs/seq_file.c
54258@@ -40,6 +40,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
54259 memset(p, 0, sizeof(*p));
54260 mutex_init(&p->lock);
54261 p->op = op;
54262+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54263+ p->exec_id = current->exec_id;
54264+#endif
54265
54266 /*
54267 * Wrappers around seq_open(e.g. swaps_open) need to be
54268@@ -76,7 +79,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54269 return 0;
54270 }
54271 if (!m->buf) {
54272- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54273+ m->size = PAGE_SIZE;
54274+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54275 if (!m->buf)
54276 return -ENOMEM;
54277 }
54278@@ -116,7 +120,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54279 Eoverflow:
54280 m->op->stop(m, p);
54281 kfree(m->buf);
54282- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54283+ m->size <<= 1;
54284+ m->buf = kmalloc(m->size, GFP_KERNEL);
54285 return !m->buf ? -ENOMEM : -EAGAIN;
54286 }
54287
54288@@ -169,7 +174,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54289 m->version = file->f_version;
54290 /* grab buffer if we didn't have one */
54291 if (!m->buf) {
54292- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54293+ m->size = PAGE_SIZE;
54294+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54295 if (!m->buf)
54296 goto Enomem;
54297 }
54298@@ -210,7 +216,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54299 goto Fill;
54300 m->op->stop(m, p);
54301 kfree(m->buf);
54302- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54303+ m->size <<= 1;
54304+ m->buf = kmalloc(m->size, GFP_KERNEL);
54305 if (!m->buf)
54306 goto Enomem;
54307 m->count = 0;
54308@@ -551,7 +558,7 @@ static void single_stop(struct seq_file *p, void *v)
54309 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54310 void *data)
54311 {
54312- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54313+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54314 int res = -ENOMEM;
54315
54316 if (op) {
54317diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54318index 71c29b6..54694dd 100644
54319--- a/fs/smbfs/proc.c
54320+++ b/fs/smbfs/proc.c
54321@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54322
54323 out:
54324 if (server->local_nls != NULL && server->remote_nls != NULL)
54325- server->ops->convert = convert_cp;
54326+ *(void **)&server->ops->convert = convert_cp;
54327 else
54328- server->ops->convert = convert_memcpy;
54329+ *(void **)&server->ops->convert = convert_memcpy;
54330
54331 smb_unlock_server(server);
54332 return n;
54333@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54334
54335 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54336 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54337- server->ops->getattr = smb_proc_getattr_core;
54338+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
54339 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54340- server->ops->getattr = smb_proc_getattr_ff;
54341+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54342 }
54343
54344 /* Decode server capabilities */
54345@@ -3439,7 +3439,7 @@ out:
54346 static void
54347 install_ops(struct smb_ops *dst, struct smb_ops *src)
54348 {
54349- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54350+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54351 }
54352
54353 /* < LANMAN2 */
54354diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54355index 00b2909..2ace383 100644
54356--- a/fs/smbfs/symlink.c
54357+++ b/fs/smbfs/symlink.c
54358@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54359
54360 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54361 {
54362- char *s = nd_get_link(nd);
54363+ const char *s = nd_get_link(nd);
54364 if (!IS_ERR(s))
54365 __putname(s);
54366 }
54367diff --git a/fs/splice.c b/fs/splice.c
54368index bb92b7c..5aa72b0 100644
54369--- a/fs/splice.c
54370+++ b/fs/splice.c
54371@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54372 pipe_lock(pipe);
54373
54374 for (;;) {
54375- if (!pipe->readers) {
54376+ if (!atomic_read(&pipe->readers)) {
54377 send_sig(SIGPIPE, current, 0);
54378 if (!ret)
54379 ret = -EPIPE;
54380@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54381 do_wakeup = 0;
54382 }
54383
54384- pipe->waiting_writers++;
54385+ atomic_inc(&pipe->waiting_writers);
54386 pipe_wait(pipe);
54387- pipe->waiting_writers--;
54388+ atomic_dec(&pipe->waiting_writers);
54389 }
54390
54391 pipe_unlock(pipe);
54392@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54393 .spd_release = spd_release_page,
54394 };
54395
54396+ pax_track_stack();
54397+
54398 index = *ppos >> PAGE_CACHE_SHIFT;
54399 loff = *ppos & ~PAGE_CACHE_MASK;
54400 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54401@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54402 old_fs = get_fs();
54403 set_fs(get_ds());
54404 /* The cast to a user pointer is valid due to the set_fs() */
54405- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54406+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54407 set_fs(old_fs);
54408
54409 return res;
54410@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54411 old_fs = get_fs();
54412 set_fs(get_ds());
54413 /* The cast to a user pointer is valid due to the set_fs() */
54414- res = vfs_write(file, (const char __user *)buf, count, &pos);
54415+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54416 set_fs(old_fs);
54417
54418 return res;
54419@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54420 .spd_release = spd_release_page,
54421 };
54422
54423+ pax_track_stack();
54424+
54425 index = *ppos >> PAGE_CACHE_SHIFT;
54426 offset = *ppos & ~PAGE_CACHE_MASK;
54427 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54428@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54429 goto err;
54430
54431 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54432- vec[i].iov_base = (void __user *) page_address(page);
54433+ vec[i].iov_base = (__force void __user *) page_address(page);
54434 vec[i].iov_len = this_len;
54435 pages[i] = page;
54436 spd.nr_pages++;
54437@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54438 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54439 {
54440 while (!pipe->nrbufs) {
54441- if (!pipe->writers)
54442+ if (!atomic_read(&pipe->writers))
54443 return 0;
54444
54445- if (!pipe->waiting_writers && sd->num_spliced)
54446+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54447 return 0;
54448
54449 if (sd->flags & SPLICE_F_NONBLOCK)
54450@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54451 * out of the pipe right after the splice_to_pipe(). So set
54452 * PIPE_READERS appropriately.
54453 */
54454- pipe->readers = 1;
54455+ atomic_set(&pipe->readers, 1);
54456
54457 current->splice_pipe = pipe;
54458 }
54459@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54460 .spd_release = spd_release_page,
54461 };
54462
54463+ pax_track_stack();
54464+
54465 pipe = pipe_info(file->f_path.dentry->d_inode);
54466 if (!pipe)
54467 return -EBADF;
54468@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54469 ret = -ERESTARTSYS;
54470 break;
54471 }
54472- if (!pipe->writers)
54473+ if (!atomic_read(&pipe->writers))
54474 break;
54475- if (!pipe->waiting_writers) {
54476+ if (!atomic_read(&pipe->waiting_writers)) {
54477 if (flags & SPLICE_F_NONBLOCK) {
54478 ret = -EAGAIN;
54479 break;
54480@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54481 pipe_lock(pipe);
54482
54483 while (pipe->nrbufs >= PIPE_BUFFERS) {
54484- if (!pipe->readers) {
54485+ if (!atomic_read(&pipe->readers)) {
54486 send_sig(SIGPIPE, current, 0);
54487 ret = -EPIPE;
54488 break;
54489@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54490 ret = -ERESTARTSYS;
54491 break;
54492 }
54493- pipe->waiting_writers++;
54494+ atomic_inc(&pipe->waiting_writers);
54495 pipe_wait(pipe);
54496- pipe->waiting_writers--;
54497+ atomic_dec(&pipe->waiting_writers);
54498 }
54499
54500 pipe_unlock(pipe);
54501@@ -1786,14 +1792,14 @@ retry:
54502 pipe_double_lock(ipipe, opipe);
54503
54504 do {
54505- if (!opipe->readers) {
54506+ if (!atomic_read(&opipe->readers)) {
54507 send_sig(SIGPIPE, current, 0);
54508 if (!ret)
54509 ret = -EPIPE;
54510 break;
54511 }
54512
54513- if (!ipipe->nrbufs && !ipipe->writers)
54514+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54515 break;
54516
54517 /*
54518@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54519 pipe_double_lock(ipipe, opipe);
54520
54521 do {
54522- if (!opipe->readers) {
54523+ if (!atomic_read(&opipe->readers)) {
54524 send_sig(SIGPIPE, current, 0);
54525 if (!ret)
54526 ret = -EPIPE;
54527@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54528 * return EAGAIN if we have the potential of some data in the
54529 * future, otherwise just return 0
54530 */
54531- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
54532+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
54533 ret = -EAGAIN;
54534
54535 pipe_unlock(ipipe);
54536diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
54537index e020183..18d64b4 100644
54538--- a/fs/sysfs/dir.c
54539+++ b/fs/sysfs/dir.c
54540@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
54541 struct sysfs_dirent *sd;
54542 int rc;
54543
54544+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54545+ const char *parent_name = parent_sd->s_name;
54546+
54547+ mode = S_IFDIR | S_IRWXU;
54548+
54549+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
54550+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
54551+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
54552+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
54553+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
54554+#endif
54555+
54556 /* allocate */
54557 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
54558 if (!sd)
54559diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
54560index 7118a38..70af853 100644
54561--- a/fs/sysfs/file.c
54562+++ b/fs/sysfs/file.c
54563@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
54564
54565 struct sysfs_open_dirent {
54566 atomic_t refcnt;
54567- atomic_t event;
54568+ atomic_unchecked_t event;
54569 wait_queue_head_t poll;
54570 struct list_head buffers; /* goes through sysfs_buffer.list */
54571 };
54572@@ -53,7 +53,7 @@ struct sysfs_buffer {
54573 size_t count;
54574 loff_t pos;
54575 char * page;
54576- struct sysfs_ops * ops;
54577+ const struct sysfs_ops * ops;
54578 struct mutex mutex;
54579 int needs_read_fill;
54580 int event;
54581@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54582 {
54583 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54584 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54585- struct sysfs_ops * ops = buffer->ops;
54586+ const struct sysfs_ops * ops = buffer->ops;
54587 int ret = 0;
54588 ssize_t count;
54589
54590@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54591 if (!sysfs_get_active_two(attr_sd))
54592 return -ENODEV;
54593
54594- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
54595+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
54596 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
54597
54598 sysfs_put_active_two(attr_sd);
54599@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
54600 {
54601 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54602 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54603- struct sysfs_ops * ops = buffer->ops;
54604+ const struct sysfs_ops * ops = buffer->ops;
54605 int rc;
54606
54607 /* need attr_sd for attr and ops, its parent for kobj */
54608@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
54609 return -ENOMEM;
54610
54611 atomic_set(&new_od->refcnt, 0);
54612- atomic_set(&new_od->event, 1);
54613+ atomic_set_unchecked(&new_od->event, 1);
54614 init_waitqueue_head(&new_od->poll);
54615 INIT_LIST_HEAD(&new_od->buffers);
54616 goto retry;
54617@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
54618 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
54619 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54620 struct sysfs_buffer *buffer;
54621- struct sysfs_ops *ops;
54622+ const struct sysfs_ops *ops;
54623 int error = -EACCES;
54624 char *p;
54625
54626@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
54627
54628 sysfs_put_active_two(attr_sd);
54629
54630- if (buffer->event != atomic_read(&od->event))
54631+ if (buffer->event != atomic_read_unchecked(&od->event))
54632 goto trigger;
54633
54634 return DEFAULT_POLLMASK;
54635@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54636
54637 od = sd->s_attr.open;
54638 if (od) {
54639- atomic_inc(&od->event);
54640+ atomic_inc_unchecked(&od->event);
54641 wake_up_interruptible(&od->poll);
54642 }
54643
54644diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54645index c5081ad..342ea86 100644
54646--- a/fs/sysfs/symlink.c
54647+++ b/fs/sysfs/symlink.c
54648@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54649
54650 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
54651 {
54652- char *page = nd_get_link(nd);
54653+ const char *page = nd_get_link(nd);
54654 if (!IS_ERR(page))
54655 free_page((unsigned long)page);
54656 }
54657diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
54658index 1e06853..b06d325 100644
54659--- a/fs/udf/balloc.c
54660+++ b/fs/udf/balloc.c
54661@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
54662
54663 mutex_lock(&sbi->s_alloc_mutex);
54664 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54665- if (bloc->logicalBlockNum < 0 ||
54666- (bloc->logicalBlockNum + count) >
54667- partmap->s_partition_len) {
54668+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54669 udf_debug("%d < %d || %d + %d > %d\n",
54670 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
54671 count, partmap->s_partition_len);
54672@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
54673
54674 mutex_lock(&sbi->s_alloc_mutex);
54675 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54676- if (bloc->logicalBlockNum < 0 ||
54677- (bloc->logicalBlockNum + count) >
54678- partmap->s_partition_len) {
54679+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54680 udf_debug("%d < %d || %d + %d > %d\n",
54681 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
54682 partmap->s_partition_len);
54683diff --git a/fs/udf/inode.c b/fs/udf/inode.c
54684index 6d24c2c..fff470f 100644
54685--- a/fs/udf/inode.c
54686+++ b/fs/udf/inode.c
54687@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
54688 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
54689 int lastblock = 0;
54690
54691+ pax_track_stack();
54692+
54693 prev_epos.offset = udf_file_entry_alloc_offset(inode);
54694 prev_epos.block = iinfo->i_location;
54695 prev_epos.bh = NULL;
54696diff --git a/fs/udf/misc.c b/fs/udf/misc.c
54697index 9215700..bf1f68e 100644
54698--- a/fs/udf/misc.c
54699+++ b/fs/udf/misc.c
54700@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
54701
54702 u8 udf_tag_checksum(const struct tag *t)
54703 {
54704- u8 *data = (u8 *)t;
54705+ const u8 *data = (const u8 *)t;
54706 u8 checksum = 0;
54707 int i;
54708 for (i = 0; i < sizeof(struct tag); ++i)
54709diff --git a/fs/utimes.c b/fs/utimes.c
54710index e4c75db..b4df0e0 100644
54711--- a/fs/utimes.c
54712+++ b/fs/utimes.c
54713@@ -1,6 +1,7 @@
54714 #include <linux/compiler.h>
54715 #include <linux/file.h>
54716 #include <linux/fs.h>
54717+#include <linux/security.h>
54718 #include <linux/linkage.h>
54719 #include <linux/mount.h>
54720 #include <linux/namei.h>
54721@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
54722 goto mnt_drop_write_and_out;
54723 }
54724 }
54725+
54726+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
54727+ error = -EACCES;
54728+ goto mnt_drop_write_and_out;
54729+ }
54730+
54731 mutex_lock(&inode->i_mutex);
54732 error = notify_change(path->dentry, &newattrs);
54733 mutex_unlock(&inode->i_mutex);
54734diff --git a/fs/xattr.c b/fs/xattr.c
54735index 6d4f6d3..cda3958 100644
54736--- a/fs/xattr.c
54737+++ b/fs/xattr.c
54738@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
54739 * Extended attribute SET operations
54740 */
54741 static long
54742-setxattr(struct dentry *d, const char __user *name, const void __user *value,
54743+setxattr(struct path *path, const char __user *name, const void __user *value,
54744 size_t size, int flags)
54745 {
54746 int error;
54747@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
54748 return PTR_ERR(kvalue);
54749 }
54750
54751- error = vfs_setxattr(d, kname, kvalue, size, flags);
54752+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
54753+ error = -EACCES;
54754+ goto out;
54755+ }
54756+
54757+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
54758+out:
54759 kfree(kvalue);
54760 return error;
54761 }
54762@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
54763 return error;
54764 error = mnt_want_write(path.mnt);
54765 if (!error) {
54766- error = setxattr(path.dentry, name, value, size, flags);
54767+ error = setxattr(&path, name, value, size, flags);
54768 mnt_drop_write(path.mnt);
54769 }
54770 path_put(&path);
54771@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
54772 return error;
54773 error = mnt_want_write(path.mnt);
54774 if (!error) {
54775- error = setxattr(path.dentry, name, value, size, flags);
54776+ error = setxattr(&path, name, value, size, flags);
54777 mnt_drop_write(path.mnt);
54778 }
54779 path_put(&path);
54780@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
54781 const void __user *,value, size_t, size, int, flags)
54782 {
54783 struct file *f;
54784- struct dentry *dentry;
54785 int error = -EBADF;
54786
54787 f = fget(fd);
54788 if (!f)
54789 return error;
54790- dentry = f->f_path.dentry;
54791- audit_inode(NULL, dentry);
54792+ audit_inode(NULL, f->f_path.dentry);
54793 error = mnt_want_write_file(f);
54794 if (!error) {
54795- error = setxattr(dentry, name, value, size, flags);
54796+ error = setxattr(&f->f_path, name, value, size, flags);
54797 mnt_drop_write(f->f_path.mnt);
54798 }
54799 fput(f);
54800diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
54801index c6ad7c7..f2847a7 100644
54802--- a/fs/xattr_acl.c
54803+++ b/fs/xattr_acl.c
54804@@ -17,8 +17,8 @@
54805 struct posix_acl *
54806 posix_acl_from_xattr(const void *value, size_t size)
54807 {
54808- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
54809- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
54810+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
54811+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
54812 int count;
54813 struct posix_acl *acl;
54814 struct posix_acl_entry *acl_e;
54815diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
54816index 942362f..88f96f5 100644
54817--- a/fs/xfs/linux-2.6/xfs_ioctl.c
54818+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
54819@@ -134,7 +134,7 @@ xfs_find_handle(
54820 }
54821
54822 error = -EFAULT;
54823- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
54824+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
54825 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
54826 goto out_put;
54827
54828@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
54829 if (IS_ERR(dentry))
54830 return PTR_ERR(dentry);
54831
54832- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
54833+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
54834 if (!kbuf)
54835 goto out_dput;
54836
54837@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
54838 xfs_mount_t *mp,
54839 void __user *arg)
54840 {
54841- xfs_fsop_geom_t fsgeo;
54842+ xfs_fsop_geom_t fsgeo;
54843 int error;
54844
54845 error = xfs_fs_geometry(mp, &fsgeo, 3);
54846diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
54847index bad485a..479bd32 100644
54848--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
54849+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
54850@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
54851 xfs_fsop_geom_t fsgeo;
54852 int error;
54853
54854+ memset(&fsgeo, 0, sizeof(fsgeo));
54855 error = xfs_fs_geometry(mp, &fsgeo, 3);
54856 if (error)
54857 return -error;
54858diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
54859index 1f3b4b8..6102f6d 100644
54860--- a/fs/xfs/linux-2.6/xfs_iops.c
54861+++ b/fs/xfs/linux-2.6/xfs_iops.c
54862@@ -468,7 +468,7 @@ xfs_vn_put_link(
54863 struct nameidata *nd,
54864 void *p)
54865 {
54866- char *s = nd_get_link(nd);
54867+ const char *s = nd_get_link(nd);
54868
54869 if (!IS_ERR(s))
54870 kfree(s);
54871diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
54872index 8971fb0..5fc1eb2 100644
54873--- a/fs/xfs/xfs_bmap.c
54874+++ b/fs/xfs/xfs_bmap.c
54875@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
54876 int nmap,
54877 int ret_nmap);
54878 #else
54879-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
54880+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
54881 #endif /* DEBUG */
54882
54883 #if defined(XFS_RW_TRACE)
54884diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
54885index e89734e..5e84d8d 100644
54886--- a/fs/xfs/xfs_dir2_sf.c
54887+++ b/fs/xfs/xfs_dir2_sf.c
54888@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
54889 }
54890
54891 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
54892- if (filldir(dirent, sfep->name, sfep->namelen,
54893+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
54894+ char name[sfep->namelen];
54895+ memcpy(name, sfep->name, sfep->namelen);
54896+ if (filldir(dirent, name, sfep->namelen,
54897+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
54898+ *offset = off & 0x7fffffff;
54899+ return 0;
54900+ }
54901+ } else if (filldir(dirent, sfep->name, sfep->namelen,
54902 off & 0x7fffffff, ino, DT_UNKNOWN)) {
54903 *offset = off & 0x7fffffff;
54904 return 0;
54905diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
54906index 8f32f50..b6a41e8 100644
54907--- a/fs/xfs/xfs_vnodeops.c
54908+++ b/fs/xfs/xfs_vnodeops.c
54909@@ -564,13 +564,18 @@ xfs_readlink(
54910
54911 xfs_ilock(ip, XFS_ILOCK_SHARED);
54912
54913- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
54914- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
54915-
54916 pathlen = ip->i_d.di_size;
54917 if (!pathlen)
54918 goto out;
54919
54920+ if (pathlen > MAXPATHLEN) {
54921+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
54922+ __func__, (unsigned long long)ip->i_ino, pathlen);
54923+ ASSERT(0);
54924+ error = XFS_ERROR(EFSCORRUPTED);
54925+ goto out;
54926+ }
54927+
54928 if (ip->i_df.if_flags & XFS_IFINLINE) {
54929 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
54930 link[pathlen] = '\0';
54931diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
54932new file mode 100644
54933index 0000000..7026cbd
54934--- /dev/null
54935+++ b/grsecurity/Kconfig
54936@@ -0,0 +1,1074 @@
54937+#
54938+# grecurity configuration
54939+#
54940+
54941+menu "Grsecurity"
54942+
54943+config GRKERNSEC
54944+ bool "Grsecurity"
54945+ select CRYPTO
54946+ select CRYPTO_SHA256
54947+ help
54948+ If you say Y here, you will be able to configure many features
54949+ that will enhance the security of your system. It is highly
54950+ recommended that you say Y here and read through the help
54951+ for each option so that you fully understand the features and
54952+ can evaluate their usefulness for your machine.
54953+
54954+choice
54955+ prompt "Security Level"
54956+ depends on GRKERNSEC
54957+ default GRKERNSEC_CUSTOM
54958+
54959+config GRKERNSEC_LOW
54960+ bool "Low"
54961+ select GRKERNSEC_LINK
54962+ select GRKERNSEC_FIFO
54963+ select GRKERNSEC_RANDNET
54964+ select GRKERNSEC_DMESG
54965+ select GRKERNSEC_CHROOT
54966+ select GRKERNSEC_CHROOT_CHDIR
54967+
54968+ help
54969+ If you choose this option, several of the grsecurity options will
54970+ be enabled that will give you greater protection against a number
54971+ of attacks, while assuring that none of your software will have any
54972+ conflicts with the additional security measures. If you run a lot
54973+ of unusual software, or you are having problems with the higher
54974+ security levels, you should say Y here. With this option, the
54975+ following features are enabled:
54976+
54977+ - Linking restrictions
54978+ - FIFO restrictions
54979+ - Restricted dmesg
54980+ - Enforced chdir("/") on chroot
54981+ - Runtime module disabling
54982+
54983+config GRKERNSEC_MEDIUM
54984+ bool "Medium"
54985+ select PAX
54986+ select PAX_EI_PAX
54987+ select PAX_PT_PAX_FLAGS
54988+ select PAX_HAVE_ACL_FLAGS
54989+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54990+ select GRKERNSEC_CHROOT
54991+ select GRKERNSEC_CHROOT_SYSCTL
54992+ select GRKERNSEC_LINK
54993+ select GRKERNSEC_FIFO
54994+ select GRKERNSEC_DMESG
54995+ select GRKERNSEC_RANDNET
54996+ select GRKERNSEC_FORKFAIL
54997+ select GRKERNSEC_TIME
54998+ select GRKERNSEC_SIGNAL
54999+ select GRKERNSEC_CHROOT
55000+ select GRKERNSEC_CHROOT_UNIX
55001+ select GRKERNSEC_CHROOT_MOUNT
55002+ select GRKERNSEC_CHROOT_PIVOT
55003+ select GRKERNSEC_CHROOT_DOUBLE
55004+ select GRKERNSEC_CHROOT_CHDIR
55005+ select GRKERNSEC_CHROOT_MKNOD
55006+ select GRKERNSEC_PROC
55007+ select GRKERNSEC_PROC_USERGROUP
55008+ select PAX_RANDUSTACK
55009+ select PAX_ASLR
55010+ select PAX_RANDMMAP
55011+ select PAX_REFCOUNT if (X86 || SPARC64)
55012+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55013+
55014+ help
55015+ If you say Y here, several features in addition to those included
55016+ in the low additional security level will be enabled. These
55017+ features provide even more security to your system, though in rare
55018+ cases they may be incompatible with very old or poorly written
55019+ software. If you enable this option, make sure that your auth
55020+ service (identd) is running as gid 1001. With this option,
55021+ the following features (in addition to those provided in the
55022+ low additional security level) will be enabled:
55023+
55024+ - Failed fork logging
55025+ - Time change logging
55026+ - Signal logging
55027+ - Deny mounts in chroot
55028+ - Deny double chrooting
55029+ - Deny sysctl writes in chroot
55030+ - Deny mknod in chroot
55031+ - Deny access to abstract AF_UNIX sockets out of chroot
55032+ - Deny pivot_root in chroot
55033+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
55034+ - /proc restrictions with special GID set to 10 (usually wheel)
55035+ - Address Space Layout Randomization (ASLR)
55036+ - Prevent exploitation of most refcount overflows
55037+ - Bounds checking of copying between the kernel and userland
55038+
55039+config GRKERNSEC_HIGH
55040+ bool "High"
55041+ select GRKERNSEC_LINK
55042+ select GRKERNSEC_FIFO
55043+ select GRKERNSEC_DMESG
55044+ select GRKERNSEC_FORKFAIL
55045+ select GRKERNSEC_TIME
55046+ select GRKERNSEC_SIGNAL
55047+ select GRKERNSEC_CHROOT
55048+ select GRKERNSEC_CHROOT_SHMAT
55049+ select GRKERNSEC_CHROOT_UNIX
55050+ select GRKERNSEC_CHROOT_MOUNT
55051+ select GRKERNSEC_CHROOT_FCHDIR
55052+ select GRKERNSEC_CHROOT_PIVOT
55053+ select GRKERNSEC_CHROOT_DOUBLE
55054+ select GRKERNSEC_CHROOT_CHDIR
55055+ select GRKERNSEC_CHROOT_MKNOD
55056+ select GRKERNSEC_CHROOT_CAPS
55057+ select GRKERNSEC_CHROOT_SYSCTL
55058+ select GRKERNSEC_CHROOT_FINDTASK
55059+ select GRKERNSEC_SYSFS_RESTRICT
55060+ select GRKERNSEC_PROC
55061+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
55062+ select GRKERNSEC_HIDESYM
55063+ select GRKERNSEC_BRUTE
55064+ select GRKERNSEC_PROC_USERGROUP
55065+ select GRKERNSEC_KMEM
55066+ select GRKERNSEC_RESLOG
55067+ select GRKERNSEC_RANDNET
55068+ select GRKERNSEC_PROC_ADD
55069+ select GRKERNSEC_CHROOT_CHMOD
55070+ select GRKERNSEC_CHROOT_NICE
55071+ select GRKERNSEC_SETXID
55072+ select GRKERNSEC_AUDIT_MOUNT
55073+ select GRKERNSEC_MODHARDEN if (MODULES)
55074+ select GRKERNSEC_HARDEN_PTRACE
55075+ select GRKERNSEC_PTRACE_READEXEC
55076+ select GRKERNSEC_VM86 if (X86_32)
55077+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
55078+ select PAX
55079+ select PAX_RANDUSTACK
55080+ select PAX_ASLR
55081+ select PAX_RANDMMAP
55082+ select PAX_NOEXEC
55083+ select PAX_MPROTECT
55084+ select PAX_EI_PAX
55085+ select PAX_PT_PAX_FLAGS
55086+ select PAX_HAVE_ACL_FLAGS
55087+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55088+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55089+ select PAX_RANDKSTACK if (X86_TSC && X86)
55090+ select PAX_SEGMEXEC if (X86_32)
55091+ select PAX_PAGEEXEC
55092+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55093+ select PAX_EMUTRAMP if (PARISC)
55094+ select PAX_EMUSIGRT if (PARISC)
55095+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55096+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55097+ select PAX_REFCOUNT if (X86 || SPARC64)
55098+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55099+ help
55100+ If you say Y here, many of the features of grsecurity will be
55101+ enabled, which will protect you against many kinds of attacks
55102+ against your system. The heightened security comes at a cost
55103+ of an increased chance of incompatibilities with rare software
55104+ on your machine. Since this security level enables PaX, you should
55105+ view <http://pax.grsecurity.net> and read about the PaX
55106+ project. While you are there, download chpax and run it on
55107+ binaries that cause problems with PaX. Also remember that
55108+ since the /proc restrictions are enabled, you must run your
55109+ identd as gid 1001. This security level enables the following
55110+ features in addition to those listed in the low and medium
55111+ security levels:
55112+
55113+ - Additional /proc restrictions
55114+ - Chmod restrictions in chroot
55115+ - No signals, ptrace, or viewing of processes outside of chroot
55116+ - Capability restrictions in chroot
55117+ - Deny fchdir out of chroot
55118+ - Priority restrictions in chroot
55119+ - Segmentation-based implementation of PaX
55120+ - Mprotect restrictions
55121+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55122+ - Kernel stack randomization
55123+ - Mount/unmount/remount logging
55124+ - Kernel symbol hiding
55125+ - Hardening of module auto-loading
55126+ - Ptrace restrictions
55127+ - Restricted vm86 mode
55128+ - Restricted sysfs/debugfs
55129+ - Active kernel exploit response
55130+
55131+config GRKERNSEC_CUSTOM
55132+ bool "Custom"
55133+ help
55134+ If you say Y here, you will be able to configure every grsecurity
55135+ option, which allows you to enable many more features that aren't
55136+ covered in the basic security levels. These additional features
55137+ include TPE, socket restrictions, and the sysctl system for
55138+ grsecurity. It is advised that you read through the help for
55139+ each option to determine its usefulness in your situation.
55140+
55141+endchoice
55142+
55143+menu "Memory Protections"
55144+depends on GRKERNSEC
55145+
55146+config GRKERNSEC_KMEM
55147+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55148+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55149+ help
55150+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55151+ be written to or read from to modify or leak the contents of the running
55152+ kernel. /dev/port will also not be allowed to be opened. If you have module
55153+ support disabled, enabling this will close up four ways that are
55154+ currently used to insert malicious code into the running kernel.
55155+ Even with all these features enabled, we still highly recommend that
55156+ you use the RBAC system, as it is still possible for an attacker to
55157+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55158+ If you are not using XFree86, you may be able to stop this additional
55159+ case by enabling the 'Disable privileged I/O' option. Though nothing
55160+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55161+ but only to video memory, which is the only writing we allow in this
55162+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55163+ not be allowed to mprotect it with PROT_WRITE later.
55164+ It is highly recommended that you say Y here if you meet all the
55165+ conditions above.
55166+
55167+config GRKERNSEC_VM86
55168+ bool "Restrict VM86 mode"
55169+ depends on X86_32
55170+
55171+ help
55172+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55173+ make use of a special execution mode on 32bit x86 processors called
55174+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55175+ video cards and will still work with this option enabled. The purpose
55176+ of the option is to prevent exploitation of emulation errors in
55177+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55178+ Nearly all users should be able to enable this option.
55179+
55180+config GRKERNSEC_IO
55181+ bool "Disable privileged I/O"
55182+ depends on X86
55183+ select RTC_CLASS
55184+ select RTC_INTF_DEV
55185+ select RTC_DRV_CMOS
55186+
55187+ help
55188+ If you say Y here, all ioperm and iopl calls will return an error.
55189+ Ioperm and iopl can be used to modify the running kernel.
55190+ Unfortunately, some programs need this access to operate properly,
55191+ the most notable of which are XFree86 and hwclock. hwclock can be
55192+ remedied by having RTC support in the kernel, so real-time
55193+ clock support is enabled if this option is enabled, to ensure
55194+ that hwclock operates correctly. XFree86 still will not
55195+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55196+ IF YOU USE XFree86. If you use XFree86 and you still want to
55197+ protect your kernel against modification, use the RBAC system.
55198+
55199+config GRKERNSEC_PROC_MEMMAP
55200+ bool "Harden ASLR against information leaks and entropy reduction"
55201+ default y if (PAX_NOEXEC || PAX_ASLR)
55202+ depends on PAX_NOEXEC || PAX_ASLR
55203+ help
55204+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55205+ give no information about the addresses of its mappings if
55206+ PaX features that rely on random addresses are enabled on the task.
55207+ In addition to sanitizing this information and disabling other
55208+ dangerous sources of information, this option causes reads of sensitive
55209+ /proc/<pid> entries where the file descriptor was opened in a different
55210+ task than the one performing the read. Such attempts are logged.
55211+ Finally, this option limits argv/env strings for suid/sgid binaries
55212+ to 1MB to prevent a complete exhaustion of the stack entropy provided
55213+ by ASLR.
55214+ If you use PaX it is essential that you say Y here as it closes up
55215+ several holes that make full ASLR useless for suid/sgid binaries.
55216+
55217+config GRKERNSEC_BRUTE
55218+ bool "Deter exploit bruteforcing"
55219+ help
55220+ If you say Y here, attempts to bruteforce exploits against forking
55221+ daemons such as apache or sshd, as well as against suid/sgid binaries
55222+ will be deterred. When a child of a forking daemon is killed by PaX
55223+ or crashes due to an illegal instruction or other suspicious signal,
55224+ the parent process will be delayed 30 seconds upon every subsequent
55225+ fork until the administrator is able to assess the situation and
55226+ restart the daemon.
55227+ In the suid/sgid case, the attempt is logged, the user has all their
55228+ processes terminated, and they are prevented from executing any further
55229+ processes for 15 minutes.
55230+ It is recommended that you also enable signal logging in the auditing
55231+ section so that logs are generated when a process triggers a suspicious
55232+ signal.
55233+ If the sysctl option is enabled, a sysctl option with name
55234+ "deter_bruteforce" is created.
55235+
55236+config GRKERNSEC_MODHARDEN
55237+ bool "Harden module auto-loading"
55238+ depends on MODULES
55239+ help
55240+ If you say Y here, module auto-loading in response to use of some
55241+ feature implemented by an unloaded module will be restricted to
55242+ root users. Enabling this option helps defend against attacks
55243+ by unprivileged users who abuse the auto-loading behavior to
55244+ cause a vulnerable module to load that is then exploited.
55245+
55246+ If this option prevents a legitimate use of auto-loading for a
55247+ non-root user, the administrator can execute modprobe manually
55248+ with the exact name of the module mentioned in the alert log.
55249+ Alternatively, the administrator can add the module to the list
55250+ of modules loaded at boot by modifying init scripts.
55251+
55252+ Modification of init scripts will most likely be needed on
55253+ Ubuntu servers with encrypted home directory support enabled,
55254+ as the first non-root user logging in will cause the ecb(aes),
55255+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55256+
55257+config GRKERNSEC_HIDESYM
55258+ bool "Hide kernel symbols"
55259+ help
55260+ If you say Y here, getting information on loaded modules, and
55261+ displaying all kernel symbols through a syscall will be restricted
55262+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55263+ /proc/kallsyms will be restricted to the root user. The RBAC
55264+ system can hide that entry even from root.
55265+
55266+ This option also prevents leaking of kernel addresses through
55267+ several /proc entries.
55268+
55269+ Note that this option is only effective provided the following
55270+ conditions are met:
55271+ 1) The kernel using grsecurity is not precompiled by some distribution
55272+ 2) You have also enabled GRKERNSEC_DMESG
55273+ 3) You are using the RBAC system and hiding other files such as your
55274+ kernel image and System.map. Alternatively, enabling this option
55275+ causes the permissions on /boot, /lib/modules, and the kernel
55276+ source directory to change at compile time to prevent
55277+ reading by non-root users.
55278+ If the above conditions are met, this option will aid in providing a
55279+ useful protection against local kernel exploitation of overflows
55280+ and arbitrary read/write vulnerabilities.
55281+
55282+config GRKERNSEC_KERN_LOCKOUT
55283+ bool "Active kernel exploit response"
55284+ depends on X86 || ARM || PPC || SPARC
55285+ help
55286+ If you say Y here, when a PaX alert is triggered due to suspicious
55287+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55288+ or an OOPs occurs due to bad memory accesses, instead of just
55289+ terminating the offending process (and potentially allowing
55290+ a subsequent exploit from the same user), we will take one of two
55291+ actions:
55292+ If the user was root, we will panic the system
55293+ If the user was non-root, we will log the attempt, terminate
55294+ all processes owned by the user, then prevent them from creating
55295+ any new processes until the system is restarted
55296+ This deters repeated kernel exploitation/bruteforcing attempts
55297+ and is useful for later forensics.
55298+
55299+endmenu
55300+menu "Role Based Access Control Options"
55301+depends on GRKERNSEC
55302+
55303+config GRKERNSEC_RBAC_DEBUG
55304+ bool
55305+
55306+config GRKERNSEC_NO_RBAC
55307+ bool "Disable RBAC system"
55308+ help
55309+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55310+ preventing the RBAC system from being enabled. You should only say Y
55311+ here if you have no intention of using the RBAC system, so as to prevent
55312+ an attacker with root access from misusing the RBAC system to hide files
55313+ and processes when loadable module support and /dev/[k]mem have been
55314+ locked down.
55315+
55316+config GRKERNSEC_ACL_HIDEKERN
55317+ bool "Hide kernel processes"
55318+ help
55319+ If you say Y here, all kernel threads will be hidden to all
55320+ processes but those whose subject has the "view hidden processes"
55321+ flag.
55322+
55323+config GRKERNSEC_ACL_MAXTRIES
55324+ int "Maximum tries before password lockout"
55325+ default 3
55326+ help
55327+ This option enforces the maximum number of times a user can attempt
55328+ to authorize themselves with the grsecurity RBAC system before being
55329+ denied the ability to attempt authorization again for a specified time.
55330+ The lower the number, the harder it will be to brute-force a password.
55331+
55332+config GRKERNSEC_ACL_TIMEOUT
55333+ int "Time to wait after max password tries, in seconds"
55334+ default 30
55335+ help
55336+ This option specifies the time the user must wait after attempting to
55337+ authorize to the RBAC system with the maximum number of invalid
55338+ passwords. The higher the number, the harder it will be to brute-force
55339+ a password.
55340+
55341+endmenu
55342+menu "Filesystem Protections"
55343+depends on GRKERNSEC
55344+
55345+config GRKERNSEC_PROC
55346+ bool "Proc restrictions"
55347+ help
55348+ If you say Y here, the permissions of the /proc filesystem
55349+ will be altered to enhance system security and privacy. You MUST
55350+ choose either a user only restriction or a user and group restriction.
55351+ Depending upon the option you choose, you can either restrict users to
55352+ see only the processes they themselves run, or choose a group that can
55353+ view all processes and files normally restricted to root if you choose
55354+ the "restrict to user only" option. NOTE: If you're running identd as
55355+ a non-root user, you will have to run it as the group you specify here.
55356+
55357+config GRKERNSEC_PROC_USER
55358+ bool "Restrict /proc to user only"
55359+ depends on GRKERNSEC_PROC
55360+ help
55361+ If you say Y here, non-root users will only be able to view their own
55362+ processes, and restricts them from viewing network-related information,
55363+ and viewing kernel symbol and module information.
55364+
55365+config GRKERNSEC_PROC_USERGROUP
55366+ bool "Allow special group"
55367+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55368+ help
55369+ If you say Y here, you will be able to select a group that will be
55370+ able to view all processes and network-related information. If you've
55371+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55372+ remain hidden. This option is useful if you want to run identd as
55373+ a non-root user.
55374+
55375+config GRKERNSEC_PROC_GID
55376+ int "GID for special group"
55377+ depends on GRKERNSEC_PROC_USERGROUP
55378+ default 1001
55379+
55380+config GRKERNSEC_PROC_ADD
55381+ bool "Additional restrictions"
55382+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55383+ help
55384+ If you say Y here, additional restrictions will be placed on
55385+ /proc that keep normal users from viewing device information and
55386+ slabinfo information that could be useful for exploits.
55387+
55388+config GRKERNSEC_LINK
55389+ bool "Linking restrictions"
55390+ help
55391+ If you say Y here, /tmp race exploits will be prevented, since users
55392+ will no longer be able to follow symlinks owned by other users in
55393+ world-writable +t directories (e.g. /tmp), unless the owner of the
55394+ symlink is the owner of the directory. users will also not be
55395+ able to hardlink to files they do not own. If the sysctl option is
55396+ enabled, a sysctl option with name "linking_restrictions" is created.
55397+
55398+config GRKERNSEC_FIFO
55399+ bool "FIFO restrictions"
55400+ help
55401+ If you say Y here, users will not be able to write to FIFOs they don't
55402+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55403+ the FIFO is the same owner of the directory it's held in. If the sysctl
55404+ option is enabled, a sysctl option with name "fifo_restrictions" is
55405+ created.
55406+
55407+config GRKERNSEC_SYSFS_RESTRICT
55408+ bool "Sysfs/debugfs restriction"
55409+ depends on SYSFS
55410+ help
55411+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55412+ any filesystem normally mounted under it (e.g. debugfs) will be
55413+ mostly accessible only by root. These filesystems generally provide access
55414+ to hardware and debug information that isn't appropriate for unprivileged
55415+ users of the system. Sysfs and debugfs have also become a large source
55416+ of new vulnerabilities, ranging from infoleaks to local compromise.
55417+ There has been very little oversight with an eye toward security involved
55418+ in adding new exporters of information to these filesystems, so their
55419+ use is discouraged.
55420+ For reasons of compatibility, a few directories have been whitelisted
55421+ for access by non-root users:
55422+ /sys/fs/selinux
55423+ /sys/fs/fuse
55424+ /sys/devices/system/cpu
55425+
55426+config GRKERNSEC_ROFS
55427+ bool "Runtime read-only mount protection"
55428+ help
55429+ If you say Y here, a sysctl option with name "romount_protect" will
55430+ be created. By setting this option to 1 at runtime, filesystems
55431+ will be protected in the following ways:
55432+ * No new writable mounts will be allowed
55433+ * Existing read-only mounts won't be able to be remounted read/write
55434+ * Write operations will be denied on all block devices
55435+ This option acts independently of grsec_lock: once it is set to 1,
55436+ it cannot be turned off. Therefore, please be mindful of the resulting
55437+ behavior if this option is enabled in an init script on a read-only
55438+ filesystem. This feature is mainly intended for secure embedded systems.
55439+
55440+config GRKERNSEC_CHROOT
55441+ bool "Chroot jail restrictions"
55442+ help
55443+ If you say Y here, you will be able to choose several options that will
55444+ make breaking out of a chrooted jail much more difficult. If you
55445+ encounter no software incompatibilities with the following options, it
55446+ is recommended that you enable each one.
55447+
55448+config GRKERNSEC_CHROOT_MOUNT
55449+ bool "Deny mounts"
55450+ depends on GRKERNSEC_CHROOT
55451+ help
55452+ If you say Y here, processes inside a chroot will not be able to
55453+ mount or remount filesystems. If the sysctl option is enabled, a
55454+ sysctl option with name "chroot_deny_mount" is created.
55455+
55456+config GRKERNSEC_CHROOT_DOUBLE
55457+ bool "Deny double-chroots"
55458+ depends on GRKERNSEC_CHROOT
55459+ help
55460+ If you say Y here, processes inside a chroot will not be able to chroot
55461+ again outside the chroot. This is a widely used method of breaking
55462+ out of a chroot jail and should not be allowed. If the sysctl
55463+ option is enabled, a sysctl option with name
55464+ "chroot_deny_chroot" is created.
55465+
55466+config GRKERNSEC_CHROOT_PIVOT
55467+ bool "Deny pivot_root in chroot"
55468+ depends on GRKERNSEC_CHROOT
55469+ help
55470+ If you say Y here, processes inside a chroot will not be able to use
55471+ a function called pivot_root() that was introduced in Linux 2.3.41. It
55472+ works similar to chroot in that it changes the root filesystem. This
55473+ function could be misused in a chrooted process to attempt to break out
55474+ of the chroot, and therefore should not be allowed. If the sysctl
55475+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
55476+ created.
55477+
55478+config GRKERNSEC_CHROOT_CHDIR
55479+ bool "Enforce chdir(\"/\") on all chroots"
55480+ depends on GRKERNSEC_CHROOT
55481+ help
55482+ If you say Y here, the current working directory of all newly-chrooted
55483+ applications will be set to the the root directory of the chroot.
55484+ The man page on chroot(2) states:
55485+ Note that this call does not change the current working
55486+ directory, so that `.' can be outside the tree rooted at
55487+ `/'. In particular, the super-user can escape from a
55488+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55489+
55490+ It is recommended that you say Y here, since it's not known to break
55491+ any software. If the sysctl option is enabled, a sysctl option with
55492+ name "chroot_enforce_chdir" is created.
55493+
55494+config GRKERNSEC_CHROOT_CHMOD
55495+ bool "Deny (f)chmod +s"
55496+ depends on GRKERNSEC_CHROOT
55497+ help
55498+ If you say Y here, processes inside a chroot will not be able to chmod
55499+ or fchmod files to make them have suid or sgid bits. This protects
55500+ against another published method of breaking a chroot. If the sysctl
55501+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
55502+ created.
55503+
55504+config GRKERNSEC_CHROOT_FCHDIR
55505+ bool "Deny fchdir out of chroot"
55506+ depends on GRKERNSEC_CHROOT
55507+ help
55508+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
55509+ to a file descriptor of the chrooting process that points to a directory
55510+ outside the filesystem will be stopped. If the sysctl option
55511+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55512+
55513+config GRKERNSEC_CHROOT_MKNOD
55514+ bool "Deny mknod"
55515+ depends on GRKERNSEC_CHROOT
55516+ help
55517+ If you say Y here, processes inside a chroot will not be allowed to
55518+ mknod. The problem with using mknod inside a chroot is that it
55519+ would allow an attacker to create a device entry that is the same
55520+ as one on the physical root of your system, which could range from
55521+ anything from the console device to a device for your harddrive (which
55522+ they could then use to wipe the drive or steal data). It is recommended
55523+ that you say Y here, unless you run into software incompatibilities.
55524+ If the sysctl option is enabled, a sysctl option with name
55525+ "chroot_deny_mknod" is created.
55526+
55527+config GRKERNSEC_CHROOT_SHMAT
55528+ bool "Deny shmat() out of chroot"
55529+ depends on GRKERNSEC_CHROOT
55530+ help
55531+ If you say Y here, processes inside a chroot will not be able to attach
55532+ to shared memory segments that were created outside of the chroot jail.
55533+ It is recommended that you say Y here. If the sysctl option is enabled,
55534+ a sysctl option with name "chroot_deny_shmat" is created.
55535+
55536+config GRKERNSEC_CHROOT_UNIX
55537+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
55538+ depends on GRKERNSEC_CHROOT
55539+ help
55540+ If you say Y here, processes inside a chroot will not be able to
55541+ connect to abstract (meaning not belonging to a filesystem) Unix
55542+ domain sockets that were bound outside of a chroot. It is recommended
55543+ that you say Y here. If the sysctl option is enabled, a sysctl option
55544+ with name "chroot_deny_unix" is created.
55545+
55546+config GRKERNSEC_CHROOT_FINDTASK
55547+ bool "Protect outside processes"
55548+ depends on GRKERNSEC_CHROOT
55549+ help
55550+ If you say Y here, processes inside a chroot will not be able to
55551+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55552+ getsid, or view any process outside of the chroot. If the sysctl
55553+ option is enabled, a sysctl option with name "chroot_findtask" is
55554+ created.
55555+
55556+config GRKERNSEC_CHROOT_NICE
55557+ bool "Restrict priority changes"
55558+ depends on GRKERNSEC_CHROOT
55559+ help
55560+ If you say Y here, processes inside a chroot will not be able to raise
55561+ the priority of processes in the chroot, or alter the priority of
55562+ processes outside the chroot. This provides more security than simply
55563+ removing CAP_SYS_NICE from the process' capability set. If the
55564+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55565+ is created.
55566+
55567+config GRKERNSEC_CHROOT_SYSCTL
55568+ bool "Deny sysctl writes"
55569+ depends on GRKERNSEC_CHROOT
55570+ help
55571+ If you say Y here, an attacker in a chroot will not be able to
55572+ write to sysctl entries, either by sysctl(2) or through a /proc
55573+ interface. It is strongly recommended that you say Y here. If the
55574+ sysctl option is enabled, a sysctl option with name
55575+ "chroot_deny_sysctl" is created.
55576+
55577+config GRKERNSEC_CHROOT_CAPS
55578+ bool "Capability restrictions"
55579+ depends on GRKERNSEC_CHROOT
55580+ help
55581+ If you say Y here, the capabilities on all processes within a
55582+ chroot jail will be lowered to stop module insertion, raw i/o,
55583+ system and net admin tasks, rebooting the system, modifying immutable
55584+ files, modifying IPC owned by another, and changing the system time.
55585+ This is left an option because it can break some apps. Disable this
55586+ if your chrooted apps are having problems performing those kinds of
55587+ tasks. If the sysctl option is enabled, a sysctl option with
55588+ name "chroot_caps" is created.
55589+
55590+endmenu
55591+menu "Kernel Auditing"
55592+depends on GRKERNSEC
55593+
55594+config GRKERNSEC_AUDIT_GROUP
55595+ bool "Single group for auditing"
55596+ help
55597+ If you say Y here, the exec, chdir, and (un)mount logging features
55598+ will only operate on a group you specify. This option is recommended
55599+ if you only want to watch certain users instead of having a large
55600+ amount of logs from the entire system. If the sysctl option is enabled,
55601+ a sysctl option with name "audit_group" is created.
55602+
55603+config GRKERNSEC_AUDIT_GID
55604+ int "GID for auditing"
55605+ depends on GRKERNSEC_AUDIT_GROUP
55606+ default 1007
55607+
55608+config GRKERNSEC_EXECLOG
55609+ bool "Exec logging"
55610+ help
55611+ If you say Y here, all execve() calls will be logged (since the
55612+ other exec*() calls are frontends to execve(), all execution
55613+ will be logged). Useful for shell-servers that like to keep track
55614+ of their users. If the sysctl option is enabled, a sysctl option with
55615+ name "exec_logging" is created.
55616+ WARNING: This option when enabled will produce a LOT of logs, especially
55617+ on an active system.
55618+
55619+config GRKERNSEC_RESLOG
55620+ bool "Resource logging"
55621+ help
55622+ If you say Y here, all attempts to overstep resource limits will
55623+ be logged with the resource name, the requested size, and the current
55624+ limit. It is highly recommended that you say Y here. If the sysctl
55625+ option is enabled, a sysctl option with name "resource_logging" is
55626+ created. If the RBAC system is enabled, the sysctl value is ignored.
55627+
55628+config GRKERNSEC_CHROOT_EXECLOG
55629+ bool "Log execs within chroot"
55630+ help
55631+ If you say Y here, all executions inside a chroot jail will be logged
55632+ to syslog. This can cause a large amount of logs if certain
55633+ applications (eg. djb's daemontools) are installed on the system, and
55634+ is therefore left as an option. If the sysctl option is enabled, a
55635+ sysctl option with name "chroot_execlog" is created.
55636+
55637+config GRKERNSEC_AUDIT_PTRACE
55638+ bool "Ptrace logging"
55639+ help
55640+ If you say Y here, all attempts to attach to a process via ptrace
55641+ will be logged. If the sysctl option is enabled, a sysctl option
55642+ with name "audit_ptrace" is created.
55643+
55644+config GRKERNSEC_AUDIT_CHDIR
55645+ bool "Chdir logging"
55646+ help
55647+ If you say Y here, all chdir() calls will be logged. If the sysctl
55648+ option is enabled, a sysctl option with name "audit_chdir" is created.
55649+
55650+config GRKERNSEC_AUDIT_MOUNT
55651+ bool "(Un)Mount logging"
55652+ help
55653+ If you say Y here, all mounts and unmounts will be logged. If the
55654+ sysctl option is enabled, a sysctl option with name "audit_mount" is
55655+ created.
55656+
55657+config GRKERNSEC_SIGNAL
55658+ bool "Signal logging"
55659+ help
55660+ If you say Y here, certain important signals will be logged, such as
55661+ SIGSEGV, which will as a result inform you of when a error in a program
55662+ occurred, which in some cases could mean a possible exploit attempt.
55663+ If the sysctl option is enabled, a sysctl option with name
55664+ "signal_logging" is created.
55665+
55666+config GRKERNSEC_FORKFAIL
55667+ bool "Fork failure logging"
55668+ help
55669+ If you say Y here, all failed fork() attempts will be logged.
55670+ This could suggest a fork bomb, or someone attempting to overstep
55671+ their process limit. If the sysctl option is enabled, a sysctl option
55672+ with name "forkfail_logging" is created.
55673+
55674+config GRKERNSEC_TIME
55675+ bool "Time change logging"
55676+ help
55677+ If you say Y here, any changes of the system clock will be logged.
55678+ If the sysctl option is enabled, a sysctl option with name
55679+ "timechange_logging" is created.
55680+
55681+config GRKERNSEC_PROC_IPADDR
55682+ bool "/proc/<pid>/ipaddr support"
55683+ help
55684+ If you say Y here, a new entry will be added to each /proc/<pid>
55685+ directory that contains the IP address of the person using the task.
55686+ The IP is carried across local TCP and AF_UNIX stream sockets.
55687+ This information can be useful for IDS/IPSes to perform remote response
55688+ to a local attack. The entry is readable by only the owner of the
55689+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
55690+ the RBAC system), and thus does not create privacy concerns.
55691+
55692+config GRKERNSEC_RWXMAP_LOG
55693+ bool 'Denied RWX mmap/mprotect logging'
55694+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
55695+ help
55696+ If you say Y here, calls to mmap() and mprotect() with explicit
55697+ usage of PROT_WRITE and PROT_EXEC together will be logged when
55698+ denied by the PAX_MPROTECT feature. If the sysctl option is
55699+ enabled, a sysctl option with name "rwxmap_logging" is created.
55700+
55701+config GRKERNSEC_AUDIT_TEXTREL
55702+ bool 'ELF text relocations logging (READ HELP)'
55703+ depends on PAX_MPROTECT
55704+ help
55705+ If you say Y here, text relocations will be logged with the filename
55706+ of the offending library or binary. The purpose of the feature is
55707+ to help Linux distribution developers get rid of libraries and
55708+ binaries that need text relocations which hinder the future progress
55709+ of PaX. Only Linux distribution developers should say Y here, and
55710+ never on a production machine, as this option creates an information
55711+ leak that could aid an attacker in defeating the randomization of
55712+ a single memory region. If the sysctl option is enabled, a sysctl
55713+ option with name "audit_textrel" is created.
55714+
55715+endmenu
55716+
55717+menu "Executable Protections"
55718+depends on GRKERNSEC
55719+
55720+config GRKERNSEC_DMESG
55721+ bool "Dmesg(8) restriction"
55722+ help
55723+ If you say Y here, non-root users will not be able to use dmesg(8)
55724+ to view up to the last 4kb of messages in the kernel's log buffer.
55725+ The kernel's log buffer often contains kernel addresses and other
55726+ identifying information useful to an attacker in fingerprinting a
55727+ system for a targeted exploit.
55728+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
55729+ created.
55730+
55731+config GRKERNSEC_HARDEN_PTRACE
55732+ bool "Deter ptrace-based process snooping"
55733+ help
55734+ If you say Y here, TTY sniffers and other malicious monitoring
55735+ programs implemented through ptrace will be defeated. If you
55736+ have been using the RBAC system, this option has already been
55737+ enabled for several years for all users, with the ability to make
55738+ fine-grained exceptions.
55739+
55740+ This option only affects the ability of non-root users to ptrace
55741+ processes that are not a descendent of the ptracing process.
55742+ This means that strace ./binary and gdb ./binary will still work,
55743+ but attaching to arbitrary processes will not. If the sysctl
55744+ option is enabled, a sysctl option with name "harden_ptrace" is
55745+ created.
55746+
55747+config GRKERNSEC_PTRACE_READEXEC
55748+ bool "Require read access to ptrace sensitive binaries"
55749+ help
55750+ If you say Y here, unprivileged users will not be able to ptrace unreadable
55751+ binaries. This option is useful in environments that
55752+ remove the read bits (e.g. file mode 4711) from suid binaries to
55753+ prevent infoleaking of their contents. This option adds
55754+ consistency to the use of that file mode, as the binary could normally
55755+ be read out when run without privileges while ptracing.
55756+
55757+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
55758+ is created.
55759+
55760+config GRKERNSEC_SETXID
55761+ bool "Enforce consistent multithreaded privileges"
55762+ help
55763+ If you say Y here, a change from a root uid to a non-root uid
55764+ in a multithreaded application will cause the resulting uids,
55765+ gids, supplementary groups, and capabilities in that thread
55766+ to be propagated to the other threads of the process. In most
55767+ cases this is unnecessary, as glibc will emulate this behavior
55768+ on behalf of the application. Other libcs do not act in the
55769+ same way, allowing the other threads of the process to continue
55770+ running with root privileges. If the sysctl option is enabled,
55771+ a sysctl option with name "consistent_setxid" is created.
55772+
55773+config GRKERNSEC_TPE
55774+ bool "Trusted Path Execution (TPE)"
55775+ help
55776+ If you say Y here, you will be able to choose a gid to add to the
55777+ supplementary groups of users you want to mark as "untrusted."
55778+ These users will not be able to execute any files that are not in
55779+ root-owned directories writable only by root. If the sysctl option
55780+ is enabled, a sysctl option with name "tpe" is created.
55781+
55782+config GRKERNSEC_TPE_ALL
55783+ bool "Partially restrict all non-root users"
55784+ depends on GRKERNSEC_TPE
55785+ help
55786+ If you say Y here, all non-root users will be covered under
55787+ a weaker TPE restriction. This is separate from, and in addition to,
55788+ the main TPE options that you have selected elsewhere. Thus, if a
55789+ "trusted" GID is chosen, this restriction applies to even that GID.
55790+ Under this restriction, all non-root users will only be allowed to
55791+ execute files in directories they own that are not group or
55792+ world-writable, or in directories owned by root and writable only by
55793+ root. If the sysctl option is enabled, a sysctl option with name
55794+ "tpe_restrict_all" is created.
55795+
55796+config GRKERNSEC_TPE_INVERT
55797+ bool "Invert GID option"
55798+ depends on GRKERNSEC_TPE
55799+ help
55800+ If you say Y here, the group you specify in the TPE configuration will
55801+ decide what group TPE restrictions will be *disabled* for. This
55802+ option is useful if you want TPE restrictions to be applied to most
55803+ users on the system. If the sysctl option is enabled, a sysctl option
55804+ with name "tpe_invert" is created. Unlike other sysctl options, this
55805+ entry will default to on for backward-compatibility.
55806+
55807+config GRKERNSEC_TPE_GID
55808+ int "GID for untrusted users"
55809+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55810+ default 1005
55811+ help
55812+ Setting this GID determines what group TPE restrictions will be
55813+ *enabled* for. If the sysctl option is enabled, a sysctl option
55814+ with name "tpe_gid" is created.
55815+
55816+config GRKERNSEC_TPE_GID
55817+ int "GID for trusted users"
55818+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55819+ default 1005
55820+ help
55821+ Setting this GID determines what group TPE restrictions will be
55822+ *disabled* for. If the sysctl option is enabled, a sysctl option
55823+ with name "tpe_gid" is created.
55824+
55825+endmenu
55826+menu "Network Protections"
55827+depends on GRKERNSEC
55828+
55829+config GRKERNSEC_RANDNET
55830+ bool "Larger entropy pools"
55831+ help
55832+ If you say Y here, the entropy pools used for many features of Linux
55833+ and grsecurity will be doubled in size. Since several grsecurity
55834+ features use additional randomness, it is recommended that you say Y
55835+ here. Saying Y here has a similar effect as modifying
55836+ /proc/sys/kernel/random/poolsize.
55837+
55838+config GRKERNSEC_BLACKHOLE
55839+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55840+ depends on NET
55841+ help
55842+ If you say Y here, neither TCP resets nor ICMP
55843+ destination-unreachable packets will be sent in response to packets
55844+ sent to ports for which no associated listening process exists.
55845+ This feature supports both IPV4 and IPV6 and exempts the
55846+ loopback interface from blackholing. Enabling this feature
55847+ makes a host more resilient to DoS attacks and reduces network
55848+ visibility against scanners.
55849+
55850+ The blackhole feature as-implemented is equivalent to the FreeBSD
55851+ blackhole feature, as it prevents RST responses to all packets, not
55852+ just SYNs. Under most application behavior this causes no
55853+ problems, but applications (like haproxy) may not close certain
55854+ connections in a way that cleanly terminates them on the remote
55855+ end, leaving the remote host in LAST_ACK state. Because of this
55856+ side-effect and to prevent intentional LAST_ACK DoSes, this
55857+ feature also adds automatic mitigation against such attacks.
55858+ The mitigation drastically reduces the amount of time a socket
55859+ can spend in LAST_ACK state. If you're using haproxy and not
55860+ all servers it connects to have this option enabled, consider
55861+ disabling this feature on the haproxy host.
55862+
55863+ If the sysctl option is enabled, two sysctl options with names
55864+ "ip_blackhole" and "lastack_retries" will be created.
55865+ While "ip_blackhole" takes the standard zero/non-zero on/off
55866+ toggle, "lastack_retries" uses the same kinds of values as
55867+ "tcp_retries1" and "tcp_retries2". The default value of 4
55868+ prevents a socket from lasting more than 45 seconds in LAST_ACK
55869+ state.
55870+
55871+config GRKERNSEC_SOCKET
55872+ bool "Socket restrictions"
55873+ depends on NET
55874+ help
55875+ If you say Y here, you will be able to choose from several options.
55876+ If you assign a GID on your system and add it to the supplementary
55877+ groups of users you want to restrict socket access to, this patch
55878+ will perform up to three things, based on the option(s) you choose.
55879+
55880+config GRKERNSEC_SOCKET_ALL
55881+ bool "Deny any sockets to group"
55882+ depends on GRKERNSEC_SOCKET
55883+ help
55884+ If you say Y here, you will be able to choose a GID of whose users will
55885+ be unable to connect to other hosts from your machine or run server
55886+ applications from your machine. If the sysctl option is enabled, a
55887+ sysctl option with name "socket_all" is created.
55888+
55889+config GRKERNSEC_SOCKET_ALL_GID
55890+ int "GID to deny all sockets for"
55891+ depends on GRKERNSEC_SOCKET_ALL
55892+ default 1004
55893+ help
55894+ Here you can choose the GID to disable socket access for. Remember to
55895+ add the users you want socket access disabled for to the GID
55896+ specified here. If the sysctl option is enabled, a sysctl option
55897+ with name "socket_all_gid" is created.
55898+
55899+config GRKERNSEC_SOCKET_CLIENT
55900+ bool "Deny client sockets to group"
55901+ depends on GRKERNSEC_SOCKET
55902+ help
55903+ If you say Y here, you will be able to choose a GID of whose users will
55904+ be unable to connect to other hosts from your machine, but will be
55905+ able to run servers. If this option is enabled, all users in the group
55906+ you specify will have to use passive mode when initiating ftp transfers
55907+ from the shell on your machine. If the sysctl option is enabled, a
55908+ sysctl option with name "socket_client" is created.
55909+
55910+config GRKERNSEC_SOCKET_CLIENT_GID
55911+ int "GID to deny client sockets for"
55912+ depends on GRKERNSEC_SOCKET_CLIENT
55913+ default 1003
55914+ help
55915+ Here you can choose the GID to disable client socket access for.
55916+ Remember to add the users you want client socket access disabled for to
55917+ the GID specified here. If the sysctl option is enabled, a sysctl
55918+ option with name "socket_client_gid" is created.
55919+
55920+config GRKERNSEC_SOCKET_SERVER
55921+ bool "Deny server sockets to group"
55922+ depends on GRKERNSEC_SOCKET
55923+ help
55924+ If you say Y here, you will be able to choose a GID of whose users will
55925+ be unable to run server applications from your machine. If the sysctl
55926+ option is enabled, a sysctl option with name "socket_server" is created.
55927+
55928+config GRKERNSEC_SOCKET_SERVER_GID
55929+ int "GID to deny server sockets for"
55930+ depends on GRKERNSEC_SOCKET_SERVER
55931+ default 1002
55932+ help
55933+ Here you can choose the GID to disable server socket access for.
55934+ Remember to add the users you want server socket access disabled for to
55935+ the GID specified here. If the sysctl option is enabled, a sysctl
55936+ option with name "socket_server_gid" is created.
55937+
55938+endmenu
55939+menu "Sysctl support"
55940+depends on GRKERNSEC && SYSCTL
55941+
55942+config GRKERNSEC_SYSCTL
55943+ bool "Sysctl support"
55944+ help
55945+ If you say Y here, you will be able to change the options that
55946+ grsecurity runs with at bootup, without having to recompile your
55947+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
55948+ to enable (1) or disable (0) various features. All the sysctl entries
55949+ are mutable until the "grsec_lock" entry is set to a non-zero value.
55950+ All features enabled in the kernel configuration are disabled at boot
55951+ if you do not say Y to the "Turn on features by default" option.
55952+ All options should be set at startup, and the grsec_lock entry should
55953+ be set to a non-zero value after all the options are set.
55954+ *THIS IS EXTREMELY IMPORTANT*
55955+
55956+config GRKERNSEC_SYSCTL_DISTRO
55957+ bool "Extra sysctl support for distro makers (READ HELP)"
55958+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
55959+ help
55960+ If you say Y here, additional sysctl options will be created
55961+ for features that affect processes running as root. Therefore,
55962+ it is critical when using this option that the grsec_lock entry be
55963+ enabled after boot. Only distros with prebuilt kernel packages
55964+ with this option enabled that can ensure grsec_lock is enabled
55965+ after boot should use this option.
55966+ *Failure to set grsec_lock after boot makes all grsec features
55967+ this option covers useless*
55968+
55969+ Currently this option creates the following sysctl entries:
55970+ "Disable Privileged I/O": "disable_priv_io"
55971+
55972+config GRKERNSEC_SYSCTL_ON
55973+ bool "Turn on features by default"
55974+ depends on GRKERNSEC_SYSCTL
55975+ help
55976+ If you say Y here, instead of having all features enabled in the
55977+ kernel configuration disabled at boot time, the features will be
55978+ enabled at boot time. It is recommended you say Y here unless
55979+ there is some reason you would want all sysctl-tunable features to
55980+ be disabled by default. As mentioned elsewhere, it is important
55981+ to enable the grsec_lock entry once you have finished modifying
55982+ the sysctl entries.
55983+
55984+endmenu
55985+menu "Logging Options"
55986+depends on GRKERNSEC
55987+
55988+config GRKERNSEC_FLOODTIME
55989+ int "Seconds in between log messages (minimum)"
55990+ default 10
55991+ help
55992+ This option allows you to enforce the number of seconds between
55993+ grsecurity log messages. The default should be suitable for most
55994+ people, however, if you choose to change it, choose a value small enough
55995+ to allow informative logs to be produced, but large enough to
55996+ prevent flooding.
55997+
55998+config GRKERNSEC_FLOODBURST
55999+ int "Number of messages in a burst (maximum)"
56000+ default 6
56001+ help
56002+ This option allows you to choose the maximum number of messages allowed
56003+ within the flood time interval you chose in a separate option. The
56004+ default should be suitable for most people, however if you find that
56005+ many of your logs are being interpreted as flooding, you may want to
56006+ raise this value.
56007+
56008+endmenu
56009+
56010+endmenu
56011diff --git a/grsecurity/Makefile b/grsecurity/Makefile
56012new file mode 100644
56013index 0000000..1b9afa9
56014--- /dev/null
56015+++ b/grsecurity/Makefile
56016@@ -0,0 +1,38 @@
56017+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
56018+# during 2001-2009 it has been completely redesigned by Brad Spengler
56019+# into an RBAC system
56020+#
56021+# All code in this directory and various hooks inserted throughout the kernel
56022+# are copyright Brad Spengler - Open Source Security, Inc., and released
56023+# under the GPL v2 or higher
56024+
56025+KBUILD_CFLAGS += -Werror
56026+
56027+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
56028+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
56029+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
56030+
56031+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
56032+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
56033+ gracl_learn.o grsec_log.o
56034+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
56035+
56036+ifdef CONFIG_NET
56037+obj-y += grsec_sock.o
56038+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
56039+endif
56040+
56041+ifndef CONFIG_GRKERNSEC
56042+obj-y += grsec_disabled.o
56043+endif
56044+
56045+ifdef CONFIG_GRKERNSEC_HIDESYM
56046+extra-y := grsec_hidesym.o
56047+$(obj)/grsec_hidesym.o:
56048+ @-chmod -f 500 /boot
56049+ @-chmod -f 500 /lib/modules
56050+ @-chmod -f 500 /lib64/modules
56051+ @-chmod -f 500 /lib32/modules
56052+ @-chmod -f 700 .
56053+ @echo ' grsec: protected kernel image paths'
56054+endif
56055diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
56056new file mode 100644
56057index 0000000..78e83d8
56058--- /dev/null
56059+++ b/grsecurity/gracl.c
56060@@ -0,0 +1,4148 @@
56061+#include <linux/kernel.h>
56062+#include <linux/module.h>
56063+#include <linux/sched.h>
56064+#include <linux/mm.h>
56065+#include <linux/file.h>
56066+#include <linux/fs.h>
56067+#include <linux/namei.h>
56068+#include <linux/mount.h>
56069+#include <linux/tty.h>
56070+#include <linux/proc_fs.h>
56071+#include <linux/smp_lock.h>
56072+#include <linux/slab.h>
56073+#include <linux/vmalloc.h>
56074+#include <linux/types.h>
56075+#include <linux/sysctl.h>
56076+#include <linux/netdevice.h>
56077+#include <linux/ptrace.h>
56078+#include <linux/gracl.h>
56079+#include <linux/gralloc.h>
56080+#include <linux/security.h>
56081+#include <linux/grinternal.h>
56082+#include <linux/pid_namespace.h>
56083+#include <linux/fdtable.h>
56084+#include <linux/percpu.h>
56085+
56086+#include <asm/uaccess.h>
56087+#include <asm/errno.h>
56088+#include <asm/mman.h>
56089+
56090+static struct acl_role_db acl_role_set;
56091+static struct name_db name_set;
56092+static struct inodev_db inodev_set;
56093+
56094+/* for keeping track of userspace pointers used for subjects, so we
56095+ can share references in the kernel as well
56096+*/
56097+
56098+static struct dentry *real_root;
56099+static struct vfsmount *real_root_mnt;
56100+
56101+static struct acl_subj_map_db subj_map_set;
56102+
56103+static struct acl_role_label *default_role;
56104+
56105+static struct acl_role_label *role_list;
56106+
56107+static u16 acl_sp_role_value;
56108+
56109+extern char *gr_shared_page[4];
56110+static DEFINE_MUTEX(gr_dev_mutex);
56111+DEFINE_RWLOCK(gr_inode_lock);
56112+
56113+struct gr_arg *gr_usermode;
56114+
56115+static unsigned int gr_status __read_only = GR_STATUS_INIT;
56116+
56117+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56118+extern void gr_clear_learn_entries(void);
56119+
56120+#ifdef CONFIG_GRKERNSEC_RESLOG
56121+extern void gr_log_resource(const struct task_struct *task,
56122+ const int res, const unsigned long wanted, const int gt);
56123+#endif
56124+
56125+unsigned char *gr_system_salt;
56126+unsigned char *gr_system_sum;
56127+
56128+static struct sprole_pw **acl_special_roles = NULL;
56129+static __u16 num_sprole_pws = 0;
56130+
56131+static struct acl_role_label *kernel_role = NULL;
56132+
56133+static unsigned int gr_auth_attempts = 0;
56134+static unsigned long gr_auth_expires = 0UL;
56135+
56136+#ifdef CONFIG_NET
56137+extern struct vfsmount *sock_mnt;
56138+#endif
56139+extern struct vfsmount *pipe_mnt;
56140+extern struct vfsmount *shm_mnt;
56141+#ifdef CONFIG_HUGETLBFS
56142+extern struct vfsmount *hugetlbfs_vfsmount;
56143+#endif
56144+
56145+static struct acl_object_label *fakefs_obj_rw;
56146+static struct acl_object_label *fakefs_obj_rwx;
56147+
56148+extern int gr_init_uidset(void);
56149+extern void gr_free_uidset(void);
56150+extern void gr_remove_uid(uid_t uid);
56151+extern int gr_find_uid(uid_t uid);
56152+
56153+__inline__ int
56154+gr_acl_is_enabled(void)
56155+{
56156+ return (gr_status & GR_READY);
56157+}
56158+
56159+#ifdef CONFIG_BTRFS_FS
56160+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56161+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56162+#endif
56163+
56164+static inline dev_t __get_dev(const struct dentry *dentry)
56165+{
56166+#ifdef CONFIG_BTRFS_FS
56167+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56168+ return get_btrfs_dev_from_inode(dentry->d_inode);
56169+ else
56170+#endif
56171+ return dentry->d_inode->i_sb->s_dev;
56172+}
56173+
56174+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56175+{
56176+ return __get_dev(dentry);
56177+}
56178+
56179+static char gr_task_roletype_to_char(struct task_struct *task)
56180+{
56181+ switch (task->role->roletype &
56182+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56183+ GR_ROLE_SPECIAL)) {
56184+ case GR_ROLE_DEFAULT:
56185+ return 'D';
56186+ case GR_ROLE_USER:
56187+ return 'U';
56188+ case GR_ROLE_GROUP:
56189+ return 'G';
56190+ case GR_ROLE_SPECIAL:
56191+ return 'S';
56192+ }
56193+
56194+ return 'X';
56195+}
56196+
56197+char gr_roletype_to_char(void)
56198+{
56199+ return gr_task_roletype_to_char(current);
56200+}
56201+
56202+__inline__ int
56203+gr_acl_tpe_check(void)
56204+{
56205+ if (unlikely(!(gr_status & GR_READY)))
56206+ return 0;
56207+ if (current->role->roletype & GR_ROLE_TPE)
56208+ return 1;
56209+ else
56210+ return 0;
56211+}
56212+
56213+int
56214+gr_handle_rawio(const struct inode *inode)
56215+{
56216+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56217+ if (inode && S_ISBLK(inode->i_mode) &&
56218+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56219+ !capable(CAP_SYS_RAWIO))
56220+ return 1;
56221+#endif
56222+ return 0;
56223+}
56224+
56225+static int
56226+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56227+{
56228+ if (likely(lena != lenb))
56229+ return 0;
56230+
56231+ return !memcmp(a, b, lena);
56232+}
56233+
56234+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56235+{
56236+ *buflen -= namelen;
56237+ if (*buflen < 0)
56238+ return -ENAMETOOLONG;
56239+ *buffer -= namelen;
56240+ memcpy(*buffer, str, namelen);
56241+ return 0;
56242+}
56243+
56244+/* this must be called with vfsmount_lock and dcache_lock held */
56245+
56246+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56247+ struct dentry *root, struct vfsmount *rootmnt,
56248+ char *buffer, int buflen)
56249+{
56250+ char * end = buffer+buflen;
56251+ char * retval;
56252+ int namelen;
56253+
56254+ *--end = '\0';
56255+ buflen--;
56256+
56257+ if (buflen < 1)
56258+ goto Elong;
56259+ /* Get '/' right */
56260+ retval = end-1;
56261+ *retval = '/';
56262+
56263+ for (;;) {
56264+ struct dentry * parent;
56265+
56266+ if (dentry == root && vfsmnt == rootmnt)
56267+ break;
56268+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56269+ /* Global root? */
56270+ if (vfsmnt->mnt_parent == vfsmnt)
56271+ goto global_root;
56272+ dentry = vfsmnt->mnt_mountpoint;
56273+ vfsmnt = vfsmnt->mnt_parent;
56274+ continue;
56275+ }
56276+ parent = dentry->d_parent;
56277+ prefetch(parent);
56278+ namelen = dentry->d_name.len;
56279+ buflen -= namelen + 1;
56280+ if (buflen < 0)
56281+ goto Elong;
56282+ end -= namelen;
56283+ memcpy(end, dentry->d_name.name, namelen);
56284+ *--end = '/';
56285+ retval = end;
56286+ dentry = parent;
56287+ }
56288+
56289+out:
56290+ return retval;
56291+
56292+global_root:
56293+ namelen = dentry->d_name.len;
56294+ buflen -= namelen;
56295+ if (buflen < 0)
56296+ goto Elong;
56297+ retval -= namelen-1; /* hit the slash */
56298+ memcpy(retval, dentry->d_name.name, namelen);
56299+ goto out;
56300+Elong:
56301+ retval = ERR_PTR(-ENAMETOOLONG);
56302+ goto out;
56303+}
56304+
56305+static char *
56306+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56307+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56308+{
56309+ char *retval;
56310+
56311+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56312+ if (unlikely(IS_ERR(retval)))
56313+ retval = strcpy(buf, "<path too long>");
56314+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56315+ retval[1] = '\0';
56316+
56317+ return retval;
56318+}
56319+
56320+static char *
56321+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56322+ char *buf, int buflen)
56323+{
56324+ char *res;
56325+
56326+ /* we can use real_root, real_root_mnt, because this is only called
56327+ by the RBAC system */
56328+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56329+
56330+ return res;
56331+}
56332+
56333+static char *
56334+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56335+ char *buf, int buflen)
56336+{
56337+ char *res;
56338+ struct dentry *root;
56339+ struct vfsmount *rootmnt;
56340+ struct task_struct *reaper = &init_task;
56341+
56342+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56343+ read_lock(&reaper->fs->lock);
56344+ root = dget(reaper->fs->root.dentry);
56345+ rootmnt = mntget(reaper->fs->root.mnt);
56346+ read_unlock(&reaper->fs->lock);
56347+
56348+ spin_lock(&dcache_lock);
56349+ spin_lock(&vfsmount_lock);
56350+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56351+ spin_unlock(&vfsmount_lock);
56352+ spin_unlock(&dcache_lock);
56353+
56354+ dput(root);
56355+ mntput(rootmnt);
56356+ return res;
56357+}
56358+
56359+static char *
56360+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56361+{
56362+ char *ret;
56363+ spin_lock(&dcache_lock);
56364+ spin_lock(&vfsmount_lock);
56365+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56366+ PAGE_SIZE);
56367+ spin_unlock(&vfsmount_lock);
56368+ spin_unlock(&dcache_lock);
56369+ return ret;
56370+}
56371+
56372+static char *
56373+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56374+{
56375+ char *ret;
56376+ char *buf;
56377+ int buflen;
56378+
56379+ spin_lock(&dcache_lock);
56380+ spin_lock(&vfsmount_lock);
56381+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56382+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56383+ buflen = (int)(ret - buf);
56384+ if (buflen >= 5)
56385+ prepend(&ret, &buflen, "/proc", 5);
56386+ else
56387+ ret = strcpy(buf, "<path too long>");
56388+ spin_unlock(&vfsmount_lock);
56389+ spin_unlock(&dcache_lock);
56390+ return ret;
56391+}
56392+
56393+char *
56394+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56395+{
56396+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56397+ PAGE_SIZE);
56398+}
56399+
56400+char *
56401+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56402+{
56403+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56404+ PAGE_SIZE);
56405+}
56406+
56407+char *
56408+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56409+{
56410+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56411+ PAGE_SIZE);
56412+}
56413+
56414+char *
56415+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56416+{
56417+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56418+ PAGE_SIZE);
56419+}
56420+
56421+char *
56422+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56423+{
56424+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56425+ PAGE_SIZE);
56426+}
56427+
56428+__inline__ __u32
56429+to_gr_audit(const __u32 reqmode)
56430+{
56431+ /* masks off auditable permission flags, then shifts them to create
56432+ auditing flags, and adds the special case of append auditing if
56433+ we're requesting write */
56434+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56435+}
56436+
56437+struct acl_subject_label *
56438+lookup_subject_map(const struct acl_subject_label *userp)
56439+{
56440+ unsigned int index = shash(userp, subj_map_set.s_size);
56441+ struct subject_map *match;
56442+
56443+ match = subj_map_set.s_hash[index];
56444+
56445+ while (match && match->user != userp)
56446+ match = match->next;
56447+
56448+ if (match != NULL)
56449+ return match->kernel;
56450+ else
56451+ return NULL;
56452+}
56453+
56454+static void
56455+insert_subj_map_entry(struct subject_map *subjmap)
56456+{
56457+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56458+ struct subject_map **curr;
56459+
56460+ subjmap->prev = NULL;
56461+
56462+ curr = &subj_map_set.s_hash[index];
56463+ if (*curr != NULL)
56464+ (*curr)->prev = subjmap;
56465+
56466+ subjmap->next = *curr;
56467+ *curr = subjmap;
56468+
56469+ return;
56470+}
56471+
56472+static struct acl_role_label *
56473+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
56474+ const gid_t gid)
56475+{
56476+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56477+ struct acl_role_label *match;
56478+ struct role_allowed_ip *ipp;
56479+ unsigned int x;
56480+ u32 curr_ip = task->signal->curr_ip;
56481+
56482+ task->signal->saved_ip = curr_ip;
56483+
56484+ match = acl_role_set.r_hash[index];
56485+
56486+ while (match) {
56487+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56488+ for (x = 0; x < match->domain_child_num; x++) {
56489+ if (match->domain_children[x] == uid)
56490+ goto found;
56491+ }
56492+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56493+ break;
56494+ match = match->next;
56495+ }
56496+found:
56497+ if (match == NULL) {
56498+ try_group:
56499+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56500+ match = acl_role_set.r_hash[index];
56501+
56502+ while (match) {
56503+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56504+ for (x = 0; x < match->domain_child_num; x++) {
56505+ if (match->domain_children[x] == gid)
56506+ goto found2;
56507+ }
56508+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56509+ break;
56510+ match = match->next;
56511+ }
56512+found2:
56513+ if (match == NULL)
56514+ match = default_role;
56515+ if (match->allowed_ips == NULL)
56516+ return match;
56517+ else {
56518+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56519+ if (likely
56520+ ((ntohl(curr_ip) & ipp->netmask) ==
56521+ (ntohl(ipp->addr) & ipp->netmask)))
56522+ return match;
56523+ }
56524+ match = default_role;
56525+ }
56526+ } else if (match->allowed_ips == NULL) {
56527+ return match;
56528+ } else {
56529+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56530+ if (likely
56531+ ((ntohl(curr_ip) & ipp->netmask) ==
56532+ (ntohl(ipp->addr) & ipp->netmask)))
56533+ return match;
56534+ }
56535+ goto try_group;
56536+ }
56537+
56538+ return match;
56539+}
56540+
56541+struct acl_subject_label *
56542+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
56543+ const struct acl_role_label *role)
56544+{
56545+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
56546+ struct acl_subject_label *match;
56547+
56548+ match = role->subj_hash[index];
56549+
56550+ while (match && (match->inode != ino || match->device != dev ||
56551+ (match->mode & GR_DELETED))) {
56552+ match = match->next;
56553+ }
56554+
56555+ if (match && !(match->mode & GR_DELETED))
56556+ return match;
56557+ else
56558+ return NULL;
56559+}
56560+
56561+struct acl_subject_label *
56562+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
56563+ const struct acl_role_label *role)
56564+{
56565+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
56566+ struct acl_subject_label *match;
56567+
56568+ match = role->subj_hash[index];
56569+
56570+ while (match && (match->inode != ino || match->device != dev ||
56571+ !(match->mode & GR_DELETED))) {
56572+ match = match->next;
56573+ }
56574+
56575+ if (match && (match->mode & GR_DELETED))
56576+ return match;
56577+ else
56578+ return NULL;
56579+}
56580+
56581+static struct acl_object_label *
56582+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
56583+ const struct acl_subject_label *subj)
56584+{
56585+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56586+ struct acl_object_label *match;
56587+
56588+ match = subj->obj_hash[index];
56589+
56590+ while (match && (match->inode != ino || match->device != dev ||
56591+ (match->mode & GR_DELETED))) {
56592+ match = match->next;
56593+ }
56594+
56595+ if (match && !(match->mode & GR_DELETED))
56596+ return match;
56597+ else
56598+ return NULL;
56599+}
56600+
56601+static struct acl_object_label *
56602+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
56603+ const struct acl_subject_label *subj)
56604+{
56605+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56606+ struct acl_object_label *match;
56607+
56608+ match = subj->obj_hash[index];
56609+
56610+ while (match && (match->inode != ino || match->device != dev ||
56611+ !(match->mode & GR_DELETED))) {
56612+ match = match->next;
56613+ }
56614+
56615+ if (match && (match->mode & GR_DELETED))
56616+ return match;
56617+
56618+ match = subj->obj_hash[index];
56619+
56620+ while (match && (match->inode != ino || match->device != dev ||
56621+ (match->mode & GR_DELETED))) {
56622+ match = match->next;
56623+ }
56624+
56625+ if (match && !(match->mode & GR_DELETED))
56626+ return match;
56627+ else
56628+ return NULL;
56629+}
56630+
56631+static struct name_entry *
56632+lookup_name_entry(const char *name)
56633+{
56634+ unsigned int len = strlen(name);
56635+ unsigned int key = full_name_hash(name, len);
56636+ unsigned int index = key % name_set.n_size;
56637+ struct name_entry *match;
56638+
56639+ match = name_set.n_hash[index];
56640+
56641+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
56642+ match = match->next;
56643+
56644+ return match;
56645+}
56646+
56647+static struct name_entry *
56648+lookup_name_entry_create(const char *name)
56649+{
56650+ unsigned int len = strlen(name);
56651+ unsigned int key = full_name_hash(name, len);
56652+ unsigned int index = key % name_set.n_size;
56653+ struct name_entry *match;
56654+
56655+ match = name_set.n_hash[index];
56656+
56657+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56658+ !match->deleted))
56659+ match = match->next;
56660+
56661+ if (match && match->deleted)
56662+ return match;
56663+
56664+ match = name_set.n_hash[index];
56665+
56666+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56667+ match->deleted))
56668+ match = match->next;
56669+
56670+ if (match && !match->deleted)
56671+ return match;
56672+ else
56673+ return NULL;
56674+}
56675+
56676+static struct inodev_entry *
56677+lookup_inodev_entry(const ino_t ino, const dev_t dev)
56678+{
56679+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
56680+ struct inodev_entry *match;
56681+
56682+ match = inodev_set.i_hash[index];
56683+
56684+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
56685+ match = match->next;
56686+
56687+ return match;
56688+}
56689+
56690+static void
56691+insert_inodev_entry(struct inodev_entry *entry)
56692+{
56693+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
56694+ inodev_set.i_size);
56695+ struct inodev_entry **curr;
56696+
56697+ entry->prev = NULL;
56698+
56699+ curr = &inodev_set.i_hash[index];
56700+ if (*curr != NULL)
56701+ (*curr)->prev = entry;
56702+
56703+ entry->next = *curr;
56704+ *curr = entry;
56705+
56706+ return;
56707+}
56708+
56709+static void
56710+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
56711+{
56712+ unsigned int index =
56713+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
56714+ struct acl_role_label **curr;
56715+ struct acl_role_label *tmp;
56716+
56717+ curr = &acl_role_set.r_hash[index];
56718+
56719+ /* if role was already inserted due to domains and already has
56720+ a role in the same bucket as it attached, then we need to
56721+ combine these two buckets
56722+ */
56723+ if (role->next) {
56724+ tmp = role->next;
56725+ while (tmp->next)
56726+ tmp = tmp->next;
56727+ tmp->next = *curr;
56728+ } else
56729+ role->next = *curr;
56730+ *curr = role;
56731+
56732+ return;
56733+}
56734+
56735+static void
56736+insert_acl_role_label(struct acl_role_label *role)
56737+{
56738+ int i;
56739+
56740+ if (role_list == NULL) {
56741+ role_list = role;
56742+ role->prev = NULL;
56743+ } else {
56744+ role->prev = role_list;
56745+ role_list = role;
56746+ }
56747+
56748+ /* used for hash chains */
56749+ role->next = NULL;
56750+
56751+ if (role->roletype & GR_ROLE_DOMAIN) {
56752+ for (i = 0; i < role->domain_child_num; i++)
56753+ __insert_acl_role_label(role, role->domain_children[i]);
56754+ } else
56755+ __insert_acl_role_label(role, role->uidgid);
56756+}
56757+
56758+static int
56759+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
56760+{
56761+ struct name_entry **curr, *nentry;
56762+ struct inodev_entry *ientry;
56763+ unsigned int len = strlen(name);
56764+ unsigned int key = full_name_hash(name, len);
56765+ unsigned int index = key % name_set.n_size;
56766+
56767+ curr = &name_set.n_hash[index];
56768+
56769+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
56770+ curr = &((*curr)->next);
56771+
56772+ if (*curr != NULL)
56773+ return 1;
56774+
56775+ nentry = acl_alloc(sizeof (struct name_entry));
56776+ if (nentry == NULL)
56777+ return 0;
56778+ ientry = acl_alloc(sizeof (struct inodev_entry));
56779+ if (ientry == NULL)
56780+ return 0;
56781+ ientry->nentry = nentry;
56782+
56783+ nentry->key = key;
56784+ nentry->name = name;
56785+ nentry->inode = inode;
56786+ nentry->device = device;
56787+ nentry->len = len;
56788+ nentry->deleted = deleted;
56789+
56790+ nentry->prev = NULL;
56791+ curr = &name_set.n_hash[index];
56792+ if (*curr != NULL)
56793+ (*curr)->prev = nentry;
56794+ nentry->next = *curr;
56795+ *curr = nentry;
56796+
56797+ /* insert us into the table searchable by inode/dev */
56798+ insert_inodev_entry(ientry);
56799+
56800+ return 1;
56801+}
56802+
56803+static void
56804+insert_acl_obj_label(struct acl_object_label *obj,
56805+ struct acl_subject_label *subj)
56806+{
56807+ unsigned int index =
56808+ fhash(obj->inode, obj->device, subj->obj_hash_size);
56809+ struct acl_object_label **curr;
56810+
56811+
56812+ obj->prev = NULL;
56813+
56814+ curr = &subj->obj_hash[index];
56815+ if (*curr != NULL)
56816+ (*curr)->prev = obj;
56817+
56818+ obj->next = *curr;
56819+ *curr = obj;
56820+
56821+ return;
56822+}
56823+
56824+static void
56825+insert_acl_subj_label(struct acl_subject_label *obj,
56826+ struct acl_role_label *role)
56827+{
56828+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
56829+ struct acl_subject_label **curr;
56830+
56831+ obj->prev = NULL;
56832+
56833+ curr = &role->subj_hash[index];
56834+ if (*curr != NULL)
56835+ (*curr)->prev = obj;
56836+
56837+ obj->next = *curr;
56838+ *curr = obj;
56839+
56840+ return;
56841+}
56842+
56843+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
56844+
56845+static void *
56846+create_table(__u32 * len, int elementsize)
56847+{
56848+ unsigned int table_sizes[] = {
56849+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
56850+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
56851+ 4194301, 8388593, 16777213, 33554393, 67108859
56852+ };
56853+ void *newtable = NULL;
56854+ unsigned int pwr = 0;
56855+
56856+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
56857+ table_sizes[pwr] <= *len)
56858+ pwr++;
56859+
56860+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
56861+ return newtable;
56862+
56863+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
56864+ newtable =
56865+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
56866+ else
56867+ newtable = vmalloc(table_sizes[pwr] * elementsize);
56868+
56869+ *len = table_sizes[pwr];
56870+
56871+ return newtable;
56872+}
56873+
56874+static int
56875+init_variables(const struct gr_arg *arg)
56876+{
56877+ struct task_struct *reaper = &init_task;
56878+ unsigned int stacksize;
56879+
56880+ subj_map_set.s_size = arg->role_db.num_subjects;
56881+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
56882+ name_set.n_size = arg->role_db.num_objects;
56883+ inodev_set.i_size = arg->role_db.num_objects;
56884+
56885+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
56886+ !name_set.n_size || !inodev_set.i_size)
56887+ return 1;
56888+
56889+ if (!gr_init_uidset())
56890+ return 1;
56891+
56892+ /* set up the stack that holds allocation info */
56893+
56894+ stacksize = arg->role_db.num_pointers + 5;
56895+
56896+ if (!acl_alloc_stack_init(stacksize))
56897+ return 1;
56898+
56899+ /* grab reference for the real root dentry and vfsmount */
56900+ read_lock(&reaper->fs->lock);
56901+ real_root = dget(reaper->fs->root.dentry);
56902+ real_root_mnt = mntget(reaper->fs->root.mnt);
56903+ read_unlock(&reaper->fs->lock);
56904+
56905+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56906+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
56907+#endif
56908+
56909+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
56910+ if (fakefs_obj_rw == NULL)
56911+ return 1;
56912+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
56913+
56914+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
56915+ if (fakefs_obj_rwx == NULL)
56916+ return 1;
56917+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
56918+
56919+ subj_map_set.s_hash =
56920+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
56921+ acl_role_set.r_hash =
56922+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
56923+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
56924+ inodev_set.i_hash =
56925+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
56926+
56927+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
56928+ !name_set.n_hash || !inodev_set.i_hash)
56929+ return 1;
56930+
56931+ memset(subj_map_set.s_hash, 0,
56932+ sizeof(struct subject_map *) * subj_map_set.s_size);
56933+ memset(acl_role_set.r_hash, 0,
56934+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
56935+ memset(name_set.n_hash, 0,
56936+ sizeof (struct name_entry *) * name_set.n_size);
56937+ memset(inodev_set.i_hash, 0,
56938+ sizeof (struct inodev_entry *) * inodev_set.i_size);
56939+
56940+ return 0;
56941+}
56942+
56943+/* free information not needed after startup
56944+ currently contains user->kernel pointer mappings for subjects
56945+*/
56946+
56947+static void
56948+free_init_variables(void)
56949+{
56950+ __u32 i;
56951+
56952+ if (subj_map_set.s_hash) {
56953+ for (i = 0; i < subj_map_set.s_size; i++) {
56954+ if (subj_map_set.s_hash[i]) {
56955+ kfree(subj_map_set.s_hash[i]);
56956+ subj_map_set.s_hash[i] = NULL;
56957+ }
56958+ }
56959+
56960+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
56961+ PAGE_SIZE)
56962+ kfree(subj_map_set.s_hash);
56963+ else
56964+ vfree(subj_map_set.s_hash);
56965+ }
56966+
56967+ return;
56968+}
56969+
56970+static void
56971+free_variables(void)
56972+{
56973+ struct acl_subject_label *s;
56974+ struct acl_role_label *r;
56975+ struct task_struct *task, *task2;
56976+ unsigned int x;
56977+
56978+ gr_clear_learn_entries();
56979+
56980+ read_lock(&tasklist_lock);
56981+ do_each_thread(task2, task) {
56982+ task->acl_sp_role = 0;
56983+ task->acl_role_id = 0;
56984+ task->acl = NULL;
56985+ task->role = NULL;
56986+ } while_each_thread(task2, task);
56987+ read_unlock(&tasklist_lock);
56988+
56989+ /* release the reference to the real root dentry and vfsmount */
56990+ if (real_root)
56991+ dput(real_root);
56992+ real_root = NULL;
56993+ if (real_root_mnt)
56994+ mntput(real_root_mnt);
56995+ real_root_mnt = NULL;
56996+
56997+ /* free all object hash tables */
56998+
56999+ FOR_EACH_ROLE_START(r)
57000+ if (r->subj_hash == NULL)
57001+ goto next_role;
57002+ FOR_EACH_SUBJECT_START(r, s, x)
57003+ if (s->obj_hash == NULL)
57004+ break;
57005+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57006+ kfree(s->obj_hash);
57007+ else
57008+ vfree(s->obj_hash);
57009+ FOR_EACH_SUBJECT_END(s, x)
57010+ FOR_EACH_NESTED_SUBJECT_START(r, s)
57011+ if (s->obj_hash == NULL)
57012+ break;
57013+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
57014+ kfree(s->obj_hash);
57015+ else
57016+ vfree(s->obj_hash);
57017+ FOR_EACH_NESTED_SUBJECT_END(s)
57018+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
57019+ kfree(r->subj_hash);
57020+ else
57021+ vfree(r->subj_hash);
57022+ r->subj_hash = NULL;
57023+next_role:
57024+ FOR_EACH_ROLE_END(r)
57025+
57026+ acl_free_all();
57027+
57028+ if (acl_role_set.r_hash) {
57029+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
57030+ PAGE_SIZE)
57031+ kfree(acl_role_set.r_hash);
57032+ else
57033+ vfree(acl_role_set.r_hash);
57034+ }
57035+ if (name_set.n_hash) {
57036+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
57037+ PAGE_SIZE)
57038+ kfree(name_set.n_hash);
57039+ else
57040+ vfree(name_set.n_hash);
57041+ }
57042+
57043+ if (inodev_set.i_hash) {
57044+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
57045+ PAGE_SIZE)
57046+ kfree(inodev_set.i_hash);
57047+ else
57048+ vfree(inodev_set.i_hash);
57049+ }
57050+
57051+ gr_free_uidset();
57052+
57053+ memset(&name_set, 0, sizeof (struct name_db));
57054+ memset(&inodev_set, 0, sizeof (struct inodev_db));
57055+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
57056+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
57057+
57058+ default_role = NULL;
57059+ role_list = NULL;
57060+
57061+ return;
57062+}
57063+
57064+static __u32
57065+count_user_objs(struct acl_object_label *userp)
57066+{
57067+ struct acl_object_label o_tmp;
57068+ __u32 num = 0;
57069+
57070+ while (userp) {
57071+ if (copy_from_user(&o_tmp, userp,
57072+ sizeof (struct acl_object_label)))
57073+ break;
57074+
57075+ userp = o_tmp.prev;
57076+ num++;
57077+ }
57078+
57079+ return num;
57080+}
57081+
57082+static struct acl_subject_label *
57083+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
57084+
57085+static int
57086+copy_user_glob(struct acl_object_label *obj)
57087+{
57088+ struct acl_object_label *g_tmp, **guser;
57089+ unsigned int len;
57090+ char *tmp;
57091+
57092+ if (obj->globbed == NULL)
57093+ return 0;
57094+
57095+ guser = &obj->globbed;
57096+ while (*guser) {
57097+ g_tmp = (struct acl_object_label *)
57098+ acl_alloc(sizeof (struct acl_object_label));
57099+ if (g_tmp == NULL)
57100+ return -ENOMEM;
57101+
57102+ if (copy_from_user(g_tmp, *guser,
57103+ sizeof (struct acl_object_label)))
57104+ return -EFAULT;
57105+
57106+ len = strnlen_user(g_tmp->filename, PATH_MAX);
57107+
57108+ if (!len || len >= PATH_MAX)
57109+ return -EINVAL;
57110+
57111+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57112+ return -ENOMEM;
57113+
57114+ if (copy_from_user(tmp, g_tmp->filename, len))
57115+ return -EFAULT;
57116+ tmp[len-1] = '\0';
57117+ g_tmp->filename = tmp;
57118+
57119+ *guser = g_tmp;
57120+ guser = &(g_tmp->next);
57121+ }
57122+
57123+ return 0;
57124+}
57125+
57126+static int
57127+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57128+ struct acl_role_label *role)
57129+{
57130+ struct acl_object_label *o_tmp;
57131+ unsigned int len;
57132+ int ret;
57133+ char *tmp;
57134+
57135+ while (userp) {
57136+ if ((o_tmp = (struct acl_object_label *)
57137+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
57138+ return -ENOMEM;
57139+
57140+ if (copy_from_user(o_tmp, userp,
57141+ sizeof (struct acl_object_label)))
57142+ return -EFAULT;
57143+
57144+ userp = o_tmp->prev;
57145+
57146+ len = strnlen_user(o_tmp->filename, PATH_MAX);
57147+
57148+ if (!len || len >= PATH_MAX)
57149+ return -EINVAL;
57150+
57151+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57152+ return -ENOMEM;
57153+
57154+ if (copy_from_user(tmp, o_tmp->filename, len))
57155+ return -EFAULT;
57156+ tmp[len-1] = '\0';
57157+ o_tmp->filename = tmp;
57158+
57159+ insert_acl_obj_label(o_tmp, subj);
57160+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57161+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57162+ return -ENOMEM;
57163+
57164+ ret = copy_user_glob(o_tmp);
57165+ if (ret)
57166+ return ret;
57167+
57168+ if (o_tmp->nested) {
57169+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57170+ if (IS_ERR(o_tmp->nested))
57171+ return PTR_ERR(o_tmp->nested);
57172+
57173+ /* insert into nested subject list */
57174+ o_tmp->nested->next = role->hash->first;
57175+ role->hash->first = o_tmp->nested;
57176+ }
57177+ }
57178+
57179+ return 0;
57180+}
57181+
57182+static __u32
57183+count_user_subjs(struct acl_subject_label *userp)
57184+{
57185+ struct acl_subject_label s_tmp;
57186+ __u32 num = 0;
57187+
57188+ while (userp) {
57189+ if (copy_from_user(&s_tmp, userp,
57190+ sizeof (struct acl_subject_label)))
57191+ break;
57192+
57193+ userp = s_tmp.prev;
57194+ /* do not count nested subjects against this count, since
57195+ they are not included in the hash table, but are
57196+ attached to objects. We have already counted
57197+ the subjects in userspace for the allocation
57198+ stack
57199+ */
57200+ if (!(s_tmp.mode & GR_NESTED))
57201+ num++;
57202+ }
57203+
57204+ return num;
57205+}
57206+
57207+static int
57208+copy_user_allowedips(struct acl_role_label *rolep)
57209+{
57210+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57211+
57212+ ruserip = rolep->allowed_ips;
57213+
57214+ while (ruserip) {
57215+ rlast = rtmp;
57216+
57217+ if ((rtmp = (struct role_allowed_ip *)
57218+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57219+ return -ENOMEM;
57220+
57221+ if (copy_from_user(rtmp, ruserip,
57222+ sizeof (struct role_allowed_ip)))
57223+ return -EFAULT;
57224+
57225+ ruserip = rtmp->prev;
57226+
57227+ if (!rlast) {
57228+ rtmp->prev = NULL;
57229+ rolep->allowed_ips = rtmp;
57230+ } else {
57231+ rlast->next = rtmp;
57232+ rtmp->prev = rlast;
57233+ }
57234+
57235+ if (!ruserip)
57236+ rtmp->next = NULL;
57237+ }
57238+
57239+ return 0;
57240+}
57241+
57242+static int
57243+copy_user_transitions(struct acl_role_label *rolep)
57244+{
57245+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
57246+
57247+ unsigned int len;
57248+ char *tmp;
57249+
57250+ rusertp = rolep->transitions;
57251+
57252+ while (rusertp) {
57253+ rlast = rtmp;
57254+
57255+ if ((rtmp = (struct role_transition *)
57256+ acl_alloc(sizeof (struct role_transition))) == NULL)
57257+ return -ENOMEM;
57258+
57259+ if (copy_from_user(rtmp, rusertp,
57260+ sizeof (struct role_transition)))
57261+ return -EFAULT;
57262+
57263+ rusertp = rtmp->prev;
57264+
57265+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57266+
57267+ if (!len || len >= GR_SPROLE_LEN)
57268+ return -EINVAL;
57269+
57270+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57271+ return -ENOMEM;
57272+
57273+ if (copy_from_user(tmp, rtmp->rolename, len))
57274+ return -EFAULT;
57275+ tmp[len-1] = '\0';
57276+ rtmp->rolename = tmp;
57277+
57278+ if (!rlast) {
57279+ rtmp->prev = NULL;
57280+ rolep->transitions = rtmp;
57281+ } else {
57282+ rlast->next = rtmp;
57283+ rtmp->prev = rlast;
57284+ }
57285+
57286+ if (!rusertp)
57287+ rtmp->next = NULL;
57288+ }
57289+
57290+ return 0;
57291+}
57292+
57293+static struct acl_subject_label *
57294+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57295+{
57296+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57297+ unsigned int len;
57298+ char *tmp;
57299+ __u32 num_objs;
57300+ struct acl_ip_label **i_tmp, *i_utmp2;
57301+ struct gr_hash_struct ghash;
57302+ struct subject_map *subjmap;
57303+ unsigned int i_num;
57304+ int err;
57305+
57306+ s_tmp = lookup_subject_map(userp);
57307+
57308+ /* we've already copied this subject into the kernel, just return
57309+ the reference to it, and don't copy it over again
57310+ */
57311+ if (s_tmp)
57312+ return(s_tmp);
57313+
57314+ if ((s_tmp = (struct acl_subject_label *)
57315+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57316+ return ERR_PTR(-ENOMEM);
57317+
57318+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57319+ if (subjmap == NULL)
57320+ return ERR_PTR(-ENOMEM);
57321+
57322+ subjmap->user = userp;
57323+ subjmap->kernel = s_tmp;
57324+ insert_subj_map_entry(subjmap);
57325+
57326+ if (copy_from_user(s_tmp, userp,
57327+ sizeof (struct acl_subject_label)))
57328+ return ERR_PTR(-EFAULT);
57329+
57330+ len = strnlen_user(s_tmp->filename, PATH_MAX);
57331+
57332+ if (!len || len >= PATH_MAX)
57333+ return ERR_PTR(-EINVAL);
57334+
57335+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57336+ return ERR_PTR(-ENOMEM);
57337+
57338+ if (copy_from_user(tmp, s_tmp->filename, len))
57339+ return ERR_PTR(-EFAULT);
57340+ tmp[len-1] = '\0';
57341+ s_tmp->filename = tmp;
57342+
57343+ if (!strcmp(s_tmp->filename, "/"))
57344+ role->root_label = s_tmp;
57345+
57346+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57347+ return ERR_PTR(-EFAULT);
57348+
57349+ /* copy user and group transition tables */
57350+
57351+ if (s_tmp->user_trans_num) {
57352+ uid_t *uidlist;
57353+
57354+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57355+ if (uidlist == NULL)
57356+ return ERR_PTR(-ENOMEM);
57357+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57358+ return ERR_PTR(-EFAULT);
57359+
57360+ s_tmp->user_transitions = uidlist;
57361+ }
57362+
57363+ if (s_tmp->group_trans_num) {
57364+ gid_t *gidlist;
57365+
57366+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57367+ if (gidlist == NULL)
57368+ return ERR_PTR(-ENOMEM);
57369+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57370+ return ERR_PTR(-EFAULT);
57371+
57372+ s_tmp->group_transitions = gidlist;
57373+ }
57374+
57375+ /* set up object hash table */
57376+ num_objs = count_user_objs(ghash.first);
57377+
57378+ s_tmp->obj_hash_size = num_objs;
57379+ s_tmp->obj_hash =
57380+ (struct acl_object_label **)
57381+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57382+
57383+ if (!s_tmp->obj_hash)
57384+ return ERR_PTR(-ENOMEM);
57385+
57386+ memset(s_tmp->obj_hash, 0,
57387+ s_tmp->obj_hash_size *
57388+ sizeof (struct acl_object_label *));
57389+
57390+ /* add in objects */
57391+ err = copy_user_objs(ghash.first, s_tmp, role);
57392+
57393+ if (err)
57394+ return ERR_PTR(err);
57395+
57396+ /* set pointer for parent subject */
57397+ if (s_tmp->parent_subject) {
57398+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57399+
57400+ if (IS_ERR(s_tmp2))
57401+ return s_tmp2;
57402+
57403+ s_tmp->parent_subject = s_tmp2;
57404+ }
57405+
57406+ /* add in ip acls */
57407+
57408+ if (!s_tmp->ip_num) {
57409+ s_tmp->ips = NULL;
57410+ goto insert;
57411+ }
57412+
57413+ i_tmp =
57414+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57415+ sizeof (struct acl_ip_label *));
57416+
57417+ if (!i_tmp)
57418+ return ERR_PTR(-ENOMEM);
57419+
57420+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57421+ *(i_tmp + i_num) =
57422+ (struct acl_ip_label *)
57423+ acl_alloc(sizeof (struct acl_ip_label));
57424+ if (!*(i_tmp + i_num))
57425+ return ERR_PTR(-ENOMEM);
57426+
57427+ if (copy_from_user
57428+ (&i_utmp2, s_tmp->ips + i_num,
57429+ sizeof (struct acl_ip_label *)))
57430+ return ERR_PTR(-EFAULT);
57431+
57432+ if (copy_from_user
57433+ (*(i_tmp + i_num), i_utmp2,
57434+ sizeof (struct acl_ip_label)))
57435+ return ERR_PTR(-EFAULT);
57436+
57437+ if ((*(i_tmp + i_num))->iface == NULL)
57438+ continue;
57439+
57440+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57441+ if (!len || len >= IFNAMSIZ)
57442+ return ERR_PTR(-EINVAL);
57443+ tmp = acl_alloc(len);
57444+ if (tmp == NULL)
57445+ return ERR_PTR(-ENOMEM);
57446+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57447+ return ERR_PTR(-EFAULT);
57448+ (*(i_tmp + i_num))->iface = tmp;
57449+ }
57450+
57451+ s_tmp->ips = i_tmp;
57452+
57453+insert:
57454+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57455+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57456+ return ERR_PTR(-ENOMEM);
57457+
57458+ return s_tmp;
57459+}
57460+
57461+static int
57462+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
57463+{
57464+ struct acl_subject_label s_pre;
57465+ struct acl_subject_label * ret;
57466+ int err;
57467+
57468+ while (userp) {
57469+ if (copy_from_user(&s_pre, userp,
57470+ sizeof (struct acl_subject_label)))
57471+ return -EFAULT;
57472+
57473+ /* do not add nested subjects here, add
57474+ while parsing objects
57475+ */
57476+
57477+ if (s_pre.mode & GR_NESTED) {
57478+ userp = s_pre.prev;
57479+ continue;
57480+ }
57481+
57482+ ret = do_copy_user_subj(userp, role);
57483+
57484+ err = PTR_ERR(ret);
57485+ if (IS_ERR(ret))
57486+ return err;
57487+
57488+ insert_acl_subj_label(ret, role);
57489+
57490+ userp = s_pre.prev;
57491+ }
57492+
57493+ return 0;
57494+}
57495+
57496+static int
57497+copy_user_acl(struct gr_arg *arg)
57498+{
57499+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57500+ struct sprole_pw *sptmp;
57501+ struct gr_hash_struct *ghash;
57502+ uid_t *domainlist;
57503+ unsigned int r_num;
57504+ unsigned int len;
57505+ char *tmp;
57506+ int err = 0;
57507+ __u16 i;
57508+ __u32 num_subjs;
57509+
57510+ /* we need a default and kernel role */
57511+ if (arg->role_db.num_roles < 2)
57512+ return -EINVAL;
57513+
57514+ /* copy special role authentication info from userspace */
57515+
57516+ num_sprole_pws = arg->num_sprole_pws;
57517+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57518+
57519+ if (!acl_special_roles) {
57520+ err = -ENOMEM;
57521+ goto cleanup;
57522+ }
57523+
57524+ for (i = 0; i < num_sprole_pws; i++) {
57525+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
57526+ if (!sptmp) {
57527+ err = -ENOMEM;
57528+ goto cleanup;
57529+ }
57530+ if (copy_from_user(sptmp, arg->sprole_pws + i,
57531+ sizeof (struct sprole_pw))) {
57532+ err = -EFAULT;
57533+ goto cleanup;
57534+ }
57535+
57536+ len =
57537+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
57538+
57539+ if (!len || len >= GR_SPROLE_LEN) {
57540+ err = -EINVAL;
57541+ goto cleanup;
57542+ }
57543+
57544+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
57545+ err = -ENOMEM;
57546+ goto cleanup;
57547+ }
57548+
57549+ if (copy_from_user(tmp, sptmp->rolename, len)) {
57550+ err = -EFAULT;
57551+ goto cleanup;
57552+ }
57553+ tmp[len-1] = '\0';
57554+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57555+ printk(KERN_ALERT "Copying special role %s\n", tmp);
57556+#endif
57557+ sptmp->rolename = tmp;
57558+ acl_special_roles[i] = sptmp;
57559+ }
57560+
57561+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
57562+
57563+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
57564+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
57565+
57566+ if (!r_tmp) {
57567+ err = -ENOMEM;
57568+ goto cleanup;
57569+ }
57570+
57571+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
57572+ sizeof (struct acl_role_label *))) {
57573+ err = -EFAULT;
57574+ goto cleanup;
57575+ }
57576+
57577+ if (copy_from_user(r_tmp, r_utmp2,
57578+ sizeof (struct acl_role_label))) {
57579+ err = -EFAULT;
57580+ goto cleanup;
57581+ }
57582+
57583+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
57584+
57585+ if (!len || len >= PATH_MAX) {
57586+ err = -EINVAL;
57587+ goto cleanup;
57588+ }
57589+
57590+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
57591+ err = -ENOMEM;
57592+ goto cleanup;
57593+ }
57594+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
57595+ err = -EFAULT;
57596+ goto cleanup;
57597+ }
57598+ tmp[len-1] = '\0';
57599+ r_tmp->rolename = tmp;
57600+
57601+ if (!strcmp(r_tmp->rolename, "default")
57602+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
57603+ default_role = r_tmp;
57604+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
57605+ kernel_role = r_tmp;
57606+ }
57607+
57608+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
57609+ err = -ENOMEM;
57610+ goto cleanup;
57611+ }
57612+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
57613+ err = -EFAULT;
57614+ goto cleanup;
57615+ }
57616+
57617+ r_tmp->hash = ghash;
57618+
57619+ num_subjs = count_user_subjs(r_tmp->hash->first);
57620+
57621+ r_tmp->subj_hash_size = num_subjs;
57622+ r_tmp->subj_hash =
57623+ (struct acl_subject_label **)
57624+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
57625+
57626+ if (!r_tmp->subj_hash) {
57627+ err = -ENOMEM;
57628+ goto cleanup;
57629+ }
57630+
57631+ err = copy_user_allowedips(r_tmp);
57632+ if (err)
57633+ goto cleanup;
57634+
57635+ /* copy domain info */
57636+ if (r_tmp->domain_children != NULL) {
57637+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
57638+ if (domainlist == NULL) {
57639+ err = -ENOMEM;
57640+ goto cleanup;
57641+ }
57642+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
57643+ err = -EFAULT;
57644+ goto cleanup;
57645+ }
57646+ r_tmp->domain_children = domainlist;
57647+ }
57648+
57649+ err = copy_user_transitions(r_tmp);
57650+ if (err)
57651+ goto cleanup;
57652+
57653+ memset(r_tmp->subj_hash, 0,
57654+ r_tmp->subj_hash_size *
57655+ sizeof (struct acl_subject_label *));
57656+
57657+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
57658+
57659+ if (err)
57660+ goto cleanup;
57661+
57662+ /* set nested subject list to null */
57663+ r_tmp->hash->first = NULL;
57664+
57665+ insert_acl_role_label(r_tmp);
57666+ }
57667+
57668+ goto return_err;
57669+ cleanup:
57670+ free_variables();
57671+ return_err:
57672+ return err;
57673+
57674+}
57675+
57676+static int
57677+gracl_init(struct gr_arg *args)
57678+{
57679+ int error = 0;
57680+
57681+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
57682+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
57683+
57684+ if (init_variables(args)) {
57685+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
57686+ error = -ENOMEM;
57687+ free_variables();
57688+ goto out;
57689+ }
57690+
57691+ error = copy_user_acl(args);
57692+ free_init_variables();
57693+ if (error) {
57694+ free_variables();
57695+ goto out;
57696+ }
57697+
57698+ if ((error = gr_set_acls(0))) {
57699+ free_variables();
57700+ goto out;
57701+ }
57702+
57703+ pax_open_kernel();
57704+ gr_status |= GR_READY;
57705+ pax_close_kernel();
57706+
57707+ out:
57708+ return error;
57709+}
57710+
57711+/* derived from glibc fnmatch() 0: match, 1: no match*/
57712+
57713+static int
57714+glob_match(const char *p, const char *n)
57715+{
57716+ char c;
57717+
57718+ while ((c = *p++) != '\0') {
57719+ switch (c) {
57720+ case '?':
57721+ if (*n == '\0')
57722+ return 1;
57723+ else if (*n == '/')
57724+ return 1;
57725+ break;
57726+ case '\\':
57727+ if (*n != c)
57728+ return 1;
57729+ break;
57730+ case '*':
57731+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
57732+ if (*n == '/')
57733+ return 1;
57734+ else if (c == '?') {
57735+ if (*n == '\0')
57736+ return 1;
57737+ else
57738+ ++n;
57739+ }
57740+ }
57741+ if (c == '\0') {
57742+ return 0;
57743+ } else {
57744+ const char *endp;
57745+
57746+ if ((endp = strchr(n, '/')) == NULL)
57747+ endp = n + strlen(n);
57748+
57749+ if (c == '[') {
57750+ for (--p; n < endp; ++n)
57751+ if (!glob_match(p, n))
57752+ return 0;
57753+ } else if (c == '/') {
57754+ while (*n != '\0' && *n != '/')
57755+ ++n;
57756+ if (*n == '/' && !glob_match(p, n + 1))
57757+ return 0;
57758+ } else {
57759+ for (--p; n < endp; ++n)
57760+ if (*n == c && !glob_match(p, n))
57761+ return 0;
57762+ }
57763+
57764+ return 1;
57765+ }
57766+ case '[':
57767+ {
57768+ int not;
57769+ char cold;
57770+
57771+ if (*n == '\0' || *n == '/')
57772+ return 1;
57773+
57774+ not = (*p == '!' || *p == '^');
57775+ if (not)
57776+ ++p;
57777+
57778+ c = *p++;
57779+ for (;;) {
57780+ unsigned char fn = (unsigned char)*n;
57781+
57782+ if (c == '\0')
57783+ return 1;
57784+ else {
57785+ if (c == fn)
57786+ goto matched;
57787+ cold = c;
57788+ c = *p++;
57789+
57790+ if (c == '-' && *p != ']') {
57791+ unsigned char cend = *p++;
57792+
57793+ if (cend == '\0')
57794+ return 1;
57795+
57796+ if (cold <= fn && fn <= cend)
57797+ goto matched;
57798+
57799+ c = *p++;
57800+ }
57801+ }
57802+
57803+ if (c == ']')
57804+ break;
57805+ }
57806+ if (!not)
57807+ return 1;
57808+ break;
57809+ matched:
57810+ while (c != ']') {
57811+ if (c == '\0')
57812+ return 1;
57813+
57814+ c = *p++;
57815+ }
57816+ if (not)
57817+ return 1;
57818+ }
57819+ break;
57820+ default:
57821+ if (c != *n)
57822+ return 1;
57823+ }
57824+
57825+ ++n;
57826+ }
57827+
57828+ if (*n == '\0')
57829+ return 0;
57830+
57831+ if (*n == '/')
57832+ return 0;
57833+
57834+ return 1;
57835+}
57836+
57837+static struct acl_object_label *
57838+chk_glob_label(struct acl_object_label *globbed,
57839+ struct dentry *dentry, struct vfsmount *mnt, char **path)
57840+{
57841+ struct acl_object_label *tmp;
57842+
57843+ if (*path == NULL)
57844+ *path = gr_to_filename_nolock(dentry, mnt);
57845+
57846+ tmp = globbed;
57847+
57848+ while (tmp) {
57849+ if (!glob_match(tmp->filename, *path))
57850+ return tmp;
57851+ tmp = tmp->next;
57852+ }
57853+
57854+ return NULL;
57855+}
57856+
57857+static struct acl_object_label *
57858+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57859+ const ino_t curr_ino, const dev_t curr_dev,
57860+ const struct acl_subject_label *subj, char **path, const int checkglob)
57861+{
57862+ struct acl_subject_label *tmpsubj;
57863+ struct acl_object_label *retval;
57864+ struct acl_object_label *retval2;
57865+
57866+ tmpsubj = (struct acl_subject_label *) subj;
57867+ read_lock(&gr_inode_lock);
57868+ do {
57869+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
57870+ if (retval) {
57871+ if (checkglob && retval->globbed) {
57872+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
57873+ (struct vfsmount *)orig_mnt, path);
57874+ if (retval2)
57875+ retval = retval2;
57876+ }
57877+ break;
57878+ }
57879+ } while ((tmpsubj = tmpsubj->parent_subject));
57880+ read_unlock(&gr_inode_lock);
57881+
57882+ return retval;
57883+}
57884+
57885+static __inline__ struct acl_object_label *
57886+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57887+ const struct dentry *curr_dentry,
57888+ const struct acl_subject_label *subj, char **path, const int checkglob)
57889+{
57890+ int newglob = checkglob;
57891+
57892+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
57893+ as we don't want a / * rule to match instead of the / object
57894+ don't do this for create lookups that call this function though, since they're looking up
57895+ on the parent and thus need globbing checks on all paths
57896+ */
57897+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
57898+ newglob = GR_NO_GLOB;
57899+
57900+ return __full_lookup(orig_dentry, orig_mnt,
57901+ curr_dentry->d_inode->i_ino,
57902+ __get_dev(curr_dentry), subj, path, newglob);
57903+}
57904+
57905+static struct acl_object_label *
57906+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57907+ const struct acl_subject_label *subj, char *path, const int checkglob)
57908+{
57909+ struct dentry *dentry = (struct dentry *) l_dentry;
57910+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57911+ struct acl_object_label *retval;
57912+
57913+ spin_lock(&dcache_lock);
57914+ spin_lock(&vfsmount_lock);
57915+
57916+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
57917+#ifdef CONFIG_NET
57918+ mnt == sock_mnt ||
57919+#endif
57920+#ifdef CONFIG_HUGETLBFS
57921+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
57922+#endif
57923+ /* ignore Eric Biederman */
57924+ IS_PRIVATE(l_dentry->d_inode))) {
57925+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
57926+ goto out;
57927+ }
57928+
57929+ for (;;) {
57930+ if (dentry == real_root && mnt == real_root_mnt)
57931+ break;
57932+
57933+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57934+ if (mnt->mnt_parent == mnt)
57935+ break;
57936+
57937+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57938+ if (retval != NULL)
57939+ goto out;
57940+
57941+ dentry = mnt->mnt_mountpoint;
57942+ mnt = mnt->mnt_parent;
57943+ continue;
57944+ }
57945+
57946+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57947+ if (retval != NULL)
57948+ goto out;
57949+
57950+ dentry = dentry->d_parent;
57951+ }
57952+
57953+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57954+
57955+ if (retval == NULL)
57956+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
57957+out:
57958+ spin_unlock(&vfsmount_lock);
57959+ spin_unlock(&dcache_lock);
57960+
57961+ BUG_ON(retval == NULL);
57962+
57963+ return retval;
57964+}
57965+
57966+static __inline__ struct acl_object_label *
57967+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57968+ const struct acl_subject_label *subj)
57969+{
57970+ char *path = NULL;
57971+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
57972+}
57973+
57974+static __inline__ struct acl_object_label *
57975+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57976+ const struct acl_subject_label *subj)
57977+{
57978+ char *path = NULL;
57979+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
57980+}
57981+
57982+static __inline__ struct acl_object_label *
57983+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57984+ const struct acl_subject_label *subj, char *path)
57985+{
57986+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
57987+}
57988+
57989+static struct acl_subject_label *
57990+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57991+ const struct acl_role_label *role)
57992+{
57993+ struct dentry *dentry = (struct dentry *) l_dentry;
57994+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57995+ struct acl_subject_label *retval;
57996+
57997+ spin_lock(&dcache_lock);
57998+ spin_lock(&vfsmount_lock);
57999+
58000+ for (;;) {
58001+ if (dentry == real_root && mnt == real_root_mnt)
58002+ break;
58003+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
58004+ if (mnt->mnt_parent == mnt)
58005+ break;
58006+
58007+ read_lock(&gr_inode_lock);
58008+ retval =
58009+ lookup_acl_subj_label(dentry->d_inode->i_ino,
58010+ __get_dev(dentry), role);
58011+ read_unlock(&gr_inode_lock);
58012+ if (retval != NULL)
58013+ goto out;
58014+
58015+ dentry = mnt->mnt_mountpoint;
58016+ mnt = mnt->mnt_parent;
58017+ continue;
58018+ }
58019+
58020+ read_lock(&gr_inode_lock);
58021+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58022+ __get_dev(dentry), role);
58023+ read_unlock(&gr_inode_lock);
58024+ if (retval != NULL)
58025+ goto out;
58026+
58027+ dentry = dentry->d_parent;
58028+ }
58029+
58030+ read_lock(&gr_inode_lock);
58031+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
58032+ __get_dev(dentry), role);
58033+ read_unlock(&gr_inode_lock);
58034+
58035+ if (unlikely(retval == NULL)) {
58036+ read_lock(&gr_inode_lock);
58037+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
58038+ __get_dev(real_root), role);
58039+ read_unlock(&gr_inode_lock);
58040+ }
58041+out:
58042+ spin_unlock(&vfsmount_lock);
58043+ spin_unlock(&dcache_lock);
58044+
58045+ BUG_ON(retval == NULL);
58046+
58047+ return retval;
58048+}
58049+
58050+static void
58051+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
58052+{
58053+ struct task_struct *task = current;
58054+ const struct cred *cred = current_cred();
58055+
58056+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58057+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58058+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58059+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
58060+
58061+ return;
58062+}
58063+
58064+static void
58065+gr_log_learn_sysctl(const char *path, const __u32 mode)
58066+{
58067+ struct task_struct *task = current;
58068+ const struct cred *cred = current_cred();
58069+
58070+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
58071+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58072+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58073+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
58074+
58075+ return;
58076+}
58077+
58078+static void
58079+gr_log_learn_id_change(const char type, const unsigned int real,
58080+ const unsigned int effective, const unsigned int fs)
58081+{
58082+ struct task_struct *task = current;
58083+ const struct cred *cred = current_cred();
58084+
58085+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58086+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58087+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58088+ type, real, effective, fs, &task->signal->saved_ip);
58089+
58090+ return;
58091+}
58092+
58093+__u32
58094+gr_search_file(const struct dentry * dentry, const __u32 mode,
58095+ const struct vfsmount * mnt)
58096+{
58097+ __u32 retval = mode;
58098+ struct acl_subject_label *curracl;
58099+ struct acl_object_label *currobj;
58100+
58101+ if (unlikely(!(gr_status & GR_READY)))
58102+ return (mode & ~GR_AUDITS);
58103+
58104+ curracl = current->acl;
58105+
58106+ currobj = chk_obj_label(dentry, mnt, curracl);
58107+ retval = currobj->mode & mode;
58108+
58109+ /* if we're opening a specified transfer file for writing
58110+ (e.g. /dev/initctl), then transfer our role to init
58111+ */
58112+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58113+ current->role->roletype & GR_ROLE_PERSIST)) {
58114+ struct task_struct *task = init_pid_ns.child_reaper;
58115+
58116+ if (task->role != current->role) {
58117+ task->acl_sp_role = 0;
58118+ task->acl_role_id = current->acl_role_id;
58119+ task->role = current->role;
58120+ rcu_read_lock();
58121+ read_lock(&grsec_exec_file_lock);
58122+ gr_apply_subject_to_task(task);
58123+ read_unlock(&grsec_exec_file_lock);
58124+ rcu_read_unlock();
58125+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58126+ }
58127+ }
58128+
58129+ if (unlikely
58130+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58131+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58132+ __u32 new_mode = mode;
58133+
58134+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58135+
58136+ retval = new_mode;
58137+
58138+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58139+ new_mode |= GR_INHERIT;
58140+
58141+ if (!(mode & GR_NOLEARN))
58142+ gr_log_learn(dentry, mnt, new_mode);
58143+ }
58144+
58145+ return retval;
58146+}
58147+
58148+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58149+ const struct dentry *parent,
58150+ const struct vfsmount *mnt)
58151+{
58152+ struct name_entry *match;
58153+ struct acl_object_label *matchpo;
58154+ struct acl_subject_label *curracl;
58155+ char *path;
58156+
58157+ if (unlikely(!(gr_status & GR_READY)))
58158+ return NULL;
58159+
58160+ preempt_disable();
58161+ path = gr_to_filename_rbac(new_dentry, mnt);
58162+ match = lookup_name_entry_create(path);
58163+
58164+ curracl = current->acl;
58165+
58166+ if (match) {
58167+ read_lock(&gr_inode_lock);
58168+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58169+ read_unlock(&gr_inode_lock);
58170+
58171+ if (matchpo) {
58172+ preempt_enable();
58173+ return matchpo;
58174+ }
58175+ }
58176+
58177+ // lookup parent
58178+
58179+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58180+
58181+ preempt_enable();
58182+ return matchpo;
58183+}
58184+
58185+__u32
58186+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58187+ const struct vfsmount * mnt, const __u32 mode)
58188+{
58189+ struct acl_object_label *matchpo;
58190+ __u32 retval;
58191+
58192+ if (unlikely(!(gr_status & GR_READY)))
58193+ return (mode & ~GR_AUDITS);
58194+
58195+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
58196+
58197+ retval = matchpo->mode & mode;
58198+
58199+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58200+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58201+ __u32 new_mode = mode;
58202+
58203+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58204+
58205+ gr_log_learn(new_dentry, mnt, new_mode);
58206+ return new_mode;
58207+ }
58208+
58209+ return retval;
58210+}
58211+
58212+__u32
58213+gr_check_link(const struct dentry * new_dentry,
58214+ const struct dentry * parent_dentry,
58215+ const struct vfsmount * parent_mnt,
58216+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58217+{
58218+ struct acl_object_label *obj;
58219+ __u32 oldmode, newmode;
58220+ __u32 needmode;
58221+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58222+ GR_DELETE | GR_INHERIT;
58223+
58224+ if (unlikely(!(gr_status & GR_READY)))
58225+ return (GR_CREATE | GR_LINK);
58226+
58227+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58228+ oldmode = obj->mode;
58229+
58230+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58231+ newmode = obj->mode;
58232+
58233+ needmode = newmode & checkmodes;
58234+
58235+ // old name for hardlink must have at least the permissions of the new name
58236+ if ((oldmode & needmode) != needmode)
58237+ goto bad;
58238+
58239+ // if old name had restrictions/auditing, make sure the new name does as well
58240+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58241+
58242+ // don't allow hardlinking of suid/sgid files without permission
58243+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58244+ needmode |= GR_SETID;
58245+
58246+ if ((newmode & needmode) != needmode)
58247+ goto bad;
58248+
58249+ // enforce minimum permissions
58250+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58251+ return newmode;
58252+bad:
58253+ needmode = oldmode;
58254+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58255+ needmode |= GR_SETID;
58256+
58257+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58258+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58259+ return (GR_CREATE | GR_LINK);
58260+ } else if (newmode & GR_SUPPRESS)
58261+ return GR_SUPPRESS;
58262+ else
58263+ return 0;
58264+}
58265+
58266+int
58267+gr_check_hidden_task(const struct task_struct *task)
58268+{
58269+ if (unlikely(!(gr_status & GR_READY)))
58270+ return 0;
58271+
58272+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58273+ return 1;
58274+
58275+ return 0;
58276+}
58277+
58278+int
58279+gr_check_protected_task(const struct task_struct *task)
58280+{
58281+ if (unlikely(!(gr_status & GR_READY) || !task))
58282+ return 0;
58283+
58284+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58285+ task->acl != current->acl)
58286+ return 1;
58287+
58288+ return 0;
58289+}
58290+
58291+int
58292+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58293+{
58294+ struct task_struct *p;
58295+ int ret = 0;
58296+
58297+ if (unlikely(!(gr_status & GR_READY) || !pid))
58298+ return ret;
58299+
58300+ read_lock(&tasklist_lock);
58301+ do_each_pid_task(pid, type, p) {
58302+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58303+ p->acl != current->acl) {
58304+ ret = 1;
58305+ goto out;
58306+ }
58307+ } while_each_pid_task(pid, type, p);
58308+out:
58309+ read_unlock(&tasklist_lock);
58310+
58311+ return ret;
58312+}
58313+
58314+void
58315+gr_copy_label(struct task_struct *tsk)
58316+{
58317+ tsk->signal->used_accept = 0;
58318+ tsk->acl_sp_role = 0;
58319+ tsk->acl_role_id = current->acl_role_id;
58320+ tsk->acl = current->acl;
58321+ tsk->role = current->role;
58322+ tsk->signal->curr_ip = current->signal->curr_ip;
58323+ tsk->signal->saved_ip = current->signal->saved_ip;
58324+ if (current->exec_file)
58325+ get_file(current->exec_file);
58326+ tsk->exec_file = current->exec_file;
58327+ tsk->is_writable = current->is_writable;
58328+ if (unlikely(current->signal->used_accept)) {
58329+ current->signal->curr_ip = 0;
58330+ current->signal->saved_ip = 0;
58331+ }
58332+
58333+ return;
58334+}
58335+
58336+static void
58337+gr_set_proc_res(struct task_struct *task)
58338+{
58339+ struct acl_subject_label *proc;
58340+ unsigned short i;
58341+
58342+ proc = task->acl;
58343+
58344+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58345+ return;
58346+
58347+ for (i = 0; i < RLIM_NLIMITS; i++) {
58348+ if (!(proc->resmask & (1 << i)))
58349+ continue;
58350+
58351+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58352+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58353+ }
58354+
58355+ return;
58356+}
58357+
58358+extern int __gr_process_user_ban(struct user_struct *user);
58359+
58360+int
58361+gr_check_user_change(int real, int effective, int fs)
58362+{
58363+ unsigned int i;
58364+ __u16 num;
58365+ uid_t *uidlist;
58366+ int curuid;
58367+ int realok = 0;
58368+ int effectiveok = 0;
58369+ int fsok = 0;
58370+
58371+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58372+ struct user_struct *user;
58373+
58374+ if (real == -1)
58375+ goto skipit;
58376+
58377+ user = find_user(real);
58378+ if (user == NULL)
58379+ goto skipit;
58380+
58381+ if (__gr_process_user_ban(user)) {
58382+ /* for find_user */
58383+ free_uid(user);
58384+ return 1;
58385+ }
58386+
58387+ /* for find_user */
58388+ free_uid(user);
58389+
58390+skipit:
58391+#endif
58392+
58393+ if (unlikely(!(gr_status & GR_READY)))
58394+ return 0;
58395+
58396+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58397+ gr_log_learn_id_change('u', real, effective, fs);
58398+
58399+ num = current->acl->user_trans_num;
58400+ uidlist = current->acl->user_transitions;
58401+
58402+ if (uidlist == NULL)
58403+ return 0;
58404+
58405+ if (real == -1)
58406+ realok = 1;
58407+ if (effective == -1)
58408+ effectiveok = 1;
58409+ if (fs == -1)
58410+ fsok = 1;
58411+
58412+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
58413+ for (i = 0; i < num; i++) {
58414+ curuid = (int)uidlist[i];
58415+ if (real == curuid)
58416+ realok = 1;
58417+ if (effective == curuid)
58418+ effectiveok = 1;
58419+ if (fs == curuid)
58420+ fsok = 1;
58421+ }
58422+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
58423+ for (i = 0; i < num; i++) {
58424+ curuid = (int)uidlist[i];
58425+ if (real == curuid)
58426+ break;
58427+ if (effective == curuid)
58428+ break;
58429+ if (fs == curuid)
58430+ break;
58431+ }
58432+ /* not in deny list */
58433+ if (i == num) {
58434+ realok = 1;
58435+ effectiveok = 1;
58436+ fsok = 1;
58437+ }
58438+ }
58439+
58440+ if (realok && effectiveok && fsok)
58441+ return 0;
58442+ else {
58443+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58444+ return 1;
58445+ }
58446+}
58447+
58448+int
58449+gr_check_group_change(int real, int effective, int fs)
58450+{
58451+ unsigned int i;
58452+ __u16 num;
58453+ gid_t *gidlist;
58454+ int curgid;
58455+ int realok = 0;
58456+ int effectiveok = 0;
58457+ int fsok = 0;
58458+
58459+ if (unlikely(!(gr_status & GR_READY)))
58460+ return 0;
58461+
58462+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58463+ gr_log_learn_id_change('g', real, effective, fs);
58464+
58465+ num = current->acl->group_trans_num;
58466+ gidlist = current->acl->group_transitions;
58467+
58468+ if (gidlist == NULL)
58469+ return 0;
58470+
58471+ if (real == -1)
58472+ realok = 1;
58473+ if (effective == -1)
58474+ effectiveok = 1;
58475+ if (fs == -1)
58476+ fsok = 1;
58477+
58478+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
58479+ for (i = 0; i < num; i++) {
58480+ curgid = (int)gidlist[i];
58481+ if (real == curgid)
58482+ realok = 1;
58483+ if (effective == curgid)
58484+ effectiveok = 1;
58485+ if (fs == curgid)
58486+ fsok = 1;
58487+ }
58488+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
58489+ for (i = 0; i < num; i++) {
58490+ curgid = (int)gidlist[i];
58491+ if (real == curgid)
58492+ break;
58493+ if (effective == curgid)
58494+ break;
58495+ if (fs == curgid)
58496+ break;
58497+ }
58498+ /* not in deny list */
58499+ if (i == num) {
58500+ realok = 1;
58501+ effectiveok = 1;
58502+ fsok = 1;
58503+ }
58504+ }
58505+
58506+ if (realok && effectiveok && fsok)
58507+ return 0;
58508+ else {
58509+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58510+ return 1;
58511+ }
58512+}
58513+
58514+extern int gr_acl_is_capable(const int cap);
58515+
58516+void
58517+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58518+{
58519+ struct acl_role_label *role = task->role;
58520+ struct acl_subject_label *subj = NULL;
58521+ struct acl_object_label *obj;
58522+ struct file *filp;
58523+
58524+ if (unlikely(!(gr_status & GR_READY)))
58525+ return;
58526+
58527+ filp = task->exec_file;
58528+
58529+ /* kernel process, we'll give them the kernel role */
58530+ if (unlikely(!filp)) {
58531+ task->role = kernel_role;
58532+ task->acl = kernel_role->root_label;
58533+ return;
58534+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
58535+ role = lookup_acl_role_label(task, uid, gid);
58536+
58537+ /* don't change the role if we're not a privileged process */
58538+ if (role && task->role != role &&
58539+ (((role->roletype & GR_ROLE_USER) && gr_acl_is_capable(CAP_SETUID)) ||
58540+ ((role->roletype & GR_ROLE_GROUP) && gr_acl_is_capable(CAP_SETGID))))
58541+ return;
58542+
58543+ /* perform subject lookup in possibly new role
58544+ we can use this result below in the case where role == task->role
58545+ */
58546+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
58547+
58548+ /* if we changed uid/gid, but result in the same role
58549+ and are using inheritance, don't lose the inherited subject
58550+ if current subject is other than what normal lookup
58551+ would result in, we arrived via inheritance, don't
58552+ lose subject
58553+ */
58554+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
58555+ (subj == task->acl)))
58556+ task->acl = subj;
58557+
58558+ task->role = role;
58559+
58560+ task->is_writable = 0;
58561+
58562+ /* ignore additional mmap checks for processes that are writable
58563+ by the default ACL */
58564+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58565+ if (unlikely(obj->mode & GR_WRITE))
58566+ task->is_writable = 1;
58567+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58568+ if (unlikely(obj->mode & GR_WRITE))
58569+ task->is_writable = 1;
58570+
58571+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58572+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58573+#endif
58574+
58575+ gr_set_proc_res(task);
58576+
58577+ return;
58578+}
58579+
58580+int
58581+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
58582+ const int unsafe_flags)
58583+{
58584+ struct task_struct *task = current;
58585+ struct acl_subject_label *newacl;
58586+ struct acl_object_label *obj;
58587+ __u32 retmode;
58588+
58589+ if (unlikely(!(gr_status & GR_READY)))
58590+ return 0;
58591+
58592+ newacl = chk_subj_label(dentry, mnt, task->role);
58593+
58594+ task_lock(task);
58595+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58596+ !(task->role->roletype & GR_ROLE_GOD) &&
58597+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
58598+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58599+ task_unlock(task);
58600+ if (unsafe_flags & LSM_UNSAFE_SHARE)
58601+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
58602+ else
58603+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58604+ return -EACCES;
58605+ }
58606+ task_unlock(task);
58607+
58608+ obj = chk_obj_label(dentry, mnt, task->acl);
58609+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
58610+
58611+ if (!(task->acl->mode & GR_INHERITLEARN) &&
58612+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
58613+ if (obj->nested)
58614+ task->acl = obj->nested;
58615+ else
58616+ task->acl = newacl;
58617+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
58618+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
58619+
58620+ task->is_writable = 0;
58621+
58622+ /* ignore additional mmap checks for processes that are writable
58623+ by the default ACL */
58624+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
58625+ if (unlikely(obj->mode & GR_WRITE))
58626+ task->is_writable = 1;
58627+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
58628+ if (unlikely(obj->mode & GR_WRITE))
58629+ task->is_writable = 1;
58630+
58631+ gr_set_proc_res(task);
58632+
58633+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58634+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58635+#endif
58636+ return 0;
58637+}
58638+
58639+/* always called with valid inodev ptr */
58640+static void
58641+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
58642+{
58643+ struct acl_object_label *matchpo;
58644+ struct acl_subject_label *matchps;
58645+ struct acl_subject_label *subj;
58646+ struct acl_role_label *role;
58647+ unsigned int x;
58648+
58649+ FOR_EACH_ROLE_START(role)
58650+ FOR_EACH_SUBJECT_START(role, subj, x)
58651+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
58652+ matchpo->mode |= GR_DELETED;
58653+ FOR_EACH_SUBJECT_END(subj,x)
58654+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58655+ if (subj->inode == ino && subj->device == dev)
58656+ subj->mode |= GR_DELETED;
58657+ FOR_EACH_NESTED_SUBJECT_END(subj)
58658+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
58659+ matchps->mode |= GR_DELETED;
58660+ FOR_EACH_ROLE_END(role)
58661+
58662+ inodev->nentry->deleted = 1;
58663+
58664+ return;
58665+}
58666+
58667+void
58668+gr_handle_delete(const ino_t ino, const dev_t dev)
58669+{
58670+ struct inodev_entry *inodev;
58671+
58672+ if (unlikely(!(gr_status & GR_READY)))
58673+ return;
58674+
58675+ write_lock(&gr_inode_lock);
58676+ inodev = lookup_inodev_entry(ino, dev);
58677+ if (inodev != NULL)
58678+ do_handle_delete(inodev, ino, dev);
58679+ write_unlock(&gr_inode_lock);
58680+
58681+ return;
58682+}
58683+
58684+static void
58685+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
58686+ const ino_t newinode, const dev_t newdevice,
58687+ struct acl_subject_label *subj)
58688+{
58689+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
58690+ struct acl_object_label *match;
58691+
58692+ match = subj->obj_hash[index];
58693+
58694+ while (match && (match->inode != oldinode ||
58695+ match->device != olddevice ||
58696+ !(match->mode & GR_DELETED)))
58697+ match = match->next;
58698+
58699+ if (match && (match->inode == oldinode)
58700+ && (match->device == olddevice)
58701+ && (match->mode & GR_DELETED)) {
58702+ if (match->prev == NULL) {
58703+ subj->obj_hash[index] = match->next;
58704+ if (match->next != NULL)
58705+ match->next->prev = NULL;
58706+ } else {
58707+ match->prev->next = match->next;
58708+ if (match->next != NULL)
58709+ match->next->prev = match->prev;
58710+ }
58711+ match->prev = NULL;
58712+ match->next = NULL;
58713+ match->inode = newinode;
58714+ match->device = newdevice;
58715+ match->mode &= ~GR_DELETED;
58716+
58717+ insert_acl_obj_label(match, subj);
58718+ }
58719+
58720+ return;
58721+}
58722+
58723+static void
58724+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
58725+ const ino_t newinode, const dev_t newdevice,
58726+ struct acl_role_label *role)
58727+{
58728+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
58729+ struct acl_subject_label *match;
58730+
58731+ match = role->subj_hash[index];
58732+
58733+ while (match && (match->inode != oldinode ||
58734+ match->device != olddevice ||
58735+ !(match->mode & GR_DELETED)))
58736+ match = match->next;
58737+
58738+ if (match && (match->inode == oldinode)
58739+ && (match->device == olddevice)
58740+ && (match->mode & GR_DELETED)) {
58741+ if (match->prev == NULL) {
58742+ role->subj_hash[index] = match->next;
58743+ if (match->next != NULL)
58744+ match->next->prev = NULL;
58745+ } else {
58746+ match->prev->next = match->next;
58747+ if (match->next != NULL)
58748+ match->next->prev = match->prev;
58749+ }
58750+ match->prev = NULL;
58751+ match->next = NULL;
58752+ match->inode = newinode;
58753+ match->device = newdevice;
58754+ match->mode &= ~GR_DELETED;
58755+
58756+ insert_acl_subj_label(match, role);
58757+ }
58758+
58759+ return;
58760+}
58761+
58762+static void
58763+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
58764+ const ino_t newinode, const dev_t newdevice)
58765+{
58766+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
58767+ struct inodev_entry *match;
58768+
58769+ match = inodev_set.i_hash[index];
58770+
58771+ while (match && (match->nentry->inode != oldinode ||
58772+ match->nentry->device != olddevice || !match->nentry->deleted))
58773+ match = match->next;
58774+
58775+ if (match && (match->nentry->inode == oldinode)
58776+ && (match->nentry->device == olddevice) &&
58777+ match->nentry->deleted) {
58778+ if (match->prev == NULL) {
58779+ inodev_set.i_hash[index] = match->next;
58780+ if (match->next != NULL)
58781+ match->next->prev = NULL;
58782+ } else {
58783+ match->prev->next = match->next;
58784+ if (match->next != NULL)
58785+ match->next->prev = match->prev;
58786+ }
58787+ match->prev = NULL;
58788+ match->next = NULL;
58789+ match->nentry->inode = newinode;
58790+ match->nentry->device = newdevice;
58791+ match->nentry->deleted = 0;
58792+
58793+ insert_inodev_entry(match);
58794+ }
58795+
58796+ return;
58797+}
58798+
58799+static void
58800+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
58801+{
58802+ struct acl_subject_label *subj;
58803+ struct acl_role_label *role;
58804+ unsigned int x;
58805+
58806+ FOR_EACH_ROLE_START(role)
58807+ update_acl_subj_label(matchn->inode, matchn->device,
58808+ inode, dev, role);
58809+
58810+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58811+ if ((subj->inode == inode) && (subj->device == dev)) {
58812+ subj->inode = inode;
58813+ subj->device = dev;
58814+ }
58815+ FOR_EACH_NESTED_SUBJECT_END(subj)
58816+ FOR_EACH_SUBJECT_START(role, subj, x)
58817+ update_acl_obj_label(matchn->inode, matchn->device,
58818+ inode, dev, subj);
58819+ FOR_EACH_SUBJECT_END(subj,x)
58820+ FOR_EACH_ROLE_END(role)
58821+
58822+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
58823+
58824+ return;
58825+}
58826+
58827+static void
58828+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
58829+ const struct vfsmount *mnt)
58830+{
58831+ ino_t ino = dentry->d_inode->i_ino;
58832+ dev_t dev = __get_dev(dentry);
58833+
58834+ __do_handle_create(matchn, ino, dev);
58835+
58836+ return;
58837+}
58838+
58839+void
58840+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
58841+{
58842+ struct name_entry *matchn;
58843+
58844+ if (unlikely(!(gr_status & GR_READY)))
58845+ return;
58846+
58847+ preempt_disable();
58848+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
58849+
58850+ if (unlikely((unsigned long)matchn)) {
58851+ write_lock(&gr_inode_lock);
58852+ do_handle_create(matchn, dentry, mnt);
58853+ write_unlock(&gr_inode_lock);
58854+ }
58855+ preempt_enable();
58856+
58857+ return;
58858+}
58859+
58860+void
58861+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
58862+{
58863+ struct name_entry *matchn;
58864+
58865+ if (unlikely(!(gr_status & GR_READY)))
58866+ return;
58867+
58868+ preempt_disable();
58869+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
58870+
58871+ if (unlikely((unsigned long)matchn)) {
58872+ write_lock(&gr_inode_lock);
58873+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
58874+ write_unlock(&gr_inode_lock);
58875+ }
58876+ preempt_enable();
58877+
58878+ return;
58879+}
58880+
58881+void
58882+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58883+ struct dentry *old_dentry,
58884+ struct dentry *new_dentry,
58885+ struct vfsmount *mnt, const __u8 replace)
58886+{
58887+ struct name_entry *matchn;
58888+ struct inodev_entry *inodev;
58889+ struct inode *inode = new_dentry->d_inode;
58890+ ino_t oldinode = old_dentry->d_inode->i_ino;
58891+ dev_t olddev = __get_dev(old_dentry);
58892+
58893+ /* vfs_rename swaps the name and parent link for old_dentry and
58894+ new_dentry
58895+ at this point, old_dentry has the new name, parent link, and inode
58896+ for the renamed file
58897+ if a file is being replaced by a rename, new_dentry has the inode
58898+ and name for the replaced file
58899+ */
58900+
58901+ if (unlikely(!(gr_status & GR_READY)))
58902+ return;
58903+
58904+ preempt_disable();
58905+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
58906+
58907+ /* we wouldn't have to check d_inode if it weren't for
58908+ NFS silly-renaming
58909+ */
58910+
58911+ write_lock(&gr_inode_lock);
58912+ if (unlikely(replace && inode)) {
58913+ ino_t newinode = inode->i_ino;
58914+ dev_t newdev = __get_dev(new_dentry);
58915+ inodev = lookup_inodev_entry(newinode, newdev);
58916+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
58917+ do_handle_delete(inodev, newinode, newdev);
58918+ }
58919+
58920+ inodev = lookup_inodev_entry(oldinode, olddev);
58921+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
58922+ do_handle_delete(inodev, oldinode, olddev);
58923+
58924+ if (unlikely((unsigned long)matchn))
58925+ do_handle_create(matchn, old_dentry, mnt);
58926+
58927+ write_unlock(&gr_inode_lock);
58928+ preempt_enable();
58929+
58930+ return;
58931+}
58932+
58933+static int
58934+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
58935+ unsigned char **sum)
58936+{
58937+ struct acl_role_label *r;
58938+ struct role_allowed_ip *ipp;
58939+ struct role_transition *trans;
58940+ unsigned int i;
58941+ int found = 0;
58942+ u32 curr_ip = current->signal->curr_ip;
58943+
58944+ current->signal->saved_ip = curr_ip;
58945+
58946+ /* check transition table */
58947+
58948+ for (trans = current->role->transitions; trans; trans = trans->next) {
58949+ if (!strcmp(rolename, trans->rolename)) {
58950+ found = 1;
58951+ break;
58952+ }
58953+ }
58954+
58955+ if (!found)
58956+ return 0;
58957+
58958+ /* handle special roles that do not require authentication
58959+ and check ip */
58960+
58961+ FOR_EACH_ROLE_START(r)
58962+ if (!strcmp(rolename, r->rolename) &&
58963+ (r->roletype & GR_ROLE_SPECIAL)) {
58964+ found = 0;
58965+ if (r->allowed_ips != NULL) {
58966+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
58967+ if ((ntohl(curr_ip) & ipp->netmask) ==
58968+ (ntohl(ipp->addr) & ipp->netmask))
58969+ found = 1;
58970+ }
58971+ } else
58972+ found = 2;
58973+ if (!found)
58974+ return 0;
58975+
58976+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
58977+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
58978+ *salt = NULL;
58979+ *sum = NULL;
58980+ return 1;
58981+ }
58982+ }
58983+ FOR_EACH_ROLE_END(r)
58984+
58985+ for (i = 0; i < num_sprole_pws; i++) {
58986+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
58987+ *salt = acl_special_roles[i]->salt;
58988+ *sum = acl_special_roles[i]->sum;
58989+ return 1;
58990+ }
58991+ }
58992+
58993+ return 0;
58994+}
58995+
58996+static void
58997+assign_special_role(char *rolename)
58998+{
58999+ struct acl_object_label *obj;
59000+ struct acl_role_label *r;
59001+ struct acl_role_label *assigned = NULL;
59002+ struct task_struct *tsk;
59003+ struct file *filp;
59004+
59005+ FOR_EACH_ROLE_START(r)
59006+ if (!strcmp(rolename, r->rolename) &&
59007+ (r->roletype & GR_ROLE_SPECIAL)) {
59008+ assigned = r;
59009+ break;
59010+ }
59011+ FOR_EACH_ROLE_END(r)
59012+
59013+ if (!assigned)
59014+ return;
59015+
59016+ read_lock(&tasklist_lock);
59017+ read_lock(&grsec_exec_file_lock);
59018+
59019+ tsk = current->real_parent;
59020+ if (tsk == NULL)
59021+ goto out_unlock;
59022+
59023+ filp = tsk->exec_file;
59024+ if (filp == NULL)
59025+ goto out_unlock;
59026+
59027+ tsk->is_writable = 0;
59028+
59029+ tsk->acl_sp_role = 1;
59030+ tsk->acl_role_id = ++acl_sp_role_value;
59031+ tsk->role = assigned;
59032+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
59033+
59034+ /* ignore additional mmap checks for processes that are writable
59035+ by the default ACL */
59036+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59037+ if (unlikely(obj->mode & GR_WRITE))
59038+ tsk->is_writable = 1;
59039+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
59040+ if (unlikely(obj->mode & GR_WRITE))
59041+ tsk->is_writable = 1;
59042+
59043+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59044+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
59045+#endif
59046+
59047+out_unlock:
59048+ read_unlock(&grsec_exec_file_lock);
59049+ read_unlock(&tasklist_lock);
59050+ return;
59051+}
59052+
59053+int gr_check_secure_terminal(struct task_struct *task)
59054+{
59055+ struct task_struct *p, *p2, *p3;
59056+ struct files_struct *files;
59057+ struct fdtable *fdt;
59058+ struct file *our_file = NULL, *file;
59059+ int i;
59060+
59061+ if (task->signal->tty == NULL)
59062+ return 1;
59063+
59064+ files = get_files_struct(task);
59065+ if (files != NULL) {
59066+ rcu_read_lock();
59067+ fdt = files_fdtable(files);
59068+ for (i=0; i < fdt->max_fds; i++) {
59069+ file = fcheck_files(files, i);
59070+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
59071+ get_file(file);
59072+ our_file = file;
59073+ }
59074+ }
59075+ rcu_read_unlock();
59076+ put_files_struct(files);
59077+ }
59078+
59079+ if (our_file == NULL)
59080+ return 1;
59081+
59082+ read_lock(&tasklist_lock);
59083+ do_each_thread(p2, p) {
59084+ files = get_files_struct(p);
59085+ if (files == NULL ||
59086+ (p->signal && p->signal->tty == task->signal->tty)) {
59087+ if (files != NULL)
59088+ put_files_struct(files);
59089+ continue;
59090+ }
59091+ rcu_read_lock();
59092+ fdt = files_fdtable(files);
59093+ for (i=0; i < fdt->max_fds; i++) {
59094+ file = fcheck_files(files, i);
59095+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59096+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59097+ p3 = task;
59098+ while (p3->pid > 0) {
59099+ if (p3 == p)
59100+ break;
59101+ p3 = p3->real_parent;
59102+ }
59103+ if (p3 == p)
59104+ break;
59105+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59106+ gr_handle_alertkill(p);
59107+ rcu_read_unlock();
59108+ put_files_struct(files);
59109+ read_unlock(&tasklist_lock);
59110+ fput(our_file);
59111+ return 0;
59112+ }
59113+ }
59114+ rcu_read_unlock();
59115+ put_files_struct(files);
59116+ } while_each_thread(p2, p);
59117+ read_unlock(&tasklist_lock);
59118+
59119+ fput(our_file);
59120+ return 1;
59121+}
59122+
59123+ssize_t
59124+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59125+{
59126+ struct gr_arg_wrapper uwrap;
59127+ unsigned char *sprole_salt = NULL;
59128+ unsigned char *sprole_sum = NULL;
59129+ int error = sizeof (struct gr_arg_wrapper);
59130+ int error2 = 0;
59131+
59132+ mutex_lock(&gr_dev_mutex);
59133+
59134+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59135+ error = -EPERM;
59136+ goto out;
59137+ }
59138+
59139+ if (count != sizeof (struct gr_arg_wrapper)) {
59140+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59141+ error = -EINVAL;
59142+ goto out;
59143+ }
59144+
59145+
59146+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59147+ gr_auth_expires = 0;
59148+ gr_auth_attempts = 0;
59149+ }
59150+
59151+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59152+ error = -EFAULT;
59153+ goto out;
59154+ }
59155+
59156+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59157+ error = -EINVAL;
59158+ goto out;
59159+ }
59160+
59161+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59162+ error = -EFAULT;
59163+ goto out;
59164+ }
59165+
59166+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59167+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59168+ time_after(gr_auth_expires, get_seconds())) {
59169+ error = -EBUSY;
59170+ goto out;
59171+ }
59172+
59173+ /* if non-root trying to do anything other than use a special role,
59174+ do not attempt authentication, do not count towards authentication
59175+ locking
59176+ */
59177+
59178+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59179+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59180+ current_uid()) {
59181+ error = -EPERM;
59182+ goto out;
59183+ }
59184+
59185+ /* ensure pw and special role name are null terminated */
59186+
59187+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59188+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59189+
59190+ /* Okay.
59191+ * We have our enough of the argument structure..(we have yet
59192+ * to copy_from_user the tables themselves) . Copy the tables
59193+ * only if we need them, i.e. for loading operations. */
59194+
59195+ switch (gr_usermode->mode) {
59196+ case GR_STATUS:
59197+ if (gr_status & GR_READY) {
59198+ error = 1;
59199+ if (!gr_check_secure_terminal(current))
59200+ error = 3;
59201+ } else
59202+ error = 2;
59203+ goto out;
59204+ case GR_SHUTDOWN:
59205+ if ((gr_status & GR_READY)
59206+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59207+ pax_open_kernel();
59208+ gr_status &= ~GR_READY;
59209+ pax_close_kernel();
59210+
59211+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59212+ free_variables();
59213+ memset(gr_usermode, 0, sizeof (struct gr_arg));
59214+ memset(gr_system_salt, 0, GR_SALT_LEN);
59215+ memset(gr_system_sum, 0, GR_SHA_LEN);
59216+ } else if (gr_status & GR_READY) {
59217+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59218+ error = -EPERM;
59219+ } else {
59220+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59221+ error = -EAGAIN;
59222+ }
59223+ break;
59224+ case GR_ENABLE:
59225+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59226+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59227+ else {
59228+ if (gr_status & GR_READY)
59229+ error = -EAGAIN;
59230+ else
59231+ error = error2;
59232+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59233+ }
59234+ break;
59235+ case GR_RELOAD:
59236+ if (!(gr_status & GR_READY)) {
59237+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59238+ error = -EAGAIN;
59239+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59240+ lock_kernel();
59241+
59242+ pax_open_kernel();
59243+ gr_status &= ~GR_READY;
59244+ pax_close_kernel();
59245+
59246+ free_variables();
59247+ if (!(error2 = gracl_init(gr_usermode))) {
59248+ unlock_kernel();
59249+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59250+ } else {
59251+ unlock_kernel();
59252+ error = error2;
59253+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59254+ }
59255+ } else {
59256+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59257+ error = -EPERM;
59258+ }
59259+ break;
59260+ case GR_SEGVMOD:
59261+ if (unlikely(!(gr_status & GR_READY))) {
59262+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59263+ error = -EAGAIN;
59264+ break;
59265+ }
59266+
59267+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59268+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59269+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59270+ struct acl_subject_label *segvacl;
59271+ segvacl =
59272+ lookup_acl_subj_label(gr_usermode->segv_inode,
59273+ gr_usermode->segv_device,
59274+ current->role);
59275+ if (segvacl) {
59276+ segvacl->crashes = 0;
59277+ segvacl->expires = 0;
59278+ }
59279+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59280+ gr_remove_uid(gr_usermode->segv_uid);
59281+ }
59282+ } else {
59283+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59284+ error = -EPERM;
59285+ }
59286+ break;
59287+ case GR_SPROLE:
59288+ case GR_SPROLEPAM:
59289+ if (unlikely(!(gr_status & GR_READY))) {
59290+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59291+ error = -EAGAIN;
59292+ break;
59293+ }
59294+
59295+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59296+ current->role->expires = 0;
59297+ current->role->auth_attempts = 0;
59298+ }
59299+
59300+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59301+ time_after(current->role->expires, get_seconds())) {
59302+ error = -EBUSY;
59303+ goto out;
59304+ }
59305+
59306+ if (lookup_special_role_auth
59307+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59308+ && ((!sprole_salt && !sprole_sum)
59309+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59310+ char *p = "";
59311+ assign_special_role(gr_usermode->sp_role);
59312+ read_lock(&tasklist_lock);
59313+ if (current->real_parent)
59314+ p = current->real_parent->role->rolename;
59315+ read_unlock(&tasklist_lock);
59316+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59317+ p, acl_sp_role_value);
59318+ } else {
59319+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59320+ error = -EPERM;
59321+ if(!(current->role->auth_attempts++))
59322+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59323+
59324+ goto out;
59325+ }
59326+ break;
59327+ case GR_UNSPROLE:
59328+ if (unlikely(!(gr_status & GR_READY))) {
59329+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59330+ error = -EAGAIN;
59331+ break;
59332+ }
59333+
59334+ if (current->role->roletype & GR_ROLE_SPECIAL) {
59335+ char *p = "";
59336+ int i = 0;
59337+
59338+ read_lock(&tasklist_lock);
59339+ if (current->real_parent) {
59340+ p = current->real_parent->role->rolename;
59341+ i = current->real_parent->acl_role_id;
59342+ }
59343+ read_unlock(&tasklist_lock);
59344+
59345+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59346+ gr_set_acls(1);
59347+ } else {
59348+ error = -EPERM;
59349+ goto out;
59350+ }
59351+ break;
59352+ default:
59353+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59354+ error = -EINVAL;
59355+ break;
59356+ }
59357+
59358+ if (error != -EPERM)
59359+ goto out;
59360+
59361+ if(!(gr_auth_attempts++))
59362+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59363+
59364+ out:
59365+ mutex_unlock(&gr_dev_mutex);
59366+ return error;
59367+}
59368+
59369+/* must be called with
59370+ rcu_read_lock();
59371+ read_lock(&tasklist_lock);
59372+ read_lock(&grsec_exec_file_lock);
59373+*/
59374+int gr_apply_subject_to_task(struct task_struct *task)
59375+{
59376+ struct acl_object_label *obj;
59377+ char *tmpname;
59378+ struct acl_subject_label *tmpsubj;
59379+ struct file *filp;
59380+ struct name_entry *nmatch;
59381+
59382+ filp = task->exec_file;
59383+ if (filp == NULL)
59384+ return 0;
59385+
59386+ /* the following is to apply the correct subject
59387+ on binaries running when the RBAC system
59388+ is enabled, when the binaries have been
59389+ replaced or deleted since their execution
59390+ -----
59391+ when the RBAC system starts, the inode/dev
59392+ from exec_file will be one the RBAC system
59393+ is unaware of. It only knows the inode/dev
59394+ of the present file on disk, or the absence
59395+ of it.
59396+ */
59397+ preempt_disable();
59398+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59399+
59400+ nmatch = lookup_name_entry(tmpname);
59401+ preempt_enable();
59402+ tmpsubj = NULL;
59403+ if (nmatch) {
59404+ if (nmatch->deleted)
59405+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59406+ else
59407+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59408+ if (tmpsubj != NULL)
59409+ task->acl = tmpsubj;
59410+ }
59411+ if (tmpsubj == NULL)
59412+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59413+ task->role);
59414+ if (task->acl) {
59415+ task->is_writable = 0;
59416+ /* ignore additional mmap checks for processes that are writable
59417+ by the default ACL */
59418+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59419+ if (unlikely(obj->mode & GR_WRITE))
59420+ task->is_writable = 1;
59421+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59422+ if (unlikely(obj->mode & GR_WRITE))
59423+ task->is_writable = 1;
59424+
59425+ gr_set_proc_res(task);
59426+
59427+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59428+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59429+#endif
59430+ } else {
59431+ return 1;
59432+ }
59433+
59434+ return 0;
59435+}
59436+
59437+int
59438+gr_set_acls(const int type)
59439+{
59440+ struct task_struct *task, *task2;
59441+ struct acl_role_label *role = current->role;
59442+ __u16 acl_role_id = current->acl_role_id;
59443+ const struct cred *cred;
59444+ int ret;
59445+
59446+ rcu_read_lock();
59447+ read_lock(&tasklist_lock);
59448+ read_lock(&grsec_exec_file_lock);
59449+ do_each_thread(task2, task) {
59450+ /* check to see if we're called from the exit handler,
59451+ if so, only replace ACLs that have inherited the admin
59452+ ACL */
59453+
59454+ if (type && (task->role != role ||
59455+ task->acl_role_id != acl_role_id))
59456+ continue;
59457+
59458+ task->acl_role_id = 0;
59459+ task->acl_sp_role = 0;
59460+
59461+ if (task->exec_file) {
59462+ cred = __task_cred(task);
59463+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
59464+
59465+ ret = gr_apply_subject_to_task(task);
59466+ if (ret) {
59467+ read_unlock(&grsec_exec_file_lock);
59468+ read_unlock(&tasklist_lock);
59469+ rcu_read_unlock();
59470+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
59471+ return ret;
59472+ }
59473+ } else {
59474+ // it's a kernel process
59475+ task->role = kernel_role;
59476+ task->acl = kernel_role->root_label;
59477+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
59478+ task->acl->mode &= ~GR_PROCFIND;
59479+#endif
59480+ }
59481+ } while_each_thread(task2, task);
59482+ read_unlock(&grsec_exec_file_lock);
59483+ read_unlock(&tasklist_lock);
59484+ rcu_read_unlock();
59485+
59486+ return 0;
59487+}
59488+
59489+void
59490+gr_learn_resource(const struct task_struct *task,
59491+ const int res, const unsigned long wanted, const int gt)
59492+{
59493+ struct acl_subject_label *acl;
59494+ const struct cred *cred;
59495+
59496+ if (unlikely((gr_status & GR_READY) &&
59497+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59498+ goto skip_reslog;
59499+
59500+#ifdef CONFIG_GRKERNSEC_RESLOG
59501+ gr_log_resource(task, res, wanted, gt);
59502+#endif
59503+ skip_reslog:
59504+
59505+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59506+ return;
59507+
59508+ acl = task->acl;
59509+
59510+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59511+ !(acl->resmask & (1 << (unsigned short) res))))
59512+ return;
59513+
59514+ if (wanted >= acl->res[res].rlim_cur) {
59515+ unsigned long res_add;
59516+
59517+ res_add = wanted;
59518+ switch (res) {
59519+ case RLIMIT_CPU:
59520+ res_add += GR_RLIM_CPU_BUMP;
59521+ break;
59522+ case RLIMIT_FSIZE:
59523+ res_add += GR_RLIM_FSIZE_BUMP;
59524+ break;
59525+ case RLIMIT_DATA:
59526+ res_add += GR_RLIM_DATA_BUMP;
59527+ break;
59528+ case RLIMIT_STACK:
59529+ res_add += GR_RLIM_STACK_BUMP;
59530+ break;
59531+ case RLIMIT_CORE:
59532+ res_add += GR_RLIM_CORE_BUMP;
59533+ break;
59534+ case RLIMIT_RSS:
59535+ res_add += GR_RLIM_RSS_BUMP;
59536+ break;
59537+ case RLIMIT_NPROC:
59538+ res_add += GR_RLIM_NPROC_BUMP;
59539+ break;
59540+ case RLIMIT_NOFILE:
59541+ res_add += GR_RLIM_NOFILE_BUMP;
59542+ break;
59543+ case RLIMIT_MEMLOCK:
59544+ res_add += GR_RLIM_MEMLOCK_BUMP;
59545+ break;
59546+ case RLIMIT_AS:
59547+ res_add += GR_RLIM_AS_BUMP;
59548+ break;
59549+ case RLIMIT_LOCKS:
59550+ res_add += GR_RLIM_LOCKS_BUMP;
59551+ break;
59552+ case RLIMIT_SIGPENDING:
59553+ res_add += GR_RLIM_SIGPENDING_BUMP;
59554+ break;
59555+ case RLIMIT_MSGQUEUE:
59556+ res_add += GR_RLIM_MSGQUEUE_BUMP;
59557+ break;
59558+ case RLIMIT_NICE:
59559+ res_add += GR_RLIM_NICE_BUMP;
59560+ break;
59561+ case RLIMIT_RTPRIO:
59562+ res_add += GR_RLIM_RTPRIO_BUMP;
59563+ break;
59564+ case RLIMIT_RTTIME:
59565+ res_add += GR_RLIM_RTTIME_BUMP;
59566+ break;
59567+ }
59568+
59569+ acl->res[res].rlim_cur = res_add;
59570+
59571+ if (wanted > acl->res[res].rlim_max)
59572+ acl->res[res].rlim_max = res_add;
59573+
59574+ /* only log the subject filename, since resource logging is supported for
59575+ single-subject learning only */
59576+ rcu_read_lock();
59577+ cred = __task_cred(task);
59578+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59579+ task->role->roletype, cred->uid, cred->gid, acl->filename,
59580+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
59581+ "", (unsigned long) res, &task->signal->saved_ip);
59582+ rcu_read_unlock();
59583+ }
59584+
59585+ return;
59586+}
59587+
59588+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
59589+void
59590+pax_set_initial_flags(struct linux_binprm *bprm)
59591+{
59592+ struct task_struct *task = current;
59593+ struct acl_subject_label *proc;
59594+ unsigned long flags;
59595+
59596+ if (unlikely(!(gr_status & GR_READY)))
59597+ return;
59598+
59599+ flags = pax_get_flags(task);
59600+
59601+ proc = task->acl;
59602+
59603+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
59604+ flags &= ~MF_PAX_PAGEEXEC;
59605+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
59606+ flags &= ~MF_PAX_SEGMEXEC;
59607+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
59608+ flags &= ~MF_PAX_RANDMMAP;
59609+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
59610+ flags &= ~MF_PAX_EMUTRAMP;
59611+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
59612+ flags &= ~MF_PAX_MPROTECT;
59613+
59614+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
59615+ flags |= MF_PAX_PAGEEXEC;
59616+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
59617+ flags |= MF_PAX_SEGMEXEC;
59618+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
59619+ flags |= MF_PAX_RANDMMAP;
59620+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
59621+ flags |= MF_PAX_EMUTRAMP;
59622+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
59623+ flags |= MF_PAX_MPROTECT;
59624+
59625+ pax_set_flags(task, flags);
59626+
59627+ return;
59628+}
59629+#endif
59630+
59631+#ifdef CONFIG_SYSCTL
59632+/* Eric Biederman likes breaking userland ABI and every inode-based security
59633+ system to save 35kb of memory */
59634+
59635+/* we modify the passed in filename, but adjust it back before returning */
59636+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
59637+{
59638+ struct name_entry *nmatch;
59639+ char *p, *lastp = NULL;
59640+ struct acl_object_label *obj = NULL, *tmp;
59641+ struct acl_subject_label *tmpsubj;
59642+ char c = '\0';
59643+
59644+ read_lock(&gr_inode_lock);
59645+
59646+ p = name + len - 1;
59647+ do {
59648+ nmatch = lookup_name_entry(name);
59649+ if (lastp != NULL)
59650+ *lastp = c;
59651+
59652+ if (nmatch == NULL)
59653+ goto next_component;
59654+ tmpsubj = current->acl;
59655+ do {
59656+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
59657+ if (obj != NULL) {
59658+ tmp = obj->globbed;
59659+ while (tmp) {
59660+ if (!glob_match(tmp->filename, name)) {
59661+ obj = tmp;
59662+ goto found_obj;
59663+ }
59664+ tmp = tmp->next;
59665+ }
59666+ goto found_obj;
59667+ }
59668+ } while ((tmpsubj = tmpsubj->parent_subject));
59669+next_component:
59670+ /* end case */
59671+ if (p == name)
59672+ break;
59673+
59674+ while (*p != '/')
59675+ p--;
59676+ if (p == name)
59677+ lastp = p + 1;
59678+ else {
59679+ lastp = p;
59680+ p--;
59681+ }
59682+ c = *lastp;
59683+ *lastp = '\0';
59684+ } while (1);
59685+found_obj:
59686+ read_unlock(&gr_inode_lock);
59687+ /* obj returned will always be non-null */
59688+ return obj;
59689+}
59690+
59691+/* returns 0 when allowing, non-zero on error
59692+ op of 0 is used for readdir, so we don't log the names of hidden files
59693+*/
59694+__u32
59695+gr_handle_sysctl(const struct ctl_table *table, const int op)
59696+{
59697+ ctl_table *tmp;
59698+ const char *proc_sys = "/proc/sys";
59699+ char *path;
59700+ struct acl_object_label *obj;
59701+ unsigned short len = 0, pos = 0, depth = 0, i;
59702+ __u32 err = 0;
59703+ __u32 mode = 0;
59704+
59705+ if (unlikely(!(gr_status & GR_READY)))
59706+ return 0;
59707+
59708+ /* for now, ignore operations on non-sysctl entries if it's not a
59709+ readdir*/
59710+ if (table->child != NULL && op != 0)
59711+ return 0;
59712+
59713+ mode |= GR_FIND;
59714+ /* it's only a read if it's an entry, read on dirs is for readdir */
59715+ if (op & MAY_READ)
59716+ mode |= GR_READ;
59717+ if (op & MAY_WRITE)
59718+ mode |= GR_WRITE;
59719+
59720+ preempt_disable();
59721+
59722+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
59723+
59724+ /* it's only a read/write if it's an actual entry, not a dir
59725+ (which are opened for readdir)
59726+ */
59727+
59728+ /* convert the requested sysctl entry into a pathname */
59729+
59730+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59731+ len += strlen(tmp->procname);
59732+ len++;
59733+ depth++;
59734+ }
59735+
59736+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
59737+ /* deny */
59738+ goto out;
59739+ }
59740+
59741+ memset(path, 0, PAGE_SIZE);
59742+
59743+ memcpy(path, proc_sys, strlen(proc_sys));
59744+
59745+ pos += strlen(proc_sys);
59746+
59747+ for (; depth > 0; depth--) {
59748+ path[pos] = '/';
59749+ pos++;
59750+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59751+ if (depth == i) {
59752+ memcpy(path + pos, tmp->procname,
59753+ strlen(tmp->procname));
59754+ pos += strlen(tmp->procname);
59755+ }
59756+ i++;
59757+ }
59758+ }
59759+
59760+ obj = gr_lookup_by_name(path, pos);
59761+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
59762+
59763+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
59764+ ((err & mode) != mode))) {
59765+ __u32 new_mode = mode;
59766+
59767+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59768+
59769+ err = 0;
59770+ gr_log_learn_sysctl(path, new_mode);
59771+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
59772+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
59773+ err = -ENOENT;
59774+ } else if (!(err & GR_FIND)) {
59775+ err = -ENOENT;
59776+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
59777+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
59778+ path, (mode & GR_READ) ? " reading" : "",
59779+ (mode & GR_WRITE) ? " writing" : "");
59780+ err = -EACCES;
59781+ } else if ((err & mode) != mode) {
59782+ err = -EACCES;
59783+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
59784+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
59785+ path, (mode & GR_READ) ? " reading" : "",
59786+ (mode & GR_WRITE) ? " writing" : "");
59787+ err = 0;
59788+ } else
59789+ err = 0;
59790+
59791+ out:
59792+ preempt_enable();
59793+
59794+ return err;
59795+}
59796+#endif
59797+
59798+int
59799+gr_handle_proc_ptrace(struct task_struct *task)
59800+{
59801+ struct file *filp;
59802+ struct task_struct *tmp = task;
59803+ struct task_struct *curtemp = current;
59804+ __u32 retmode;
59805+
59806+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59807+ if (unlikely(!(gr_status & GR_READY)))
59808+ return 0;
59809+#endif
59810+
59811+ read_lock(&tasklist_lock);
59812+ read_lock(&grsec_exec_file_lock);
59813+ filp = task->exec_file;
59814+
59815+ while (tmp->pid > 0) {
59816+ if (tmp == curtemp)
59817+ break;
59818+ tmp = tmp->real_parent;
59819+ }
59820+
59821+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59822+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
59823+ read_unlock(&grsec_exec_file_lock);
59824+ read_unlock(&tasklist_lock);
59825+ return 1;
59826+ }
59827+
59828+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59829+ if (!(gr_status & GR_READY)) {
59830+ read_unlock(&grsec_exec_file_lock);
59831+ read_unlock(&tasklist_lock);
59832+ return 0;
59833+ }
59834+#endif
59835+
59836+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
59837+ read_unlock(&grsec_exec_file_lock);
59838+ read_unlock(&tasklist_lock);
59839+
59840+ if (retmode & GR_NOPTRACE)
59841+ return 1;
59842+
59843+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
59844+ && (current->acl != task->acl || (current->acl != current->role->root_label
59845+ && current->pid != task->pid)))
59846+ return 1;
59847+
59848+ return 0;
59849+}
59850+
59851+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
59852+{
59853+ if (unlikely(!(gr_status & GR_READY)))
59854+ return;
59855+
59856+ if (!(current->role->roletype & GR_ROLE_GOD))
59857+ return;
59858+
59859+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
59860+ p->role->rolename, gr_task_roletype_to_char(p),
59861+ p->acl->filename);
59862+}
59863+
59864+int
59865+gr_handle_ptrace(struct task_struct *task, const long request)
59866+{
59867+ struct task_struct *tmp = task;
59868+ struct task_struct *curtemp = current;
59869+ __u32 retmode;
59870+
59871+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59872+ if (unlikely(!(gr_status & GR_READY)))
59873+ return 0;
59874+#endif
59875+
59876+ read_lock(&tasklist_lock);
59877+ while (tmp->pid > 0) {
59878+ if (tmp == curtemp)
59879+ break;
59880+ tmp = tmp->real_parent;
59881+ }
59882+
59883+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59884+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
59885+ read_unlock(&tasklist_lock);
59886+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59887+ return 1;
59888+ }
59889+ read_unlock(&tasklist_lock);
59890+
59891+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59892+ if (!(gr_status & GR_READY))
59893+ return 0;
59894+#endif
59895+
59896+ read_lock(&grsec_exec_file_lock);
59897+ if (unlikely(!task->exec_file)) {
59898+ read_unlock(&grsec_exec_file_lock);
59899+ return 0;
59900+ }
59901+
59902+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
59903+ read_unlock(&grsec_exec_file_lock);
59904+
59905+ if (retmode & GR_NOPTRACE) {
59906+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59907+ return 1;
59908+ }
59909+
59910+ if (retmode & GR_PTRACERD) {
59911+ switch (request) {
59912+ case PTRACE_POKETEXT:
59913+ case PTRACE_POKEDATA:
59914+ case PTRACE_POKEUSR:
59915+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
59916+ case PTRACE_SETREGS:
59917+ case PTRACE_SETFPREGS:
59918+#endif
59919+#ifdef CONFIG_X86
59920+ case PTRACE_SETFPXREGS:
59921+#endif
59922+#ifdef CONFIG_ALTIVEC
59923+ case PTRACE_SETVRREGS:
59924+#endif
59925+ return 1;
59926+ default:
59927+ return 0;
59928+ }
59929+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
59930+ !(current->role->roletype & GR_ROLE_GOD) &&
59931+ (current->acl != task->acl)) {
59932+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59933+ return 1;
59934+ }
59935+
59936+ return 0;
59937+}
59938+
59939+static int is_writable_mmap(const struct file *filp)
59940+{
59941+ struct task_struct *task = current;
59942+ struct acl_object_label *obj, *obj2;
59943+
59944+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
59945+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
59946+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59947+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
59948+ task->role->root_label);
59949+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
59950+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
59951+ return 1;
59952+ }
59953+ }
59954+ return 0;
59955+}
59956+
59957+int
59958+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
59959+{
59960+ __u32 mode;
59961+
59962+ if (unlikely(!file || !(prot & PROT_EXEC)))
59963+ return 1;
59964+
59965+ if (is_writable_mmap(file))
59966+ return 0;
59967+
59968+ mode =
59969+ gr_search_file(file->f_path.dentry,
59970+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59971+ file->f_path.mnt);
59972+
59973+ if (!gr_tpe_allow(file))
59974+ return 0;
59975+
59976+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59977+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59978+ return 0;
59979+ } else if (unlikely(!(mode & GR_EXEC))) {
59980+ return 0;
59981+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59982+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59983+ return 1;
59984+ }
59985+
59986+ return 1;
59987+}
59988+
59989+int
59990+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59991+{
59992+ __u32 mode;
59993+
59994+ if (unlikely(!file || !(prot & PROT_EXEC)))
59995+ return 1;
59996+
59997+ if (is_writable_mmap(file))
59998+ return 0;
59999+
60000+ mode =
60001+ gr_search_file(file->f_path.dentry,
60002+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
60003+ file->f_path.mnt);
60004+
60005+ if (!gr_tpe_allow(file))
60006+ return 0;
60007+
60008+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
60009+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60010+ return 0;
60011+ } else if (unlikely(!(mode & GR_EXEC))) {
60012+ return 0;
60013+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
60014+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
60015+ return 1;
60016+ }
60017+
60018+ return 1;
60019+}
60020+
60021+void
60022+gr_acl_handle_psacct(struct task_struct *task, const long code)
60023+{
60024+ unsigned long runtime;
60025+ unsigned long cputime;
60026+ unsigned int wday, cday;
60027+ __u8 whr, chr;
60028+ __u8 wmin, cmin;
60029+ __u8 wsec, csec;
60030+ struct timespec timeval;
60031+
60032+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
60033+ !(task->acl->mode & GR_PROCACCT)))
60034+ return;
60035+
60036+ do_posix_clock_monotonic_gettime(&timeval);
60037+ runtime = timeval.tv_sec - task->start_time.tv_sec;
60038+ wday = runtime / (3600 * 24);
60039+ runtime -= wday * (3600 * 24);
60040+ whr = runtime / 3600;
60041+ runtime -= whr * 3600;
60042+ wmin = runtime / 60;
60043+ runtime -= wmin * 60;
60044+ wsec = runtime;
60045+
60046+ cputime = (task->utime + task->stime) / HZ;
60047+ cday = cputime / (3600 * 24);
60048+ cputime -= cday * (3600 * 24);
60049+ chr = cputime / 3600;
60050+ cputime -= chr * 3600;
60051+ cmin = cputime / 60;
60052+ cputime -= cmin * 60;
60053+ csec = cputime;
60054+
60055+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
60056+
60057+ return;
60058+}
60059+
60060+void gr_set_kernel_label(struct task_struct *task)
60061+{
60062+ if (gr_status & GR_READY) {
60063+ task->role = kernel_role;
60064+ task->acl = kernel_role->root_label;
60065+ }
60066+ return;
60067+}
60068+
60069+#ifdef CONFIG_TASKSTATS
60070+int gr_is_taskstats_denied(int pid)
60071+{
60072+ struct task_struct *task;
60073+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60074+ const struct cred *cred;
60075+#endif
60076+ int ret = 0;
60077+
60078+ /* restrict taskstats viewing to un-chrooted root users
60079+ who have the 'view' subject flag if the RBAC system is enabled
60080+ */
60081+
60082+ rcu_read_lock();
60083+ read_lock(&tasklist_lock);
60084+ task = find_task_by_vpid(pid);
60085+ if (task) {
60086+#ifdef CONFIG_GRKERNSEC_CHROOT
60087+ if (proc_is_chrooted(task))
60088+ ret = -EACCES;
60089+#endif
60090+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60091+ cred = __task_cred(task);
60092+#ifdef CONFIG_GRKERNSEC_PROC_USER
60093+ if (cred->uid != 0)
60094+ ret = -EACCES;
60095+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60096+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
60097+ ret = -EACCES;
60098+#endif
60099+#endif
60100+ if (gr_status & GR_READY) {
60101+ if (!(task->acl->mode & GR_VIEW))
60102+ ret = -EACCES;
60103+ }
60104+ } else
60105+ ret = -ENOENT;
60106+
60107+ read_unlock(&tasklist_lock);
60108+ rcu_read_unlock();
60109+
60110+ return ret;
60111+}
60112+#endif
60113+
60114+/* AUXV entries are filled via a descendant of search_binary_handler
60115+ after we've already applied the subject for the target
60116+*/
60117+int gr_acl_enable_at_secure(void)
60118+{
60119+ if (unlikely(!(gr_status & GR_READY)))
60120+ return 0;
60121+
60122+ if (current->acl->mode & GR_ATSECURE)
60123+ return 1;
60124+
60125+ return 0;
60126+}
60127+
60128+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60129+{
60130+ struct task_struct *task = current;
60131+ struct dentry *dentry = file->f_path.dentry;
60132+ struct vfsmount *mnt = file->f_path.mnt;
60133+ struct acl_object_label *obj, *tmp;
60134+ struct acl_subject_label *subj;
60135+ unsigned int bufsize;
60136+ int is_not_root;
60137+ char *path;
60138+ dev_t dev = __get_dev(dentry);
60139+
60140+ if (unlikely(!(gr_status & GR_READY)))
60141+ return 1;
60142+
60143+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60144+ return 1;
60145+
60146+ /* ignore Eric Biederman */
60147+ if (IS_PRIVATE(dentry->d_inode))
60148+ return 1;
60149+
60150+ subj = task->acl;
60151+ do {
60152+ obj = lookup_acl_obj_label(ino, dev, subj);
60153+ if (obj != NULL)
60154+ return (obj->mode & GR_FIND) ? 1 : 0;
60155+ } while ((subj = subj->parent_subject));
60156+
60157+ /* this is purely an optimization since we're looking for an object
60158+ for the directory we're doing a readdir on
60159+ if it's possible for any globbed object to match the entry we're
60160+ filling into the directory, then the object we find here will be
60161+ an anchor point with attached globbed objects
60162+ */
60163+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60164+ if (obj->globbed == NULL)
60165+ return (obj->mode & GR_FIND) ? 1 : 0;
60166+
60167+ is_not_root = ((obj->filename[0] == '/') &&
60168+ (obj->filename[1] == '\0')) ? 0 : 1;
60169+ bufsize = PAGE_SIZE - namelen - is_not_root;
60170+
60171+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
60172+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60173+ return 1;
60174+
60175+ preempt_disable();
60176+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60177+ bufsize);
60178+
60179+ bufsize = strlen(path);
60180+
60181+ /* if base is "/", don't append an additional slash */
60182+ if (is_not_root)
60183+ *(path + bufsize) = '/';
60184+ memcpy(path + bufsize + is_not_root, name, namelen);
60185+ *(path + bufsize + namelen + is_not_root) = '\0';
60186+
60187+ tmp = obj->globbed;
60188+ while (tmp) {
60189+ if (!glob_match(tmp->filename, path)) {
60190+ preempt_enable();
60191+ return (tmp->mode & GR_FIND) ? 1 : 0;
60192+ }
60193+ tmp = tmp->next;
60194+ }
60195+ preempt_enable();
60196+ return (obj->mode & GR_FIND) ? 1 : 0;
60197+}
60198+
60199+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60200+EXPORT_SYMBOL(gr_acl_is_enabled);
60201+#endif
60202+EXPORT_SYMBOL(gr_learn_resource);
60203+EXPORT_SYMBOL(gr_set_kernel_label);
60204+#ifdef CONFIG_SECURITY
60205+EXPORT_SYMBOL(gr_check_user_change);
60206+EXPORT_SYMBOL(gr_check_group_change);
60207+#endif
60208+
60209diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60210new file mode 100644
60211index 0000000..34fefda
60212--- /dev/null
60213+++ b/grsecurity/gracl_alloc.c
60214@@ -0,0 +1,105 @@
60215+#include <linux/kernel.h>
60216+#include <linux/mm.h>
60217+#include <linux/slab.h>
60218+#include <linux/vmalloc.h>
60219+#include <linux/gracl.h>
60220+#include <linux/grsecurity.h>
60221+
60222+static unsigned long alloc_stack_next = 1;
60223+static unsigned long alloc_stack_size = 1;
60224+static void **alloc_stack;
60225+
60226+static __inline__ int
60227+alloc_pop(void)
60228+{
60229+ if (alloc_stack_next == 1)
60230+ return 0;
60231+
60232+ kfree(alloc_stack[alloc_stack_next - 2]);
60233+
60234+ alloc_stack_next--;
60235+
60236+ return 1;
60237+}
60238+
60239+static __inline__ int
60240+alloc_push(void *buf)
60241+{
60242+ if (alloc_stack_next >= alloc_stack_size)
60243+ return 1;
60244+
60245+ alloc_stack[alloc_stack_next - 1] = buf;
60246+
60247+ alloc_stack_next++;
60248+
60249+ return 0;
60250+}
60251+
60252+void *
60253+acl_alloc(unsigned long len)
60254+{
60255+ void *ret = NULL;
60256+
60257+ if (!len || len > PAGE_SIZE)
60258+ goto out;
60259+
60260+ ret = kmalloc(len, GFP_KERNEL);
60261+
60262+ if (ret) {
60263+ if (alloc_push(ret)) {
60264+ kfree(ret);
60265+ ret = NULL;
60266+ }
60267+ }
60268+
60269+out:
60270+ return ret;
60271+}
60272+
60273+void *
60274+acl_alloc_num(unsigned long num, unsigned long len)
60275+{
60276+ if (!len || (num > (PAGE_SIZE / len)))
60277+ return NULL;
60278+
60279+ return acl_alloc(num * len);
60280+}
60281+
60282+void
60283+acl_free_all(void)
60284+{
60285+ if (gr_acl_is_enabled() || !alloc_stack)
60286+ return;
60287+
60288+ while (alloc_pop()) ;
60289+
60290+ if (alloc_stack) {
60291+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60292+ kfree(alloc_stack);
60293+ else
60294+ vfree(alloc_stack);
60295+ }
60296+
60297+ alloc_stack = NULL;
60298+ alloc_stack_size = 1;
60299+ alloc_stack_next = 1;
60300+
60301+ return;
60302+}
60303+
60304+int
60305+acl_alloc_stack_init(unsigned long size)
60306+{
60307+ if ((size * sizeof (void *)) <= PAGE_SIZE)
60308+ alloc_stack =
60309+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60310+ else
60311+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
60312+
60313+ alloc_stack_size = size;
60314+
60315+ if (!alloc_stack)
60316+ return 0;
60317+ else
60318+ return 1;
60319+}
60320diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60321new file mode 100644
60322index 0000000..955ddfb
60323--- /dev/null
60324+++ b/grsecurity/gracl_cap.c
60325@@ -0,0 +1,101 @@
60326+#include <linux/kernel.h>
60327+#include <linux/module.h>
60328+#include <linux/sched.h>
60329+#include <linux/gracl.h>
60330+#include <linux/grsecurity.h>
60331+#include <linux/grinternal.h>
60332+
60333+extern const char *captab_log[];
60334+extern int captab_log_entries;
60335+
60336+int
60337+gr_acl_is_capable(const int cap)
60338+{
60339+ struct task_struct *task = current;
60340+ const struct cred *cred = current_cred();
60341+ struct acl_subject_label *curracl;
60342+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60343+ kernel_cap_t cap_audit = __cap_empty_set;
60344+
60345+ if (!gr_acl_is_enabled())
60346+ return 1;
60347+
60348+ curracl = task->acl;
60349+
60350+ cap_drop = curracl->cap_lower;
60351+ cap_mask = curracl->cap_mask;
60352+ cap_audit = curracl->cap_invert_audit;
60353+
60354+ while ((curracl = curracl->parent_subject)) {
60355+ /* if the cap isn't specified in the current computed mask but is specified in the
60356+ current level subject, and is lowered in the current level subject, then add
60357+ it to the set of dropped capabilities
60358+ otherwise, add the current level subject's mask to the current computed mask
60359+ */
60360+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60361+ cap_raise(cap_mask, cap);
60362+ if (cap_raised(curracl->cap_lower, cap))
60363+ cap_raise(cap_drop, cap);
60364+ if (cap_raised(curracl->cap_invert_audit, cap))
60365+ cap_raise(cap_audit, cap);
60366+ }
60367+ }
60368+
60369+ if (!cap_raised(cap_drop, cap)) {
60370+ if (cap_raised(cap_audit, cap))
60371+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60372+ return 1;
60373+ }
60374+
60375+ curracl = task->acl;
60376+
60377+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60378+ && cap_raised(cred->cap_effective, cap)) {
60379+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60380+ task->role->roletype, cred->uid,
60381+ cred->gid, task->exec_file ?
60382+ gr_to_filename(task->exec_file->f_path.dentry,
60383+ task->exec_file->f_path.mnt) : curracl->filename,
60384+ curracl->filename, 0UL,
60385+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60386+ return 1;
60387+ }
60388+
60389+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60390+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60391+ return 0;
60392+}
60393+
60394+int
60395+gr_acl_is_capable_nolog(const int cap)
60396+{
60397+ struct acl_subject_label *curracl;
60398+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60399+
60400+ if (!gr_acl_is_enabled())
60401+ return 1;
60402+
60403+ curracl = current->acl;
60404+
60405+ cap_drop = curracl->cap_lower;
60406+ cap_mask = curracl->cap_mask;
60407+
60408+ while ((curracl = curracl->parent_subject)) {
60409+ /* if the cap isn't specified in the current computed mask but is specified in the
60410+ current level subject, and is lowered in the current level subject, then add
60411+ it to the set of dropped capabilities
60412+ otherwise, add the current level subject's mask to the current computed mask
60413+ */
60414+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60415+ cap_raise(cap_mask, cap);
60416+ if (cap_raised(curracl->cap_lower, cap))
60417+ cap_raise(cap_drop, cap);
60418+ }
60419+ }
60420+
60421+ if (!cap_raised(cap_drop, cap))
60422+ return 1;
60423+
60424+ return 0;
60425+}
60426+
60427diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60428new file mode 100644
60429index 0000000..d5f210c
60430--- /dev/null
60431+++ b/grsecurity/gracl_fs.c
60432@@ -0,0 +1,433 @@
60433+#include <linux/kernel.h>
60434+#include <linux/sched.h>
60435+#include <linux/types.h>
60436+#include <linux/fs.h>
60437+#include <linux/file.h>
60438+#include <linux/stat.h>
60439+#include <linux/grsecurity.h>
60440+#include <linux/grinternal.h>
60441+#include <linux/gracl.h>
60442+
60443+__u32
60444+gr_acl_handle_hidden_file(const struct dentry * dentry,
60445+ const struct vfsmount * mnt)
60446+{
60447+ __u32 mode;
60448+
60449+ if (unlikely(!dentry->d_inode))
60450+ return GR_FIND;
60451+
60452+ mode =
60453+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
60454+
60455+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
60456+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60457+ return mode;
60458+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
60459+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60460+ return 0;
60461+ } else if (unlikely(!(mode & GR_FIND)))
60462+ return 0;
60463+
60464+ return GR_FIND;
60465+}
60466+
60467+__u32
60468+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60469+ int acc_mode)
60470+{
60471+ __u32 reqmode = GR_FIND;
60472+ __u32 mode;
60473+
60474+ if (unlikely(!dentry->d_inode))
60475+ return reqmode;
60476+
60477+ if (acc_mode & MAY_APPEND)
60478+ reqmode |= GR_APPEND;
60479+ else if (acc_mode & MAY_WRITE)
60480+ reqmode |= GR_WRITE;
60481+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60482+ reqmode |= GR_READ;
60483+
60484+ mode =
60485+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60486+ mnt);
60487+
60488+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60489+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60490+ reqmode & GR_READ ? " reading" : "",
60491+ reqmode & GR_WRITE ? " writing" : reqmode &
60492+ GR_APPEND ? " appending" : "");
60493+ return reqmode;
60494+ } else
60495+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60496+ {
60497+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60498+ reqmode & GR_READ ? " reading" : "",
60499+ reqmode & GR_WRITE ? " writing" : reqmode &
60500+ GR_APPEND ? " appending" : "");
60501+ return 0;
60502+ } else if (unlikely((mode & reqmode) != reqmode))
60503+ return 0;
60504+
60505+ return reqmode;
60506+}
60507+
60508+__u32
60509+gr_acl_handle_creat(const struct dentry * dentry,
60510+ const struct dentry * p_dentry,
60511+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60512+ const int imode)
60513+{
60514+ __u32 reqmode = GR_WRITE | GR_CREATE;
60515+ __u32 mode;
60516+
60517+ if (acc_mode & MAY_APPEND)
60518+ reqmode |= GR_APPEND;
60519+ // if a directory was required or the directory already exists, then
60520+ // don't count this open as a read
60521+ if ((acc_mode & MAY_READ) &&
60522+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
60523+ reqmode |= GR_READ;
60524+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
60525+ reqmode |= GR_SETID;
60526+
60527+ mode =
60528+ gr_check_create(dentry, p_dentry, p_mnt,
60529+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60530+
60531+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60532+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60533+ reqmode & GR_READ ? " reading" : "",
60534+ reqmode & GR_WRITE ? " writing" : reqmode &
60535+ GR_APPEND ? " appending" : "");
60536+ return reqmode;
60537+ } else
60538+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60539+ {
60540+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60541+ reqmode & GR_READ ? " reading" : "",
60542+ reqmode & GR_WRITE ? " writing" : reqmode &
60543+ GR_APPEND ? " appending" : "");
60544+ return 0;
60545+ } else if (unlikely((mode & reqmode) != reqmode))
60546+ return 0;
60547+
60548+ return reqmode;
60549+}
60550+
60551+__u32
60552+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
60553+ const int fmode)
60554+{
60555+ __u32 mode, reqmode = GR_FIND;
60556+
60557+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
60558+ reqmode |= GR_EXEC;
60559+ if (fmode & S_IWOTH)
60560+ reqmode |= GR_WRITE;
60561+ if (fmode & S_IROTH)
60562+ reqmode |= GR_READ;
60563+
60564+ mode =
60565+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60566+ mnt);
60567+
60568+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60569+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60570+ reqmode & GR_READ ? " reading" : "",
60571+ reqmode & GR_WRITE ? " writing" : "",
60572+ reqmode & GR_EXEC ? " executing" : "");
60573+ return reqmode;
60574+ } else
60575+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60576+ {
60577+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60578+ reqmode & GR_READ ? " reading" : "",
60579+ reqmode & GR_WRITE ? " writing" : "",
60580+ reqmode & GR_EXEC ? " executing" : "");
60581+ return 0;
60582+ } else if (unlikely((mode & reqmode) != reqmode))
60583+ return 0;
60584+
60585+ return reqmode;
60586+}
60587+
60588+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
60589+{
60590+ __u32 mode;
60591+
60592+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
60593+
60594+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60595+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
60596+ return mode;
60597+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60598+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
60599+ return 0;
60600+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
60601+ return 0;
60602+
60603+ return (reqmode);
60604+}
60605+
60606+__u32
60607+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60608+{
60609+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
60610+}
60611+
60612+__u32
60613+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
60614+{
60615+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
60616+}
60617+
60618+__u32
60619+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
60620+{
60621+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
60622+}
60623+
60624+__u32
60625+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
60626+{
60627+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
60628+}
60629+
60630+__u32
60631+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
60632+ mode_t mode)
60633+{
60634+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
60635+ return 1;
60636+
60637+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60638+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60639+ GR_FCHMOD_ACL_MSG);
60640+ } else {
60641+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
60642+ }
60643+}
60644+
60645+__u32
60646+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
60647+ mode_t mode)
60648+{
60649+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60650+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60651+ GR_CHMOD_ACL_MSG);
60652+ } else {
60653+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
60654+ }
60655+}
60656+
60657+__u32
60658+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
60659+{
60660+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
60661+}
60662+
60663+__u32
60664+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
60665+{
60666+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
60667+}
60668+
60669+__u32
60670+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
60671+{
60672+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
60673+}
60674+
60675+__u32
60676+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
60677+{
60678+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
60679+ GR_UNIXCONNECT_ACL_MSG);
60680+}
60681+
60682+/* hardlinks require at minimum create and link permission,
60683+ any additional privilege required is based on the
60684+ privilege of the file being linked to
60685+*/
60686+__u32
60687+gr_acl_handle_link(const struct dentry * new_dentry,
60688+ const struct dentry * parent_dentry,
60689+ const struct vfsmount * parent_mnt,
60690+ const struct dentry * old_dentry,
60691+ const struct vfsmount * old_mnt, const char *to)
60692+{
60693+ __u32 mode;
60694+ __u32 needmode = GR_CREATE | GR_LINK;
60695+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
60696+
60697+ mode =
60698+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
60699+ old_mnt);
60700+
60701+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
60702+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60703+ return mode;
60704+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60705+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60706+ return 0;
60707+ } else if (unlikely((mode & needmode) != needmode))
60708+ return 0;
60709+
60710+ return 1;
60711+}
60712+
60713+__u32
60714+gr_acl_handle_symlink(const struct dentry * new_dentry,
60715+ const struct dentry * parent_dentry,
60716+ const struct vfsmount * parent_mnt, const char *from)
60717+{
60718+ __u32 needmode = GR_WRITE | GR_CREATE;
60719+ __u32 mode;
60720+
60721+ mode =
60722+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
60723+ GR_CREATE | GR_AUDIT_CREATE |
60724+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
60725+
60726+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
60727+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60728+ return mode;
60729+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60730+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60731+ return 0;
60732+ } else if (unlikely((mode & needmode) != needmode))
60733+ return 0;
60734+
60735+ return (GR_WRITE | GR_CREATE);
60736+}
60737+
60738+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
60739+{
60740+ __u32 mode;
60741+
60742+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60743+
60744+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60745+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
60746+ return mode;
60747+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60748+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
60749+ return 0;
60750+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
60751+ return 0;
60752+
60753+ return (reqmode);
60754+}
60755+
60756+__u32
60757+gr_acl_handle_mknod(const struct dentry * new_dentry,
60758+ const struct dentry * parent_dentry,
60759+ const struct vfsmount * parent_mnt,
60760+ const int mode)
60761+{
60762+ __u32 reqmode = GR_WRITE | GR_CREATE;
60763+ if (unlikely(mode & (S_ISUID | S_ISGID)))
60764+ reqmode |= GR_SETID;
60765+
60766+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60767+ reqmode, GR_MKNOD_ACL_MSG);
60768+}
60769+
60770+__u32
60771+gr_acl_handle_mkdir(const struct dentry *new_dentry,
60772+ const struct dentry *parent_dentry,
60773+ const struct vfsmount *parent_mnt)
60774+{
60775+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60776+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
60777+}
60778+
60779+#define RENAME_CHECK_SUCCESS(old, new) \
60780+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
60781+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
60782+
60783+int
60784+gr_acl_handle_rename(struct dentry *new_dentry,
60785+ struct dentry *parent_dentry,
60786+ const struct vfsmount *parent_mnt,
60787+ struct dentry *old_dentry,
60788+ struct inode *old_parent_inode,
60789+ struct vfsmount *old_mnt, const char *newname)
60790+{
60791+ __u32 comp1, comp2;
60792+ int error = 0;
60793+
60794+ if (unlikely(!gr_acl_is_enabled()))
60795+ return 0;
60796+
60797+ if (!new_dentry->d_inode) {
60798+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
60799+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
60800+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
60801+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
60802+ GR_DELETE | GR_AUDIT_DELETE |
60803+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60804+ GR_SUPPRESS, old_mnt);
60805+ } else {
60806+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
60807+ GR_CREATE | GR_DELETE |
60808+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
60809+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60810+ GR_SUPPRESS, parent_mnt);
60811+ comp2 =
60812+ gr_search_file(old_dentry,
60813+ GR_READ | GR_WRITE | GR_AUDIT_READ |
60814+ GR_DELETE | GR_AUDIT_DELETE |
60815+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
60816+ }
60817+
60818+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
60819+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
60820+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60821+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
60822+ && !(comp2 & GR_SUPPRESS)) {
60823+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60824+ error = -EACCES;
60825+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
60826+ error = -EACCES;
60827+
60828+ return error;
60829+}
60830+
60831+void
60832+gr_acl_handle_exit(void)
60833+{
60834+ u16 id;
60835+ char *rolename;
60836+ struct file *exec_file;
60837+
60838+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
60839+ !(current->role->roletype & GR_ROLE_PERSIST))) {
60840+ id = current->acl_role_id;
60841+ rolename = current->role->rolename;
60842+ gr_set_acls(1);
60843+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
60844+ }
60845+
60846+ write_lock(&grsec_exec_file_lock);
60847+ exec_file = current->exec_file;
60848+ current->exec_file = NULL;
60849+ write_unlock(&grsec_exec_file_lock);
60850+
60851+ if (exec_file)
60852+ fput(exec_file);
60853+}
60854+
60855+int
60856+gr_acl_handle_procpidmem(const struct task_struct *task)
60857+{
60858+ if (unlikely(!gr_acl_is_enabled()))
60859+ return 0;
60860+
60861+ if (task != current && task->acl->mode & GR_PROTPROCFD)
60862+ return -EACCES;
60863+
60864+ return 0;
60865+}
60866diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
60867new file mode 100644
60868index 0000000..cd07b96
60869--- /dev/null
60870+++ b/grsecurity/gracl_ip.c
60871@@ -0,0 +1,382 @@
60872+#include <linux/kernel.h>
60873+#include <asm/uaccess.h>
60874+#include <asm/errno.h>
60875+#include <net/sock.h>
60876+#include <linux/file.h>
60877+#include <linux/fs.h>
60878+#include <linux/net.h>
60879+#include <linux/in.h>
60880+#include <linux/skbuff.h>
60881+#include <linux/ip.h>
60882+#include <linux/udp.h>
60883+#include <linux/smp_lock.h>
60884+#include <linux/types.h>
60885+#include <linux/sched.h>
60886+#include <linux/netdevice.h>
60887+#include <linux/inetdevice.h>
60888+#include <linux/gracl.h>
60889+#include <linux/grsecurity.h>
60890+#include <linux/grinternal.h>
60891+
60892+#define GR_BIND 0x01
60893+#define GR_CONNECT 0x02
60894+#define GR_INVERT 0x04
60895+#define GR_BINDOVERRIDE 0x08
60896+#define GR_CONNECTOVERRIDE 0x10
60897+#define GR_SOCK_FAMILY 0x20
60898+
60899+static const char * gr_protocols[IPPROTO_MAX] = {
60900+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
60901+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
60902+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
60903+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
60904+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
60905+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
60906+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
60907+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
60908+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
60909+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
60910+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
60911+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
60912+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
60913+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
60914+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
60915+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
60916+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
60917+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
60918+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
60919+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
60920+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
60921+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
60922+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
60923+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
60924+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
60925+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
60926+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
60927+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
60928+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
60929+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
60930+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
60931+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
60932+ };
60933+
60934+static const char * gr_socktypes[SOCK_MAX] = {
60935+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
60936+ "unknown:7", "unknown:8", "unknown:9", "packet"
60937+ };
60938+
60939+static const char * gr_sockfamilies[AF_MAX+1] = {
60940+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
60941+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
60942+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
60943+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
60944+ };
60945+
60946+const char *
60947+gr_proto_to_name(unsigned char proto)
60948+{
60949+ return gr_protocols[proto];
60950+}
60951+
60952+const char *
60953+gr_socktype_to_name(unsigned char type)
60954+{
60955+ return gr_socktypes[type];
60956+}
60957+
60958+const char *
60959+gr_sockfamily_to_name(unsigned char family)
60960+{
60961+ return gr_sockfamilies[family];
60962+}
60963+
60964+int
60965+gr_search_socket(const int domain, const int type, const int protocol)
60966+{
60967+ struct acl_subject_label *curr;
60968+ const struct cred *cred = current_cred();
60969+
60970+ if (unlikely(!gr_acl_is_enabled()))
60971+ goto exit;
60972+
60973+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
60974+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
60975+ goto exit; // let the kernel handle it
60976+
60977+ curr = current->acl;
60978+
60979+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
60980+ /* the family is allowed, if this is PF_INET allow it only if
60981+ the extra sock type/protocol checks pass */
60982+ if (domain == PF_INET)
60983+ goto inet_check;
60984+ goto exit;
60985+ } else {
60986+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60987+ __u32 fakeip = 0;
60988+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60989+ current->role->roletype, cred->uid,
60990+ cred->gid, current->exec_file ?
60991+ gr_to_filename(current->exec_file->f_path.dentry,
60992+ current->exec_file->f_path.mnt) :
60993+ curr->filename, curr->filename,
60994+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
60995+ &current->signal->saved_ip);
60996+ goto exit;
60997+ }
60998+ goto exit_fail;
60999+ }
61000+
61001+inet_check:
61002+ /* the rest of this checking is for IPv4 only */
61003+ if (!curr->ips)
61004+ goto exit;
61005+
61006+ if ((curr->ip_type & (1 << type)) &&
61007+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
61008+ goto exit;
61009+
61010+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61011+ /* we don't place acls on raw sockets , and sometimes
61012+ dgram/ip sockets are opened for ioctl and not
61013+ bind/connect, so we'll fake a bind learn log */
61014+ if (type == SOCK_RAW || type == SOCK_PACKET) {
61015+ __u32 fakeip = 0;
61016+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61017+ current->role->roletype, cred->uid,
61018+ cred->gid, current->exec_file ?
61019+ gr_to_filename(current->exec_file->f_path.dentry,
61020+ current->exec_file->f_path.mnt) :
61021+ curr->filename, curr->filename,
61022+ &fakeip, 0, type,
61023+ protocol, GR_CONNECT, &current->signal->saved_ip);
61024+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
61025+ __u32 fakeip = 0;
61026+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61027+ current->role->roletype, cred->uid,
61028+ cred->gid, current->exec_file ?
61029+ gr_to_filename(current->exec_file->f_path.dentry,
61030+ current->exec_file->f_path.mnt) :
61031+ curr->filename, curr->filename,
61032+ &fakeip, 0, type,
61033+ protocol, GR_BIND, &current->signal->saved_ip);
61034+ }
61035+ /* we'll log when they use connect or bind */
61036+ goto exit;
61037+ }
61038+
61039+exit_fail:
61040+ if (domain == PF_INET)
61041+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
61042+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
61043+ else
61044+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
61045+ gr_socktype_to_name(type), protocol);
61046+
61047+ return 0;
61048+exit:
61049+ return 1;
61050+}
61051+
61052+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
61053+{
61054+ if ((ip->mode & mode) &&
61055+ (ip_port >= ip->low) &&
61056+ (ip_port <= ip->high) &&
61057+ ((ntohl(ip_addr) & our_netmask) ==
61058+ (ntohl(our_addr) & our_netmask))
61059+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
61060+ && (ip->type & (1 << type))) {
61061+ if (ip->mode & GR_INVERT)
61062+ return 2; // specifically denied
61063+ else
61064+ return 1; // allowed
61065+ }
61066+
61067+ return 0; // not specifically allowed, may continue parsing
61068+}
61069+
61070+static int
61071+gr_search_connectbind(const int full_mode, struct sock *sk,
61072+ struct sockaddr_in *addr, const int type)
61073+{
61074+ char iface[IFNAMSIZ] = {0};
61075+ struct acl_subject_label *curr;
61076+ struct acl_ip_label *ip;
61077+ struct inet_sock *isk;
61078+ struct net_device *dev;
61079+ struct in_device *idev;
61080+ unsigned long i;
61081+ int ret;
61082+ int mode = full_mode & (GR_BIND | GR_CONNECT);
61083+ __u32 ip_addr = 0;
61084+ __u32 our_addr;
61085+ __u32 our_netmask;
61086+ char *p;
61087+ __u16 ip_port = 0;
61088+ const struct cred *cred = current_cred();
61089+
61090+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61091+ return 0;
61092+
61093+ curr = current->acl;
61094+ isk = inet_sk(sk);
61095+
61096+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61097+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61098+ addr->sin_addr.s_addr = curr->inaddr_any_override;
61099+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61100+ struct sockaddr_in saddr;
61101+ int err;
61102+
61103+ saddr.sin_family = AF_INET;
61104+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
61105+ saddr.sin_port = isk->sport;
61106+
61107+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61108+ if (err)
61109+ return err;
61110+
61111+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61112+ if (err)
61113+ return err;
61114+ }
61115+
61116+ if (!curr->ips)
61117+ return 0;
61118+
61119+ ip_addr = addr->sin_addr.s_addr;
61120+ ip_port = ntohs(addr->sin_port);
61121+
61122+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61123+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61124+ current->role->roletype, cred->uid,
61125+ cred->gid, current->exec_file ?
61126+ gr_to_filename(current->exec_file->f_path.dentry,
61127+ current->exec_file->f_path.mnt) :
61128+ curr->filename, curr->filename,
61129+ &ip_addr, ip_port, type,
61130+ sk->sk_protocol, mode, &current->signal->saved_ip);
61131+ return 0;
61132+ }
61133+
61134+ for (i = 0; i < curr->ip_num; i++) {
61135+ ip = *(curr->ips + i);
61136+ if (ip->iface != NULL) {
61137+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
61138+ p = strchr(iface, ':');
61139+ if (p != NULL)
61140+ *p = '\0';
61141+ dev = dev_get_by_name(sock_net(sk), iface);
61142+ if (dev == NULL)
61143+ continue;
61144+ idev = in_dev_get(dev);
61145+ if (idev == NULL) {
61146+ dev_put(dev);
61147+ continue;
61148+ }
61149+ rcu_read_lock();
61150+ for_ifa(idev) {
61151+ if (!strcmp(ip->iface, ifa->ifa_label)) {
61152+ our_addr = ifa->ifa_address;
61153+ our_netmask = 0xffffffff;
61154+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61155+ if (ret == 1) {
61156+ rcu_read_unlock();
61157+ in_dev_put(idev);
61158+ dev_put(dev);
61159+ return 0;
61160+ } else if (ret == 2) {
61161+ rcu_read_unlock();
61162+ in_dev_put(idev);
61163+ dev_put(dev);
61164+ goto denied;
61165+ }
61166+ }
61167+ } endfor_ifa(idev);
61168+ rcu_read_unlock();
61169+ in_dev_put(idev);
61170+ dev_put(dev);
61171+ } else {
61172+ our_addr = ip->addr;
61173+ our_netmask = ip->netmask;
61174+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61175+ if (ret == 1)
61176+ return 0;
61177+ else if (ret == 2)
61178+ goto denied;
61179+ }
61180+ }
61181+
61182+denied:
61183+ if (mode == GR_BIND)
61184+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61185+ else if (mode == GR_CONNECT)
61186+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61187+
61188+ return -EACCES;
61189+}
61190+
61191+int
61192+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61193+{
61194+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61195+}
61196+
61197+int
61198+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61199+{
61200+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61201+}
61202+
61203+int gr_search_listen(struct socket *sock)
61204+{
61205+ struct sock *sk = sock->sk;
61206+ struct sockaddr_in addr;
61207+
61208+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61209+ addr.sin_port = inet_sk(sk)->sport;
61210+
61211+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61212+}
61213+
61214+int gr_search_accept(struct socket *sock)
61215+{
61216+ struct sock *sk = sock->sk;
61217+ struct sockaddr_in addr;
61218+
61219+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61220+ addr.sin_port = inet_sk(sk)->sport;
61221+
61222+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61223+}
61224+
61225+int
61226+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61227+{
61228+ if (addr)
61229+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61230+ else {
61231+ struct sockaddr_in sin;
61232+ const struct inet_sock *inet = inet_sk(sk);
61233+
61234+ sin.sin_addr.s_addr = inet->daddr;
61235+ sin.sin_port = inet->dport;
61236+
61237+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61238+ }
61239+}
61240+
61241+int
61242+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61243+{
61244+ struct sockaddr_in sin;
61245+
61246+ if (unlikely(skb->len < sizeof (struct udphdr)))
61247+ return 0; // skip this packet
61248+
61249+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61250+ sin.sin_port = udp_hdr(skb)->source;
61251+
61252+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61253+}
61254diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61255new file mode 100644
61256index 0000000..34bdd46
61257--- /dev/null
61258+++ b/grsecurity/gracl_learn.c
61259@@ -0,0 +1,208 @@
61260+#include <linux/kernel.h>
61261+#include <linux/mm.h>
61262+#include <linux/sched.h>
61263+#include <linux/poll.h>
61264+#include <linux/smp_lock.h>
61265+#include <linux/string.h>
61266+#include <linux/file.h>
61267+#include <linux/types.h>
61268+#include <linux/vmalloc.h>
61269+#include <linux/grinternal.h>
61270+
61271+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61272+ size_t count, loff_t *ppos);
61273+extern int gr_acl_is_enabled(void);
61274+
61275+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61276+static int gr_learn_attached;
61277+
61278+/* use a 512k buffer */
61279+#define LEARN_BUFFER_SIZE (512 * 1024)
61280+
61281+static DEFINE_SPINLOCK(gr_learn_lock);
61282+static DEFINE_MUTEX(gr_learn_user_mutex);
61283+
61284+/* we need to maintain two buffers, so that the kernel context of grlearn
61285+ uses a semaphore around the userspace copying, and the other kernel contexts
61286+ use a spinlock when copying into the buffer, since they cannot sleep
61287+*/
61288+static char *learn_buffer;
61289+static char *learn_buffer_user;
61290+static int learn_buffer_len;
61291+static int learn_buffer_user_len;
61292+
61293+static ssize_t
61294+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61295+{
61296+ DECLARE_WAITQUEUE(wait, current);
61297+ ssize_t retval = 0;
61298+
61299+ add_wait_queue(&learn_wait, &wait);
61300+ set_current_state(TASK_INTERRUPTIBLE);
61301+ do {
61302+ mutex_lock(&gr_learn_user_mutex);
61303+ spin_lock(&gr_learn_lock);
61304+ if (learn_buffer_len)
61305+ break;
61306+ spin_unlock(&gr_learn_lock);
61307+ mutex_unlock(&gr_learn_user_mutex);
61308+ if (file->f_flags & O_NONBLOCK) {
61309+ retval = -EAGAIN;
61310+ goto out;
61311+ }
61312+ if (signal_pending(current)) {
61313+ retval = -ERESTARTSYS;
61314+ goto out;
61315+ }
61316+
61317+ schedule();
61318+ } while (1);
61319+
61320+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61321+ learn_buffer_user_len = learn_buffer_len;
61322+ retval = learn_buffer_len;
61323+ learn_buffer_len = 0;
61324+
61325+ spin_unlock(&gr_learn_lock);
61326+
61327+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61328+ retval = -EFAULT;
61329+
61330+ mutex_unlock(&gr_learn_user_mutex);
61331+out:
61332+ set_current_state(TASK_RUNNING);
61333+ remove_wait_queue(&learn_wait, &wait);
61334+ return retval;
61335+}
61336+
61337+static unsigned int
61338+poll_learn(struct file * file, poll_table * wait)
61339+{
61340+ poll_wait(file, &learn_wait, wait);
61341+
61342+ if (learn_buffer_len)
61343+ return (POLLIN | POLLRDNORM);
61344+
61345+ return 0;
61346+}
61347+
61348+void
61349+gr_clear_learn_entries(void)
61350+{
61351+ char *tmp;
61352+
61353+ mutex_lock(&gr_learn_user_mutex);
61354+ spin_lock(&gr_learn_lock);
61355+ tmp = learn_buffer;
61356+ learn_buffer = NULL;
61357+ spin_unlock(&gr_learn_lock);
61358+ if (tmp)
61359+ vfree(tmp);
61360+ if (learn_buffer_user != NULL) {
61361+ vfree(learn_buffer_user);
61362+ learn_buffer_user = NULL;
61363+ }
61364+ learn_buffer_len = 0;
61365+ mutex_unlock(&gr_learn_user_mutex);
61366+
61367+ return;
61368+}
61369+
61370+void
61371+gr_add_learn_entry(const char *fmt, ...)
61372+{
61373+ va_list args;
61374+ unsigned int len;
61375+
61376+ if (!gr_learn_attached)
61377+ return;
61378+
61379+ spin_lock(&gr_learn_lock);
61380+
61381+ /* leave a gap at the end so we know when it's "full" but don't have to
61382+ compute the exact length of the string we're trying to append
61383+ */
61384+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61385+ spin_unlock(&gr_learn_lock);
61386+ wake_up_interruptible(&learn_wait);
61387+ return;
61388+ }
61389+ if (learn_buffer == NULL) {
61390+ spin_unlock(&gr_learn_lock);
61391+ return;
61392+ }
61393+
61394+ va_start(args, fmt);
61395+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61396+ va_end(args);
61397+
61398+ learn_buffer_len += len + 1;
61399+
61400+ spin_unlock(&gr_learn_lock);
61401+ wake_up_interruptible(&learn_wait);
61402+
61403+ return;
61404+}
61405+
61406+static int
61407+open_learn(struct inode *inode, struct file *file)
61408+{
61409+ if (file->f_mode & FMODE_READ && gr_learn_attached)
61410+ return -EBUSY;
61411+ if (file->f_mode & FMODE_READ) {
61412+ int retval = 0;
61413+ mutex_lock(&gr_learn_user_mutex);
61414+ if (learn_buffer == NULL)
61415+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61416+ if (learn_buffer_user == NULL)
61417+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61418+ if (learn_buffer == NULL) {
61419+ retval = -ENOMEM;
61420+ goto out_error;
61421+ }
61422+ if (learn_buffer_user == NULL) {
61423+ retval = -ENOMEM;
61424+ goto out_error;
61425+ }
61426+ learn_buffer_len = 0;
61427+ learn_buffer_user_len = 0;
61428+ gr_learn_attached = 1;
61429+out_error:
61430+ mutex_unlock(&gr_learn_user_mutex);
61431+ return retval;
61432+ }
61433+ return 0;
61434+}
61435+
61436+static int
61437+close_learn(struct inode *inode, struct file *file)
61438+{
61439+ if (file->f_mode & FMODE_READ) {
61440+ char *tmp = NULL;
61441+ mutex_lock(&gr_learn_user_mutex);
61442+ spin_lock(&gr_learn_lock);
61443+ tmp = learn_buffer;
61444+ learn_buffer = NULL;
61445+ spin_unlock(&gr_learn_lock);
61446+ if (tmp)
61447+ vfree(tmp);
61448+ if (learn_buffer_user != NULL) {
61449+ vfree(learn_buffer_user);
61450+ learn_buffer_user = NULL;
61451+ }
61452+ learn_buffer_len = 0;
61453+ learn_buffer_user_len = 0;
61454+ gr_learn_attached = 0;
61455+ mutex_unlock(&gr_learn_user_mutex);
61456+ }
61457+
61458+ return 0;
61459+}
61460+
61461+const struct file_operations grsec_fops = {
61462+ .read = read_learn,
61463+ .write = write_grsec_handler,
61464+ .open = open_learn,
61465+ .release = close_learn,
61466+ .poll = poll_learn,
61467+};
61468diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
61469new file mode 100644
61470index 0000000..70b2179
61471--- /dev/null
61472+++ b/grsecurity/gracl_res.c
61473@@ -0,0 +1,67 @@
61474+#include <linux/kernel.h>
61475+#include <linux/sched.h>
61476+#include <linux/gracl.h>
61477+#include <linux/grinternal.h>
61478+
61479+static const char *restab_log[] = {
61480+ [RLIMIT_CPU] = "RLIMIT_CPU",
61481+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61482+ [RLIMIT_DATA] = "RLIMIT_DATA",
61483+ [RLIMIT_STACK] = "RLIMIT_STACK",
61484+ [RLIMIT_CORE] = "RLIMIT_CORE",
61485+ [RLIMIT_RSS] = "RLIMIT_RSS",
61486+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
61487+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61488+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61489+ [RLIMIT_AS] = "RLIMIT_AS",
61490+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61491+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61492+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61493+ [RLIMIT_NICE] = "RLIMIT_NICE",
61494+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61495+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61496+ [GR_CRASH_RES] = "RLIMIT_CRASH"
61497+};
61498+
61499+void
61500+gr_log_resource(const struct task_struct *task,
61501+ const int res, const unsigned long wanted, const int gt)
61502+{
61503+ const struct cred *cred;
61504+ unsigned long rlim;
61505+
61506+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
61507+ return;
61508+
61509+ // not yet supported resource
61510+ if (unlikely(!restab_log[res]))
61511+ return;
61512+
61513+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61514+ rlim = task->signal->rlim[res].rlim_max;
61515+ else
61516+ rlim = task->signal->rlim[res].rlim_cur;
61517+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61518+ return;
61519+
61520+ rcu_read_lock();
61521+ cred = __task_cred(task);
61522+
61523+ if (res == RLIMIT_NPROC &&
61524+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
61525+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
61526+ goto out_rcu_unlock;
61527+ else if (res == RLIMIT_MEMLOCK &&
61528+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
61529+ goto out_rcu_unlock;
61530+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
61531+ goto out_rcu_unlock;
61532+ rcu_read_unlock();
61533+
61534+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
61535+
61536+ return;
61537+out_rcu_unlock:
61538+ rcu_read_unlock();
61539+ return;
61540+}
61541diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
61542new file mode 100644
61543index 0000000..1d1b734
61544--- /dev/null
61545+++ b/grsecurity/gracl_segv.c
61546@@ -0,0 +1,284 @@
61547+#include <linux/kernel.h>
61548+#include <linux/mm.h>
61549+#include <asm/uaccess.h>
61550+#include <asm/errno.h>
61551+#include <asm/mman.h>
61552+#include <net/sock.h>
61553+#include <linux/file.h>
61554+#include <linux/fs.h>
61555+#include <linux/net.h>
61556+#include <linux/in.h>
61557+#include <linux/smp_lock.h>
61558+#include <linux/slab.h>
61559+#include <linux/types.h>
61560+#include <linux/sched.h>
61561+#include <linux/timer.h>
61562+#include <linux/gracl.h>
61563+#include <linux/grsecurity.h>
61564+#include <linux/grinternal.h>
61565+
61566+static struct crash_uid *uid_set;
61567+static unsigned short uid_used;
61568+static DEFINE_SPINLOCK(gr_uid_lock);
61569+extern rwlock_t gr_inode_lock;
61570+extern struct acl_subject_label *
61571+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
61572+ struct acl_role_label *role);
61573+extern int gr_fake_force_sig(int sig, struct task_struct *t);
61574+
61575+int
61576+gr_init_uidset(void)
61577+{
61578+ uid_set =
61579+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
61580+ uid_used = 0;
61581+
61582+ return uid_set ? 1 : 0;
61583+}
61584+
61585+void
61586+gr_free_uidset(void)
61587+{
61588+ if (uid_set)
61589+ kfree(uid_set);
61590+
61591+ return;
61592+}
61593+
61594+int
61595+gr_find_uid(const uid_t uid)
61596+{
61597+ struct crash_uid *tmp = uid_set;
61598+ uid_t buid;
61599+ int low = 0, high = uid_used - 1, mid;
61600+
61601+ while (high >= low) {
61602+ mid = (low + high) >> 1;
61603+ buid = tmp[mid].uid;
61604+ if (buid == uid)
61605+ return mid;
61606+ if (buid > uid)
61607+ high = mid - 1;
61608+ if (buid < uid)
61609+ low = mid + 1;
61610+ }
61611+
61612+ return -1;
61613+}
61614+
61615+static __inline__ void
61616+gr_insertsort(void)
61617+{
61618+ unsigned short i, j;
61619+ struct crash_uid index;
61620+
61621+ for (i = 1; i < uid_used; i++) {
61622+ index = uid_set[i];
61623+ j = i;
61624+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
61625+ uid_set[j] = uid_set[j - 1];
61626+ j--;
61627+ }
61628+ uid_set[j] = index;
61629+ }
61630+
61631+ return;
61632+}
61633+
61634+static __inline__ void
61635+gr_insert_uid(const uid_t uid, const unsigned long expires)
61636+{
61637+ int loc;
61638+
61639+ if (uid_used == GR_UIDTABLE_MAX)
61640+ return;
61641+
61642+ loc = gr_find_uid(uid);
61643+
61644+ if (loc >= 0) {
61645+ uid_set[loc].expires = expires;
61646+ return;
61647+ }
61648+
61649+ uid_set[uid_used].uid = uid;
61650+ uid_set[uid_used].expires = expires;
61651+ uid_used++;
61652+
61653+ gr_insertsort();
61654+
61655+ return;
61656+}
61657+
61658+void
61659+gr_remove_uid(const unsigned short loc)
61660+{
61661+ unsigned short i;
61662+
61663+ for (i = loc + 1; i < uid_used; i++)
61664+ uid_set[i - 1] = uid_set[i];
61665+
61666+ uid_used--;
61667+
61668+ return;
61669+}
61670+
61671+int
61672+gr_check_crash_uid(const uid_t uid)
61673+{
61674+ int loc;
61675+ int ret = 0;
61676+
61677+ if (unlikely(!gr_acl_is_enabled()))
61678+ return 0;
61679+
61680+ spin_lock(&gr_uid_lock);
61681+ loc = gr_find_uid(uid);
61682+
61683+ if (loc < 0)
61684+ goto out_unlock;
61685+
61686+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
61687+ gr_remove_uid(loc);
61688+ else
61689+ ret = 1;
61690+
61691+out_unlock:
61692+ spin_unlock(&gr_uid_lock);
61693+ return ret;
61694+}
61695+
61696+static __inline__ int
61697+proc_is_setxid(const struct cred *cred)
61698+{
61699+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
61700+ cred->uid != cred->fsuid)
61701+ return 1;
61702+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
61703+ cred->gid != cred->fsgid)
61704+ return 1;
61705+
61706+ return 0;
61707+}
61708+
61709+void
61710+gr_handle_crash(struct task_struct *task, const int sig)
61711+{
61712+ struct acl_subject_label *curr;
61713+ struct task_struct *tsk, *tsk2;
61714+ const struct cred *cred;
61715+ const struct cred *cred2;
61716+
61717+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
61718+ return;
61719+
61720+ if (unlikely(!gr_acl_is_enabled()))
61721+ return;
61722+
61723+ curr = task->acl;
61724+
61725+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
61726+ return;
61727+
61728+ if (time_before_eq(curr->expires, get_seconds())) {
61729+ curr->expires = 0;
61730+ curr->crashes = 0;
61731+ }
61732+
61733+ curr->crashes++;
61734+
61735+ if (!curr->expires)
61736+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
61737+
61738+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61739+ time_after(curr->expires, get_seconds())) {
61740+ rcu_read_lock();
61741+ cred = __task_cred(task);
61742+ if (cred->uid && proc_is_setxid(cred)) {
61743+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61744+ spin_lock(&gr_uid_lock);
61745+ gr_insert_uid(cred->uid, curr->expires);
61746+ spin_unlock(&gr_uid_lock);
61747+ curr->expires = 0;
61748+ curr->crashes = 0;
61749+ read_lock(&tasklist_lock);
61750+ do_each_thread(tsk2, tsk) {
61751+ cred2 = __task_cred(tsk);
61752+ if (tsk != task && cred2->uid == cred->uid)
61753+ gr_fake_force_sig(SIGKILL, tsk);
61754+ } while_each_thread(tsk2, tsk);
61755+ read_unlock(&tasklist_lock);
61756+ } else {
61757+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61758+ read_lock(&tasklist_lock);
61759+ read_lock(&grsec_exec_file_lock);
61760+ do_each_thread(tsk2, tsk) {
61761+ if (likely(tsk != task)) {
61762+ // if this thread has the same subject as the one that triggered
61763+ // RES_CRASH and it's the same binary, kill it
61764+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
61765+ gr_fake_force_sig(SIGKILL, tsk);
61766+ }
61767+ } while_each_thread(tsk2, tsk);
61768+ read_unlock(&grsec_exec_file_lock);
61769+ read_unlock(&tasklist_lock);
61770+ }
61771+ rcu_read_unlock();
61772+ }
61773+
61774+ return;
61775+}
61776+
61777+int
61778+gr_check_crash_exec(const struct file *filp)
61779+{
61780+ struct acl_subject_label *curr;
61781+
61782+ if (unlikely(!gr_acl_is_enabled()))
61783+ return 0;
61784+
61785+ read_lock(&gr_inode_lock);
61786+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
61787+ filp->f_path.dentry->d_inode->i_sb->s_dev,
61788+ current->role);
61789+ read_unlock(&gr_inode_lock);
61790+
61791+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
61792+ (!curr->crashes && !curr->expires))
61793+ return 0;
61794+
61795+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61796+ time_after(curr->expires, get_seconds()))
61797+ return 1;
61798+ else if (time_before_eq(curr->expires, get_seconds())) {
61799+ curr->crashes = 0;
61800+ curr->expires = 0;
61801+ }
61802+
61803+ return 0;
61804+}
61805+
61806+void
61807+gr_handle_alertkill(struct task_struct *task)
61808+{
61809+ struct acl_subject_label *curracl;
61810+ __u32 curr_ip;
61811+ struct task_struct *p, *p2;
61812+
61813+ if (unlikely(!gr_acl_is_enabled()))
61814+ return;
61815+
61816+ curracl = task->acl;
61817+ curr_ip = task->signal->curr_ip;
61818+
61819+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
61820+ read_lock(&tasklist_lock);
61821+ do_each_thread(p2, p) {
61822+ if (p->signal->curr_ip == curr_ip)
61823+ gr_fake_force_sig(SIGKILL, p);
61824+ } while_each_thread(p2, p);
61825+ read_unlock(&tasklist_lock);
61826+ } else if (curracl->mode & GR_KILLPROC)
61827+ gr_fake_force_sig(SIGKILL, task);
61828+
61829+ return;
61830+}
61831diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
61832new file mode 100644
61833index 0000000..9d83a69
61834--- /dev/null
61835+++ b/grsecurity/gracl_shm.c
61836@@ -0,0 +1,40 @@
61837+#include <linux/kernel.h>
61838+#include <linux/mm.h>
61839+#include <linux/sched.h>
61840+#include <linux/file.h>
61841+#include <linux/ipc.h>
61842+#include <linux/gracl.h>
61843+#include <linux/grsecurity.h>
61844+#include <linux/grinternal.h>
61845+
61846+int
61847+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61848+ const time_t shm_createtime, const uid_t cuid, const int shmid)
61849+{
61850+ struct task_struct *task;
61851+
61852+ if (!gr_acl_is_enabled())
61853+ return 1;
61854+
61855+ rcu_read_lock();
61856+ read_lock(&tasklist_lock);
61857+
61858+ task = find_task_by_vpid(shm_cprid);
61859+
61860+ if (unlikely(!task))
61861+ task = find_task_by_vpid(shm_lapid);
61862+
61863+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
61864+ (task->pid == shm_lapid)) &&
61865+ (task->acl->mode & GR_PROTSHM) &&
61866+ (task->acl != current->acl))) {
61867+ read_unlock(&tasklist_lock);
61868+ rcu_read_unlock();
61869+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
61870+ return 0;
61871+ }
61872+ read_unlock(&tasklist_lock);
61873+ rcu_read_unlock();
61874+
61875+ return 1;
61876+}
61877diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
61878new file mode 100644
61879index 0000000..bc0be01
61880--- /dev/null
61881+++ b/grsecurity/grsec_chdir.c
61882@@ -0,0 +1,19 @@
61883+#include <linux/kernel.h>
61884+#include <linux/sched.h>
61885+#include <linux/fs.h>
61886+#include <linux/file.h>
61887+#include <linux/grsecurity.h>
61888+#include <linux/grinternal.h>
61889+
61890+void
61891+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
61892+{
61893+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61894+ if ((grsec_enable_chdir && grsec_enable_group &&
61895+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
61896+ !grsec_enable_group)) {
61897+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
61898+ }
61899+#endif
61900+ return;
61901+}
61902diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
61903new file mode 100644
61904index 0000000..197bdd5
61905--- /dev/null
61906+++ b/grsecurity/grsec_chroot.c
61907@@ -0,0 +1,386 @@
61908+#include <linux/kernel.h>
61909+#include <linux/module.h>
61910+#include <linux/sched.h>
61911+#include <linux/file.h>
61912+#include <linux/fs.h>
61913+#include <linux/mount.h>
61914+#include <linux/types.h>
61915+#include <linux/pid_namespace.h>
61916+#include <linux/grsecurity.h>
61917+#include <linux/grinternal.h>
61918+
61919+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
61920+{
61921+#ifdef CONFIG_GRKERNSEC
61922+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
61923+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
61924+ task->gr_is_chrooted = 1;
61925+ else
61926+ task->gr_is_chrooted = 0;
61927+
61928+ task->gr_chroot_dentry = path->dentry;
61929+#endif
61930+ return;
61931+}
61932+
61933+void gr_clear_chroot_entries(struct task_struct *task)
61934+{
61935+#ifdef CONFIG_GRKERNSEC
61936+ task->gr_is_chrooted = 0;
61937+ task->gr_chroot_dentry = NULL;
61938+#endif
61939+ return;
61940+}
61941+
61942+int
61943+gr_handle_chroot_unix(const pid_t pid)
61944+{
61945+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61946+ struct task_struct *p;
61947+
61948+ if (unlikely(!grsec_enable_chroot_unix))
61949+ return 1;
61950+
61951+ if (likely(!proc_is_chrooted(current)))
61952+ return 1;
61953+
61954+ rcu_read_lock();
61955+ read_lock(&tasklist_lock);
61956+
61957+ p = find_task_by_vpid_unrestricted(pid);
61958+ if (unlikely(p && !have_same_root(current, p))) {
61959+ read_unlock(&tasklist_lock);
61960+ rcu_read_unlock();
61961+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
61962+ return 0;
61963+ }
61964+ read_unlock(&tasklist_lock);
61965+ rcu_read_unlock();
61966+#endif
61967+ return 1;
61968+}
61969+
61970+int
61971+gr_handle_chroot_nice(void)
61972+{
61973+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61974+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
61975+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
61976+ return -EPERM;
61977+ }
61978+#endif
61979+ return 0;
61980+}
61981+
61982+int
61983+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
61984+{
61985+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61986+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
61987+ && proc_is_chrooted(current)) {
61988+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
61989+ return -EACCES;
61990+ }
61991+#endif
61992+ return 0;
61993+}
61994+
61995+int
61996+gr_handle_chroot_rawio(const struct inode *inode)
61997+{
61998+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61999+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
62000+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
62001+ return 1;
62002+#endif
62003+ return 0;
62004+}
62005+
62006+int
62007+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
62008+{
62009+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62010+ struct task_struct *p;
62011+ int ret = 0;
62012+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
62013+ return ret;
62014+
62015+ read_lock(&tasklist_lock);
62016+ do_each_pid_task(pid, type, p) {
62017+ if (!have_same_root(current, p)) {
62018+ ret = 1;
62019+ goto out;
62020+ }
62021+ } while_each_pid_task(pid, type, p);
62022+out:
62023+ read_unlock(&tasklist_lock);
62024+ return ret;
62025+#endif
62026+ return 0;
62027+}
62028+
62029+int
62030+gr_pid_is_chrooted(struct task_struct *p)
62031+{
62032+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62033+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
62034+ return 0;
62035+
62036+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
62037+ !have_same_root(current, p)) {
62038+ return 1;
62039+ }
62040+#endif
62041+ return 0;
62042+}
62043+
62044+EXPORT_SYMBOL(gr_pid_is_chrooted);
62045+
62046+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
62047+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
62048+{
62049+ struct dentry *dentry = (struct dentry *)u_dentry;
62050+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
62051+ struct dentry *realroot;
62052+ struct vfsmount *realrootmnt;
62053+ struct dentry *currentroot;
62054+ struct vfsmount *currentmnt;
62055+ struct task_struct *reaper = &init_task;
62056+ int ret = 1;
62057+
62058+ read_lock(&reaper->fs->lock);
62059+ realrootmnt = mntget(reaper->fs->root.mnt);
62060+ realroot = dget(reaper->fs->root.dentry);
62061+ read_unlock(&reaper->fs->lock);
62062+
62063+ read_lock(&current->fs->lock);
62064+ currentmnt = mntget(current->fs->root.mnt);
62065+ currentroot = dget(current->fs->root.dentry);
62066+ read_unlock(&current->fs->lock);
62067+
62068+ spin_lock(&dcache_lock);
62069+ for (;;) {
62070+ if (unlikely((dentry == realroot && mnt == realrootmnt)
62071+ || (dentry == currentroot && mnt == currentmnt)))
62072+ break;
62073+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
62074+ if (mnt->mnt_parent == mnt)
62075+ break;
62076+ dentry = mnt->mnt_mountpoint;
62077+ mnt = mnt->mnt_parent;
62078+ continue;
62079+ }
62080+ dentry = dentry->d_parent;
62081+ }
62082+ spin_unlock(&dcache_lock);
62083+
62084+ dput(currentroot);
62085+ mntput(currentmnt);
62086+
62087+ /* access is outside of chroot */
62088+ if (dentry == realroot && mnt == realrootmnt)
62089+ ret = 0;
62090+
62091+ dput(realroot);
62092+ mntput(realrootmnt);
62093+ return ret;
62094+}
62095+#endif
62096+
62097+int
62098+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62099+{
62100+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62101+ if (!grsec_enable_chroot_fchdir)
62102+ return 1;
62103+
62104+ if (!proc_is_chrooted(current))
62105+ return 1;
62106+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62107+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62108+ return 0;
62109+ }
62110+#endif
62111+ return 1;
62112+}
62113+
62114+int
62115+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62116+ const time_t shm_createtime)
62117+{
62118+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62119+ struct task_struct *p;
62120+ time_t starttime;
62121+
62122+ if (unlikely(!grsec_enable_chroot_shmat))
62123+ return 1;
62124+
62125+ if (likely(!proc_is_chrooted(current)))
62126+ return 1;
62127+
62128+ rcu_read_lock();
62129+ read_lock(&tasklist_lock);
62130+
62131+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62132+ starttime = p->start_time.tv_sec;
62133+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62134+ if (have_same_root(current, p)) {
62135+ goto allow;
62136+ } else {
62137+ read_unlock(&tasklist_lock);
62138+ rcu_read_unlock();
62139+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62140+ return 0;
62141+ }
62142+ }
62143+ /* creator exited, pid reuse, fall through to next check */
62144+ }
62145+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62146+ if (unlikely(!have_same_root(current, p))) {
62147+ read_unlock(&tasklist_lock);
62148+ rcu_read_unlock();
62149+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62150+ return 0;
62151+ }
62152+ }
62153+
62154+allow:
62155+ read_unlock(&tasklist_lock);
62156+ rcu_read_unlock();
62157+#endif
62158+ return 1;
62159+}
62160+
62161+void
62162+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62163+{
62164+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62165+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62166+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62167+#endif
62168+ return;
62169+}
62170+
62171+int
62172+gr_handle_chroot_mknod(const struct dentry *dentry,
62173+ const struct vfsmount *mnt, const int mode)
62174+{
62175+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62176+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62177+ proc_is_chrooted(current)) {
62178+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62179+ return -EPERM;
62180+ }
62181+#endif
62182+ return 0;
62183+}
62184+
62185+int
62186+gr_handle_chroot_mount(const struct dentry *dentry,
62187+ const struct vfsmount *mnt, const char *dev_name)
62188+{
62189+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62190+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62191+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62192+ return -EPERM;
62193+ }
62194+#endif
62195+ return 0;
62196+}
62197+
62198+int
62199+gr_handle_chroot_pivot(void)
62200+{
62201+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62202+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62203+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62204+ return -EPERM;
62205+ }
62206+#endif
62207+ return 0;
62208+}
62209+
62210+int
62211+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62212+{
62213+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62214+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62215+ !gr_is_outside_chroot(dentry, mnt)) {
62216+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62217+ return -EPERM;
62218+ }
62219+#endif
62220+ return 0;
62221+}
62222+
62223+extern const char *captab_log[];
62224+extern int captab_log_entries;
62225+
62226+int
62227+gr_chroot_is_capable(const int cap)
62228+{
62229+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62230+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62231+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62232+ if (cap_raised(chroot_caps, cap)) {
62233+ const struct cred *creds = current_cred();
62234+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62235+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62236+ }
62237+ return 0;
62238+ }
62239+ }
62240+#endif
62241+ return 1;
62242+}
62243+
62244+int
62245+gr_chroot_is_capable_nolog(const int cap)
62246+{
62247+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62248+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62249+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62250+ if (cap_raised(chroot_caps, cap)) {
62251+ return 0;
62252+ }
62253+ }
62254+#endif
62255+ return 1;
62256+}
62257+
62258+int
62259+gr_handle_chroot_sysctl(const int op)
62260+{
62261+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62262+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62263+ && (op & MAY_WRITE))
62264+ return -EACCES;
62265+#endif
62266+ return 0;
62267+}
62268+
62269+void
62270+gr_handle_chroot_chdir(struct path *path)
62271+{
62272+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62273+ if (grsec_enable_chroot_chdir)
62274+ set_fs_pwd(current->fs, path);
62275+#endif
62276+ return;
62277+}
62278+
62279+int
62280+gr_handle_chroot_chmod(const struct dentry *dentry,
62281+ const struct vfsmount *mnt, const int mode)
62282+{
62283+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62284+ /* allow chmod +s on directories, but not on files */
62285+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62286+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62287+ proc_is_chrooted(current)) {
62288+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62289+ return -EPERM;
62290+ }
62291+#endif
62292+ return 0;
62293+}
62294diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62295new file mode 100644
62296index 0000000..b81db5b
62297--- /dev/null
62298+++ b/grsecurity/grsec_disabled.c
62299@@ -0,0 +1,439 @@
62300+#include <linux/kernel.h>
62301+#include <linux/module.h>
62302+#include <linux/sched.h>
62303+#include <linux/file.h>
62304+#include <linux/fs.h>
62305+#include <linux/kdev_t.h>
62306+#include <linux/net.h>
62307+#include <linux/in.h>
62308+#include <linux/ip.h>
62309+#include <linux/skbuff.h>
62310+#include <linux/sysctl.h>
62311+
62312+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62313+void
62314+pax_set_initial_flags(struct linux_binprm *bprm)
62315+{
62316+ return;
62317+}
62318+#endif
62319+
62320+#ifdef CONFIG_SYSCTL
62321+__u32
62322+gr_handle_sysctl(const struct ctl_table * table, const int op)
62323+{
62324+ return 0;
62325+}
62326+#endif
62327+
62328+#ifdef CONFIG_TASKSTATS
62329+int gr_is_taskstats_denied(int pid)
62330+{
62331+ return 0;
62332+}
62333+#endif
62334+
62335+int
62336+gr_acl_is_enabled(void)
62337+{
62338+ return 0;
62339+}
62340+
62341+void
62342+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62343+{
62344+ return;
62345+}
62346+
62347+int
62348+gr_handle_rawio(const struct inode *inode)
62349+{
62350+ return 0;
62351+}
62352+
62353+void
62354+gr_acl_handle_psacct(struct task_struct *task, const long code)
62355+{
62356+ return;
62357+}
62358+
62359+int
62360+gr_handle_ptrace(struct task_struct *task, const long request)
62361+{
62362+ return 0;
62363+}
62364+
62365+int
62366+gr_handle_proc_ptrace(struct task_struct *task)
62367+{
62368+ return 0;
62369+}
62370+
62371+void
62372+gr_learn_resource(const struct task_struct *task,
62373+ const int res, const unsigned long wanted, const int gt)
62374+{
62375+ return;
62376+}
62377+
62378+int
62379+gr_set_acls(const int type)
62380+{
62381+ return 0;
62382+}
62383+
62384+int
62385+gr_check_hidden_task(const struct task_struct *tsk)
62386+{
62387+ return 0;
62388+}
62389+
62390+int
62391+gr_check_protected_task(const struct task_struct *task)
62392+{
62393+ return 0;
62394+}
62395+
62396+int
62397+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62398+{
62399+ return 0;
62400+}
62401+
62402+void
62403+gr_copy_label(struct task_struct *tsk)
62404+{
62405+ return;
62406+}
62407+
62408+void
62409+gr_set_pax_flags(struct task_struct *task)
62410+{
62411+ return;
62412+}
62413+
62414+int
62415+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62416+ const int unsafe_share)
62417+{
62418+ return 0;
62419+}
62420+
62421+void
62422+gr_handle_delete(const ino_t ino, const dev_t dev)
62423+{
62424+ return;
62425+}
62426+
62427+void
62428+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62429+{
62430+ return;
62431+}
62432+
62433+void
62434+gr_handle_crash(struct task_struct *task, const int sig)
62435+{
62436+ return;
62437+}
62438+
62439+int
62440+gr_check_crash_exec(const struct file *filp)
62441+{
62442+ return 0;
62443+}
62444+
62445+int
62446+gr_check_crash_uid(const uid_t uid)
62447+{
62448+ return 0;
62449+}
62450+
62451+void
62452+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62453+ struct dentry *old_dentry,
62454+ struct dentry *new_dentry,
62455+ struct vfsmount *mnt, const __u8 replace)
62456+{
62457+ return;
62458+}
62459+
62460+int
62461+gr_search_socket(const int family, const int type, const int protocol)
62462+{
62463+ return 1;
62464+}
62465+
62466+int
62467+gr_search_connectbind(const int mode, const struct socket *sock,
62468+ const struct sockaddr_in *addr)
62469+{
62470+ return 0;
62471+}
62472+
62473+void
62474+gr_handle_alertkill(struct task_struct *task)
62475+{
62476+ return;
62477+}
62478+
62479+__u32
62480+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
62481+{
62482+ return 1;
62483+}
62484+
62485+__u32
62486+gr_acl_handle_hidden_file(const struct dentry * dentry,
62487+ const struct vfsmount * mnt)
62488+{
62489+ return 1;
62490+}
62491+
62492+__u32
62493+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62494+ int acc_mode)
62495+{
62496+ return 1;
62497+}
62498+
62499+__u32
62500+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62501+{
62502+ return 1;
62503+}
62504+
62505+__u32
62506+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62507+{
62508+ return 1;
62509+}
62510+
62511+int
62512+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62513+ unsigned int *vm_flags)
62514+{
62515+ return 1;
62516+}
62517+
62518+__u32
62519+gr_acl_handle_truncate(const struct dentry * dentry,
62520+ const struct vfsmount * mnt)
62521+{
62522+ return 1;
62523+}
62524+
62525+__u32
62526+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
62527+{
62528+ return 1;
62529+}
62530+
62531+__u32
62532+gr_acl_handle_access(const struct dentry * dentry,
62533+ const struct vfsmount * mnt, const int fmode)
62534+{
62535+ return 1;
62536+}
62537+
62538+__u32
62539+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
62540+ mode_t mode)
62541+{
62542+ return 1;
62543+}
62544+
62545+__u32
62546+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
62547+ mode_t mode)
62548+{
62549+ return 1;
62550+}
62551+
62552+__u32
62553+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
62554+{
62555+ return 1;
62556+}
62557+
62558+__u32
62559+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
62560+{
62561+ return 1;
62562+}
62563+
62564+void
62565+grsecurity_init(void)
62566+{
62567+ return;
62568+}
62569+
62570+__u32
62571+gr_acl_handle_mknod(const struct dentry * new_dentry,
62572+ const struct dentry * parent_dentry,
62573+ const struct vfsmount * parent_mnt,
62574+ const int mode)
62575+{
62576+ return 1;
62577+}
62578+
62579+__u32
62580+gr_acl_handle_mkdir(const struct dentry * new_dentry,
62581+ const struct dentry * parent_dentry,
62582+ const struct vfsmount * parent_mnt)
62583+{
62584+ return 1;
62585+}
62586+
62587+__u32
62588+gr_acl_handle_symlink(const struct dentry * new_dentry,
62589+ const struct dentry * parent_dentry,
62590+ const struct vfsmount * parent_mnt, const char *from)
62591+{
62592+ return 1;
62593+}
62594+
62595+__u32
62596+gr_acl_handle_link(const struct dentry * new_dentry,
62597+ const struct dentry * parent_dentry,
62598+ const struct vfsmount * parent_mnt,
62599+ const struct dentry * old_dentry,
62600+ const struct vfsmount * old_mnt, const char *to)
62601+{
62602+ return 1;
62603+}
62604+
62605+int
62606+gr_acl_handle_rename(const struct dentry *new_dentry,
62607+ const struct dentry *parent_dentry,
62608+ const struct vfsmount *parent_mnt,
62609+ const struct dentry *old_dentry,
62610+ const struct inode *old_parent_inode,
62611+ const struct vfsmount *old_mnt, const char *newname)
62612+{
62613+ return 0;
62614+}
62615+
62616+int
62617+gr_acl_handle_filldir(const struct file *file, const char *name,
62618+ const int namelen, const ino_t ino)
62619+{
62620+ return 1;
62621+}
62622+
62623+int
62624+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62625+ const time_t shm_createtime, const uid_t cuid, const int shmid)
62626+{
62627+ return 1;
62628+}
62629+
62630+int
62631+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
62632+{
62633+ return 0;
62634+}
62635+
62636+int
62637+gr_search_accept(const struct socket *sock)
62638+{
62639+ return 0;
62640+}
62641+
62642+int
62643+gr_search_listen(const struct socket *sock)
62644+{
62645+ return 0;
62646+}
62647+
62648+int
62649+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
62650+{
62651+ return 0;
62652+}
62653+
62654+__u32
62655+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
62656+{
62657+ return 1;
62658+}
62659+
62660+__u32
62661+gr_acl_handle_creat(const struct dentry * dentry,
62662+ const struct dentry * p_dentry,
62663+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62664+ const int imode)
62665+{
62666+ return 1;
62667+}
62668+
62669+void
62670+gr_acl_handle_exit(void)
62671+{
62672+ return;
62673+}
62674+
62675+int
62676+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62677+{
62678+ return 1;
62679+}
62680+
62681+void
62682+gr_set_role_label(const uid_t uid, const gid_t gid)
62683+{
62684+ return;
62685+}
62686+
62687+int
62688+gr_acl_handle_procpidmem(const struct task_struct *task)
62689+{
62690+ return 0;
62691+}
62692+
62693+int
62694+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
62695+{
62696+ return 0;
62697+}
62698+
62699+int
62700+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
62701+{
62702+ return 0;
62703+}
62704+
62705+void
62706+gr_set_kernel_label(struct task_struct *task)
62707+{
62708+ return;
62709+}
62710+
62711+int
62712+gr_check_user_change(int real, int effective, int fs)
62713+{
62714+ return 0;
62715+}
62716+
62717+int
62718+gr_check_group_change(int real, int effective, int fs)
62719+{
62720+ return 0;
62721+}
62722+
62723+int gr_acl_enable_at_secure(void)
62724+{
62725+ return 0;
62726+}
62727+
62728+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62729+{
62730+ return dentry->d_inode->i_sb->s_dev;
62731+}
62732+
62733+EXPORT_SYMBOL(gr_learn_resource);
62734+EXPORT_SYMBOL(gr_set_kernel_label);
62735+#ifdef CONFIG_SECURITY
62736+EXPORT_SYMBOL(gr_check_user_change);
62737+EXPORT_SYMBOL(gr_check_group_change);
62738+#endif
62739diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
62740new file mode 100644
62741index 0000000..a96e155
62742--- /dev/null
62743+++ b/grsecurity/grsec_exec.c
62744@@ -0,0 +1,204 @@
62745+#include <linux/kernel.h>
62746+#include <linux/sched.h>
62747+#include <linux/file.h>
62748+#include <linux/binfmts.h>
62749+#include <linux/smp_lock.h>
62750+#include <linux/fs.h>
62751+#include <linux/types.h>
62752+#include <linux/grdefs.h>
62753+#include <linux/grinternal.h>
62754+#include <linux/capability.h>
62755+#include <linux/compat.h>
62756+#include <linux/module.h>
62757+
62758+#include <asm/uaccess.h>
62759+
62760+#ifdef CONFIG_GRKERNSEC_EXECLOG
62761+static char gr_exec_arg_buf[132];
62762+static DEFINE_MUTEX(gr_exec_arg_mutex);
62763+#endif
62764+
62765+void
62766+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
62767+{
62768+#ifdef CONFIG_GRKERNSEC_EXECLOG
62769+ char *grarg = gr_exec_arg_buf;
62770+ unsigned int i, x, execlen = 0;
62771+ char c;
62772+
62773+ if (!((grsec_enable_execlog && grsec_enable_group &&
62774+ in_group_p(grsec_audit_gid))
62775+ || (grsec_enable_execlog && !grsec_enable_group)))
62776+ return;
62777+
62778+ mutex_lock(&gr_exec_arg_mutex);
62779+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62780+
62781+ if (unlikely(argv == NULL))
62782+ goto log;
62783+
62784+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62785+ const char __user *p;
62786+ unsigned int len;
62787+
62788+ if (copy_from_user(&p, argv + i, sizeof(p)))
62789+ goto log;
62790+ if (!p)
62791+ goto log;
62792+ len = strnlen_user(p, 128 - execlen);
62793+ if (len > 128 - execlen)
62794+ len = 128 - execlen;
62795+ else if (len > 0)
62796+ len--;
62797+ if (copy_from_user(grarg + execlen, p, len))
62798+ goto log;
62799+
62800+ /* rewrite unprintable characters */
62801+ for (x = 0; x < len; x++) {
62802+ c = *(grarg + execlen + x);
62803+ if (c < 32 || c > 126)
62804+ *(grarg + execlen + x) = ' ';
62805+ }
62806+
62807+ execlen += len;
62808+ *(grarg + execlen) = ' ';
62809+ *(grarg + execlen + 1) = '\0';
62810+ execlen++;
62811+ }
62812+
62813+ log:
62814+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62815+ bprm->file->f_path.mnt, grarg);
62816+ mutex_unlock(&gr_exec_arg_mutex);
62817+#endif
62818+ return;
62819+}
62820+
62821+#ifdef CONFIG_COMPAT
62822+void
62823+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
62824+{
62825+#ifdef CONFIG_GRKERNSEC_EXECLOG
62826+ char *grarg = gr_exec_arg_buf;
62827+ unsigned int i, x, execlen = 0;
62828+ char c;
62829+
62830+ if (!((grsec_enable_execlog && grsec_enable_group &&
62831+ in_group_p(grsec_audit_gid))
62832+ || (grsec_enable_execlog && !grsec_enable_group)))
62833+ return;
62834+
62835+ mutex_lock(&gr_exec_arg_mutex);
62836+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62837+
62838+ if (unlikely(argv == NULL))
62839+ goto log;
62840+
62841+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62842+ compat_uptr_t p;
62843+ unsigned int len;
62844+
62845+ if (get_user(p, argv + i))
62846+ goto log;
62847+ len = strnlen_user(compat_ptr(p), 128 - execlen);
62848+ if (len > 128 - execlen)
62849+ len = 128 - execlen;
62850+ else if (len > 0)
62851+ len--;
62852+ else
62853+ goto log;
62854+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
62855+ goto log;
62856+
62857+ /* rewrite unprintable characters */
62858+ for (x = 0; x < len; x++) {
62859+ c = *(grarg + execlen + x);
62860+ if (c < 32 || c > 126)
62861+ *(grarg + execlen + x) = ' ';
62862+ }
62863+
62864+ execlen += len;
62865+ *(grarg + execlen) = ' ';
62866+ *(grarg + execlen + 1) = '\0';
62867+ execlen++;
62868+ }
62869+
62870+ log:
62871+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62872+ bprm->file->f_path.mnt, grarg);
62873+ mutex_unlock(&gr_exec_arg_mutex);
62874+#endif
62875+ return;
62876+}
62877+#endif
62878+
62879+#ifdef CONFIG_GRKERNSEC
62880+extern int gr_acl_is_capable(const int cap);
62881+extern int gr_acl_is_capable_nolog(const int cap);
62882+extern int gr_chroot_is_capable(const int cap);
62883+extern int gr_chroot_is_capable_nolog(const int cap);
62884+#endif
62885+
62886+const char *captab_log[] = {
62887+ "CAP_CHOWN",
62888+ "CAP_DAC_OVERRIDE",
62889+ "CAP_DAC_READ_SEARCH",
62890+ "CAP_FOWNER",
62891+ "CAP_FSETID",
62892+ "CAP_KILL",
62893+ "CAP_SETGID",
62894+ "CAP_SETUID",
62895+ "CAP_SETPCAP",
62896+ "CAP_LINUX_IMMUTABLE",
62897+ "CAP_NET_BIND_SERVICE",
62898+ "CAP_NET_BROADCAST",
62899+ "CAP_NET_ADMIN",
62900+ "CAP_NET_RAW",
62901+ "CAP_IPC_LOCK",
62902+ "CAP_IPC_OWNER",
62903+ "CAP_SYS_MODULE",
62904+ "CAP_SYS_RAWIO",
62905+ "CAP_SYS_CHROOT",
62906+ "CAP_SYS_PTRACE",
62907+ "CAP_SYS_PACCT",
62908+ "CAP_SYS_ADMIN",
62909+ "CAP_SYS_BOOT",
62910+ "CAP_SYS_NICE",
62911+ "CAP_SYS_RESOURCE",
62912+ "CAP_SYS_TIME",
62913+ "CAP_SYS_TTY_CONFIG",
62914+ "CAP_MKNOD",
62915+ "CAP_LEASE",
62916+ "CAP_AUDIT_WRITE",
62917+ "CAP_AUDIT_CONTROL",
62918+ "CAP_SETFCAP",
62919+ "CAP_MAC_OVERRIDE",
62920+ "CAP_MAC_ADMIN"
62921+};
62922+
62923+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
62924+
62925+int gr_is_capable(const int cap)
62926+{
62927+#ifdef CONFIG_GRKERNSEC
62928+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
62929+ return 1;
62930+ return 0;
62931+#else
62932+ return 1;
62933+#endif
62934+}
62935+
62936+int gr_is_capable_nolog(const int cap)
62937+{
62938+#ifdef CONFIG_GRKERNSEC
62939+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
62940+ return 1;
62941+ return 0;
62942+#else
62943+ return 1;
62944+#endif
62945+}
62946+
62947+EXPORT_SYMBOL(gr_is_capable);
62948+EXPORT_SYMBOL(gr_is_capable_nolog);
62949diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
62950new file mode 100644
62951index 0000000..d3ee748
62952--- /dev/null
62953+++ b/grsecurity/grsec_fifo.c
62954@@ -0,0 +1,24 @@
62955+#include <linux/kernel.h>
62956+#include <linux/sched.h>
62957+#include <linux/fs.h>
62958+#include <linux/file.h>
62959+#include <linux/grinternal.h>
62960+
62961+int
62962+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
62963+ const struct dentry *dir, const int flag, const int acc_mode)
62964+{
62965+#ifdef CONFIG_GRKERNSEC_FIFO
62966+ const struct cred *cred = current_cred();
62967+
62968+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
62969+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
62970+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
62971+ (cred->fsuid != dentry->d_inode->i_uid)) {
62972+ if (!inode_permission(dentry->d_inode, acc_mode))
62973+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
62974+ return -EACCES;
62975+ }
62976+#endif
62977+ return 0;
62978+}
62979diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
62980new file mode 100644
62981index 0000000..8ca18bf
62982--- /dev/null
62983+++ b/grsecurity/grsec_fork.c
62984@@ -0,0 +1,23 @@
62985+#include <linux/kernel.h>
62986+#include <linux/sched.h>
62987+#include <linux/grsecurity.h>
62988+#include <linux/grinternal.h>
62989+#include <linux/errno.h>
62990+
62991+void
62992+gr_log_forkfail(const int retval)
62993+{
62994+#ifdef CONFIG_GRKERNSEC_FORKFAIL
62995+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
62996+ switch (retval) {
62997+ case -EAGAIN:
62998+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
62999+ break;
63000+ case -ENOMEM:
63001+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
63002+ break;
63003+ }
63004+ }
63005+#endif
63006+ return;
63007+}
63008diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
63009new file mode 100644
63010index 0000000..1e995d3
63011--- /dev/null
63012+++ b/grsecurity/grsec_init.c
63013@@ -0,0 +1,278 @@
63014+#include <linux/kernel.h>
63015+#include <linux/sched.h>
63016+#include <linux/mm.h>
63017+#include <linux/smp_lock.h>
63018+#include <linux/gracl.h>
63019+#include <linux/slab.h>
63020+#include <linux/vmalloc.h>
63021+#include <linux/percpu.h>
63022+#include <linux/module.h>
63023+
63024+int grsec_enable_ptrace_readexec;
63025+int grsec_enable_setxid;
63026+int grsec_enable_brute;
63027+int grsec_enable_link;
63028+int grsec_enable_dmesg;
63029+int grsec_enable_harden_ptrace;
63030+int grsec_enable_fifo;
63031+int grsec_enable_execlog;
63032+int grsec_enable_signal;
63033+int grsec_enable_forkfail;
63034+int grsec_enable_audit_ptrace;
63035+int grsec_enable_time;
63036+int grsec_enable_audit_textrel;
63037+int grsec_enable_group;
63038+int grsec_audit_gid;
63039+int grsec_enable_chdir;
63040+int grsec_enable_mount;
63041+int grsec_enable_rofs;
63042+int grsec_enable_chroot_findtask;
63043+int grsec_enable_chroot_mount;
63044+int grsec_enable_chroot_shmat;
63045+int grsec_enable_chroot_fchdir;
63046+int grsec_enable_chroot_double;
63047+int grsec_enable_chroot_pivot;
63048+int grsec_enable_chroot_chdir;
63049+int grsec_enable_chroot_chmod;
63050+int grsec_enable_chroot_mknod;
63051+int grsec_enable_chroot_nice;
63052+int grsec_enable_chroot_execlog;
63053+int grsec_enable_chroot_caps;
63054+int grsec_enable_chroot_sysctl;
63055+int grsec_enable_chroot_unix;
63056+int grsec_enable_tpe;
63057+int grsec_tpe_gid;
63058+int grsec_enable_blackhole;
63059+#ifdef CONFIG_IPV6_MODULE
63060+EXPORT_SYMBOL(grsec_enable_blackhole);
63061+#endif
63062+int grsec_lastack_retries;
63063+int grsec_enable_tpe_all;
63064+int grsec_enable_tpe_invert;
63065+int grsec_enable_socket_all;
63066+int grsec_socket_all_gid;
63067+int grsec_enable_socket_client;
63068+int grsec_socket_client_gid;
63069+int grsec_enable_socket_server;
63070+int grsec_socket_server_gid;
63071+int grsec_resource_logging;
63072+int grsec_disable_privio;
63073+int grsec_enable_log_rwxmaps;
63074+int grsec_lock;
63075+
63076+DEFINE_SPINLOCK(grsec_alert_lock);
63077+unsigned long grsec_alert_wtime = 0;
63078+unsigned long grsec_alert_fyet = 0;
63079+
63080+DEFINE_SPINLOCK(grsec_audit_lock);
63081+
63082+DEFINE_RWLOCK(grsec_exec_file_lock);
63083+
63084+char *gr_shared_page[4];
63085+
63086+char *gr_alert_log_fmt;
63087+char *gr_audit_log_fmt;
63088+char *gr_alert_log_buf;
63089+char *gr_audit_log_buf;
63090+
63091+extern struct gr_arg *gr_usermode;
63092+extern unsigned char *gr_system_salt;
63093+extern unsigned char *gr_system_sum;
63094+
63095+void __init
63096+grsecurity_init(void)
63097+{
63098+ int j;
63099+ /* create the per-cpu shared pages */
63100+
63101+#ifdef CONFIG_X86
63102+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63103+#endif
63104+
63105+ for (j = 0; j < 4; j++) {
63106+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63107+ if (gr_shared_page[j] == NULL) {
63108+ panic("Unable to allocate grsecurity shared page");
63109+ return;
63110+ }
63111+ }
63112+
63113+ /* allocate log buffers */
63114+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63115+ if (!gr_alert_log_fmt) {
63116+ panic("Unable to allocate grsecurity alert log format buffer");
63117+ return;
63118+ }
63119+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63120+ if (!gr_audit_log_fmt) {
63121+ panic("Unable to allocate grsecurity audit log format buffer");
63122+ return;
63123+ }
63124+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63125+ if (!gr_alert_log_buf) {
63126+ panic("Unable to allocate grsecurity alert log buffer");
63127+ return;
63128+ }
63129+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63130+ if (!gr_audit_log_buf) {
63131+ panic("Unable to allocate grsecurity audit log buffer");
63132+ return;
63133+ }
63134+
63135+ /* allocate memory for authentication structure */
63136+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63137+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63138+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63139+
63140+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63141+ panic("Unable to allocate grsecurity authentication structure");
63142+ return;
63143+ }
63144+
63145+
63146+#ifdef CONFIG_GRKERNSEC_IO
63147+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63148+ grsec_disable_privio = 1;
63149+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63150+ grsec_disable_privio = 1;
63151+#else
63152+ grsec_disable_privio = 0;
63153+#endif
63154+#endif
63155+
63156+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63157+ /* for backward compatibility, tpe_invert always defaults to on if
63158+ enabled in the kernel
63159+ */
63160+ grsec_enable_tpe_invert = 1;
63161+#endif
63162+
63163+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63164+#ifndef CONFIG_GRKERNSEC_SYSCTL
63165+ grsec_lock = 1;
63166+#endif
63167+
63168+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63169+ grsec_enable_audit_textrel = 1;
63170+#endif
63171+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63172+ grsec_enable_log_rwxmaps = 1;
63173+#endif
63174+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63175+ grsec_enable_group = 1;
63176+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63177+#endif
63178+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63179+ grsec_enable_chdir = 1;
63180+#endif
63181+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63182+ grsec_enable_harden_ptrace = 1;
63183+#endif
63184+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63185+ grsec_enable_mount = 1;
63186+#endif
63187+#ifdef CONFIG_GRKERNSEC_LINK
63188+ grsec_enable_link = 1;
63189+#endif
63190+#ifdef CONFIG_GRKERNSEC_BRUTE
63191+ grsec_enable_brute = 1;
63192+#endif
63193+#ifdef CONFIG_GRKERNSEC_DMESG
63194+ grsec_enable_dmesg = 1;
63195+#endif
63196+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63197+ grsec_enable_blackhole = 1;
63198+ grsec_lastack_retries = 4;
63199+#endif
63200+#ifdef CONFIG_GRKERNSEC_FIFO
63201+ grsec_enable_fifo = 1;
63202+#endif
63203+#ifdef CONFIG_GRKERNSEC_EXECLOG
63204+ grsec_enable_execlog = 1;
63205+#endif
63206+#ifdef CONFIG_GRKERNSEC_SETXID
63207+ grsec_enable_setxid = 1;
63208+#endif
63209+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63210+ grsec_enable_ptrace_readexec = 1;
63211+#endif
63212+#ifdef CONFIG_GRKERNSEC_SIGNAL
63213+ grsec_enable_signal = 1;
63214+#endif
63215+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63216+ grsec_enable_forkfail = 1;
63217+#endif
63218+#ifdef CONFIG_GRKERNSEC_TIME
63219+ grsec_enable_time = 1;
63220+#endif
63221+#ifdef CONFIG_GRKERNSEC_RESLOG
63222+ grsec_resource_logging = 1;
63223+#endif
63224+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63225+ grsec_enable_chroot_findtask = 1;
63226+#endif
63227+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63228+ grsec_enable_chroot_unix = 1;
63229+#endif
63230+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63231+ grsec_enable_chroot_mount = 1;
63232+#endif
63233+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63234+ grsec_enable_chroot_fchdir = 1;
63235+#endif
63236+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63237+ grsec_enable_chroot_shmat = 1;
63238+#endif
63239+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63240+ grsec_enable_audit_ptrace = 1;
63241+#endif
63242+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63243+ grsec_enable_chroot_double = 1;
63244+#endif
63245+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63246+ grsec_enable_chroot_pivot = 1;
63247+#endif
63248+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63249+ grsec_enable_chroot_chdir = 1;
63250+#endif
63251+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63252+ grsec_enable_chroot_chmod = 1;
63253+#endif
63254+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63255+ grsec_enable_chroot_mknod = 1;
63256+#endif
63257+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63258+ grsec_enable_chroot_nice = 1;
63259+#endif
63260+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63261+ grsec_enable_chroot_execlog = 1;
63262+#endif
63263+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63264+ grsec_enable_chroot_caps = 1;
63265+#endif
63266+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63267+ grsec_enable_chroot_sysctl = 1;
63268+#endif
63269+#ifdef CONFIG_GRKERNSEC_TPE
63270+ grsec_enable_tpe = 1;
63271+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63272+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63273+ grsec_enable_tpe_all = 1;
63274+#endif
63275+#endif
63276+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63277+ grsec_enable_socket_all = 1;
63278+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63279+#endif
63280+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63281+ grsec_enable_socket_client = 1;
63282+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63283+#endif
63284+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63285+ grsec_enable_socket_server = 1;
63286+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63287+#endif
63288+#endif
63289+
63290+ return;
63291+}
63292diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63293new file mode 100644
63294index 0000000..3efe141
63295--- /dev/null
63296+++ b/grsecurity/grsec_link.c
63297@@ -0,0 +1,43 @@
63298+#include <linux/kernel.h>
63299+#include <linux/sched.h>
63300+#include <linux/fs.h>
63301+#include <linux/file.h>
63302+#include <linux/grinternal.h>
63303+
63304+int
63305+gr_handle_follow_link(const struct inode *parent,
63306+ const struct inode *inode,
63307+ const struct dentry *dentry, const struct vfsmount *mnt)
63308+{
63309+#ifdef CONFIG_GRKERNSEC_LINK
63310+ const struct cred *cred = current_cred();
63311+
63312+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63313+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63314+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63315+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63316+ return -EACCES;
63317+ }
63318+#endif
63319+ return 0;
63320+}
63321+
63322+int
63323+gr_handle_hardlink(const struct dentry *dentry,
63324+ const struct vfsmount *mnt,
63325+ struct inode *inode, const int mode, const char *to)
63326+{
63327+#ifdef CONFIG_GRKERNSEC_LINK
63328+ const struct cred *cred = current_cred();
63329+
63330+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63331+ (!S_ISREG(mode) || (mode & S_ISUID) ||
63332+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63333+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63334+ !capable(CAP_FOWNER) && cred->uid) {
63335+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63336+ return -EPERM;
63337+ }
63338+#endif
63339+ return 0;
63340+}
63341diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63342new file mode 100644
63343index 0000000..a45d2e9
63344--- /dev/null
63345+++ b/grsecurity/grsec_log.c
63346@@ -0,0 +1,322 @@
63347+#include <linux/kernel.h>
63348+#include <linux/sched.h>
63349+#include <linux/file.h>
63350+#include <linux/tty.h>
63351+#include <linux/fs.h>
63352+#include <linux/grinternal.h>
63353+
63354+#ifdef CONFIG_TREE_PREEMPT_RCU
63355+#define DISABLE_PREEMPT() preempt_disable()
63356+#define ENABLE_PREEMPT() preempt_enable()
63357+#else
63358+#define DISABLE_PREEMPT()
63359+#define ENABLE_PREEMPT()
63360+#endif
63361+
63362+#define BEGIN_LOCKS(x) \
63363+ DISABLE_PREEMPT(); \
63364+ rcu_read_lock(); \
63365+ read_lock(&tasklist_lock); \
63366+ read_lock(&grsec_exec_file_lock); \
63367+ if (x != GR_DO_AUDIT) \
63368+ spin_lock(&grsec_alert_lock); \
63369+ else \
63370+ spin_lock(&grsec_audit_lock)
63371+
63372+#define END_LOCKS(x) \
63373+ if (x != GR_DO_AUDIT) \
63374+ spin_unlock(&grsec_alert_lock); \
63375+ else \
63376+ spin_unlock(&grsec_audit_lock); \
63377+ read_unlock(&grsec_exec_file_lock); \
63378+ read_unlock(&tasklist_lock); \
63379+ rcu_read_unlock(); \
63380+ ENABLE_PREEMPT(); \
63381+ if (x == GR_DONT_AUDIT) \
63382+ gr_handle_alertkill(current)
63383+
63384+enum {
63385+ FLOODING,
63386+ NO_FLOODING
63387+};
63388+
63389+extern char *gr_alert_log_fmt;
63390+extern char *gr_audit_log_fmt;
63391+extern char *gr_alert_log_buf;
63392+extern char *gr_audit_log_buf;
63393+
63394+static int gr_log_start(int audit)
63395+{
63396+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63397+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63398+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63399+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63400+ unsigned long curr_secs = get_seconds();
63401+
63402+ if (audit == GR_DO_AUDIT)
63403+ goto set_fmt;
63404+
63405+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63406+ grsec_alert_wtime = curr_secs;
63407+ grsec_alert_fyet = 0;
63408+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63409+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63410+ grsec_alert_fyet++;
63411+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63412+ grsec_alert_wtime = curr_secs;
63413+ grsec_alert_fyet++;
63414+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63415+ return FLOODING;
63416+ }
63417+ else return FLOODING;
63418+
63419+set_fmt:
63420+#endif
63421+ memset(buf, 0, PAGE_SIZE);
63422+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
63423+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63424+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63425+ } else if (current->signal->curr_ip) {
63426+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63427+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63428+ } else if (gr_acl_is_enabled()) {
63429+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63430+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63431+ } else {
63432+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
63433+ strcpy(buf, fmt);
63434+ }
63435+
63436+ return NO_FLOODING;
63437+}
63438+
63439+static void gr_log_middle(int audit, const char *msg, va_list ap)
63440+ __attribute__ ((format (printf, 2, 0)));
63441+
63442+static void gr_log_middle(int audit, const char *msg, va_list ap)
63443+{
63444+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63445+ unsigned int len = strlen(buf);
63446+
63447+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63448+
63449+ return;
63450+}
63451+
63452+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63453+ __attribute__ ((format (printf, 2, 3)));
63454+
63455+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63456+{
63457+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63458+ unsigned int len = strlen(buf);
63459+ va_list ap;
63460+
63461+ va_start(ap, msg);
63462+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63463+ va_end(ap);
63464+
63465+ return;
63466+}
63467+
63468+static void gr_log_end(int audit, int append_default)
63469+{
63470+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63471+
63472+ if (append_default) {
63473+ unsigned int len = strlen(buf);
63474+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
63475+ }
63476+
63477+ printk("%s\n", buf);
63478+
63479+ return;
63480+}
63481+
63482+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
63483+{
63484+ int logtype;
63485+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
63486+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
63487+ void *voidptr = NULL;
63488+ int num1 = 0, num2 = 0;
63489+ unsigned long ulong1 = 0, ulong2 = 0;
63490+ struct dentry *dentry = NULL;
63491+ struct vfsmount *mnt = NULL;
63492+ struct file *file = NULL;
63493+ struct task_struct *task = NULL;
63494+ const struct cred *cred, *pcred;
63495+ va_list ap;
63496+
63497+ BEGIN_LOCKS(audit);
63498+ logtype = gr_log_start(audit);
63499+ if (logtype == FLOODING) {
63500+ END_LOCKS(audit);
63501+ return;
63502+ }
63503+ va_start(ap, argtypes);
63504+ switch (argtypes) {
63505+ case GR_TTYSNIFF:
63506+ task = va_arg(ap, struct task_struct *);
63507+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63508+ break;
63509+ case GR_SYSCTL_HIDDEN:
63510+ str1 = va_arg(ap, char *);
63511+ gr_log_middle_varargs(audit, msg, result, str1);
63512+ break;
63513+ case GR_RBAC:
63514+ dentry = va_arg(ap, struct dentry *);
63515+ mnt = va_arg(ap, struct vfsmount *);
63516+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63517+ break;
63518+ case GR_RBAC_STR:
63519+ dentry = va_arg(ap, struct dentry *);
63520+ mnt = va_arg(ap, struct vfsmount *);
63521+ str1 = va_arg(ap, char *);
63522+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
63523+ break;
63524+ case GR_STR_RBAC:
63525+ str1 = va_arg(ap, char *);
63526+ dentry = va_arg(ap, struct dentry *);
63527+ mnt = va_arg(ap, struct vfsmount *);
63528+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
63529+ break;
63530+ case GR_RBAC_MODE2:
63531+ dentry = va_arg(ap, struct dentry *);
63532+ mnt = va_arg(ap, struct vfsmount *);
63533+ str1 = va_arg(ap, char *);
63534+ str2 = va_arg(ap, char *);
63535+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
63536+ break;
63537+ case GR_RBAC_MODE3:
63538+ dentry = va_arg(ap, struct dentry *);
63539+ mnt = va_arg(ap, struct vfsmount *);
63540+ str1 = va_arg(ap, char *);
63541+ str2 = va_arg(ap, char *);
63542+ str3 = va_arg(ap, char *);
63543+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
63544+ break;
63545+ case GR_FILENAME:
63546+ dentry = va_arg(ap, struct dentry *);
63547+ mnt = va_arg(ap, struct vfsmount *);
63548+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
63549+ break;
63550+ case GR_STR_FILENAME:
63551+ str1 = va_arg(ap, char *);
63552+ dentry = va_arg(ap, struct dentry *);
63553+ mnt = va_arg(ap, struct vfsmount *);
63554+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
63555+ break;
63556+ case GR_FILENAME_STR:
63557+ dentry = va_arg(ap, struct dentry *);
63558+ mnt = va_arg(ap, struct vfsmount *);
63559+ str1 = va_arg(ap, char *);
63560+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
63561+ break;
63562+ case GR_FILENAME_TWO_INT:
63563+ dentry = va_arg(ap, struct dentry *);
63564+ mnt = va_arg(ap, struct vfsmount *);
63565+ num1 = va_arg(ap, int);
63566+ num2 = va_arg(ap, int);
63567+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
63568+ break;
63569+ case GR_FILENAME_TWO_INT_STR:
63570+ dentry = va_arg(ap, struct dentry *);
63571+ mnt = va_arg(ap, struct vfsmount *);
63572+ num1 = va_arg(ap, int);
63573+ num2 = va_arg(ap, int);
63574+ str1 = va_arg(ap, char *);
63575+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
63576+ break;
63577+ case GR_TEXTREL:
63578+ file = va_arg(ap, struct file *);
63579+ ulong1 = va_arg(ap, unsigned long);
63580+ ulong2 = va_arg(ap, unsigned long);
63581+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
63582+ break;
63583+ case GR_PTRACE:
63584+ task = va_arg(ap, struct task_struct *);
63585+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
63586+ break;
63587+ case GR_RESOURCE:
63588+ task = va_arg(ap, struct task_struct *);
63589+ cred = __task_cred(task);
63590+ pcred = __task_cred(task->real_parent);
63591+ ulong1 = va_arg(ap, unsigned long);
63592+ str1 = va_arg(ap, char *);
63593+ ulong2 = va_arg(ap, unsigned long);
63594+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63595+ break;
63596+ case GR_CAP:
63597+ task = va_arg(ap, struct task_struct *);
63598+ cred = __task_cred(task);
63599+ pcred = __task_cred(task->real_parent);
63600+ str1 = va_arg(ap, char *);
63601+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63602+ break;
63603+ case GR_SIG:
63604+ str1 = va_arg(ap, char *);
63605+ voidptr = va_arg(ap, void *);
63606+ gr_log_middle_varargs(audit, msg, str1, voidptr);
63607+ break;
63608+ case GR_SIG2:
63609+ task = va_arg(ap, struct task_struct *);
63610+ cred = __task_cred(task);
63611+ pcred = __task_cred(task->real_parent);
63612+ num1 = va_arg(ap, int);
63613+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63614+ break;
63615+ case GR_CRASH1:
63616+ task = va_arg(ap, struct task_struct *);
63617+ cred = __task_cred(task);
63618+ pcred = __task_cred(task->real_parent);
63619+ ulong1 = va_arg(ap, unsigned long);
63620+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
63621+ break;
63622+ case GR_CRASH2:
63623+ task = va_arg(ap, struct task_struct *);
63624+ cred = __task_cred(task);
63625+ pcred = __task_cred(task->real_parent);
63626+ ulong1 = va_arg(ap, unsigned long);
63627+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
63628+ break;
63629+ case GR_RWXMAP:
63630+ file = va_arg(ap, struct file *);
63631+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
63632+ break;
63633+ case GR_PSACCT:
63634+ {
63635+ unsigned int wday, cday;
63636+ __u8 whr, chr;
63637+ __u8 wmin, cmin;
63638+ __u8 wsec, csec;
63639+ char cur_tty[64] = { 0 };
63640+ char parent_tty[64] = { 0 };
63641+
63642+ task = va_arg(ap, struct task_struct *);
63643+ wday = va_arg(ap, unsigned int);
63644+ cday = va_arg(ap, unsigned int);
63645+ whr = va_arg(ap, int);
63646+ chr = va_arg(ap, int);
63647+ wmin = va_arg(ap, int);
63648+ cmin = va_arg(ap, int);
63649+ wsec = va_arg(ap, int);
63650+ csec = va_arg(ap, int);
63651+ ulong1 = va_arg(ap, unsigned long);
63652+ cred = __task_cred(task);
63653+ pcred = __task_cred(task->real_parent);
63654+
63655+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63656+ }
63657+ break;
63658+ default:
63659+ gr_log_middle(audit, msg, ap);
63660+ }
63661+ va_end(ap);
63662+ // these don't need DEFAULTSECARGS printed on the end
63663+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
63664+ gr_log_end(audit, 0);
63665+ else
63666+ gr_log_end(audit, 1);
63667+ END_LOCKS(audit);
63668+}
63669diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
63670new file mode 100644
63671index 0000000..f536303
63672--- /dev/null
63673+++ b/grsecurity/grsec_mem.c
63674@@ -0,0 +1,40 @@
63675+#include <linux/kernel.h>
63676+#include <linux/sched.h>
63677+#include <linux/mm.h>
63678+#include <linux/mman.h>
63679+#include <linux/grinternal.h>
63680+
63681+void
63682+gr_handle_ioperm(void)
63683+{
63684+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
63685+ return;
63686+}
63687+
63688+void
63689+gr_handle_iopl(void)
63690+{
63691+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
63692+ return;
63693+}
63694+
63695+void
63696+gr_handle_mem_readwrite(u64 from, u64 to)
63697+{
63698+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
63699+ return;
63700+}
63701+
63702+void
63703+gr_handle_vm86(void)
63704+{
63705+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
63706+ return;
63707+}
63708+
63709+void
63710+gr_log_badprocpid(const char *entry)
63711+{
63712+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
63713+ return;
63714+}
63715diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
63716new file mode 100644
63717index 0000000..2131422
63718--- /dev/null
63719+++ b/grsecurity/grsec_mount.c
63720@@ -0,0 +1,62 @@
63721+#include <linux/kernel.h>
63722+#include <linux/sched.h>
63723+#include <linux/mount.h>
63724+#include <linux/grsecurity.h>
63725+#include <linux/grinternal.h>
63726+
63727+void
63728+gr_log_remount(const char *devname, const int retval)
63729+{
63730+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63731+ if (grsec_enable_mount && (retval >= 0))
63732+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
63733+#endif
63734+ return;
63735+}
63736+
63737+void
63738+gr_log_unmount(const char *devname, const int retval)
63739+{
63740+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63741+ if (grsec_enable_mount && (retval >= 0))
63742+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
63743+#endif
63744+ return;
63745+}
63746+
63747+void
63748+gr_log_mount(const char *from, const char *to, const int retval)
63749+{
63750+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63751+ if (grsec_enable_mount && (retval >= 0))
63752+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
63753+#endif
63754+ return;
63755+}
63756+
63757+int
63758+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
63759+{
63760+#ifdef CONFIG_GRKERNSEC_ROFS
63761+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
63762+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
63763+ return -EPERM;
63764+ } else
63765+ return 0;
63766+#endif
63767+ return 0;
63768+}
63769+
63770+int
63771+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
63772+{
63773+#ifdef CONFIG_GRKERNSEC_ROFS
63774+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
63775+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
63776+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
63777+ return -EPERM;
63778+ } else
63779+ return 0;
63780+#endif
63781+ return 0;
63782+}
63783diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
63784new file mode 100644
63785index 0000000..a3b12a0
63786--- /dev/null
63787+++ b/grsecurity/grsec_pax.c
63788@@ -0,0 +1,36 @@
63789+#include <linux/kernel.h>
63790+#include <linux/sched.h>
63791+#include <linux/mm.h>
63792+#include <linux/file.h>
63793+#include <linux/grinternal.h>
63794+#include <linux/grsecurity.h>
63795+
63796+void
63797+gr_log_textrel(struct vm_area_struct * vma)
63798+{
63799+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63800+ if (grsec_enable_audit_textrel)
63801+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
63802+#endif
63803+ return;
63804+}
63805+
63806+void
63807+gr_log_rwxmmap(struct file *file)
63808+{
63809+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63810+ if (grsec_enable_log_rwxmaps)
63811+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
63812+#endif
63813+ return;
63814+}
63815+
63816+void
63817+gr_log_rwxmprotect(struct file *file)
63818+{
63819+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63820+ if (grsec_enable_log_rwxmaps)
63821+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
63822+#endif
63823+ return;
63824+}
63825diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
63826new file mode 100644
63827index 0000000..78f8733
63828--- /dev/null
63829+++ b/grsecurity/grsec_ptrace.c
63830@@ -0,0 +1,30 @@
63831+#include <linux/kernel.h>
63832+#include <linux/sched.h>
63833+#include <linux/grinternal.h>
63834+#include <linux/security.h>
63835+
63836+void
63837+gr_audit_ptrace(struct task_struct *task)
63838+{
63839+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63840+ if (grsec_enable_audit_ptrace)
63841+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
63842+#endif
63843+ return;
63844+}
63845+
63846+int
63847+gr_ptrace_readexec(struct file *file, int unsafe_flags)
63848+{
63849+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63850+ const struct dentry *dentry = file->f_path.dentry;
63851+ const struct vfsmount *mnt = file->f_path.mnt;
63852+
63853+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
63854+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
63855+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
63856+ return -EACCES;
63857+ }
63858+#endif
63859+ return 0;
63860+}
63861diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
63862new file mode 100644
63863index 0000000..c648492
63864--- /dev/null
63865+++ b/grsecurity/grsec_sig.c
63866@@ -0,0 +1,206 @@
63867+#include <linux/kernel.h>
63868+#include <linux/sched.h>
63869+#include <linux/delay.h>
63870+#include <linux/grsecurity.h>
63871+#include <linux/grinternal.h>
63872+#include <linux/hardirq.h>
63873+
63874+char *signames[] = {
63875+ [SIGSEGV] = "Segmentation fault",
63876+ [SIGILL] = "Illegal instruction",
63877+ [SIGABRT] = "Abort",
63878+ [SIGBUS] = "Invalid alignment/Bus error"
63879+};
63880+
63881+void
63882+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
63883+{
63884+#ifdef CONFIG_GRKERNSEC_SIGNAL
63885+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
63886+ (sig == SIGABRT) || (sig == SIGBUS))) {
63887+ if (t->pid == current->pid) {
63888+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
63889+ } else {
63890+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
63891+ }
63892+ }
63893+#endif
63894+ return;
63895+}
63896+
63897+int
63898+gr_handle_signal(const struct task_struct *p, const int sig)
63899+{
63900+#ifdef CONFIG_GRKERNSEC
63901+ /* ignore the 0 signal for protected task checks */
63902+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
63903+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
63904+ return -EPERM;
63905+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
63906+ return -EPERM;
63907+ }
63908+#endif
63909+ return 0;
63910+}
63911+
63912+#ifdef CONFIG_GRKERNSEC
63913+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
63914+
63915+int gr_fake_force_sig(int sig, struct task_struct *t)
63916+{
63917+ unsigned long int flags;
63918+ int ret, blocked, ignored;
63919+ struct k_sigaction *action;
63920+
63921+ spin_lock_irqsave(&t->sighand->siglock, flags);
63922+ action = &t->sighand->action[sig-1];
63923+ ignored = action->sa.sa_handler == SIG_IGN;
63924+ blocked = sigismember(&t->blocked, sig);
63925+ if (blocked || ignored) {
63926+ action->sa.sa_handler = SIG_DFL;
63927+ if (blocked) {
63928+ sigdelset(&t->blocked, sig);
63929+ recalc_sigpending_and_wake(t);
63930+ }
63931+ }
63932+ if (action->sa.sa_handler == SIG_DFL)
63933+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
63934+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
63935+
63936+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
63937+
63938+ return ret;
63939+}
63940+#endif
63941+
63942+#ifdef CONFIG_GRKERNSEC_BRUTE
63943+#define GR_USER_BAN_TIME (15 * 60)
63944+
63945+static int __get_dumpable(unsigned long mm_flags)
63946+{
63947+ int ret;
63948+
63949+ ret = mm_flags & MMF_DUMPABLE_MASK;
63950+ return (ret >= 2) ? 2 : ret;
63951+}
63952+#endif
63953+
63954+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
63955+{
63956+#ifdef CONFIG_GRKERNSEC_BRUTE
63957+ uid_t uid = 0;
63958+
63959+ if (!grsec_enable_brute)
63960+ return;
63961+
63962+ rcu_read_lock();
63963+ read_lock(&tasklist_lock);
63964+ read_lock(&grsec_exec_file_lock);
63965+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
63966+ p->real_parent->brute = 1;
63967+ else {
63968+ const struct cred *cred = __task_cred(p), *cred2;
63969+ struct task_struct *tsk, *tsk2;
63970+
63971+ if (!__get_dumpable(mm_flags) && cred->uid) {
63972+ struct user_struct *user;
63973+
63974+ uid = cred->uid;
63975+
63976+ /* this is put upon execution past expiration */
63977+ user = find_user(uid);
63978+ if (user == NULL)
63979+ goto unlock;
63980+ user->banned = 1;
63981+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
63982+ if (user->ban_expires == ~0UL)
63983+ user->ban_expires--;
63984+
63985+ do_each_thread(tsk2, tsk) {
63986+ cred2 = __task_cred(tsk);
63987+ if (tsk != p && cred2->uid == uid)
63988+ gr_fake_force_sig(SIGKILL, tsk);
63989+ } while_each_thread(tsk2, tsk);
63990+ }
63991+ }
63992+unlock:
63993+ read_unlock(&grsec_exec_file_lock);
63994+ read_unlock(&tasklist_lock);
63995+ rcu_read_unlock();
63996+
63997+ if (uid)
63998+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
63999+#endif
64000+ return;
64001+}
64002+
64003+void gr_handle_brute_check(void)
64004+{
64005+#ifdef CONFIG_GRKERNSEC_BRUTE
64006+ if (current->brute)
64007+ msleep(30 * 1000);
64008+#endif
64009+ return;
64010+}
64011+
64012+void gr_handle_kernel_exploit(void)
64013+{
64014+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
64015+ const struct cred *cred;
64016+ struct task_struct *tsk, *tsk2;
64017+ struct user_struct *user;
64018+ uid_t uid;
64019+
64020+ if (in_irq() || in_serving_softirq() || in_nmi())
64021+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
64022+
64023+ uid = current_uid();
64024+
64025+ if (uid == 0)
64026+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
64027+ else {
64028+ /* kill all the processes of this user, hold a reference
64029+ to their creds struct, and prevent them from creating
64030+ another process until system reset
64031+ */
64032+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
64033+ /* we intentionally leak this ref */
64034+ user = get_uid(current->cred->user);
64035+ if (user) {
64036+ user->banned = 1;
64037+ user->ban_expires = ~0UL;
64038+ }
64039+
64040+ read_lock(&tasklist_lock);
64041+ do_each_thread(tsk2, tsk) {
64042+ cred = __task_cred(tsk);
64043+ if (cred->uid == uid)
64044+ gr_fake_force_sig(SIGKILL, tsk);
64045+ } while_each_thread(tsk2, tsk);
64046+ read_unlock(&tasklist_lock);
64047+ }
64048+#endif
64049+}
64050+
64051+int __gr_process_user_ban(struct user_struct *user)
64052+{
64053+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64054+ if (unlikely(user->banned)) {
64055+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
64056+ user->banned = 0;
64057+ user->ban_expires = 0;
64058+ free_uid(user);
64059+ } else
64060+ return -EPERM;
64061+ }
64062+#endif
64063+ return 0;
64064+}
64065+
64066+int gr_process_user_ban(void)
64067+{
64068+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
64069+ return __gr_process_user_ban(current->cred->user);
64070+#endif
64071+ return 0;
64072+}
64073diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
64074new file mode 100644
64075index 0000000..7512ea9
64076--- /dev/null
64077+++ b/grsecurity/grsec_sock.c
64078@@ -0,0 +1,275 @@
64079+#include <linux/kernel.h>
64080+#include <linux/module.h>
64081+#include <linux/sched.h>
64082+#include <linux/file.h>
64083+#include <linux/net.h>
64084+#include <linux/in.h>
64085+#include <linux/ip.h>
64086+#include <net/sock.h>
64087+#include <net/inet_sock.h>
64088+#include <linux/grsecurity.h>
64089+#include <linux/grinternal.h>
64090+#include <linux/gracl.h>
64091+
64092+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
64093+EXPORT_SYMBOL(gr_cap_rtnetlink);
64094+
64095+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64096+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64097+
64098+EXPORT_SYMBOL(gr_search_udp_recvmsg);
64099+EXPORT_SYMBOL(gr_search_udp_sendmsg);
64100+
64101+#ifdef CONFIG_UNIX_MODULE
64102+EXPORT_SYMBOL(gr_acl_handle_unix);
64103+EXPORT_SYMBOL(gr_acl_handle_mknod);
64104+EXPORT_SYMBOL(gr_handle_chroot_unix);
64105+EXPORT_SYMBOL(gr_handle_create);
64106+#endif
64107+
64108+#ifdef CONFIG_GRKERNSEC
64109+#define gr_conn_table_size 32749
64110+struct conn_table_entry {
64111+ struct conn_table_entry *next;
64112+ struct signal_struct *sig;
64113+};
64114+
64115+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64116+DEFINE_SPINLOCK(gr_conn_table_lock);
64117+
64118+extern const char * gr_socktype_to_name(unsigned char type);
64119+extern const char * gr_proto_to_name(unsigned char proto);
64120+extern const char * gr_sockfamily_to_name(unsigned char family);
64121+
64122+static __inline__ int
64123+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64124+{
64125+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64126+}
64127+
64128+static __inline__ int
64129+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64130+ __u16 sport, __u16 dport)
64131+{
64132+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64133+ sig->gr_sport == sport && sig->gr_dport == dport))
64134+ return 1;
64135+ else
64136+ return 0;
64137+}
64138+
64139+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64140+{
64141+ struct conn_table_entry **match;
64142+ unsigned int index;
64143+
64144+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64145+ sig->gr_sport, sig->gr_dport,
64146+ gr_conn_table_size);
64147+
64148+ newent->sig = sig;
64149+
64150+ match = &gr_conn_table[index];
64151+ newent->next = *match;
64152+ *match = newent;
64153+
64154+ return;
64155+}
64156+
64157+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64158+{
64159+ struct conn_table_entry *match, *last = NULL;
64160+ unsigned int index;
64161+
64162+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64163+ sig->gr_sport, sig->gr_dport,
64164+ gr_conn_table_size);
64165+
64166+ match = gr_conn_table[index];
64167+ while (match && !conn_match(match->sig,
64168+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64169+ sig->gr_dport)) {
64170+ last = match;
64171+ match = match->next;
64172+ }
64173+
64174+ if (match) {
64175+ if (last)
64176+ last->next = match->next;
64177+ else
64178+ gr_conn_table[index] = NULL;
64179+ kfree(match);
64180+ }
64181+
64182+ return;
64183+}
64184+
64185+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64186+ __u16 sport, __u16 dport)
64187+{
64188+ struct conn_table_entry *match;
64189+ unsigned int index;
64190+
64191+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64192+
64193+ match = gr_conn_table[index];
64194+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64195+ match = match->next;
64196+
64197+ if (match)
64198+ return match->sig;
64199+ else
64200+ return NULL;
64201+}
64202+
64203+#endif
64204+
64205+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64206+{
64207+#ifdef CONFIG_GRKERNSEC
64208+ struct signal_struct *sig = task->signal;
64209+ struct conn_table_entry *newent;
64210+
64211+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64212+ if (newent == NULL)
64213+ return;
64214+ /* no bh lock needed since we are called with bh disabled */
64215+ spin_lock(&gr_conn_table_lock);
64216+ gr_del_task_from_ip_table_nolock(sig);
64217+ sig->gr_saddr = inet->rcv_saddr;
64218+ sig->gr_daddr = inet->daddr;
64219+ sig->gr_sport = inet->sport;
64220+ sig->gr_dport = inet->dport;
64221+ gr_add_to_task_ip_table_nolock(sig, newent);
64222+ spin_unlock(&gr_conn_table_lock);
64223+#endif
64224+ return;
64225+}
64226+
64227+void gr_del_task_from_ip_table(struct task_struct *task)
64228+{
64229+#ifdef CONFIG_GRKERNSEC
64230+ spin_lock_bh(&gr_conn_table_lock);
64231+ gr_del_task_from_ip_table_nolock(task->signal);
64232+ spin_unlock_bh(&gr_conn_table_lock);
64233+#endif
64234+ return;
64235+}
64236+
64237+void
64238+gr_attach_curr_ip(const struct sock *sk)
64239+{
64240+#ifdef CONFIG_GRKERNSEC
64241+ struct signal_struct *p, *set;
64242+ const struct inet_sock *inet = inet_sk(sk);
64243+
64244+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64245+ return;
64246+
64247+ set = current->signal;
64248+
64249+ spin_lock_bh(&gr_conn_table_lock);
64250+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64251+ inet->dport, inet->sport);
64252+ if (unlikely(p != NULL)) {
64253+ set->curr_ip = p->curr_ip;
64254+ set->used_accept = 1;
64255+ gr_del_task_from_ip_table_nolock(p);
64256+ spin_unlock_bh(&gr_conn_table_lock);
64257+ return;
64258+ }
64259+ spin_unlock_bh(&gr_conn_table_lock);
64260+
64261+ set->curr_ip = inet->daddr;
64262+ set->used_accept = 1;
64263+#endif
64264+ return;
64265+}
64266+
64267+int
64268+gr_handle_sock_all(const int family, const int type, const int protocol)
64269+{
64270+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64271+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64272+ (family != AF_UNIX)) {
64273+ if (family == AF_INET)
64274+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64275+ else
64276+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64277+ return -EACCES;
64278+ }
64279+#endif
64280+ return 0;
64281+}
64282+
64283+int
64284+gr_handle_sock_server(const struct sockaddr *sck)
64285+{
64286+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64287+ if (grsec_enable_socket_server &&
64288+ in_group_p(grsec_socket_server_gid) &&
64289+ sck && (sck->sa_family != AF_UNIX) &&
64290+ (sck->sa_family != AF_LOCAL)) {
64291+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64292+ return -EACCES;
64293+ }
64294+#endif
64295+ return 0;
64296+}
64297+
64298+int
64299+gr_handle_sock_server_other(const struct sock *sck)
64300+{
64301+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64302+ if (grsec_enable_socket_server &&
64303+ in_group_p(grsec_socket_server_gid) &&
64304+ sck && (sck->sk_family != AF_UNIX) &&
64305+ (sck->sk_family != AF_LOCAL)) {
64306+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64307+ return -EACCES;
64308+ }
64309+#endif
64310+ return 0;
64311+}
64312+
64313+int
64314+gr_handle_sock_client(const struct sockaddr *sck)
64315+{
64316+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64317+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64318+ sck && (sck->sa_family != AF_UNIX) &&
64319+ (sck->sa_family != AF_LOCAL)) {
64320+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64321+ return -EACCES;
64322+ }
64323+#endif
64324+ return 0;
64325+}
64326+
64327+kernel_cap_t
64328+gr_cap_rtnetlink(struct sock *sock)
64329+{
64330+#ifdef CONFIG_GRKERNSEC
64331+ if (!gr_acl_is_enabled())
64332+ return current_cap();
64333+ else if (sock->sk_protocol == NETLINK_ISCSI &&
64334+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64335+ gr_is_capable(CAP_SYS_ADMIN))
64336+ return current_cap();
64337+ else if (sock->sk_protocol == NETLINK_AUDIT &&
64338+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64339+ gr_is_capable(CAP_AUDIT_WRITE) &&
64340+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64341+ gr_is_capable(CAP_AUDIT_CONTROL))
64342+ return current_cap();
64343+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64344+ ((sock->sk_protocol == NETLINK_ROUTE) ?
64345+ gr_is_capable_nolog(CAP_NET_ADMIN) :
64346+ gr_is_capable(CAP_NET_ADMIN)))
64347+ return current_cap();
64348+ else
64349+ return __cap_empty_set;
64350+#else
64351+ return current_cap();
64352+#endif
64353+}
64354diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64355new file mode 100644
64356index 0000000..31f3258
64357--- /dev/null
64358+++ b/grsecurity/grsec_sysctl.c
64359@@ -0,0 +1,499 @@
64360+#include <linux/kernel.h>
64361+#include <linux/sched.h>
64362+#include <linux/sysctl.h>
64363+#include <linux/grsecurity.h>
64364+#include <linux/grinternal.h>
64365+
64366+int
64367+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64368+{
64369+#ifdef CONFIG_GRKERNSEC_SYSCTL
64370+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64371+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64372+ return -EACCES;
64373+ }
64374+#endif
64375+ return 0;
64376+}
64377+
64378+#ifdef CONFIG_GRKERNSEC_ROFS
64379+static int __maybe_unused one = 1;
64380+#endif
64381+
64382+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64383+ctl_table grsecurity_table[] = {
64384+#ifdef CONFIG_GRKERNSEC_SYSCTL
64385+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64386+#ifdef CONFIG_GRKERNSEC_IO
64387+ {
64388+ .ctl_name = CTL_UNNUMBERED,
64389+ .procname = "disable_priv_io",
64390+ .data = &grsec_disable_privio,
64391+ .maxlen = sizeof(int),
64392+ .mode = 0600,
64393+ .proc_handler = &proc_dointvec,
64394+ },
64395+#endif
64396+#endif
64397+#ifdef CONFIG_GRKERNSEC_LINK
64398+ {
64399+ .ctl_name = CTL_UNNUMBERED,
64400+ .procname = "linking_restrictions",
64401+ .data = &grsec_enable_link,
64402+ .maxlen = sizeof(int),
64403+ .mode = 0600,
64404+ .proc_handler = &proc_dointvec,
64405+ },
64406+#endif
64407+#ifdef CONFIG_GRKERNSEC_BRUTE
64408+ {
64409+ .ctl_name = CTL_UNNUMBERED,
64410+ .procname = "deter_bruteforce",
64411+ .data = &grsec_enable_brute,
64412+ .maxlen = sizeof(int),
64413+ .mode = 0600,
64414+ .proc_handler = &proc_dointvec,
64415+ },
64416+#endif
64417+#ifdef CONFIG_GRKERNSEC_FIFO
64418+ {
64419+ .ctl_name = CTL_UNNUMBERED,
64420+ .procname = "fifo_restrictions",
64421+ .data = &grsec_enable_fifo,
64422+ .maxlen = sizeof(int),
64423+ .mode = 0600,
64424+ .proc_handler = &proc_dointvec,
64425+ },
64426+#endif
64427+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64428+ {
64429+ .ctl_name = CTL_UNNUMBERED,
64430+ .procname = "ptrace_readexec",
64431+ .data = &grsec_enable_ptrace_readexec,
64432+ .maxlen = sizeof(int),
64433+ .mode = 0600,
64434+ .proc_handler = &proc_dointvec,
64435+ },
64436+#endif
64437+#ifdef CONFIG_GRKERNSEC_SETXID
64438+ {
64439+ .ctl_name = CTL_UNNUMBERED,
64440+ .procname = "consistent_setxid",
64441+ .data = &grsec_enable_setxid,
64442+ .maxlen = sizeof(int),
64443+ .mode = 0600,
64444+ .proc_handler = &proc_dointvec,
64445+ },
64446+#endif
64447+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64448+ {
64449+ .ctl_name = CTL_UNNUMBERED,
64450+ .procname = "ip_blackhole",
64451+ .data = &grsec_enable_blackhole,
64452+ .maxlen = sizeof(int),
64453+ .mode = 0600,
64454+ .proc_handler = &proc_dointvec,
64455+ },
64456+ {
64457+ .ctl_name = CTL_UNNUMBERED,
64458+ .procname = "lastack_retries",
64459+ .data = &grsec_lastack_retries,
64460+ .maxlen = sizeof(int),
64461+ .mode = 0600,
64462+ .proc_handler = &proc_dointvec,
64463+ },
64464+#endif
64465+#ifdef CONFIG_GRKERNSEC_EXECLOG
64466+ {
64467+ .ctl_name = CTL_UNNUMBERED,
64468+ .procname = "exec_logging",
64469+ .data = &grsec_enable_execlog,
64470+ .maxlen = sizeof(int),
64471+ .mode = 0600,
64472+ .proc_handler = &proc_dointvec,
64473+ },
64474+#endif
64475+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64476+ {
64477+ .ctl_name = CTL_UNNUMBERED,
64478+ .procname = "rwxmap_logging",
64479+ .data = &grsec_enable_log_rwxmaps,
64480+ .maxlen = sizeof(int),
64481+ .mode = 0600,
64482+ .proc_handler = &proc_dointvec,
64483+ },
64484+#endif
64485+#ifdef CONFIG_GRKERNSEC_SIGNAL
64486+ {
64487+ .ctl_name = CTL_UNNUMBERED,
64488+ .procname = "signal_logging",
64489+ .data = &grsec_enable_signal,
64490+ .maxlen = sizeof(int),
64491+ .mode = 0600,
64492+ .proc_handler = &proc_dointvec,
64493+ },
64494+#endif
64495+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64496+ {
64497+ .ctl_name = CTL_UNNUMBERED,
64498+ .procname = "forkfail_logging",
64499+ .data = &grsec_enable_forkfail,
64500+ .maxlen = sizeof(int),
64501+ .mode = 0600,
64502+ .proc_handler = &proc_dointvec,
64503+ },
64504+#endif
64505+#ifdef CONFIG_GRKERNSEC_TIME
64506+ {
64507+ .ctl_name = CTL_UNNUMBERED,
64508+ .procname = "timechange_logging",
64509+ .data = &grsec_enable_time,
64510+ .maxlen = sizeof(int),
64511+ .mode = 0600,
64512+ .proc_handler = &proc_dointvec,
64513+ },
64514+#endif
64515+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64516+ {
64517+ .ctl_name = CTL_UNNUMBERED,
64518+ .procname = "chroot_deny_shmat",
64519+ .data = &grsec_enable_chroot_shmat,
64520+ .maxlen = sizeof(int),
64521+ .mode = 0600,
64522+ .proc_handler = &proc_dointvec,
64523+ },
64524+#endif
64525+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64526+ {
64527+ .ctl_name = CTL_UNNUMBERED,
64528+ .procname = "chroot_deny_unix",
64529+ .data = &grsec_enable_chroot_unix,
64530+ .maxlen = sizeof(int),
64531+ .mode = 0600,
64532+ .proc_handler = &proc_dointvec,
64533+ },
64534+#endif
64535+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64536+ {
64537+ .ctl_name = CTL_UNNUMBERED,
64538+ .procname = "chroot_deny_mount",
64539+ .data = &grsec_enable_chroot_mount,
64540+ .maxlen = sizeof(int),
64541+ .mode = 0600,
64542+ .proc_handler = &proc_dointvec,
64543+ },
64544+#endif
64545+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64546+ {
64547+ .ctl_name = CTL_UNNUMBERED,
64548+ .procname = "chroot_deny_fchdir",
64549+ .data = &grsec_enable_chroot_fchdir,
64550+ .maxlen = sizeof(int),
64551+ .mode = 0600,
64552+ .proc_handler = &proc_dointvec,
64553+ },
64554+#endif
64555+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64556+ {
64557+ .ctl_name = CTL_UNNUMBERED,
64558+ .procname = "chroot_deny_chroot",
64559+ .data = &grsec_enable_chroot_double,
64560+ .maxlen = sizeof(int),
64561+ .mode = 0600,
64562+ .proc_handler = &proc_dointvec,
64563+ },
64564+#endif
64565+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64566+ {
64567+ .ctl_name = CTL_UNNUMBERED,
64568+ .procname = "chroot_deny_pivot",
64569+ .data = &grsec_enable_chroot_pivot,
64570+ .maxlen = sizeof(int),
64571+ .mode = 0600,
64572+ .proc_handler = &proc_dointvec,
64573+ },
64574+#endif
64575+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64576+ {
64577+ .ctl_name = CTL_UNNUMBERED,
64578+ .procname = "chroot_enforce_chdir",
64579+ .data = &grsec_enable_chroot_chdir,
64580+ .maxlen = sizeof(int),
64581+ .mode = 0600,
64582+ .proc_handler = &proc_dointvec,
64583+ },
64584+#endif
64585+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64586+ {
64587+ .ctl_name = CTL_UNNUMBERED,
64588+ .procname = "chroot_deny_chmod",
64589+ .data = &grsec_enable_chroot_chmod,
64590+ .maxlen = sizeof(int),
64591+ .mode = 0600,
64592+ .proc_handler = &proc_dointvec,
64593+ },
64594+#endif
64595+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64596+ {
64597+ .ctl_name = CTL_UNNUMBERED,
64598+ .procname = "chroot_deny_mknod",
64599+ .data = &grsec_enable_chroot_mknod,
64600+ .maxlen = sizeof(int),
64601+ .mode = 0600,
64602+ .proc_handler = &proc_dointvec,
64603+ },
64604+#endif
64605+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64606+ {
64607+ .ctl_name = CTL_UNNUMBERED,
64608+ .procname = "chroot_restrict_nice",
64609+ .data = &grsec_enable_chroot_nice,
64610+ .maxlen = sizeof(int),
64611+ .mode = 0600,
64612+ .proc_handler = &proc_dointvec,
64613+ },
64614+#endif
64615+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64616+ {
64617+ .ctl_name = CTL_UNNUMBERED,
64618+ .procname = "chroot_execlog",
64619+ .data = &grsec_enable_chroot_execlog,
64620+ .maxlen = sizeof(int),
64621+ .mode = 0600,
64622+ .proc_handler = &proc_dointvec,
64623+ },
64624+#endif
64625+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64626+ {
64627+ .ctl_name = CTL_UNNUMBERED,
64628+ .procname = "chroot_caps",
64629+ .data = &grsec_enable_chroot_caps,
64630+ .maxlen = sizeof(int),
64631+ .mode = 0600,
64632+ .proc_handler = &proc_dointvec,
64633+ },
64634+#endif
64635+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64636+ {
64637+ .ctl_name = CTL_UNNUMBERED,
64638+ .procname = "chroot_deny_sysctl",
64639+ .data = &grsec_enable_chroot_sysctl,
64640+ .maxlen = sizeof(int),
64641+ .mode = 0600,
64642+ .proc_handler = &proc_dointvec,
64643+ },
64644+#endif
64645+#ifdef CONFIG_GRKERNSEC_TPE
64646+ {
64647+ .ctl_name = CTL_UNNUMBERED,
64648+ .procname = "tpe",
64649+ .data = &grsec_enable_tpe,
64650+ .maxlen = sizeof(int),
64651+ .mode = 0600,
64652+ .proc_handler = &proc_dointvec,
64653+ },
64654+ {
64655+ .ctl_name = CTL_UNNUMBERED,
64656+ .procname = "tpe_gid",
64657+ .data = &grsec_tpe_gid,
64658+ .maxlen = sizeof(int),
64659+ .mode = 0600,
64660+ .proc_handler = &proc_dointvec,
64661+ },
64662+#endif
64663+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64664+ {
64665+ .ctl_name = CTL_UNNUMBERED,
64666+ .procname = "tpe_invert",
64667+ .data = &grsec_enable_tpe_invert,
64668+ .maxlen = sizeof(int),
64669+ .mode = 0600,
64670+ .proc_handler = &proc_dointvec,
64671+ },
64672+#endif
64673+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64674+ {
64675+ .ctl_name = CTL_UNNUMBERED,
64676+ .procname = "tpe_restrict_all",
64677+ .data = &grsec_enable_tpe_all,
64678+ .maxlen = sizeof(int),
64679+ .mode = 0600,
64680+ .proc_handler = &proc_dointvec,
64681+ },
64682+#endif
64683+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64684+ {
64685+ .ctl_name = CTL_UNNUMBERED,
64686+ .procname = "socket_all",
64687+ .data = &grsec_enable_socket_all,
64688+ .maxlen = sizeof(int),
64689+ .mode = 0600,
64690+ .proc_handler = &proc_dointvec,
64691+ },
64692+ {
64693+ .ctl_name = CTL_UNNUMBERED,
64694+ .procname = "socket_all_gid",
64695+ .data = &grsec_socket_all_gid,
64696+ .maxlen = sizeof(int),
64697+ .mode = 0600,
64698+ .proc_handler = &proc_dointvec,
64699+ },
64700+#endif
64701+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64702+ {
64703+ .ctl_name = CTL_UNNUMBERED,
64704+ .procname = "socket_client",
64705+ .data = &grsec_enable_socket_client,
64706+ .maxlen = sizeof(int),
64707+ .mode = 0600,
64708+ .proc_handler = &proc_dointvec,
64709+ },
64710+ {
64711+ .ctl_name = CTL_UNNUMBERED,
64712+ .procname = "socket_client_gid",
64713+ .data = &grsec_socket_client_gid,
64714+ .maxlen = sizeof(int),
64715+ .mode = 0600,
64716+ .proc_handler = &proc_dointvec,
64717+ },
64718+#endif
64719+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64720+ {
64721+ .ctl_name = CTL_UNNUMBERED,
64722+ .procname = "socket_server",
64723+ .data = &grsec_enable_socket_server,
64724+ .maxlen = sizeof(int),
64725+ .mode = 0600,
64726+ .proc_handler = &proc_dointvec,
64727+ },
64728+ {
64729+ .ctl_name = CTL_UNNUMBERED,
64730+ .procname = "socket_server_gid",
64731+ .data = &grsec_socket_server_gid,
64732+ .maxlen = sizeof(int),
64733+ .mode = 0600,
64734+ .proc_handler = &proc_dointvec,
64735+ },
64736+#endif
64737+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64738+ {
64739+ .ctl_name = CTL_UNNUMBERED,
64740+ .procname = "audit_group",
64741+ .data = &grsec_enable_group,
64742+ .maxlen = sizeof(int),
64743+ .mode = 0600,
64744+ .proc_handler = &proc_dointvec,
64745+ },
64746+ {
64747+ .ctl_name = CTL_UNNUMBERED,
64748+ .procname = "audit_gid",
64749+ .data = &grsec_audit_gid,
64750+ .maxlen = sizeof(int),
64751+ .mode = 0600,
64752+ .proc_handler = &proc_dointvec,
64753+ },
64754+#endif
64755+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64756+ {
64757+ .ctl_name = CTL_UNNUMBERED,
64758+ .procname = "audit_chdir",
64759+ .data = &grsec_enable_chdir,
64760+ .maxlen = sizeof(int),
64761+ .mode = 0600,
64762+ .proc_handler = &proc_dointvec,
64763+ },
64764+#endif
64765+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64766+ {
64767+ .ctl_name = CTL_UNNUMBERED,
64768+ .procname = "audit_mount",
64769+ .data = &grsec_enable_mount,
64770+ .maxlen = sizeof(int),
64771+ .mode = 0600,
64772+ .proc_handler = &proc_dointvec,
64773+ },
64774+#endif
64775+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64776+ {
64777+ .ctl_name = CTL_UNNUMBERED,
64778+ .procname = "audit_textrel",
64779+ .data = &grsec_enable_audit_textrel,
64780+ .maxlen = sizeof(int),
64781+ .mode = 0600,
64782+ .proc_handler = &proc_dointvec,
64783+ },
64784+#endif
64785+#ifdef CONFIG_GRKERNSEC_DMESG
64786+ {
64787+ .ctl_name = CTL_UNNUMBERED,
64788+ .procname = "dmesg",
64789+ .data = &grsec_enable_dmesg,
64790+ .maxlen = sizeof(int),
64791+ .mode = 0600,
64792+ .proc_handler = &proc_dointvec,
64793+ },
64794+#endif
64795+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64796+ {
64797+ .ctl_name = CTL_UNNUMBERED,
64798+ .procname = "chroot_findtask",
64799+ .data = &grsec_enable_chroot_findtask,
64800+ .maxlen = sizeof(int),
64801+ .mode = 0600,
64802+ .proc_handler = &proc_dointvec,
64803+ },
64804+#endif
64805+#ifdef CONFIG_GRKERNSEC_RESLOG
64806+ {
64807+ .ctl_name = CTL_UNNUMBERED,
64808+ .procname = "resource_logging",
64809+ .data = &grsec_resource_logging,
64810+ .maxlen = sizeof(int),
64811+ .mode = 0600,
64812+ .proc_handler = &proc_dointvec,
64813+ },
64814+#endif
64815+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64816+ {
64817+ .ctl_name = CTL_UNNUMBERED,
64818+ .procname = "audit_ptrace",
64819+ .data = &grsec_enable_audit_ptrace,
64820+ .maxlen = sizeof(int),
64821+ .mode = 0600,
64822+ .proc_handler = &proc_dointvec,
64823+ },
64824+#endif
64825+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64826+ {
64827+ .ctl_name = CTL_UNNUMBERED,
64828+ .procname = "harden_ptrace",
64829+ .data = &grsec_enable_harden_ptrace,
64830+ .maxlen = sizeof(int),
64831+ .mode = 0600,
64832+ .proc_handler = &proc_dointvec,
64833+ },
64834+#endif
64835+ {
64836+ .ctl_name = CTL_UNNUMBERED,
64837+ .procname = "grsec_lock",
64838+ .data = &grsec_lock,
64839+ .maxlen = sizeof(int),
64840+ .mode = 0600,
64841+ .proc_handler = &proc_dointvec,
64842+ },
64843+#endif
64844+#ifdef CONFIG_GRKERNSEC_ROFS
64845+ {
64846+ .ctl_name = CTL_UNNUMBERED,
64847+ .procname = "romount_protect",
64848+ .data = &grsec_enable_rofs,
64849+ .maxlen = sizeof(int),
64850+ .mode = 0600,
64851+ .proc_handler = &proc_dointvec_minmax,
64852+ .extra1 = &one,
64853+ .extra2 = &one,
64854+ },
64855+#endif
64856+ { .ctl_name = 0 }
64857+};
64858+#endif
64859diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
64860new file mode 100644
64861index 0000000..0dc13c3
64862--- /dev/null
64863+++ b/grsecurity/grsec_time.c
64864@@ -0,0 +1,16 @@
64865+#include <linux/kernel.h>
64866+#include <linux/sched.h>
64867+#include <linux/grinternal.h>
64868+#include <linux/module.h>
64869+
64870+void
64871+gr_log_timechange(void)
64872+{
64873+#ifdef CONFIG_GRKERNSEC_TIME
64874+ if (grsec_enable_time)
64875+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
64876+#endif
64877+ return;
64878+}
64879+
64880+EXPORT_SYMBOL(gr_log_timechange);
64881diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
64882new file mode 100644
64883index 0000000..07e0dc0
64884--- /dev/null
64885+++ b/grsecurity/grsec_tpe.c
64886@@ -0,0 +1,73 @@
64887+#include <linux/kernel.h>
64888+#include <linux/sched.h>
64889+#include <linux/file.h>
64890+#include <linux/fs.h>
64891+#include <linux/grinternal.h>
64892+
64893+extern int gr_acl_tpe_check(void);
64894+
64895+int
64896+gr_tpe_allow(const struct file *file)
64897+{
64898+#ifdef CONFIG_GRKERNSEC
64899+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
64900+ const struct cred *cred = current_cred();
64901+ char *msg = NULL;
64902+ char *msg2 = NULL;
64903+
64904+ // never restrict root
64905+ if (!cred->uid)
64906+ return 1;
64907+
64908+ if (grsec_enable_tpe) {
64909+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64910+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
64911+ msg = "not being in trusted group";
64912+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
64913+ msg = "being in untrusted group";
64914+#else
64915+ if (in_group_p(grsec_tpe_gid))
64916+ msg = "being in untrusted group";
64917+#endif
64918+ }
64919+ if (!msg && gr_acl_tpe_check())
64920+ msg = "being in untrusted role";
64921+
64922+ // not in any affected group/role
64923+ if (!msg)
64924+ goto next_check;
64925+
64926+ if (inode->i_uid)
64927+ msg2 = "file in non-root-owned directory";
64928+ else if (inode->i_mode & S_IWOTH)
64929+ msg2 = "file in world-writable directory";
64930+ else if (inode->i_mode & S_IWGRP)
64931+ msg2 = "file in group-writable directory";
64932+
64933+ if (msg && msg2) {
64934+ char fullmsg[70] = {0};
64935+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
64936+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
64937+ return 0;
64938+ }
64939+ msg = NULL;
64940+next_check:
64941+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64942+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
64943+ return 1;
64944+
64945+ if (inode->i_uid && (inode->i_uid != cred->uid))
64946+ msg = "directory not owned by user";
64947+ else if (inode->i_mode & S_IWOTH)
64948+ msg = "file in world-writable directory";
64949+ else if (inode->i_mode & S_IWGRP)
64950+ msg = "file in group-writable directory";
64951+
64952+ if (msg) {
64953+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
64954+ return 0;
64955+ }
64956+#endif
64957+#endif
64958+ return 1;
64959+}
64960diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
64961new file mode 100644
64962index 0000000..9f7b1ac
64963--- /dev/null
64964+++ b/grsecurity/grsum.c
64965@@ -0,0 +1,61 @@
64966+#include <linux/err.h>
64967+#include <linux/kernel.h>
64968+#include <linux/sched.h>
64969+#include <linux/mm.h>
64970+#include <linux/scatterlist.h>
64971+#include <linux/crypto.h>
64972+#include <linux/gracl.h>
64973+
64974+
64975+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
64976+#error "crypto and sha256 must be built into the kernel"
64977+#endif
64978+
64979+int
64980+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
64981+{
64982+ char *p;
64983+ struct crypto_hash *tfm;
64984+ struct hash_desc desc;
64985+ struct scatterlist sg;
64986+ unsigned char temp_sum[GR_SHA_LEN];
64987+ volatile int retval = 0;
64988+ volatile int dummy = 0;
64989+ unsigned int i;
64990+
64991+ sg_init_table(&sg, 1);
64992+
64993+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
64994+ if (IS_ERR(tfm)) {
64995+ /* should never happen, since sha256 should be built in */
64996+ return 1;
64997+ }
64998+
64999+ desc.tfm = tfm;
65000+ desc.flags = 0;
65001+
65002+ crypto_hash_init(&desc);
65003+
65004+ p = salt;
65005+ sg_set_buf(&sg, p, GR_SALT_LEN);
65006+ crypto_hash_update(&desc, &sg, sg.length);
65007+
65008+ p = entry->pw;
65009+ sg_set_buf(&sg, p, strlen(p));
65010+
65011+ crypto_hash_update(&desc, &sg, sg.length);
65012+
65013+ crypto_hash_final(&desc, temp_sum);
65014+
65015+ memset(entry->pw, 0, GR_PW_LEN);
65016+
65017+ for (i = 0; i < GR_SHA_LEN; i++)
65018+ if (sum[i] != temp_sum[i])
65019+ retval = 1;
65020+ else
65021+ dummy = 1; // waste a cycle
65022+
65023+ crypto_free_hash(tfm);
65024+
65025+ return retval;
65026+}
65027diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
65028index 3cd9ccd..fe16d47 100644
65029--- a/include/acpi/acpi_bus.h
65030+++ b/include/acpi/acpi_bus.h
65031@@ -107,7 +107,7 @@ struct acpi_device_ops {
65032 acpi_op_bind bind;
65033 acpi_op_unbind unbind;
65034 acpi_op_notify notify;
65035-};
65036+} __no_const;
65037
65038 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
65039
65040diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
65041index f4906f6..71feb73 100644
65042--- a/include/acpi/acpi_drivers.h
65043+++ b/include/acpi/acpi_drivers.h
65044@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
65045 Dock Station
65046 -------------------------------------------------------------------------- */
65047 struct acpi_dock_ops {
65048- acpi_notify_handler handler;
65049- acpi_notify_handler uevent;
65050+ const acpi_notify_handler handler;
65051+ const acpi_notify_handler uevent;
65052 };
65053
65054 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
65055@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
65056 extern int register_dock_notifier(struct notifier_block *nb);
65057 extern void unregister_dock_notifier(struct notifier_block *nb);
65058 extern int register_hotplug_dock_device(acpi_handle handle,
65059- struct acpi_dock_ops *ops,
65060+ const struct acpi_dock_ops *ops,
65061 void *context);
65062 extern void unregister_hotplug_dock_device(acpi_handle handle);
65063 #else
65064@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
65065 {
65066 }
65067 static inline int register_hotplug_dock_device(acpi_handle handle,
65068- struct acpi_dock_ops *ops,
65069+ const struct acpi_dock_ops *ops,
65070 void *context)
65071 {
65072 return -ENODEV;
65073diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
65074index b7babf0..a9ac9fc 100644
65075--- a/include/asm-generic/atomic-long.h
65076+++ b/include/asm-generic/atomic-long.h
65077@@ -22,6 +22,12 @@
65078
65079 typedef atomic64_t atomic_long_t;
65080
65081+#ifdef CONFIG_PAX_REFCOUNT
65082+typedef atomic64_unchecked_t atomic_long_unchecked_t;
65083+#else
65084+typedef atomic64_t atomic_long_unchecked_t;
65085+#endif
65086+
65087 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
65088
65089 static inline long atomic_long_read(atomic_long_t *l)
65090@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65091 return (long)atomic64_read(v);
65092 }
65093
65094+#ifdef CONFIG_PAX_REFCOUNT
65095+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65096+{
65097+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65098+
65099+ return (long)atomic64_read_unchecked(v);
65100+}
65101+#endif
65102+
65103 static inline void atomic_long_set(atomic_long_t *l, long i)
65104 {
65105 atomic64_t *v = (atomic64_t *)l;
65106@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65107 atomic64_set(v, i);
65108 }
65109
65110+#ifdef CONFIG_PAX_REFCOUNT
65111+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65112+{
65113+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65114+
65115+ atomic64_set_unchecked(v, i);
65116+}
65117+#endif
65118+
65119 static inline void atomic_long_inc(atomic_long_t *l)
65120 {
65121 atomic64_t *v = (atomic64_t *)l;
65122@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65123 atomic64_inc(v);
65124 }
65125
65126+#ifdef CONFIG_PAX_REFCOUNT
65127+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65128+{
65129+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65130+
65131+ atomic64_inc_unchecked(v);
65132+}
65133+#endif
65134+
65135 static inline void atomic_long_dec(atomic_long_t *l)
65136 {
65137 atomic64_t *v = (atomic64_t *)l;
65138@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65139 atomic64_dec(v);
65140 }
65141
65142+#ifdef CONFIG_PAX_REFCOUNT
65143+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65144+{
65145+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65146+
65147+ atomic64_dec_unchecked(v);
65148+}
65149+#endif
65150+
65151 static inline void atomic_long_add(long i, atomic_long_t *l)
65152 {
65153 atomic64_t *v = (atomic64_t *)l;
65154@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65155 atomic64_add(i, v);
65156 }
65157
65158+#ifdef CONFIG_PAX_REFCOUNT
65159+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65160+{
65161+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65162+
65163+ atomic64_add_unchecked(i, v);
65164+}
65165+#endif
65166+
65167 static inline void atomic_long_sub(long i, atomic_long_t *l)
65168 {
65169 atomic64_t *v = (atomic64_t *)l;
65170@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65171 return (long)atomic64_inc_return(v);
65172 }
65173
65174+#ifdef CONFIG_PAX_REFCOUNT
65175+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65176+{
65177+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65178+
65179+ return (long)atomic64_inc_return_unchecked(v);
65180+}
65181+#endif
65182+
65183 static inline long atomic_long_dec_return(atomic_long_t *l)
65184 {
65185 atomic64_t *v = (atomic64_t *)l;
65186@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65187
65188 typedef atomic_t atomic_long_t;
65189
65190+#ifdef CONFIG_PAX_REFCOUNT
65191+typedef atomic_unchecked_t atomic_long_unchecked_t;
65192+#else
65193+typedef atomic_t atomic_long_unchecked_t;
65194+#endif
65195+
65196 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65197 static inline long atomic_long_read(atomic_long_t *l)
65198 {
65199@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65200 return (long)atomic_read(v);
65201 }
65202
65203+#ifdef CONFIG_PAX_REFCOUNT
65204+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65205+{
65206+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65207+
65208+ return (long)atomic_read_unchecked(v);
65209+}
65210+#endif
65211+
65212 static inline void atomic_long_set(atomic_long_t *l, long i)
65213 {
65214 atomic_t *v = (atomic_t *)l;
65215@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65216 atomic_set(v, i);
65217 }
65218
65219+#ifdef CONFIG_PAX_REFCOUNT
65220+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65221+{
65222+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65223+
65224+ atomic_set_unchecked(v, i);
65225+}
65226+#endif
65227+
65228 static inline void atomic_long_inc(atomic_long_t *l)
65229 {
65230 atomic_t *v = (atomic_t *)l;
65231@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65232 atomic_inc(v);
65233 }
65234
65235+#ifdef CONFIG_PAX_REFCOUNT
65236+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65237+{
65238+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65239+
65240+ atomic_inc_unchecked(v);
65241+}
65242+#endif
65243+
65244 static inline void atomic_long_dec(atomic_long_t *l)
65245 {
65246 atomic_t *v = (atomic_t *)l;
65247@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65248 atomic_dec(v);
65249 }
65250
65251+#ifdef CONFIG_PAX_REFCOUNT
65252+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65253+{
65254+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65255+
65256+ atomic_dec_unchecked(v);
65257+}
65258+#endif
65259+
65260 static inline void atomic_long_add(long i, atomic_long_t *l)
65261 {
65262 atomic_t *v = (atomic_t *)l;
65263@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65264 atomic_add(i, v);
65265 }
65266
65267+#ifdef CONFIG_PAX_REFCOUNT
65268+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65269+{
65270+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65271+
65272+ atomic_add_unchecked(i, v);
65273+}
65274+#endif
65275+
65276 static inline void atomic_long_sub(long i, atomic_long_t *l)
65277 {
65278 atomic_t *v = (atomic_t *)l;
65279@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65280 return (long)atomic_inc_return(v);
65281 }
65282
65283+#ifdef CONFIG_PAX_REFCOUNT
65284+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65285+{
65286+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65287+
65288+ return (long)atomic_inc_return_unchecked(v);
65289+}
65290+#endif
65291+
65292 static inline long atomic_long_dec_return(atomic_long_t *l)
65293 {
65294 atomic_t *v = (atomic_t *)l;
65295@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65296
65297 #endif /* BITS_PER_LONG == 64 */
65298
65299+#ifdef CONFIG_PAX_REFCOUNT
65300+static inline void pax_refcount_needs_these_functions(void)
65301+{
65302+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
65303+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65304+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65305+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65306+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65307+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65308+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65309+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65310+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65311+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65312+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65313+
65314+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65315+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65316+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65317+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65318+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65319+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65320+}
65321+#else
65322+#define atomic_read_unchecked(v) atomic_read(v)
65323+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65324+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65325+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65326+#define atomic_inc_unchecked(v) atomic_inc(v)
65327+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65328+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65329+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65330+#define atomic_dec_unchecked(v) atomic_dec(v)
65331+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65332+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65333+
65334+#define atomic_long_read_unchecked(v) atomic_long_read(v)
65335+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65336+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65337+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65338+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65339+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65340+#endif
65341+
65342 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65343diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65344index b18ce4f..2ee2843 100644
65345--- a/include/asm-generic/atomic64.h
65346+++ b/include/asm-generic/atomic64.h
65347@@ -16,6 +16,8 @@ typedef struct {
65348 long long counter;
65349 } atomic64_t;
65350
65351+typedef atomic64_t atomic64_unchecked_t;
65352+
65353 #define ATOMIC64_INIT(i) { (i) }
65354
65355 extern long long atomic64_read(const atomic64_t *v);
65356@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65357 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65358 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65359
65360+#define atomic64_read_unchecked(v) atomic64_read(v)
65361+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65362+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65363+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65364+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65365+#define atomic64_inc_unchecked(v) atomic64_inc(v)
65366+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65367+#define atomic64_dec_unchecked(v) atomic64_dec(v)
65368+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65369+
65370 #endif /* _ASM_GENERIC_ATOMIC64_H */
65371diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65372index d48ddf0..656a0ac 100644
65373--- a/include/asm-generic/bug.h
65374+++ b/include/asm-generic/bug.h
65375@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65376
65377 #else /* !CONFIG_BUG */
65378 #ifndef HAVE_ARCH_BUG
65379-#define BUG() do {} while(0)
65380+#define BUG() do { for (;;) ; } while(0)
65381 #endif
65382
65383 #ifndef HAVE_ARCH_BUG_ON
65384-#define BUG_ON(condition) do { if (condition) ; } while(0)
65385+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65386 #endif
65387
65388 #ifndef HAVE_ARCH_WARN_ON
65389diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65390index 1bfcfe5..e04c5c9 100644
65391--- a/include/asm-generic/cache.h
65392+++ b/include/asm-generic/cache.h
65393@@ -6,7 +6,7 @@
65394 * cache lines need to provide their own cache.h.
65395 */
65396
65397-#define L1_CACHE_SHIFT 5
65398-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65399+#define L1_CACHE_SHIFT 5UL
65400+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65401
65402 #endif /* __ASM_GENERIC_CACHE_H */
65403diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65404index 6920695..41038bc 100644
65405--- a/include/asm-generic/dma-mapping-common.h
65406+++ b/include/asm-generic/dma-mapping-common.h
65407@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65408 enum dma_data_direction dir,
65409 struct dma_attrs *attrs)
65410 {
65411- struct dma_map_ops *ops = get_dma_ops(dev);
65412+ const struct dma_map_ops *ops = get_dma_ops(dev);
65413 dma_addr_t addr;
65414
65415 kmemcheck_mark_initialized(ptr, size);
65416@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65417 enum dma_data_direction dir,
65418 struct dma_attrs *attrs)
65419 {
65420- struct dma_map_ops *ops = get_dma_ops(dev);
65421+ const struct dma_map_ops *ops = get_dma_ops(dev);
65422
65423 BUG_ON(!valid_dma_direction(dir));
65424 if (ops->unmap_page)
65425@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65426 int nents, enum dma_data_direction dir,
65427 struct dma_attrs *attrs)
65428 {
65429- struct dma_map_ops *ops = get_dma_ops(dev);
65430+ const struct dma_map_ops *ops = get_dma_ops(dev);
65431 int i, ents;
65432 struct scatterlist *s;
65433
65434@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65435 int nents, enum dma_data_direction dir,
65436 struct dma_attrs *attrs)
65437 {
65438- struct dma_map_ops *ops = get_dma_ops(dev);
65439+ const struct dma_map_ops *ops = get_dma_ops(dev);
65440
65441 BUG_ON(!valid_dma_direction(dir));
65442 debug_dma_unmap_sg(dev, sg, nents, dir);
65443@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65444 size_t offset, size_t size,
65445 enum dma_data_direction dir)
65446 {
65447- struct dma_map_ops *ops = get_dma_ops(dev);
65448+ const struct dma_map_ops *ops = get_dma_ops(dev);
65449 dma_addr_t addr;
65450
65451 kmemcheck_mark_initialized(page_address(page) + offset, size);
65452@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65453 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65454 size_t size, enum dma_data_direction dir)
65455 {
65456- struct dma_map_ops *ops = get_dma_ops(dev);
65457+ const struct dma_map_ops *ops = get_dma_ops(dev);
65458
65459 BUG_ON(!valid_dma_direction(dir));
65460 if (ops->unmap_page)
65461@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65462 size_t size,
65463 enum dma_data_direction dir)
65464 {
65465- struct dma_map_ops *ops = get_dma_ops(dev);
65466+ const struct dma_map_ops *ops = get_dma_ops(dev);
65467
65468 BUG_ON(!valid_dma_direction(dir));
65469 if (ops->sync_single_for_cpu)
65470@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
65471 dma_addr_t addr, size_t size,
65472 enum dma_data_direction dir)
65473 {
65474- struct dma_map_ops *ops = get_dma_ops(dev);
65475+ const struct dma_map_ops *ops = get_dma_ops(dev);
65476
65477 BUG_ON(!valid_dma_direction(dir));
65478 if (ops->sync_single_for_device)
65479@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
65480 size_t size,
65481 enum dma_data_direction dir)
65482 {
65483- struct dma_map_ops *ops = get_dma_ops(dev);
65484+ const struct dma_map_ops *ops = get_dma_ops(dev);
65485
65486 BUG_ON(!valid_dma_direction(dir));
65487 if (ops->sync_single_range_for_cpu) {
65488@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
65489 size_t size,
65490 enum dma_data_direction dir)
65491 {
65492- struct dma_map_ops *ops = get_dma_ops(dev);
65493+ const struct dma_map_ops *ops = get_dma_ops(dev);
65494
65495 BUG_ON(!valid_dma_direction(dir));
65496 if (ops->sync_single_range_for_device) {
65497@@ -155,7 +155,7 @@ static inline void
65498 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
65499 int nelems, enum dma_data_direction dir)
65500 {
65501- struct dma_map_ops *ops = get_dma_ops(dev);
65502+ const struct dma_map_ops *ops = get_dma_ops(dev);
65503
65504 BUG_ON(!valid_dma_direction(dir));
65505 if (ops->sync_sg_for_cpu)
65506@@ -167,7 +167,7 @@ static inline void
65507 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
65508 int nelems, enum dma_data_direction dir)
65509 {
65510- struct dma_map_ops *ops = get_dma_ops(dev);
65511+ const struct dma_map_ops *ops = get_dma_ops(dev);
65512
65513 BUG_ON(!valid_dma_direction(dir));
65514 if (ops->sync_sg_for_device)
65515diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65516index 0d68a1e..b74a761 100644
65517--- a/include/asm-generic/emergency-restart.h
65518+++ b/include/asm-generic/emergency-restart.h
65519@@ -1,7 +1,7 @@
65520 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65521 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65522
65523-static inline void machine_emergency_restart(void)
65524+static inline __noreturn void machine_emergency_restart(void)
65525 {
65526 machine_restart(NULL);
65527 }
65528diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
65529index 3c2344f..4590a7d 100644
65530--- a/include/asm-generic/futex.h
65531+++ b/include/asm-generic/futex.h
65532@@ -6,7 +6,7 @@
65533 #include <asm/errno.h>
65534
65535 static inline int
65536-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65537+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
65538 {
65539 int op = (encoded_op >> 28) & 7;
65540 int cmp = (encoded_op >> 24) & 15;
65541@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65542 }
65543
65544 static inline int
65545-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
65546+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
65547 {
65548 return -ENOSYS;
65549 }
65550diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
65551index 1ca3efc..e3dc852 100644
65552--- a/include/asm-generic/int-l64.h
65553+++ b/include/asm-generic/int-l64.h
65554@@ -46,6 +46,8 @@ typedef unsigned int u32;
65555 typedef signed long s64;
65556 typedef unsigned long u64;
65557
65558+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
65559+
65560 #define S8_C(x) x
65561 #define U8_C(x) x ## U
65562 #define S16_C(x) x
65563diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
65564index f394147..b6152b9 100644
65565--- a/include/asm-generic/int-ll64.h
65566+++ b/include/asm-generic/int-ll64.h
65567@@ -51,6 +51,8 @@ typedef unsigned int u32;
65568 typedef signed long long s64;
65569 typedef unsigned long long u64;
65570
65571+typedef unsigned long long intoverflow_t;
65572+
65573 #define S8_C(x) x
65574 #define U8_C(x) x ## U
65575 #define S16_C(x) x
65576diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65577index e5f234a..cdb16b3 100644
65578--- a/include/asm-generic/kmap_types.h
65579+++ b/include/asm-generic/kmap_types.h
65580@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
65581 KMAP_D(16) KM_IRQ_PTE,
65582 KMAP_D(17) KM_NMI,
65583 KMAP_D(18) KM_NMI_PTE,
65584-KMAP_D(19) KM_TYPE_NR
65585+KMAP_D(19) KM_CLEARPAGE,
65586+KMAP_D(20) KM_TYPE_NR
65587 };
65588
65589 #undef KMAP_D
65590diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
65591index 725612b..9cc513a 100644
65592--- a/include/asm-generic/pgtable-nopmd.h
65593+++ b/include/asm-generic/pgtable-nopmd.h
65594@@ -1,14 +1,19 @@
65595 #ifndef _PGTABLE_NOPMD_H
65596 #define _PGTABLE_NOPMD_H
65597
65598-#ifndef __ASSEMBLY__
65599-
65600 #include <asm-generic/pgtable-nopud.h>
65601
65602-struct mm_struct;
65603-
65604 #define __PAGETABLE_PMD_FOLDED
65605
65606+#define PMD_SHIFT PUD_SHIFT
65607+#define PTRS_PER_PMD 1
65608+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
65609+#define PMD_MASK (~(PMD_SIZE-1))
65610+
65611+#ifndef __ASSEMBLY__
65612+
65613+struct mm_struct;
65614+
65615 /*
65616 * Having the pmd type consist of a pud gets the size right, and allows
65617 * us to conceptually access the pud entry that this pmd is folded into
65618@@ -16,11 +21,6 @@ struct mm_struct;
65619 */
65620 typedef struct { pud_t pud; } pmd_t;
65621
65622-#define PMD_SHIFT PUD_SHIFT
65623-#define PTRS_PER_PMD 1
65624-#define PMD_SIZE (1UL << PMD_SHIFT)
65625-#define PMD_MASK (~(PMD_SIZE-1))
65626-
65627 /*
65628 * The "pud_xxx()" functions here are trivial for a folded two-level
65629 * setup: the pmd is never bad, and a pmd always exists (as it's folded
65630diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
65631index 810431d..ccc3638 100644
65632--- a/include/asm-generic/pgtable-nopud.h
65633+++ b/include/asm-generic/pgtable-nopud.h
65634@@ -1,10 +1,15 @@
65635 #ifndef _PGTABLE_NOPUD_H
65636 #define _PGTABLE_NOPUD_H
65637
65638-#ifndef __ASSEMBLY__
65639-
65640 #define __PAGETABLE_PUD_FOLDED
65641
65642+#define PUD_SHIFT PGDIR_SHIFT
65643+#define PTRS_PER_PUD 1
65644+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
65645+#define PUD_MASK (~(PUD_SIZE-1))
65646+
65647+#ifndef __ASSEMBLY__
65648+
65649 /*
65650 * Having the pud type consist of a pgd gets the size right, and allows
65651 * us to conceptually access the pgd entry that this pud is folded into
65652@@ -12,11 +17,6 @@
65653 */
65654 typedef struct { pgd_t pgd; } pud_t;
65655
65656-#define PUD_SHIFT PGDIR_SHIFT
65657-#define PTRS_PER_PUD 1
65658-#define PUD_SIZE (1UL << PUD_SHIFT)
65659-#define PUD_MASK (~(PUD_SIZE-1))
65660-
65661 /*
65662 * The "pgd_xxx()" functions here are trivial for a folded two-level
65663 * setup: the pud is never bad, and a pud always exists (as it's folded
65664diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
65665index e2bd73e..fea8ed3 100644
65666--- a/include/asm-generic/pgtable.h
65667+++ b/include/asm-generic/pgtable.h
65668@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
65669 unsigned long size);
65670 #endif
65671
65672+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
65673+static inline unsigned long pax_open_kernel(void) { return 0; }
65674+#endif
65675+
65676+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
65677+static inline unsigned long pax_close_kernel(void) { return 0; }
65678+#endif
65679+
65680 #endif /* !__ASSEMBLY__ */
65681
65682 #endif /* _ASM_GENERIC_PGTABLE_H */
65683diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
65684index b6e818f..21aa58a 100644
65685--- a/include/asm-generic/vmlinux.lds.h
65686+++ b/include/asm-generic/vmlinux.lds.h
65687@@ -199,6 +199,7 @@
65688 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
65689 VMLINUX_SYMBOL(__start_rodata) = .; \
65690 *(.rodata) *(.rodata.*) \
65691+ *(.data.read_only) \
65692 *(__vermagic) /* Kernel version magic */ \
65693 *(__markers_strings) /* Markers: strings */ \
65694 *(__tracepoints_strings)/* Tracepoints: strings */ \
65695@@ -656,22 +657,24 @@
65696 * section in the linker script will go there too. @phdr should have
65697 * a leading colon.
65698 *
65699- * Note that this macros defines __per_cpu_load as an absolute symbol.
65700+ * Note that this macros defines per_cpu_load as an absolute symbol.
65701 * If there is no need to put the percpu section at a predetermined
65702 * address, use PERCPU().
65703 */
65704 #define PERCPU_VADDR(vaddr, phdr) \
65705- VMLINUX_SYMBOL(__per_cpu_load) = .; \
65706- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
65707+ per_cpu_load = .; \
65708+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
65709 - LOAD_OFFSET) { \
65710+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
65711 VMLINUX_SYMBOL(__per_cpu_start) = .; \
65712 *(.data.percpu.first) \
65713- *(.data.percpu.page_aligned) \
65714 *(.data.percpu) \
65715+ . = ALIGN(PAGE_SIZE); \
65716+ *(.data.percpu.page_aligned) \
65717 *(.data.percpu.shared_aligned) \
65718 VMLINUX_SYMBOL(__per_cpu_end) = .; \
65719 } phdr \
65720- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
65721+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
65722
65723 /**
65724 * PERCPU - define output section for percpu area, simple version
65725diff --git a/include/drm/drmP.h b/include/drm/drmP.h
65726index ebab6a6..351dba1 100644
65727--- a/include/drm/drmP.h
65728+++ b/include/drm/drmP.h
65729@@ -71,6 +71,7 @@
65730 #include <linux/workqueue.h>
65731 #include <linux/poll.h>
65732 #include <asm/pgalloc.h>
65733+#include <asm/local.h>
65734 #include "drm.h"
65735
65736 #include <linux/idr.h>
65737@@ -814,7 +815,7 @@ struct drm_driver {
65738 void (*vgaarb_irq)(struct drm_device *dev, bool state);
65739
65740 /* Driver private ops for this object */
65741- struct vm_operations_struct *gem_vm_ops;
65742+ const struct vm_operations_struct *gem_vm_ops;
65743
65744 int major;
65745 int minor;
65746@@ -917,7 +918,7 @@ struct drm_device {
65747
65748 /** \name Usage Counters */
65749 /*@{ */
65750- int open_count; /**< Outstanding files open */
65751+ local_t open_count; /**< Outstanding files open */
65752 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
65753 atomic_t vma_count; /**< Outstanding vma areas open */
65754 int buf_use; /**< Buffers in use -- cannot alloc */
65755@@ -928,7 +929,7 @@ struct drm_device {
65756 /*@{ */
65757 unsigned long counters;
65758 enum drm_stat_type types[15];
65759- atomic_t counts[15];
65760+ atomic_unchecked_t counts[15];
65761 /*@} */
65762
65763 struct list_head filelist;
65764@@ -1016,7 +1017,7 @@ struct drm_device {
65765 struct pci_controller *hose;
65766 #endif
65767 struct drm_sg_mem *sg; /**< Scatter gather memory */
65768- unsigned int num_crtcs; /**< Number of CRTCs on this device */
65769+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
65770 void *dev_private; /**< device private data */
65771 void *mm_private;
65772 struct address_space *dev_mapping;
65773@@ -1042,11 +1043,11 @@ struct drm_device {
65774 spinlock_t object_name_lock;
65775 struct idr object_name_idr;
65776 atomic_t object_count;
65777- atomic_t object_memory;
65778+ atomic_unchecked_t object_memory;
65779 atomic_t pin_count;
65780- atomic_t pin_memory;
65781+ atomic_unchecked_t pin_memory;
65782 atomic_t gtt_count;
65783- atomic_t gtt_memory;
65784+ atomic_unchecked_t gtt_memory;
65785 uint32_t gtt_total;
65786 uint32_t invalidate_domains; /* domains pending invalidation */
65787 uint32_t flush_domains; /* domains pending flush */
65788diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
65789index b29e201..3413cc9 100644
65790--- a/include/drm/drm_crtc_helper.h
65791+++ b/include/drm/drm_crtc_helper.h
65792@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
65793
65794 /* reload the current crtc LUT */
65795 void (*load_lut)(struct drm_crtc *crtc);
65796-};
65797+} __no_const;
65798
65799 struct drm_encoder_helper_funcs {
65800 void (*dpms)(struct drm_encoder *encoder, int mode);
65801@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
65802 struct drm_connector *connector);
65803 /* disable encoder when not in use - more explicit than dpms off */
65804 void (*disable)(struct drm_encoder *encoder);
65805-};
65806+} __no_const;
65807
65808 struct drm_connector_helper_funcs {
65809 int (*get_modes)(struct drm_connector *connector);
65810diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
65811index b199170..6f9e64c 100644
65812--- a/include/drm/ttm/ttm_memory.h
65813+++ b/include/drm/ttm/ttm_memory.h
65814@@ -47,7 +47,7 @@
65815
65816 struct ttm_mem_shrink {
65817 int (*do_shrink) (struct ttm_mem_shrink *);
65818-};
65819+} __no_const;
65820
65821 /**
65822 * struct ttm_mem_global - Global memory accounting structure.
65823diff --git a/include/linux/a.out.h b/include/linux/a.out.h
65824index e86dfca..40cc55f 100644
65825--- a/include/linux/a.out.h
65826+++ b/include/linux/a.out.h
65827@@ -39,6 +39,14 @@ enum machine_type {
65828 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
65829 };
65830
65831+/* Constants for the N_FLAGS field */
65832+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65833+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
65834+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
65835+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
65836+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65837+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65838+
65839 #if !defined (N_MAGIC)
65840 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
65841 #endif
65842diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
65843index 817b237..62c10bc 100644
65844--- a/include/linux/atmdev.h
65845+++ b/include/linux/atmdev.h
65846@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
65847 #endif
65848
65849 struct k_atm_aal_stats {
65850-#define __HANDLE_ITEM(i) atomic_t i
65851+#define __HANDLE_ITEM(i) atomic_unchecked_t i
65852 __AAL_STAT_ITEMS
65853 #undef __HANDLE_ITEM
65854 };
65855diff --git a/include/linux/backlight.h b/include/linux/backlight.h
65856index 0f5f578..8c4f884 100644
65857--- a/include/linux/backlight.h
65858+++ b/include/linux/backlight.h
65859@@ -36,18 +36,18 @@ struct backlight_device;
65860 struct fb_info;
65861
65862 struct backlight_ops {
65863- unsigned int options;
65864+ const unsigned int options;
65865
65866 #define BL_CORE_SUSPENDRESUME (1 << 0)
65867
65868 /* Notify the backlight driver some property has changed */
65869- int (*update_status)(struct backlight_device *);
65870+ int (* const update_status)(struct backlight_device *);
65871 /* Return the current backlight brightness (accounting for power,
65872 fb_blank etc.) */
65873- int (*get_brightness)(struct backlight_device *);
65874+ int (* const get_brightness)(struct backlight_device *);
65875 /* Check if given framebuffer device is the one bound to this backlight;
65876 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
65877- int (*check_fb)(struct fb_info *);
65878+ int (* const check_fb)(struct fb_info *);
65879 };
65880
65881 /* This structure defines all the properties of a backlight */
65882@@ -86,7 +86,7 @@ struct backlight_device {
65883 registered this device has been unloaded, and if class_get_devdata()
65884 points to something in the body of that driver, it is also invalid. */
65885 struct mutex ops_lock;
65886- struct backlight_ops *ops;
65887+ const struct backlight_ops *ops;
65888
65889 /* The framebuffer notifier block */
65890 struct notifier_block fb_notif;
65891@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
65892 }
65893
65894 extern struct backlight_device *backlight_device_register(const char *name,
65895- struct device *dev, void *devdata, struct backlight_ops *ops);
65896+ struct device *dev, void *devdata, const struct backlight_ops *ops);
65897 extern void backlight_device_unregister(struct backlight_device *bd);
65898 extern void backlight_force_update(struct backlight_device *bd,
65899 enum backlight_update_reason reason);
65900diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
65901index a3d802e..93a2ef4 100644
65902--- a/include/linux/binfmts.h
65903+++ b/include/linux/binfmts.h
65904@@ -18,7 +18,7 @@ struct pt_regs;
65905 #define BINPRM_BUF_SIZE 128
65906
65907 #ifdef __KERNEL__
65908-#include <linux/list.h>
65909+#include <linux/sched.h>
65910
65911 #define CORENAME_MAX_SIZE 128
65912
65913@@ -58,6 +58,7 @@ struct linux_binprm{
65914 unsigned interp_flags;
65915 unsigned interp_data;
65916 unsigned long loader, exec;
65917+ char tcomm[TASK_COMM_LEN];
65918 };
65919
65920 extern void acct_arg_size(struct linux_binprm *bprm, unsigned long pages);
65921@@ -83,6 +84,7 @@ struct linux_binfmt {
65922 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
65923 int (*load_shlib)(struct file *);
65924 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
65925+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
65926 unsigned long min_coredump; /* minimal dump size */
65927 int hasvdso;
65928 };
65929diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
65930index 5eb6cb0..a2906d2 100644
65931--- a/include/linux/blkdev.h
65932+++ b/include/linux/blkdev.h
65933@@ -1281,7 +1281,7 @@ struct block_device_operations {
65934 int (*revalidate_disk) (struct gendisk *);
65935 int (*getgeo)(struct block_device *, struct hd_geometry *);
65936 struct module *owner;
65937-};
65938+} __do_const;
65939
65940 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
65941 unsigned long);
65942diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
65943index 3b73b99..629d21b 100644
65944--- a/include/linux/blktrace_api.h
65945+++ b/include/linux/blktrace_api.h
65946@@ -160,7 +160,7 @@ struct blk_trace {
65947 struct dentry *dir;
65948 struct dentry *dropped_file;
65949 struct dentry *msg_file;
65950- atomic_t dropped;
65951+ atomic_unchecked_t dropped;
65952 };
65953
65954 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
65955diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
65956index 83195fb..0b0f77d 100644
65957--- a/include/linux/byteorder/little_endian.h
65958+++ b/include/linux/byteorder/little_endian.h
65959@@ -42,51 +42,51 @@
65960
65961 static inline __le64 __cpu_to_le64p(const __u64 *p)
65962 {
65963- return (__force __le64)*p;
65964+ return (__force const __le64)*p;
65965 }
65966 static inline __u64 __le64_to_cpup(const __le64 *p)
65967 {
65968- return (__force __u64)*p;
65969+ return (__force const __u64)*p;
65970 }
65971 static inline __le32 __cpu_to_le32p(const __u32 *p)
65972 {
65973- return (__force __le32)*p;
65974+ return (__force const __le32)*p;
65975 }
65976 static inline __u32 __le32_to_cpup(const __le32 *p)
65977 {
65978- return (__force __u32)*p;
65979+ return (__force const __u32)*p;
65980 }
65981 static inline __le16 __cpu_to_le16p(const __u16 *p)
65982 {
65983- return (__force __le16)*p;
65984+ return (__force const __le16)*p;
65985 }
65986 static inline __u16 __le16_to_cpup(const __le16 *p)
65987 {
65988- return (__force __u16)*p;
65989+ return (__force const __u16)*p;
65990 }
65991 static inline __be64 __cpu_to_be64p(const __u64 *p)
65992 {
65993- return (__force __be64)__swab64p(p);
65994+ return (__force const __be64)__swab64p(p);
65995 }
65996 static inline __u64 __be64_to_cpup(const __be64 *p)
65997 {
65998- return __swab64p((__u64 *)p);
65999+ return __swab64p((const __u64 *)p);
66000 }
66001 static inline __be32 __cpu_to_be32p(const __u32 *p)
66002 {
66003- return (__force __be32)__swab32p(p);
66004+ return (__force const __be32)__swab32p(p);
66005 }
66006 static inline __u32 __be32_to_cpup(const __be32 *p)
66007 {
66008- return __swab32p((__u32 *)p);
66009+ return __swab32p((const __u32 *)p);
66010 }
66011 static inline __be16 __cpu_to_be16p(const __u16 *p)
66012 {
66013- return (__force __be16)__swab16p(p);
66014+ return (__force const __be16)__swab16p(p);
66015 }
66016 static inline __u16 __be16_to_cpup(const __be16 *p)
66017 {
66018- return __swab16p((__u16 *)p);
66019+ return __swab16p((const __u16 *)p);
66020 }
66021 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
66022 #define __le64_to_cpus(x) do { (void)(x); } while (0)
66023diff --git a/include/linux/cache.h b/include/linux/cache.h
66024index 97e2488..e7576b9 100644
66025--- a/include/linux/cache.h
66026+++ b/include/linux/cache.h
66027@@ -16,6 +16,10 @@
66028 #define __read_mostly
66029 #endif
66030
66031+#ifndef __read_only
66032+#define __read_only __read_mostly
66033+#endif
66034+
66035 #ifndef ____cacheline_aligned
66036 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
66037 #endif
66038diff --git a/include/linux/capability.h b/include/linux/capability.h
66039index c8f2a5f7..1618a5c 100644
66040--- a/include/linux/capability.h
66041+++ b/include/linux/capability.h
66042@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
66043 (security_real_capable_noaudit((t), (cap)) == 0)
66044
66045 extern int capable(int cap);
66046+int capable_nolog(int cap);
66047
66048 /* audit system wants to get cap info from files as well */
66049 struct dentry;
66050diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
66051index 450fa59..86019fb 100644
66052--- a/include/linux/compiler-gcc4.h
66053+++ b/include/linux/compiler-gcc4.h
66054@@ -36,4 +36,16 @@
66055 the kernel context */
66056 #define __cold __attribute__((__cold__))
66057
66058+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
66059+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
66060+#define __bos0(ptr) __bos((ptr), 0)
66061+#define __bos1(ptr) __bos((ptr), 1)
66062+
66063+#if __GNUC_MINOR__ >= 5
66064+#ifdef CONSTIFY_PLUGIN
66065+#define __no_const __attribute__((no_const))
66066+#define __do_const __attribute__((do_const))
66067+#endif
66068+#endif
66069+
66070 #endif
66071diff --git a/include/linux/compiler.h b/include/linux/compiler.h
66072index 04fb513..fd6477b 100644
66073--- a/include/linux/compiler.h
66074+++ b/include/linux/compiler.h
66075@@ -5,11 +5,14 @@
66076
66077 #ifdef __CHECKER__
66078 # define __user __attribute__((noderef, address_space(1)))
66079+# define __force_user __force __user
66080 # define __kernel /* default address space */
66081+# define __force_kernel __force __kernel
66082 # define __safe __attribute__((safe))
66083 # define __force __attribute__((force))
66084 # define __nocast __attribute__((nocast))
66085 # define __iomem __attribute__((noderef, address_space(2)))
66086+# define __force_iomem __force __iomem
66087 # define __acquires(x) __attribute__((context(x,0,1)))
66088 # define __releases(x) __attribute__((context(x,1,0)))
66089 # define __acquire(x) __context__(x,1)
66090@@ -17,13 +20,34 @@
66091 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
66092 extern void __chk_user_ptr(const volatile void __user *);
66093 extern void __chk_io_ptr(const volatile void __iomem *);
66094+#elif defined(CHECKER_PLUGIN)
66095+//# define __user
66096+//# define __force_user
66097+//# define __kernel
66098+//# define __force_kernel
66099+# define __safe
66100+# define __force
66101+# define __nocast
66102+# define __iomem
66103+# define __force_iomem
66104+# define __chk_user_ptr(x) (void)0
66105+# define __chk_io_ptr(x) (void)0
66106+# define __builtin_warning(x, y...) (1)
66107+# define __acquires(x)
66108+# define __releases(x)
66109+# define __acquire(x) (void)0
66110+# define __release(x) (void)0
66111+# define __cond_lock(x,c) (c)
66112 #else
66113 # define __user
66114+# define __force_user
66115 # define __kernel
66116+# define __force_kernel
66117 # define __safe
66118 # define __force
66119 # define __nocast
66120 # define __iomem
66121+# define __force_iomem
66122 # define __chk_user_ptr(x) (void)0
66123 # define __chk_io_ptr(x) (void)0
66124 # define __builtin_warning(x, y...) (1)
66125@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66126 # define __attribute_const__ /* unimplemented */
66127 #endif
66128
66129+#ifndef __no_const
66130+# define __no_const
66131+#endif
66132+
66133+#ifndef __do_const
66134+# define __do_const
66135+#endif
66136+
66137 /*
66138 * Tell gcc if a function is cold. The compiler will assume any path
66139 * directly leading to the call is unlikely.
66140@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66141 #define __cold
66142 #endif
66143
66144+#ifndef __alloc_size
66145+#define __alloc_size(...)
66146+#endif
66147+
66148+#ifndef __bos
66149+#define __bos(ptr, arg)
66150+#endif
66151+
66152+#ifndef __bos0
66153+#define __bos0(ptr)
66154+#endif
66155+
66156+#ifndef __bos1
66157+#define __bos1(ptr)
66158+#endif
66159+
66160 /* Simple shorthand for a section definition */
66161 #ifndef __section
66162 # define __section(S) __attribute__ ((__section__(#S)))
66163@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66164 * use is to mediate communication between process-level code and irq/NMI
66165 * handlers, all running on the same CPU.
66166 */
66167-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66168+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66169+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66170
66171 #endif /* __LINUX_COMPILER_H */
66172diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66173index fd92988..a3164bd 100644
66174--- a/include/linux/crypto.h
66175+++ b/include/linux/crypto.h
66176@@ -394,7 +394,7 @@ struct cipher_tfm {
66177 const u8 *key, unsigned int keylen);
66178 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66179 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66180-};
66181+} __no_const;
66182
66183 struct hash_tfm {
66184 int (*init)(struct hash_desc *desc);
66185@@ -415,13 +415,13 @@ struct compress_tfm {
66186 int (*cot_decompress)(struct crypto_tfm *tfm,
66187 const u8 *src, unsigned int slen,
66188 u8 *dst, unsigned int *dlen);
66189-};
66190+} __no_const;
66191
66192 struct rng_tfm {
66193 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66194 unsigned int dlen);
66195 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66196-};
66197+} __no_const;
66198
66199 #define crt_ablkcipher crt_u.ablkcipher
66200 #define crt_aead crt_u.aead
66201diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66202index 30b93b2..cd7a8db 100644
66203--- a/include/linux/dcache.h
66204+++ b/include/linux/dcache.h
66205@@ -119,6 +119,8 @@ struct dentry {
66206 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66207 };
66208
66209+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66210+
66211 /*
66212 * dentry->d_lock spinlock nesting subclasses:
66213 *
66214diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66215index 3e9bd6a..f4e1aa0 100644
66216--- a/include/linux/decompress/mm.h
66217+++ b/include/linux/decompress/mm.h
66218@@ -78,7 +78,7 @@ static void free(void *where)
66219 * warnings when not needed (indeed large_malloc / large_free are not
66220 * needed by inflate */
66221
66222-#define malloc(a) kmalloc(a, GFP_KERNEL)
66223+#define malloc(a) kmalloc((a), GFP_KERNEL)
66224 #define free(a) kfree(a)
66225
66226 #define large_malloc(a) vmalloc(a)
66227diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66228index 91b7618..92a93d32 100644
66229--- a/include/linux/dma-mapping.h
66230+++ b/include/linux/dma-mapping.h
66231@@ -16,51 +16,51 @@ enum dma_data_direction {
66232 };
66233
66234 struct dma_map_ops {
66235- void* (*alloc_coherent)(struct device *dev, size_t size,
66236+ void* (* const alloc_coherent)(struct device *dev, size_t size,
66237 dma_addr_t *dma_handle, gfp_t gfp);
66238- void (*free_coherent)(struct device *dev, size_t size,
66239+ void (* const free_coherent)(struct device *dev, size_t size,
66240 void *vaddr, dma_addr_t dma_handle);
66241- dma_addr_t (*map_page)(struct device *dev, struct page *page,
66242+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66243 unsigned long offset, size_t size,
66244 enum dma_data_direction dir,
66245 struct dma_attrs *attrs);
66246- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66247+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66248 size_t size, enum dma_data_direction dir,
66249 struct dma_attrs *attrs);
66250- int (*map_sg)(struct device *dev, struct scatterlist *sg,
66251+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66252 int nents, enum dma_data_direction dir,
66253 struct dma_attrs *attrs);
66254- void (*unmap_sg)(struct device *dev,
66255+ void (* const unmap_sg)(struct device *dev,
66256 struct scatterlist *sg, int nents,
66257 enum dma_data_direction dir,
66258 struct dma_attrs *attrs);
66259- void (*sync_single_for_cpu)(struct device *dev,
66260+ void (* const sync_single_for_cpu)(struct device *dev,
66261 dma_addr_t dma_handle, size_t size,
66262 enum dma_data_direction dir);
66263- void (*sync_single_for_device)(struct device *dev,
66264+ void (* const sync_single_for_device)(struct device *dev,
66265 dma_addr_t dma_handle, size_t size,
66266 enum dma_data_direction dir);
66267- void (*sync_single_range_for_cpu)(struct device *dev,
66268+ void (* const sync_single_range_for_cpu)(struct device *dev,
66269 dma_addr_t dma_handle,
66270 unsigned long offset,
66271 size_t size,
66272 enum dma_data_direction dir);
66273- void (*sync_single_range_for_device)(struct device *dev,
66274+ void (* const sync_single_range_for_device)(struct device *dev,
66275 dma_addr_t dma_handle,
66276 unsigned long offset,
66277 size_t size,
66278 enum dma_data_direction dir);
66279- void (*sync_sg_for_cpu)(struct device *dev,
66280+ void (* const sync_sg_for_cpu)(struct device *dev,
66281 struct scatterlist *sg, int nents,
66282 enum dma_data_direction dir);
66283- void (*sync_sg_for_device)(struct device *dev,
66284+ void (* const sync_sg_for_device)(struct device *dev,
66285 struct scatterlist *sg, int nents,
66286 enum dma_data_direction dir);
66287- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66288- int (*dma_supported)(struct device *dev, u64 mask);
66289+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66290+ int (* const dma_supported)(struct device *dev, u64 mask);
66291 int (*set_dma_mask)(struct device *dev, u64 mask);
66292 int is_phys;
66293-};
66294+} __do_const;
66295
66296 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66297
66298diff --git a/include/linux/dst.h b/include/linux/dst.h
66299index e26fed8..b976d9f 100644
66300--- a/include/linux/dst.h
66301+++ b/include/linux/dst.h
66302@@ -380,7 +380,7 @@ struct dst_node
66303 struct thread_pool *pool;
66304
66305 /* Transaction IDs live here */
66306- atomic_long_t gen;
66307+ atomic_long_unchecked_t gen;
66308
66309 /*
66310 * How frequently and how many times transaction
66311diff --git a/include/linux/elf.h b/include/linux/elf.h
66312index 90a4ed0..d652617 100644
66313--- a/include/linux/elf.h
66314+++ b/include/linux/elf.h
66315@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66316 #define PT_GNU_EH_FRAME 0x6474e550
66317
66318 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66319+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66320+
66321+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66322+
66323+/* Constants for the e_flags field */
66324+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66325+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66326+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66327+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66328+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66329+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66330
66331 /* These constants define the different elf file types */
66332 #define ET_NONE 0
66333@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66334 #define DT_DEBUG 21
66335 #define DT_TEXTREL 22
66336 #define DT_JMPREL 23
66337+#define DT_FLAGS 30
66338+ #define DF_TEXTREL 0x00000004
66339 #define DT_ENCODING 32
66340 #define OLD_DT_LOOS 0x60000000
66341 #define DT_LOOS 0x6000000d
66342@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66343 #define PF_W 0x2
66344 #define PF_X 0x1
66345
66346+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66347+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66348+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66349+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66350+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66351+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66352+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66353+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66354+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66355+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66356+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66357+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66358+
66359 typedef struct elf32_phdr{
66360 Elf32_Word p_type;
66361 Elf32_Off p_offset;
66362@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66363 #define EI_OSABI 7
66364 #define EI_PAD 8
66365
66366+#define EI_PAX 14
66367+
66368 #define ELFMAG0 0x7f /* EI_MAG */
66369 #define ELFMAG1 'E'
66370 #define ELFMAG2 'L'
66371@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66372 #define elf_phdr elf32_phdr
66373 #define elf_note elf32_note
66374 #define elf_addr_t Elf32_Off
66375+#define elf_dyn Elf32_Dyn
66376
66377 #else
66378
66379@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66380 #define elf_phdr elf64_phdr
66381 #define elf_note elf64_note
66382 #define elf_addr_t Elf64_Off
66383+#define elf_dyn Elf64_Dyn
66384
66385 #endif
66386
66387diff --git a/include/linux/fs.h b/include/linux/fs.h
66388index 1b9a47a..6fe2934 100644
66389--- a/include/linux/fs.h
66390+++ b/include/linux/fs.h
66391@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66392 unsigned long, unsigned long);
66393
66394 struct address_space_operations {
66395- int (*writepage)(struct page *page, struct writeback_control *wbc);
66396- int (*readpage)(struct file *, struct page *);
66397- void (*sync_page)(struct page *);
66398+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
66399+ int (* const readpage)(struct file *, struct page *);
66400+ void (* const sync_page)(struct page *);
66401
66402 /* Write back some dirty pages from this mapping. */
66403- int (*writepages)(struct address_space *, struct writeback_control *);
66404+ int (* const writepages)(struct address_space *, struct writeback_control *);
66405
66406 /* Set a page dirty. Return true if this dirtied it */
66407- int (*set_page_dirty)(struct page *page);
66408+ int (* const set_page_dirty)(struct page *page);
66409
66410- int (*readpages)(struct file *filp, struct address_space *mapping,
66411+ int (* const readpages)(struct file *filp, struct address_space *mapping,
66412 struct list_head *pages, unsigned nr_pages);
66413
66414- int (*write_begin)(struct file *, struct address_space *mapping,
66415+ int (* const write_begin)(struct file *, struct address_space *mapping,
66416 loff_t pos, unsigned len, unsigned flags,
66417 struct page **pagep, void **fsdata);
66418- int (*write_end)(struct file *, struct address_space *mapping,
66419+ int (* const write_end)(struct file *, struct address_space *mapping,
66420 loff_t pos, unsigned len, unsigned copied,
66421 struct page *page, void *fsdata);
66422
66423 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66424- sector_t (*bmap)(struct address_space *, sector_t);
66425- void (*invalidatepage) (struct page *, unsigned long);
66426- int (*releasepage) (struct page *, gfp_t);
66427- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66428+ sector_t (* const bmap)(struct address_space *, sector_t);
66429+ void (* const invalidatepage) (struct page *, unsigned long);
66430+ int (* const releasepage) (struct page *, gfp_t);
66431+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66432 loff_t offset, unsigned long nr_segs);
66433- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66434+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66435 void **, unsigned long *);
66436 /* migrate the contents of a page to the specified target */
66437- int (*migratepage) (struct address_space *,
66438+ int (* const migratepage) (struct address_space *,
66439 struct page *, struct page *);
66440- int (*launder_page) (struct page *);
66441- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66442+ int (* const launder_page) (struct page *);
66443+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66444 unsigned long);
66445- int (*error_remove_page)(struct address_space *, struct page *);
66446+ int (* const error_remove_page)(struct address_space *, struct page *);
66447 };
66448
66449 /*
66450@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66451 typedef struct files_struct *fl_owner_t;
66452
66453 struct file_lock_operations {
66454- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66455- void (*fl_release_private)(struct file_lock *);
66456+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66457+ void (* const fl_release_private)(struct file_lock *);
66458 };
66459
66460 struct lock_manager_operations {
66461- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66462- void (*fl_notify)(struct file_lock *); /* unblock callback */
66463- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
66464- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66465- void (*fl_release_private)(struct file_lock *);
66466- void (*fl_break)(struct file_lock *);
66467- int (*fl_mylease)(struct file_lock *, struct file_lock *);
66468- int (*fl_change)(struct file_lock **, int);
66469+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
66470+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
66471+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
66472+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66473+ void (* const fl_release_private)(struct file_lock *);
66474+ void (* const fl_break)(struct file_lock *);
66475+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
66476+ int (* const fl_change)(struct file_lock **, int);
66477 };
66478
66479 struct lock_manager {
66480@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
66481 unsigned int fi_flags; /* Flags as passed from user */
66482 unsigned int fi_extents_mapped; /* Number of mapped extents */
66483 unsigned int fi_extents_max; /* Size of fiemap_extent array */
66484- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
66485+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
66486 * array */
66487 };
66488 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
66489@@ -1512,7 +1512,8 @@ struct file_operations {
66490 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
66491 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
66492 int (*setlease)(struct file *, long, struct file_lock **);
66493-};
66494+} __do_const;
66495+typedef struct file_operations __no_const file_operations_no_const;
66496
66497 struct inode_operations {
66498 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
66499@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
66500 unsigned long, loff_t *);
66501
66502 struct super_operations {
66503- struct inode *(*alloc_inode)(struct super_block *sb);
66504- void (*destroy_inode)(struct inode *);
66505+ struct inode *(* const alloc_inode)(struct super_block *sb);
66506+ void (* const destroy_inode)(struct inode *);
66507
66508- void (*dirty_inode) (struct inode *);
66509- int (*write_inode) (struct inode *, int);
66510- void (*drop_inode) (struct inode *);
66511- void (*delete_inode) (struct inode *);
66512- void (*put_super) (struct super_block *);
66513- void (*write_super) (struct super_block *);
66514- int (*sync_fs)(struct super_block *sb, int wait);
66515- int (*freeze_fs) (struct super_block *);
66516- int (*unfreeze_fs) (struct super_block *);
66517- int (*statfs) (struct dentry *, struct kstatfs *);
66518- int (*remount_fs) (struct super_block *, int *, char *);
66519- void (*clear_inode) (struct inode *);
66520- void (*umount_begin) (struct super_block *);
66521+ void (* const dirty_inode) (struct inode *);
66522+ int (* const write_inode) (struct inode *, int);
66523+ void (* const drop_inode) (struct inode *);
66524+ void (* const delete_inode) (struct inode *);
66525+ void (* const put_super) (struct super_block *);
66526+ void (* const write_super) (struct super_block *);
66527+ int (* const sync_fs)(struct super_block *sb, int wait);
66528+ int (* const freeze_fs) (struct super_block *);
66529+ int (* const unfreeze_fs) (struct super_block *);
66530+ int (* const statfs) (struct dentry *, struct kstatfs *);
66531+ int (* const remount_fs) (struct super_block *, int *, char *);
66532+ void (* const clear_inode) (struct inode *);
66533+ void (* const umount_begin) (struct super_block *);
66534
66535- int (*show_options)(struct seq_file *, struct vfsmount *);
66536- int (*show_stats)(struct seq_file *, struct vfsmount *);
66537+ int (* const show_options)(struct seq_file *, struct vfsmount *);
66538+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
66539 #ifdef CONFIG_QUOTA
66540- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
66541- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66542+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
66543+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66544 #endif
66545- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66546+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66547 };
66548
66549 /*
66550diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66551index 78a05bf..2a7d3e1 100644
66552--- a/include/linux/fs_struct.h
66553+++ b/include/linux/fs_struct.h
66554@@ -4,7 +4,7 @@
66555 #include <linux/path.h>
66556
66557 struct fs_struct {
66558- int users;
66559+ atomic_t users;
66560 rwlock_t lock;
66561 int umask;
66562 int in_exec;
66563diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66564index 7be0c6f..2f63a2b 100644
66565--- a/include/linux/fscache-cache.h
66566+++ b/include/linux/fscache-cache.h
66567@@ -116,7 +116,7 @@ struct fscache_operation {
66568 #endif
66569 };
66570
66571-extern atomic_t fscache_op_debug_id;
66572+extern atomic_unchecked_t fscache_op_debug_id;
66573 extern const struct slow_work_ops fscache_op_slow_work_ops;
66574
66575 extern void fscache_enqueue_operation(struct fscache_operation *);
66576@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66577 fscache_operation_release_t release)
66578 {
66579 atomic_set(&op->usage, 1);
66580- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66581+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66582 op->release = release;
66583 INIT_LIST_HEAD(&op->pend_link);
66584 fscache_set_op_state(op, "Init");
66585diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
66586index 4d6f47b..00bcedb 100644
66587--- a/include/linux/fsnotify_backend.h
66588+++ b/include/linux/fsnotify_backend.h
66589@@ -86,6 +86,7 @@ struct fsnotify_ops {
66590 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
66591 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
66592 };
66593+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
66594
66595 /*
66596 * A group is a "thing" that wants to receive notification about filesystem
66597diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66598index 4ec5e67..42f1eb9 100644
66599--- a/include/linux/ftrace_event.h
66600+++ b/include/linux/ftrace_event.h
66601@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
66602 int filter_type);
66603 extern int trace_define_common_fields(struct ftrace_event_call *call);
66604
66605-#define is_signed_type(type) (((type)(-1)) < 0)
66606+#define is_signed_type(type) (((type)(-1)) < (type)1)
66607
66608 int trace_set_clr_event(const char *system, const char *event, int set);
66609
66610diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66611index 297df45..b6a74ff 100644
66612--- a/include/linux/genhd.h
66613+++ b/include/linux/genhd.h
66614@@ -161,7 +161,7 @@ struct gendisk {
66615
66616 struct timer_rand_state *random;
66617
66618- atomic_t sync_io; /* RAID */
66619+ atomic_unchecked_t sync_io; /* RAID */
66620 struct work_struct async_notify;
66621 #ifdef CONFIG_BLK_DEV_INTEGRITY
66622 struct blk_integrity *integrity;
66623diff --git a/include/linux/gracl.h b/include/linux/gracl.h
66624new file mode 100644
66625index 0000000..0dc3943
66626--- /dev/null
66627+++ b/include/linux/gracl.h
66628@@ -0,0 +1,317 @@
66629+#ifndef GR_ACL_H
66630+#define GR_ACL_H
66631+
66632+#include <linux/grdefs.h>
66633+#include <linux/resource.h>
66634+#include <linux/capability.h>
66635+#include <linux/dcache.h>
66636+#include <asm/resource.h>
66637+
66638+/* Major status information */
66639+
66640+#define GR_VERSION "grsecurity 2.2.2"
66641+#define GRSECURITY_VERSION 0x2202
66642+
66643+enum {
66644+ GR_SHUTDOWN = 0,
66645+ GR_ENABLE = 1,
66646+ GR_SPROLE = 2,
66647+ GR_RELOAD = 3,
66648+ GR_SEGVMOD = 4,
66649+ GR_STATUS = 5,
66650+ GR_UNSPROLE = 6,
66651+ GR_PASSSET = 7,
66652+ GR_SPROLEPAM = 8,
66653+};
66654+
66655+/* Password setup definitions
66656+ * kernel/grhash.c */
66657+enum {
66658+ GR_PW_LEN = 128,
66659+ GR_SALT_LEN = 16,
66660+ GR_SHA_LEN = 32,
66661+};
66662+
66663+enum {
66664+ GR_SPROLE_LEN = 64,
66665+};
66666+
66667+enum {
66668+ GR_NO_GLOB = 0,
66669+ GR_REG_GLOB,
66670+ GR_CREATE_GLOB
66671+};
66672+
66673+#define GR_NLIMITS 32
66674+
66675+/* Begin Data Structures */
66676+
66677+struct sprole_pw {
66678+ unsigned char *rolename;
66679+ unsigned char salt[GR_SALT_LEN];
66680+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
66681+};
66682+
66683+struct name_entry {
66684+ __u32 key;
66685+ ino_t inode;
66686+ dev_t device;
66687+ char *name;
66688+ __u16 len;
66689+ __u8 deleted;
66690+ struct name_entry *prev;
66691+ struct name_entry *next;
66692+};
66693+
66694+struct inodev_entry {
66695+ struct name_entry *nentry;
66696+ struct inodev_entry *prev;
66697+ struct inodev_entry *next;
66698+};
66699+
66700+struct acl_role_db {
66701+ struct acl_role_label **r_hash;
66702+ __u32 r_size;
66703+};
66704+
66705+struct inodev_db {
66706+ struct inodev_entry **i_hash;
66707+ __u32 i_size;
66708+};
66709+
66710+struct name_db {
66711+ struct name_entry **n_hash;
66712+ __u32 n_size;
66713+};
66714+
66715+struct crash_uid {
66716+ uid_t uid;
66717+ unsigned long expires;
66718+};
66719+
66720+struct gr_hash_struct {
66721+ void **table;
66722+ void **nametable;
66723+ void *first;
66724+ __u32 table_size;
66725+ __u32 used_size;
66726+ int type;
66727+};
66728+
66729+/* Userspace Grsecurity ACL data structures */
66730+
66731+struct acl_subject_label {
66732+ char *filename;
66733+ ino_t inode;
66734+ dev_t device;
66735+ __u32 mode;
66736+ kernel_cap_t cap_mask;
66737+ kernel_cap_t cap_lower;
66738+ kernel_cap_t cap_invert_audit;
66739+
66740+ struct rlimit res[GR_NLIMITS];
66741+ __u32 resmask;
66742+
66743+ __u8 user_trans_type;
66744+ __u8 group_trans_type;
66745+ uid_t *user_transitions;
66746+ gid_t *group_transitions;
66747+ __u16 user_trans_num;
66748+ __u16 group_trans_num;
66749+
66750+ __u32 sock_families[2];
66751+ __u32 ip_proto[8];
66752+ __u32 ip_type;
66753+ struct acl_ip_label **ips;
66754+ __u32 ip_num;
66755+ __u32 inaddr_any_override;
66756+
66757+ __u32 crashes;
66758+ unsigned long expires;
66759+
66760+ struct acl_subject_label *parent_subject;
66761+ struct gr_hash_struct *hash;
66762+ struct acl_subject_label *prev;
66763+ struct acl_subject_label *next;
66764+
66765+ struct acl_object_label **obj_hash;
66766+ __u32 obj_hash_size;
66767+ __u16 pax_flags;
66768+};
66769+
66770+struct role_allowed_ip {
66771+ __u32 addr;
66772+ __u32 netmask;
66773+
66774+ struct role_allowed_ip *prev;
66775+ struct role_allowed_ip *next;
66776+};
66777+
66778+struct role_transition {
66779+ char *rolename;
66780+
66781+ struct role_transition *prev;
66782+ struct role_transition *next;
66783+};
66784+
66785+struct acl_role_label {
66786+ char *rolename;
66787+ uid_t uidgid;
66788+ __u16 roletype;
66789+
66790+ __u16 auth_attempts;
66791+ unsigned long expires;
66792+
66793+ struct acl_subject_label *root_label;
66794+ struct gr_hash_struct *hash;
66795+
66796+ struct acl_role_label *prev;
66797+ struct acl_role_label *next;
66798+
66799+ struct role_transition *transitions;
66800+ struct role_allowed_ip *allowed_ips;
66801+ uid_t *domain_children;
66802+ __u16 domain_child_num;
66803+
66804+ struct acl_subject_label **subj_hash;
66805+ __u32 subj_hash_size;
66806+};
66807+
66808+struct user_acl_role_db {
66809+ struct acl_role_label **r_table;
66810+ __u32 num_pointers; /* Number of allocations to track */
66811+ __u32 num_roles; /* Number of roles */
66812+ __u32 num_domain_children; /* Number of domain children */
66813+ __u32 num_subjects; /* Number of subjects */
66814+ __u32 num_objects; /* Number of objects */
66815+};
66816+
66817+struct acl_object_label {
66818+ char *filename;
66819+ ino_t inode;
66820+ dev_t device;
66821+ __u32 mode;
66822+
66823+ struct acl_subject_label *nested;
66824+ struct acl_object_label *globbed;
66825+
66826+ /* next two structures not used */
66827+
66828+ struct acl_object_label *prev;
66829+ struct acl_object_label *next;
66830+};
66831+
66832+struct acl_ip_label {
66833+ char *iface;
66834+ __u32 addr;
66835+ __u32 netmask;
66836+ __u16 low, high;
66837+ __u8 mode;
66838+ __u32 type;
66839+ __u32 proto[8];
66840+
66841+ /* next two structures not used */
66842+
66843+ struct acl_ip_label *prev;
66844+ struct acl_ip_label *next;
66845+};
66846+
66847+struct gr_arg {
66848+ struct user_acl_role_db role_db;
66849+ unsigned char pw[GR_PW_LEN];
66850+ unsigned char salt[GR_SALT_LEN];
66851+ unsigned char sum[GR_SHA_LEN];
66852+ unsigned char sp_role[GR_SPROLE_LEN];
66853+ struct sprole_pw *sprole_pws;
66854+ dev_t segv_device;
66855+ ino_t segv_inode;
66856+ uid_t segv_uid;
66857+ __u16 num_sprole_pws;
66858+ __u16 mode;
66859+};
66860+
66861+struct gr_arg_wrapper {
66862+ struct gr_arg *arg;
66863+ __u32 version;
66864+ __u32 size;
66865+};
66866+
66867+struct subject_map {
66868+ struct acl_subject_label *user;
66869+ struct acl_subject_label *kernel;
66870+ struct subject_map *prev;
66871+ struct subject_map *next;
66872+};
66873+
66874+struct acl_subj_map_db {
66875+ struct subject_map **s_hash;
66876+ __u32 s_size;
66877+};
66878+
66879+/* End Data Structures Section */
66880+
66881+/* Hash functions generated by empirical testing by Brad Spengler
66882+ Makes good use of the low bits of the inode. Generally 0-1 times
66883+ in loop for successful match. 0-3 for unsuccessful match.
66884+ Shift/add algorithm with modulus of table size and an XOR*/
66885+
66886+static __inline__ unsigned int
66887+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
66888+{
66889+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
66890+}
66891+
66892+ static __inline__ unsigned int
66893+shash(const struct acl_subject_label *userp, const unsigned int sz)
66894+{
66895+ return ((const unsigned long)userp % sz);
66896+}
66897+
66898+static __inline__ unsigned int
66899+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
66900+{
66901+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
66902+}
66903+
66904+static __inline__ unsigned int
66905+nhash(const char *name, const __u16 len, const unsigned int sz)
66906+{
66907+ return full_name_hash((const unsigned char *)name, len) % sz;
66908+}
66909+
66910+#define FOR_EACH_ROLE_START(role) \
66911+ role = role_list; \
66912+ while (role) {
66913+
66914+#define FOR_EACH_ROLE_END(role) \
66915+ role = role->prev; \
66916+ }
66917+
66918+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
66919+ subj = NULL; \
66920+ iter = 0; \
66921+ while (iter < role->subj_hash_size) { \
66922+ if (subj == NULL) \
66923+ subj = role->subj_hash[iter]; \
66924+ if (subj == NULL) { \
66925+ iter++; \
66926+ continue; \
66927+ }
66928+
66929+#define FOR_EACH_SUBJECT_END(subj,iter) \
66930+ subj = subj->next; \
66931+ if (subj == NULL) \
66932+ iter++; \
66933+ }
66934+
66935+
66936+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
66937+ subj = role->hash->first; \
66938+ while (subj != NULL) {
66939+
66940+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
66941+ subj = subj->next; \
66942+ }
66943+
66944+#endif
66945+
66946diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
66947new file mode 100644
66948index 0000000..323ecf2
66949--- /dev/null
66950+++ b/include/linux/gralloc.h
66951@@ -0,0 +1,9 @@
66952+#ifndef __GRALLOC_H
66953+#define __GRALLOC_H
66954+
66955+void acl_free_all(void);
66956+int acl_alloc_stack_init(unsigned long size);
66957+void *acl_alloc(unsigned long len);
66958+void *acl_alloc_num(unsigned long num, unsigned long len);
66959+
66960+#endif
66961diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
66962new file mode 100644
66963index 0000000..70d6cd5
66964--- /dev/null
66965+++ b/include/linux/grdefs.h
66966@@ -0,0 +1,140 @@
66967+#ifndef GRDEFS_H
66968+#define GRDEFS_H
66969+
66970+/* Begin grsecurity status declarations */
66971+
66972+enum {
66973+ GR_READY = 0x01,
66974+ GR_STATUS_INIT = 0x00 // disabled state
66975+};
66976+
66977+/* Begin ACL declarations */
66978+
66979+/* Role flags */
66980+
66981+enum {
66982+ GR_ROLE_USER = 0x0001,
66983+ GR_ROLE_GROUP = 0x0002,
66984+ GR_ROLE_DEFAULT = 0x0004,
66985+ GR_ROLE_SPECIAL = 0x0008,
66986+ GR_ROLE_AUTH = 0x0010,
66987+ GR_ROLE_NOPW = 0x0020,
66988+ GR_ROLE_GOD = 0x0040,
66989+ GR_ROLE_LEARN = 0x0080,
66990+ GR_ROLE_TPE = 0x0100,
66991+ GR_ROLE_DOMAIN = 0x0200,
66992+ GR_ROLE_PAM = 0x0400,
66993+ GR_ROLE_PERSIST = 0x800
66994+};
66995+
66996+/* ACL Subject and Object mode flags */
66997+enum {
66998+ GR_DELETED = 0x80000000
66999+};
67000+
67001+/* ACL Object-only mode flags */
67002+enum {
67003+ GR_READ = 0x00000001,
67004+ GR_APPEND = 0x00000002,
67005+ GR_WRITE = 0x00000004,
67006+ GR_EXEC = 0x00000008,
67007+ GR_FIND = 0x00000010,
67008+ GR_INHERIT = 0x00000020,
67009+ GR_SETID = 0x00000040,
67010+ GR_CREATE = 0x00000080,
67011+ GR_DELETE = 0x00000100,
67012+ GR_LINK = 0x00000200,
67013+ GR_AUDIT_READ = 0x00000400,
67014+ GR_AUDIT_APPEND = 0x00000800,
67015+ GR_AUDIT_WRITE = 0x00001000,
67016+ GR_AUDIT_EXEC = 0x00002000,
67017+ GR_AUDIT_FIND = 0x00004000,
67018+ GR_AUDIT_INHERIT= 0x00008000,
67019+ GR_AUDIT_SETID = 0x00010000,
67020+ GR_AUDIT_CREATE = 0x00020000,
67021+ GR_AUDIT_DELETE = 0x00040000,
67022+ GR_AUDIT_LINK = 0x00080000,
67023+ GR_PTRACERD = 0x00100000,
67024+ GR_NOPTRACE = 0x00200000,
67025+ GR_SUPPRESS = 0x00400000,
67026+ GR_NOLEARN = 0x00800000,
67027+ GR_INIT_TRANSFER= 0x01000000
67028+};
67029+
67030+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
67031+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
67032+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
67033+
67034+/* ACL subject-only mode flags */
67035+enum {
67036+ GR_KILL = 0x00000001,
67037+ GR_VIEW = 0x00000002,
67038+ GR_PROTECTED = 0x00000004,
67039+ GR_LEARN = 0x00000008,
67040+ GR_OVERRIDE = 0x00000010,
67041+ /* just a placeholder, this mode is only used in userspace */
67042+ GR_DUMMY = 0x00000020,
67043+ GR_PROTSHM = 0x00000040,
67044+ GR_KILLPROC = 0x00000080,
67045+ GR_KILLIPPROC = 0x00000100,
67046+ /* just a placeholder, this mode is only used in userspace */
67047+ GR_NOTROJAN = 0x00000200,
67048+ GR_PROTPROCFD = 0x00000400,
67049+ GR_PROCACCT = 0x00000800,
67050+ GR_RELAXPTRACE = 0x00001000,
67051+ GR_NESTED = 0x00002000,
67052+ GR_INHERITLEARN = 0x00004000,
67053+ GR_PROCFIND = 0x00008000,
67054+ GR_POVERRIDE = 0x00010000,
67055+ GR_KERNELAUTH = 0x00020000,
67056+ GR_ATSECURE = 0x00040000,
67057+ GR_SHMEXEC = 0x00080000
67058+};
67059+
67060+enum {
67061+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
67062+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
67063+ GR_PAX_ENABLE_MPROTECT = 0x0004,
67064+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
67065+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
67066+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
67067+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
67068+ GR_PAX_DISABLE_MPROTECT = 0x0400,
67069+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
67070+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
67071+};
67072+
67073+enum {
67074+ GR_ID_USER = 0x01,
67075+ GR_ID_GROUP = 0x02,
67076+};
67077+
67078+enum {
67079+ GR_ID_ALLOW = 0x01,
67080+ GR_ID_DENY = 0x02,
67081+};
67082+
67083+#define GR_CRASH_RES 31
67084+#define GR_UIDTABLE_MAX 500
67085+
67086+/* begin resource learning section */
67087+enum {
67088+ GR_RLIM_CPU_BUMP = 60,
67089+ GR_RLIM_FSIZE_BUMP = 50000,
67090+ GR_RLIM_DATA_BUMP = 10000,
67091+ GR_RLIM_STACK_BUMP = 1000,
67092+ GR_RLIM_CORE_BUMP = 10000,
67093+ GR_RLIM_RSS_BUMP = 500000,
67094+ GR_RLIM_NPROC_BUMP = 1,
67095+ GR_RLIM_NOFILE_BUMP = 5,
67096+ GR_RLIM_MEMLOCK_BUMP = 50000,
67097+ GR_RLIM_AS_BUMP = 500000,
67098+ GR_RLIM_LOCKS_BUMP = 2,
67099+ GR_RLIM_SIGPENDING_BUMP = 5,
67100+ GR_RLIM_MSGQUEUE_BUMP = 10000,
67101+ GR_RLIM_NICE_BUMP = 1,
67102+ GR_RLIM_RTPRIO_BUMP = 1,
67103+ GR_RLIM_RTTIME_BUMP = 1000000
67104+};
67105+
67106+#endif
67107diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67108new file mode 100644
67109index 0000000..3826b91
67110--- /dev/null
67111+++ b/include/linux/grinternal.h
67112@@ -0,0 +1,219 @@
67113+#ifndef __GRINTERNAL_H
67114+#define __GRINTERNAL_H
67115+
67116+#ifdef CONFIG_GRKERNSEC
67117+
67118+#include <linux/fs.h>
67119+#include <linux/mnt_namespace.h>
67120+#include <linux/nsproxy.h>
67121+#include <linux/gracl.h>
67122+#include <linux/grdefs.h>
67123+#include <linux/grmsg.h>
67124+
67125+void gr_add_learn_entry(const char *fmt, ...)
67126+ __attribute__ ((format (printf, 1, 2)));
67127+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67128+ const struct vfsmount *mnt);
67129+__u32 gr_check_create(const struct dentry *new_dentry,
67130+ const struct dentry *parent,
67131+ const struct vfsmount *mnt, const __u32 mode);
67132+int gr_check_protected_task(const struct task_struct *task);
67133+__u32 to_gr_audit(const __u32 reqmode);
67134+int gr_set_acls(const int type);
67135+int gr_apply_subject_to_task(struct task_struct *task);
67136+int gr_acl_is_enabled(void);
67137+char gr_roletype_to_char(void);
67138+
67139+void gr_handle_alertkill(struct task_struct *task);
67140+char *gr_to_filename(const struct dentry *dentry,
67141+ const struct vfsmount *mnt);
67142+char *gr_to_filename1(const struct dentry *dentry,
67143+ const struct vfsmount *mnt);
67144+char *gr_to_filename2(const struct dentry *dentry,
67145+ const struct vfsmount *mnt);
67146+char *gr_to_filename3(const struct dentry *dentry,
67147+ const struct vfsmount *mnt);
67148+
67149+extern int grsec_enable_ptrace_readexec;
67150+extern int grsec_enable_harden_ptrace;
67151+extern int grsec_enable_link;
67152+extern int grsec_enable_fifo;
67153+extern int grsec_enable_shm;
67154+extern int grsec_enable_execlog;
67155+extern int grsec_enable_signal;
67156+extern int grsec_enable_audit_ptrace;
67157+extern int grsec_enable_forkfail;
67158+extern int grsec_enable_time;
67159+extern int grsec_enable_rofs;
67160+extern int grsec_enable_chroot_shmat;
67161+extern int grsec_enable_chroot_mount;
67162+extern int grsec_enable_chroot_double;
67163+extern int grsec_enable_chroot_pivot;
67164+extern int grsec_enable_chroot_chdir;
67165+extern int grsec_enable_chroot_chmod;
67166+extern int grsec_enable_chroot_mknod;
67167+extern int grsec_enable_chroot_fchdir;
67168+extern int grsec_enable_chroot_nice;
67169+extern int grsec_enable_chroot_execlog;
67170+extern int grsec_enable_chroot_caps;
67171+extern int grsec_enable_chroot_sysctl;
67172+extern int grsec_enable_chroot_unix;
67173+extern int grsec_enable_tpe;
67174+extern int grsec_tpe_gid;
67175+extern int grsec_enable_tpe_all;
67176+extern int grsec_enable_tpe_invert;
67177+extern int grsec_enable_socket_all;
67178+extern int grsec_socket_all_gid;
67179+extern int grsec_enable_socket_client;
67180+extern int grsec_socket_client_gid;
67181+extern int grsec_enable_socket_server;
67182+extern int grsec_socket_server_gid;
67183+extern int grsec_audit_gid;
67184+extern int grsec_enable_group;
67185+extern int grsec_enable_audit_textrel;
67186+extern int grsec_enable_log_rwxmaps;
67187+extern int grsec_enable_mount;
67188+extern int grsec_enable_chdir;
67189+extern int grsec_resource_logging;
67190+extern int grsec_enable_blackhole;
67191+extern int grsec_lastack_retries;
67192+extern int grsec_enable_brute;
67193+extern int grsec_lock;
67194+
67195+extern spinlock_t grsec_alert_lock;
67196+extern unsigned long grsec_alert_wtime;
67197+extern unsigned long grsec_alert_fyet;
67198+
67199+extern spinlock_t grsec_audit_lock;
67200+
67201+extern rwlock_t grsec_exec_file_lock;
67202+
67203+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67204+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67205+ (tsk)->exec_file->f_vfsmnt) : "/")
67206+
67207+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67208+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67209+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67210+
67211+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67212+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
67213+ (tsk)->exec_file->f_vfsmnt) : "/")
67214+
67215+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67216+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67217+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67218+
67219+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67220+
67221+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67222+
67223+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67224+ (task)->pid, (cred)->uid, \
67225+ (cred)->euid, (cred)->gid, (cred)->egid, \
67226+ gr_parent_task_fullpath(task), \
67227+ (task)->real_parent->comm, (task)->real_parent->pid, \
67228+ (pcred)->uid, (pcred)->euid, \
67229+ (pcred)->gid, (pcred)->egid
67230+
67231+#define GR_CHROOT_CAPS {{ \
67232+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67233+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67234+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67235+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67236+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67237+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67238+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
67239+
67240+#define security_learn(normal_msg,args...) \
67241+({ \
67242+ read_lock(&grsec_exec_file_lock); \
67243+ gr_add_learn_entry(normal_msg "\n", ## args); \
67244+ read_unlock(&grsec_exec_file_lock); \
67245+})
67246+
67247+enum {
67248+ GR_DO_AUDIT,
67249+ GR_DONT_AUDIT,
67250+ GR_DONT_AUDIT_GOOD
67251+};
67252+
67253+enum {
67254+ GR_TTYSNIFF,
67255+ GR_RBAC,
67256+ GR_RBAC_STR,
67257+ GR_STR_RBAC,
67258+ GR_RBAC_MODE2,
67259+ GR_RBAC_MODE3,
67260+ GR_FILENAME,
67261+ GR_SYSCTL_HIDDEN,
67262+ GR_NOARGS,
67263+ GR_ONE_INT,
67264+ GR_ONE_INT_TWO_STR,
67265+ GR_ONE_STR,
67266+ GR_STR_INT,
67267+ GR_TWO_STR_INT,
67268+ GR_TWO_INT,
67269+ GR_TWO_U64,
67270+ GR_THREE_INT,
67271+ GR_FIVE_INT_TWO_STR,
67272+ GR_TWO_STR,
67273+ GR_THREE_STR,
67274+ GR_FOUR_STR,
67275+ GR_STR_FILENAME,
67276+ GR_FILENAME_STR,
67277+ GR_FILENAME_TWO_INT,
67278+ GR_FILENAME_TWO_INT_STR,
67279+ GR_TEXTREL,
67280+ GR_PTRACE,
67281+ GR_RESOURCE,
67282+ GR_CAP,
67283+ GR_SIG,
67284+ GR_SIG2,
67285+ GR_CRASH1,
67286+ GR_CRASH2,
67287+ GR_PSACCT,
67288+ GR_RWXMAP
67289+};
67290+
67291+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67292+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67293+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67294+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67295+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67296+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67297+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67298+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67299+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67300+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67301+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67302+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67303+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67304+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67305+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67306+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67307+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67308+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67309+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67310+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67311+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67312+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67313+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67314+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67315+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67316+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67317+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67318+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67319+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67320+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67321+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67322+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67323+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67324+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67325+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67326+
67327+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67328+
67329+#endif
67330+
67331+#endif
67332diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67333new file mode 100644
67334index 0000000..8b9ed56
67335--- /dev/null
67336+++ b/include/linux/grmsg.h
67337@@ -0,0 +1,110 @@
67338+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67339+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67340+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67341+#define GR_STOPMOD_MSG "denied modification of module state by "
67342+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67343+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67344+#define GR_IOPERM_MSG "denied use of ioperm() by "
67345+#define GR_IOPL_MSG "denied use of iopl() by "
67346+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67347+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67348+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67349+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67350+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67351+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67352+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67353+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67354+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67355+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67356+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67357+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67358+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67359+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67360+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67361+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67362+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67363+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67364+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67365+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67366+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67367+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67368+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67369+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67370+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67371+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67372+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67373+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67374+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67375+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67376+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67377+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67378+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67379+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67380+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67381+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
67382+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67383+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67384+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67385+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67386+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67387+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67388+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67389+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67390+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67391+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67392+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67393+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67394+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67395+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67396+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67397+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67398+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67399+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67400+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67401+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67402+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67403+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67404+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67405+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67406+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67407+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67408+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67409+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67410+#define GR_FAILFORK_MSG "failed fork with errno %s by "
67411+#define GR_NICE_CHROOT_MSG "denied priority change by "
67412+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67413+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67414+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67415+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67416+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67417+#define GR_TIME_MSG "time set by "
67418+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67419+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67420+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67421+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67422+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67423+#define GR_BIND_MSG "denied bind() by "
67424+#define GR_CONNECT_MSG "denied connect() by "
67425+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67426+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67427+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67428+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67429+#define GR_CAP_ACL_MSG "use of %s denied for "
67430+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67431+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67432+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67433+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67434+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67435+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67436+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67437+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67438+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67439+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67440+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67441+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67442+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67443+#define GR_VM86_MSG "denied use of vm86 by "
67444+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67445+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67446+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67447+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
67448diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67449new file mode 100644
67450index 0000000..bb1e366
67451--- /dev/null
67452+++ b/include/linux/grsecurity.h
67453@@ -0,0 +1,219 @@
67454+#ifndef GR_SECURITY_H
67455+#define GR_SECURITY_H
67456+#include <linux/fs.h>
67457+#include <linux/fs_struct.h>
67458+#include <linux/binfmts.h>
67459+#include <linux/gracl.h>
67460+#include <linux/compat.h>
67461+
67462+/* notify of brain-dead configs */
67463+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67464+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67465+#endif
67466+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67467+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67468+#endif
67469+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67470+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67471+#endif
67472+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67473+#error "CONFIG_PAX enabled, but no PaX options are enabled."
67474+#endif
67475+
67476+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
67477+void gr_handle_brute_check(void);
67478+void gr_handle_kernel_exploit(void);
67479+int gr_process_user_ban(void);
67480+
67481+char gr_roletype_to_char(void);
67482+
67483+int gr_acl_enable_at_secure(void);
67484+
67485+int gr_check_user_change(int real, int effective, int fs);
67486+int gr_check_group_change(int real, int effective, int fs);
67487+
67488+void gr_del_task_from_ip_table(struct task_struct *p);
67489+
67490+int gr_pid_is_chrooted(struct task_struct *p);
67491+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67492+int gr_handle_chroot_nice(void);
67493+int gr_handle_chroot_sysctl(const int op);
67494+int gr_handle_chroot_setpriority(struct task_struct *p,
67495+ const int niceval);
67496+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67497+int gr_handle_chroot_chroot(const struct dentry *dentry,
67498+ const struct vfsmount *mnt);
67499+void gr_handle_chroot_chdir(struct path *path);
67500+int gr_handle_chroot_chmod(const struct dentry *dentry,
67501+ const struct vfsmount *mnt, const int mode);
67502+int gr_handle_chroot_mknod(const struct dentry *dentry,
67503+ const struct vfsmount *mnt, const int mode);
67504+int gr_handle_chroot_mount(const struct dentry *dentry,
67505+ const struct vfsmount *mnt,
67506+ const char *dev_name);
67507+int gr_handle_chroot_pivot(void);
67508+int gr_handle_chroot_unix(const pid_t pid);
67509+
67510+int gr_handle_rawio(const struct inode *inode);
67511+
67512+void gr_handle_ioperm(void);
67513+void gr_handle_iopl(void);
67514+
67515+int gr_tpe_allow(const struct file *file);
67516+
67517+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67518+void gr_clear_chroot_entries(struct task_struct *task);
67519+
67520+void gr_log_forkfail(const int retval);
67521+void gr_log_timechange(void);
67522+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67523+void gr_log_chdir(const struct dentry *dentry,
67524+ const struct vfsmount *mnt);
67525+void gr_log_chroot_exec(const struct dentry *dentry,
67526+ const struct vfsmount *mnt);
67527+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
67528+#ifdef CONFIG_COMPAT
67529+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
67530+#endif
67531+void gr_log_remount(const char *devname, const int retval);
67532+void gr_log_unmount(const char *devname, const int retval);
67533+void gr_log_mount(const char *from, const char *to, const int retval);
67534+void gr_log_textrel(struct vm_area_struct *vma);
67535+void gr_log_rwxmmap(struct file *file);
67536+void gr_log_rwxmprotect(struct file *file);
67537+
67538+int gr_handle_follow_link(const struct inode *parent,
67539+ const struct inode *inode,
67540+ const struct dentry *dentry,
67541+ const struct vfsmount *mnt);
67542+int gr_handle_fifo(const struct dentry *dentry,
67543+ const struct vfsmount *mnt,
67544+ const struct dentry *dir, const int flag,
67545+ const int acc_mode);
67546+int gr_handle_hardlink(const struct dentry *dentry,
67547+ const struct vfsmount *mnt,
67548+ struct inode *inode,
67549+ const int mode, const char *to);
67550+
67551+int gr_is_capable(const int cap);
67552+int gr_is_capable_nolog(const int cap);
67553+void gr_learn_resource(const struct task_struct *task, const int limit,
67554+ const unsigned long wanted, const int gt);
67555+void gr_copy_label(struct task_struct *tsk);
67556+void gr_handle_crash(struct task_struct *task, const int sig);
67557+int gr_handle_signal(const struct task_struct *p, const int sig);
67558+int gr_check_crash_uid(const uid_t uid);
67559+int gr_check_protected_task(const struct task_struct *task);
67560+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
67561+int gr_acl_handle_mmap(const struct file *file,
67562+ const unsigned long prot);
67563+int gr_acl_handle_mprotect(const struct file *file,
67564+ const unsigned long prot);
67565+int gr_check_hidden_task(const struct task_struct *tsk);
67566+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
67567+ const struct vfsmount *mnt);
67568+__u32 gr_acl_handle_utime(const struct dentry *dentry,
67569+ const struct vfsmount *mnt);
67570+__u32 gr_acl_handle_access(const struct dentry *dentry,
67571+ const struct vfsmount *mnt, const int fmode);
67572+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
67573+ const struct vfsmount *mnt, mode_t mode);
67574+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
67575+ const struct vfsmount *mnt, mode_t mode);
67576+__u32 gr_acl_handle_chown(const struct dentry *dentry,
67577+ const struct vfsmount *mnt);
67578+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
67579+ const struct vfsmount *mnt);
67580+int gr_handle_ptrace(struct task_struct *task, const long request);
67581+int gr_handle_proc_ptrace(struct task_struct *task);
67582+__u32 gr_acl_handle_execve(const struct dentry *dentry,
67583+ const struct vfsmount *mnt);
67584+int gr_check_crash_exec(const struct file *filp);
67585+int gr_acl_is_enabled(void);
67586+void gr_set_kernel_label(struct task_struct *task);
67587+void gr_set_role_label(struct task_struct *task, const uid_t uid,
67588+ const gid_t gid);
67589+int gr_set_proc_label(const struct dentry *dentry,
67590+ const struct vfsmount *mnt,
67591+ const int unsafe_flags);
67592+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
67593+ const struct vfsmount *mnt);
67594+__u32 gr_acl_handle_open(const struct dentry *dentry,
67595+ const struct vfsmount *mnt, int acc_mode);
67596+__u32 gr_acl_handle_creat(const struct dentry *dentry,
67597+ const struct dentry *p_dentry,
67598+ const struct vfsmount *p_mnt,
67599+ int open_flags, int acc_mode, const int imode);
67600+void gr_handle_create(const struct dentry *dentry,
67601+ const struct vfsmount *mnt);
67602+void gr_handle_proc_create(const struct dentry *dentry,
67603+ const struct inode *inode);
67604+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
67605+ const struct dentry *parent_dentry,
67606+ const struct vfsmount *parent_mnt,
67607+ const int mode);
67608+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
67609+ const struct dentry *parent_dentry,
67610+ const struct vfsmount *parent_mnt);
67611+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
67612+ const struct vfsmount *mnt);
67613+void gr_handle_delete(const ino_t ino, const dev_t dev);
67614+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
67615+ const struct vfsmount *mnt);
67616+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
67617+ const struct dentry *parent_dentry,
67618+ const struct vfsmount *parent_mnt,
67619+ const char *from);
67620+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
67621+ const struct dentry *parent_dentry,
67622+ const struct vfsmount *parent_mnt,
67623+ const struct dentry *old_dentry,
67624+ const struct vfsmount *old_mnt, const char *to);
67625+int gr_acl_handle_rename(struct dentry *new_dentry,
67626+ struct dentry *parent_dentry,
67627+ const struct vfsmount *parent_mnt,
67628+ struct dentry *old_dentry,
67629+ struct inode *old_parent_inode,
67630+ struct vfsmount *old_mnt, const char *newname);
67631+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67632+ struct dentry *old_dentry,
67633+ struct dentry *new_dentry,
67634+ struct vfsmount *mnt, const __u8 replace);
67635+__u32 gr_check_link(const struct dentry *new_dentry,
67636+ const struct dentry *parent_dentry,
67637+ const struct vfsmount *parent_mnt,
67638+ const struct dentry *old_dentry,
67639+ const struct vfsmount *old_mnt);
67640+int gr_acl_handle_filldir(const struct file *file, const char *name,
67641+ const unsigned int namelen, const ino_t ino);
67642+
67643+__u32 gr_acl_handle_unix(const struct dentry *dentry,
67644+ const struct vfsmount *mnt);
67645+void gr_acl_handle_exit(void);
67646+void gr_acl_handle_psacct(struct task_struct *task, const long code);
67647+int gr_acl_handle_procpidmem(const struct task_struct *task);
67648+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
67649+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
67650+void gr_audit_ptrace(struct task_struct *task);
67651+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
67652+
67653+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
67654+
67655+#ifdef CONFIG_GRKERNSEC
67656+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
67657+void gr_handle_vm86(void);
67658+void gr_handle_mem_readwrite(u64 from, u64 to);
67659+
67660+void gr_log_badprocpid(const char *entry);
67661+
67662+extern int grsec_enable_dmesg;
67663+extern int grsec_disable_privio;
67664+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67665+extern int grsec_enable_chroot_findtask;
67666+#endif
67667+#ifdef CONFIG_GRKERNSEC_SETXID
67668+extern int grsec_enable_setxid;
67669+#endif
67670+#endif
67671+
67672+#endif
67673diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
67674index 6a87154..a3ce57b 100644
67675--- a/include/linux/hdpu_features.h
67676+++ b/include/linux/hdpu_features.h
67677@@ -3,7 +3,7 @@
67678 struct cpustate_t {
67679 spinlock_t lock;
67680 int excl;
67681- int open_count;
67682+ atomic_t open_count;
67683 unsigned char cached_val;
67684 int inited;
67685 unsigned long *set_addr;
67686diff --git a/include/linux/highmem.h b/include/linux/highmem.h
67687index 211ff44..00ab6d7 100644
67688--- a/include/linux/highmem.h
67689+++ b/include/linux/highmem.h
67690@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
67691 kunmap_atomic(kaddr, KM_USER0);
67692 }
67693
67694+static inline void sanitize_highpage(struct page *page)
67695+{
67696+ void *kaddr;
67697+ unsigned long flags;
67698+
67699+ local_irq_save(flags);
67700+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
67701+ clear_page(kaddr);
67702+ kunmap_atomic(kaddr, KM_CLEARPAGE);
67703+ local_irq_restore(flags);
67704+}
67705+
67706 static inline void zero_user_segments(struct page *page,
67707 unsigned start1, unsigned end1,
67708 unsigned start2, unsigned end2)
67709diff --git a/include/linux/i2c.h b/include/linux/i2c.h
67710index 7b40cda..24eb44e 100644
67711--- a/include/linux/i2c.h
67712+++ b/include/linux/i2c.h
67713@@ -325,6 +325,7 @@ struct i2c_algorithm {
67714 /* To determine what the adapter supports */
67715 u32 (*functionality) (struct i2c_adapter *);
67716 };
67717+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
67718
67719 /*
67720 * i2c_adapter is the structure used to identify a physical i2c bus along
67721diff --git a/include/linux/i2o.h b/include/linux/i2o.h
67722index 4c4e57d..f3c5303 100644
67723--- a/include/linux/i2o.h
67724+++ b/include/linux/i2o.h
67725@@ -564,7 +564,7 @@ struct i2o_controller {
67726 struct i2o_device *exec; /* Executive */
67727 #if BITS_PER_LONG == 64
67728 spinlock_t context_list_lock; /* lock for context_list */
67729- atomic_t context_list_counter; /* needed for unique contexts */
67730+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
67731 struct list_head context_list; /* list of context id's
67732 and pointers */
67733 #endif
67734diff --git a/include/linux/init_task.h b/include/linux/init_task.h
67735index 21a6f5d..dc42eab 100644
67736--- a/include/linux/init_task.h
67737+++ b/include/linux/init_task.h
67738@@ -83,6 +83,12 @@ extern struct group_info init_groups;
67739 #define INIT_IDS
67740 #endif
67741
67742+#ifdef CONFIG_X86
67743+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
67744+#else
67745+#define INIT_TASK_THREAD_INFO
67746+#endif
67747+
67748 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
67749 /*
67750 * Because of the reduced scope of CAP_SETPCAP when filesystem
67751@@ -156,6 +162,7 @@ extern struct cred init_cred;
67752 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
67753 .comm = "swapper", \
67754 .thread = INIT_THREAD, \
67755+ INIT_TASK_THREAD_INFO \
67756 .fs = &init_fs, \
67757 .files = &init_files, \
67758 .signal = &init_signals, \
67759diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
67760index 4f0a72a..a849599 100644
67761--- a/include/linux/intel-iommu.h
67762+++ b/include/linux/intel-iommu.h
67763@@ -296,7 +296,7 @@ struct iommu_flush {
67764 u8 fm, u64 type);
67765 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
67766 unsigned int size_order, u64 type);
67767-};
67768+} __no_const;
67769
67770 enum {
67771 SR_DMAR_FECTL_REG,
67772diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
67773index c739150..be577b5 100644
67774--- a/include/linux/interrupt.h
67775+++ b/include/linux/interrupt.h
67776@@ -369,7 +369,7 @@ enum
67777 /* map softirq index to softirq name. update 'softirq_to_name' in
67778 * kernel/softirq.c when adding a new softirq.
67779 */
67780-extern char *softirq_to_name[NR_SOFTIRQS];
67781+extern const char * const softirq_to_name[NR_SOFTIRQS];
67782
67783 /* softirq mask and active fields moved to irq_cpustat_t in
67784 * asm/hardirq.h to get better cache usage. KAO
67785@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
67786
67787 struct softirq_action
67788 {
67789- void (*action)(struct softirq_action *);
67790+ void (*action)(void);
67791 };
67792
67793 asmlinkage void do_softirq(void);
67794 asmlinkage void __do_softirq(void);
67795-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
67796+extern void open_softirq(int nr, void (*action)(void));
67797 extern void softirq_init(void);
67798 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
67799 extern void raise_softirq_irqoff(unsigned int nr);
67800diff --git a/include/linux/irq.h b/include/linux/irq.h
67801index 9e5f45a..025865b 100644
67802--- a/include/linux/irq.h
67803+++ b/include/linux/irq.h
67804@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
67805 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
67806 bool boot)
67807 {
67808+#ifdef CONFIG_CPUMASK_OFFSTACK
67809 gfp_t gfp = GFP_ATOMIC;
67810
67811 if (boot)
67812 gfp = GFP_NOWAIT;
67813
67814-#ifdef CONFIG_CPUMASK_OFFSTACK
67815 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
67816 return false;
67817
67818diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
67819index 7922742..27306a2 100644
67820--- a/include/linux/kallsyms.h
67821+++ b/include/linux/kallsyms.h
67822@@ -15,7 +15,8 @@
67823
67824 struct module;
67825
67826-#ifdef CONFIG_KALLSYMS
67827+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
67828+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67829 /* Lookup the address for a symbol. Returns 0 if not found. */
67830 unsigned long kallsyms_lookup_name(const char *name);
67831
67832@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
67833 /* Stupid that this does nothing, but I didn't create this mess. */
67834 #define __print_symbol(fmt, addr)
67835 #endif /*CONFIG_KALLSYMS*/
67836+#else /* when included by kallsyms.c, vsnprintf.c, or
67837+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
67838+extern void __print_symbol(const char *fmt, unsigned long address);
67839+extern int sprint_symbol(char *buffer, unsigned long address);
67840+const char *kallsyms_lookup(unsigned long addr,
67841+ unsigned long *symbolsize,
67842+ unsigned long *offset,
67843+ char **modname, char *namebuf);
67844+#endif
67845
67846 /* This macro allows us to keep printk typechecking */
67847 static void __check_printsym_format(const char *fmt, ...)
67848diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
67849index 6adcc29..13369e8 100644
67850--- a/include/linux/kgdb.h
67851+++ b/include/linux/kgdb.h
67852@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
67853
67854 extern int kgdb_connected;
67855
67856-extern atomic_t kgdb_setting_breakpoint;
67857-extern atomic_t kgdb_cpu_doing_single_step;
67858+extern atomic_unchecked_t kgdb_setting_breakpoint;
67859+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
67860
67861 extern struct task_struct *kgdb_usethread;
67862 extern struct task_struct *kgdb_contthread;
67863@@ -235,7 +235,7 @@ struct kgdb_arch {
67864 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
67865 void (*remove_all_hw_break)(void);
67866 void (*correct_hw_break)(void);
67867-};
67868+} __do_const;
67869
67870 /**
67871 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
67872@@ -257,14 +257,14 @@ struct kgdb_io {
67873 int (*init) (void);
67874 void (*pre_exception) (void);
67875 void (*post_exception) (void);
67876-};
67877+} __do_const;
67878
67879-extern struct kgdb_arch arch_kgdb_ops;
67880+extern const struct kgdb_arch arch_kgdb_ops;
67881
67882 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
67883
67884-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
67885-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
67886+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
67887+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
67888
67889 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
67890 extern int kgdb_mem2hex(char *mem, char *buf, int count);
67891diff --git a/include/linux/kmod.h b/include/linux/kmod.h
67892index 384ca8b..83dd97d 100644
67893--- a/include/linux/kmod.h
67894+++ b/include/linux/kmod.h
67895@@ -31,6 +31,8 @@
67896 * usually useless though. */
67897 extern int __request_module(bool wait, const char *name, ...) \
67898 __attribute__((format(printf, 2, 3)));
67899+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
67900+ __attribute__((format(printf, 3, 4)));
67901 #define request_module(mod...) __request_module(true, mod)
67902 #define request_module_nowait(mod...) __request_module(false, mod)
67903 #define try_then_request_module(x, mod...) \
67904diff --git a/include/linux/kobject.h b/include/linux/kobject.h
67905index 58ae8e0..3950d3c 100644
67906--- a/include/linux/kobject.h
67907+++ b/include/linux/kobject.h
67908@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
67909
67910 struct kobj_type {
67911 void (*release)(struct kobject *kobj);
67912- struct sysfs_ops *sysfs_ops;
67913+ const struct sysfs_ops *sysfs_ops;
67914 struct attribute **default_attrs;
67915 };
67916
67917@@ -118,9 +118,9 @@ struct kobj_uevent_env {
67918 };
67919
67920 struct kset_uevent_ops {
67921- int (*filter)(struct kset *kset, struct kobject *kobj);
67922- const char *(*name)(struct kset *kset, struct kobject *kobj);
67923- int (*uevent)(struct kset *kset, struct kobject *kobj,
67924+ int (* const filter)(struct kset *kset, struct kobject *kobj);
67925+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
67926+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
67927 struct kobj_uevent_env *env);
67928 };
67929
67930@@ -132,7 +132,7 @@ struct kobj_attribute {
67931 const char *buf, size_t count);
67932 };
67933
67934-extern struct sysfs_ops kobj_sysfs_ops;
67935+extern const struct sysfs_ops kobj_sysfs_ops;
67936
67937 /**
67938 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
67939@@ -155,14 +155,14 @@ struct kset {
67940 struct list_head list;
67941 spinlock_t list_lock;
67942 struct kobject kobj;
67943- struct kset_uevent_ops *uevent_ops;
67944+ const struct kset_uevent_ops *uevent_ops;
67945 };
67946
67947 extern void kset_init(struct kset *kset);
67948 extern int __must_check kset_register(struct kset *kset);
67949 extern void kset_unregister(struct kset *kset);
67950 extern struct kset * __must_check kset_create_and_add(const char *name,
67951- struct kset_uevent_ops *u,
67952+ const struct kset_uevent_ops *u,
67953 struct kobject *parent_kobj);
67954
67955 static inline struct kset *to_kset(struct kobject *kobj)
67956diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
67957index c728a50..752d821 100644
67958--- a/include/linux/kvm_host.h
67959+++ b/include/linux/kvm_host.h
67960@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
67961 void vcpu_load(struct kvm_vcpu *vcpu);
67962 void vcpu_put(struct kvm_vcpu *vcpu);
67963
67964-int kvm_init(void *opaque, unsigned int vcpu_size,
67965+int kvm_init(const void *opaque, unsigned int vcpu_size,
67966 struct module *module);
67967 void kvm_exit(void);
67968
67969@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
67970 struct kvm_guest_debug *dbg);
67971 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
67972
67973-int kvm_arch_init(void *opaque);
67974+int kvm_arch_init(const void *opaque);
67975 void kvm_arch_exit(void);
67976
67977 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
67978diff --git a/include/linux/libata.h b/include/linux/libata.h
67979index a069916..223edde 100644
67980--- a/include/linux/libata.h
67981+++ b/include/linux/libata.h
67982@@ -525,11 +525,11 @@ struct ata_ioports {
67983
67984 struct ata_host {
67985 spinlock_t lock;
67986- struct device *dev;
67987+ struct device *dev;
67988 void __iomem * const *iomap;
67989 unsigned int n_ports;
67990 void *private_data;
67991- struct ata_port_operations *ops;
67992+ const struct ata_port_operations *ops;
67993 unsigned long flags;
67994 #ifdef CONFIG_ATA_ACPI
67995 acpi_handle acpi_handle;
67996@@ -710,7 +710,7 @@ struct ata_link {
67997
67998 struct ata_port {
67999 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
68000- struct ata_port_operations *ops;
68001+ const struct ata_port_operations *ops;
68002 spinlock_t *lock;
68003 /* Flags owned by the EH context. Only EH should touch these once the
68004 port is active */
68005@@ -884,7 +884,7 @@ struct ata_port_operations {
68006 * fields must be pointers.
68007 */
68008 const struct ata_port_operations *inherits;
68009-};
68010+} __do_const;
68011
68012 struct ata_port_info {
68013 unsigned long flags;
68014@@ -892,7 +892,7 @@ struct ata_port_info {
68015 unsigned long pio_mask;
68016 unsigned long mwdma_mask;
68017 unsigned long udma_mask;
68018- struct ata_port_operations *port_ops;
68019+ const struct ata_port_operations *port_ops;
68020 void *private_data;
68021 };
68022
68023@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
68024 extern const unsigned long sata_deb_timing_hotplug[];
68025 extern const unsigned long sata_deb_timing_long[];
68026
68027-extern struct ata_port_operations ata_dummy_port_ops;
68028+extern const struct ata_port_operations ata_dummy_port_ops;
68029 extern const struct ata_port_info ata_dummy_port_info;
68030
68031 static inline const unsigned long *
68032@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
68033 struct scsi_host_template *sht);
68034 extern void ata_host_detach(struct ata_host *host);
68035 extern void ata_host_init(struct ata_host *, struct device *,
68036- unsigned long, struct ata_port_operations *);
68037+ unsigned long, const struct ata_port_operations *);
68038 extern int ata_scsi_detect(struct scsi_host_template *sht);
68039 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
68040 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
68041diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
68042index fbc48f8..0886e57 100644
68043--- a/include/linux/lockd/bind.h
68044+++ b/include/linux/lockd/bind.h
68045@@ -23,13 +23,13 @@ struct svc_rqst;
68046 * This is the set of functions for lockd->nfsd communication
68047 */
68048 struct nlmsvc_binding {
68049- __be32 (*fopen)(struct svc_rqst *,
68050+ __be32 (* const fopen)(struct svc_rqst *,
68051 struct nfs_fh *,
68052 struct file **);
68053- void (*fclose)(struct file *);
68054+ void (* const fclose)(struct file *);
68055 };
68056
68057-extern struct nlmsvc_binding * nlmsvc_ops;
68058+extern const struct nlmsvc_binding * nlmsvc_ops;
68059
68060 /*
68061 * Similar to nfs_client_initdata, but without the NFS-specific
68062diff --git a/include/linux/mca.h b/include/linux/mca.h
68063index 3797270..7765ede 100644
68064--- a/include/linux/mca.h
68065+++ b/include/linux/mca.h
68066@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
68067 int region);
68068 void * (*mca_transform_memory)(struct mca_device *,
68069 void *memory);
68070-};
68071+} __no_const;
68072
68073 struct mca_bus {
68074 u64 default_dma_mask;
68075diff --git a/include/linux/memory.h b/include/linux/memory.h
68076index 37fa19b..b597c85 100644
68077--- a/include/linux/memory.h
68078+++ b/include/linux/memory.h
68079@@ -108,7 +108,7 @@ struct memory_accessor {
68080 size_t count);
68081 ssize_t (*write)(struct memory_accessor *, const char *buf,
68082 off_t offset, size_t count);
68083-};
68084+} __no_const;
68085
68086 /*
68087 * Kernel text modification mutex, used for code patching. Users of this lock
68088diff --git a/include/linux/mm.h b/include/linux/mm.h
68089index 11e5be6..1ff2423 100644
68090--- a/include/linux/mm.h
68091+++ b/include/linux/mm.h
68092@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
68093
68094 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
68095 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
68096+
68097+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68098+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
68099+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
68100+#else
68101 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
68102+#endif
68103+
68104 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
68105 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
68106
68107@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
68108 int set_page_dirty_lock(struct page *page);
68109 int clear_page_dirty_for_io(struct page *page);
68110
68111-/* Is the vma a continuation of the stack vma above it? */
68112-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
68113-{
68114- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68115-}
68116-
68117 extern unsigned long move_page_tables(struct vm_area_struct *vma,
68118 unsigned long old_addr, struct vm_area_struct *new_vma,
68119 unsigned long new_addr, unsigned long len);
68120@@ -890,6 +891,8 @@ struct shrinker {
68121 extern void register_shrinker(struct shrinker *);
68122 extern void unregister_shrinker(struct shrinker *);
68123
68124+pgprot_t vm_get_page_prot(unsigned long vm_flags);
68125+
68126 int vma_wants_writenotify(struct vm_area_struct *vma);
68127
68128 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
68129@@ -1162,6 +1165,7 @@ out:
68130 }
68131
68132 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68133+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68134
68135 extern unsigned long do_brk(unsigned long, unsigned long);
68136
68137@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68138 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68139 struct vm_area_struct **pprev);
68140
68141+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68142+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68143+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68144+
68145 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68146 NULL if none. Assume start_addr < end_addr. */
68147 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68148@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
68149 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
68150 }
68151
68152-pgprot_t vm_get_page_prot(unsigned long vm_flags);
68153 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
68154 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
68155 unsigned long pfn, unsigned long size, pgprot_t);
68156@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
68157 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
68158 extern int sysctl_memory_failure_early_kill;
68159 extern int sysctl_memory_failure_recovery;
68160-extern atomic_long_t mce_bad_pages;
68161+extern atomic_long_unchecked_t mce_bad_pages;
68162+
68163+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68164+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68165+#else
68166+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68167+#endif
68168
68169 #endif /* __KERNEL__ */
68170 #endif /* _LINUX_MM_H */
68171diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68172index 9d12ed5..6d9707a 100644
68173--- a/include/linux/mm_types.h
68174+++ b/include/linux/mm_types.h
68175@@ -186,6 +186,8 @@ struct vm_area_struct {
68176 #ifdef CONFIG_NUMA
68177 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68178 #endif
68179+
68180+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68181 };
68182
68183 struct core_thread {
68184@@ -287,6 +289,24 @@ struct mm_struct {
68185 #ifdef CONFIG_MMU_NOTIFIER
68186 struct mmu_notifier_mm *mmu_notifier_mm;
68187 #endif
68188+
68189+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68190+ unsigned long pax_flags;
68191+#endif
68192+
68193+#ifdef CONFIG_PAX_DLRESOLVE
68194+ unsigned long call_dl_resolve;
68195+#endif
68196+
68197+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68198+ unsigned long call_syscall;
68199+#endif
68200+
68201+#ifdef CONFIG_PAX_ASLR
68202+ unsigned long delta_mmap; /* randomized offset */
68203+ unsigned long delta_stack; /* randomized offset */
68204+#endif
68205+
68206 };
68207
68208 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68209diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68210index 4e02ee2..afb159e 100644
68211--- a/include/linux/mmu_notifier.h
68212+++ b/include/linux/mmu_notifier.h
68213@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68214 */
68215 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68216 ({ \
68217- pte_t __pte; \
68218+ pte_t ___pte; \
68219 struct vm_area_struct *___vma = __vma; \
68220 unsigned long ___address = __address; \
68221- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68222+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68223 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68224- __pte; \
68225+ ___pte; \
68226 })
68227
68228 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68229diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68230index 6c31a2a..4b0e930 100644
68231--- a/include/linux/mmzone.h
68232+++ b/include/linux/mmzone.h
68233@@ -350,7 +350,7 @@ struct zone {
68234 unsigned long flags; /* zone flags, see below */
68235
68236 /* Zone statistics */
68237- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68238+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68239
68240 /*
68241 * prev_priority holds the scanning priority for this zone. It is
68242diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68243index f58e9d8..3503935 100644
68244--- a/include/linux/mod_devicetable.h
68245+++ b/include/linux/mod_devicetable.h
68246@@ -12,7 +12,7 @@
68247 typedef unsigned long kernel_ulong_t;
68248 #endif
68249
68250-#define PCI_ANY_ID (~0)
68251+#define PCI_ANY_ID ((__u16)~0)
68252
68253 struct pci_device_id {
68254 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68255@@ -131,7 +131,7 @@ struct usb_device_id {
68256 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68257 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68258
68259-#define HID_ANY_ID (~0)
68260+#define HID_ANY_ID (~0U)
68261
68262 struct hid_device_id {
68263 __u16 bus;
68264diff --git a/include/linux/module.h b/include/linux/module.h
68265index 482efc8..642032b 100644
68266--- a/include/linux/module.h
68267+++ b/include/linux/module.h
68268@@ -16,6 +16,7 @@
68269 #include <linux/kobject.h>
68270 #include <linux/moduleparam.h>
68271 #include <linux/tracepoint.h>
68272+#include <linux/fs.h>
68273
68274 #include <asm/local.h>
68275 #include <asm/module.h>
68276@@ -287,16 +288,16 @@ struct module
68277 int (*init)(void);
68278
68279 /* If this is non-NULL, vfree after init() returns */
68280- void *module_init;
68281+ void *module_init_rx, *module_init_rw;
68282
68283 /* Here is the actual code + data, vfree'd on unload. */
68284- void *module_core;
68285+ void *module_core_rx, *module_core_rw;
68286
68287 /* Here are the sizes of the init and core sections */
68288- unsigned int init_size, core_size;
68289+ unsigned int init_size_rw, core_size_rw;
68290
68291 /* The size of the executable code in each section. */
68292- unsigned int init_text_size, core_text_size;
68293+ unsigned int init_size_rx, core_size_rx;
68294
68295 /* Arch-specific module values */
68296 struct mod_arch_specific arch;
68297@@ -345,6 +346,10 @@ struct module
68298 #ifdef CONFIG_EVENT_TRACING
68299 struct ftrace_event_call *trace_events;
68300 unsigned int num_trace_events;
68301+ struct file_operations trace_id;
68302+ struct file_operations trace_enable;
68303+ struct file_operations trace_format;
68304+ struct file_operations trace_filter;
68305 #endif
68306 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68307 unsigned long *ftrace_callsites;
68308@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68309 bool is_module_address(unsigned long addr);
68310 bool is_module_text_address(unsigned long addr);
68311
68312+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68313+{
68314+
68315+#ifdef CONFIG_PAX_KERNEXEC
68316+ if (ktla_ktva(addr) >= (unsigned long)start &&
68317+ ktla_ktva(addr) < (unsigned long)start + size)
68318+ return 1;
68319+#endif
68320+
68321+ return ((void *)addr >= start && (void *)addr < start + size);
68322+}
68323+
68324+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68325+{
68326+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68327+}
68328+
68329+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68330+{
68331+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68332+}
68333+
68334+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68335+{
68336+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68337+}
68338+
68339+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68340+{
68341+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68342+}
68343+
68344 static inline int within_module_core(unsigned long addr, struct module *mod)
68345 {
68346- return (unsigned long)mod->module_core <= addr &&
68347- addr < (unsigned long)mod->module_core + mod->core_size;
68348+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68349 }
68350
68351 static inline int within_module_init(unsigned long addr, struct module *mod)
68352 {
68353- return (unsigned long)mod->module_init <= addr &&
68354- addr < (unsigned long)mod->module_init + mod->init_size;
68355+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68356 }
68357
68358 /* Search for module by name: must hold module_mutex. */
68359diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68360index c1f40c2..682ca53 100644
68361--- a/include/linux/moduleloader.h
68362+++ b/include/linux/moduleloader.h
68363@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68364 sections. Returns NULL on failure. */
68365 void *module_alloc(unsigned long size);
68366
68367+#ifdef CONFIG_PAX_KERNEXEC
68368+void *module_alloc_exec(unsigned long size);
68369+#else
68370+#define module_alloc_exec(x) module_alloc(x)
68371+#endif
68372+
68373 /* Free memory returned from module_alloc. */
68374 void module_free(struct module *mod, void *module_region);
68375
68376+#ifdef CONFIG_PAX_KERNEXEC
68377+void module_free_exec(struct module *mod, void *module_region);
68378+#else
68379+#define module_free_exec(x, y) module_free((x), (y))
68380+#endif
68381+
68382 /* Apply the given relocation to the (simplified) ELF. Return -error
68383 or 0. */
68384 int apply_relocate(Elf_Shdr *sechdrs,
68385diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68386index 82a9124..8a5f622 100644
68387--- a/include/linux/moduleparam.h
68388+++ b/include/linux/moduleparam.h
68389@@ -132,7 +132,7 @@ struct kparam_array
68390
68391 /* Actually copy string: maxlen param is usually sizeof(string). */
68392 #define module_param_string(name, string, len, perm) \
68393- static const struct kparam_string __param_string_##name \
68394+ static const struct kparam_string __param_string_##name __used \
68395 = { len, string }; \
68396 __module_param_call(MODULE_PARAM_PREFIX, name, \
68397 param_set_copystring, param_get_string, \
68398@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68399
68400 /* Comma-separated array: *nump is set to number they actually specified. */
68401 #define module_param_array_named(name, array, type, nump, perm) \
68402- static const struct kparam_array __param_arr_##name \
68403+ static const struct kparam_array __param_arr_##name __used \
68404 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68405 sizeof(array[0]), array }; \
68406 __module_param_call(MODULE_PARAM_PREFIX, name, \
68407diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68408index 878cab4..c92cb3e 100644
68409--- a/include/linux/mutex.h
68410+++ b/include/linux/mutex.h
68411@@ -51,7 +51,7 @@ struct mutex {
68412 spinlock_t wait_lock;
68413 struct list_head wait_list;
68414 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68415- struct thread_info *owner;
68416+ struct task_struct *owner;
68417 #endif
68418 #ifdef CONFIG_DEBUG_MUTEXES
68419 const char *name;
68420diff --git a/include/linux/namei.h b/include/linux/namei.h
68421index ec0f607..d19e675 100644
68422--- a/include/linux/namei.h
68423+++ b/include/linux/namei.h
68424@@ -22,7 +22,7 @@ struct nameidata {
68425 unsigned int flags;
68426 int last_type;
68427 unsigned depth;
68428- char *saved_names[MAX_NESTED_LINKS + 1];
68429+ const char *saved_names[MAX_NESTED_LINKS + 1];
68430
68431 /* Intent data */
68432 union {
68433@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68434 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68435 extern void unlock_rename(struct dentry *, struct dentry *);
68436
68437-static inline void nd_set_link(struct nameidata *nd, char *path)
68438+static inline void nd_set_link(struct nameidata *nd, const char *path)
68439 {
68440 nd->saved_names[nd->depth] = path;
68441 }
68442
68443-static inline char *nd_get_link(struct nameidata *nd)
68444+static inline const char *nd_get_link(const struct nameidata *nd)
68445 {
68446 return nd->saved_names[nd->depth];
68447 }
68448diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
68449index 9d7e8f7..04428c5 100644
68450--- a/include/linux/netdevice.h
68451+++ b/include/linux/netdevice.h
68452@@ -637,6 +637,7 @@ struct net_device_ops {
68453 u16 xid);
68454 #endif
68455 };
68456+typedef struct net_device_ops __no_const net_device_ops_no_const;
68457
68458 /*
68459 * The DEVICE structure.
68460diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
68461new file mode 100644
68462index 0000000..33f4af8
68463--- /dev/null
68464+++ b/include/linux/netfilter/xt_gradm.h
68465@@ -0,0 +1,9 @@
68466+#ifndef _LINUX_NETFILTER_XT_GRADM_H
68467+#define _LINUX_NETFILTER_XT_GRADM_H 1
68468+
68469+struct xt_gradm_mtinfo {
68470+ __u16 flags;
68471+ __u16 invflags;
68472+};
68473+
68474+#endif
68475diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
68476index b359c4a..c08b334 100644
68477--- a/include/linux/nodemask.h
68478+++ b/include/linux/nodemask.h
68479@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
68480
68481 #define any_online_node(mask) \
68482 ({ \
68483- int node; \
68484- for_each_node_mask(node, (mask)) \
68485- if (node_online(node)) \
68486+ int __node; \
68487+ for_each_node_mask(__node, (mask)) \
68488+ if (node_online(__node)) \
68489 break; \
68490- node; \
68491+ __node; \
68492 })
68493
68494 #define num_online_nodes() num_node_state(N_ONLINE)
68495diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
68496index 5171639..7cf4235 100644
68497--- a/include/linux/oprofile.h
68498+++ b/include/linux/oprofile.h
68499@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
68500 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
68501 char const * name, ulong * val);
68502
68503-/** Create a file for read-only access to an atomic_t. */
68504+/** Create a file for read-only access to an atomic_unchecked_t. */
68505 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
68506- char const * name, atomic_t * val);
68507+ char const * name, atomic_unchecked_t * val);
68508
68509 /** create a directory */
68510 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
68511diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
68512index 3c62ed4..8924c7c 100644
68513--- a/include/linux/pagemap.h
68514+++ b/include/linux/pagemap.h
68515@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
68516 if (((unsigned long)uaddr & PAGE_MASK) !=
68517 ((unsigned long)end & PAGE_MASK))
68518 ret = __get_user(c, end);
68519+ (void)c;
68520 }
68521+ (void)c;
68522 return ret;
68523 }
68524
68525diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
68526index 81c9689..a567a55 100644
68527--- a/include/linux/perf_event.h
68528+++ b/include/linux/perf_event.h
68529@@ -476,7 +476,7 @@ struct hw_perf_event {
68530 struct hrtimer hrtimer;
68531 };
68532 };
68533- atomic64_t prev_count;
68534+ atomic64_unchecked_t prev_count;
68535 u64 sample_period;
68536 u64 last_period;
68537 atomic64_t period_left;
68538@@ -557,7 +557,7 @@ struct perf_event {
68539 const struct pmu *pmu;
68540
68541 enum perf_event_active_state state;
68542- atomic64_t count;
68543+ atomic64_unchecked_t count;
68544
68545 /*
68546 * These are the total time in nanoseconds that the event
68547@@ -595,8 +595,8 @@ struct perf_event {
68548 * These accumulate total time (in nanoseconds) that children
68549 * events have been enabled and running, respectively.
68550 */
68551- atomic64_t child_total_time_enabled;
68552- atomic64_t child_total_time_running;
68553+ atomic64_unchecked_t child_total_time_enabled;
68554+ atomic64_unchecked_t child_total_time_running;
68555
68556 /*
68557 * Protect attach/detach and child_list:
68558diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
68559index b43a9e0..b77d869 100644
68560--- a/include/linux/pipe_fs_i.h
68561+++ b/include/linux/pipe_fs_i.h
68562@@ -46,9 +46,9 @@ struct pipe_inode_info {
68563 wait_queue_head_t wait;
68564 unsigned int nrbufs, curbuf;
68565 struct page *tmp_page;
68566- unsigned int readers;
68567- unsigned int writers;
68568- unsigned int waiting_writers;
68569+ atomic_t readers;
68570+ atomic_t writers;
68571+ atomic_t waiting_writers;
68572 unsigned int r_counter;
68573 unsigned int w_counter;
68574 struct fasync_struct *fasync_readers;
68575diff --git a/include/linux/poison.h b/include/linux/poison.h
68576index 34066ff..e95d744 100644
68577--- a/include/linux/poison.h
68578+++ b/include/linux/poison.h
68579@@ -19,8 +19,8 @@
68580 * under normal circumstances, used to verify that nobody uses
68581 * non-initialized list entries.
68582 */
68583-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
68584-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
68585+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
68586+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
68587
68588 /********** include/linux/timer.h **********/
68589 /*
68590diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
68591index 4f71bf4..77ffa64 100644
68592--- a/include/linux/posix-timers.h
68593+++ b/include/linux/posix-timers.h
68594@@ -67,7 +67,7 @@ struct k_itimer {
68595 };
68596
68597 struct k_clock {
68598- int res; /* in nanoseconds */
68599+ const int res; /* in nanoseconds */
68600 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
68601 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
68602 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
68603diff --git a/include/linux/preempt.h b/include/linux/preempt.h
68604index 72b1a10..13303a9 100644
68605--- a/include/linux/preempt.h
68606+++ b/include/linux/preempt.h
68607@@ -110,7 +110,7 @@ struct preempt_ops {
68608 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
68609 void (*sched_out)(struct preempt_notifier *notifier,
68610 struct task_struct *next);
68611-};
68612+} __no_const;
68613
68614 /**
68615 * preempt_notifier - key for installing preemption notifiers
68616diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
68617index 379eaed..1bf73e3 100644
68618--- a/include/linux/proc_fs.h
68619+++ b/include/linux/proc_fs.h
68620@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
68621 return proc_create_data(name, mode, parent, proc_fops, NULL);
68622 }
68623
68624+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
68625+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
68626+{
68627+#ifdef CONFIG_GRKERNSEC_PROC_USER
68628+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
68629+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68630+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
68631+#else
68632+ return proc_create_data(name, mode, parent, proc_fops, NULL);
68633+#endif
68634+}
68635+
68636+
68637 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
68638 mode_t mode, struct proc_dir_entry *base,
68639 read_proc_t *read_proc, void * data)
68640@@ -256,7 +269,7 @@ union proc_op {
68641 int (*proc_show)(struct seq_file *m,
68642 struct pid_namespace *ns, struct pid *pid,
68643 struct task_struct *task);
68644-};
68645+} __no_const;
68646
68647 struct ctl_table_header;
68648 struct ctl_table;
68649diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
68650index 7456d7d..6c1cfc9 100644
68651--- a/include/linux/ptrace.h
68652+++ b/include/linux/ptrace.h
68653@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
68654 extern void exit_ptrace(struct task_struct *tracer);
68655 #define PTRACE_MODE_READ 1
68656 #define PTRACE_MODE_ATTACH 2
68657-/* Returns 0 on success, -errno on denial. */
68658-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
68659 /* Returns true on success, false on denial. */
68660 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
68661+/* Returns true on success, false on denial. */
68662+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
68663
68664 static inline int ptrace_reparented(struct task_struct *child)
68665 {
68666diff --git a/include/linux/random.h b/include/linux/random.h
68667index 2948046..3262567 100644
68668--- a/include/linux/random.h
68669+++ b/include/linux/random.h
68670@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
68671 u32 random32(void);
68672 void srandom32(u32 seed);
68673
68674+static inline unsigned long pax_get_random_long(void)
68675+{
68676+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
68677+}
68678+
68679 #endif /* __KERNEL___ */
68680
68681 #endif /* _LINUX_RANDOM_H */
68682diff --git a/include/linux/reboot.h b/include/linux/reboot.h
68683index 988e55f..17cb4ef 100644
68684--- a/include/linux/reboot.h
68685+++ b/include/linux/reboot.h
68686@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
68687 * Architecture-specific implementations of sys_reboot commands.
68688 */
68689
68690-extern void machine_restart(char *cmd);
68691-extern void machine_halt(void);
68692-extern void machine_power_off(void);
68693+extern void machine_restart(char *cmd) __noreturn;
68694+extern void machine_halt(void) __noreturn;
68695+extern void machine_power_off(void) __noreturn;
68696
68697 extern void machine_shutdown(void);
68698 struct pt_regs;
68699@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
68700 */
68701
68702 extern void kernel_restart_prepare(char *cmd);
68703-extern void kernel_restart(char *cmd);
68704-extern void kernel_halt(void);
68705-extern void kernel_power_off(void);
68706+extern void kernel_restart(char *cmd) __noreturn;
68707+extern void kernel_halt(void) __noreturn;
68708+extern void kernel_power_off(void) __noreturn;
68709
68710 void ctrl_alt_del(void);
68711
68712@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
68713 * Emergency restart, callable from an interrupt handler.
68714 */
68715
68716-extern void emergency_restart(void);
68717+extern void emergency_restart(void) __noreturn;
68718 #include <asm/emergency-restart.h>
68719
68720 #endif
68721diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
68722index dd31e7b..5b03c5c 100644
68723--- a/include/linux/reiserfs_fs.h
68724+++ b/include/linux/reiserfs_fs.h
68725@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68726 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
68727
68728 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68729-#define get_generation(s) atomic_read (&fs_generation(s))
68730+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68731 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68732 #define __fs_changed(gen,s) (gen != get_generation (s))
68733 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
68734@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
68735 */
68736
68737 struct item_operations {
68738- int (*bytes_number) (struct item_head * ih, int block_size);
68739- void (*decrement_key) (struct cpu_key *);
68740- int (*is_left_mergeable) (struct reiserfs_key * ih,
68741+ int (* const bytes_number) (struct item_head * ih, int block_size);
68742+ void (* const decrement_key) (struct cpu_key *);
68743+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
68744 unsigned long bsize);
68745- void (*print_item) (struct item_head *, char *item);
68746- void (*check_item) (struct item_head *, char *item);
68747+ void (* const print_item) (struct item_head *, char *item);
68748+ void (* const check_item) (struct item_head *, char *item);
68749
68750- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68751+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68752 int is_affected, int insert_size);
68753- int (*check_left) (struct virtual_item * vi, int free,
68754+ int (* const check_left) (struct virtual_item * vi, int free,
68755 int start_skip, int end_skip);
68756- int (*check_right) (struct virtual_item * vi, int free);
68757- int (*part_size) (struct virtual_item * vi, int from, int to);
68758- int (*unit_num) (struct virtual_item * vi);
68759- void (*print_vi) (struct virtual_item * vi);
68760+ int (* const check_right) (struct virtual_item * vi, int free);
68761+ int (* const part_size) (struct virtual_item * vi, int from, int to);
68762+ int (* const unit_num) (struct virtual_item * vi);
68763+ void (* const print_vi) (struct virtual_item * vi);
68764 };
68765
68766-extern struct item_operations *item_ops[TYPE_ANY + 1];
68767+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
68768
68769 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
68770 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
68771diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
68772index dab68bb..0688727 100644
68773--- a/include/linux/reiserfs_fs_sb.h
68774+++ b/include/linux/reiserfs_fs_sb.h
68775@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
68776 /* Comment? -Hans */
68777 wait_queue_head_t s_wait;
68778 /* To be obsoleted soon by per buffer seals.. -Hans */
68779- atomic_t s_generation_counter; // increased by one every time the
68780+ atomic_unchecked_t s_generation_counter; // increased by one every time the
68781 // tree gets re-balanced
68782 unsigned long s_properties; /* File system properties. Currently holds
68783 on-disk FS format */
68784diff --git a/include/linux/relay.h b/include/linux/relay.h
68785index 14a86bc..17d0700 100644
68786--- a/include/linux/relay.h
68787+++ b/include/linux/relay.h
68788@@ -159,7 +159,7 @@ struct rchan_callbacks
68789 * The callback should return 0 if successful, negative if not.
68790 */
68791 int (*remove_buf_file)(struct dentry *dentry);
68792-};
68793+} __no_const;
68794
68795 /*
68796 * CONFIG_RELAY kernel API, kernel/relay.c
68797diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
68798index 3392c59..a746428 100644
68799--- a/include/linux/rfkill.h
68800+++ b/include/linux/rfkill.h
68801@@ -144,6 +144,7 @@ struct rfkill_ops {
68802 void (*query)(struct rfkill *rfkill, void *data);
68803 int (*set_block)(void *data, bool blocked);
68804 };
68805+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
68806
68807 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
68808 /**
68809diff --git a/include/linux/sched.h b/include/linux/sched.h
68810index 71849bf..2ef383dc3 100644
68811--- a/include/linux/sched.h
68812+++ b/include/linux/sched.h
68813@@ -101,6 +101,7 @@ struct bio;
68814 struct fs_struct;
68815 struct bts_context;
68816 struct perf_event_context;
68817+struct linux_binprm;
68818
68819 /*
68820 * List of flags we want to share for kernel threads,
68821@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
68822 extern signed long schedule_timeout_uninterruptible(signed long timeout);
68823 asmlinkage void __schedule(void);
68824 asmlinkage void schedule(void);
68825-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
68826+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
68827
68828 struct nsproxy;
68829 struct user_namespace;
68830@@ -371,9 +372,12 @@ struct user_namespace;
68831 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
68832
68833 extern int sysctl_max_map_count;
68834+extern unsigned long sysctl_heap_stack_gap;
68835
68836 #include <linux/aio.h>
68837
68838+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
68839+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
68840 extern unsigned long
68841 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
68842 unsigned long, unsigned long);
68843@@ -666,6 +670,16 @@ struct signal_struct {
68844 struct tty_audit_buf *tty_audit_buf;
68845 #endif
68846
68847+#ifdef CONFIG_GRKERNSEC
68848+ u32 curr_ip;
68849+ u32 saved_ip;
68850+ u32 gr_saddr;
68851+ u32 gr_daddr;
68852+ u16 gr_sport;
68853+ u16 gr_dport;
68854+ u8 used_accept:1;
68855+#endif
68856+
68857 int oom_adj; /* OOM kill score adjustment (bit shift) */
68858 };
68859
68860@@ -723,6 +737,11 @@ struct user_struct {
68861 struct key *session_keyring; /* UID's default session keyring */
68862 #endif
68863
68864+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
68865+ unsigned int banned;
68866+ unsigned long ban_expires;
68867+#endif
68868+
68869 /* Hash table maintenance information */
68870 struct hlist_node uidhash_node;
68871 uid_t uid;
68872@@ -1328,8 +1347,8 @@ struct task_struct {
68873 struct list_head thread_group;
68874
68875 struct completion *vfork_done; /* for vfork() */
68876- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
68877- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68878+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
68879+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68880
68881 cputime_t utime, stime, utimescaled, stimescaled;
68882 cputime_t gtime;
68883@@ -1343,16 +1362,6 @@ struct task_struct {
68884 struct task_cputime cputime_expires;
68885 struct list_head cpu_timers[3];
68886
68887-/* process credentials */
68888- const struct cred *real_cred; /* objective and real subjective task
68889- * credentials (COW) */
68890- const struct cred *cred; /* effective (overridable) subjective task
68891- * credentials (COW) */
68892- struct mutex cred_guard_mutex; /* guard against foreign influences on
68893- * credential calculations
68894- * (notably. ptrace) */
68895- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68896-
68897 char comm[TASK_COMM_LEN]; /* executable name excluding path
68898 - access with [gs]et_task_comm (which lock
68899 it with task_lock())
68900@@ -1369,6 +1378,10 @@ struct task_struct {
68901 #endif
68902 /* CPU-specific state of this task */
68903 struct thread_struct thread;
68904+/* thread_info moved to task_struct */
68905+#ifdef CONFIG_X86
68906+ struct thread_info tinfo;
68907+#endif
68908 /* filesystem information */
68909 struct fs_struct *fs;
68910 /* open file information */
68911@@ -1436,6 +1449,15 @@ struct task_struct {
68912 int hardirq_context;
68913 int softirq_context;
68914 #endif
68915+
68916+/* process credentials */
68917+ const struct cred *real_cred; /* objective and real subjective task
68918+ * credentials (COW) */
68919+ struct mutex cred_guard_mutex; /* guard against foreign influences on
68920+ * credential calculations
68921+ * (notably. ptrace) */
68922+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68923+
68924 #ifdef CONFIG_LOCKDEP
68925 # define MAX_LOCK_DEPTH 48UL
68926 u64 curr_chain_key;
68927@@ -1456,6 +1478,9 @@ struct task_struct {
68928
68929 struct backing_dev_info *backing_dev_info;
68930
68931+ const struct cred *cred; /* effective (overridable) subjective task
68932+ * credentials (COW) */
68933+
68934 struct io_context *io_context;
68935
68936 unsigned long ptrace_message;
68937@@ -1519,6 +1544,27 @@ struct task_struct {
68938 unsigned long default_timer_slack_ns;
68939
68940 struct list_head *scm_work_list;
68941+
68942+#ifdef CONFIG_GRKERNSEC
68943+ /* grsecurity */
68944+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68945+ u64 exec_id;
68946+#endif
68947+#ifdef CONFIG_GRKERNSEC_SETXID
68948+ const struct cred *delayed_cred;
68949+#endif
68950+ struct dentry *gr_chroot_dentry;
68951+ struct acl_subject_label *acl;
68952+ struct acl_role_label *role;
68953+ struct file *exec_file;
68954+ u16 acl_role_id;
68955+ /* is this the task that authenticated to the special role */
68956+ u8 acl_sp_role;
68957+ u8 is_writable;
68958+ u8 brute;
68959+ u8 gr_is_chrooted;
68960+#endif
68961+
68962 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
68963 /* Index of current stored adress in ret_stack */
68964 int curr_ret_stack;
68965@@ -1542,6 +1588,57 @@ struct task_struct {
68966 #endif /* CONFIG_TRACING */
68967 };
68968
68969+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
68970+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
68971+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
68972+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
68973+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
68974+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
68975+
68976+#ifdef CONFIG_PAX_SOFTMODE
68977+extern int pax_softmode;
68978+#endif
68979+
68980+extern int pax_check_flags(unsigned long *);
68981+
68982+/* if tsk != current then task_lock must be held on it */
68983+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68984+static inline unsigned long pax_get_flags(struct task_struct *tsk)
68985+{
68986+ if (likely(tsk->mm))
68987+ return tsk->mm->pax_flags;
68988+ else
68989+ return 0UL;
68990+}
68991+
68992+/* if tsk != current then task_lock must be held on it */
68993+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
68994+{
68995+ if (likely(tsk->mm)) {
68996+ tsk->mm->pax_flags = flags;
68997+ return 0;
68998+ }
68999+ return -EINVAL;
69000+}
69001+#endif
69002+
69003+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
69004+extern void pax_set_initial_flags(struct linux_binprm *bprm);
69005+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
69006+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
69007+#endif
69008+
69009+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
69010+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
69011+extern void pax_report_refcount_overflow(struct pt_regs *regs);
69012+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
69013+
69014+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
69015+extern void pax_track_stack(void);
69016+#else
69017+static inline void pax_track_stack(void) {}
69018+#endif
69019+
69020 /* Future-safe accessor for struct task_struct's cpus_allowed. */
69021 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
69022
69023@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
69024 #define PF_DUMPCORE 0x00000200 /* dumped core */
69025 #define PF_SIGNALED 0x00000400 /* killed by a signal */
69026 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
69027-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
69028+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
69029 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
69030 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
69031 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
69032@@ -1978,7 +2075,9 @@ void yield(void);
69033 extern struct exec_domain default_exec_domain;
69034
69035 union thread_union {
69036+#ifndef CONFIG_X86
69037 struct thread_info thread_info;
69038+#endif
69039 unsigned long stack[THREAD_SIZE/sizeof(long)];
69040 };
69041
69042@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
69043 */
69044
69045 extern struct task_struct *find_task_by_vpid(pid_t nr);
69046+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
69047 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
69048 struct pid_namespace *ns);
69049
69050@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
69051 extern void exit_itimers(struct signal_struct *);
69052 extern void flush_itimer_signals(void);
69053
69054-extern NORET_TYPE void do_group_exit(int);
69055+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
69056
69057 extern void daemonize(const char *, ...);
69058 extern int allow_signal(int);
69059@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
69060
69061 #endif
69062
69063-static inline int object_is_on_stack(void *obj)
69064+static inline int object_starts_on_stack(void *obj)
69065 {
69066- void *stack = task_stack_page(current);
69067+ const void *stack = task_stack_page(current);
69068
69069 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
69070 }
69071
69072+#ifdef CONFIG_PAX_USERCOPY
69073+extern int object_is_on_stack(const void *obj, unsigned long len);
69074+#endif
69075+
69076 extern void thread_info_cache_init(void);
69077
69078 #ifdef CONFIG_DEBUG_STACK_USAGE
69079diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
69080index 1ee2c05..81b7ec4 100644
69081--- a/include/linux/screen_info.h
69082+++ b/include/linux/screen_info.h
69083@@ -42,7 +42,8 @@ struct screen_info {
69084 __u16 pages; /* 0x32 */
69085 __u16 vesa_attributes; /* 0x34 */
69086 __u32 capabilities; /* 0x36 */
69087- __u8 _reserved[6]; /* 0x3a */
69088+ __u16 vesapm_size; /* 0x3a */
69089+ __u8 _reserved[4]; /* 0x3c */
69090 } __attribute__((packed));
69091
69092 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
69093diff --git a/include/linux/security.h b/include/linux/security.h
69094index d40d23f..d739b08 100644
69095--- a/include/linux/security.h
69096+++ b/include/linux/security.h
69097@@ -34,6 +34,7 @@
69098 #include <linux/key.h>
69099 #include <linux/xfrm.h>
69100 #include <linux/gfp.h>
69101+#include <linux/grsecurity.h>
69102 #include <net/flow.h>
69103
69104 /* Maximum number of letters for an LSM name string */
69105@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
69106 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
69107 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
69108 extern int cap_task_setnice(struct task_struct *p, int nice);
69109-extern int cap_syslog(int type);
69110+extern int cap_syslog(int type, bool from_file);
69111 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
69112
69113 struct msghdr;
69114@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
69115 * logging to the console.
69116 * See the syslog(2) manual page for an explanation of the @type values.
69117 * @type contains the type of action.
69118+ * @from_file indicates the context of action (if it came from /proc).
69119 * Return 0 if permission is granted.
69120 * @settime:
69121 * Check permission to change the system time.
69122@@ -1445,7 +1447,7 @@ struct security_operations {
69123 int (*sysctl) (struct ctl_table *table, int op);
69124 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
69125 int (*quota_on) (struct dentry *dentry);
69126- int (*syslog) (int type);
69127+ int (*syslog) (int type, bool from_file);
69128 int (*settime) (struct timespec *ts, struct timezone *tz);
69129 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
69130
69131@@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
69132 int security_sysctl(struct ctl_table *table, int op);
69133 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
69134 int security_quota_on(struct dentry *dentry);
69135-int security_syslog(int type);
69136+int security_syslog(int type, bool from_file);
69137 int security_settime(struct timespec *ts, struct timezone *tz);
69138 int security_vm_enough_memory(long pages);
69139 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
69140@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
69141 return 0;
69142 }
69143
69144-static inline int security_syslog(int type)
69145+static inline int security_syslog(int type, bool from_file)
69146 {
69147- return cap_syslog(type);
69148+ return cap_syslog(type, from_file);
69149 }
69150
69151 static inline int security_settime(struct timespec *ts, struct timezone *tz)
69152diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69153index 8366d8f..cc5f9d6 100644
69154--- a/include/linux/seq_file.h
69155+++ b/include/linux/seq_file.h
69156@@ -23,6 +23,9 @@ struct seq_file {
69157 u64 version;
69158 struct mutex lock;
69159 const struct seq_operations *op;
69160+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69161+ u64 exec_id;
69162+#endif
69163 void *private;
69164 };
69165
69166@@ -32,6 +35,7 @@ struct seq_operations {
69167 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69168 int (*show) (struct seq_file *m, void *v);
69169 };
69170+typedef struct seq_operations __no_const seq_operations_no_const;
69171
69172 #define SEQ_SKIP 1
69173
69174diff --git a/include/linux/shm.h b/include/linux/shm.h
69175index eca6235..c7417ed 100644
69176--- a/include/linux/shm.h
69177+++ b/include/linux/shm.h
69178@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
69179 pid_t shm_cprid;
69180 pid_t shm_lprid;
69181 struct user_struct *mlock_user;
69182+#ifdef CONFIG_GRKERNSEC
69183+ time_t shm_createtime;
69184+ pid_t shm_lapid;
69185+#endif
69186 };
69187
69188 /* shm_mode upper byte flags */
69189diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69190index bcdd660..6e12e11 100644
69191--- a/include/linux/skbuff.h
69192+++ b/include/linux/skbuff.h
69193@@ -14,6 +14,7 @@
69194 #ifndef _LINUX_SKBUFF_H
69195 #define _LINUX_SKBUFF_H
69196
69197+#include <linux/const.h>
69198 #include <linux/kernel.h>
69199 #include <linux/kmemcheck.h>
69200 #include <linux/compiler.h>
69201@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
69202 */
69203 static inline int skb_queue_empty(const struct sk_buff_head *list)
69204 {
69205- return list->next == (struct sk_buff *)list;
69206+ return list->next == (const struct sk_buff *)list;
69207 }
69208
69209 /**
69210@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69211 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69212 const struct sk_buff *skb)
69213 {
69214- return (skb->next == (struct sk_buff *) list);
69215+ return (skb->next == (const struct sk_buff *) list);
69216 }
69217
69218 /**
69219@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69220 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69221 const struct sk_buff *skb)
69222 {
69223- return (skb->prev == (struct sk_buff *) list);
69224+ return (skb->prev == (const struct sk_buff *) list);
69225 }
69226
69227 /**
69228@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69229 * headroom, you should not reduce this.
69230 */
69231 #ifndef NET_SKB_PAD
69232-#define NET_SKB_PAD 32
69233+#define NET_SKB_PAD (_AC(32,UL))
69234 #endif
69235
69236 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69237diff --git a/include/linux/slab.h b/include/linux/slab.h
69238index 2da8372..a3be824 100644
69239--- a/include/linux/slab.h
69240+++ b/include/linux/slab.h
69241@@ -11,12 +11,20 @@
69242
69243 #include <linux/gfp.h>
69244 #include <linux/types.h>
69245+#include <linux/err.h>
69246
69247 /*
69248 * Flags to pass to kmem_cache_create().
69249 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69250 */
69251 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69252+
69253+#ifdef CONFIG_PAX_USERCOPY
69254+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69255+#else
69256+#define SLAB_USERCOPY 0x00000000UL
69257+#endif
69258+
69259 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69260 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69261 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69262@@ -82,10 +90,13 @@
69263 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69264 * Both make kfree a no-op.
69265 */
69266-#define ZERO_SIZE_PTR ((void *)16)
69267+#define ZERO_SIZE_PTR \
69268+({ \
69269+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69270+ (void *)(-MAX_ERRNO-1L); \
69271+})
69272
69273-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69274- (unsigned long)ZERO_SIZE_PTR)
69275+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69276
69277 /*
69278 * struct kmem_cache related prototypes
69279@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69280 void kfree(const void *);
69281 void kzfree(const void *);
69282 size_t ksize(const void *);
69283+void check_object_size(const void *ptr, unsigned long n, bool to);
69284
69285 /*
69286 * Allocator specific definitions. These are mainly used to establish optimized
69287@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69288
69289 void __init kmem_cache_init_late(void);
69290
69291+#define kmalloc(x, y) \
69292+({ \
69293+ void *___retval; \
69294+ intoverflow_t ___x = (intoverflow_t)x; \
69295+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69296+ ___retval = NULL; \
69297+ else \
69298+ ___retval = kmalloc((size_t)___x, (y)); \
69299+ ___retval; \
69300+})
69301+
69302+#define kmalloc_node(x, y, z) \
69303+({ \
69304+ void *___retval; \
69305+ intoverflow_t ___x = (intoverflow_t)x; \
69306+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69307+ ___retval = NULL; \
69308+ else \
69309+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
69310+ ___retval; \
69311+})
69312+
69313+#define kzalloc(x, y) \
69314+({ \
69315+ void *___retval; \
69316+ intoverflow_t ___x = (intoverflow_t)x; \
69317+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69318+ ___retval = NULL; \
69319+ else \
69320+ ___retval = kzalloc((size_t)___x, (y)); \
69321+ ___retval; \
69322+})
69323+
69324 #endif /* _LINUX_SLAB_H */
69325diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69326index 850d057..d9dfe3c 100644
69327--- a/include/linux/slab_def.h
69328+++ b/include/linux/slab_def.h
69329@@ -69,10 +69,10 @@ struct kmem_cache {
69330 unsigned long node_allocs;
69331 unsigned long node_frees;
69332 unsigned long node_overflow;
69333- atomic_t allochit;
69334- atomic_t allocmiss;
69335- atomic_t freehit;
69336- atomic_t freemiss;
69337+ atomic_unchecked_t allochit;
69338+ atomic_unchecked_t allocmiss;
69339+ atomic_unchecked_t freehit;
69340+ atomic_unchecked_t freemiss;
69341
69342 /*
69343 * If debugging is enabled, then the allocator can add additional
69344diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69345index 5ad70a6..57f9f65 100644
69346--- a/include/linux/slub_def.h
69347+++ b/include/linux/slub_def.h
69348@@ -86,7 +86,7 @@ struct kmem_cache {
69349 struct kmem_cache_order_objects max;
69350 struct kmem_cache_order_objects min;
69351 gfp_t allocflags; /* gfp flags to use on each alloc */
69352- int refcount; /* Refcount for slab cache destroy */
69353+ atomic_t refcount; /* Refcount for slab cache destroy */
69354 void (*ctor)(void *);
69355 int inuse; /* Offset to metadata */
69356 int align; /* Alignment */
69357@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69358 #endif
69359
69360 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69361-void *__kmalloc(size_t size, gfp_t flags);
69362+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69363
69364 #ifdef CONFIG_KMEMTRACE
69365 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69366diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69367index 67ad11f..0bbd8af 100644
69368--- a/include/linux/sonet.h
69369+++ b/include/linux/sonet.h
69370@@ -61,7 +61,7 @@ struct sonet_stats {
69371 #include <asm/atomic.h>
69372
69373 struct k_sonet_stats {
69374-#define __HANDLE_ITEM(i) atomic_t i
69375+#define __HANDLE_ITEM(i) atomic_unchecked_t i
69376 __SONET_ITEMS
69377 #undef __HANDLE_ITEM
69378 };
69379diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69380index 6f52b4d..5500323 100644
69381--- a/include/linux/sunrpc/cache.h
69382+++ b/include/linux/sunrpc/cache.h
69383@@ -125,7 +125,7 @@ struct cache_detail {
69384 */
69385 struct cache_req {
69386 struct cache_deferred_req *(*defer)(struct cache_req *req);
69387-};
69388+} __no_const;
69389 /* this must be embedded in a deferred_request that is being
69390 * delayed awaiting cache-fill
69391 */
69392diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69393index 8ed9642..101ceab 100644
69394--- a/include/linux/sunrpc/clnt.h
69395+++ b/include/linux/sunrpc/clnt.h
69396@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69397 {
69398 switch (sap->sa_family) {
69399 case AF_INET:
69400- return ntohs(((struct sockaddr_in *)sap)->sin_port);
69401+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69402 case AF_INET6:
69403- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69404+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69405 }
69406 return 0;
69407 }
69408@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69409 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69410 const struct sockaddr *src)
69411 {
69412- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69413+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69414 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
69415
69416 dsin->sin_family = ssin->sin_family;
69417@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
69418 if (sa->sa_family != AF_INET6)
69419 return 0;
69420
69421- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
69422+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
69423 }
69424
69425 #endif /* __KERNEL__ */
69426diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
69427index c14fe86..393245e 100644
69428--- a/include/linux/sunrpc/svc_rdma.h
69429+++ b/include/linux/sunrpc/svc_rdma.h
69430@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
69431 extern unsigned int svcrdma_max_requests;
69432 extern unsigned int svcrdma_max_req_size;
69433
69434-extern atomic_t rdma_stat_recv;
69435-extern atomic_t rdma_stat_read;
69436-extern atomic_t rdma_stat_write;
69437-extern atomic_t rdma_stat_sq_starve;
69438-extern atomic_t rdma_stat_rq_starve;
69439-extern atomic_t rdma_stat_rq_poll;
69440-extern atomic_t rdma_stat_rq_prod;
69441-extern atomic_t rdma_stat_sq_poll;
69442-extern atomic_t rdma_stat_sq_prod;
69443+extern atomic_unchecked_t rdma_stat_recv;
69444+extern atomic_unchecked_t rdma_stat_read;
69445+extern atomic_unchecked_t rdma_stat_write;
69446+extern atomic_unchecked_t rdma_stat_sq_starve;
69447+extern atomic_unchecked_t rdma_stat_rq_starve;
69448+extern atomic_unchecked_t rdma_stat_rq_poll;
69449+extern atomic_unchecked_t rdma_stat_rq_prod;
69450+extern atomic_unchecked_t rdma_stat_sq_poll;
69451+extern atomic_unchecked_t rdma_stat_sq_prod;
69452
69453 #define RPCRDMA_VERSION 1
69454
69455diff --git a/include/linux/suspend.h b/include/linux/suspend.h
69456index 5e781d8..1e62818 100644
69457--- a/include/linux/suspend.h
69458+++ b/include/linux/suspend.h
69459@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
69460 * which require special recovery actions in that situation.
69461 */
69462 struct platform_suspend_ops {
69463- int (*valid)(suspend_state_t state);
69464- int (*begin)(suspend_state_t state);
69465- int (*prepare)(void);
69466- int (*prepare_late)(void);
69467- int (*enter)(suspend_state_t state);
69468- void (*wake)(void);
69469- void (*finish)(void);
69470- void (*end)(void);
69471- void (*recover)(void);
69472+ int (* const valid)(suspend_state_t state);
69473+ int (* const begin)(suspend_state_t state);
69474+ int (* const prepare)(void);
69475+ int (* const prepare_late)(void);
69476+ int (* const enter)(suspend_state_t state);
69477+ void (* const wake)(void);
69478+ void (* const finish)(void);
69479+ void (* const end)(void);
69480+ void (* const recover)(void);
69481 };
69482
69483 #ifdef CONFIG_SUSPEND
69484@@ -120,7 +120,7 @@ struct platform_suspend_ops {
69485 * suspend_set_ops - set platform dependent suspend operations
69486 * @ops: The new suspend operations to set.
69487 */
69488-extern void suspend_set_ops(struct platform_suspend_ops *ops);
69489+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
69490 extern int suspend_valid_only_mem(suspend_state_t state);
69491
69492 /**
69493@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
69494 #else /* !CONFIG_SUSPEND */
69495 #define suspend_valid_only_mem NULL
69496
69497-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
69498+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
69499 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
69500 #endif /* !CONFIG_SUSPEND */
69501
69502@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
69503 * platforms which require special recovery actions in that situation.
69504 */
69505 struct platform_hibernation_ops {
69506- int (*begin)(void);
69507- void (*end)(void);
69508- int (*pre_snapshot)(void);
69509- void (*finish)(void);
69510- int (*prepare)(void);
69511- int (*enter)(void);
69512- void (*leave)(void);
69513- int (*pre_restore)(void);
69514- void (*restore_cleanup)(void);
69515- void (*recover)(void);
69516+ int (* const begin)(void);
69517+ void (* const end)(void);
69518+ int (* const pre_snapshot)(void);
69519+ void (* const finish)(void);
69520+ int (* const prepare)(void);
69521+ int (* const enter)(void);
69522+ void (* const leave)(void);
69523+ int (* const pre_restore)(void);
69524+ void (* const restore_cleanup)(void);
69525+ void (* const recover)(void);
69526 };
69527
69528 #ifdef CONFIG_HIBERNATION
69529@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
69530 extern void swsusp_unset_page_free(struct page *);
69531 extern unsigned long get_safe_page(gfp_t gfp_mask);
69532
69533-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
69534+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
69535 extern int hibernate(void);
69536 extern bool system_entering_hibernation(void);
69537 #else /* CONFIG_HIBERNATION */
69538@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
69539 static inline void swsusp_set_page_free(struct page *p) {}
69540 static inline void swsusp_unset_page_free(struct page *p) {}
69541
69542-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
69543+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
69544 static inline int hibernate(void) { return -ENOSYS; }
69545 static inline bool system_entering_hibernation(void) { return false; }
69546 #endif /* CONFIG_HIBERNATION */
69547diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
69548index 0eb6942..a805cb6 100644
69549--- a/include/linux/sysctl.h
69550+++ b/include/linux/sysctl.h
69551@@ -164,7 +164,11 @@ enum
69552 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
69553 };
69554
69555-
69556+#ifdef CONFIG_PAX_SOFTMODE
69557+enum {
69558+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
69559+};
69560+#endif
69561
69562 /* CTL_VM names: */
69563 enum
69564@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
69565
69566 extern int proc_dostring(struct ctl_table *, int,
69567 void __user *, size_t *, loff_t *);
69568+extern int proc_dostring_modpriv(struct ctl_table *, int,
69569+ void __user *, size_t *, loff_t *);
69570 extern int proc_dointvec(struct ctl_table *, int,
69571 void __user *, size_t *, loff_t *);
69572 extern int proc_dointvec_minmax(struct ctl_table *, int,
69573@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
69574
69575 extern ctl_handler sysctl_data;
69576 extern ctl_handler sysctl_string;
69577+extern ctl_handler sysctl_string_modpriv;
69578 extern ctl_handler sysctl_intvec;
69579 extern ctl_handler sysctl_jiffies;
69580 extern ctl_handler sysctl_ms_jiffies;
69581diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
69582index 9d68fed..71f02cc 100644
69583--- a/include/linux/sysfs.h
69584+++ b/include/linux/sysfs.h
69585@@ -75,8 +75,8 @@ struct bin_attribute {
69586 };
69587
69588 struct sysfs_ops {
69589- ssize_t (*show)(struct kobject *, struct attribute *,char *);
69590- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
69591+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
69592+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
69593 };
69594
69595 struct sysfs_dirent;
69596diff --git a/include/linux/syslog.h b/include/linux/syslog.h
69597new file mode 100644
69598index 0000000..3891139
69599--- /dev/null
69600+++ b/include/linux/syslog.h
69601@@ -0,0 +1,52 @@
69602+/* Syslog internals
69603+ *
69604+ * Copyright 2010 Canonical, Ltd.
69605+ * Author: Kees Cook <kees.cook@canonical.com>
69606+ *
69607+ * This program is free software; you can redistribute it and/or modify
69608+ * it under the terms of the GNU General Public License as published by
69609+ * the Free Software Foundation; either version 2, or (at your option)
69610+ * any later version.
69611+ *
69612+ * This program is distributed in the hope that it will be useful,
69613+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
69614+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69615+ * GNU General Public License for more details.
69616+ *
69617+ * You should have received a copy of the GNU General Public License
69618+ * along with this program; see the file COPYING. If not, write to
69619+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
69620+ */
69621+
69622+#ifndef _LINUX_SYSLOG_H
69623+#define _LINUX_SYSLOG_H
69624+
69625+/* Close the log. Currently a NOP. */
69626+#define SYSLOG_ACTION_CLOSE 0
69627+/* Open the log. Currently a NOP. */
69628+#define SYSLOG_ACTION_OPEN 1
69629+/* Read from the log. */
69630+#define SYSLOG_ACTION_READ 2
69631+/* Read all messages remaining in the ring buffer. */
69632+#define SYSLOG_ACTION_READ_ALL 3
69633+/* Read and clear all messages remaining in the ring buffer */
69634+#define SYSLOG_ACTION_READ_CLEAR 4
69635+/* Clear ring buffer. */
69636+#define SYSLOG_ACTION_CLEAR 5
69637+/* Disable printk's to console */
69638+#define SYSLOG_ACTION_CONSOLE_OFF 6
69639+/* Enable printk's to console */
69640+#define SYSLOG_ACTION_CONSOLE_ON 7
69641+/* Set level of messages printed to console */
69642+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
69643+/* Return number of unread characters in the log buffer */
69644+#define SYSLOG_ACTION_SIZE_UNREAD 9
69645+/* Return size of the log buffer */
69646+#define SYSLOG_ACTION_SIZE_BUFFER 10
69647+
69648+#define SYSLOG_FROM_CALL 0
69649+#define SYSLOG_FROM_FILE 1
69650+
69651+int do_syslog(int type, char __user *buf, int count, bool from_file);
69652+
69653+#endif /* _LINUX_SYSLOG_H */
69654diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
69655index a8cc4e1..98d3b85 100644
69656--- a/include/linux/thread_info.h
69657+++ b/include/linux/thread_info.h
69658@@ -23,7 +23,7 @@ struct restart_block {
69659 };
69660 /* For futex_wait and futex_wait_requeue_pi */
69661 struct {
69662- u32 *uaddr;
69663+ u32 __user *uaddr;
69664 u32 val;
69665 u32 flags;
69666 u32 bitset;
69667diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
69668index 1eb44a9..f582df3 100644
69669--- a/include/linux/tracehook.h
69670+++ b/include/linux/tracehook.h
69671@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
69672 /*
69673 * ptrace report for syscall entry and exit looks identical.
69674 */
69675-static inline void ptrace_report_syscall(struct pt_regs *regs)
69676+static inline int ptrace_report_syscall(struct pt_regs *regs)
69677 {
69678 int ptrace = task_ptrace(current);
69679
69680 if (!(ptrace & PT_PTRACED))
69681- return;
69682+ return 0;
69683
69684 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
69685
69686@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
69687 send_sig(current->exit_code, current, 1);
69688 current->exit_code = 0;
69689 }
69690+
69691+ return fatal_signal_pending(current);
69692 }
69693
69694 /**
69695@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
69696 static inline __must_check int tracehook_report_syscall_entry(
69697 struct pt_regs *regs)
69698 {
69699- ptrace_report_syscall(regs);
69700- return 0;
69701+ return ptrace_report_syscall(regs);
69702 }
69703
69704 /**
69705diff --git a/include/linux/tty.h b/include/linux/tty.h
69706index e9c57e9..ee6d489 100644
69707--- a/include/linux/tty.h
69708+++ b/include/linux/tty.h
69709@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
69710 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
69711 extern void tty_ldisc_enable(struct tty_struct *tty);
69712
69713-
69714 /* n_tty.c */
69715 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
69716
69717diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
69718index 0c4ee9b..9f7c426 100644
69719--- a/include/linux/tty_ldisc.h
69720+++ b/include/linux/tty_ldisc.h
69721@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
69722
69723 struct module *owner;
69724
69725- int refcount;
69726+ atomic_t refcount;
69727 };
69728
69729 struct tty_ldisc {
69730diff --git a/include/linux/types.h b/include/linux/types.h
69731index c42724f..d190eee 100644
69732--- a/include/linux/types.h
69733+++ b/include/linux/types.h
69734@@ -191,10 +191,26 @@ typedef struct {
69735 volatile int counter;
69736 } atomic_t;
69737
69738+#ifdef CONFIG_PAX_REFCOUNT
69739+typedef struct {
69740+ volatile int counter;
69741+} atomic_unchecked_t;
69742+#else
69743+typedef atomic_t atomic_unchecked_t;
69744+#endif
69745+
69746 #ifdef CONFIG_64BIT
69747 typedef struct {
69748 volatile long counter;
69749 } atomic64_t;
69750+
69751+#ifdef CONFIG_PAX_REFCOUNT
69752+typedef struct {
69753+ volatile long counter;
69754+} atomic64_unchecked_t;
69755+#else
69756+typedef atomic64_t atomic64_unchecked_t;
69757+#endif
69758 #endif
69759
69760 struct ustat {
69761diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
69762index 6b58367..53a3e8e 100644
69763--- a/include/linux/uaccess.h
69764+++ b/include/linux/uaccess.h
69765@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69766 long ret; \
69767 mm_segment_t old_fs = get_fs(); \
69768 \
69769- set_fs(KERNEL_DS); \
69770 pagefault_disable(); \
69771- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
69772- pagefault_enable(); \
69773+ set_fs(KERNEL_DS); \
69774+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
69775 set_fs(old_fs); \
69776+ pagefault_enable(); \
69777 ret; \
69778 })
69779
69780@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69781 * Safely read from address @src to the buffer at @dst. If a kernel fault
69782 * happens, handle that and return -EFAULT.
69783 */
69784-extern long probe_kernel_read(void *dst, void *src, size_t size);
69785+extern long probe_kernel_read(void *dst, const void *src, size_t size);
69786
69787 /*
69788 * probe_kernel_write(): safely attempt to write to a location
69789@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
69790 * Safely write to address @dst from the buffer at @src. If a kernel fault
69791 * happens, handle that and return -EFAULT.
69792 */
69793-extern long probe_kernel_write(void *dst, void *src, size_t size);
69794+extern long probe_kernel_write(void *dst, const void *src, size_t size);
69795
69796 #endif /* __LINUX_UACCESS_H__ */
69797diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
69798index 99c1b4d..bb94261 100644
69799--- a/include/linux/unaligned/access_ok.h
69800+++ b/include/linux/unaligned/access_ok.h
69801@@ -6,32 +6,32 @@
69802
69803 static inline u16 get_unaligned_le16(const void *p)
69804 {
69805- return le16_to_cpup((__le16 *)p);
69806+ return le16_to_cpup((const __le16 *)p);
69807 }
69808
69809 static inline u32 get_unaligned_le32(const void *p)
69810 {
69811- return le32_to_cpup((__le32 *)p);
69812+ return le32_to_cpup((const __le32 *)p);
69813 }
69814
69815 static inline u64 get_unaligned_le64(const void *p)
69816 {
69817- return le64_to_cpup((__le64 *)p);
69818+ return le64_to_cpup((const __le64 *)p);
69819 }
69820
69821 static inline u16 get_unaligned_be16(const void *p)
69822 {
69823- return be16_to_cpup((__be16 *)p);
69824+ return be16_to_cpup((const __be16 *)p);
69825 }
69826
69827 static inline u32 get_unaligned_be32(const void *p)
69828 {
69829- return be32_to_cpup((__be32 *)p);
69830+ return be32_to_cpup((const __be32 *)p);
69831 }
69832
69833 static inline u64 get_unaligned_be64(const void *p)
69834 {
69835- return be64_to_cpup((__be64 *)p);
69836+ return be64_to_cpup((const __be64 *)p);
69837 }
69838
69839 static inline void put_unaligned_le16(u16 val, void *p)
69840diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
69841index 79b9837..b5a56f9 100644
69842--- a/include/linux/vermagic.h
69843+++ b/include/linux/vermagic.h
69844@@ -26,9 +26,35 @@
69845 #define MODULE_ARCH_VERMAGIC ""
69846 #endif
69847
69848+#ifdef CONFIG_PAX_REFCOUNT
69849+#define MODULE_PAX_REFCOUNT "REFCOUNT "
69850+#else
69851+#define MODULE_PAX_REFCOUNT ""
69852+#endif
69853+
69854+#ifdef CONSTIFY_PLUGIN
69855+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
69856+#else
69857+#define MODULE_CONSTIFY_PLUGIN ""
69858+#endif
69859+
69860+#ifdef STACKLEAK_PLUGIN
69861+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
69862+#else
69863+#define MODULE_STACKLEAK_PLUGIN ""
69864+#endif
69865+
69866+#ifdef CONFIG_GRKERNSEC
69867+#define MODULE_GRSEC "GRSEC "
69868+#else
69869+#define MODULE_GRSEC ""
69870+#endif
69871+
69872 #define VERMAGIC_STRING \
69873 UTS_RELEASE " " \
69874 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
69875 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
69876- MODULE_ARCH_VERMAGIC
69877+ MODULE_ARCH_VERMAGIC \
69878+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
69879+ MODULE_GRSEC
69880
69881diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
69882index 819a634..462ac12 100644
69883--- a/include/linux/vmalloc.h
69884+++ b/include/linux/vmalloc.h
69885@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
69886 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
69887 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
69888 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
69889+
69890+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69891+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
69892+#endif
69893+
69894 /* bits [20..32] reserved for arch specific ioremap internals */
69895
69896 /*
69897@@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
69898
69899 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
69900
69901+#define vmalloc(x) \
69902+({ \
69903+ void *___retval; \
69904+ intoverflow_t ___x = (intoverflow_t)x; \
69905+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
69906+ ___retval = NULL; \
69907+ else \
69908+ ___retval = vmalloc((unsigned long)___x); \
69909+ ___retval; \
69910+})
69911+
69912+#define __vmalloc(x, y, z) \
69913+({ \
69914+ void *___retval; \
69915+ intoverflow_t ___x = (intoverflow_t)x; \
69916+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
69917+ ___retval = NULL; \
69918+ else \
69919+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
69920+ ___retval; \
69921+})
69922+
69923+#define vmalloc_user(x) \
69924+({ \
69925+ void *___retval; \
69926+ intoverflow_t ___x = (intoverflow_t)x; \
69927+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
69928+ ___retval = NULL; \
69929+ else \
69930+ ___retval = vmalloc_user((unsigned long)___x); \
69931+ ___retval; \
69932+})
69933+
69934+#define vmalloc_exec(x) \
69935+({ \
69936+ void *___retval; \
69937+ intoverflow_t ___x = (intoverflow_t)x; \
69938+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
69939+ ___retval = NULL; \
69940+ else \
69941+ ___retval = vmalloc_exec((unsigned long)___x); \
69942+ ___retval; \
69943+})
69944+
69945+#define vmalloc_node(x, y) \
69946+({ \
69947+ void *___retval; \
69948+ intoverflow_t ___x = (intoverflow_t)x; \
69949+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
69950+ ___retval = NULL; \
69951+ else \
69952+ ___retval = vmalloc_node((unsigned long)___x, (y));\
69953+ ___retval; \
69954+})
69955+
69956+#define vmalloc_32(x) \
69957+({ \
69958+ void *___retval; \
69959+ intoverflow_t ___x = (intoverflow_t)x; \
69960+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
69961+ ___retval = NULL; \
69962+ else \
69963+ ___retval = vmalloc_32((unsigned long)___x); \
69964+ ___retval; \
69965+})
69966+
69967+#define vmalloc_32_user(x) \
69968+({ \
69969+ void *___retval; \
69970+ intoverflow_t ___x = (intoverflow_t)x; \
69971+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
69972+ ___retval = NULL; \
69973+ else \
69974+ ___retval = vmalloc_32_user((unsigned long)___x);\
69975+ ___retval; \
69976+})
69977+
69978 #endif /* _LINUX_VMALLOC_H */
69979diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
69980index 13070d6..aa4159a 100644
69981--- a/include/linux/vmstat.h
69982+++ b/include/linux/vmstat.h
69983@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
69984 /*
69985 * Zone based page accounting with per cpu differentials.
69986 */
69987-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69988+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69989
69990 static inline void zone_page_state_add(long x, struct zone *zone,
69991 enum zone_stat_item item)
69992 {
69993- atomic_long_add(x, &zone->vm_stat[item]);
69994- atomic_long_add(x, &vm_stat[item]);
69995+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
69996+ atomic_long_add_unchecked(x, &vm_stat[item]);
69997 }
69998
69999 static inline unsigned long global_page_state(enum zone_stat_item item)
70000 {
70001- long x = atomic_long_read(&vm_stat[item]);
70002+ long x = atomic_long_read_unchecked(&vm_stat[item]);
70003 #ifdef CONFIG_SMP
70004 if (x < 0)
70005 x = 0;
70006@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
70007 static inline unsigned long zone_page_state(struct zone *zone,
70008 enum zone_stat_item item)
70009 {
70010- long x = atomic_long_read(&zone->vm_stat[item]);
70011+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70012 #ifdef CONFIG_SMP
70013 if (x < 0)
70014 x = 0;
70015@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
70016 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
70017 enum zone_stat_item item)
70018 {
70019- long x = atomic_long_read(&zone->vm_stat[item]);
70020+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
70021
70022 #ifdef CONFIG_SMP
70023 int cpu;
70024@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
70025
70026 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
70027 {
70028- atomic_long_inc(&zone->vm_stat[item]);
70029- atomic_long_inc(&vm_stat[item]);
70030+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
70031+ atomic_long_inc_unchecked(&vm_stat[item]);
70032 }
70033
70034 static inline void __inc_zone_page_state(struct page *page,
70035@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
70036
70037 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
70038 {
70039- atomic_long_dec(&zone->vm_stat[item]);
70040- atomic_long_dec(&vm_stat[item]);
70041+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
70042+ atomic_long_dec_unchecked(&vm_stat[item]);
70043 }
70044
70045 static inline void __dec_zone_page_state(struct page *page,
70046diff --git a/include/linux/xattr.h b/include/linux/xattr.h
70047index 5c84af8..1a3b6e2 100644
70048--- a/include/linux/xattr.h
70049+++ b/include/linux/xattr.h
70050@@ -33,6 +33,11 @@
70051 #define XATTR_USER_PREFIX "user."
70052 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
70053
70054+/* User namespace */
70055+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
70056+#define XATTR_PAX_FLAGS_SUFFIX "flags"
70057+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
70058+
70059 struct inode;
70060 struct dentry;
70061
70062diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
70063index eed5fcc..5080d24 100644
70064--- a/include/media/saa7146_vv.h
70065+++ b/include/media/saa7146_vv.h
70066@@ -167,7 +167,7 @@ struct saa7146_ext_vv
70067 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
70068
70069 /* the extension can override this */
70070- struct v4l2_ioctl_ops ops;
70071+ v4l2_ioctl_ops_no_const ops;
70072 /* pointer to the saa7146 core ops */
70073 const struct v4l2_ioctl_ops *core_ops;
70074
70075diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
70076index 73c9867..2da8837 100644
70077--- a/include/media/v4l2-dev.h
70078+++ b/include/media/v4l2-dev.h
70079@@ -34,7 +34,7 @@ struct v4l2_device;
70080 #define V4L2_FL_UNREGISTERED (0)
70081
70082 struct v4l2_file_operations {
70083- struct module *owner;
70084+ struct module * const owner;
70085 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
70086 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
70087 unsigned int (*poll) (struct file *, struct poll_table_struct *);
70088@@ -46,6 +46,7 @@ struct v4l2_file_operations {
70089 int (*open) (struct file *);
70090 int (*release) (struct file *);
70091 };
70092+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
70093
70094 /*
70095 * Newer version of video_device, handled by videodev2.c
70096diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
70097index 5d5d550..f559ef1 100644
70098--- a/include/media/v4l2-device.h
70099+++ b/include/media/v4l2-device.h
70100@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
70101 this function returns 0. If the name ends with a digit (e.g. cx18),
70102 then the name will be set to cx18-0 since cx180 looks really odd. */
70103 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
70104- atomic_t *instance);
70105+ atomic_unchecked_t *instance);
70106
70107 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
70108 Since the parent disappears this ensures that v4l2_dev doesn't have an
70109diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70110index 7a4529d..7244290 100644
70111--- a/include/media/v4l2-ioctl.h
70112+++ b/include/media/v4l2-ioctl.h
70113@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
70114 long (*vidioc_default) (struct file *file, void *fh,
70115 int cmd, void *arg);
70116 };
70117+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
70118
70119
70120 /* v4l debugging and diagnostics */
70121diff --git a/include/net/flow.h b/include/net/flow.h
70122index 809970b..c3df4f3 100644
70123--- a/include/net/flow.h
70124+++ b/include/net/flow.h
70125@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
70126 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
70127 u8 dir, flow_resolve_t resolver);
70128 extern void flow_cache_flush(void);
70129-extern atomic_t flow_cache_genid;
70130+extern atomic_unchecked_t flow_cache_genid;
70131
70132 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
70133 {
70134diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70135index 15e1f8fe..668837c 100644
70136--- a/include/net/inetpeer.h
70137+++ b/include/net/inetpeer.h
70138@@ -24,7 +24,7 @@ struct inet_peer
70139 __u32 dtime; /* the time of last use of not
70140 * referenced entries */
70141 atomic_t refcnt;
70142- atomic_t rid; /* Frag reception counter */
70143+ atomic_unchecked_t rid; /* Frag reception counter */
70144 __u32 tcp_ts;
70145 unsigned long tcp_ts_stamp;
70146 };
70147diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
70148index 98978e7..2243a3d 100644
70149--- a/include/net/ip_vs.h
70150+++ b/include/net/ip_vs.h
70151@@ -365,7 +365,7 @@ struct ip_vs_conn {
70152 struct ip_vs_conn *control; /* Master control connection */
70153 atomic_t n_control; /* Number of controlled ones */
70154 struct ip_vs_dest *dest; /* real server */
70155- atomic_t in_pkts; /* incoming packet counter */
70156+ atomic_unchecked_t in_pkts; /* incoming packet counter */
70157
70158 /* packet transmitter for different forwarding methods. If it
70159 mangles the packet, it must return NF_DROP or better NF_STOLEN,
70160@@ -466,7 +466,7 @@ struct ip_vs_dest {
70161 union nf_inet_addr addr; /* IP address of the server */
70162 __be16 port; /* port number of the server */
70163 volatile unsigned flags; /* dest status flags */
70164- atomic_t conn_flags; /* flags to copy to conn */
70165+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
70166 atomic_t weight; /* server weight */
70167
70168 atomic_t refcnt; /* reference counter */
70169diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
70170index 69b610a..fe3962c 100644
70171--- a/include/net/irda/ircomm_core.h
70172+++ b/include/net/irda/ircomm_core.h
70173@@ -51,7 +51,7 @@ typedef struct {
70174 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
70175 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
70176 struct ircomm_info *);
70177-} call_t;
70178+} __no_const call_t;
70179
70180 struct ircomm_cb {
70181 irda_queue_t queue;
70182diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
70183index eea2e61..08c692d 100644
70184--- a/include/net/irda/ircomm_tty.h
70185+++ b/include/net/irda/ircomm_tty.h
70186@@ -35,6 +35,7 @@
70187 #include <linux/termios.h>
70188 #include <linux/timer.h>
70189 #include <linux/tty.h> /* struct tty_struct */
70190+#include <asm/local.h>
70191
70192 #include <net/irda/irias_object.h>
70193 #include <net/irda/ircomm_core.h>
70194@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
70195 unsigned short close_delay;
70196 unsigned short closing_wait; /* time to wait before closing */
70197
70198- int open_count;
70199- int blocked_open; /* # of blocked opens */
70200+ local_t open_count;
70201+ local_t blocked_open; /* # of blocked opens */
70202
70203 /* Protect concurent access to :
70204 * o self->open_count
70205diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
70206index f82a1e8..82d81e8 100644
70207--- a/include/net/iucv/af_iucv.h
70208+++ b/include/net/iucv/af_iucv.h
70209@@ -87,7 +87,7 @@ struct iucv_sock {
70210 struct iucv_sock_list {
70211 struct hlist_head head;
70212 rwlock_t lock;
70213- atomic_t autobind_name;
70214+ atomic_unchecked_t autobind_name;
70215 };
70216
70217 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70218diff --git a/include/net/lapb.h b/include/net/lapb.h
70219index 96cb5dd..25e8d4f 100644
70220--- a/include/net/lapb.h
70221+++ b/include/net/lapb.h
70222@@ -95,7 +95,7 @@ struct lapb_cb {
70223 struct sk_buff_head write_queue;
70224 struct sk_buff_head ack_queue;
70225 unsigned char window;
70226- struct lapb_register_struct callbacks;
70227+ struct lapb_register_struct *callbacks;
70228
70229 /* FRMR control information */
70230 struct lapb_frame frmr_data;
70231diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70232index 3817fda..cdb2343 100644
70233--- a/include/net/neighbour.h
70234+++ b/include/net/neighbour.h
70235@@ -131,7 +131,7 @@ struct neigh_ops
70236 int (*connected_output)(struct sk_buff*);
70237 int (*hh_output)(struct sk_buff*);
70238 int (*queue_xmit)(struct sk_buff*);
70239-};
70240+} __do_const;
70241
70242 struct pneigh_entry
70243 {
70244diff --git a/include/net/netlink.h b/include/net/netlink.h
70245index c344646..4778c71 100644
70246--- a/include/net/netlink.h
70247+++ b/include/net/netlink.h
70248@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70249 {
70250 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70251 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70252- nlh->nlmsg_len <= remaining);
70253+ nlh->nlmsg_len <= (unsigned int)remaining);
70254 }
70255
70256 /**
70257@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70258 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70259 {
70260 if (mark)
70261- skb_trim(skb, (unsigned char *) mark - skb->data);
70262+ skb_trim(skb, (const unsigned char *) mark - skb->data);
70263 }
70264
70265 /**
70266diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70267index 9a4b8b7..e49e077 100644
70268--- a/include/net/netns/ipv4.h
70269+++ b/include/net/netns/ipv4.h
70270@@ -54,7 +54,7 @@ struct netns_ipv4 {
70271 int current_rt_cache_rebuild_count;
70272
70273 struct timer_list rt_secret_timer;
70274- atomic_t rt_genid;
70275+ atomic_unchecked_t rt_genid;
70276
70277 #ifdef CONFIG_IP_MROUTE
70278 struct sock *mroute_sk;
70279diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70280index 8a6d529..171f401 100644
70281--- a/include/net/sctp/sctp.h
70282+++ b/include/net/sctp/sctp.h
70283@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70284
70285 #else /* SCTP_DEBUG */
70286
70287-#define SCTP_DEBUG_PRINTK(whatever...)
70288-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70289+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70290+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70291 #define SCTP_ENABLE_DEBUG
70292 #define SCTP_DISABLE_DEBUG
70293 #define SCTP_ASSERT(expr, str, func)
70294diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70295index d97f689..f3b90ab 100644
70296--- a/include/net/secure_seq.h
70297+++ b/include/net/secure_seq.h
70298@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70299 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70300 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70301 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70302- __be16 dport);
70303+ __be16 dport);
70304 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70305 __be16 sport, __be16 dport);
70306 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70307- __be16 sport, __be16 dport);
70308+ __be16 sport, __be16 dport);
70309 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70310- __be16 sport, __be16 dport);
70311+ __be16 sport, __be16 dport);
70312 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70313- __be16 sport, __be16 dport);
70314+ __be16 sport, __be16 dport);
70315
70316 #endif /* _NET_SECURE_SEQ */
70317diff --git a/include/net/sock.h b/include/net/sock.h
70318index 78adf52..99afd29 100644
70319--- a/include/net/sock.h
70320+++ b/include/net/sock.h
70321@@ -272,7 +272,7 @@ struct sock {
70322 rwlock_t sk_callback_lock;
70323 int sk_err,
70324 sk_err_soft;
70325- atomic_t sk_drops;
70326+ atomic_unchecked_t sk_drops;
70327 unsigned short sk_ack_backlog;
70328 unsigned short sk_max_ack_backlog;
70329 __u32 sk_priority;
70330@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70331 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70332 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70333 #else
70334-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70335+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70336 int inc)
70337 {
70338 }
70339diff --git a/include/net/tcp.h b/include/net/tcp.h
70340index 6cfe18b..dd21acb 100644
70341--- a/include/net/tcp.h
70342+++ b/include/net/tcp.h
70343@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70344 struct tcp_seq_afinfo {
70345 char *name;
70346 sa_family_t family;
70347- struct file_operations seq_fops;
70348- struct seq_operations seq_ops;
70349+ file_operations_no_const seq_fops;
70350+ seq_operations_no_const seq_ops;
70351 };
70352
70353 struct tcp_iter_state {
70354diff --git a/include/net/udp.h b/include/net/udp.h
70355index f98abd2..b4b042f 100644
70356--- a/include/net/udp.h
70357+++ b/include/net/udp.h
70358@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70359 char *name;
70360 sa_family_t family;
70361 struct udp_table *udp_table;
70362- struct file_operations seq_fops;
70363- struct seq_operations seq_ops;
70364+ file_operations_no_const seq_fops;
70365+ seq_operations_no_const seq_ops;
70366 };
70367
70368 struct udp_iter_state {
70369diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70370index cbb822e..e9c1cbe 100644
70371--- a/include/rdma/iw_cm.h
70372+++ b/include/rdma/iw_cm.h
70373@@ -129,7 +129,7 @@ struct iw_cm_verbs {
70374 int backlog);
70375
70376 int (*destroy_listen)(struct iw_cm_id *cm_id);
70377-};
70378+} __no_const;
70379
70380 /**
70381 * iw_create_cm_id - Create an IW CM identifier.
70382diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70383index 09a124b..caa8ca8 100644
70384--- a/include/scsi/libfc.h
70385+++ b/include/scsi/libfc.h
70386@@ -675,6 +675,7 @@ struct libfc_function_template {
70387 */
70388 void (*disc_stop_final) (struct fc_lport *);
70389 };
70390+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70391
70392 /* information used by the discovery layer */
70393 struct fc_disc {
70394@@ -707,7 +708,7 @@ struct fc_lport {
70395 struct fc_disc disc;
70396
70397 /* Operational Information */
70398- struct libfc_function_template tt;
70399+ libfc_function_template_no_const tt;
70400 u8 link_up;
70401 u8 qfull;
70402 enum fc_lport_state state;
70403diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70404index de8e180..f15e0d7 100644
70405--- a/include/scsi/scsi_device.h
70406+++ b/include/scsi/scsi_device.h
70407@@ -156,9 +156,9 @@ struct scsi_device {
70408 unsigned int max_device_blocked; /* what device_blocked counts down from */
70409 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70410
70411- atomic_t iorequest_cnt;
70412- atomic_t iodone_cnt;
70413- atomic_t ioerr_cnt;
70414+ atomic_unchecked_t iorequest_cnt;
70415+ atomic_unchecked_t iodone_cnt;
70416+ atomic_unchecked_t ioerr_cnt;
70417
70418 struct device sdev_gendev,
70419 sdev_dev;
70420diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
70421index fc50bd6..81ba9cb 100644
70422--- a/include/scsi/scsi_transport_fc.h
70423+++ b/include/scsi/scsi_transport_fc.h
70424@@ -708,7 +708,7 @@ struct fc_function_template {
70425 unsigned long show_host_system_hostname:1;
70426
70427 unsigned long disable_target_scan:1;
70428-};
70429+} __do_const;
70430
70431
70432 /**
70433diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
70434index 3dae3f7..8440d6f 100644
70435--- a/include/sound/ac97_codec.h
70436+++ b/include/sound/ac97_codec.h
70437@@ -419,15 +419,15 @@
70438 struct snd_ac97;
70439
70440 struct snd_ac97_build_ops {
70441- int (*build_3d) (struct snd_ac97 *ac97);
70442- int (*build_specific) (struct snd_ac97 *ac97);
70443- int (*build_spdif) (struct snd_ac97 *ac97);
70444- int (*build_post_spdif) (struct snd_ac97 *ac97);
70445+ int (* const build_3d) (struct snd_ac97 *ac97);
70446+ int (* const build_specific) (struct snd_ac97 *ac97);
70447+ int (* const build_spdif) (struct snd_ac97 *ac97);
70448+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
70449 #ifdef CONFIG_PM
70450- void (*suspend) (struct snd_ac97 *ac97);
70451- void (*resume) (struct snd_ac97 *ac97);
70452+ void (* const suspend) (struct snd_ac97 *ac97);
70453+ void (* const resume) (struct snd_ac97 *ac97);
70454 #endif
70455- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70456+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70457 };
70458
70459 struct snd_ac97_bus_ops {
70460@@ -477,7 +477,7 @@ struct snd_ac97_template {
70461
70462 struct snd_ac97 {
70463 /* -- lowlevel (hardware) driver specific -- */
70464- struct snd_ac97_build_ops * build_ops;
70465+ const struct snd_ac97_build_ops * build_ops;
70466 void *private_data;
70467 void (*private_free) (struct snd_ac97 *ac97);
70468 /* --- */
70469diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
70470index 891cf1a..a94ba2b 100644
70471--- a/include/sound/ak4xxx-adda.h
70472+++ b/include/sound/ak4xxx-adda.h
70473@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
70474 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
70475 unsigned char val);
70476 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
70477-};
70478+} __no_const;
70479
70480 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
70481
70482diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
70483index 8c05e47..2b5df97 100644
70484--- a/include/sound/hwdep.h
70485+++ b/include/sound/hwdep.h
70486@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
70487 struct snd_hwdep_dsp_status *status);
70488 int (*dsp_load)(struct snd_hwdep *hw,
70489 struct snd_hwdep_dsp_image *image);
70490-};
70491+} __no_const;
70492
70493 struct snd_hwdep {
70494 struct snd_card *card;
70495diff --git a/include/sound/info.h b/include/sound/info.h
70496index 112e894..6fda5b5 100644
70497--- a/include/sound/info.h
70498+++ b/include/sound/info.h
70499@@ -44,7 +44,7 @@ struct snd_info_entry_text {
70500 struct snd_info_buffer *buffer);
70501 void (*write)(struct snd_info_entry *entry,
70502 struct snd_info_buffer *buffer);
70503-};
70504+} __no_const;
70505
70506 struct snd_info_entry_ops {
70507 int (*open)(struct snd_info_entry *entry,
70508diff --git a/include/sound/pcm.h b/include/sound/pcm.h
70509index de6d981..590a550 100644
70510--- a/include/sound/pcm.h
70511+++ b/include/sound/pcm.h
70512@@ -80,6 +80,7 @@ struct snd_pcm_ops {
70513 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
70514 int (*ack)(struct snd_pcm_substream *substream);
70515 };
70516+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
70517
70518 /*
70519 *
70520diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
70521index 736eac7..fe8a80f 100644
70522--- a/include/sound/sb16_csp.h
70523+++ b/include/sound/sb16_csp.h
70524@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
70525 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
70526 int (*csp_stop) (struct snd_sb_csp * p);
70527 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
70528-};
70529+} __no_const;
70530
70531 /*
70532 * CSP private data
70533diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
70534index 444cd6b..3327cc5 100644
70535--- a/include/sound/ymfpci.h
70536+++ b/include/sound/ymfpci.h
70537@@ -358,7 +358,7 @@ struct snd_ymfpci {
70538 spinlock_t reg_lock;
70539 spinlock_t voice_lock;
70540 wait_queue_head_t interrupt_sleep;
70541- atomic_t interrupt_sleep_count;
70542+ atomic_unchecked_t interrupt_sleep_count;
70543 struct snd_info_entry *proc_entry;
70544 const struct firmware *dsp_microcode;
70545 const struct firmware *controller_microcode;
70546diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
70547index b89f9db..f097b38 100644
70548--- a/include/trace/events/irq.h
70549+++ b/include/trace/events/irq.h
70550@@ -34,7 +34,7 @@
70551 */
70552 TRACE_EVENT(irq_handler_entry,
70553
70554- TP_PROTO(int irq, struct irqaction *action),
70555+ TP_PROTO(int irq, const struct irqaction *action),
70556
70557 TP_ARGS(irq, action),
70558
70559@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
70560 */
70561 TRACE_EVENT(irq_handler_exit,
70562
70563- TP_PROTO(int irq, struct irqaction *action, int ret),
70564+ TP_PROTO(int irq, const struct irqaction *action, int ret),
70565
70566 TP_ARGS(irq, action, ret),
70567
70568@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
70569 */
70570 TRACE_EVENT(softirq_entry,
70571
70572- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70573+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70574
70575 TP_ARGS(h, vec),
70576
70577@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
70578 */
70579 TRACE_EVENT(softirq_exit,
70580
70581- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70582+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70583
70584 TP_ARGS(h, vec),
70585
70586diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
70587index 0993a22..32ba2fe 100644
70588--- a/include/video/uvesafb.h
70589+++ b/include/video/uvesafb.h
70590@@ -177,6 +177,7 @@ struct uvesafb_par {
70591 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
70592 u8 pmi_setpal; /* PMI for palette changes */
70593 u16 *pmi_base; /* protected mode interface location */
70594+ u8 *pmi_code; /* protected mode code location */
70595 void *pmi_start;
70596 void *pmi_pal;
70597 u8 *vbe_state_orig; /*
70598diff --git a/init/Kconfig b/init/Kconfig
70599index d72691b..3996e54 100644
70600--- a/init/Kconfig
70601+++ b/init/Kconfig
70602@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
70603
70604 config COMPAT_BRK
70605 bool "Disable heap randomization"
70606- default y
70607+ default n
70608 help
70609 Randomizing heap placement makes heap exploits harder, but it
70610 also breaks ancient binaries (including anything libc5 based).
70611diff --git a/init/do_mounts.c b/init/do_mounts.c
70612index bb008d0..4fa3933 100644
70613--- a/init/do_mounts.c
70614+++ b/init/do_mounts.c
70615@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
70616
70617 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
70618 {
70619- int err = sys_mount(name, "/root", fs, flags, data);
70620+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
70621 if (err)
70622 return err;
70623
70624- sys_chdir("/root");
70625+ sys_chdir((__force const char __user *)"/root");
70626 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
70627 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
70628 current->fs->pwd.mnt->mnt_sb->s_type->name,
70629@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
70630 va_start(args, fmt);
70631 vsprintf(buf, fmt, args);
70632 va_end(args);
70633- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
70634+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
70635 if (fd >= 0) {
70636 sys_ioctl(fd, FDEJECT, 0);
70637 sys_close(fd);
70638 }
70639 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
70640- fd = sys_open("/dev/console", O_RDWR, 0);
70641+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
70642 if (fd >= 0) {
70643 sys_ioctl(fd, TCGETS, (long)&termios);
70644 termios.c_lflag &= ~ICANON;
70645 sys_ioctl(fd, TCSETSF, (long)&termios);
70646- sys_read(fd, &c, 1);
70647+ sys_read(fd, (char __user *)&c, 1);
70648 termios.c_lflag |= ICANON;
70649 sys_ioctl(fd, TCSETSF, (long)&termios);
70650 sys_close(fd);
70651@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
70652 mount_root();
70653 out:
70654 devtmpfs_mount("dev");
70655- sys_mount(".", "/", NULL, MS_MOVE, NULL);
70656- sys_chroot(".");
70657+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
70658+ sys_chroot((__force char __user *)".");
70659 }
70660diff --git a/init/do_mounts.h b/init/do_mounts.h
70661index f5b978a..69dbfe8 100644
70662--- a/init/do_mounts.h
70663+++ b/init/do_mounts.h
70664@@ -15,15 +15,15 @@ extern int root_mountflags;
70665
70666 static inline int create_dev(char *name, dev_t dev)
70667 {
70668- sys_unlink(name);
70669- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
70670+ sys_unlink((char __force_user *)name);
70671+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
70672 }
70673
70674 #if BITS_PER_LONG == 32
70675 static inline u32 bstat(char *name)
70676 {
70677 struct stat64 stat;
70678- if (sys_stat64(name, &stat) != 0)
70679+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
70680 return 0;
70681 if (!S_ISBLK(stat.st_mode))
70682 return 0;
70683@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
70684 static inline u32 bstat(char *name)
70685 {
70686 struct stat stat;
70687- if (sys_newstat(name, &stat) != 0)
70688+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
70689 return 0;
70690 if (!S_ISBLK(stat.st_mode))
70691 return 0;
70692diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
70693index 614241b..4da046b 100644
70694--- a/init/do_mounts_initrd.c
70695+++ b/init/do_mounts_initrd.c
70696@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
70697 sys_close(old_fd);sys_close(root_fd);
70698 sys_close(0);sys_close(1);sys_close(2);
70699 sys_setsid();
70700- (void) sys_open("/dev/console",O_RDWR,0);
70701+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
70702 (void) sys_dup(0);
70703 (void) sys_dup(0);
70704 return kernel_execve(shell, argv, envp_init);
70705@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
70706 create_dev("/dev/root.old", Root_RAM0);
70707 /* mount initrd on rootfs' /root */
70708 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
70709- sys_mkdir("/old", 0700);
70710- root_fd = sys_open("/", 0, 0);
70711- old_fd = sys_open("/old", 0, 0);
70712+ sys_mkdir((const char __force_user *)"/old", 0700);
70713+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
70714+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
70715 /* move initrd over / and chdir/chroot in initrd root */
70716- sys_chdir("/root");
70717- sys_mount(".", "/", NULL, MS_MOVE, NULL);
70718- sys_chroot(".");
70719+ sys_chdir((const char __force_user *)"/root");
70720+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
70721+ sys_chroot((const char __force_user *)".");
70722
70723 /*
70724 * In case that a resume from disk is carried out by linuxrc or one of
70725@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
70726
70727 /* move initrd to rootfs' /old */
70728 sys_fchdir(old_fd);
70729- sys_mount("/", ".", NULL, MS_MOVE, NULL);
70730+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
70731 /* switch root and cwd back to / of rootfs */
70732 sys_fchdir(root_fd);
70733- sys_chroot(".");
70734+ sys_chroot((const char __force_user *)".");
70735 sys_close(old_fd);
70736 sys_close(root_fd);
70737
70738 if (new_decode_dev(real_root_dev) == Root_RAM0) {
70739- sys_chdir("/old");
70740+ sys_chdir((const char __force_user *)"/old");
70741 return;
70742 }
70743
70744@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
70745 mount_root();
70746
70747 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
70748- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
70749+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
70750 if (!error)
70751 printk("okay\n");
70752 else {
70753- int fd = sys_open("/dev/root.old", O_RDWR, 0);
70754+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
70755 if (error == -ENOENT)
70756 printk("/initrd does not exist. Ignored.\n");
70757 else
70758 printk("failed\n");
70759 printk(KERN_NOTICE "Unmounting old root\n");
70760- sys_umount("/old", MNT_DETACH);
70761+ sys_umount((char __force_user *)"/old", MNT_DETACH);
70762 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
70763 if (fd < 0) {
70764 error = fd;
70765@@ -119,11 +119,11 @@ int __init initrd_load(void)
70766 * mounted in the normal path.
70767 */
70768 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
70769- sys_unlink("/initrd.image");
70770+ sys_unlink((const char __force_user *)"/initrd.image");
70771 handle_initrd();
70772 return 1;
70773 }
70774 }
70775- sys_unlink("/initrd.image");
70776+ sys_unlink((const char __force_user *)"/initrd.image");
70777 return 0;
70778 }
70779diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
70780index 69aebbf..c0bf6a7 100644
70781--- a/init/do_mounts_md.c
70782+++ b/init/do_mounts_md.c
70783@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
70784 partitioned ? "_d" : "", minor,
70785 md_setup_args[ent].device_names);
70786
70787- fd = sys_open(name, 0, 0);
70788+ fd = sys_open((char __force_user *)name, 0, 0);
70789 if (fd < 0) {
70790 printk(KERN_ERR "md: open failed - cannot start "
70791 "array %s\n", name);
70792@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
70793 * array without it
70794 */
70795 sys_close(fd);
70796- fd = sys_open(name, 0, 0);
70797+ fd = sys_open((char __force_user *)name, 0, 0);
70798 sys_ioctl(fd, BLKRRPART, 0);
70799 }
70800 sys_close(fd);
70801@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
70802
70803 wait_for_device_probe();
70804
70805- fd = sys_open("/dev/md0", 0, 0);
70806+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
70807 if (fd >= 0) {
70808 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
70809 sys_close(fd);
70810diff --git a/init/initramfs.c b/init/initramfs.c
70811index 1fd59b8..a01b079 100644
70812--- a/init/initramfs.c
70813+++ b/init/initramfs.c
70814@@ -74,7 +74,7 @@ static void __init free_hash(void)
70815 }
70816 }
70817
70818-static long __init do_utime(char __user *filename, time_t mtime)
70819+static long __init do_utime(__force char __user *filename, time_t mtime)
70820 {
70821 struct timespec t[2];
70822
70823@@ -109,7 +109,7 @@ static void __init dir_utime(void)
70824 struct dir_entry *de, *tmp;
70825 list_for_each_entry_safe(de, tmp, &dir_list, list) {
70826 list_del(&de->list);
70827- do_utime(de->name, de->mtime);
70828+ do_utime((char __force_user *)de->name, de->mtime);
70829 kfree(de->name);
70830 kfree(de);
70831 }
70832@@ -271,7 +271,7 @@ static int __init maybe_link(void)
70833 if (nlink >= 2) {
70834 char *old = find_link(major, minor, ino, mode, collected);
70835 if (old)
70836- return (sys_link(old, collected) < 0) ? -1 : 1;
70837+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
70838 }
70839 return 0;
70840 }
70841@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
70842 {
70843 struct stat st;
70844
70845- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
70846+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
70847 if (S_ISDIR(st.st_mode))
70848- sys_rmdir(path);
70849+ sys_rmdir((char __force_user *)path);
70850 else
70851- sys_unlink(path);
70852+ sys_unlink((char __force_user *)path);
70853 }
70854 }
70855
70856@@ -305,7 +305,7 @@ static int __init do_name(void)
70857 int openflags = O_WRONLY|O_CREAT;
70858 if (ml != 1)
70859 openflags |= O_TRUNC;
70860- wfd = sys_open(collected, openflags, mode);
70861+ wfd = sys_open((char __force_user *)collected, openflags, mode);
70862
70863 if (wfd >= 0) {
70864 sys_fchown(wfd, uid, gid);
70865@@ -317,17 +317,17 @@ static int __init do_name(void)
70866 }
70867 }
70868 } else if (S_ISDIR(mode)) {
70869- sys_mkdir(collected, mode);
70870- sys_chown(collected, uid, gid);
70871- sys_chmod(collected, mode);
70872+ sys_mkdir((char __force_user *)collected, mode);
70873+ sys_chown((char __force_user *)collected, uid, gid);
70874+ sys_chmod((char __force_user *)collected, mode);
70875 dir_add(collected, mtime);
70876 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
70877 S_ISFIFO(mode) || S_ISSOCK(mode)) {
70878 if (maybe_link() == 0) {
70879- sys_mknod(collected, mode, rdev);
70880- sys_chown(collected, uid, gid);
70881- sys_chmod(collected, mode);
70882- do_utime(collected, mtime);
70883+ sys_mknod((char __force_user *)collected, mode, rdev);
70884+ sys_chown((char __force_user *)collected, uid, gid);
70885+ sys_chmod((char __force_user *)collected, mode);
70886+ do_utime((char __force_user *)collected, mtime);
70887 }
70888 }
70889 return 0;
70890@@ -336,15 +336,15 @@ static int __init do_name(void)
70891 static int __init do_copy(void)
70892 {
70893 if (count >= body_len) {
70894- sys_write(wfd, victim, body_len);
70895+ sys_write(wfd, (char __force_user *)victim, body_len);
70896 sys_close(wfd);
70897- do_utime(vcollected, mtime);
70898+ do_utime((char __force_user *)vcollected, mtime);
70899 kfree(vcollected);
70900 eat(body_len);
70901 state = SkipIt;
70902 return 0;
70903 } else {
70904- sys_write(wfd, victim, count);
70905+ sys_write(wfd, (char __force_user *)victim, count);
70906 body_len -= count;
70907 eat(count);
70908 return 1;
70909@@ -355,9 +355,9 @@ static int __init do_symlink(void)
70910 {
70911 collected[N_ALIGN(name_len) + body_len] = '\0';
70912 clean_path(collected, 0);
70913- sys_symlink(collected + N_ALIGN(name_len), collected);
70914- sys_lchown(collected, uid, gid);
70915- do_utime(collected, mtime);
70916+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
70917+ sys_lchown((char __force_user *)collected, uid, gid);
70918+ do_utime((char __force_user *)collected, mtime);
70919 state = SkipIt;
70920 next_state = Reset;
70921 return 0;
70922diff --git a/init/main.c b/init/main.c
70923index 1eb4bd5..fea5bbe 100644
70924--- a/init/main.c
70925+++ b/init/main.c
70926@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
70927 #ifdef CONFIG_TC
70928 extern void tc_init(void);
70929 #endif
70930+extern void grsecurity_init(void);
70931
70932 enum system_states system_state __read_mostly;
70933 EXPORT_SYMBOL(system_state);
70934@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
70935
70936 __setup("reset_devices", set_reset_devices);
70937
70938+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
70939+extern char pax_enter_kernel_user[];
70940+extern char pax_exit_kernel_user[];
70941+extern pgdval_t clone_pgd_mask;
70942+#endif
70943+
70944+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
70945+static int __init setup_pax_nouderef(char *str)
70946+{
70947+#ifdef CONFIG_X86_32
70948+ unsigned int cpu;
70949+ struct desc_struct *gdt;
70950+
70951+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
70952+ gdt = get_cpu_gdt_table(cpu);
70953+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
70954+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
70955+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
70956+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
70957+ }
70958+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
70959+#else
70960+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
70961+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
70962+ clone_pgd_mask = ~(pgdval_t)0UL;
70963+#endif
70964+
70965+ return 0;
70966+}
70967+early_param("pax_nouderef", setup_pax_nouderef);
70968+#endif
70969+
70970+#ifdef CONFIG_PAX_SOFTMODE
70971+int pax_softmode;
70972+
70973+static int __init setup_pax_softmode(char *str)
70974+{
70975+ get_option(&str, &pax_softmode);
70976+ return 1;
70977+}
70978+__setup("pax_softmode=", setup_pax_softmode);
70979+#endif
70980+
70981 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
70982 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
70983 static const char *panic_later, *panic_param;
70984@@ -705,52 +749,53 @@ int initcall_debug;
70985 core_param(initcall_debug, initcall_debug, bool, 0644);
70986
70987 static char msgbuf[64];
70988-static struct boot_trace_call call;
70989-static struct boot_trace_ret ret;
70990+static struct boot_trace_call trace_call;
70991+static struct boot_trace_ret trace_ret;
70992
70993 int do_one_initcall(initcall_t fn)
70994 {
70995 int count = preempt_count();
70996 ktime_t calltime, delta, rettime;
70997+ const char *msg1 = "", *msg2 = "";
70998
70999 if (initcall_debug) {
71000- call.caller = task_pid_nr(current);
71001- printk("calling %pF @ %i\n", fn, call.caller);
71002+ trace_call.caller = task_pid_nr(current);
71003+ printk("calling %pF @ %i\n", fn, trace_call.caller);
71004 calltime = ktime_get();
71005- trace_boot_call(&call, fn);
71006+ trace_boot_call(&trace_call, fn);
71007 enable_boot_trace();
71008 }
71009
71010- ret.result = fn();
71011+ trace_ret.result = fn();
71012
71013 if (initcall_debug) {
71014 disable_boot_trace();
71015 rettime = ktime_get();
71016 delta = ktime_sub(rettime, calltime);
71017- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71018- trace_boot_ret(&ret, fn);
71019+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
71020+ trace_boot_ret(&trace_ret, fn);
71021 printk("initcall %pF returned %d after %Ld usecs\n", fn,
71022- ret.result, ret.duration);
71023+ trace_ret.result, trace_ret.duration);
71024 }
71025
71026 msgbuf[0] = 0;
71027
71028- if (ret.result && ret.result != -ENODEV && initcall_debug)
71029- sprintf(msgbuf, "error code %d ", ret.result);
71030+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
71031+ sprintf(msgbuf, "error code %d ", trace_ret.result);
71032
71033 if (preempt_count() != count) {
71034- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
71035+ msg1 = " preemption imbalance";
71036 preempt_count() = count;
71037 }
71038 if (irqs_disabled()) {
71039- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
71040+ msg2 = " disabled interrupts";
71041 local_irq_enable();
71042 }
71043- if (msgbuf[0]) {
71044- printk("initcall %pF returned with %s\n", fn, msgbuf);
71045+ if (msgbuf[0] || *msg1 || *msg2) {
71046+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
71047 }
71048
71049- return ret.result;
71050+ return trace_ret.result;
71051 }
71052
71053
71054@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
71055 if (!ramdisk_execute_command)
71056 ramdisk_execute_command = "/init";
71057
71058- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
71059+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
71060 ramdisk_execute_command = NULL;
71061 prepare_namespace();
71062 }
71063
71064+ grsecurity_init();
71065+
71066 /*
71067 * Ok, we have completed the initial bootup, and
71068 * we're essentially up and running. Get rid of the
71069diff --git a/init/noinitramfs.c b/init/noinitramfs.c
71070index f4c1a3a..96c19bd 100644
71071--- a/init/noinitramfs.c
71072+++ b/init/noinitramfs.c
71073@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
71074 {
71075 int err;
71076
71077- err = sys_mkdir("/dev", 0755);
71078+ err = sys_mkdir((const char __user *)"/dev", 0755);
71079 if (err < 0)
71080 goto out;
71081
71082@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
71083 if (err < 0)
71084 goto out;
71085
71086- err = sys_mkdir("/root", 0700);
71087+ err = sys_mkdir((const char __user *)"/root", 0700);
71088 if (err < 0)
71089 goto out;
71090
71091diff --git a/ipc/mqueue.c b/ipc/mqueue.c
71092index d01bc14..8df81db 100644
71093--- a/ipc/mqueue.c
71094+++ b/ipc/mqueue.c
71095@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
71096 mq_bytes = (mq_msg_tblsz +
71097 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
71098
71099+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
71100 spin_lock(&mq_lock);
71101 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
71102 u->mq_bytes + mq_bytes >
71103diff --git a/ipc/msg.c b/ipc/msg.c
71104index 779f762..4af9e36 100644
71105--- a/ipc/msg.c
71106+++ b/ipc/msg.c
71107@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
71108 return security_msg_queue_associate(msq, msgflg);
71109 }
71110
71111+static struct ipc_ops msg_ops = {
71112+ .getnew = newque,
71113+ .associate = msg_security,
71114+ .more_checks = NULL
71115+};
71116+
71117 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
71118 {
71119 struct ipc_namespace *ns;
71120- struct ipc_ops msg_ops;
71121 struct ipc_params msg_params;
71122
71123 ns = current->nsproxy->ipc_ns;
71124
71125- msg_ops.getnew = newque;
71126- msg_ops.associate = msg_security;
71127- msg_ops.more_checks = NULL;
71128-
71129 msg_params.key = key;
71130 msg_params.flg = msgflg;
71131
71132diff --git a/ipc/sem.c b/ipc/sem.c
71133index b781007..f738b04 100644
71134--- a/ipc/sem.c
71135+++ b/ipc/sem.c
71136@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
71137 return 0;
71138 }
71139
71140+static struct ipc_ops sem_ops = {
71141+ .getnew = newary,
71142+ .associate = sem_security,
71143+ .more_checks = sem_more_checks
71144+};
71145+
71146 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71147 {
71148 struct ipc_namespace *ns;
71149- struct ipc_ops sem_ops;
71150 struct ipc_params sem_params;
71151
71152 ns = current->nsproxy->ipc_ns;
71153@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71154 if (nsems < 0 || nsems > ns->sc_semmsl)
71155 return -EINVAL;
71156
71157- sem_ops.getnew = newary;
71158- sem_ops.associate = sem_security;
71159- sem_ops.more_checks = sem_more_checks;
71160-
71161 sem_params.key = key;
71162 sem_params.flg = semflg;
71163 sem_params.u.nsems = nsems;
71164@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
71165 ushort* sem_io = fast_sem_io;
71166 int nsems;
71167
71168+ pax_track_stack();
71169+
71170 sma = sem_lock_check(ns, semid);
71171 if (IS_ERR(sma))
71172 return PTR_ERR(sma);
71173@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
71174 unsigned long jiffies_left = 0;
71175 struct ipc_namespace *ns;
71176
71177+ pax_track_stack();
71178+
71179 ns = current->nsproxy->ipc_ns;
71180
71181 if (nsops < 1 || semid < 0)
71182diff --git a/ipc/shm.c b/ipc/shm.c
71183index d30732c..e4992cd 100644
71184--- a/ipc/shm.c
71185+++ b/ipc/shm.c
71186@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71187 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71188 #endif
71189
71190+#ifdef CONFIG_GRKERNSEC
71191+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71192+ const time_t shm_createtime, const uid_t cuid,
71193+ const int shmid);
71194+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71195+ const time_t shm_createtime);
71196+#endif
71197+
71198 void shm_init_ns(struct ipc_namespace *ns)
71199 {
71200 ns->shm_ctlmax = SHMMAX;
71201@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
71202 shp->shm_lprid = 0;
71203 shp->shm_atim = shp->shm_dtim = 0;
71204 shp->shm_ctim = get_seconds();
71205+#ifdef CONFIG_GRKERNSEC
71206+ {
71207+ struct timespec timeval;
71208+ do_posix_clock_monotonic_gettime(&timeval);
71209+
71210+ shp->shm_createtime = timeval.tv_sec;
71211+ }
71212+#endif
71213 shp->shm_segsz = size;
71214 shp->shm_nattch = 0;
71215 shp->shm_file = file;
71216@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71217 return 0;
71218 }
71219
71220+static struct ipc_ops shm_ops = {
71221+ .getnew = newseg,
71222+ .associate = shm_security,
71223+ .more_checks = shm_more_checks
71224+};
71225+
71226 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71227 {
71228 struct ipc_namespace *ns;
71229- struct ipc_ops shm_ops;
71230 struct ipc_params shm_params;
71231
71232 ns = current->nsproxy->ipc_ns;
71233
71234- shm_ops.getnew = newseg;
71235- shm_ops.associate = shm_security;
71236- shm_ops.more_checks = shm_more_checks;
71237-
71238 shm_params.key = key;
71239 shm_params.flg = shmflg;
71240 shm_params.u.size = size;
71241@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71242 f_mode = FMODE_READ | FMODE_WRITE;
71243 }
71244 if (shmflg & SHM_EXEC) {
71245+
71246+#ifdef CONFIG_PAX_MPROTECT
71247+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
71248+ goto out;
71249+#endif
71250+
71251 prot |= PROT_EXEC;
71252 acc_mode |= S_IXUGO;
71253 }
71254@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71255 if (err)
71256 goto out_unlock;
71257
71258+#ifdef CONFIG_GRKERNSEC
71259+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71260+ shp->shm_perm.cuid, shmid) ||
71261+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71262+ err = -EACCES;
71263+ goto out_unlock;
71264+ }
71265+#endif
71266+
71267 path.dentry = dget(shp->shm_file->f_path.dentry);
71268 path.mnt = shp->shm_file->f_path.mnt;
71269 shp->shm_nattch++;
71270+#ifdef CONFIG_GRKERNSEC
71271+ shp->shm_lapid = current->pid;
71272+#endif
71273 size = i_size_read(path.dentry->d_inode);
71274 shm_unlock(shp);
71275
71276diff --git a/kernel/acct.c b/kernel/acct.c
71277index a6605ca..ca91111 100644
71278--- a/kernel/acct.c
71279+++ b/kernel/acct.c
71280@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71281 */
71282 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71283 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71284- file->f_op->write(file, (char *)&ac,
71285+ file->f_op->write(file, (char __force_user *)&ac,
71286 sizeof(acct_t), &file->f_pos);
71287 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71288 set_fs(fs);
71289diff --git a/kernel/audit.c b/kernel/audit.c
71290index 5feed23..48415fd 100644
71291--- a/kernel/audit.c
71292+++ b/kernel/audit.c
71293@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71294 3) suppressed due to audit_rate_limit
71295 4) suppressed due to audit_backlog_limit
71296 */
71297-static atomic_t audit_lost = ATOMIC_INIT(0);
71298+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71299
71300 /* The netlink socket. */
71301 static struct sock *audit_sock;
71302@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71303 unsigned long now;
71304 int print;
71305
71306- atomic_inc(&audit_lost);
71307+ atomic_inc_unchecked(&audit_lost);
71308
71309 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71310
71311@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71312 printk(KERN_WARNING
71313 "audit: audit_lost=%d audit_rate_limit=%d "
71314 "audit_backlog_limit=%d\n",
71315- atomic_read(&audit_lost),
71316+ atomic_read_unchecked(&audit_lost),
71317 audit_rate_limit,
71318 audit_backlog_limit);
71319 audit_panic(message);
71320@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71321 status_set.pid = audit_pid;
71322 status_set.rate_limit = audit_rate_limit;
71323 status_set.backlog_limit = audit_backlog_limit;
71324- status_set.lost = atomic_read(&audit_lost);
71325+ status_set.lost = atomic_read_unchecked(&audit_lost);
71326 status_set.backlog = skb_queue_len(&audit_skb_queue);
71327 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71328 &status_set, sizeof(status_set));
71329@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71330 spin_unlock_irq(&tsk->sighand->siglock);
71331 }
71332 read_unlock(&tasklist_lock);
71333- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71334- &s, sizeof(s));
71335+
71336+ if (!err)
71337+ audit_send_reply(NETLINK_CB(skb).pid, seq,
71338+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71339 break;
71340 }
71341 case AUDIT_TTY_SET: {
71342@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71343 avail = audit_expand(ab,
71344 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71345 if (!avail)
71346- goto out;
71347+ goto out_va_end;
71348 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71349 }
71350- va_end(args2);
71351 if (len > 0)
71352 skb_put(skb, len);
71353+out_va_end:
71354+ va_end(args2);
71355 out:
71356 return;
71357 }
71358diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71359index 267e484..ac41bc3 100644
71360--- a/kernel/auditsc.c
71361+++ b/kernel/auditsc.c
71362@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71363 struct audit_buffer **ab,
71364 struct audit_aux_data_execve *axi)
71365 {
71366- int i;
71367- size_t len, len_sent = 0;
71368+ int i, len;
71369+ size_t len_sent = 0;
71370 const char __user *p;
71371 char *buf;
71372
71373@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71374 }
71375
71376 /* global counter which is incremented every time something logs in */
71377-static atomic_t session_id = ATOMIC_INIT(0);
71378+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71379
71380 /**
71381 * audit_set_loginuid - set a task's audit_context loginuid
71382@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71383 */
71384 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71385 {
71386- unsigned int sessionid = atomic_inc_return(&session_id);
71387+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71388 struct audit_context *context = task->audit_context;
71389
71390 if (context && context->in_syscall) {
71391diff --git a/kernel/capability.c b/kernel/capability.c
71392index 8a944f5..db5001e 100644
71393--- a/kernel/capability.c
71394+++ b/kernel/capability.c
71395@@ -305,10 +305,26 @@ int capable(int cap)
71396 BUG();
71397 }
71398
71399- if (security_capable(cap) == 0) {
71400+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71401 current->flags |= PF_SUPERPRIV;
71402 return 1;
71403 }
71404 return 0;
71405 }
71406+
71407+int capable_nolog(int cap)
71408+{
71409+ if (unlikely(!cap_valid(cap))) {
71410+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71411+ BUG();
71412+ }
71413+
71414+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
71415+ current->flags |= PF_SUPERPRIV;
71416+ return 1;
71417+ }
71418+ return 0;
71419+}
71420+
71421 EXPORT_SYMBOL(capable);
71422+EXPORT_SYMBOL(capable_nolog);
71423diff --git a/kernel/cgroup.c b/kernel/cgroup.c
71424index 1fbcc74..7000012 100644
71425--- a/kernel/cgroup.c
71426+++ b/kernel/cgroup.c
71427@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
71428 struct hlist_head *hhead;
71429 struct cg_cgroup_link *link;
71430
71431+ pax_track_stack();
71432+
71433 /* First see if we already have a cgroup group that matches
71434 * the desired set */
71435 read_lock(&css_set_lock);
71436diff --git a/kernel/compat.c b/kernel/compat.c
71437index 8bc5578..186e44a 100644
71438--- a/kernel/compat.c
71439+++ b/kernel/compat.c
71440@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
71441 mm_segment_t oldfs;
71442 long ret;
71443
71444- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
71445+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
71446 oldfs = get_fs();
71447 set_fs(KERNEL_DS);
71448 ret = hrtimer_nanosleep_restart(restart);
71449@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
71450 oldfs = get_fs();
71451 set_fs(KERNEL_DS);
71452 ret = hrtimer_nanosleep(&tu,
71453- rmtp ? (struct timespec __user *)&rmt : NULL,
71454+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
71455 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
71456 set_fs(oldfs);
71457
71458@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
71459 mm_segment_t old_fs = get_fs();
71460
71461 set_fs(KERNEL_DS);
71462- ret = sys_sigpending((old_sigset_t __user *) &s);
71463+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
71464 set_fs(old_fs);
71465 if (ret == 0)
71466 ret = put_user(s, set);
71467@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
71468 old_fs = get_fs();
71469 set_fs(KERNEL_DS);
71470 ret = sys_sigprocmask(how,
71471- set ? (old_sigset_t __user *) &s : NULL,
71472- oset ? (old_sigset_t __user *) &s : NULL);
71473+ set ? (old_sigset_t __force_user *) &s : NULL,
71474+ oset ? (old_sigset_t __force_user *) &s : NULL);
71475 set_fs(old_fs);
71476 if (ret == 0)
71477 if (oset)
71478@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
71479 mm_segment_t old_fs = get_fs();
71480
71481 set_fs(KERNEL_DS);
71482- ret = sys_old_getrlimit(resource, &r);
71483+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
71484 set_fs(old_fs);
71485
71486 if (!ret) {
71487@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
71488 mm_segment_t old_fs = get_fs();
71489
71490 set_fs(KERNEL_DS);
71491- ret = sys_getrusage(who, (struct rusage __user *) &r);
71492+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
71493 set_fs(old_fs);
71494
71495 if (ret)
71496@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
71497 set_fs (KERNEL_DS);
71498 ret = sys_wait4(pid,
71499 (stat_addr ?
71500- (unsigned int __user *) &status : NULL),
71501- options, (struct rusage __user *) &r);
71502+ (unsigned int __force_user *) &status : NULL),
71503+ options, (struct rusage __force_user *) &r);
71504 set_fs (old_fs);
71505
71506 if (ret > 0) {
71507@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
71508 memset(&info, 0, sizeof(info));
71509
71510 set_fs(KERNEL_DS);
71511- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
71512- uru ? (struct rusage __user *)&ru : NULL);
71513+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
71514+ uru ? (struct rusage __force_user *)&ru : NULL);
71515 set_fs(old_fs);
71516
71517 if ((ret < 0) || (info.si_signo == 0))
71518@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
71519 oldfs = get_fs();
71520 set_fs(KERNEL_DS);
71521 err = sys_timer_settime(timer_id, flags,
71522- (struct itimerspec __user *) &newts,
71523- (struct itimerspec __user *) &oldts);
71524+ (struct itimerspec __force_user *) &newts,
71525+ (struct itimerspec __force_user *) &oldts);
71526 set_fs(oldfs);
71527 if (!err && old && put_compat_itimerspec(old, &oldts))
71528 return -EFAULT;
71529@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
71530 oldfs = get_fs();
71531 set_fs(KERNEL_DS);
71532 err = sys_timer_gettime(timer_id,
71533- (struct itimerspec __user *) &ts);
71534+ (struct itimerspec __force_user *) &ts);
71535 set_fs(oldfs);
71536 if (!err && put_compat_itimerspec(setting, &ts))
71537 return -EFAULT;
71538@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
71539 oldfs = get_fs();
71540 set_fs(KERNEL_DS);
71541 err = sys_clock_settime(which_clock,
71542- (struct timespec __user *) &ts);
71543+ (struct timespec __force_user *) &ts);
71544 set_fs(oldfs);
71545 return err;
71546 }
71547@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
71548 oldfs = get_fs();
71549 set_fs(KERNEL_DS);
71550 err = sys_clock_gettime(which_clock,
71551- (struct timespec __user *) &ts);
71552+ (struct timespec __force_user *) &ts);
71553 set_fs(oldfs);
71554 if (!err && put_compat_timespec(&ts, tp))
71555 return -EFAULT;
71556@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
71557 oldfs = get_fs();
71558 set_fs(KERNEL_DS);
71559 err = sys_clock_getres(which_clock,
71560- (struct timespec __user *) &ts);
71561+ (struct timespec __force_user *) &ts);
71562 set_fs(oldfs);
71563 if (!err && tp && put_compat_timespec(&ts, tp))
71564 return -EFAULT;
71565@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
71566 long err;
71567 mm_segment_t oldfs;
71568 struct timespec tu;
71569- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
71570+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
71571
71572- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
71573+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
71574 oldfs = get_fs();
71575 set_fs(KERNEL_DS);
71576 err = clock_nanosleep_restart(restart);
71577@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
71578 oldfs = get_fs();
71579 set_fs(KERNEL_DS);
71580 err = sys_clock_nanosleep(which_clock, flags,
71581- (struct timespec __user *) &in,
71582- (struct timespec __user *) &out);
71583+ (struct timespec __force_user *) &in,
71584+ (struct timespec __force_user *) &out);
71585 set_fs(oldfs);
71586
71587 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
71588diff --git a/kernel/configs.c b/kernel/configs.c
71589index abaee68..047facd 100644
71590--- a/kernel/configs.c
71591+++ b/kernel/configs.c
71592@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
71593 struct proc_dir_entry *entry;
71594
71595 /* create the current config file */
71596+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
71597+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
71598+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
71599+ &ikconfig_file_ops);
71600+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71601+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
71602+ &ikconfig_file_ops);
71603+#endif
71604+#else
71605 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
71606 &ikconfig_file_ops);
71607+#endif
71608+
71609 if (!entry)
71610 return -ENOMEM;
71611
71612diff --git a/kernel/cpu.c b/kernel/cpu.c
71613index 3f2f04f..4e53ded 100644
71614--- a/kernel/cpu.c
71615+++ b/kernel/cpu.c
71616@@ -20,7 +20,7 @@
71617 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
71618 static DEFINE_MUTEX(cpu_add_remove_lock);
71619
71620-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
71621+static RAW_NOTIFIER_HEAD(cpu_chain);
71622
71623 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
71624 * Should always be manipulated under cpu_add_remove_lock
71625diff --git a/kernel/cred.c b/kernel/cred.c
71626index 0b5b5fc..f7fe51a 100644
71627--- a/kernel/cred.c
71628+++ b/kernel/cred.c
71629@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
71630 */
71631 void __put_cred(struct cred *cred)
71632 {
71633+ pax_track_stack();
71634+
71635 kdebug("__put_cred(%p{%d,%d})", cred,
71636 atomic_read(&cred->usage),
71637 read_cred_subscribers(cred));
71638@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
71639 {
71640 struct cred *cred;
71641
71642+ pax_track_stack();
71643+
71644 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
71645 atomic_read(&tsk->cred->usage),
71646 read_cred_subscribers(tsk->cred));
71647@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
71648 validate_creds(cred);
71649 put_cred(cred);
71650 }
71651+
71652+#ifdef CONFIG_GRKERNSEC_SETXID
71653+ cred = (struct cred *) tsk->delayed_cred;
71654+ if (cred) {
71655+ tsk->delayed_cred = NULL;
71656+ validate_creds(cred);
71657+ put_cred(cred);
71658+ }
71659+#endif
71660 }
71661
71662 /**
71663@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
71664 {
71665 const struct cred *cred;
71666
71667+ pax_track_stack();
71668+
71669 rcu_read_lock();
71670
71671 do {
71672@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
71673 {
71674 struct cred *new;
71675
71676+ pax_track_stack();
71677+
71678 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
71679 if (!new)
71680 return NULL;
71681@@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
71682 const struct cred *old;
71683 struct cred *new;
71684
71685+ pax_track_stack();
71686+
71687 validate_process_creds();
71688
71689 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71690@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
71691 struct thread_group_cred *tgcred = NULL;
71692 struct cred *new;
71693
71694+ pax_track_stack();
71695+
71696 #ifdef CONFIG_KEYS
71697 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
71698 if (!tgcred)
71699@@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
71700 struct cred *new;
71701 int ret;
71702
71703+ pax_track_stack();
71704+
71705 mutex_init(&p->cred_guard_mutex);
71706
71707 if (
71708@@ -523,11 +546,13 @@ error_put:
71709 * Always returns 0 thus allowing this function to be tail-called at the end
71710 * of, say, sys_setgid().
71711 */
71712-int commit_creds(struct cred *new)
71713+static int __commit_creds(struct cred *new)
71714 {
71715 struct task_struct *task = current;
71716 const struct cred *old = task->real_cred;
71717
71718+ pax_track_stack();
71719+
71720 kdebug("commit_creds(%p{%d,%d})", new,
71721 atomic_read(&new->usage),
71722 read_cred_subscribers(new));
71723@@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
71724
71725 get_cred(new); /* we will require a ref for the subj creds too */
71726
71727+ gr_set_role_label(task, new->uid, new->gid);
71728+
71729 /* dumpability changes */
71730 if (old->euid != new->euid ||
71731 old->egid != new->egid ||
71732@@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
71733 key_fsgid_changed(task);
71734
71735 /* do it
71736- * - What if a process setreuid()'s and this brings the
71737- * new uid over his NPROC rlimit? We can check this now
71738- * cheaply with the new uid cache, so if it matters
71739- * we should be checking for it. -DaveM
71740+ * RLIMIT_NPROC limits on user->processes have already been checked
71741+ * in set_user().
71742 */
71743 alter_cred_subscribers(new, 2);
71744 if (new->user != old->user)
71745@@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
71746 put_cred(old);
71747 return 0;
71748 }
71749+
71750+#ifdef CONFIG_GRKERNSEC_SETXID
71751+extern int set_user(struct cred *new);
71752+
71753+void gr_delayed_cred_worker(void)
71754+{
71755+ const struct cred *new = current->delayed_cred;
71756+ struct cred *ncred;
71757+
71758+ current->delayed_cred = NULL;
71759+
71760+ if (current_uid() && new != NULL) {
71761+ // from doing get_cred on it when queueing this
71762+ put_cred(new);
71763+ return;
71764+ } else if (new == NULL)
71765+ return;
71766+
71767+ ncred = prepare_creds();
71768+ if (!ncred)
71769+ goto die;
71770+ // uids
71771+ ncred->uid = new->uid;
71772+ ncred->euid = new->euid;
71773+ ncred->suid = new->suid;
71774+ ncred->fsuid = new->fsuid;
71775+ // gids
71776+ ncred->gid = new->gid;
71777+ ncred->egid = new->egid;
71778+ ncred->sgid = new->sgid;
71779+ ncred->fsgid = new->fsgid;
71780+ // groups
71781+ if (set_groups(ncred, new->group_info) < 0) {
71782+ abort_creds(ncred);
71783+ goto die;
71784+ }
71785+ // caps
71786+ ncred->securebits = new->securebits;
71787+ ncred->cap_inheritable = new->cap_inheritable;
71788+ ncred->cap_permitted = new->cap_permitted;
71789+ ncred->cap_effective = new->cap_effective;
71790+ ncred->cap_bset = new->cap_bset;
71791+
71792+ if (set_user(ncred)) {
71793+ abort_creds(ncred);
71794+ goto die;
71795+ }
71796+
71797+ // from doing get_cred on it when queueing this
71798+ put_cred(new);
71799+
71800+ __commit_creds(ncred);
71801+ return;
71802+die:
71803+ // from doing get_cred on it when queueing this
71804+ put_cred(new);
71805+ do_group_exit(SIGKILL);
71806+}
71807+#endif
71808+
71809+int commit_creds(struct cred *new)
71810+{
71811+#ifdef CONFIG_GRKERNSEC_SETXID
71812+ struct task_struct *t;
71813+
71814+ /* we won't get called with tasklist_lock held for writing
71815+ and interrupts disabled as the cred struct in that case is
71816+ init_cred
71817+ */
71818+ if (grsec_enable_setxid && !current_is_single_threaded() &&
71819+ !current_uid() && new->uid) {
71820+ rcu_read_lock();
71821+ read_lock(&tasklist_lock);
71822+ for (t = next_thread(current); t != current;
71823+ t = next_thread(t)) {
71824+ if (t->delayed_cred == NULL) {
71825+ t->delayed_cred = get_cred(new);
71826+ set_tsk_need_resched(t);
71827+ }
71828+ }
71829+ read_unlock(&tasklist_lock);
71830+ rcu_read_unlock();
71831+ }
71832+#endif
71833+ return __commit_creds(new);
71834+}
71835+
71836 EXPORT_SYMBOL(commit_creds);
71837
71838+
71839 /**
71840 * abort_creds - Discard a set of credentials and unlock the current task
71841 * @new: The credentials that were going to be applied
71842@@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
71843 */
71844 void abort_creds(struct cred *new)
71845 {
71846+ pax_track_stack();
71847+
71848 kdebug("abort_creds(%p{%d,%d})", new,
71849 atomic_read(&new->usage),
71850 read_cred_subscribers(new));
71851@@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
71852 {
71853 const struct cred *old = current->cred;
71854
71855+ pax_track_stack();
71856+
71857 kdebug("override_creds(%p{%d,%d})", new,
71858 atomic_read(&new->usage),
71859 read_cred_subscribers(new));
71860@@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
71861 {
71862 const struct cred *override = current->cred;
71863
71864+ pax_track_stack();
71865+
71866 kdebug("revert_creds(%p{%d,%d})", old,
71867 atomic_read(&old->usage),
71868 read_cred_subscribers(old));
71869@@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
71870 const struct cred *old;
71871 struct cred *new;
71872
71873+ pax_track_stack();
71874+
71875 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71876 if (!new)
71877 return NULL;
71878@@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
71879 */
71880 int set_security_override(struct cred *new, u32 secid)
71881 {
71882+ pax_track_stack();
71883+
71884 return security_kernel_act_as(new, secid);
71885 }
71886 EXPORT_SYMBOL(set_security_override);
71887@@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
71888 u32 secid;
71889 int ret;
71890
71891+ pax_track_stack();
71892+
71893 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
71894 if (ret < 0)
71895 return ret;
71896diff --git a/kernel/exit.c b/kernel/exit.c
71897index 0f8fae3..9344a56 100644
71898--- a/kernel/exit.c
71899+++ b/kernel/exit.c
71900@@ -55,6 +55,10 @@
71901 #include <asm/pgtable.h>
71902 #include <asm/mmu_context.h>
71903
71904+#ifdef CONFIG_GRKERNSEC
71905+extern rwlock_t grsec_exec_file_lock;
71906+#endif
71907+
71908 static void exit_mm(struct task_struct * tsk);
71909
71910 static void __unhash_process(struct task_struct *p)
71911@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
71912 struct task_struct *leader;
71913 int zap_leader;
71914 repeat:
71915+#ifdef CONFIG_NET
71916+ gr_del_task_from_ip_table(p);
71917+#endif
71918+
71919 tracehook_prepare_release_task(p);
71920 /* don't need to get the RCU readlock here - the process is dead and
71921 * can't be modifying its own credentials */
71922@@ -397,7 +405,7 @@ int allow_signal(int sig)
71923 * know it'll be handled, so that they don't get converted to
71924 * SIGKILL or just silently dropped.
71925 */
71926- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
71927+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
71928 recalc_sigpending();
71929 spin_unlock_irq(&current->sighand->siglock);
71930 return 0;
71931@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
71932 vsnprintf(current->comm, sizeof(current->comm), name, args);
71933 va_end(args);
71934
71935+#ifdef CONFIG_GRKERNSEC
71936+ write_lock(&grsec_exec_file_lock);
71937+ if (current->exec_file) {
71938+ fput(current->exec_file);
71939+ current->exec_file = NULL;
71940+ }
71941+ write_unlock(&grsec_exec_file_lock);
71942+#endif
71943+
71944+ gr_set_kernel_label(current);
71945+
71946 /*
71947 * If we were started as result of loading a module, close all of the
71948 * user space pages. We don't need them, and if we didn't close them
71949@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
71950 struct task_struct *tsk = current;
71951 int group_dead;
71952
71953- profile_task_exit(tsk);
71954-
71955- WARN_ON(atomic_read(&tsk->fs_excl));
71956-
71957+ /*
71958+ * Check this first since set_fs() below depends on
71959+ * current_thread_info(), which we better not access when we're in
71960+ * interrupt context. Other than that, we want to do the set_fs()
71961+ * as early as possible.
71962+ */
71963 if (unlikely(in_interrupt()))
71964 panic("Aiee, killing interrupt handler!");
71965- if (unlikely(!tsk->pid))
71966- panic("Attempted to kill the idle task!");
71967
71968 /*
71969- * If do_exit is called because this processes oopsed, it's possible
71970+ * If do_exit is called because this processes Oops'ed, it's possible
71971 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
71972 * continuing. Amongst other possible reasons, this is to prevent
71973 * mm_release()->clear_child_tid() from writing to a user-controlled
71974@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
71975 */
71976 set_fs(USER_DS);
71977
71978+ profile_task_exit(tsk);
71979+
71980+ WARN_ON(atomic_read(&tsk->fs_excl));
71981+
71982+ if (unlikely(!tsk->pid))
71983+ panic("Attempted to kill the idle task!");
71984+
71985 tracehook_report_exit(&code);
71986
71987 validate_creds_for_do_exit(tsk);
71988@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
71989 tsk->exit_code = code;
71990 taskstats_exit(tsk, group_dead);
71991
71992+ gr_acl_handle_psacct(tsk, code);
71993+ gr_acl_handle_exit();
71994+
71995 exit_mm(tsk);
71996
71997 if (group_dead)
71998@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
71999
72000 if (unlikely(wo->wo_flags & WNOWAIT)) {
72001 int exit_code = p->exit_code;
72002- int why, status;
72003+ int why;
72004
72005 get_task_struct(p);
72006 read_unlock(&tasklist_lock);
72007diff --git a/kernel/fork.c b/kernel/fork.c
72008index 4bde56f..a07de53 100644
72009--- a/kernel/fork.c
72010+++ b/kernel/fork.c
72011@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
72012 *stackend = STACK_END_MAGIC; /* for overflow detection */
72013
72014 #ifdef CONFIG_CC_STACKPROTECTOR
72015- tsk->stack_canary = get_random_int();
72016+ tsk->stack_canary = pax_get_random_long();
72017 #endif
72018
72019 /* One for us, one for whoever does the "release_task()" (usually parent) */
72020@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72021 mm->locked_vm = 0;
72022 mm->mmap = NULL;
72023 mm->mmap_cache = NULL;
72024- mm->free_area_cache = oldmm->mmap_base;
72025- mm->cached_hole_size = ~0UL;
72026+ mm->free_area_cache = oldmm->free_area_cache;
72027+ mm->cached_hole_size = oldmm->cached_hole_size;
72028 mm->map_count = 0;
72029 cpumask_clear(mm_cpumask(mm));
72030 mm->mm_rb = RB_ROOT;
72031@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72032 tmp->vm_flags &= ~VM_LOCKED;
72033 tmp->vm_mm = mm;
72034 tmp->vm_next = tmp->vm_prev = NULL;
72035+ tmp->vm_mirror = NULL;
72036 anon_vma_link(tmp);
72037 file = tmp->vm_file;
72038 if (file) {
72039@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
72040 if (retval)
72041 goto out;
72042 }
72043+
72044+#ifdef CONFIG_PAX_SEGMEXEC
72045+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
72046+ struct vm_area_struct *mpnt_m;
72047+
72048+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
72049+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
72050+
72051+ if (!mpnt->vm_mirror)
72052+ continue;
72053+
72054+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
72055+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
72056+ mpnt->vm_mirror = mpnt_m;
72057+ } else {
72058+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
72059+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
72060+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
72061+ mpnt->vm_mirror->vm_mirror = mpnt;
72062+ }
72063+ }
72064+ BUG_ON(mpnt_m);
72065+ }
72066+#endif
72067+
72068 /* a new mm has just been created */
72069 arch_dup_mmap(oldmm, mm);
72070 retval = 0;
72071@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
72072 write_unlock(&fs->lock);
72073 return -EAGAIN;
72074 }
72075- fs->users++;
72076+ atomic_inc(&fs->users);
72077 write_unlock(&fs->lock);
72078 return 0;
72079 }
72080 tsk->fs = copy_fs_struct(fs);
72081 if (!tsk->fs)
72082 return -ENOMEM;
72083+ gr_set_chroot_entries(tsk, &tsk->fs->root);
72084 return 0;
72085 }
72086
72087@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72088 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
72089 #endif
72090 retval = -EAGAIN;
72091+
72092+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
72093+
72094 if (atomic_read(&p->real_cred->user->processes) >=
72095 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
72096- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
72097- p->real_cred->user != INIT_USER)
72098+ if (p->real_cred->user != INIT_USER &&
72099+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
72100 goto bad_fork_free;
72101 }
72102+ current->flags &= ~PF_NPROC_EXCEEDED;
72103
72104 retval = copy_creds(p, clone_flags);
72105 if (retval < 0)
72106@@ -1183,6 +1214,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72107 goto bad_fork_free_pid;
72108 }
72109
72110+ gr_copy_label(p);
72111+
72112+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72113+ p->exec_id = current->exec_id;
72114+#endif
72115+
72116 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
72117 /*
72118 * Clear TID on mm_release()?
72119@@ -1333,6 +1370,8 @@ bad_fork_cleanup_count:
72120 bad_fork_free:
72121 free_task(p);
72122 fork_out:
72123+ gr_log_forkfail(retval);
72124+
72125 return ERR_PTR(retval);
72126 }
72127
72128@@ -1426,6 +1465,8 @@ long do_fork(unsigned long clone_flags,
72129 if (clone_flags & CLONE_PARENT_SETTID)
72130 put_user(nr, parent_tidptr);
72131
72132+ gr_handle_brute_check();
72133+
72134 if (clone_flags & CLONE_VFORK) {
72135 p->vfork_done = &vfork;
72136 init_completion(&vfork);
72137@@ -1558,7 +1599,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
72138 return 0;
72139
72140 /* don't need lock here; in the worst case we'll do useless copy */
72141- if (fs->users == 1)
72142+ if (atomic_read(&fs->users) == 1)
72143 return 0;
72144
72145 *new_fsp = copy_fs_struct(fs);
72146@@ -1681,7 +1722,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
72147 fs = current->fs;
72148 write_lock(&fs->lock);
72149 current->fs = new_fs;
72150- if (--fs->users)
72151+ gr_set_chroot_entries(current, &current->fs->root);
72152+ if (atomic_dec_return(&fs->users))
72153 new_fs = NULL;
72154 else
72155 new_fs = fs;
72156diff --git a/kernel/futex.c b/kernel/futex.c
72157index fb98c9f..333faec 100644
72158--- a/kernel/futex.c
72159+++ b/kernel/futex.c
72160@@ -54,6 +54,7 @@
72161 #include <linux/mount.h>
72162 #include <linux/pagemap.h>
72163 #include <linux/syscalls.h>
72164+#include <linux/ptrace.h>
72165 #include <linux/signal.h>
72166 #include <linux/module.h>
72167 #include <linux/magic.h>
72168@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
72169 struct page *page;
72170 int err, ro = 0;
72171
72172+#ifdef CONFIG_PAX_SEGMEXEC
72173+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
72174+ return -EFAULT;
72175+#endif
72176+
72177 /*
72178 * The futex address must be "naturally" aligned.
72179 */
72180@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
72181 struct futex_q q;
72182 int ret;
72183
72184+ pax_track_stack();
72185+
72186 if (!bitset)
72187 return -EINVAL;
72188
72189@@ -1871,7 +1879,7 @@ retry:
72190
72191 restart = &current_thread_info()->restart_block;
72192 restart->fn = futex_wait_restart;
72193- restart->futex.uaddr = (u32 *)uaddr;
72194+ restart->futex.uaddr = uaddr;
72195 restart->futex.val = val;
72196 restart->futex.time = abs_time->tv64;
72197 restart->futex.bitset = bitset;
72198@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
72199 struct futex_q q;
72200 int res, ret;
72201
72202+ pax_track_stack();
72203+
72204 if (!bitset)
72205 return -EINVAL;
72206
72207@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
72208 if (!p)
72209 goto err_unlock;
72210 ret = -EPERM;
72211+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72212+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72213+ goto err_unlock;
72214+#endif
72215 pcred = __task_cred(p);
72216 if (cred->euid != pcred->euid &&
72217 cred->euid != pcred->uid &&
72218@@ -2489,7 +2503,7 @@ retry:
72219 */
72220 static inline int fetch_robust_entry(struct robust_list __user **entry,
72221 struct robust_list __user * __user *head,
72222- int *pi)
72223+ unsigned int *pi)
72224 {
72225 unsigned long uentry;
72226
72227@@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72228 {
72229 u32 curval;
72230 int i;
72231+ mm_segment_t oldfs;
72232
72233 /*
72234 * This will fail and we want it. Some arch implementations do
72235@@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72236 * implementation, the non functional ones will return
72237 * -ENOSYS.
72238 */
72239+ oldfs = get_fs();
72240+ set_fs(USER_DS);
72241 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72242+ set_fs(oldfs);
72243 if (curval == -EFAULT)
72244 futex_cmpxchg_enabled = 1;
72245
72246diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72247index 2357165..eb25501 100644
72248--- a/kernel/futex_compat.c
72249+++ b/kernel/futex_compat.c
72250@@ -10,6 +10,7 @@
72251 #include <linux/compat.h>
72252 #include <linux/nsproxy.h>
72253 #include <linux/futex.h>
72254+#include <linux/ptrace.h>
72255
72256 #include <asm/uaccess.h>
72257
72258@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72259 {
72260 struct compat_robust_list_head __user *head;
72261 unsigned long ret;
72262- const struct cred *cred = current_cred(), *pcred;
72263+ const struct cred *cred = current_cred();
72264+ const struct cred *pcred;
72265
72266 if (!futex_cmpxchg_enabled)
72267 return -ENOSYS;
72268@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72269 if (!p)
72270 goto err_unlock;
72271 ret = -EPERM;
72272+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72273+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72274+ goto err_unlock;
72275+#endif
72276 pcred = __task_cred(p);
72277 if (cred->euid != pcred->euid &&
72278 cred->euid != pcred->uid &&
72279diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72280index 9b22d03..6295b62 100644
72281--- a/kernel/gcov/base.c
72282+++ b/kernel/gcov/base.c
72283@@ -102,11 +102,6 @@ void gcov_enable_events(void)
72284 }
72285
72286 #ifdef CONFIG_MODULES
72287-static inline int within(void *addr, void *start, unsigned long size)
72288-{
72289- return ((addr >= start) && (addr < start + size));
72290-}
72291-
72292 /* Update list and generate events when modules are unloaded. */
72293 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72294 void *data)
72295@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72296 prev = NULL;
72297 /* Remove entries located in module from linked list. */
72298 for (info = gcov_info_head; info; info = info->next) {
72299- if (within(info, mod->module_core, mod->core_size)) {
72300+ if (within_module_core_rw((unsigned long)info, mod)) {
72301 if (prev)
72302 prev->next = info->next;
72303 else
72304diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72305index a6e9d00..a0da4f9 100644
72306--- a/kernel/hrtimer.c
72307+++ b/kernel/hrtimer.c
72308@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72309 local_irq_restore(flags);
72310 }
72311
72312-static void run_hrtimer_softirq(struct softirq_action *h)
72313+static void run_hrtimer_softirq(void)
72314 {
72315 hrtimer_peek_ahead_timers();
72316 }
72317diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72318index 8b6b8b6..6bc87df 100644
72319--- a/kernel/kallsyms.c
72320+++ b/kernel/kallsyms.c
72321@@ -11,6 +11,9 @@
72322 * Changed the compression method from stem compression to "table lookup"
72323 * compression (see scripts/kallsyms.c for a more complete description)
72324 */
72325+#ifdef CONFIG_GRKERNSEC_HIDESYM
72326+#define __INCLUDED_BY_HIDESYM 1
72327+#endif
72328 #include <linux/kallsyms.h>
72329 #include <linux/module.h>
72330 #include <linux/init.h>
72331@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72332
72333 static inline int is_kernel_inittext(unsigned long addr)
72334 {
72335+ if (system_state != SYSTEM_BOOTING)
72336+ return 0;
72337+
72338 if (addr >= (unsigned long)_sinittext
72339 && addr <= (unsigned long)_einittext)
72340 return 1;
72341 return 0;
72342 }
72343
72344+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72345+#ifdef CONFIG_MODULES
72346+static inline int is_module_text(unsigned long addr)
72347+{
72348+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72349+ return 1;
72350+
72351+ addr = ktla_ktva(addr);
72352+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72353+}
72354+#else
72355+static inline int is_module_text(unsigned long addr)
72356+{
72357+ return 0;
72358+}
72359+#endif
72360+#endif
72361+
72362 static inline int is_kernel_text(unsigned long addr)
72363 {
72364 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72365@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72366
72367 static inline int is_kernel(unsigned long addr)
72368 {
72369+
72370+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72371+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
72372+ return 1;
72373+
72374+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72375+#else
72376 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72377+#endif
72378+
72379 return 1;
72380 return in_gate_area_no_task(addr);
72381 }
72382
72383 static int is_ksym_addr(unsigned long addr)
72384 {
72385+
72386+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72387+ if (is_module_text(addr))
72388+ return 0;
72389+#endif
72390+
72391 if (all_var)
72392 return is_kernel(addr);
72393
72394@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
72395
72396 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
72397 {
72398- iter->name[0] = '\0';
72399 iter->nameoff = get_symbol_offset(new_pos);
72400 iter->pos = new_pos;
72401 }
72402@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
72403 {
72404 struct kallsym_iter *iter = m->private;
72405
72406+#ifdef CONFIG_GRKERNSEC_HIDESYM
72407+ if (current_uid())
72408+ return 0;
72409+#endif
72410+
72411 /* Some debugging symbols have no name. Ignore them. */
72412 if (!iter->name[0])
72413 return 0;
72414@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
72415 struct kallsym_iter *iter;
72416 int ret;
72417
72418- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
72419+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
72420 if (!iter)
72421 return -ENOMEM;
72422 reset_iter(iter, 0);
72423diff --git a/kernel/kexec.c b/kernel/kexec.c
72424index f336e21..9c1c20b 100644
72425--- a/kernel/kexec.c
72426+++ b/kernel/kexec.c
72427@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
72428 unsigned long flags)
72429 {
72430 struct compat_kexec_segment in;
72431- struct kexec_segment out, __user *ksegments;
72432+ struct kexec_segment out;
72433+ struct kexec_segment __user *ksegments;
72434 unsigned long i, result;
72435
72436 /* Don't allow clients that don't understand the native
72437diff --git a/kernel/kgdb.c b/kernel/kgdb.c
72438index 53dae4b..9ba3743 100644
72439--- a/kernel/kgdb.c
72440+++ b/kernel/kgdb.c
72441@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
72442 /* Guard for recursive entry */
72443 static int exception_level;
72444
72445-static struct kgdb_io *kgdb_io_ops;
72446+static const struct kgdb_io *kgdb_io_ops;
72447 static DEFINE_SPINLOCK(kgdb_registration_lock);
72448
72449 /* kgdb console driver is loaded */
72450@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
72451 */
72452 static atomic_t passive_cpu_wait[NR_CPUS];
72453 static atomic_t cpu_in_kgdb[NR_CPUS];
72454-atomic_t kgdb_setting_breakpoint;
72455+atomic_unchecked_t kgdb_setting_breakpoint;
72456
72457 struct task_struct *kgdb_usethread;
72458 struct task_struct *kgdb_contthread;
72459@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
72460 sizeof(unsigned long)];
72461
72462 /* to keep track of the CPU which is doing the single stepping*/
72463-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72464+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72465
72466 /*
72467 * If you are debugging a problem where roundup (the collection of
72468@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
72469 return 0;
72470 if (kgdb_connected)
72471 return 1;
72472- if (atomic_read(&kgdb_setting_breakpoint))
72473+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
72474 return 1;
72475 if (print_wait)
72476 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
72477@@ -1426,8 +1426,8 @@ acquirelock:
72478 * instance of the exception handler wanted to come into the
72479 * debugger on a different CPU via a single step
72480 */
72481- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
72482- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
72483+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
72484+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
72485
72486 atomic_set(&kgdb_active, -1);
72487 touch_softlockup_watchdog();
72488@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
72489 *
72490 * Register it with the KGDB core.
72491 */
72492-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
72493+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
72494 {
72495 int err;
72496
72497@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
72498 *
72499 * Unregister it with the KGDB core.
72500 */
72501-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
72502+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
72503 {
72504 BUG_ON(kgdb_connected);
72505
72506@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
72507 */
72508 void kgdb_breakpoint(void)
72509 {
72510- atomic_set(&kgdb_setting_breakpoint, 1);
72511+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
72512 wmb(); /* Sync point before breakpoint */
72513 arch_kgdb_breakpoint();
72514 wmb(); /* Sync point after breakpoint */
72515- atomic_set(&kgdb_setting_breakpoint, 0);
72516+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
72517 }
72518 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
72519
72520diff --git a/kernel/kmod.c b/kernel/kmod.c
72521index d206078..e27ba6a 100644
72522--- a/kernel/kmod.c
72523+++ b/kernel/kmod.c
72524@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
72525 * If module auto-loading support is disabled then this function
72526 * becomes a no-operation.
72527 */
72528-int __request_module(bool wait, const char *fmt, ...)
72529+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
72530 {
72531- va_list args;
72532 char module_name[MODULE_NAME_LEN];
72533 unsigned int max_modprobes;
72534 int ret;
72535- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
72536+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
72537 static char *envp[] = { "HOME=/",
72538 "TERM=linux",
72539 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
72540@@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
72541 if (ret)
72542 return ret;
72543
72544- va_start(args, fmt);
72545- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
72546- va_end(args);
72547+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
72548 if (ret >= MODULE_NAME_LEN)
72549 return -ENAMETOOLONG;
72550
72551+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72552+ if (!current_uid()) {
72553+ /* hack to workaround consolekit/udisks stupidity */
72554+ read_lock(&tasklist_lock);
72555+ if (!strcmp(current->comm, "mount") &&
72556+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
72557+ read_unlock(&tasklist_lock);
72558+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
72559+ return -EPERM;
72560+ }
72561+ read_unlock(&tasklist_lock);
72562+ }
72563+#endif
72564+
72565 /* If modprobe needs a service that is in a module, we get a recursive
72566 * loop. Limit the number of running kmod threads to max_threads/2 or
72567 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
72568@@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
72569 atomic_dec(&kmod_concurrent);
72570 return ret;
72571 }
72572+
72573+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
72574+{
72575+ va_list args;
72576+ int ret;
72577+
72578+ va_start(args, fmt);
72579+ ret = ____request_module(wait, module_param, fmt, args);
72580+ va_end(args);
72581+
72582+ return ret;
72583+}
72584+
72585+int __request_module(bool wait, const char *fmt, ...)
72586+{
72587+ va_list args;
72588+ int ret;
72589+
72590+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72591+ if (current_uid()) {
72592+ char module_param[MODULE_NAME_LEN];
72593+
72594+ memset(module_param, 0, sizeof(module_param));
72595+
72596+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
72597+
72598+ va_start(args, fmt);
72599+ ret = ____request_module(wait, module_param, fmt, args);
72600+ va_end(args);
72601+
72602+ return ret;
72603+ }
72604+#endif
72605+
72606+ va_start(args, fmt);
72607+ ret = ____request_module(wait, NULL, fmt, args);
72608+ va_end(args);
72609+
72610+ return ret;
72611+}
72612+
72613+
72614 EXPORT_SYMBOL(__request_module);
72615 #endif /* CONFIG_MODULES */
72616
72617@@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
72618 *
72619 * Thus the __user pointer cast is valid here.
72620 */
72621- sys_wait4(pid, (int __user *)&ret, 0, NULL);
72622+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
72623
72624 /*
72625 * If ret is 0, either ____call_usermodehelper failed and the
72626diff --git a/kernel/kprobes.c b/kernel/kprobes.c
72627index 176d825..77fa8ea 100644
72628--- a/kernel/kprobes.c
72629+++ b/kernel/kprobes.c
72630@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
72631 * kernel image and loaded module images reside. This is required
72632 * so x86_64 can correctly handle the %rip-relative fixups.
72633 */
72634- kip->insns = module_alloc(PAGE_SIZE);
72635+ kip->insns = module_alloc_exec(PAGE_SIZE);
72636 if (!kip->insns) {
72637 kfree(kip);
72638 return NULL;
72639@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
72640 */
72641 if (!list_is_singular(&kprobe_insn_pages)) {
72642 list_del(&kip->list);
72643- module_free(NULL, kip->insns);
72644+ module_free_exec(NULL, kip->insns);
72645 kfree(kip);
72646 }
72647 return 1;
72648@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
72649 {
72650 int i, err = 0;
72651 unsigned long offset = 0, size = 0;
72652- char *modname, namebuf[128];
72653+ char *modname, namebuf[KSYM_NAME_LEN];
72654 const char *symbol_name;
72655 void *addr;
72656 struct kprobe_blackpoint *kb;
72657@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
72658 const char *sym = NULL;
72659 unsigned int i = *(loff_t *) v;
72660 unsigned long offset = 0;
72661- char *modname, namebuf[128];
72662+ char *modname, namebuf[KSYM_NAME_LEN];
72663
72664 head = &kprobe_table[i];
72665 preempt_disable();
72666diff --git a/kernel/lockdep.c b/kernel/lockdep.c
72667index d86fe89..d12fc66 100644
72668--- a/kernel/lockdep.c
72669+++ b/kernel/lockdep.c
72670@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
72671 /*
72672 * Various lockdep statistics:
72673 */
72674-atomic_t chain_lookup_hits;
72675-atomic_t chain_lookup_misses;
72676-atomic_t hardirqs_on_events;
72677-atomic_t hardirqs_off_events;
72678-atomic_t redundant_hardirqs_on;
72679-atomic_t redundant_hardirqs_off;
72680-atomic_t softirqs_on_events;
72681-atomic_t softirqs_off_events;
72682-atomic_t redundant_softirqs_on;
72683-atomic_t redundant_softirqs_off;
72684-atomic_t nr_unused_locks;
72685-atomic_t nr_cyclic_checks;
72686-atomic_t nr_find_usage_forwards_checks;
72687-atomic_t nr_find_usage_backwards_checks;
72688+atomic_unchecked_t chain_lookup_hits;
72689+atomic_unchecked_t chain_lookup_misses;
72690+atomic_unchecked_t hardirqs_on_events;
72691+atomic_unchecked_t hardirqs_off_events;
72692+atomic_unchecked_t redundant_hardirqs_on;
72693+atomic_unchecked_t redundant_hardirqs_off;
72694+atomic_unchecked_t softirqs_on_events;
72695+atomic_unchecked_t softirqs_off_events;
72696+atomic_unchecked_t redundant_softirqs_on;
72697+atomic_unchecked_t redundant_softirqs_off;
72698+atomic_unchecked_t nr_unused_locks;
72699+atomic_unchecked_t nr_cyclic_checks;
72700+atomic_unchecked_t nr_find_usage_forwards_checks;
72701+atomic_unchecked_t nr_find_usage_backwards_checks;
72702 #endif
72703
72704 /*
72705@@ -577,6 +577,10 @@ static int static_obj(void *obj)
72706 int i;
72707 #endif
72708
72709+#ifdef CONFIG_PAX_KERNEXEC
72710+ start = ktla_ktva(start);
72711+#endif
72712+
72713 /*
72714 * static variable?
72715 */
72716@@ -592,8 +596,7 @@ static int static_obj(void *obj)
72717 */
72718 for_each_possible_cpu(i) {
72719 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
72720- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
72721- + per_cpu_offset(i);
72722+ end = start + PERCPU_ENOUGH_ROOM;
72723
72724 if ((addr >= start) && (addr < end))
72725 return 1;
72726@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
72727 if (!static_obj(lock->key)) {
72728 debug_locks_off();
72729 printk("INFO: trying to register non-static key.\n");
72730+ printk("lock:%pS key:%pS.\n", lock, lock->key);
72731 printk("the code is fine but needs lockdep annotation.\n");
72732 printk("turning off the locking correctness validator.\n");
72733 dump_stack();
72734@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
72735 if (!class)
72736 return 0;
72737 }
72738- debug_atomic_inc((atomic_t *)&class->ops);
72739+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
72740 if (very_verbose(class)) {
72741 printk("\nacquire class [%p] %s", class->key, class->name);
72742 if (class->name_version > 1)
72743diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
72744index a2ee95a..092f0f2 100644
72745--- a/kernel/lockdep_internals.h
72746+++ b/kernel/lockdep_internals.h
72747@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
72748 /*
72749 * Various lockdep statistics:
72750 */
72751-extern atomic_t chain_lookup_hits;
72752-extern atomic_t chain_lookup_misses;
72753-extern atomic_t hardirqs_on_events;
72754-extern atomic_t hardirqs_off_events;
72755-extern atomic_t redundant_hardirqs_on;
72756-extern atomic_t redundant_hardirqs_off;
72757-extern atomic_t softirqs_on_events;
72758-extern atomic_t softirqs_off_events;
72759-extern atomic_t redundant_softirqs_on;
72760-extern atomic_t redundant_softirqs_off;
72761-extern atomic_t nr_unused_locks;
72762-extern atomic_t nr_cyclic_checks;
72763-extern atomic_t nr_cyclic_check_recursions;
72764-extern atomic_t nr_find_usage_forwards_checks;
72765-extern atomic_t nr_find_usage_forwards_recursions;
72766-extern atomic_t nr_find_usage_backwards_checks;
72767-extern atomic_t nr_find_usage_backwards_recursions;
72768-# define debug_atomic_inc(ptr) atomic_inc(ptr)
72769-# define debug_atomic_dec(ptr) atomic_dec(ptr)
72770-# define debug_atomic_read(ptr) atomic_read(ptr)
72771+extern atomic_unchecked_t chain_lookup_hits;
72772+extern atomic_unchecked_t chain_lookup_misses;
72773+extern atomic_unchecked_t hardirqs_on_events;
72774+extern atomic_unchecked_t hardirqs_off_events;
72775+extern atomic_unchecked_t redundant_hardirqs_on;
72776+extern atomic_unchecked_t redundant_hardirqs_off;
72777+extern atomic_unchecked_t softirqs_on_events;
72778+extern atomic_unchecked_t softirqs_off_events;
72779+extern atomic_unchecked_t redundant_softirqs_on;
72780+extern atomic_unchecked_t redundant_softirqs_off;
72781+extern atomic_unchecked_t nr_unused_locks;
72782+extern atomic_unchecked_t nr_cyclic_checks;
72783+extern atomic_unchecked_t nr_cyclic_check_recursions;
72784+extern atomic_unchecked_t nr_find_usage_forwards_checks;
72785+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
72786+extern atomic_unchecked_t nr_find_usage_backwards_checks;
72787+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
72788+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
72789+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
72790+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
72791 #else
72792 # define debug_atomic_inc(ptr) do { } while (0)
72793 # define debug_atomic_dec(ptr) do { } while (0)
72794diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
72795index d4aba4f..02a353f 100644
72796--- a/kernel/lockdep_proc.c
72797+++ b/kernel/lockdep_proc.c
72798@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
72799
72800 static void print_name(struct seq_file *m, struct lock_class *class)
72801 {
72802- char str[128];
72803+ char str[KSYM_NAME_LEN];
72804 const char *name = class->name;
72805
72806 if (!name) {
72807diff --git a/kernel/module.c b/kernel/module.c
72808index 4b270e6..2226274 100644
72809--- a/kernel/module.c
72810+++ b/kernel/module.c
72811@@ -55,6 +55,7 @@
72812 #include <linux/async.h>
72813 #include <linux/percpu.h>
72814 #include <linux/kmemleak.h>
72815+#include <linux/grsecurity.h>
72816
72817 #define CREATE_TRACE_POINTS
72818 #include <trace/events/module.h>
72819@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
72820 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
72821
72822 /* Bounds of module allocation, for speeding __module_address */
72823-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
72824+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
72825+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
72826
72827 int register_module_notifier(struct notifier_block * nb)
72828 {
72829@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72830 return true;
72831
72832 list_for_each_entry_rcu(mod, &modules, list) {
72833- struct symsearch arr[] = {
72834+ struct symsearch modarr[] = {
72835 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
72836 NOT_GPL_ONLY, false },
72837 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
72838@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72839 #endif
72840 };
72841
72842- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
72843+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
72844 return true;
72845 }
72846 return false;
72847@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
72848 void *ptr;
72849 int cpu;
72850
72851- if (align > PAGE_SIZE) {
72852+ if (align-1 >= PAGE_SIZE) {
72853 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
72854 name, align, PAGE_SIZE);
72855 align = PAGE_SIZE;
72856@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
72857 * /sys/module/foo/sections stuff
72858 * J. Corbet <corbet@lwn.net>
72859 */
72860-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
72861+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72862
72863 static inline bool sect_empty(const Elf_Shdr *sect)
72864 {
72865@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
72866 destroy_params(mod->kp, mod->num_kp);
72867
72868 /* This may be NULL, but that's OK */
72869- module_free(mod, mod->module_init);
72870+ module_free(mod, mod->module_init_rw);
72871+ module_free_exec(mod, mod->module_init_rx);
72872 kfree(mod->args);
72873 if (mod->percpu)
72874 percpu_modfree(mod->percpu);
72875@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
72876 percpu_modfree(mod->refptr);
72877 #endif
72878 /* Free lock-classes: */
72879- lockdep_free_key_range(mod->module_core, mod->core_size);
72880+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
72881+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
72882
72883 /* Finally, free the core (containing the module structure) */
72884- module_free(mod, mod->module_core);
72885+ module_free_exec(mod, mod->module_core_rx);
72886+ module_free(mod, mod->module_core_rw);
72887
72888 #ifdef CONFIG_MPU
72889 update_protections(current->mm);
72890@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72891 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72892 int ret = 0;
72893 const struct kernel_symbol *ksym;
72894+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72895+ int is_fs_load = 0;
72896+ int register_filesystem_found = 0;
72897+ char *p;
72898+
72899+ p = strstr(mod->args, "grsec_modharden_fs");
72900+
72901+ if (p) {
72902+ char *endptr = p + strlen("grsec_modharden_fs");
72903+ /* copy \0 as well */
72904+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
72905+ is_fs_load = 1;
72906+ }
72907+#endif
72908+
72909
72910 for (i = 1; i < n; i++) {
72911+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72912+ const char *name = strtab + sym[i].st_name;
72913+
72914+ /* it's a real shame this will never get ripped and copied
72915+ upstream! ;(
72916+ */
72917+ if (is_fs_load && !strcmp(name, "register_filesystem"))
72918+ register_filesystem_found = 1;
72919+#endif
72920 switch (sym[i].st_shndx) {
72921 case SHN_COMMON:
72922 /* We compiled with -fno-common. These are not
72923@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72924 strtab + sym[i].st_name, mod);
72925 /* Ok if resolved. */
72926 if (ksym) {
72927+ pax_open_kernel();
72928 sym[i].st_value = ksym->value;
72929+ pax_close_kernel();
72930 break;
72931 }
72932
72933@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72934 secbase = (unsigned long)mod->percpu;
72935 else
72936 secbase = sechdrs[sym[i].st_shndx].sh_addr;
72937+ pax_open_kernel();
72938 sym[i].st_value += secbase;
72939+ pax_close_kernel();
72940 break;
72941 }
72942 }
72943
72944+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72945+ if (is_fs_load && !register_filesystem_found) {
72946+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
72947+ ret = -EPERM;
72948+ }
72949+#endif
72950+
72951 return ret;
72952 }
72953
72954@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
72955 || s->sh_entsize != ~0UL
72956 || strstarts(secstrings + s->sh_name, ".init"))
72957 continue;
72958- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
72959+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72960+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
72961+ else
72962+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
72963 DEBUGP("\t%s\n", secstrings + s->sh_name);
72964 }
72965- if (m == 0)
72966- mod->core_text_size = mod->core_size;
72967 }
72968
72969 DEBUGP("Init section allocation order:\n");
72970@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
72971 || s->sh_entsize != ~0UL
72972 || !strstarts(secstrings + s->sh_name, ".init"))
72973 continue;
72974- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
72975- | INIT_OFFSET_MASK);
72976+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72977+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
72978+ else
72979+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
72980+ s->sh_entsize |= INIT_OFFSET_MASK;
72981 DEBUGP("\t%s\n", secstrings + s->sh_name);
72982 }
72983- if (m == 0)
72984- mod->init_text_size = mod->init_size;
72985 }
72986 }
72987
72988@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
72989
72990 /* As per nm */
72991 static char elf_type(const Elf_Sym *sym,
72992- Elf_Shdr *sechdrs,
72993- const char *secstrings,
72994- struct module *mod)
72995+ const Elf_Shdr *sechdrs,
72996+ const char *secstrings)
72997 {
72998 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
72999 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
73000@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
73001
73002 /* Put symbol section at end of init part of module. */
73003 symsect->sh_flags |= SHF_ALLOC;
73004- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
73005+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
73006 symindex) | INIT_OFFSET_MASK;
73007 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
73008
73009@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
73010 }
73011
73012 /* Append room for core symbols at end of core part. */
73013- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
73014- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
73015+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
73016+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
73017
73018 /* Put string table section at end of init part of module. */
73019 strsect->sh_flags |= SHF_ALLOC;
73020- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
73021+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
73022 strindex) | INIT_OFFSET_MASK;
73023 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
73024
73025 /* Append room for core symbols' strings at end of core part. */
73026- *pstroffs = mod->core_size;
73027+ *pstroffs = mod->core_size_rx;
73028 __set_bit(0, strmap);
73029- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
73030+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
73031
73032 return symoffs;
73033 }
73034@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
73035 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
73036 mod->strtab = (void *)sechdrs[strindex].sh_addr;
73037
73038+ pax_open_kernel();
73039+
73040 /* Set types up while we still have access to sections. */
73041 for (i = 0; i < mod->num_symtab; i++)
73042 mod->symtab[i].st_info
73043- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
73044+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
73045
73046- mod->core_symtab = dst = mod->module_core + symoffs;
73047+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
73048 src = mod->symtab;
73049 *dst = *src;
73050 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
73051@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
73052 }
73053 mod->core_num_syms = ndst;
73054
73055- mod->core_strtab = s = mod->module_core + stroffs;
73056+ mod->core_strtab = s = mod->module_core_rx + stroffs;
73057 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
73058 if (test_bit(i, strmap))
73059 *++s = mod->strtab[i];
73060+
73061+ pax_close_kernel();
73062 }
73063 #else
73064 static inline unsigned long layout_symtab(struct module *mod,
73065@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
73066 #endif
73067 }
73068
73069-static void *module_alloc_update_bounds(unsigned long size)
73070+static void *module_alloc_update_bounds_rw(unsigned long size)
73071 {
73072 void *ret = module_alloc(size);
73073
73074 if (ret) {
73075 /* Update module bounds. */
73076- if ((unsigned long)ret < module_addr_min)
73077- module_addr_min = (unsigned long)ret;
73078- if ((unsigned long)ret + size > module_addr_max)
73079- module_addr_max = (unsigned long)ret + size;
73080+ if ((unsigned long)ret < module_addr_min_rw)
73081+ module_addr_min_rw = (unsigned long)ret;
73082+ if ((unsigned long)ret + size > module_addr_max_rw)
73083+ module_addr_max_rw = (unsigned long)ret + size;
73084+ }
73085+ return ret;
73086+}
73087+
73088+static void *module_alloc_update_bounds_rx(unsigned long size)
73089+{
73090+ void *ret = module_alloc_exec(size);
73091+
73092+ if (ret) {
73093+ /* Update module bounds. */
73094+ if ((unsigned long)ret < module_addr_min_rx)
73095+ module_addr_min_rx = (unsigned long)ret;
73096+ if ((unsigned long)ret + size > module_addr_max_rx)
73097+ module_addr_max_rx = (unsigned long)ret + size;
73098 }
73099 return ret;
73100 }
73101@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73102 unsigned int i;
73103
73104 /* only scan the sections containing data */
73105- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
73106- (unsigned long)mod->module_core,
73107+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
73108+ (unsigned long)mod->module_core_rw,
73109 sizeof(struct module), GFP_KERNEL);
73110
73111 for (i = 1; i < hdr->e_shnum; i++) {
73112@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73113 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
73114 continue;
73115
73116- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
73117- (unsigned long)mod->module_core,
73118+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
73119+ (unsigned long)mod->module_core_rw,
73120 sechdrs[i].sh_size, GFP_KERNEL);
73121 }
73122 }
73123@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
73124 Elf_Ehdr *hdr;
73125 Elf_Shdr *sechdrs;
73126 char *secstrings, *args, *modmagic, *strtab = NULL;
73127- char *staging;
73128+ char *staging, *license;
73129 unsigned int i;
73130 unsigned int symindex = 0;
73131 unsigned int strindex = 0;
73132@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
73133 goto free_hdr;
73134 }
73135
73136+ license = get_modinfo(sechdrs, infoindex, "license");
73137+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
73138+ if (!license || !license_is_gpl_compatible(license)) {
73139+ err = -ENOEXEC;
73140+ goto free_hdr;
73141+ }
73142+#endif
73143+
73144 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
73145 /* This is allowed: modprobe --force will invalidate it. */
73146 if (!modmagic) {
73147@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
73148 secstrings, &stroffs, strmap);
73149
73150 /* Do the allocs. */
73151- ptr = module_alloc_update_bounds(mod->core_size);
73152+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
73153 /*
73154 * The pointer to this block is stored in the module structure
73155 * which is inside the block. Just mark it as not being a
73156@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
73157 err = -ENOMEM;
73158 goto free_percpu;
73159 }
73160- memset(ptr, 0, mod->core_size);
73161- mod->module_core = ptr;
73162+ memset(ptr, 0, mod->core_size_rw);
73163+ mod->module_core_rw = ptr;
73164
73165- ptr = module_alloc_update_bounds(mod->init_size);
73166+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
73167 /*
73168 * The pointer to this block is stored in the module structure
73169 * which is inside the block. This block doesn't need to be
73170 * scanned as it contains data and code that will be freed
73171 * after the module is initialized.
73172 */
73173- kmemleak_ignore(ptr);
73174- if (!ptr && mod->init_size) {
73175+ kmemleak_not_leak(ptr);
73176+ if (!ptr && mod->init_size_rw) {
73177 err = -ENOMEM;
73178- goto free_core;
73179+ goto free_core_rw;
73180 }
73181- memset(ptr, 0, mod->init_size);
73182- mod->module_init = ptr;
73183+ memset(ptr, 0, mod->init_size_rw);
73184+ mod->module_init_rw = ptr;
73185+
73186+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
73187+ kmemleak_not_leak(ptr);
73188+ if (!ptr) {
73189+ err = -ENOMEM;
73190+ goto free_init_rw;
73191+ }
73192+
73193+ pax_open_kernel();
73194+ memset(ptr, 0, mod->core_size_rx);
73195+ pax_close_kernel();
73196+ mod->module_core_rx = ptr;
73197+
73198+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
73199+ kmemleak_not_leak(ptr);
73200+ if (!ptr && mod->init_size_rx) {
73201+ err = -ENOMEM;
73202+ goto free_core_rx;
73203+ }
73204+
73205+ pax_open_kernel();
73206+ memset(ptr, 0, mod->init_size_rx);
73207+ pax_close_kernel();
73208+ mod->module_init_rx = ptr;
73209
73210 /* Transfer each section which specifies SHF_ALLOC */
73211 DEBUGP("final section addresses:\n");
73212@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73213 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73214 continue;
73215
73216- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73217- dest = mod->module_init
73218- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73219- else
73220- dest = mod->module_core + sechdrs[i].sh_entsize;
73221+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73222+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73223+ dest = mod->module_init_rw
73224+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73225+ else
73226+ dest = mod->module_init_rx
73227+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73228+ } else {
73229+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73230+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73231+ else
73232+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73233+ }
73234
73235- if (sechdrs[i].sh_type != SHT_NOBITS)
73236- memcpy(dest, (void *)sechdrs[i].sh_addr,
73237- sechdrs[i].sh_size);
73238+ if (sechdrs[i].sh_type != SHT_NOBITS) {
73239+
73240+#ifdef CONFIG_PAX_KERNEXEC
73241+#ifdef CONFIG_X86_64
73242+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73243+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73244+#endif
73245+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73246+ pax_open_kernel();
73247+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73248+ pax_close_kernel();
73249+ } else
73250+#endif
73251+
73252+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73253+ }
73254 /* Update sh_addr to point to copy in image. */
73255- sechdrs[i].sh_addr = (unsigned long)dest;
73256+
73257+#ifdef CONFIG_PAX_KERNEXEC
73258+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73259+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73260+ else
73261+#endif
73262+
73263+ sechdrs[i].sh_addr = (unsigned long)dest;
73264 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73265 }
73266 /* Module has been moved. */
73267@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73268 mod->name);
73269 if (!mod->refptr) {
73270 err = -ENOMEM;
73271- goto free_init;
73272+ goto free_init_rx;
73273 }
73274 #endif
73275 /* Now we've moved module, initialize linked lists, etc. */
73276@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73277 goto free_unload;
73278
73279 /* Set up license info based on the info section */
73280- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73281+ set_license(mod, license);
73282
73283 /*
73284 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73285@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73286 /* Set up MODINFO_ATTR fields */
73287 setup_modinfo(mod, sechdrs, infoindex);
73288
73289+ mod->args = args;
73290+
73291+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73292+ {
73293+ char *p, *p2;
73294+
73295+ if (strstr(mod->args, "grsec_modharden_netdev")) {
73296+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
73297+ err = -EPERM;
73298+ goto cleanup;
73299+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73300+ p += strlen("grsec_modharden_normal");
73301+ p2 = strstr(p, "_");
73302+ if (p2) {
73303+ *p2 = '\0';
73304+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73305+ *p2 = '_';
73306+ }
73307+ err = -EPERM;
73308+ goto cleanup;
73309+ }
73310+ }
73311+#endif
73312+
73313+
73314 /* Fix up syms, so that st_value is a pointer to location. */
73315 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73316 mod);
73317@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73318
73319 /* Now do relocations. */
73320 for (i = 1; i < hdr->e_shnum; i++) {
73321- const char *strtab = (char *)sechdrs[strindex].sh_addr;
73322 unsigned int info = sechdrs[i].sh_info;
73323+ strtab = (char *)sechdrs[strindex].sh_addr;
73324
73325 /* Not a valid relocation section? */
73326 if (info >= hdr->e_shnum)
73327@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73328 * Do it before processing of module parameters, so the module
73329 * can provide parameter accessor functions of its own.
73330 */
73331- if (mod->module_init)
73332- flush_icache_range((unsigned long)mod->module_init,
73333- (unsigned long)mod->module_init
73334- + mod->init_size);
73335- flush_icache_range((unsigned long)mod->module_core,
73336- (unsigned long)mod->module_core + mod->core_size);
73337+ if (mod->module_init_rx)
73338+ flush_icache_range((unsigned long)mod->module_init_rx,
73339+ (unsigned long)mod->module_init_rx
73340+ + mod->init_size_rx);
73341+ flush_icache_range((unsigned long)mod->module_core_rx,
73342+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
73343
73344 set_fs(old_fs);
73345
73346- mod->args = args;
73347 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73348 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73349 mod->name);
73350@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73351 free_unload:
73352 module_unload_free(mod);
73353 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73354+ free_init_rx:
73355 percpu_modfree(mod->refptr);
73356- free_init:
73357 #endif
73358- module_free(mod, mod->module_init);
73359- free_core:
73360- module_free(mod, mod->module_core);
73361+ module_free_exec(mod, mod->module_init_rx);
73362+ free_core_rx:
73363+ module_free_exec(mod, mod->module_core_rx);
73364+ free_init_rw:
73365+ module_free(mod, mod->module_init_rw);
73366+ free_core_rw:
73367+ module_free(mod, mod->module_core_rw);
73368 /* mod will be freed with core. Don't access it beyond this line! */
73369 free_percpu:
73370 if (percpu)
73371@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73372 mod->symtab = mod->core_symtab;
73373 mod->strtab = mod->core_strtab;
73374 #endif
73375- module_free(mod, mod->module_init);
73376- mod->module_init = NULL;
73377- mod->init_size = 0;
73378- mod->init_text_size = 0;
73379+ module_free(mod, mod->module_init_rw);
73380+ module_free_exec(mod, mod->module_init_rx);
73381+ mod->module_init_rw = NULL;
73382+ mod->module_init_rx = NULL;
73383+ mod->init_size_rw = 0;
73384+ mod->init_size_rx = 0;
73385 mutex_unlock(&module_mutex);
73386
73387 return 0;
73388@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
73389 unsigned long nextval;
73390
73391 /* At worse, next value is at end of module */
73392- if (within_module_init(addr, mod))
73393- nextval = (unsigned long)mod->module_init+mod->init_text_size;
73394+ if (within_module_init_rx(addr, mod))
73395+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
73396+ else if (within_module_init_rw(addr, mod))
73397+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
73398+ else if (within_module_core_rx(addr, mod))
73399+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
73400+ else if (within_module_core_rw(addr, mod))
73401+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
73402 else
73403- nextval = (unsigned long)mod->module_core+mod->core_text_size;
73404+ return NULL;
73405
73406 /* Scan for closest preceeding symbol, and next symbol. (ELF
73407 starts real symbols at 1). */
73408@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
73409 char buf[8];
73410
73411 seq_printf(m, "%s %u",
73412- mod->name, mod->init_size + mod->core_size);
73413+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
73414 print_unload_info(m, mod);
73415
73416 /* Informative for users. */
73417@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
73418 mod->state == MODULE_STATE_COMING ? "Loading":
73419 "Live");
73420 /* Used by oprofile and other similar tools. */
73421- seq_printf(m, " 0x%p", mod->module_core);
73422+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73423
73424 /* Taints info */
73425 if (mod->taints)
73426@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
73427
73428 static int __init proc_modules_init(void)
73429 {
73430+#ifndef CONFIG_GRKERNSEC_HIDESYM
73431+#ifdef CONFIG_GRKERNSEC_PROC_USER
73432+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73433+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73434+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
73435+#else
73436 proc_create("modules", 0, NULL, &proc_modules_operations);
73437+#endif
73438+#else
73439+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73440+#endif
73441 return 0;
73442 }
73443 module_init(proc_modules_init);
73444@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
73445 {
73446 struct module *mod;
73447
73448- if (addr < module_addr_min || addr > module_addr_max)
73449+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
73450+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
73451 return NULL;
73452
73453 list_for_each_entry_rcu(mod, &modules, list)
73454- if (within_module_core(addr, mod)
73455- || within_module_init(addr, mod))
73456+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
73457 return mod;
73458 return NULL;
73459 }
73460@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
73461 */
73462 struct module *__module_text_address(unsigned long addr)
73463 {
73464- struct module *mod = __module_address(addr);
73465+ struct module *mod;
73466+
73467+#ifdef CONFIG_X86_32
73468+ addr = ktla_ktva(addr);
73469+#endif
73470+
73471+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
73472+ return NULL;
73473+
73474+ mod = __module_address(addr);
73475+
73476 if (mod) {
73477 /* Make sure it's within the text section. */
73478- if (!within(addr, mod->module_init, mod->init_text_size)
73479- && !within(addr, mod->module_core, mod->core_text_size))
73480+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
73481 mod = NULL;
73482 }
73483 return mod;
73484diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
73485index ec815a9..fe46e99 100644
73486--- a/kernel/mutex-debug.c
73487+++ b/kernel/mutex-debug.c
73488@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
73489 }
73490
73491 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73492- struct thread_info *ti)
73493+ struct task_struct *task)
73494 {
73495 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
73496
73497 /* Mark the current thread as blocked on the lock: */
73498- ti->task->blocked_on = waiter;
73499+ task->blocked_on = waiter;
73500 }
73501
73502 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73503- struct thread_info *ti)
73504+ struct task_struct *task)
73505 {
73506 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
73507- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
73508- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
73509- ti->task->blocked_on = NULL;
73510+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
73511+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
73512+ task->blocked_on = NULL;
73513
73514 list_del_init(&waiter->list);
73515 waiter->task = NULL;
73516@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
73517 return;
73518
73519 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
73520- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
73521+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
73522 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
73523 mutex_clear_owner(lock);
73524 }
73525diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
73526index 6b2d735..372d3c4 100644
73527--- a/kernel/mutex-debug.h
73528+++ b/kernel/mutex-debug.h
73529@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
73530 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
73531 extern void debug_mutex_add_waiter(struct mutex *lock,
73532 struct mutex_waiter *waiter,
73533- struct thread_info *ti);
73534+ struct task_struct *task);
73535 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73536- struct thread_info *ti);
73537+ struct task_struct *task);
73538 extern void debug_mutex_unlock(struct mutex *lock);
73539 extern void debug_mutex_init(struct mutex *lock, const char *name,
73540 struct lock_class_key *key);
73541
73542 static inline void mutex_set_owner(struct mutex *lock)
73543 {
73544- lock->owner = current_thread_info();
73545+ lock->owner = current;
73546 }
73547
73548 static inline void mutex_clear_owner(struct mutex *lock)
73549diff --git a/kernel/mutex.c b/kernel/mutex.c
73550index f85644c..5ee9f77 100644
73551--- a/kernel/mutex.c
73552+++ b/kernel/mutex.c
73553@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73554 */
73555
73556 for (;;) {
73557- struct thread_info *owner;
73558+ struct task_struct *owner;
73559
73560 /*
73561 * If we own the BKL, then don't spin. The owner of
73562@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73563 spin_lock_mutex(&lock->wait_lock, flags);
73564
73565 debug_mutex_lock_common(lock, &waiter);
73566- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
73567+ debug_mutex_add_waiter(lock, &waiter, task);
73568
73569 /* add waiting tasks to the end of the waitqueue (FIFO): */
73570 list_add_tail(&waiter.list, &lock->wait_list);
73571@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73572 * TASK_UNINTERRUPTIBLE case.)
73573 */
73574 if (unlikely(signal_pending_state(state, task))) {
73575- mutex_remove_waiter(lock, &waiter,
73576- task_thread_info(task));
73577+ mutex_remove_waiter(lock, &waiter, task);
73578 mutex_release(&lock->dep_map, 1, ip);
73579 spin_unlock_mutex(&lock->wait_lock, flags);
73580
73581@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73582 done:
73583 lock_acquired(&lock->dep_map, ip);
73584 /* got the lock - rejoice! */
73585- mutex_remove_waiter(lock, &waiter, current_thread_info());
73586+ mutex_remove_waiter(lock, &waiter, task);
73587 mutex_set_owner(lock);
73588
73589 /* set it to 0 if there are no waiters left: */
73590diff --git a/kernel/mutex.h b/kernel/mutex.h
73591index 67578ca..4115fbf 100644
73592--- a/kernel/mutex.h
73593+++ b/kernel/mutex.h
73594@@ -19,7 +19,7 @@
73595 #ifdef CONFIG_SMP
73596 static inline void mutex_set_owner(struct mutex *lock)
73597 {
73598- lock->owner = current_thread_info();
73599+ lock->owner = current;
73600 }
73601
73602 static inline void mutex_clear_owner(struct mutex *lock)
73603diff --git a/kernel/panic.c b/kernel/panic.c
73604index 96b45d0..ff70a46 100644
73605--- a/kernel/panic.c
73606+++ b/kernel/panic.c
73607@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
73608 va_end(args);
73609 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
73610 #ifdef CONFIG_DEBUG_BUGVERBOSE
73611- dump_stack();
73612+ /*
73613+ * Avoid nested stack-dumping if a panic occurs during oops processing
73614+ */
73615+ if (!oops_in_progress)
73616+ dump_stack();
73617 #endif
73618
73619 /*
73620@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
73621 const char *board;
73622
73623 printk(KERN_WARNING "------------[ cut here ]------------\n");
73624- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
73625+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
73626 board = dmi_get_system_info(DMI_PRODUCT_NAME);
73627 if (board)
73628 printk(KERN_WARNING "Hardware name: %s\n", board);
73629@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
73630 */
73631 void __stack_chk_fail(void)
73632 {
73633- panic("stack-protector: Kernel stack is corrupted in: %p\n",
73634+ dump_stack();
73635+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
73636 __builtin_return_address(0));
73637 }
73638 EXPORT_SYMBOL(__stack_chk_fail);
73639diff --git a/kernel/params.c b/kernel/params.c
73640index d656c27..21e452c 100644
73641--- a/kernel/params.c
73642+++ b/kernel/params.c
73643@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
73644 return ret;
73645 }
73646
73647-static struct sysfs_ops module_sysfs_ops = {
73648+static const struct sysfs_ops module_sysfs_ops = {
73649 .show = module_attr_show,
73650 .store = module_attr_store,
73651 };
73652@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
73653 return 0;
73654 }
73655
73656-static struct kset_uevent_ops module_uevent_ops = {
73657+static const struct kset_uevent_ops module_uevent_ops = {
73658 .filter = uevent_filter,
73659 };
73660
73661diff --git a/kernel/perf_event.c b/kernel/perf_event.c
73662index 37ebc14..9c121d9 100644
73663--- a/kernel/perf_event.c
73664+++ b/kernel/perf_event.c
73665@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
73666 */
73667 int sysctl_perf_event_sample_rate __read_mostly = 100000;
73668
73669-static atomic64_t perf_event_id;
73670+static atomic64_unchecked_t perf_event_id;
73671
73672 /*
73673 * Lock for (sysadmin-configurable) event reservations:
73674@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
73675 * In order to keep per-task stats reliable we need to flip the event
73676 * values when we flip the contexts.
73677 */
73678- value = atomic64_read(&next_event->count);
73679- value = atomic64_xchg(&event->count, value);
73680- atomic64_set(&next_event->count, value);
73681+ value = atomic64_read_unchecked(&next_event->count);
73682+ value = atomic64_xchg_unchecked(&event->count, value);
73683+ atomic64_set_unchecked(&next_event->count, value);
73684
73685 swap(event->total_time_enabled, next_event->total_time_enabled);
73686 swap(event->total_time_running, next_event->total_time_running);
73687@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
73688 update_event_times(event);
73689 }
73690
73691- return atomic64_read(&event->count);
73692+ return atomic64_read_unchecked(&event->count);
73693 }
73694
73695 /*
73696@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
73697 values[n++] = 1 + leader->nr_siblings;
73698 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73699 values[n++] = leader->total_time_enabled +
73700- atomic64_read(&leader->child_total_time_enabled);
73701+ atomic64_read_unchecked(&leader->child_total_time_enabled);
73702 }
73703 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73704 values[n++] = leader->total_time_running +
73705- atomic64_read(&leader->child_total_time_running);
73706+ atomic64_read_unchecked(&leader->child_total_time_running);
73707 }
73708
73709 size = n * sizeof(u64);
73710@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
73711 values[n++] = perf_event_read_value(event);
73712 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73713 values[n++] = event->total_time_enabled +
73714- atomic64_read(&event->child_total_time_enabled);
73715+ atomic64_read_unchecked(&event->child_total_time_enabled);
73716 }
73717 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73718 values[n++] = event->total_time_running +
73719- atomic64_read(&event->child_total_time_running);
73720+ atomic64_read_unchecked(&event->child_total_time_running);
73721 }
73722 if (read_format & PERF_FORMAT_ID)
73723 values[n++] = primary_event_id(event);
73724@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
73725 static void perf_event_reset(struct perf_event *event)
73726 {
73727 (void)perf_event_read(event);
73728- atomic64_set(&event->count, 0);
73729+ atomic64_set_unchecked(&event->count, 0);
73730 perf_event_update_userpage(event);
73731 }
73732
73733@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
73734 ++userpg->lock;
73735 barrier();
73736 userpg->index = perf_event_index(event);
73737- userpg->offset = atomic64_read(&event->count);
73738+ userpg->offset = atomic64_read_unchecked(&event->count);
73739 if (event->state == PERF_EVENT_STATE_ACTIVE)
73740- userpg->offset -= atomic64_read(&event->hw.prev_count);
73741+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
73742
73743 userpg->time_enabled = event->total_time_enabled +
73744- atomic64_read(&event->child_total_time_enabled);
73745+ atomic64_read_unchecked(&event->child_total_time_enabled);
73746
73747 userpg->time_running = event->total_time_running +
73748- atomic64_read(&event->child_total_time_running);
73749+ atomic64_read_unchecked(&event->child_total_time_running);
73750
73751 barrier();
73752 ++userpg->lock;
73753@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
73754 u64 values[4];
73755 int n = 0;
73756
73757- values[n++] = atomic64_read(&event->count);
73758+ values[n++] = atomic64_read_unchecked(&event->count);
73759 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73760 values[n++] = event->total_time_enabled +
73761- atomic64_read(&event->child_total_time_enabled);
73762+ atomic64_read_unchecked(&event->child_total_time_enabled);
73763 }
73764 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73765 values[n++] = event->total_time_running +
73766- atomic64_read(&event->child_total_time_running);
73767+ atomic64_read_unchecked(&event->child_total_time_running);
73768 }
73769 if (read_format & PERF_FORMAT_ID)
73770 values[n++] = primary_event_id(event);
73771@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73772 if (leader != event)
73773 leader->pmu->read(leader);
73774
73775- values[n++] = atomic64_read(&leader->count);
73776+ values[n++] = atomic64_read_unchecked(&leader->count);
73777 if (read_format & PERF_FORMAT_ID)
73778 values[n++] = primary_event_id(leader);
73779
73780@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73781 if (sub != event)
73782 sub->pmu->read(sub);
73783
73784- values[n++] = atomic64_read(&sub->count);
73785+ values[n++] = atomic64_read_unchecked(&sub->count);
73786 if (read_format & PERF_FORMAT_ID)
73787 values[n++] = primary_event_id(sub);
73788
73789@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
73790 * need to add enough zero bytes after the string to handle
73791 * the 64bit alignment we do later.
73792 */
73793- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
73794+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
73795 if (!buf) {
73796 name = strncpy(tmp, "//enomem", sizeof(tmp));
73797 goto got_name;
73798 }
73799- name = d_path(&file->f_path, buf, PATH_MAX);
73800+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
73801 if (IS_ERR(name)) {
73802 name = strncpy(tmp, "//toolong", sizeof(tmp));
73803 goto got_name;
73804@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
73805 {
73806 struct hw_perf_event *hwc = &event->hw;
73807
73808- atomic64_add(nr, &event->count);
73809+ atomic64_add_unchecked(nr, &event->count);
73810
73811 if (!hwc->sample_period)
73812 return;
73813@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
73814 u64 now;
73815
73816 now = cpu_clock(cpu);
73817- prev = atomic64_read(&event->hw.prev_count);
73818- atomic64_set(&event->hw.prev_count, now);
73819- atomic64_add(now - prev, &event->count);
73820+ prev = atomic64_read_unchecked(&event->hw.prev_count);
73821+ atomic64_set_unchecked(&event->hw.prev_count, now);
73822+ atomic64_add_unchecked(now - prev, &event->count);
73823 }
73824
73825 static int cpu_clock_perf_event_enable(struct perf_event *event)
73826@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
73827 struct hw_perf_event *hwc = &event->hw;
73828 int cpu = raw_smp_processor_id();
73829
73830- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
73831+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
73832 perf_swevent_start_hrtimer(event);
73833
73834 return 0;
73835@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
73836 u64 prev;
73837 s64 delta;
73838
73839- prev = atomic64_xchg(&event->hw.prev_count, now);
73840+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
73841 delta = now - prev;
73842- atomic64_add(delta, &event->count);
73843+ atomic64_add_unchecked(delta, &event->count);
73844 }
73845
73846 static int task_clock_perf_event_enable(struct perf_event *event)
73847@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
73848
73849 now = event->ctx->time;
73850
73851- atomic64_set(&hwc->prev_count, now);
73852+ atomic64_set_unchecked(&hwc->prev_count, now);
73853
73854 perf_swevent_start_hrtimer(event);
73855
73856@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
73857 event->parent = parent_event;
73858
73859 event->ns = get_pid_ns(current->nsproxy->pid_ns);
73860- event->id = atomic64_inc_return(&perf_event_id);
73861+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
73862
73863 event->state = PERF_EVENT_STATE_INACTIVE;
73864
73865@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
73866 if (child_event->attr.inherit_stat)
73867 perf_event_read_event(child_event, child);
73868
73869- child_val = atomic64_read(&child_event->count);
73870+ child_val = atomic64_read_unchecked(&child_event->count);
73871
73872 /*
73873 * Add back the child's count to the parent's count:
73874 */
73875- atomic64_add(child_val, &parent_event->count);
73876- atomic64_add(child_event->total_time_enabled,
73877+ atomic64_add_unchecked(child_val, &parent_event->count);
73878+ atomic64_add_unchecked(child_event->total_time_enabled,
73879 &parent_event->child_total_time_enabled);
73880- atomic64_add(child_event->total_time_running,
73881+ atomic64_add_unchecked(child_event->total_time_running,
73882 &parent_event->child_total_time_running);
73883
73884 /*
73885diff --git a/kernel/pid.c b/kernel/pid.c
73886index fce7198..4f23a7e 100644
73887--- a/kernel/pid.c
73888+++ b/kernel/pid.c
73889@@ -33,6 +33,7 @@
73890 #include <linux/rculist.h>
73891 #include <linux/bootmem.h>
73892 #include <linux/hash.h>
73893+#include <linux/security.h>
73894 #include <linux/pid_namespace.h>
73895 #include <linux/init_task.h>
73896 #include <linux/syscalls.h>
73897@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
73898
73899 int pid_max = PID_MAX_DEFAULT;
73900
73901-#define RESERVED_PIDS 300
73902+#define RESERVED_PIDS 500
73903
73904 int pid_max_min = RESERVED_PIDS + 1;
73905 int pid_max_max = PID_MAX_LIMIT;
73906@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
73907 */
73908 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
73909 {
73910- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73911+ struct task_struct *task;
73912+
73913+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73914+
73915+ if (gr_pid_is_chrooted(task))
73916+ return NULL;
73917+
73918+ return task;
73919 }
73920
73921 struct task_struct *find_task_by_vpid(pid_t vnr)
73922@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
73923 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
73924 }
73925
73926+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
73927+{
73928+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
73929+}
73930+
73931 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
73932 {
73933 struct pid *pid;
73934diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
73935index 5c9dc22..d271117 100644
73936--- a/kernel/posix-cpu-timers.c
73937+++ b/kernel/posix-cpu-timers.c
73938@@ -6,6 +6,7 @@
73939 #include <linux/posix-timers.h>
73940 #include <linux/errno.h>
73941 #include <linux/math64.h>
73942+#include <linux/security.h>
73943 #include <asm/uaccess.h>
73944 #include <linux/kernel_stat.h>
73945 #include <trace/events/timer.h>
73946@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
73947
73948 static __init int init_posix_cpu_timers(void)
73949 {
73950- struct k_clock process = {
73951+ static struct k_clock process = {
73952 .clock_getres = process_cpu_clock_getres,
73953 .clock_get = process_cpu_clock_get,
73954 .clock_set = do_posix_clock_nosettime,
73955@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
73956 .nsleep = process_cpu_nsleep,
73957 .nsleep_restart = process_cpu_nsleep_restart,
73958 };
73959- struct k_clock thread = {
73960+ static struct k_clock thread = {
73961 .clock_getres = thread_cpu_clock_getres,
73962 .clock_get = thread_cpu_clock_get,
73963 .clock_set = do_posix_clock_nosettime,
73964diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
73965index 5e76d22..cf1baeb 100644
73966--- a/kernel/posix-timers.c
73967+++ b/kernel/posix-timers.c
73968@@ -42,6 +42,7 @@
73969 #include <linux/compiler.h>
73970 #include <linux/idr.h>
73971 #include <linux/posix-timers.h>
73972+#include <linux/grsecurity.h>
73973 #include <linux/syscalls.h>
73974 #include <linux/wait.h>
73975 #include <linux/workqueue.h>
73976@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
73977 * which we beg off on and pass to do_sys_settimeofday().
73978 */
73979
73980-static struct k_clock posix_clocks[MAX_CLOCKS];
73981+static struct k_clock *posix_clocks[MAX_CLOCKS];
73982
73983 /*
73984 * These ones are defined below.
73985@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
73986 */
73987 #define CLOCK_DISPATCH(clock, call, arglist) \
73988 ((clock) < 0 ? posix_cpu_##call arglist : \
73989- (posix_clocks[clock].call != NULL \
73990- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
73991+ (posix_clocks[clock]->call != NULL \
73992+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
73993
73994 /*
73995 * Default clock hook functions when the struct k_clock passed
73996@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
73997 struct timespec *tp)
73998 {
73999 tp->tv_sec = 0;
74000- tp->tv_nsec = posix_clocks[which_clock].res;
74001+ tp->tv_nsec = posix_clocks[which_clock]->res;
74002 return 0;
74003 }
74004
74005@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
74006 return 0;
74007 if ((unsigned) which_clock >= MAX_CLOCKS)
74008 return 1;
74009- if (posix_clocks[which_clock].clock_getres != NULL)
74010+ if (posix_clocks[which_clock] == NULL)
74011 return 0;
74012- if (posix_clocks[which_clock].res != 0)
74013+ if (posix_clocks[which_clock]->clock_getres != NULL)
74014+ return 0;
74015+ if (posix_clocks[which_clock]->res != 0)
74016 return 0;
74017 return 1;
74018 }
74019@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
74020 */
74021 static __init int init_posix_timers(void)
74022 {
74023- struct k_clock clock_realtime = {
74024+ static struct k_clock clock_realtime = {
74025 .clock_getres = hrtimer_get_res,
74026 };
74027- struct k_clock clock_monotonic = {
74028+ static struct k_clock clock_monotonic = {
74029 .clock_getres = hrtimer_get_res,
74030 .clock_get = posix_ktime_get_ts,
74031 .clock_set = do_posix_clock_nosettime,
74032 };
74033- struct k_clock clock_monotonic_raw = {
74034+ static struct k_clock clock_monotonic_raw = {
74035 .clock_getres = hrtimer_get_res,
74036 .clock_get = posix_get_monotonic_raw,
74037 .clock_set = do_posix_clock_nosettime,
74038 .timer_create = no_timer_create,
74039 .nsleep = no_nsleep,
74040 };
74041- struct k_clock clock_realtime_coarse = {
74042+ static struct k_clock clock_realtime_coarse = {
74043 .clock_getres = posix_get_coarse_res,
74044 .clock_get = posix_get_realtime_coarse,
74045 .clock_set = do_posix_clock_nosettime,
74046 .timer_create = no_timer_create,
74047 .nsleep = no_nsleep,
74048 };
74049- struct k_clock clock_monotonic_coarse = {
74050+ static struct k_clock clock_monotonic_coarse = {
74051 .clock_getres = posix_get_coarse_res,
74052 .clock_get = posix_get_monotonic_coarse,
74053 .clock_set = do_posix_clock_nosettime,
74054@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
74055 .nsleep = no_nsleep,
74056 };
74057
74058+ pax_track_stack();
74059+
74060 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
74061 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
74062 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
74063@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
74064 return;
74065 }
74066
74067- posix_clocks[clock_id] = *new_clock;
74068+ posix_clocks[clock_id] = new_clock;
74069 }
74070 EXPORT_SYMBOL_GPL(register_posix_clock);
74071
74072@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
74073 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
74074 return -EFAULT;
74075
74076+ /* only the CLOCK_REALTIME clock can be set, all other clocks
74077+ have their clock_set fptr set to a nosettime dummy function
74078+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
74079+ call common_clock_set, which calls do_sys_settimeofday, which
74080+ we hook
74081+ */
74082+
74083 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
74084 }
74085
74086diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
74087index 04a9e90..bc355aa 100644
74088--- a/kernel/power/hibernate.c
74089+++ b/kernel/power/hibernate.c
74090@@ -48,14 +48,14 @@ enum {
74091
74092 static int hibernation_mode = HIBERNATION_SHUTDOWN;
74093
74094-static struct platform_hibernation_ops *hibernation_ops;
74095+static const struct platform_hibernation_ops *hibernation_ops;
74096
74097 /**
74098 * hibernation_set_ops - set the global hibernate operations
74099 * @ops: the hibernation operations to use in subsequent hibernation transitions
74100 */
74101
74102-void hibernation_set_ops(struct platform_hibernation_ops *ops)
74103+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
74104 {
74105 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
74106 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
74107diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
74108index e8b3370..484c2e4 100644
74109--- a/kernel/power/poweroff.c
74110+++ b/kernel/power/poweroff.c
74111@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
74112 .enable_mask = SYSRQ_ENABLE_BOOT,
74113 };
74114
74115-static int pm_sysrq_init(void)
74116+static int __init pm_sysrq_init(void)
74117 {
74118 register_sysrq_key('o', &sysrq_poweroff_op);
74119 return 0;
74120diff --git a/kernel/power/process.c b/kernel/power/process.c
74121index e7cd671..56d5f459 100644
74122--- a/kernel/power/process.c
74123+++ b/kernel/power/process.c
74124@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
74125 struct timeval start, end;
74126 u64 elapsed_csecs64;
74127 unsigned int elapsed_csecs;
74128+ bool timedout = false;
74129
74130 do_gettimeofday(&start);
74131
74132 end_time = jiffies + TIMEOUT;
74133 do {
74134 todo = 0;
74135+ if (time_after(jiffies, end_time))
74136+ timedout = true;
74137 read_lock(&tasklist_lock);
74138 do_each_thread(g, p) {
74139 if (frozen(p) || !freezeable(p))
74140@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
74141 * It is "frozen enough". If the task does wake
74142 * up, it will immediately call try_to_freeze.
74143 */
74144- if (!task_is_stopped_or_traced(p) &&
74145- !freezer_should_skip(p))
74146+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
74147 todo++;
74148+ if (timedout) {
74149+ printk(KERN_ERR "Task refusing to freeze:\n");
74150+ sched_show_task(p);
74151+ }
74152+ }
74153 } while_each_thread(g, p);
74154 read_unlock(&tasklist_lock);
74155 yield(); /* Yield is okay here */
74156- if (time_after(jiffies, end_time))
74157- break;
74158- } while (todo);
74159+ } while (todo && !timedout);
74160
74161 do_gettimeofday(&end);
74162 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
74163diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
74164index 40dd021..fb30ceb 100644
74165--- a/kernel/power/suspend.c
74166+++ b/kernel/power/suspend.c
74167@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
74168 [PM_SUSPEND_MEM] = "mem",
74169 };
74170
74171-static struct platform_suspend_ops *suspend_ops;
74172+static const struct platform_suspend_ops *suspend_ops;
74173
74174 /**
74175 * suspend_set_ops - Set the global suspend method table.
74176 * @ops: Pointer to ops structure.
74177 */
74178-void suspend_set_ops(struct platform_suspend_ops *ops)
74179+void suspend_set_ops(const struct platform_suspend_ops *ops)
74180 {
74181 mutex_lock(&pm_mutex);
74182 suspend_ops = ops;
74183diff --git a/kernel/printk.c b/kernel/printk.c
74184index 4cade47..4d17900 100644
74185--- a/kernel/printk.c
74186+++ b/kernel/printk.c
74187@@ -33,6 +33,7 @@
74188 #include <linux/bootmem.h>
74189 #include <linux/syscalls.h>
74190 #include <linux/kexec.h>
74191+#include <linux/syslog.h>
74192
74193 #include <asm/uaccess.h>
74194
74195@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
74196 }
74197 #endif
74198
74199-/*
74200- * Commands to do_syslog:
74201- *
74202- * 0 -- Close the log. Currently a NOP.
74203- * 1 -- Open the log. Currently a NOP.
74204- * 2 -- Read from the log.
74205- * 3 -- Read all messages remaining in the ring buffer.
74206- * 4 -- Read and clear all messages remaining in the ring buffer
74207- * 5 -- Clear ring buffer.
74208- * 6 -- Disable printk's to console
74209- * 7 -- Enable printk's to console
74210- * 8 -- Set level of messages printed to console
74211- * 9 -- Return number of unread characters in the log buffer
74212- * 10 -- Return size of the log buffer
74213- */
74214-int do_syslog(int type, char __user *buf, int len)
74215+int do_syslog(int type, char __user *buf, int len, bool from_file)
74216 {
74217 unsigned i, j, limit, count;
74218 int do_clear = 0;
74219 char c;
74220 int error = 0;
74221
74222- error = security_syslog(type);
74223+#ifdef CONFIG_GRKERNSEC_DMESG
74224+ if (grsec_enable_dmesg &&
74225+ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74226+ !capable(CAP_SYS_ADMIN))
74227+ return -EPERM;
74228+#endif
74229+
74230+ error = security_syslog(type, from_file);
74231 if (error)
74232 return error;
74233
74234 switch (type) {
74235- case 0: /* Close log */
74236+ case SYSLOG_ACTION_CLOSE: /* Close log */
74237 break;
74238- case 1: /* Open log */
74239+ case SYSLOG_ACTION_OPEN: /* Open log */
74240 break;
74241- case 2: /* Read from log */
74242+ case SYSLOG_ACTION_READ: /* Read from log */
74243 error = -EINVAL;
74244 if (!buf || len < 0)
74245 goto out;
74246@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74247 if (!error)
74248 error = i;
74249 break;
74250- case 4: /* Read/clear last kernel messages */
74251+ /* Read/clear last kernel messages */
74252+ case SYSLOG_ACTION_READ_CLEAR:
74253 do_clear = 1;
74254 /* FALL THRU */
74255- case 3: /* Read last kernel messages */
74256+ /* Read last kernel messages */
74257+ case SYSLOG_ACTION_READ_ALL:
74258 error = -EINVAL;
74259 if (!buf || len < 0)
74260 goto out;
74261@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74262 }
74263 }
74264 break;
74265- case 5: /* Clear ring buffer */
74266+ /* Clear ring buffer */
74267+ case SYSLOG_ACTION_CLEAR:
74268 logged_chars = 0;
74269 break;
74270- case 6: /* Disable logging to console */
74271+ /* Disable logging to console */
74272+ case SYSLOG_ACTION_CONSOLE_OFF:
74273 if (saved_console_loglevel == -1)
74274 saved_console_loglevel = console_loglevel;
74275 console_loglevel = minimum_console_loglevel;
74276 break;
74277- case 7: /* Enable logging to console */
74278+ /* Enable logging to console */
74279+ case SYSLOG_ACTION_CONSOLE_ON:
74280 if (saved_console_loglevel != -1) {
74281 console_loglevel = saved_console_loglevel;
74282 saved_console_loglevel = -1;
74283 }
74284 break;
74285- case 8: /* Set level of messages printed to console */
74286+ /* Set level of messages printed to console */
74287+ case SYSLOG_ACTION_CONSOLE_LEVEL:
74288 error = -EINVAL;
74289 if (len < 1 || len > 8)
74290 goto out;
74291@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74292 saved_console_loglevel = -1;
74293 error = 0;
74294 break;
74295- case 9: /* Number of chars in the log buffer */
74296+ /* Number of chars in the log buffer */
74297+ case SYSLOG_ACTION_SIZE_UNREAD:
74298 error = log_end - log_start;
74299 break;
74300- case 10: /* Size of the log buffer */
74301+ /* Size of the log buffer */
74302+ case SYSLOG_ACTION_SIZE_BUFFER:
74303 error = log_buf_len;
74304 break;
74305 default:
74306@@ -415,7 +416,7 @@ out:
74307
74308 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74309 {
74310- return do_syslog(type, buf, len);
74311+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74312 }
74313
74314 /*
74315diff --git a/kernel/profile.c b/kernel/profile.c
74316index dfadc5b..7f59404 100644
74317--- a/kernel/profile.c
74318+++ b/kernel/profile.c
74319@@ -39,7 +39,7 @@ struct profile_hit {
74320 /* Oprofile timer tick hook */
74321 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74322
74323-static atomic_t *prof_buffer;
74324+static atomic_unchecked_t *prof_buffer;
74325 static unsigned long prof_len, prof_shift;
74326
74327 int prof_on __read_mostly;
74328@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74329 hits[i].pc = 0;
74330 continue;
74331 }
74332- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74333+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74334 hits[i].hits = hits[i].pc = 0;
74335 }
74336 }
74337@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74338 * Add the current hit(s) and flush the write-queue out
74339 * to the global buffer:
74340 */
74341- atomic_add(nr_hits, &prof_buffer[pc]);
74342+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74343 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74344- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74345+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74346 hits[i].pc = hits[i].hits = 0;
74347 }
74348 out:
74349@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74350 if (prof_on != type || !prof_buffer)
74351 return;
74352 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74353- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74354+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74355 }
74356 #endif /* !CONFIG_SMP */
74357 EXPORT_SYMBOL_GPL(profile_hits);
74358@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74359 return -EFAULT;
74360 buf++; p++; count--; read++;
74361 }
74362- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74363+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74364 if (copy_to_user(buf, (void *)pnt, count))
74365 return -EFAULT;
74366 read += count;
74367@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74368 }
74369 #endif
74370 profile_discard_flip_buffers();
74371- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74372+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74373 return count;
74374 }
74375
74376diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74377index 05625f6..733bf70 100644
74378--- a/kernel/ptrace.c
74379+++ b/kernel/ptrace.c
74380@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74381 return ret;
74382 }
74383
74384-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74385+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
74386+ unsigned int log)
74387 {
74388 const struct cred *cred = current_cred(), *tcred;
74389
74390@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74391 cred->gid != tcred->egid ||
74392 cred->gid != tcred->sgid ||
74393 cred->gid != tcred->gid) &&
74394- !capable(CAP_SYS_PTRACE)) {
74395+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74396+ (log && !capable(CAP_SYS_PTRACE)))
74397+ ) {
74398 rcu_read_unlock();
74399 return -EPERM;
74400 }
74401@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74402 smp_rmb();
74403 if (task->mm)
74404 dumpable = get_dumpable(task->mm);
74405- if (!dumpable && !capable(CAP_SYS_PTRACE))
74406+ if (!dumpable &&
74407+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74408+ (log && !capable(CAP_SYS_PTRACE))))
74409 return -EPERM;
74410
74411 return security_ptrace_access_check(task, mode);
74412@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
74413 {
74414 int err;
74415 task_lock(task);
74416- err = __ptrace_may_access(task, mode);
74417+ err = __ptrace_may_access(task, mode, 0);
74418+ task_unlock(task);
74419+ return !err;
74420+}
74421+
74422+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
74423+{
74424+ int err;
74425+ task_lock(task);
74426+ err = __ptrace_may_access(task, mode, 1);
74427 task_unlock(task);
74428 return !err;
74429 }
74430@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
74431 goto out;
74432
74433 task_lock(task);
74434- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
74435+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
74436 task_unlock(task);
74437 if (retval)
74438 goto unlock_creds;
74439@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
74440 goto unlock_tasklist;
74441
74442 task->ptrace = PT_PTRACED;
74443- if (capable(CAP_SYS_PTRACE))
74444+ if (capable_nolog(CAP_SYS_PTRACE))
74445 task->ptrace |= PT_PTRACE_CAP;
74446
74447 __ptrace_link(task, current);
74448@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
74449 {
74450 int copied = 0;
74451
74452+ pax_track_stack();
74453+
74454 while (len > 0) {
74455 char buf[128];
74456 int this_len, retval;
74457@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
74458 {
74459 int copied = 0;
74460
74461+ pax_track_stack();
74462+
74463 while (len > 0) {
74464 char buf[128];
74465 int this_len, retval;
74466@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
74467 int ret = -EIO;
74468 siginfo_t siginfo;
74469
74470+ pax_track_stack();
74471+
74472 switch (request) {
74473 case PTRACE_PEEKTEXT:
74474 case PTRACE_PEEKDATA:
74475@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
74476 ret = ptrace_setoptions(child, data);
74477 break;
74478 case PTRACE_GETEVENTMSG:
74479- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
74480+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
74481 break;
74482
74483 case PTRACE_GETSIGINFO:
74484 ret = ptrace_getsiginfo(child, &siginfo);
74485 if (!ret)
74486- ret = copy_siginfo_to_user((siginfo_t __user *) data,
74487+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
74488 &siginfo);
74489 break;
74490
74491 case PTRACE_SETSIGINFO:
74492- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
74493+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
74494 sizeof siginfo))
74495 ret = -EFAULT;
74496 else
74497@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
74498 goto out;
74499 }
74500
74501+ if (gr_handle_ptrace(child, request)) {
74502+ ret = -EPERM;
74503+ goto out_put_task_struct;
74504+ }
74505+
74506 if (request == PTRACE_ATTACH) {
74507 ret = ptrace_attach(child);
74508 /*
74509 * Some architectures need to do book-keeping after
74510 * a ptrace attach.
74511 */
74512- if (!ret)
74513+ if (!ret) {
74514 arch_ptrace_attach(child);
74515+ gr_audit_ptrace(child);
74516+ }
74517 goto out_put_task_struct;
74518 }
74519
74520@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
74521 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
74522 if (copied != sizeof(tmp))
74523 return -EIO;
74524- return put_user(tmp, (unsigned long __user *)data);
74525+ return put_user(tmp, (__force unsigned long __user *)data);
74526 }
74527
74528 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
74529@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
74530 siginfo_t siginfo;
74531 int ret;
74532
74533+ pax_track_stack();
74534+
74535 switch (request) {
74536 case PTRACE_PEEKTEXT:
74537 case PTRACE_PEEKDATA:
74538@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74539 goto out;
74540 }
74541
74542+ if (gr_handle_ptrace(child, request)) {
74543+ ret = -EPERM;
74544+ goto out_put_task_struct;
74545+ }
74546+
74547 if (request == PTRACE_ATTACH) {
74548 ret = ptrace_attach(child);
74549 /*
74550 * Some architectures need to do book-keeping after
74551 * a ptrace attach.
74552 */
74553- if (!ret)
74554+ if (!ret) {
74555 arch_ptrace_attach(child);
74556+ gr_audit_ptrace(child);
74557+ }
74558 goto out_put_task_struct;
74559 }
74560
74561diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
74562index 697c0a0..2402696 100644
74563--- a/kernel/rcutorture.c
74564+++ b/kernel/rcutorture.c
74565@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
74566 { 0 };
74567 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
74568 { 0 };
74569-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74570-static atomic_t n_rcu_torture_alloc;
74571-static atomic_t n_rcu_torture_alloc_fail;
74572-static atomic_t n_rcu_torture_free;
74573-static atomic_t n_rcu_torture_mberror;
74574-static atomic_t n_rcu_torture_error;
74575+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74576+static atomic_unchecked_t n_rcu_torture_alloc;
74577+static atomic_unchecked_t n_rcu_torture_alloc_fail;
74578+static atomic_unchecked_t n_rcu_torture_free;
74579+static atomic_unchecked_t n_rcu_torture_mberror;
74580+static atomic_unchecked_t n_rcu_torture_error;
74581 static long n_rcu_torture_timers;
74582 static struct list_head rcu_torture_removed;
74583 static cpumask_var_t shuffle_tmp_mask;
74584@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
74585
74586 spin_lock_bh(&rcu_torture_lock);
74587 if (list_empty(&rcu_torture_freelist)) {
74588- atomic_inc(&n_rcu_torture_alloc_fail);
74589+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
74590 spin_unlock_bh(&rcu_torture_lock);
74591 return NULL;
74592 }
74593- atomic_inc(&n_rcu_torture_alloc);
74594+ atomic_inc_unchecked(&n_rcu_torture_alloc);
74595 p = rcu_torture_freelist.next;
74596 list_del_init(p);
74597 spin_unlock_bh(&rcu_torture_lock);
74598@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
74599 static void
74600 rcu_torture_free(struct rcu_torture *p)
74601 {
74602- atomic_inc(&n_rcu_torture_free);
74603+ atomic_inc_unchecked(&n_rcu_torture_free);
74604 spin_lock_bh(&rcu_torture_lock);
74605 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
74606 spin_unlock_bh(&rcu_torture_lock);
74607@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
74608 i = rp->rtort_pipe_count;
74609 if (i > RCU_TORTURE_PIPE_LEN)
74610 i = RCU_TORTURE_PIPE_LEN;
74611- atomic_inc(&rcu_torture_wcount[i]);
74612+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74613 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74614 rp->rtort_mbtest = 0;
74615 rcu_torture_free(rp);
74616@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
74617 i = rp->rtort_pipe_count;
74618 if (i > RCU_TORTURE_PIPE_LEN)
74619 i = RCU_TORTURE_PIPE_LEN;
74620- atomic_inc(&rcu_torture_wcount[i]);
74621+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74622 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74623 rp->rtort_mbtest = 0;
74624 list_del(&rp->rtort_free);
74625@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
74626 i = old_rp->rtort_pipe_count;
74627 if (i > RCU_TORTURE_PIPE_LEN)
74628 i = RCU_TORTURE_PIPE_LEN;
74629- atomic_inc(&rcu_torture_wcount[i]);
74630+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74631 old_rp->rtort_pipe_count++;
74632 cur_ops->deferred_free(old_rp);
74633 }
74634@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
74635 return;
74636 }
74637 if (p->rtort_mbtest == 0)
74638- atomic_inc(&n_rcu_torture_mberror);
74639+ atomic_inc_unchecked(&n_rcu_torture_mberror);
74640 spin_lock(&rand_lock);
74641 cur_ops->read_delay(&rand);
74642 n_rcu_torture_timers++;
74643@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
74644 continue;
74645 }
74646 if (p->rtort_mbtest == 0)
74647- atomic_inc(&n_rcu_torture_mberror);
74648+ atomic_inc_unchecked(&n_rcu_torture_mberror);
74649 cur_ops->read_delay(&rand);
74650 preempt_disable();
74651 pipe_count = p->rtort_pipe_count;
74652@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
74653 rcu_torture_current,
74654 rcu_torture_current_version,
74655 list_empty(&rcu_torture_freelist),
74656- atomic_read(&n_rcu_torture_alloc),
74657- atomic_read(&n_rcu_torture_alloc_fail),
74658- atomic_read(&n_rcu_torture_free),
74659- atomic_read(&n_rcu_torture_mberror),
74660+ atomic_read_unchecked(&n_rcu_torture_alloc),
74661+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
74662+ atomic_read_unchecked(&n_rcu_torture_free),
74663+ atomic_read_unchecked(&n_rcu_torture_mberror),
74664 n_rcu_torture_timers);
74665- if (atomic_read(&n_rcu_torture_mberror) != 0)
74666+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
74667 cnt += sprintf(&page[cnt], " !!!");
74668 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
74669 if (i > 1) {
74670 cnt += sprintf(&page[cnt], "!!! ");
74671- atomic_inc(&n_rcu_torture_error);
74672+ atomic_inc_unchecked(&n_rcu_torture_error);
74673 WARN_ON_ONCE(1);
74674 }
74675 cnt += sprintf(&page[cnt], "Reader Pipe: ");
74676@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
74677 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
74678 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74679 cnt += sprintf(&page[cnt], " %d",
74680- atomic_read(&rcu_torture_wcount[i]));
74681+ atomic_read_unchecked(&rcu_torture_wcount[i]));
74682 }
74683 cnt += sprintf(&page[cnt], "\n");
74684 if (cur_ops->stats)
74685@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
74686
74687 if (cur_ops->cleanup)
74688 cur_ops->cleanup();
74689- if (atomic_read(&n_rcu_torture_error))
74690+ if (atomic_read_unchecked(&n_rcu_torture_error))
74691 rcu_torture_print_module_parms("End of test: FAILURE");
74692 else
74693 rcu_torture_print_module_parms("End of test: SUCCESS");
74694@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
74695
74696 rcu_torture_current = NULL;
74697 rcu_torture_current_version = 0;
74698- atomic_set(&n_rcu_torture_alloc, 0);
74699- atomic_set(&n_rcu_torture_alloc_fail, 0);
74700- atomic_set(&n_rcu_torture_free, 0);
74701- atomic_set(&n_rcu_torture_mberror, 0);
74702- atomic_set(&n_rcu_torture_error, 0);
74703+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
74704+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
74705+ atomic_set_unchecked(&n_rcu_torture_free, 0);
74706+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
74707+ atomic_set_unchecked(&n_rcu_torture_error, 0);
74708 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
74709- atomic_set(&rcu_torture_wcount[i], 0);
74710+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
74711 for_each_possible_cpu(cpu) {
74712 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74713 per_cpu(rcu_torture_count, cpu)[i] = 0;
74714diff --git a/kernel/rcutree.c b/kernel/rcutree.c
74715index 683c4f3..97f54c6 100644
74716--- a/kernel/rcutree.c
74717+++ b/kernel/rcutree.c
74718@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
74719 /*
74720 * Do softirq processing for the current CPU.
74721 */
74722-static void rcu_process_callbacks(struct softirq_action *unused)
74723+static void rcu_process_callbacks(void)
74724 {
74725 /*
74726 * Memory references from any prior RCU read-side critical sections
74727diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
74728index c03edf7..ac1b341 100644
74729--- a/kernel/rcutree_plugin.h
74730+++ b/kernel/rcutree_plugin.h
74731@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
74732 */
74733 void __rcu_read_lock(void)
74734 {
74735- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
74736+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
74737 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
74738 }
74739 EXPORT_SYMBOL_GPL(__rcu_read_lock);
74740@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
74741 struct task_struct *t = current;
74742
74743 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
74744- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
74745+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
74746 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
74747 rcu_read_unlock_special(t);
74748 }
74749diff --git a/kernel/relay.c b/kernel/relay.c
74750index 760c262..908e9ee 100644
74751--- a/kernel/relay.c
74752+++ b/kernel/relay.c
74753@@ -171,10 +171,14 @@ depopulate:
74754 */
74755 static struct rchan_buf *relay_create_buf(struct rchan *chan)
74756 {
74757- struct rchan_buf *buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
74758+ struct rchan_buf *buf;
74759+
74760+ if (chan->n_subbufs > UINT_MAX / sizeof(size_t *))
74761+ return NULL;
74762+
74763+ buf = kzalloc(sizeof(struct rchan_buf), GFP_KERNEL);
74764 if (!buf)
74765 return NULL;
74766-
74767 buf->padding = kmalloc(chan->n_subbufs * sizeof(size_t *), GFP_KERNEL);
74768 if (!buf->padding)
74769 goto free_buf;
74770@@ -581,6 +585,8 @@ struct rchan *relay_open(const char *base_filename,
74771
74772 if (!(subbuf_size && n_subbufs))
74773 return NULL;
74774+ if (subbuf_size > UINT_MAX / n_subbufs)
74775+ return NULL;
74776
74777 chan = kzalloc(sizeof(struct rchan), GFP_KERNEL);
74778 if (!chan)
74779@@ -1222,7 +1228,7 @@ static int subbuf_splice_actor(struct file *in,
74780 unsigned int flags,
74781 int *nonpad_ret)
74782 {
74783- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
74784+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
74785 struct rchan_buf *rbuf = in->private_data;
74786 unsigned int subbuf_size = rbuf->chan->subbuf_size;
74787 uint64_t pos = (uint64_t) *ppos;
74788@@ -1241,6 +1247,9 @@ static int subbuf_splice_actor(struct file *in,
74789 .ops = &relay_pipe_buf_ops,
74790 .spd_release = relay_page_release,
74791 };
74792+ ssize_t ret;
74793+
74794+ pax_track_stack();
74795
74796 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
74797 return 0;
74798diff --git a/kernel/resource.c b/kernel/resource.c
74799index fb11a58..4e61ae1 100644
74800--- a/kernel/resource.c
74801+++ b/kernel/resource.c
74802@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
74803
74804 static int __init ioresources_init(void)
74805 {
74806+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74807+#ifdef CONFIG_GRKERNSEC_PROC_USER
74808+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
74809+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
74810+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74811+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
74812+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
74813+#endif
74814+#else
74815 proc_create("ioports", 0, NULL, &proc_ioports_operations);
74816 proc_create("iomem", 0, NULL, &proc_iomem_operations);
74817+#endif
74818 return 0;
74819 }
74820 __initcall(ioresources_init);
74821diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
74822index a56f629..1fc4989 100644
74823--- a/kernel/rtmutex-tester.c
74824+++ b/kernel/rtmutex-tester.c
74825@@ -21,7 +21,7 @@
74826 #define MAX_RT_TEST_MUTEXES 8
74827
74828 static spinlock_t rttest_lock;
74829-static atomic_t rttest_event;
74830+static atomic_unchecked_t rttest_event;
74831
74832 struct test_thread_data {
74833 int opcode;
74834@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74835
74836 case RTTEST_LOCKCONT:
74837 td->mutexes[td->opdata] = 1;
74838- td->event = atomic_add_return(1, &rttest_event);
74839+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74840 return 0;
74841
74842 case RTTEST_RESET:
74843@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74844 return 0;
74845
74846 case RTTEST_RESETEVENT:
74847- atomic_set(&rttest_event, 0);
74848+ atomic_set_unchecked(&rttest_event, 0);
74849 return 0;
74850
74851 default:
74852@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74853 return ret;
74854
74855 td->mutexes[id] = 1;
74856- td->event = atomic_add_return(1, &rttest_event);
74857+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74858 rt_mutex_lock(&mutexes[id]);
74859- td->event = atomic_add_return(1, &rttest_event);
74860+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74861 td->mutexes[id] = 4;
74862 return 0;
74863
74864@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74865 return ret;
74866
74867 td->mutexes[id] = 1;
74868- td->event = atomic_add_return(1, &rttest_event);
74869+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74870 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
74871- td->event = atomic_add_return(1, &rttest_event);
74872+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74873 td->mutexes[id] = ret ? 0 : 4;
74874 return ret ? -EINTR : 0;
74875
74876@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74877 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
74878 return ret;
74879
74880- td->event = atomic_add_return(1, &rttest_event);
74881+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74882 rt_mutex_unlock(&mutexes[id]);
74883- td->event = atomic_add_return(1, &rttest_event);
74884+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74885 td->mutexes[id] = 0;
74886 return 0;
74887
74888@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74889 break;
74890
74891 td->mutexes[dat] = 2;
74892- td->event = atomic_add_return(1, &rttest_event);
74893+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74894 break;
74895
74896 case RTTEST_LOCKBKL:
74897@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74898 return;
74899
74900 td->mutexes[dat] = 3;
74901- td->event = atomic_add_return(1, &rttest_event);
74902+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74903 break;
74904
74905 case RTTEST_LOCKNOWAIT:
74906@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74907 return;
74908
74909 td->mutexes[dat] = 1;
74910- td->event = atomic_add_return(1, &rttest_event);
74911+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74912 return;
74913
74914 case RTTEST_LOCKBKL:
74915diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
74916index 29bd4ba..8c5de90 100644
74917--- a/kernel/rtmutex.c
74918+++ b/kernel/rtmutex.c
74919@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
74920 */
74921 spin_lock_irqsave(&pendowner->pi_lock, flags);
74922
74923- WARN_ON(!pendowner->pi_blocked_on);
74924+ BUG_ON(!pendowner->pi_blocked_on);
74925 WARN_ON(pendowner->pi_blocked_on != waiter);
74926 WARN_ON(pendowner->pi_blocked_on->lock != lock);
74927
74928diff --git a/kernel/sched.c b/kernel/sched.c
74929index 0591df8..e3af3a4 100644
74930--- a/kernel/sched.c
74931+++ b/kernel/sched.c
74932@@ -5043,7 +5043,7 @@ out:
74933 * In CONFIG_NO_HZ case, the idle load balance owner will do the
74934 * rebalancing for all the cpus for whom scheduler ticks are stopped.
74935 */
74936-static void run_rebalance_domains(struct softirq_action *h)
74937+static void run_rebalance_domains(void)
74938 {
74939 int this_cpu = smp_processor_id();
74940 struct rq *this_rq = cpu_rq(this_cpu);
74941@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
74942 }
74943 }
74944
74945+#ifdef CONFIG_GRKERNSEC_SETXID
74946+extern void gr_delayed_cred_worker(void);
74947+static inline void gr_cred_schedule(void)
74948+{
74949+ if (unlikely(current->delayed_cred))
74950+ gr_delayed_cred_worker();
74951+}
74952+#else
74953+static inline void gr_cred_schedule(void)
74954+{
74955+}
74956+#endif
74957+
74958 /*
74959 * schedule() is the main scheduler function.
74960 */
74961@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
74962 struct rq *rq;
74963 int cpu;
74964
74965+ pax_track_stack();
74966+
74967 need_resched:
74968 preempt_disable();
74969 cpu = smp_processor_id();
74970@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
74971
74972 schedule_debug(prev);
74973
74974+ gr_cred_schedule();
74975+
74976 if (sched_feat(HRTICK))
74977 hrtick_clear(rq);
74978
74979@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
74980 * Look out! "owner" is an entirely speculative pointer
74981 * access and not reliable.
74982 */
74983-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74984+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
74985 {
74986 unsigned int cpu;
74987 struct rq *rq;
74988@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74989 * DEBUG_PAGEALLOC could have unmapped it if
74990 * the mutex owner just released it and exited.
74991 */
74992- if (probe_kernel_address(&owner->cpu, cpu))
74993+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
74994 return 0;
74995 #else
74996- cpu = owner->cpu;
74997+ cpu = task_thread_info(owner)->cpu;
74998 #endif
74999
75000 /*
75001@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
75002 /*
75003 * Is that owner really running on that cpu?
75004 */
75005- if (task_thread_info(rq->curr) != owner || need_resched())
75006+ if (rq->curr != owner || need_resched())
75007 return 0;
75008
75009 cpu_relax();
75010@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
75011 /* convert nice value [19,-20] to rlimit style value [1,40] */
75012 int nice_rlim = 20 - nice;
75013
75014+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
75015+
75016 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
75017 capable(CAP_SYS_NICE));
75018 }
75019@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
75020 if (nice > 19)
75021 nice = 19;
75022
75023- if (increment < 0 && !can_nice(current, nice))
75024+ if (increment < 0 && (!can_nice(current, nice) ||
75025+ gr_handle_chroot_nice()))
75026 return -EPERM;
75027
75028 retval = security_task_setnice(current, nice);
75029@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
75030 long power;
75031 int weight;
75032
75033- WARN_ON(!sd || !sd->groups);
75034+ BUG_ON(!sd || !sd->groups);
75035
75036 if (cpu != group_first_cpu(sd->groups))
75037 return;
75038diff --git a/kernel/signal.c b/kernel/signal.c
75039index 2494827..cda80a0 100644
75040--- a/kernel/signal.c
75041+++ b/kernel/signal.c
75042@@ -41,12 +41,12 @@
75043
75044 static struct kmem_cache *sigqueue_cachep;
75045
75046-static void __user *sig_handler(struct task_struct *t, int sig)
75047+static __sighandler_t sig_handler(struct task_struct *t, int sig)
75048 {
75049 return t->sighand->action[sig - 1].sa.sa_handler;
75050 }
75051
75052-static int sig_handler_ignored(void __user *handler, int sig)
75053+static int sig_handler_ignored(__sighandler_t handler, int sig)
75054 {
75055 /* Is it explicitly or implicitly ignored? */
75056 return handler == SIG_IGN ||
75057@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
75058 static int sig_task_ignored(struct task_struct *t, int sig,
75059 int from_ancestor_ns)
75060 {
75061- void __user *handler;
75062+ __sighandler_t handler;
75063
75064 handler = sig_handler(t, sig);
75065
75066@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
75067 */
75068 user = get_uid(__task_cred(t)->user);
75069 atomic_inc(&user->sigpending);
75070+
75071+ if (!override_rlimit)
75072+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
75073 if (override_rlimit ||
75074 atomic_read(&user->sigpending) <=
75075 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
75076@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
75077
75078 int unhandled_signal(struct task_struct *tsk, int sig)
75079 {
75080- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
75081+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
75082 if (is_global_init(tsk))
75083 return 1;
75084 if (handler != SIG_IGN && handler != SIG_DFL)
75085@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
75086 }
75087 }
75088
75089+ /* allow glibc communication via tgkill to other threads in our
75090+ thread group */
75091+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
75092+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
75093+ && gr_handle_signal(t, sig))
75094+ return -EPERM;
75095+
75096 return security_task_kill(t, info, sig, 0);
75097 }
75098
75099@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75100 return send_signal(sig, info, p, 1);
75101 }
75102
75103-static int
75104+int
75105 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75106 {
75107 return send_signal(sig, info, t, 0);
75108@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75109 unsigned long int flags;
75110 int ret, blocked, ignored;
75111 struct k_sigaction *action;
75112+ int is_unhandled = 0;
75113
75114 spin_lock_irqsave(&t->sighand->siglock, flags);
75115 action = &t->sighand->action[sig-1];
75116@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
75117 }
75118 if (action->sa.sa_handler == SIG_DFL)
75119 t->signal->flags &= ~SIGNAL_UNKILLABLE;
75120+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
75121+ is_unhandled = 1;
75122 ret = specific_send_sig_info(sig, info, t);
75123 spin_unlock_irqrestore(&t->sighand->siglock, flags);
75124
75125+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
75126+ normal operation */
75127+ if (is_unhandled) {
75128+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
75129+ gr_handle_crash(t, sig);
75130+ }
75131+
75132 return ret;
75133 }
75134
75135@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
75136 {
75137 int ret = check_kill_permission(sig, info, p);
75138
75139- if (!ret && sig)
75140+ if (!ret && sig) {
75141 ret = do_send_sig_info(sig, info, p, true);
75142+ if (!ret)
75143+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
75144+ }
75145
75146 return ret;
75147 }
75148@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
75149 {
75150 siginfo_t info;
75151
75152+ pax_track_stack();
75153+
75154 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
75155
75156 memset(&info, 0, sizeof info);
75157@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
75158 int error = -ESRCH;
75159
75160 rcu_read_lock();
75161- p = find_task_by_vpid(pid);
75162+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75163+ /* allow glibc communication via tgkill to other threads in our
75164+ thread group */
75165+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
75166+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
75167+ p = find_task_by_vpid_unrestricted(pid);
75168+ else
75169+#endif
75170+ p = find_task_by_vpid(pid);
75171 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
75172 error = check_kill_permission(sig, info, p);
75173 /*
75174diff --git a/kernel/smp.c b/kernel/smp.c
75175index aa9cff3..631a0de 100644
75176--- a/kernel/smp.c
75177+++ b/kernel/smp.c
75178@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
75179 }
75180 EXPORT_SYMBOL(smp_call_function);
75181
75182-void ipi_call_lock(void)
75183+void ipi_call_lock(void) __acquires(call_function.lock)
75184 {
75185 spin_lock(&call_function.lock);
75186 }
75187
75188-void ipi_call_unlock(void)
75189+void ipi_call_unlock(void) __releases(call_function.lock)
75190 {
75191 spin_unlock(&call_function.lock);
75192 }
75193
75194-void ipi_call_lock_irq(void)
75195+void ipi_call_lock_irq(void) __acquires(call_function.lock)
75196 {
75197 spin_lock_irq(&call_function.lock);
75198 }
75199
75200-void ipi_call_unlock_irq(void)
75201+void ipi_call_unlock_irq(void) __releases(call_function.lock)
75202 {
75203 spin_unlock_irq(&call_function.lock);
75204 }
75205diff --git a/kernel/softirq.c b/kernel/softirq.c
75206index 04a0252..580c512 100644
75207--- a/kernel/softirq.c
75208+++ b/kernel/softirq.c
75209@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
75210
75211 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
75212
75213-char *softirq_to_name[NR_SOFTIRQS] = {
75214+const char * const softirq_to_name[NR_SOFTIRQS] = {
75215 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
75216 "TASKLET", "SCHED", "HRTIMER", "RCU"
75217 };
75218@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
75219
75220 asmlinkage void __do_softirq(void)
75221 {
75222- struct softirq_action *h;
75223+ const struct softirq_action *h;
75224 __u32 pending;
75225 int max_restart = MAX_SOFTIRQ_RESTART;
75226 int cpu;
75227@@ -233,7 +233,7 @@ restart:
75228 kstat_incr_softirqs_this_cpu(h - softirq_vec);
75229
75230 trace_softirq_entry(h, softirq_vec);
75231- h->action(h);
75232+ h->action();
75233 trace_softirq_exit(h, softirq_vec);
75234 if (unlikely(prev_count != preempt_count())) {
75235 printk(KERN_ERR "huh, entered softirq %td %s %p"
75236@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
75237 local_irq_restore(flags);
75238 }
75239
75240-void open_softirq(int nr, void (*action)(struct softirq_action *))
75241+void open_softirq(int nr, void (*action)(void))
75242 {
75243- softirq_vec[nr].action = action;
75244+ pax_open_kernel();
75245+ *(void **)&softirq_vec[nr].action = action;
75246+ pax_close_kernel();
75247 }
75248
75249 /*
75250@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75251
75252 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75253
75254-static void tasklet_action(struct softirq_action *a)
75255+static void tasklet_action(void)
75256 {
75257 struct tasklet_struct *list;
75258
75259@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75260 }
75261 }
75262
75263-static void tasklet_hi_action(struct softirq_action *a)
75264+static void tasklet_hi_action(void)
75265 {
75266 struct tasklet_struct *list;
75267
75268diff --git a/kernel/sys.c b/kernel/sys.c
75269index e9512b1..f07185f 100644
75270--- a/kernel/sys.c
75271+++ b/kernel/sys.c
75272@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75273 error = -EACCES;
75274 goto out;
75275 }
75276+
75277+ if (gr_handle_chroot_setpriority(p, niceval)) {
75278+ error = -EACCES;
75279+ goto out;
75280+ }
75281+
75282 no_nice = security_task_setnice(p, niceval);
75283 if (no_nice) {
75284 error = no_nice;
75285@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75286 !(user = find_user(who)))
75287 goto out_unlock; /* No processes for this user */
75288
75289- do_each_thread(g, p)
75290+ do_each_thread(g, p) {
75291 if (__task_cred(p)->uid == who)
75292 error = set_one_prio(p, niceval, error);
75293- while_each_thread(g, p);
75294+ } while_each_thread(g, p);
75295 if (who != cred->uid)
75296 free_uid(user); /* For find_user() */
75297 break;
75298@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75299 !(user = find_user(who)))
75300 goto out_unlock; /* No processes for this user */
75301
75302- do_each_thread(g, p)
75303+ do_each_thread(g, p) {
75304 if (__task_cred(p)->uid == who) {
75305 niceval = 20 - task_nice(p);
75306 if (niceval > retval)
75307 retval = niceval;
75308 }
75309- while_each_thread(g, p);
75310+ } while_each_thread(g, p);
75311 if (who != cred->uid)
75312 free_uid(user); /* for find_user() */
75313 break;
75314@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75315 goto error;
75316 }
75317
75318+ if (gr_check_group_change(new->gid, new->egid, -1))
75319+ goto error;
75320+
75321 if (rgid != (gid_t) -1 ||
75322 (egid != (gid_t) -1 && egid != old->gid))
75323 new->sgid = new->egid;
75324@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75325 goto error;
75326
75327 retval = -EPERM;
75328+
75329+ if (gr_check_group_change(gid, gid, gid))
75330+ goto error;
75331+
75332 if (capable(CAP_SETGID))
75333 new->gid = new->egid = new->sgid = new->fsgid = gid;
75334 else if (gid == old->gid || gid == old->sgid)
75335@@ -559,7 +572,7 @@ error:
75336 /*
75337 * change the user struct in a credentials set to match the new UID
75338 */
75339-static int set_user(struct cred *new)
75340+int set_user(struct cred *new)
75341 {
75342 struct user_struct *new_user;
75343
75344@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75345 if (!new_user)
75346 return -EAGAIN;
75347
75348+ /*
75349+ * We don't fail in case of NPROC limit excess here because too many
75350+ * poorly written programs don't check set*uid() return code, assuming
75351+ * it never fails if called by root. We may still enforce NPROC limit
75352+ * for programs doing set*uid()+execve() by harmlessly deferring the
75353+ * failure to the execve() stage.
75354+ */
75355 if (atomic_read(&new_user->processes) >=
75356 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75357- new_user != INIT_USER) {
75358- free_uid(new_user);
75359- return -EAGAIN;
75360- }
75361+ new_user != INIT_USER)
75362+ current->flags |= PF_NPROC_EXCEEDED;
75363+ else
75364+ current->flags &= ~PF_NPROC_EXCEEDED;
75365
75366 free_uid(new->user);
75367 new->user = new_user;
75368@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75369 goto error;
75370 }
75371
75372+ if (gr_check_user_change(new->uid, new->euid, -1))
75373+ goto error;
75374+
75375 if (new->uid != old->uid) {
75376 retval = set_user(new);
75377 if (retval < 0)
75378@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75379 goto error;
75380
75381 retval = -EPERM;
75382+
75383+ if (gr_check_crash_uid(uid))
75384+ goto error;
75385+ if (gr_check_user_change(uid, uid, uid))
75386+ goto error;
75387+
75388 if (capable(CAP_SETUID)) {
75389 new->suid = new->uid = uid;
75390 if (uid != old->uid) {
75391@@ -732,6 +761,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75392 goto error;
75393 }
75394
75395+ if (gr_check_user_change(ruid, euid, -1))
75396+ goto error;
75397+
75398 if (ruid != (uid_t) -1) {
75399 new->uid = ruid;
75400 if (ruid != old->uid) {
75401@@ -800,6 +832,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75402 goto error;
75403 }
75404
75405+ if (gr_check_group_change(rgid, egid, -1))
75406+ goto error;
75407+
75408 if (rgid != (gid_t) -1)
75409 new->gid = rgid;
75410 if (egid != (gid_t) -1)
75411@@ -849,6 +884,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
75412 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
75413 goto error;
75414
75415+ if (gr_check_user_change(-1, -1, uid))
75416+ goto error;
75417+
75418 if (uid == old->uid || uid == old->euid ||
75419 uid == old->suid || uid == old->fsuid ||
75420 capable(CAP_SETUID)) {
75421@@ -889,6 +927,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
75422 if (gid == old->gid || gid == old->egid ||
75423 gid == old->sgid || gid == old->fsgid ||
75424 capable(CAP_SETGID)) {
75425+ if (gr_check_group_change(-1, -1, gid))
75426+ goto error;
75427+
75428 if (gid != old_fsgid) {
75429 new->fsgid = gid;
75430 goto change_okay;
75431@@ -1454,7 +1495,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
75432 error = get_dumpable(me->mm);
75433 break;
75434 case PR_SET_DUMPABLE:
75435- if (arg2 < 0 || arg2 > 1) {
75436+ if (arg2 > 1) {
75437 error = -EINVAL;
75438 break;
75439 }
75440diff --git a/kernel/sysctl.c b/kernel/sysctl.c
75441index b8bd058..ab6a76be 100644
75442--- a/kernel/sysctl.c
75443+++ b/kernel/sysctl.c
75444@@ -63,6 +63,13 @@
75445 static int deprecated_sysctl_warning(struct __sysctl_args *args);
75446
75447 #if defined(CONFIG_SYSCTL)
75448+#include <linux/grsecurity.h>
75449+#include <linux/grinternal.h>
75450+
75451+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
75452+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
75453+ const int op);
75454+extern int gr_handle_chroot_sysctl(const int op);
75455
75456 /* External variables not in a header file. */
75457 extern int C_A_D;
75458@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
75459 static int proc_taint(struct ctl_table *table, int write,
75460 void __user *buffer, size_t *lenp, loff_t *ppos);
75461 #endif
75462+extern ctl_table grsecurity_table[];
75463
75464 static struct ctl_table root_table[];
75465 static struct ctl_table_root sysctl_table_root;
75466@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
75467 int sysctl_legacy_va_layout;
75468 #endif
75469
75470+#ifdef CONFIG_PAX_SOFTMODE
75471+static ctl_table pax_table[] = {
75472+ {
75473+ .ctl_name = CTL_UNNUMBERED,
75474+ .procname = "softmode",
75475+ .data = &pax_softmode,
75476+ .maxlen = sizeof(unsigned int),
75477+ .mode = 0600,
75478+ .proc_handler = &proc_dointvec,
75479+ },
75480+
75481+ { .ctl_name = 0 }
75482+};
75483+#endif
75484+
75485 extern int prove_locking;
75486 extern int lock_stat;
75487
75488@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
75489 #endif
75490
75491 static struct ctl_table kern_table[] = {
75492+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
75493+ {
75494+ .ctl_name = CTL_UNNUMBERED,
75495+ .procname = "grsecurity",
75496+ .mode = 0500,
75497+ .child = grsecurity_table,
75498+ },
75499+#endif
75500+
75501+#ifdef CONFIG_PAX_SOFTMODE
75502+ {
75503+ .ctl_name = CTL_UNNUMBERED,
75504+ .procname = "pax",
75505+ .mode = 0500,
75506+ .child = pax_table,
75507+ },
75508+#endif
75509+
75510 {
75511 .ctl_name = CTL_UNNUMBERED,
75512 .procname = "sched_child_runs_first",
75513@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
75514 .data = &modprobe_path,
75515 .maxlen = KMOD_PATH_LEN,
75516 .mode = 0644,
75517- .proc_handler = &proc_dostring,
75518- .strategy = &sysctl_string,
75519+ .proc_handler = &proc_dostring_modpriv,
75520+ .strategy = &sysctl_string_modpriv,
75521 },
75522 {
75523 .ctl_name = CTL_UNNUMBERED,
75524@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
75525 .mode = 0644,
75526 .proc_handler = &proc_dointvec
75527 },
75528+ {
75529+ .procname = "heap_stack_gap",
75530+ .data = &sysctl_heap_stack_gap,
75531+ .maxlen = sizeof(sysctl_heap_stack_gap),
75532+ .mode = 0644,
75533+ .proc_handler = proc_doulongvec_minmax,
75534+ },
75535 #else
75536 {
75537 .ctl_name = CTL_UNNUMBERED,
75538@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
75539 return 0;
75540 }
75541
75542+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
75543+
75544 static int parse_table(int __user *name, int nlen,
75545 void __user *oldval, size_t __user *oldlenp,
75546 void __user *newval, size_t newlen,
75547@@ -1821,7 +1871,7 @@ repeat:
75548 if (n == table->ctl_name) {
75549 int error;
75550 if (table->child) {
75551- if (sysctl_perm(root, table, MAY_EXEC))
75552+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
75553 return -EPERM;
75554 name++;
75555 nlen--;
75556@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
75557 int error;
75558 int mode;
75559
75560+ if (table->parent != NULL && table->parent->procname != NULL &&
75561+ table->procname != NULL &&
75562+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
75563+ return -EACCES;
75564+ if (gr_handle_chroot_sysctl(op))
75565+ return -EACCES;
75566+ error = gr_handle_sysctl(table, op);
75567+ if (error)
75568+ return error;
75569+
75570+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75571+ if (error)
75572+ return error;
75573+
75574+ if (root->permissions)
75575+ mode = root->permissions(root, current->nsproxy, table);
75576+ else
75577+ mode = table->mode;
75578+
75579+ return test_perm(mode, op);
75580+}
75581+
75582+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
75583+{
75584+ int error;
75585+ int mode;
75586+
75587 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75588 if (error)
75589 return error;
75590@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
75591 buffer, lenp, ppos);
75592 }
75593
75594+int proc_dostring_modpriv(struct ctl_table *table, int write,
75595+ void __user *buffer, size_t *lenp, loff_t *ppos)
75596+{
75597+ if (write && !capable(CAP_SYS_MODULE))
75598+ return -EPERM;
75599+
75600+ return _proc_do_string(table->data, table->maxlen, write,
75601+ buffer, lenp, ppos);
75602+}
75603+
75604
75605 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
75606 int *valp,
75607@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
75608 vleft = table->maxlen / sizeof(unsigned long);
75609 left = *lenp;
75610
75611- for (; left && vleft--; i++, min++, max++, first=0) {
75612+ for (; left && vleft--; i++, first=0) {
75613 if (write) {
75614 while (left) {
75615 char c;
75616@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
75617 return -ENOSYS;
75618 }
75619
75620+int proc_dostring_modpriv(struct ctl_table *table, int write,
75621+ void __user *buffer, size_t *lenp, loff_t *ppos)
75622+{
75623+ return -ENOSYS;
75624+}
75625+
75626 int proc_dointvec(struct ctl_table *table, int write,
75627 void __user *buffer, size_t *lenp, loff_t *ppos)
75628 {
75629@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
75630 return 1;
75631 }
75632
75633+int sysctl_string_modpriv(struct ctl_table *table,
75634+ void __user *oldval, size_t __user *oldlenp,
75635+ void __user *newval, size_t newlen)
75636+{
75637+ if (newval && newlen && !capable(CAP_SYS_MODULE))
75638+ return -EPERM;
75639+
75640+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
75641+}
75642+
75643 /*
75644 * This function makes sure that all of the integers in the vector
75645 * are between the minimum and maximum values given in the arrays
75646@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
75647 return -ENOSYS;
75648 }
75649
75650+int sysctl_string_modpriv(struct ctl_table *table,
75651+ void __user *oldval, size_t __user *oldlenp,
75652+ void __user *newval, size_t newlen)
75653+{
75654+ return -ENOSYS;
75655+}
75656+
75657 int sysctl_intvec(struct ctl_table *table,
75658 void __user *oldval, size_t __user *oldlenp,
75659 void __user *newval, size_t newlen)
75660@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
75661 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
75662 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
75663 EXPORT_SYMBOL(proc_dostring);
75664+EXPORT_SYMBOL(proc_dostring_modpriv);
75665 EXPORT_SYMBOL(proc_doulongvec_minmax);
75666 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
75667 EXPORT_SYMBOL(register_sysctl_table);
75668@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
75669 EXPORT_SYMBOL(sysctl_jiffies);
75670 EXPORT_SYMBOL(sysctl_ms_jiffies);
75671 EXPORT_SYMBOL(sysctl_string);
75672+EXPORT_SYMBOL(sysctl_string_modpriv);
75673 EXPORT_SYMBOL(sysctl_data);
75674 EXPORT_SYMBOL(unregister_sysctl_table);
75675diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
75676index 469193c..ea3ecb2 100644
75677--- a/kernel/sysctl_check.c
75678+++ b/kernel/sysctl_check.c
75679@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
75680 } else {
75681 if ((table->strategy == sysctl_data) ||
75682 (table->strategy == sysctl_string) ||
75683+ (table->strategy == sysctl_string_modpriv) ||
75684 (table->strategy == sysctl_intvec) ||
75685 (table->strategy == sysctl_jiffies) ||
75686 (table->strategy == sysctl_ms_jiffies) ||
75687 (table->proc_handler == proc_dostring) ||
75688+ (table->proc_handler == proc_dostring_modpriv) ||
75689 (table->proc_handler == proc_dointvec) ||
75690 (table->proc_handler == proc_dointvec_minmax) ||
75691 (table->proc_handler == proc_dointvec_jiffies) ||
75692diff --git a/kernel/taskstats.c b/kernel/taskstats.c
75693index a4ef542..798bcd7 100644
75694--- a/kernel/taskstats.c
75695+++ b/kernel/taskstats.c
75696@@ -26,9 +26,12 @@
75697 #include <linux/cgroup.h>
75698 #include <linux/fs.h>
75699 #include <linux/file.h>
75700+#include <linux/grsecurity.h>
75701 #include <net/genetlink.h>
75702 #include <asm/atomic.h>
75703
75704+extern int gr_is_taskstats_denied(int pid);
75705+
75706 /*
75707 * Maximum length of a cpumask that can be specified in
75708 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
75709@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
75710 size_t size;
75711 cpumask_var_t mask;
75712
75713+ if (gr_is_taskstats_denied(current->pid))
75714+ return -EACCES;
75715+
75716 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
75717 return -ENOMEM;
75718
75719diff --git a/kernel/time.c b/kernel/time.c
75720index 33df60e..ca768bd 100644
75721--- a/kernel/time.c
75722+++ b/kernel/time.c
75723@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
75724 return error;
75725
75726 if (tz) {
75727+ /* we log in do_settimeofday called below, so don't log twice
75728+ */
75729+ if (!tv)
75730+ gr_log_timechange();
75731+
75732 /* SMP safe, global irq locking makes it work. */
75733 sys_tz = *tz;
75734 update_vsyscall_tz();
75735@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
75736 * Avoid unnecessary multiplications/divisions in the
75737 * two most common HZ cases:
75738 */
75739-unsigned int inline jiffies_to_msecs(const unsigned long j)
75740+inline unsigned int jiffies_to_msecs(const unsigned long j)
75741 {
75742 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
75743 return (MSEC_PER_SEC / HZ) * j;
75744@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
75745 }
75746 EXPORT_SYMBOL(jiffies_to_msecs);
75747
75748-unsigned int inline jiffies_to_usecs(const unsigned long j)
75749+inline unsigned int jiffies_to_usecs(const unsigned long j)
75750 {
75751 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
75752 return (USEC_PER_SEC / HZ) * j;
75753diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
75754index 57b953f..06f149f 100644
75755--- a/kernel/time/tick-broadcast.c
75756+++ b/kernel/time/tick-broadcast.c
75757@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
75758 * then clear the broadcast bit.
75759 */
75760 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
75761- int cpu = smp_processor_id();
75762+ cpu = smp_processor_id();
75763
75764 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
75765 tick_broadcast_clear_oneshot(cpu);
75766diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
75767index 4a71cff..ffb5548 100644
75768--- a/kernel/time/timekeeping.c
75769+++ b/kernel/time/timekeeping.c
75770@@ -14,6 +14,7 @@
75771 #include <linux/init.h>
75772 #include <linux/mm.h>
75773 #include <linux/sched.h>
75774+#include <linux/grsecurity.h>
75775 #include <linux/sysdev.h>
75776 #include <linux/clocksource.h>
75777 #include <linux/jiffies.h>
75778@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
75779 */
75780 struct timespec ts = xtime;
75781 timespec_add_ns(&ts, nsec);
75782- ACCESS_ONCE(xtime_cache) = ts;
75783+ ACCESS_ONCE_RW(xtime_cache) = ts;
75784 }
75785
75786 /* must hold xtime_lock */
75787@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
75788 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
75789 return -EINVAL;
75790
75791+ gr_log_timechange();
75792+
75793 write_seqlock_irqsave(&xtime_lock, flags);
75794
75795 timekeeping_forward_now();
75796diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
75797index 54c0dda..e9095d9 100644
75798--- a/kernel/time/timer_list.c
75799+++ b/kernel/time/timer_list.c
75800@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
75801
75802 static void print_name_offset(struct seq_file *m, void *sym)
75803 {
75804+#ifdef CONFIG_GRKERNSEC_HIDESYM
75805+ SEQ_printf(m, "<%p>", NULL);
75806+#else
75807 char symname[KSYM_NAME_LEN];
75808
75809 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
75810 SEQ_printf(m, "<%p>", sym);
75811 else
75812 SEQ_printf(m, "%s", symname);
75813+#endif
75814 }
75815
75816 static void
75817@@ -112,7 +116,11 @@ next_one:
75818 static void
75819 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
75820 {
75821+#ifdef CONFIG_GRKERNSEC_HIDESYM
75822+ SEQ_printf(m, " .base: %p\n", NULL);
75823+#else
75824 SEQ_printf(m, " .base: %p\n", base);
75825+#endif
75826 SEQ_printf(m, " .index: %d\n",
75827 base->index);
75828 SEQ_printf(m, " .resolution: %Lu nsecs\n",
75829@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
75830 {
75831 struct proc_dir_entry *pe;
75832
75833+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75834+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
75835+#else
75836 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
75837+#endif
75838 if (!pe)
75839 return -ENOMEM;
75840 return 0;
75841diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
75842index ee5681f..634089b 100644
75843--- a/kernel/time/timer_stats.c
75844+++ b/kernel/time/timer_stats.c
75845@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
75846 static unsigned long nr_entries;
75847 static struct entry entries[MAX_ENTRIES];
75848
75849-static atomic_t overflow_count;
75850+static atomic_unchecked_t overflow_count;
75851
75852 /*
75853 * The entries are in a hash-table, for fast lookup:
75854@@ -140,7 +140,7 @@ static void reset_entries(void)
75855 nr_entries = 0;
75856 memset(entries, 0, sizeof(entries));
75857 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
75858- atomic_set(&overflow_count, 0);
75859+ atomic_set_unchecked(&overflow_count, 0);
75860 }
75861
75862 static struct entry *alloc_entry(void)
75863@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75864 if (likely(entry))
75865 entry->count++;
75866 else
75867- atomic_inc(&overflow_count);
75868+ atomic_inc_unchecked(&overflow_count);
75869
75870 out_unlock:
75871 spin_unlock_irqrestore(lock, flags);
75872@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75873
75874 static void print_name_offset(struct seq_file *m, unsigned long addr)
75875 {
75876+#ifdef CONFIG_GRKERNSEC_HIDESYM
75877+ seq_printf(m, "<%p>", NULL);
75878+#else
75879 char symname[KSYM_NAME_LEN];
75880
75881 if (lookup_symbol_name(addr, symname) < 0)
75882 seq_printf(m, "<%p>", (void *)addr);
75883 else
75884 seq_printf(m, "%s", symname);
75885+#endif
75886 }
75887
75888 static int tstats_show(struct seq_file *m, void *v)
75889@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
75890
75891 seq_puts(m, "Timer Stats Version: v0.2\n");
75892 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
75893- if (atomic_read(&overflow_count))
75894+ if (atomic_read_unchecked(&overflow_count))
75895 seq_printf(m, "Overflow: %d entries\n",
75896- atomic_read(&overflow_count));
75897+ atomic_read_unchecked(&overflow_count));
75898
75899 for (i = 0; i < nr_entries; i++) {
75900 entry = entries + i;
75901@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
75902 {
75903 struct proc_dir_entry *pe;
75904
75905+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75906+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
75907+#else
75908 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
75909+#endif
75910 if (!pe)
75911 return -ENOMEM;
75912 return 0;
75913diff --git a/kernel/timer.c b/kernel/timer.c
75914index cb3c1f1..8bf5526 100644
75915--- a/kernel/timer.c
75916+++ b/kernel/timer.c
75917@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
75918 /*
75919 * This function runs timers and the timer-tq in bottom half context.
75920 */
75921-static void run_timer_softirq(struct softirq_action *h)
75922+static void run_timer_softirq(void)
75923 {
75924 struct tvec_base *base = __get_cpu_var(tvec_bases);
75925
75926diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
75927index d9d6206..f19467e 100644
75928--- a/kernel/trace/blktrace.c
75929+++ b/kernel/trace/blktrace.c
75930@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
75931 struct blk_trace *bt = filp->private_data;
75932 char buf[16];
75933
75934- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
75935+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
75936
75937 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
75938 }
75939@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
75940 return 1;
75941
75942 bt = buf->chan->private_data;
75943- atomic_inc(&bt->dropped);
75944+ atomic_inc_unchecked(&bt->dropped);
75945 return 0;
75946 }
75947
75948@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
75949
75950 bt->dir = dir;
75951 bt->dev = dev;
75952- atomic_set(&bt->dropped, 0);
75953+ atomic_set_unchecked(&bt->dropped, 0);
75954
75955 ret = -EIO;
75956 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
75957diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
75958index 4872937..c794d40 100644
75959--- a/kernel/trace/ftrace.c
75960+++ b/kernel/trace/ftrace.c
75961@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
75962
75963 ip = rec->ip;
75964
75965+ ret = ftrace_arch_code_modify_prepare();
75966+ FTRACE_WARN_ON(ret);
75967+ if (ret)
75968+ return 0;
75969+
75970 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
75971+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
75972 if (ret) {
75973 ftrace_bug(ret, ip);
75974 rec->flags |= FTRACE_FL_FAILED;
75975- return 0;
75976 }
75977- return 1;
75978+ return ret ? 0 : 1;
75979 }
75980
75981 /*
75982diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
75983index e749a05..19c6e94 100644
75984--- a/kernel/trace/ring_buffer.c
75985+++ b/kernel/trace/ring_buffer.c
75986@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
75987 * the reader page). But if the next page is a header page,
75988 * its flags will be non zero.
75989 */
75990-static int inline
75991+static inline int
75992 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
75993 struct buffer_page *page, struct list_head *list)
75994 {
75995diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
75996index a2a2d1f..7f32b09 100644
75997--- a/kernel/trace/trace.c
75998+++ b/kernel/trace/trace.c
75999@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
76000 size_t rem;
76001 unsigned int i;
76002
76003+ pax_track_stack();
76004+
76005 /* copy the tracer to avoid using a global lock all around */
76006 mutex_lock(&trace_types_lock);
76007 if (unlikely(old_tracer != current_trace && current_trace)) {
76008@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
76009 int entries, size, i;
76010 size_t ret;
76011
76012+ pax_track_stack();
76013+
76014 if (*ppos & (PAGE_SIZE - 1)) {
76015 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
76016 return -EINVAL;
76017@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
76018 };
76019 #endif
76020
76021-static struct dentry *d_tracer;
76022-
76023 struct dentry *tracing_init_dentry(void)
76024 {
76025+ static struct dentry *d_tracer;
76026 static int once;
76027
76028 if (d_tracer)
76029@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
76030 return d_tracer;
76031 }
76032
76033-static struct dentry *d_percpu;
76034-
76035 struct dentry *tracing_dentry_percpu(void)
76036 {
76037+ static struct dentry *d_percpu;
76038 static int once;
76039 struct dentry *d_tracer;
76040
76041diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
76042index d128f65..f37b4af 100644
76043--- a/kernel/trace/trace_events.c
76044+++ b/kernel/trace/trace_events.c
76045@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
76046 * Modules must own their file_operations to keep up with
76047 * reference counting.
76048 */
76049+
76050 struct ftrace_module_file_ops {
76051 struct list_head list;
76052 struct module *mod;
76053- struct file_operations id;
76054- struct file_operations enable;
76055- struct file_operations format;
76056- struct file_operations filter;
76057 };
76058
76059 static void remove_subsystem_dir(const char *name)
76060@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
76061
76062 file_ops->mod = mod;
76063
76064- file_ops->id = ftrace_event_id_fops;
76065- file_ops->id.owner = mod;
76066-
76067- file_ops->enable = ftrace_enable_fops;
76068- file_ops->enable.owner = mod;
76069-
76070- file_ops->filter = ftrace_event_filter_fops;
76071- file_ops->filter.owner = mod;
76072-
76073- file_ops->format = ftrace_event_format_fops;
76074- file_ops->format.owner = mod;
76075+ pax_open_kernel();
76076+ *(void **)&mod->trace_id.owner = mod;
76077+ *(void **)&mod->trace_enable.owner = mod;
76078+ *(void **)&mod->trace_filter.owner = mod;
76079+ *(void **)&mod->trace_format.owner = mod;
76080+ pax_close_kernel();
76081
76082 list_add(&file_ops->list, &ftrace_module_file_list);
76083
76084@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
76085 call->mod = mod;
76086 list_add(&call->list, &ftrace_events);
76087 event_create_dir(call, d_events,
76088- &file_ops->id, &file_ops->enable,
76089- &file_ops->filter, &file_ops->format);
76090+ &mod->trace_id, &mod->trace_enable,
76091+ &mod->trace_filter, &mod->trace_format);
76092 }
76093 }
76094
76095diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
76096index 0acd834..b800b56 100644
76097--- a/kernel/trace/trace_mmiotrace.c
76098+++ b/kernel/trace/trace_mmiotrace.c
76099@@ -23,7 +23,7 @@ struct header_iter {
76100 static struct trace_array *mmio_trace_array;
76101 static bool overrun_detected;
76102 static unsigned long prev_overruns;
76103-static atomic_t dropped_count;
76104+static atomic_unchecked_t dropped_count;
76105
76106 static void mmio_reset_data(struct trace_array *tr)
76107 {
76108@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
76109
76110 static unsigned long count_overruns(struct trace_iterator *iter)
76111 {
76112- unsigned long cnt = atomic_xchg(&dropped_count, 0);
76113+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
76114 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
76115
76116 if (over > prev_overruns)
76117@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
76118 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
76119 sizeof(*entry), 0, pc);
76120 if (!event) {
76121- atomic_inc(&dropped_count);
76122+ atomic_inc_unchecked(&dropped_count);
76123 return;
76124 }
76125 entry = ring_buffer_event_data(event);
76126@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
76127 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
76128 sizeof(*entry), 0, pc);
76129 if (!event) {
76130- atomic_inc(&dropped_count);
76131+ atomic_inc_unchecked(&dropped_count);
76132 return;
76133 }
76134 entry = ring_buffer_event_data(event);
76135diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
76136index b6c12c6..41fdc53 100644
76137--- a/kernel/trace/trace_output.c
76138+++ b/kernel/trace/trace_output.c
76139@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
76140 return 0;
76141 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
76142 if (!IS_ERR(p)) {
76143- p = mangle_path(s->buffer + s->len, p, "\n");
76144+ p = mangle_path(s->buffer + s->len, p, "\n\\");
76145 if (p) {
76146 s->len = p - s->buffer;
76147 return 1;
76148diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
76149index 8504ac7..ecf0adb 100644
76150--- a/kernel/trace/trace_stack.c
76151+++ b/kernel/trace/trace_stack.c
76152@@ -50,7 +50,7 @@ static inline void check_stack(void)
76153 return;
76154
76155 /* we do not handle interrupt stacks yet */
76156- if (!object_is_on_stack(&this_size))
76157+ if (!object_starts_on_stack(&this_size))
76158 return;
76159
76160 local_irq_save(flags);
76161diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
76162index 40cafb0..d5ead43 100644
76163--- a/kernel/trace/trace_workqueue.c
76164+++ b/kernel/trace/trace_workqueue.c
76165@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
76166 int cpu;
76167 pid_t pid;
76168 /* Can be inserted from interrupt or user context, need to be atomic */
76169- atomic_t inserted;
76170+ atomic_unchecked_t inserted;
76171 /*
76172 * Don't need to be atomic, works are serialized in a single workqueue thread
76173 * on a single CPU.
76174@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
76175 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76176 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
76177 if (node->pid == wq_thread->pid) {
76178- atomic_inc(&node->inserted);
76179+ atomic_inc_unchecked(&node->inserted);
76180 goto found;
76181 }
76182 }
76183@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
76184 tsk = get_pid_task(pid, PIDTYPE_PID);
76185 if (tsk) {
76186 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
76187- atomic_read(&cws->inserted), cws->executed,
76188+ atomic_read_unchecked(&cws->inserted), cws->executed,
76189 tsk->comm);
76190 put_task_struct(tsk);
76191 }
76192diff --git a/kernel/user.c b/kernel/user.c
76193index 1b91701..8795237 100644
76194--- a/kernel/user.c
76195+++ b/kernel/user.c
76196@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76197 spin_lock_irq(&uidhash_lock);
76198 up = uid_hash_find(uid, hashent);
76199 if (up) {
76200+ put_user_ns(ns);
76201 key_put(new->uid_keyring);
76202 key_put(new->session_keyring);
76203 kmem_cache_free(uid_cachep, new);
76204diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76205index 234ceb1..ad74049 100644
76206--- a/lib/Kconfig.debug
76207+++ b/lib/Kconfig.debug
76208@@ -905,7 +905,7 @@ config LATENCYTOP
76209 select STACKTRACE
76210 select SCHEDSTATS
76211 select SCHED_DEBUG
76212- depends on HAVE_LATENCYTOP_SUPPORT
76213+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76214 help
76215 Enable this option if you want to use the LatencyTOP tool
76216 to find out which userspace is blocking on what kernel operations.
76217diff --git a/lib/bitmap.c b/lib/bitmap.c
76218index 7025658..8d14cab 100644
76219--- a/lib/bitmap.c
76220+++ b/lib/bitmap.c
76221@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76222 {
76223 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76224 u32 chunk;
76225- const char __user *ubuf = buf;
76226+ const char __user *ubuf = (const char __force_user *)buf;
76227
76228 bitmap_zero(maskp, nmaskbits);
76229
76230@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76231 {
76232 if (!access_ok(VERIFY_READ, ubuf, ulen))
76233 return -EFAULT;
76234- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76235+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76236 }
76237 EXPORT_SYMBOL(bitmap_parse_user);
76238
76239diff --git a/lib/bug.c b/lib/bug.c
76240index 300e41a..2779eb0 100644
76241--- a/lib/bug.c
76242+++ b/lib/bug.c
76243@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76244 return BUG_TRAP_TYPE_NONE;
76245
76246 bug = find_bug(bugaddr);
76247+ if (!bug)
76248+ return BUG_TRAP_TYPE_NONE;
76249
76250 printk(KERN_EMERG "------------[ cut here ]------------\n");
76251
76252diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76253index 2b413db..e21d207 100644
76254--- a/lib/debugobjects.c
76255+++ b/lib/debugobjects.c
76256@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76257 if (limit > 4)
76258 return;
76259
76260- is_on_stack = object_is_on_stack(addr);
76261+ is_on_stack = object_starts_on_stack(addr);
76262 if (is_on_stack == onstack)
76263 return;
76264
76265diff --git a/lib/devres.c b/lib/devres.c
76266index 72c8909..7543868 100644
76267--- a/lib/devres.c
76268+++ b/lib/devres.c
76269@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76270 {
76271 iounmap(addr);
76272 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76273- (void *)addr));
76274+ (void __force *)addr));
76275 }
76276 EXPORT_SYMBOL(devm_iounmap);
76277
76278@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76279 {
76280 ioport_unmap(addr);
76281 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76282- devm_ioport_map_match, (void *)addr));
76283+ devm_ioport_map_match, (void __force *)addr));
76284 }
76285 EXPORT_SYMBOL(devm_ioport_unmap);
76286
76287diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76288index 084e879..0674448 100644
76289--- a/lib/dma-debug.c
76290+++ b/lib/dma-debug.c
76291@@ -861,7 +861,7 @@ out:
76292
76293 static void check_for_stack(struct device *dev, void *addr)
76294 {
76295- if (object_is_on_stack(addr))
76296+ if (object_starts_on_stack(addr))
76297 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76298 "stack [addr=%p]\n", addr);
76299 }
76300diff --git a/lib/idr.c b/lib/idr.c
76301index eda7ba3..915dfae 100644
76302--- a/lib/idr.c
76303+++ b/lib/idr.c
76304@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76305 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76306
76307 /* if already at the top layer, we need to grow */
76308- if (id >= 1 << (idp->layers * IDR_BITS)) {
76309+ if (id >= (1 << (idp->layers * IDR_BITS))) {
76310 *starting_id = id;
76311 return IDR_NEED_TO_GROW;
76312 }
76313diff --git a/lib/inflate.c b/lib/inflate.c
76314index d102559..4215f31 100644
76315--- a/lib/inflate.c
76316+++ b/lib/inflate.c
76317@@ -266,7 +266,7 @@ static void free(void *where)
76318 malloc_ptr = free_mem_ptr;
76319 }
76320 #else
76321-#define malloc(a) kmalloc(a, GFP_KERNEL)
76322+#define malloc(a) kmalloc((a), GFP_KERNEL)
76323 #define free(a) kfree(a)
76324 #endif
76325
76326diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
76327index bd2bea9..6b3c95e 100644
76328--- a/lib/is_single_threaded.c
76329+++ b/lib/is_single_threaded.c
76330@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
76331 struct task_struct *p, *t;
76332 bool ret;
76333
76334+ if (!mm)
76335+ return true;
76336+
76337 if (atomic_read(&task->signal->live) != 1)
76338 return false;
76339
76340diff --git a/lib/kobject.c b/lib/kobject.c
76341index b512b74..8115eb1 100644
76342--- a/lib/kobject.c
76343+++ b/lib/kobject.c
76344@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76345 return ret;
76346 }
76347
76348-struct sysfs_ops kobj_sysfs_ops = {
76349+const struct sysfs_ops kobj_sysfs_ops = {
76350 .show = kobj_attr_show,
76351 .store = kobj_attr_store,
76352 };
76353@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76354 * If the kset was not able to be created, NULL will be returned.
76355 */
76356 static struct kset *kset_create(const char *name,
76357- struct kset_uevent_ops *uevent_ops,
76358+ const struct kset_uevent_ops *uevent_ops,
76359 struct kobject *parent_kobj)
76360 {
76361 struct kset *kset;
76362@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76363 * If the kset was not able to be created, NULL will be returned.
76364 */
76365 struct kset *kset_create_and_add(const char *name,
76366- struct kset_uevent_ops *uevent_ops,
76367+ const struct kset_uevent_ops *uevent_ops,
76368 struct kobject *parent_kobj)
76369 {
76370 struct kset *kset;
76371diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76372index 507b821..0bf8ed0 100644
76373--- a/lib/kobject_uevent.c
76374+++ b/lib/kobject_uevent.c
76375@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76376 const char *subsystem;
76377 struct kobject *top_kobj;
76378 struct kset *kset;
76379- struct kset_uevent_ops *uevent_ops;
76380+ const struct kset_uevent_ops *uevent_ops;
76381 u64 seq;
76382 int i = 0;
76383 int retval = 0;
76384diff --git a/lib/kref.c b/lib/kref.c
76385index 9ecd6e8..12c94c1 100644
76386--- a/lib/kref.c
76387+++ b/lib/kref.c
76388@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
76389 */
76390 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
76391 {
76392- WARN_ON(release == NULL);
76393+ BUG_ON(release == NULL);
76394 WARN_ON(release == (void (*)(struct kref *))kfree);
76395
76396 if (atomic_dec_and_test(&kref->refcount)) {
76397diff --git a/lib/parser.c b/lib/parser.c
76398index b00d020..1b34325 100644
76399--- a/lib/parser.c
76400+++ b/lib/parser.c
76401@@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
76402 char *buf;
76403 int ret;
76404
76405- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
76406+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
76407 if (!buf)
76408 return -ENOMEM;
76409 memcpy(buf, s->from, s->to - s->from);
76410diff --git a/lib/radix-tree.c b/lib/radix-tree.c
76411index 92cdd99..a8149d7 100644
76412--- a/lib/radix-tree.c
76413+++ b/lib/radix-tree.c
76414@@ -81,7 +81,7 @@ struct radix_tree_preload {
76415 int nr;
76416 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
76417 };
76418-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
76419+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
76420
76421 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
76422 {
76423diff --git a/lib/random32.c b/lib/random32.c
76424index 217d5c4..45aba8a 100644
76425--- a/lib/random32.c
76426+++ b/lib/random32.c
76427@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
76428 */
76429 static inline u32 __seed(u32 x, u32 m)
76430 {
76431- return (x < m) ? x + m : x;
76432+ return (x <= m) ? x + m + 1 : x;
76433 }
76434
76435 /**
76436diff --git a/lib/vsprintf.c b/lib/vsprintf.c
76437index 33bed5e..1477e46 100644
76438--- a/lib/vsprintf.c
76439+++ b/lib/vsprintf.c
76440@@ -16,6 +16,9 @@
76441 * - scnprintf and vscnprintf
76442 */
76443
76444+#ifdef CONFIG_GRKERNSEC_HIDESYM
76445+#define __INCLUDED_BY_HIDESYM 1
76446+#endif
76447 #include <stdarg.h>
76448 #include <linux/module.h>
76449 #include <linux/types.h>
76450@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
76451 return buf;
76452 }
76453
76454-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
76455+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
76456 {
76457 int len, i;
76458
76459 if ((unsigned long)s < PAGE_SIZE)
76460- s = "<NULL>";
76461+ s = "(null)";
76462
76463 len = strnlen(s, spec.precision);
76464
76465@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
76466 unsigned long value = (unsigned long) ptr;
76467 #ifdef CONFIG_KALLSYMS
76468 char sym[KSYM_SYMBOL_LEN];
76469- if (ext != 'f' && ext != 's')
76470+ if (ext != 'f' && ext != 's' && ext != 'a')
76471 sprint_symbol(sym, value);
76472 else
76473 kallsyms_lookup(value, NULL, NULL, NULL, sym);
76474@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
76475 * - 'f' For simple symbolic function names without offset
76476 * - 'S' For symbolic direct pointers with offset
76477 * - 's' For symbolic direct pointers without offset
76478+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
76479+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
76480 * - 'R' For a struct resource pointer, it prints the range of
76481 * addresses (not the name nor the flags)
76482 * - 'M' For a 6-byte MAC address, it prints the address in the
76483@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76484 struct printf_spec spec)
76485 {
76486 if (!ptr)
76487- return string(buf, end, "(null)", spec);
76488+ return string(buf, end, "(nil)", spec);
76489
76490 switch (*fmt) {
76491 case 'F':
76492@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76493 case 's':
76494 /* Fallthrough */
76495 case 'S':
76496+#ifdef CONFIG_GRKERNSEC_HIDESYM
76497+ break;
76498+#else
76499+ return symbol_string(buf, end, ptr, spec, *fmt);
76500+#endif
76501+ case 'a':
76502+ /* Fallthrough */
76503+ case 'A':
76504 return symbol_string(buf, end, ptr, spec, *fmt);
76505 case 'R':
76506 return resource_string(buf, end, ptr, spec);
76507@@ -1445,7 +1458,7 @@ do { \
76508 size_t len;
76509 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
76510 || (unsigned long)save_str < PAGE_SIZE)
76511- save_str = "<NULL>";
76512+ save_str = "(null)";
76513 len = strlen(save_str);
76514 if (str + len + 1 < end)
76515 memcpy(str, save_str, len + 1);
76516@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76517 typeof(type) value; \
76518 if (sizeof(type) == 8) { \
76519 args = PTR_ALIGN(args, sizeof(u32)); \
76520- *(u32 *)&value = *(u32 *)args; \
76521- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
76522+ *(u32 *)&value = *(const u32 *)args; \
76523+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
76524 } else { \
76525 args = PTR_ALIGN(args, sizeof(type)); \
76526- value = *(typeof(type) *)args; \
76527+ value = *(const typeof(type) *)args; \
76528 } \
76529 args += sizeof(type); \
76530 value; \
76531@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76532 const char *str_arg = args;
76533 size_t len = strlen(str_arg);
76534 args += len + 1;
76535- str = string(str, end, (char *)str_arg, spec);
76536+ str = string(str, end, str_arg, spec);
76537 break;
76538 }
76539
76540diff --git a/localversion-grsec b/localversion-grsec
76541new file mode 100644
76542index 0000000..7cd6065
76543--- /dev/null
76544+++ b/localversion-grsec
76545@@ -0,0 +1 @@
76546+-grsec
76547diff --git a/mm/Kconfig b/mm/Kconfig
76548index 2c19c0b..f3c3f83 100644
76549--- a/mm/Kconfig
76550+++ b/mm/Kconfig
76551@@ -228,7 +228,7 @@ config KSM
76552 config DEFAULT_MMAP_MIN_ADDR
76553 int "Low address space to protect from user allocation"
76554 depends on MMU
76555- default 4096
76556+ default 65536
76557 help
76558 This is the portion of low virtual memory which should be protected
76559 from userspace allocation. Keeping a user from writing to low pages
76560diff --git a/mm/backing-dev.c b/mm/backing-dev.c
76561index 67a33a5..094dcf1 100644
76562--- a/mm/backing-dev.c
76563+++ b/mm/backing-dev.c
76564@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
76565 list_add_tail_rcu(&wb->list, &bdi->wb_list);
76566 spin_unlock(&bdi->wb_lock);
76567
76568- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
76569+ tsk->flags |= PF_SWAPWRITE;
76570 set_freezable();
76571
76572 /*
76573@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
76574 * Add the default flusher task that gets created for any bdi
76575 * that has dirty data pending writeout
76576 */
76577-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76578+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76579 {
76580 if (!bdi_cap_writeback_dirty(bdi))
76581 return;
76582diff --git a/mm/filemap.c b/mm/filemap.c
76583index a1fe378..e26702f 100644
76584--- a/mm/filemap.c
76585+++ b/mm/filemap.c
76586@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
76587 struct address_space *mapping = file->f_mapping;
76588
76589 if (!mapping->a_ops->readpage)
76590- return -ENOEXEC;
76591+ return -ENODEV;
76592 file_accessed(file);
76593 vma->vm_ops = &generic_file_vm_ops;
76594 vma->vm_flags |= VM_CAN_NONLINEAR;
76595@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
76596 *pos = i_size_read(inode);
76597
76598 if (limit != RLIM_INFINITY) {
76599+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
76600 if (*pos >= limit) {
76601 send_sig(SIGXFSZ, current, 0);
76602 return -EFBIG;
76603diff --git a/mm/fremap.c b/mm/fremap.c
76604index b6ec85a..a24ac22 100644
76605--- a/mm/fremap.c
76606+++ b/mm/fremap.c
76607@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76608 retry:
76609 vma = find_vma(mm, start);
76610
76611+#ifdef CONFIG_PAX_SEGMEXEC
76612+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
76613+ goto out;
76614+#endif
76615+
76616 /*
76617 * Make sure the vma is shared, that it supports prefaulting,
76618 * and that the remapped range is valid and fully within
76619@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76620 /*
76621 * drop PG_Mlocked flag for over-mapped range
76622 */
76623- unsigned int saved_flags = vma->vm_flags;
76624+ unsigned long saved_flags = vma->vm_flags;
76625 munlock_vma_pages_range(vma, start, start + size);
76626 vma->vm_flags = saved_flags;
76627 }
76628diff --git a/mm/highmem.c b/mm/highmem.c
76629index 9c1e627..5ca9447 100644
76630--- a/mm/highmem.c
76631+++ b/mm/highmem.c
76632@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
76633 * So no dangers, even with speculative execution.
76634 */
76635 page = pte_page(pkmap_page_table[i]);
76636+ pax_open_kernel();
76637 pte_clear(&init_mm, (unsigned long)page_address(page),
76638 &pkmap_page_table[i]);
76639-
76640+ pax_close_kernel();
76641 set_page_address(page, NULL);
76642 need_flush = 1;
76643 }
76644@@ -177,9 +178,11 @@ start:
76645 }
76646 }
76647 vaddr = PKMAP_ADDR(last_pkmap_nr);
76648+
76649+ pax_open_kernel();
76650 set_pte_at(&init_mm, vaddr,
76651 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
76652-
76653+ pax_close_kernel();
76654 pkmap_count[last_pkmap_nr] = 1;
76655 set_page_address(page, (void *)vaddr);
76656
76657diff --git a/mm/hugetlb.c b/mm/hugetlb.c
76658index 5e1e508..ac70275 100644
76659--- a/mm/hugetlb.c
76660+++ b/mm/hugetlb.c
76661@@ -869,6 +869,7 @@ free:
76662 list_del(&page->lru);
76663 enqueue_huge_page(h, page);
76664 }
76665+ spin_unlock(&hugetlb_lock);
76666
76667 /* Free unnecessary surplus pages to the buddy allocator */
76668 if (!list_empty(&surplus_list)) {
76669@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
76670 return 1;
76671 }
76672
76673+#ifdef CONFIG_PAX_SEGMEXEC
76674+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
76675+{
76676+ struct mm_struct *mm = vma->vm_mm;
76677+ struct vm_area_struct *vma_m;
76678+ unsigned long address_m;
76679+ pte_t *ptep_m;
76680+
76681+ vma_m = pax_find_mirror_vma(vma);
76682+ if (!vma_m)
76683+ return;
76684+
76685+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76686+ address_m = address + SEGMEXEC_TASK_SIZE;
76687+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
76688+ get_page(page_m);
76689+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
76690+}
76691+#endif
76692+
76693 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
76694 unsigned long address, pte_t *ptep, pte_t pte,
76695 struct page *pagecache_page)
76696@@ -2004,6 +2025,11 @@ retry_avoidcopy:
76697 huge_ptep_clear_flush(vma, address, ptep);
76698 set_huge_pte_at(mm, address, ptep,
76699 make_huge_pte(vma, new_page, 1));
76700+
76701+#ifdef CONFIG_PAX_SEGMEXEC
76702+ pax_mirror_huge_pte(vma, address, new_page);
76703+#endif
76704+
76705 /* Make the old page be freed below */
76706 new_page = old_page;
76707 }
76708@@ -2135,6 +2161,10 @@ retry:
76709 && (vma->vm_flags & VM_SHARED)));
76710 set_huge_pte_at(mm, address, ptep, new_pte);
76711
76712+#ifdef CONFIG_PAX_SEGMEXEC
76713+ pax_mirror_huge_pte(vma, address, page);
76714+#endif
76715+
76716 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
76717 /* Optimization, do the COW without a second fault */
76718 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
76719@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76720 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
76721 struct hstate *h = hstate_vma(vma);
76722
76723+#ifdef CONFIG_PAX_SEGMEXEC
76724+ struct vm_area_struct *vma_m;
76725+
76726+ vma_m = pax_find_mirror_vma(vma);
76727+ if (vma_m) {
76728+ unsigned long address_m;
76729+
76730+ if (vma->vm_start > vma_m->vm_start) {
76731+ address_m = address;
76732+ address -= SEGMEXEC_TASK_SIZE;
76733+ vma = vma_m;
76734+ h = hstate_vma(vma);
76735+ } else
76736+ address_m = address + SEGMEXEC_TASK_SIZE;
76737+
76738+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
76739+ return VM_FAULT_OOM;
76740+ address_m &= HPAGE_MASK;
76741+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
76742+ }
76743+#endif
76744+
76745 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
76746 if (!ptep)
76747 return VM_FAULT_OOM;
76748diff --git a/mm/internal.h b/mm/internal.h
76749index f03e8e2..7354343 100644
76750--- a/mm/internal.h
76751+++ b/mm/internal.h
76752@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
76753 * in mm/page_alloc.c
76754 */
76755 extern void __free_pages_bootmem(struct page *page, unsigned int order);
76756+extern void free_compound_page(struct page *page);
76757 extern void prep_compound_page(struct page *page, unsigned long order);
76758
76759
76760diff --git a/mm/kmemleak.c b/mm/kmemleak.c
76761index c346660..b47382f 100644
76762--- a/mm/kmemleak.c
76763+++ b/mm/kmemleak.c
76764@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
76765
76766 for (i = 0; i < object->trace_len; i++) {
76767 void *ptr = (void *)object->trace[i];
76768- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
76769+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
76770 }
76771 }
76772
76773diff --git a/mm/maccess.c b/mm/maccess.c
76774index 9073695..1127f348 100644
76775--- a/mm/maccess.c
76776+++ b/mm/maccess.c
76777@@ -14,7 +14,7 @@
76778 * Safely read from address @src to the buffer at @dst. If a kernel fault
76779 * happens, handle that and return -EFAULT.
76780 */
76781-long probe_kernel_read(void *dst, void *src, size_t size)
76782+long probe_kernel_read(void *dst, const void *src, size_t size)
76783 {
76784 long ret;
76785 mm_segment_t old_fs = get_fs();
76786@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
76787 set_fs(KERNEL_DS);
76788 pagefault_disable();
76789 ret = __copy_from_user_inatomic(dst,
76790- (__force const void __user *)src, size);
76791+ (const void __force_user *)src, size);
76792 pagefault_enable();
76793 set_fs(old_fs);
76794
76795@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
76796 * Safely write to address @dst from the buffer at @src. If a kernel fault
76797 * happens, handle that and return -EFAULT.
76798 */
76799-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
76800+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
76801 {
76802 long ret;
76803 mm_segment_t old_fs = get_fs();
76804
76805 set_fs(KERNEL_DS);
76806 pagefault_disable();
76807- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
76808+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
76809 pagefault_enable();
76810 set_fs(old_fs);
76811
76812diff --git a/mm/madvise.c b/mm/madvise.c
76813index 35b1479..499f7d4 100644
76814--- a/mm/madvise.c
76815+++ b/mm/madvise.c
76816@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
76817 pgoff_t pgoff;
76818 unsigned long new_flags = vma->vm_flags;
76819
76820+#ifdef CONFIG_PAX_SEGMEXEC
76821+ struct vm_area_struct *vma_m;
76822+#endif
76823+
76824 switch (behavior) {
76825 case MADV_NORMAL:
76826 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
76827@@ -103,6 +107,13 @@ success:
76828 /*
76829 * vm_flags is protected by the mmap_sem held in write mode.
76830 */
76831+
76832+#ifdef CONFIG_PAX_SEGMEXEC
76833+ vma_m = pax_find_mirror_vma(vma);
76834+ if (vma_m)
76835+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
76836+#endif
76837+
76838 vma->vm_flags = new_flags;
76839
76840 out:
76841@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76842 struct vm_area_struct ** prev,
76843 unsigned long start, unsigned long end)
76844 {
76845+
76846+#ifdef CONFIG_PAX_SEGMEXEC
76847+ struct vm_area_struct *vma_m;
76848+#endif
76849+
76850 *prev = vma;
76851 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
76852 return -EINVAL;
76853@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76854 zap_page_range(vma, start, end - start, &details);
76855 } else
76856 zap_page_range(vma, start, end - start, NULL);
76857+
76858+#ifdef CONFIG_PAX_SEGMEXEC
76859+ vma_m = pax_find_mirror_vma(vma);
76860+ if (vma_m) {
76861+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
76862+ struct zap_details details = {
76863+ .nonlinear_vma = vma_m,
76864+ .last_index = ULONG_MAX,
76865+ };
76866+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
76867+ } else
76868+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
76869+ }
76870+#endif
76871+
76872 return 0;
76873 }
76874
76875@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
76876 if (end < start)
76877 goto out;
76878
76879+#ifdef CONFIG_PAX_SEGMEXEC
76880+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
76881+ if (end > SEGMEXEC_TASK_SIZE)
76882+ goto out;
76883+ } else
76884+#endif
76885+
76886+ if (end > TASK_SIZE)
76887+ goto out;
76888+
76889 error = 0;
76890 if (end == start)
76891 goto out;
76892diff --git a/mm/memory-failure.c b/mm/memory-failure.c
76893index 8aeba53..b4a4198 100644
76894--- a/mm/memory-failure.c
76895+++ b/mm/memory-failure.c
76896@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
76897
76898 int sysctl_memory_failure_recovery __read_mostly = 1;
76899
76900-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76901+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76902
76903 /*
76904 * Send all the processes who have the page mapped an ``action optional''
76905@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
76906 si.si_signo = SIGBUS;
76907 si.si_errno = 0;
76908 si.si_code = BUS_MCEERR_AO;
76909- si.si_addr = (void *)addr;
76910+ si.si_addr = (void __user *)addr;
76911 #ifdef __ARCH_SI_TRAPNO
76912 si.si_trapno = trapno;
76913 #endif
76914@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
76915 return 0;
76916 }
76917
76918- atomic_long_add(1, &mce_bad_pages);
76919+ atomic_long_add_unchecked(1, &mce_bad_pages);
76920
76921 /*
76922 * We need/can do nothing about count=0 pages.
76923diff --git a/mm/memory.c b/mm/memory.c
76924index 6c836d3..48f3264 100644
76925--- a/mm/memory.c
76926+++ b/mm/memory.c
76927@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
76928 return;
76929
76930 pmd = pmd_offset(pud, start);
76931+
76932+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
76933 pud_clear(pud);
76934 pmd_free_tlb(tlb, pmd, start);
76935+#endif
76936+
76937 }
76938
76939 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76940@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76941 if (end - 1 > ceiling - 1)
76942 return;
76943
76944+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
76945 pud = pud_offset(pgd, start);
76946 pgd_clear(pgd);
76947 pud_free_tlb(tlb, pud, start);
76948+#endif
76949+
76950 }
76951
76952 /*
76953@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76954 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
76955 i = 0;
76956
76957- do {
76958+ while (nr_pages) {
76959 struct vm_area_struct *vma;
76960
76961- vma = find_extend_vma(mm, start);
76962+ vma = find_vma(mm, start);
76963 if (!vma && in_gate_area(tsk, start)) {
76964 unsigned long pg = start & PAGE_MASK;
76965 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
76966@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76967 continue;
76968 }
76969
76970- if (!vma ||
76971+ if (!vma || start < vma->vm_start ||
76972 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
76973 !(vm_flags & vma->vm_flags))
76974 return i ? : -EFAULT;
76975@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76976 start += PAGE_SIZE;
76977 nr_pages--;
76978 } while (nr_pages && start < vma->vm_end);
76979- } while (nr_pages);
76980+ }
76981 return i;
76982 }
76983
76984@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
76985 page_add_file_rmap(page);
76986 set_pte_at(mm, addr, pte, mk_pte(page, prot));
76987
76988+#ifdef CONFIG_PAX_SEGMEXEC
76989+ pax_mirror_file_pte(vma, addr, page, ptl);
76990+#endif
76991+
76992 retval = 0;
76993 pte_unmap_unlock(pte, ptl);
76994 return retval;
76995@@ -1560,10 +1571,22 @@ out:
76996 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
76997 struct page *page)
76998 {
76999+
77000+#ifdef CONFIG_PAX_SEGMEXEC
77001+ struct vm_area_struct *vma_m;
77002+#endif
77003+
77004 if (addr < vma->vm_start || addr >= vma->vm_end)
77005 return -EFAULT;
77006 if (!page_count(page))
77007 return -EINVAL;
77008+
77009+#ifdef CONFIG_PAX_SEGMEXEC
77010+ vma_m = pax_find_mirror_vma(vma);
77011+ if (vma_m)
77012+ vma_m->vm_flags |= VM_INSERTPAGE;
77013+#endif
77014+
77015 vma->vm_flags |= VM_INSERTPAGE;
77016 return insert_page(vma, addr, page, vma->vm_page_prot);
77017 }
77018@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
77019 unsigned long pfn)
77020 {
77021 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
77022+ BUG_ON(vma->vm_mirror);
77023
77024 if (addr < vma->vm_start || addr >= vma->vm_end)
77025 return -EFAULT;
77026@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
77027 copy_user_highpage(dst, src, va, vma);
77028 }
77029
77030+#ifdef CONFIG_PAX_SEGMEXEC
77031+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
77032+{
77033+ struct mm_struct *mm = vma->vm_mm;
77034+ spinlock_t *ptl;
77035+ pte_t *pte, entry;
77036+
77037+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
77038+ entry = *pte;
77039+ if (!pte_present(entry)) {
77040+ if (!pte_none(entry)) {
77041+ BUG_ON(pte_file(entry));
77042+ free_swap_and_cache(pte_to_swp_entry(entry));
77043+ pte_clear_not_present_full(mm, address, pte, 0);
77044+ }
77045+ } else {
77046+ struct page *page;
77047+
77048+ flush_cache_page(vma, address, pte_pfn(entry));
77049+ entry = ptep_clear_flush(vma, address, pte);
77050+ BUG_ON(pte_dirty(entry));
77051+ page = vm_normal_page(vma, address, entry);
77052+ if (page) {
77053+ update_hiwater_rss(mm);
77054+ if (PageAnon(page))
77055+ dec_mm_counter(mm, anon_rss);
77056+ else
77057+ dec_mm_counter(mm, file_rss);
77058+ page_remove_rmap(page);
77059+ page_cache_release(page);
77060+ }
77061+ }
77062+ pte_unmap_unlock(pte, ptl);
77063+}
77064+
77065+/* PaX: if vma is mirrored, synchronize the mirror's PTE
77066+ *
77067+ * the ptl of the lower mapped page is held on entry and is not released on exit
77068+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
77069+ */
77070+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77071+{
77072+ struct mm_struct *mm = vma->vm_mm;
77073+ unsigned long address_m;
77074+ spinlock_t *ptl_m;
77075+ struct vm_area_struct *vma_m;
77076+ pmd_t *pmd_m;
77077+ pte_t *pte_m, entry_m;
77078+
77079+ BUG_ON(!page_m || !PageAnon(page_m));
77080+
77081+ vma_m = pax_find_mirror_vma(vma);
77082+ if (!vma_m)
77083+ return;
77084+
77085+ BUG_ON(!PageLocked(page_m));
77086+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77087+ address_m = address + SEGMEXEC_TASK_SIZE;
77088+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77089+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77090+ ptl_m = pte_lockptr(mm, pmd_m);
77091+ if (ptl != ptl_m) {
77092+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77093+ if (!pte_none(*pte_m))
77094+ goto out;
77095+ }
77096+
77097+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77098+ page_cache_get(page_m);
77099+ page_add_anon_rmap(page_m, vma_m, address_m);
77100+ inc_mm_counter(mm, anon_rss);
77101+ set_pte_at(mm, address_m, pte_m, entry_m);
77102+ update_mmu_cache(vma_m, address_m, entry_m);
77103+out:
77104+ if (ptl != ptl_m)
77105+ spin_unlock(ptl_m);
77106+ pte_unmap_nested(pte_m);
77107+ unlock_page(page_m);
77108+}
77109+
77110+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77111+{
77112+ struct mm_struct *mm = vma->vm_mm;
77113+ unsigned long address_m;
77114+ spinlock_t *ptl_m;
77115+ struct vm_area_struct *vma_m;
77116+ pmd_t *pmd_m;
77117+ pte_t *pte_m, entry_m;
77118+
77119+ BUG_ON(!page_m || PageAnon(page_m));
77120+
77121+ vma_m = pax_find_mirror_vma(vma);
77122+ if (!vma_m)
77123+ return;
77124+
77125+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77126+ address_m = address + SEGMEXEC_TASK_SIZE;
77127+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77128+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77129+ ptl_m = pte_lockptr(mm, pmd_m);
77130+ if (ptl != ptl_m) {
77131+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77132+ if (!pte_none(*pte_m))
77133+ goto out;
77134+ }
77135+
77136+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77137+ page_cache_get(page_m);
77138+ page_add_file_rmap(page_m);
77139+ inc_mm_counter(mm, file_rss);
77140+ set_pte_at(mm, address_m, pte_m, entry_m);
77141+ update_mmu_cache(vma_m, address_m, entry_m);
77142+out:
77143+ if (ptl != ptl_m)
77144+ spin_unlock(ptl_m);
77145+ pte_unmap_nested(pte_m);
77146+}
77147+
77148+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
77149+{
77150+ struct mm_struct *mm = vma->vm_mm;
77151+ unsigned long address_m;
77152+ spinlock_t *ptl_m;
77153+ struct vm_area_struct *vma_m;
77154+ pmd_t *pmd_m;
77155+ pte_t *pte_m, entry_m;
77156+
77157+ vma_m = pax_find_mirror_vma(vma);
77158+ if (!vma_m)
77159+ return;
77160+
77161+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77162+ address_m = address + SEGMEXEC_TASK_SIZE;
77163+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77164+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77165+ ptl_m = pte_lockptr(mm, pmd_m);
77166+ if (ptl != ptl_m) {
77167+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77168+ if (!pte_none(*pte_m))
77169+ goto out;
77170+ }
77171+
77172+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
77173+ set_pte_at(mm, address_m, pte_m, entry_m);
77174+out:
77175+ if (ptl != ptl_m)
77176+ spin_unlock(ptl_m);
77177+ pte_unmap_nested(pte_m);
77178+}
77179+
77180+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
77181+{
77182+ struct page *page_m;
77183+ pte_t entry;
77184+
77185+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
77186+ goto out;
77187+
77188+ entry = *pte;
77189+ page_m = vm_normal_page(vma, address, entry);
77190+ if (!page_m)
77191+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
77192+ else if (PageAnon(page_m)) {
77193+ if (pax_find_mirror_vma(vma)) {
77194+ pte_unmap_unlock(pte, ptl);
77195+ lock_page(page_m);
77196+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
77197+ if (pte_same(entry, *pte))
77198+ pax_mirror_anon_pte(vma, address, page_m, ptl);
77199+ else
77200+ unlock_page(page_m);
77201+ }
77202+ } else
77203+ pax_mirror_file_pte(vma, address, page_m, ptl);
77204+
77205+out:
77206+ pte_unmap_unlock(pte, ptl);
77207+}
77208+#endif
77209+
77210 /*
77211 * This routine handles present pages, when users try to write
77212 * to a shared page. It is done by copying the page to a new address
77213@@ -2156,6 +2360,12 @@ gotten:
77214 */
77215 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77216 if (likely(pte_same(*page_table, orig_pte))) {
77217+
77218+#ifdef CONFIG_PAX_SEGMEXEC
77219+ if (pax_find_mirror_vma(vma))
77220+ BUG_ON(!trylock_page(new_page));
77221+#endif
77222+
77223 if (old_page) {
77224 if (!PageAnon(old_page)) {
77225 dec_mm_counter(mm, file_rss);
77226@@ -2207,6 +2417,10 @@ gotten:
77227 page_remove_rmap(old_page);
77228 }
77229
77230+#ifdef CONFIG_PAX_SEGMEXEC
77231+ pax_mirror_anon_pte(vma, address, new_page, ptl);
77232+#endif
77233+
77234 /* Free the old page.. */
77235 new_page = old_page;
77236 ret |= VM_FAULT_WRITE;
77237@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77238 swap_free(entry);
77239 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77240 try_to_free_swap(page);
77241+
77242+#ifdef CONFIG_PAX_SEGMEXEC
77243+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77244+#endif
77245+
77246 unlock_page(page);
77247
77248 if (flags & FAULT_FLAG_WRITE) {
77249@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77250
77251 /* No need to invalidate - it was non-present before */
77252 update_mmu_cache(vma, address, pte);
77253+
77254+#ifdef CONFIG_PAX_SEGMEXEC
77255+ pax_mirror_anon_pte(vma, address, page, ptl);
77256+#endif
77257+
77258 unlock:
77259 pte_unmap_unlock(page_table, ptl);
77260 out:
77261@@ -2632,40 +2856,6 @@ out_release:
77262 }
77263
77264 /*
77265- * This is like a special single-page "expand_{down|up}wards()",
77266- * except we must first make sure that 'address{-|+}PAGE_SIZE'
77267- * doesn't hit another vma.
77268- */
77269-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77270-{
77271- address &= PAGE_MASK;
77272- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77273- struct vm_area_struct *prev = vma->vm_prev;
77274-
77275- /*
77276- * Is there a mapping abutting this one below?
77277- *
77278- * That's only ok if it's the same stack mapping
77279- * that has gotten split..
77280- */
77281- if (prev && prev->vm_end == address)
77282- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77283-
77284- expand_stack(vma, address - PAGE_SIZE);
77285- }
77286- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77287- struct vm_area_struct *next = vma->vm_next;
77288-
77289- /* As VM_GROWSDOWN but s/below/above/ */
77290- if (next && next->vm_start == address + PAGE_SIZE)
77291- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77292-
77293- expand_upwards(vma, address + PAGE_SIZE);
77294- }
77295- return 0;
77296-}
77297-
77298-/*
77299 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77300 * but allow concurrent faults), and pte mapped but not yet locked.
77301 * We return with mmap_sem still held, but pte unmapped and unlocked.
77302@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77303 unsigned long address, pte_t *page_table, pmd_t *pmd,
77304 unsigned int flags)
77305 {
77306- struct page *page;
77307+ struct page *page = NULL;
77308 spinlock_t *ptl;
77309 pte_t entry;
77310
77311- pte_unmap(page_table);
77312-
77313- /* Check if we need to add a guard page to the stack */
77314- if (check_stack_guard_page(vma, address) < 0)
77315- return VM_FAULT_SIGBUS;
77316-
77317- /* Use the zero-page for reads */
77318 if (!(flags & FAULT_FLAG_WRITE)) {
77319 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77320 vma->vm_page_prot));
77321- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77322+ ptl = pte_lockptr(mm, pmd);
77323+ spin_lock(ptl);
77324 if (!pte_none(*page_table))
77325 goto unlock;
77326 goto setpte;
77327 }
77328
77329 /* Allocate our own private page. */
77330+ pte_unmap(page_table);
77331+
77332 if (unlikely(anon_vma_prepare(vma)))
77333 goto oom;
77334 page = alloc_zeroed_user_highpage_movable(vma, address);
77335@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77336 if (!pte_none(*page_table))
77337 goto release;
77338
77339+#ifdef CONFIG_PAX_SEGMEXEC
77340+ if (pax_find_mirror_vma(vma))
77341+ BUG_ON(!trylock_page(page));
77342+#endif
77343+
77344 inc_mm_counter(mm, anon_rss);
77345 page_add_new_anon_rmap(page, vma, address);
77346 setpte:
77347@@ -2720,6 +2911,12 @@ setpte:
77348
77349 /* No need to invalidate - it was non-present before */
77350 update_mmu_cache(vma, address, entry);
77351+
77352+#ifdef CONFIG_PAX_SEGMEXEC
77353+ if (page)
77354+ pax_mirror_anon_pte(vma, address, page, ptl);
77355+#endif
77356+
77357 unlock:
77358 pte_unmap_unlock(page_table, ptl);
77359 return 0;
77360@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77361 */
77362 /* Only go through if we didn't race with anybody else... */
77363 if (likely(pte_same(*page_table, orig_pte))) {
77364+
77365+#ifdef CONFIG_PAX_SEGMEXEC
77366+ if (anon && pax_find_mirror_vma(vma))
77367+ BUG_ON(!trylock_page(page));
77368+#endif
77369+
77370 flush_icache_page(vma, page);
77371 entry = mk_pte(page, vma->vm_page_prot);
77372 if (flags & FAULT_FLAG_WRITE)
77373@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77374
77375 /* no need to invalidate: a not-present page won't be cached */
77376 update_mmu_cache(vma, address, entry);
77377+
77378+#ifdef CONFIG_PAX_SEGMEXEC
77379+ if (anon)
77380+ pax_mirror_anon_pte(vma, address, page, ptl);
77381+ else
77382+ pax_mirror_file_pte(vma, address, page, ptl);
77383+#endif
77384+
77385 } else {
77386 if (charged)
77387 mem_cgroup_uncharge_page(page);
77388@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
77389 if (flags & FAULT_FLAG_WRITE)
77390 flush_tlb_page(vma, address);
77391 }
77392+
77393+#ifdef CONFIG_PAX_SEGMEXEC
77394+ pax_mirror_pte(vma, address, pte, pmd, ptl);
77395+ return 0;
77396+#endif
77397+
77398 unlock:
77399 pte_unmap_unlock(pte, ptl);
77400 return 0;
77401@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77402 pmd_t *pmd;
77403 pte_t *pte;
77404
77405+#ifdef CONFIG_PAX_SEGMEXEC
77406+ struct vm_area_struct *vma_m;
77407+#endif
77408+
77409 __set_current_state(TASK_RUNNING);
77410
77411 count_vm_event(PGFAULT);
77412@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77413 if (unlikely(is_vm_hugetlb_page(vma)))
77414 return hugetlb_fault(mm, vma, address, flags);
77415
77416+#ifdef CONFIG_PAX_SEGMEXEC
77417+ vma_m = pax_find_mirror_vma(vma);
77418+ if (vma_m) {
77419+ unsigned long address_m;
77420+ pgd_t *pgd_m;
77421+ pud_t *pud_m;
77422+ pmd_t *pmd_m;
77423+
77424+ if (vma->vm_start > vma_m->vm_start) {
77425+ address_m = address;
77426+ address -= SEGMEXEC_TASK_SIZE;
77427+ vma = vma_m;
77428+ } else
77429+ address_m = address + SEGMEXEC_TASK_SIZE;
77430+
77431+ pgd_m = pgd_offset(mm, address_m);
77432+ pud_m = pud_alloc(mm, pgd_m, address_m);
77433+ if (!pud_m)
77434+ return VM_FAULT_OOM;
77435+ pmd_m = pmd_alloc(mm, pud_m, address_m);
77436+ if (!pmd_m)
77437+ return VM_FAULT_OOM;
77438+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
77439+ return VM_FAULT_OOM;
77440+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
77441+ }
77442+#endif
77443+
77444 pgd = pgd_offset(mm, address);
77445 pud = pud_alloc(mm, pgd, address);
77446 if (!pud)
77447@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
77448 gate_vma.vm_start = FIXADDR_USER_START;
77449 gate_vma.vm_end = FIXADDR_USER_END;
77450 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
77451- gate_vma.vm_page_prot = __P101;
77452+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
77453 /*
77454 * Make sure the vDSO gets into every core dump.
77455 * Dumping its contents makes post-mortem fully interpretable later
77456diff --git a/mm/mempolicy.c b/mm/mempolicy.c
77457index 3c6e3e2..b1ddbb8 100644
77458--- a/mm/mempolicy.c
77459+++ b/mm/mempolicy.c
77460@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77461 struct vm_area_struct *next;
77462 int err;
77463
77464+#ifdef CONFIG_PAX_SEGMEXEC
77465+ struct vm_area_struct *vma_m;
77466+#endif
77467+
77468 err = 0;
77469 for (; vma && vma->vm_start < end; vma = next) {
77470 next = vma->vm_next;
77471@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77472 err = policy_vma(vma, new);
77473 if (err)
77474 break;
77475+
77476+#ifdef CONFIG_PAX_SEGMEXEC
77477+ vma_m = pax_find_mirror_vma(vma);
77478+ if (vma_m) {
77479+ err = policy_vma(vma_m, new);
77480+ if (err)
77481+ break;
77482+ }
77483+#endif
77484+
77485 }
77486 return err;
77487 }
77488@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
77489
77490 if (end < start)
77491 return -EINVAL;
77492+
77493+#ifdef CONFIG_PAX_SEGMEXEC
77494+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77495+ if (end > SEGMEXEC_TASK_SIZE)
77496+ return -EINVAL;
77497+ } else
77498+#endif
77499+
77500+ if (end > TASK_SIZE)
77501+ return -EINVAL;
77502+
77503 if (end == start)
77504 return 0;
77505
77506@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77507 if (!mm)
77508 return -EINVAL;
77509
77510+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77511+ if (mm != current->mm &&
77512+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77513+ err = -EPERM;
77514+ goto out;
77515+ }
77516+#endif
77517+
77518 /*
77519 * Check if this process has the right to modify the specified
77520 * process. The right exists if the process has administrative
77521@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77522 rcu_read_lock();
77523 tcred = __task_cred(task);
77524 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77525- cred->uid != tcred->suid && cred->uid != tcred->uid &&
77526- !capable(CAP_SYS_NICE)) {
77527+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77528 rcu_read_unlock();
77529 err = -EPERM;
77530 goto out;
77531@@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
77532 }
77533 #endif
77534
77535+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77536+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
77537+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
77538+ _mm->pax_flags & MF_PAX_SEGMEXEC))
77539+#endif
77540+
77541 /*
77542 * Display pages allocated per node and memory policy via /proc.
77543 */
77544@@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
77545 int n;
77546 char buffer[50];
77547
77548+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77549+ if (current->exec_id != m->exec_id) {
77550+ gr_log_badprocpid("numa_maps");
77551+ return 0;
77552+ }
77553+#endif
77554+
77555 if (!mm)
77556 return 0;
77557
77558@@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
77559 mpol_to_str(buffer, sizeof(buffer), pol, 0);
77560 mpol_cond_put(pol);
77561
77562+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77563+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
77564+#else
77565 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
77566+#endif
77567
77568 if (file) {
77569 seq_printf(m, " file=");
77570- seq_path(m, &file->f_path, "\n\t= ");
77571+ seq_path(m, &file->f_path, "\n\t\\= ");
77572 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
77573 seq_printf(m, " heap");
77574 } else if (vma->vm_start <= mm->start_stack &&
77575diff --git a/mm/migrate.c b/mm/migrate.c
77576index aaca868..2ebecdc 100644
77577--- a/mm/migrate.c
77578+++ b/mm/migrate.c
77579@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
77580 unsigned long chunk_start;
77581 int err;
77582
77583+ pax_track_stack();
77584+
77585 task_nodes = cpuset_mems_allowed(task);
77586
77587 err = -ENOMEM;
77588@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77589 if (!mm)
77590 return -EINVAL;
77591
77592+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77593+ if (mm != current->mm &&
77594+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77595+ err = -EPERM;
77596+ goto out;
77597+ }
77598+#endif
77599+
77600 /*
77601 * Check if this process has the right to modify the specified
77602 * process. The right exists if the process has administrative
77603@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77604 rcu_read_lock();
77605 tcred = __task_cred(task);
77606 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77607- cred->uid != tcred->suid && cred->uid != tcred->uid &&
77608- !capable(CAP_SYS_NICE)) {
77609+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77610 rcu_read_unlock();
77611 err = -EPERM;
77612 goto out;
77613diff --git a/mm/mlock.c b/mm/mlock.c
77614index 2d846cf..98134d2 100644
77615--- a/mm/mlock.c
77616+++ b/mm/mlock.c
77617@@ -13,6 +13,7 @@
77618 #include <linux/pagemap.h>
77619 #include <linux/mempolicy.h>
77620 #include <linux/syscalls.h>
77621+#include <linux/security.h>
77622 #include <linux/sched.h>
77623 #include <linux/module.h>
77624 #include <linux/rmap.h>
77625@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
77626 }
77627 }
77628
77629-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
77630-{
77631- return (vma->vm_flags & VM_GROWSDOWN) &&
77632- (vma->vm_start == addr) &&
77633- !vma_stack_continue(vma->vm_prev, addr);
77634-}
77635-
77636 /**
77637 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
77638 * @vma: target vma
77639@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
77640 if (vma->vm_flags & VM_WRITE)
77641 gup_flags |= FOLL_WRITE;
77642
77643- /* We don't try to access the guard page of a stack vma */
77644- if (stack_guard_page(vma, start)) {
77645- addr += PAGE_SIZE;
77646- nr_pages--;
77647- }
77648-
77649 while (nr_pages > 0) {
77650 int i;
77651
77652@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
77653 {
77654 unsigned long nstart, end, tmp;
77655 struct vm_area_struct * vma, * prev;
77656- int error;
77657+ int error = -EINVAL;
77658
77659 len = PAGE_ALIGN(len);
77660 end = start + len;
77661@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
77662 return -EINVAL;
77663 if (end == start)
77664 return 0;
77665+ if (end > TASK_SIZE)
77666+ return -EINVAL;
77667+
77668 vma = find_vma_prev(current->mm, start, &prev);
77669 if (!vma || vma->vm_start > start)
77670 return -ENOMEM;
77671@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
77672 for (nstart = start ; ; ) {
77673 unsigned int newflags;
77674
77675+#ifdef CONFIG_PAX_SEGMEXEC
77676+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77677+ break;
77678+#endif
77679+
77680 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
77681
77682 newflags = vma->vm_flags | VM_LOCKED;
77683@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
77684 lock_limit >>= PAGE_SHIFT;
77685
77686 /* check against resource limits */
77687+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
77688 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
77689 error = do_mlock(start, len, 1);
77690 up_write(&current->mm->mmap_sem);
77691@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
77692 static int do_mlockall(int flags)
77693 {
77694 struct vm_area_struct * vma, * prev = NULL;
77695- unsigned int def_flags = 0;
77696
77697 if (flags & MCL_FUTURE)
77698- def_flags = VM_LOCKED;
77699- current->mm->def_flags = def_flags;
77700+ current->mm->def_flags |= VM_LOCKED;
77701+ else
77702+ current->mm->def_flags &= ~VM_LOCKED;
77703 if (flags == MCL_FUTURE)
77704 goto out;
77705
77706 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
77707- unsigned int newflags;
77708+ unsigned long newflags;
77709
77710+#ifdef CONFIG_PAX_SEGMEXEC
77711+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77712+ break;
77713+#endif
77714+
77715+ BUG_ON(vma->vm_end > TASK_SIZE);
77716 newflags = vma->vm_flags | VM_LOCKED;
77717 if (!(flags & MCL_CURRENT))
77718 newflags &= ~VM_LOCKED;
77719@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
77720 lock_limit >>= PAGE_SHIFT;
77721
77722 ret = -ENOMEM;
77723+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
77724 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
77725 capable(CAP_IPC_LOCK))
77726 ret = do_mlockall(flags);
77727diff --git a/mm/mmap.c b/mm/mmap.c
77728index 4b80cbf..c5ce1df 100644
77729--- a/mm/mmap.c
77730+++ b/mm/mmap.c
77731@@ -45,6 +45,16 @@
77732 #define arch_rebalance_pgtables(addr, len) (addr)
77733 #endif
77734
77735+static inline void verify_mm_writelocked(struct mm_struct *mm)
77736+{
77737+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
77738+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77739+ up_read(&mm->mmap_sem);
77740+ BUG();
77741+ }
77742+#endif
77743+}
77744+
77745 static void unmap_region(struct mm_struct *mm,
77746 struct vm_area_struct *vma, struct vm_area_struct *prev,
77747 unsigned long start, unsigned long end);
77748@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
77749 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
77750 *
77751 */
77752-pgprot_t protection_map[16] = {
77753+pgprot_t protection_map[16] __read_only = {
77754 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
77755 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
77756 };
77757
77758 pgprot_t vm_get_page_prot(unsigned long vm_flags)
77759 {
77760- return __pgprot(pgprot_val(protection_map[vm_flags &
77761+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
77762 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
77763 pgprot_val(arch_vm_get_page_prot(vm_flags)));
77764+
77765+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77766+ if (!nx_enabled &&
77767+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
77768+ (vm_flags & (VM_READ | VM_WRITE)))
77769+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
77770+#endif
77771+
77772+ return prot;
77773 }
77774 EXPORT_SYMBOL(vm_get_page_prot);
77775
77776 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
77777 int sysctl_overcommit_ratio = 50; /* default is 50% */
77778 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
77779+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
77780 struct percpu_counter vm_committed_as;
77781
77782 /*
77783@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
77784 struct vm_area_struct *next = vma->vm_next;
77785
77786 might_sleep();
77787+ BUG_ON(vma->vm_mirror);
77788 if (vma->vm_ops && vma->vm_ops->close)
77789 vma->vm_ops->close(vma);
77790 if (vma->vm_file) {
77791@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
77792 * not page aligned -Ram Gupta
77793 */
77794 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
77795+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
77796 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
77797 (mm->end_data - mm->start_data) > rlim)
77798 goto out;
77799@@ -704,6 +726,12 @@ static int
77800 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
77801 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77802 {
77803+
77804+#ifdef CONFIG_PAX_SEGMEXEC
77805+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
77806+ return 0;
77807+#endif
77808+
77809 if (is_mergeable_vma(vma, file, vm_flags) &&
77810 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77811 if (vma->vm_pgoff == vm_pgoff)
77812@@ -723,6 +751,12 @@ static int
77813 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77814 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77815 {
77816+
77817+#ifdef CONFIG_PAX_SEGMEXEC
77818+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
77819+ return 0;
77820+#endif
77821+
77822 if (is_mergeable_vma(vma, file, vm_flags) &&
77823 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77824 pgoff_t vm_pglen;
77825@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77826 struct vm_area_struct *vma_merge(struct mm_struct *mm,
77827 struct vm_area_struct *prev, unsigned long addr,
77828 unsigned long end, unsigned long vm_flags,
77829- struct anon_vma *anon_vma, struct file *file,
77830+ struct anon_vma *anon_vma, struct file *file,
77831 pgoff_t pgoff, struct mempolicy *policy)
77832 {
77833 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
77834 struct vm_area_struct *area, *next;
77835
77836+#ifdef CONFIG_PAX_SEGMEXEC
77837+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
77838+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
77839+
77840+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
77841+#endif
77842+
77843 /*
77844 * We later require that vma->vm_flags == vm_flags,
77845 * so this tests vma->vm_flags & VM_SPECIAL, too.
77846@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77847 if (next && next->vm_end == end) /* cases 6, 7, 8 */
77848 next = next->vm_next;
77849
77850+#ifdef CONFIG_PAX_SEGMEXEC
77851+ if (prev)
77852+ prev_m = pax_find_mirror_vma(prev);
77853+ if (area)
77854+ area_m = pax_find_mirror_vma(area);
77855+ if (next)
77856+ next_m = pax_find_mirror_vma(next);
77857+#endif
77858+
77859 /*
77860 * Can it merge with the predecessor?
77861 */
77862@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77863 /* cases 1, 6 */
77864 vma_adjust(prev, prev->vm_start,
77865 next->vm_end, prev->vm_pgoff, NULL);
77866- } else /* cases 2, 5, 7 */
77867+
77868+#ifdef CONFIG_PAX_SEGMEXEC
77869+ if (prev_m)
77870+ vma_adjust(prev_m, prev_m->vm_start,
77871+ next_m->vm_end, prev_m->vm_pgoff, NULL);
77872+#endif
77873+
77874+ } else { /* cases 2, 5, 7 */
77875 vma_adjust(prev, prev->vm_start,
77876 end, prev->vm_pgoff, NULL);
77877+
77878+#ifdef CONFIG_PAX_SEGMEXEC
77879+ if (prev_m)
77880+ vma_adjust(prev_m, prev_m->vm_start,
77881+ end_m, prev_m->vm_pgoff, NULL);
77882+#endif
77883+
77884+ }
77885 return prev;
77886 }
77887
77888@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77889 mpol_equal(policy, vma_policy(next)) &&
77890 can_vma_merge_before(next, vm_flags,
77891 anon_vma, file, pgoff+pglen)) {
77892- if (prev && addr < prev->vm_end) /* case 4 */
77893+ if (prev && addr < prev->vm_end) { /* case 4 */
77894 vma_adjust(prev, prev->vm_start,
77895 addr, prev->vm_pgoff, NULL);
77896- else /* cases 3, 8 */
77897+
77898+#ifdef CONFIG_PAX_SEGMEXEC
77899+ if (prev_m)
77900+ vma_adjust(prev_m, prev_m->vm_start,
77901+ addr_m, prev_m->vm_pgoff, NULL);
77902+#endif
77903+
77904+ } else { /* cases 3, 8 */
77905 vma_adjust(area, addr, next->vm_end,
77906 next->vm_pgoff - pglen, NULL);
77907+
77908+#ifdef CONFIG_PAX_SEGMEXEC
77909+ if (area_m)
77910+ vma_adjust(area_m, addr_m, next_m->vm_end,
77911+ next_m->vm_pgoff - pglen, NULL);
77912+#endif
77913+
77914+ }
77915 return area;
77916 }
77917
77918@@ -898,14 +978,11 @@ none:
77919 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
77920 struct file *file, long pages)
77921 {
77922- const unsigned long stack_flags
77923- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
77924-
77925 if (file) {
77926 mm->shared_vm += pages;
77927 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
77928 mm->exec_vm += pages;
77929- } else if (flags & stack_flags)
77930+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
77931 mm->stack_vm += pages;
77932 if (flags & (VM_RESERVED|VM_IO))
77933 mm->reserved_vm += pages;
77934@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77935 * (the exception is when the underlying filesystem is noexec
77936 * mounted, in which case we dont add PROT_EXEC.)
77937 */
77938- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77939+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77940 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
77941 prot |= PROT_EXEC;
77942
77943@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77944 /* Obtain the address to map to. we verify (or select) it and ensure
77945 * that it represents a valid section of the address space.
77946 */
77947- addr = get_unmapped_area(file, addr, len, pgoff, flags);
77948+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
77949 if (addr & ~PAGE_MASK)
77950 return addr;
77951
77952@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77953 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
77954 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
77955
77956+#ifdef CONFIG_PAX_MPROTECT
77957+ if (mm->pax_flags & MF_PAX_MPROTECT) {
77958+#ifndef CONFIG_PAX_MPROTECT_COMPAT
77959+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
77960+ gr_log_rwxmmap(file);
77961+
77962+#ifdef CONFIG_PAX_EMUPLT
77963+ vm_flags &= ~VM_EXEC;
77964+#else
77965+ return -EPERM;
77966+#endif
77967+
77968+ }
77969+
77970+ if (!(vm_flags & VM_EXEC))
77971+ vm_flags &= ~VM_MAYEXEC;
77972+#else
77973+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77974+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77975+#endif
77976+ else
77977+ vm_flags &= ~VM_MAYWRITE;
77978+ }
77979+#endif
77980+
77981+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77982+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
77983+ vm_flags &= ~VM_PAGEEXEC;
77984+#endif
77985+
77986 if (flags & MAP_LOCKED)
77987 if (!can_do_mlock())
77988 return -EPERM;
77989@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77990 locked += mm->locked_vm;
77991 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77992 lock_limit >>= PAGE_SHIFT;
77993+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77994 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
77995 return -EAGAIN;
77996 }
77997@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77998 if (error)
77999 return error;
78000
78001+ if (!gr_acl_handle_mmap(file, prot))
78002+ return -EACCES;
78003+
78004 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
78005 }
78006 EXPORT_SYMBOL(do_mmap_pgoff);
78007@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
78008 */
78009 int vma_wants_writenotify(struct vm_area_struct *vma)
78010 {
78011- unsigned int vm_flags = vma->vm_flags;
78012+ unsigned long vm_flags = vma->vm_flags;
78013
78014 /* If it was private or non-writable, the write bit is already clear */
78015- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
78016+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
78017 return 0;
78018
78019 /* The backer wishes to know when pages are first written to? */
78020@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
78021 unsigned long charged = 0;
78022 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
78023
78024+#ifdef CONFIG_PAX_SEGMEXEC
78025+ struct vm_area_struct *vma_m = NULL;
78026+#endif
78027+
78028+ /*
78029+ * mm->mmap_sem is required to protect against another thread
78030+ * changing the mappings in case we sleep.
78031+ */
78032+ verify_mm_writelocked(mm);
78033+
78034 /* Clear old maps */
78035 error = -ENOMEM;
78036-munmap_back:
78037 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78038 if (vma && vma->vm_start < addr + len) {
78039 if (do_munmap(mm, addr, len))
78040 return -ENOMEM;
78041- goto munmap_back;
78042+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78043+ BUG_ON(vma && vma->vm_start < addr + len);
78044 }
78045
78046 /* Check against address space limit. */
78047@@ -1173,6 +1294,16 @@ munmap_back:
78048 goto unacct_error;
78049 }
78050
78051+#ifdef CONFIG_PAX_SEGMEXEC
78052+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
78053+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78054+ if (!vma_m) {
78055+ error = -ENOMEM;
78056+ goto free_vma;
78057+ }
78058+ }
78059+#endif
78060+
78061 vma->vm_mm = mm;
78062 vma->vm_start = addr;
78063 vma->vm_end = addr + len;
78064@@ -1195,6 +1326,19 @@ munmap_back:
78065 error = file->f_op->mmap(file, vma);
78066 if (error)
78067 goto unmap_and_free_vma;
78068+
78069+#ifdef CONFIG_PAX_SEGMEXEC
78070+ if (vma_m && (vm_flags & VM_EXECUTABLE))
78071+ added_exe_file_vma(mm);
78072+#endif
78073+
78074+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
78075+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
78076+ vma->vm_flags |= VM_PAGEEXEC;
78077+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78078+ }
78079+#endif
78080+
78081 if (vm_flags & VM_EXECUTABLE)
78082 added_exe_file_vma(mm);
78083
78084@@ -1218,6 +1362,11 @@ munmap_back:
78085 vma_link(mm, vma, prev, rb_link, rb_parent);
78086 file = vma->vm_file;
78087
78088+#ifdef CONFIG_PAX_SEGMEXEC
78089+ if (vma_m)
78090+ pax_mirror_vma(vma_m, vma);
78091+#endif
78092+
78093 /* Once vma denies write, undo our temporary denial count */
78094 if (correct_wcount)
78095 atomic_inc(&inode->i_writecount);
78096@@ -1226,6 +1375,7 @@ out:
78097
78098 mm->total_vm += len >> PAGE_SHIFT;
78099 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
78100+ track_exec_limit(mm, addr, addr + len, vm_flags);
78101 if (vm_flags & VM_LOCKED) {
78102 /*
78103 * makes pages present; downgrades, drops, reacquires mmap_sem
78104@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
78105 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
78106 charged = 0;
78107 free_vma:
78108+
78109+#ifdef CONFIG_PAX_SEGMEXEC
78110+ if (vma_m)
78111+ kmem_cache_free(vm_area_cachep, vma_m);
78112+#endif
78113+
78114 kmem_cache_free(vm_area_cachep, vma);
78115 unacct_error:
78116 if (charged)
78117@@ -1255,6 +1411,44 @@ unacct_error:
78118 return error;
78119 }
78120
78121+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
78122+{
78123+ if (!vma) {
78124+#ifdef CONFIG_STACK_GROWSUP
78125+ if (addr > sysctl_heap_stack_gap)
78126+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
78127+ else
78128+ vma = find_vma(current->mm, 0);
78129+ if (vma && (vma->vm_flags & VM_GROWSUP))
78130+ return false;
78131+#endif
78132+ return true;
78133+ }
78134+
78135+ if (addr + len > vma->vm_start)
78136+ return false;
78137+
78138+ if (vma->vm_flags & VM_GROWSDOWN)
78139+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
78140+#ifdef CONFIG_STACK_GROWSUP
78141+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
78142+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
78143+#endif
78144+
78145+ return true;
78146+}
78147+
78148+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
78149+{
78150+ if (vma->vm_start < len)
78151+ return -ENOMEM;
78152+ if (!(vma->vm_flags & VM_GROWSDOWN))
78153+ return vma->vm_start - len;
78154+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
78155+ return vma->vm_start - len - sysctl_heap_stack_gap;
78156+ return -ENOMEM;
78157+}
78158+
78159 /* Get an address range which is currently unmapped.
78160 * For shmat() with addr=0.
78161 *
78162@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78163 if (flags & MAP_FIXED)
78164 return addr;
78165
78166+#ifdef CONFIG_PAX_RANDMMAP
78167+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78168+#endif
78169+
78170 if (addr) {
78171 addr = PAGE_ALIGN(addr);
78172- vma = find_vma(mm, addr);
78173- if (TASK_SIZE - len >= addr &&
78174- (!vma || addr + len <= vma->vm_start))
78175- return addr;
78176+ if (TASK_SIZE - len >= addr) {
78177+ vma = find_vma(mm, addr);
78178+ if (check_heap_stack_gap(vma, addr, len))
78179+ return addr;
78180+ }
78181 }
78182 if (len > mm->cached_hole_size) {
78183- start_addr = addr = mm->free_area_cache;
78184+ start_addr = addr = mm->free_area_cache;
78185 } else {
78186- start_addr = addr = TASK_UNMAPPED_BASE;
78187- mm->cached_hole_size = 0;
78188+ start_addr = addr = mm->mmap_base;
78189+ mm->cached_hole_size = 0;
78190 }
78191
78192 full_search:
78193@@ -1303,34 +1502,40 @@ full_search:
78194 * Start a new search - just in case we missed
78195 * some holes.
78196 */
78197- if (start_addr != TASK_UNMAPPED_BASE) {
78198- addr = TASK_UNMAPPED_BASE;
78199- start_addr = addr;
78200+ if (start_addr != mm->mmap_base) {
78201+ start_addr = addr = mm->mmap_base;
78202 mm->cached_hole_size = 0;
78203 goto full_search;
78204 }
78205 return -ENOMEM;
78206 }
78207- if (!vma || addr + len <= vma->vm_start) {
78208- /*
78209- * Remember the place where we stopped the search:
78210- */
78211- mm->free_area_cache = addr + len;
78212- return addr;
78213- }
78214+ if (check_heap_stack_gap(vma, addr, len))
78215+ break;
78216 if (addr + mm->cached_hole_size < vma->vm_start)
78217 mm->cached_hole_size = vma->vm_start - addr;
78218 addr = vma->vm_end;
78219 }
78220+
78221+ /*
78222+ * Remember the place where we stopped the search:
78223+ */
78224+ mm->free_area_cache = addr + len;
78225+ return addr;
78226 }
78227 #endif
78228
78229 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
78230 {
78231+
78232+#ifdef CONFIG_PAX_SEGMEXEC
78233+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78234+ return;
78235+#endif
78236+
78237 /*
78238 * Is this a new hole at the lowest possible address?
78239 */
78240- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
78241+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
78242 mm->free_area_cache = addr;
78243 mm->cached_hole_size = ~0UL;
78244 }
78245@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78246 {
78247 struct vm_area_struct *vma;
78248 struct mm_struct *mm = current->mm;
78249- unsigned long addr = addr0;
78250+ unsigned long base = mm->mmap_base, addr = addr0;
78251
78252 /* requested length too big for entire address space */
78253 if (len > TASK_SIZE)
78254@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78255 if (flags & MAP_FIXED)
78256 return addr;
78257
78258+#ifdef CONFIG_PAX_RANDMMAP
78259+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78260+#endif
78261+
78262 /* requesting a specific address */
78263 if (addr) {
78264 addr = PAGE_ALIGN(addr);
78265- vma = find_vma(mm, addr);
78266- if (TASK_SIZE - len >= addr &&
78267- (!vma || addr + len <= vma->vm_start))
78268- return addr;
78269+ if (TASK_SIZE - len >= addr) {
78270+ vma = find_vma(mm, addr);
78271+ if (check_heap_stack_gap(vma, addr, len))
78272+ return addr;
78273+ }
78274 }
78275
78276 /* check if free_area_cache is useful for us */
78277@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78278 /* make sure it can fit in the remaining address space */
78279 if (addr > len) {
78280 vma = find_vma(mm, addr-len);
78281- if (!vma || addr <= vma->vm_start)
78282+ if (check_heap_stack_gap(vma, addr - len, len))
78283 /* remember the address as a hint for next time */
78284 return (mm->free_area_cache = addr-len);
78285 }
78286@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78287 * return with success:
78288 */
78289 vma = find_vma(mm, addr);
78290- if (!vma || addr+len <= vma->vm_start)
78291+ if (check_heap_stack_gap(vma, addr, len))
78292 /* remember the address as a hint for next time */
78293 return (mm->free_area_cache = addr);
78294
78295@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78296 mm->cached_hole_size = vma->vm_start - addr;
78297
78298 /* try just below the current vma->vm_start */
78299- addr = vma->vm_start-len;
78300- } while (len < vma->vm_start);
78301+ addr = skip_heap_stack_gap(vma, len);
78302+ } while (!IS_ERR_VALUE(addr));
78303
78304 bottomup:
78305 /*
78306@@ -1414,13 +1624,21 @@ bottomup:
78307 * can happen with large stack limits and large mmap()
78308 * allocations.
78309 */
78310+ mm->mmap_base = TASK_UNMAPPED_BASE;
78311+
78312+#ifdef CONFIG_PAX_RANDMMAP
78313+ if (mm->pax_flags & MF_PAX_RANDMMAP)
78314+ mm->mmap_base += mm->delta_mmap;
78315+#endif
78316+
78317+ mm->free_area_cache = mm->mmap_base;
78318 mm->cached_hole_size = ~0UL;
78319- mm->free_area_cache = TASK_UNMAPPED_BASE;
78320 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78321 /*
78322 * Restore the topdown base:
78323 */
78324- mm->free_area_cache = mm->mmap_base;
78325+ mm->mmap_base = base;
78326+ mm->free_area_cache = base;
78327 mm->cached_hole_size = ~0UL;
78328
78329 return addr;
78330@@ -1429,6 +1647,12 @@ bottomup:
78331
78332 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78333 {
78334+
78335+#ifdef CONFIG_PAX_SEGMEXEC
78336+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78337+ return;
78338+#endif
78339+
78340 /*
78341 * Is this a new hole at the highest possible address?
78342 */
78343@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78344 mm->free_area_cache = addr;
78345
78346 /* dont allow allocations above current base */
78347- if (mm->free_area_cache > mm->mmap_base)
78348+ if (mm->free_area_cache > mm->mmap_base) {
78349 mm->free_area_cache = mm->mmap_base;
78350+ mm->cached_hole_size = ~0UL;
78351+ }
78352 }
78353
78354 unsigned long
78355@@ -1545,6 +1771,27 @@ out:
78356 return prev ? prev->vm_next : vma;
78357 }
78358
78359+#ifdef CONFIG_PAX_SEGMEXEC
78360+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
78361+{
78362+ struct vm_area_struct *vma_m;
78363+
78364+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
78365+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
78366+ BUG_ON(vma->vm_mirror);
78367+ return NULL;
78368+ }
78369+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
78370+ vma_m = vma->vm_mirror;
78371+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
78372+ BUG_ON(vma->vm_file != vma_m->vm_file);
78373+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
78374+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
78375+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
78376+ return vma_m;
78377+}
78378+#endif
78379+
78380 /*
78381 * Verify that the stack growth is acceptable and
78382 * update accounting. This is shared with both the
78383@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78384 return -ENOMEM;
78385
78386 /* Stack limit test */
78387+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
78388 if (size > rlim[RLIMIT_STACK].rlim_cur)
78389 return -ENOMEM;
78390
78391@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78392 unsigned long limit;
78393 locked = mm->locked_vm + grow;
78394 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
78395+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78396 if (locked > limit && !capable(CAP_IPC_LOCK))
78397 return -ENOMEM;
78398 }
78399@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78400 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
78401 * vma is the last one with address > vma->vm_end. Have to extend vma.
78402 */
78403+#ifndef CONFIG_IA64
78404+static
78405+#endif
78406 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78407 {
78408 int error;
78409+ bool locknext;
78410
78411 if (!(vma->vm_flags & VM_GROWSUP))
78412 return -EFAULT;
78413
78414+ /* Also guard against wrapping around to address 0. */
78415+ if (address < PAGE_ALIGN(address+1))
78416+ address = PAGE_ALIGN(address+1);
78417+ else
78418+ return -ENOMEM;
78419+
78420 /*
78421 * We must make sure the anon_vma is allocated
78422 * so that the anon_vma locking is not a noop.
78423 */
78424 if (unlikely(anon_vma_prepare(vma)))
78425 return -ENOMEM;
78426+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
78427+ if (locknext && anon_vma_prepare(vma->vm_next))
78428+ return -ENOMEM;
78429 anon_vma_lock(vma);
78430+ if (locknext)
78431+ anon_vma_lock(vma->vm_next);
78432
78433 /*
78434 * vma->vm_start/vm_end cannot change under us because the caller
78435 * is required to hold the mmap_sem in read mode. We need the
78436- * anon_vma lock to serialize against concurrent expand_stacks.
78437- * Also guard against wrapping around to address 0.
78438+ * anon_vma locks to serialize against concurrent expand_stacks
78439+ * and expand_upwards.
78440 */
78441- if (address < PAGE_ALIGN(address+4))
78442- address = PAGE_ALIGN(address+4);
78443- else {
78444- anon_vma_unlock(vma);
78445- return -ENOMEM;
78446- }
78447 error = 0;
78448
78449 /* Somebody else might have raced and expanded it already */
78450- if (address > vma->vm_end) {
78451+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
78452+ error = -ENOMEM;
78453+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
78454 unsigned long size, grow;
78455
78456 size = address - vma->vm_start;
78457@@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78458 vma->vm_end = address;
78459 }
78460 }
78461+ if (locknext)
78462+ anon_vma_unlock(vma->vm_next);
78463 anon_vma_unlock(vma);
78464 return error;
78465 }
78466@@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_area_struct *vma,
78467 unsigned long address)
78468 {
78469 int error;
78470+ bool lockprev = false;
78471+ struct vm_area_struct *prev;
78472
78473 /*
78474 * We must make sure the anon_vma is allocated
78475@@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_area_struct *vma,
78476 if (error)
78477 return error;
78478
78479+ prev = vma->vm_prev;
78480+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
78481+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
78482+#endif
78483+ if (lockprev && anon_vma_prepare(prev))
78484+ return -ENOMEM;
78485+ if (lockprev)
78486+ anon_vma_lock(prev);
78487+
78488 anon_vma_lock(vma);
78489
78490 /*
78491@@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_area_struct *vma,
78492 */
78493
78494 /* Somebody else might have raced and expanded it already */
78495- if (address < vma->vm_start) {
78496+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
78497+ error = -ENOMEM;
78498+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
78499 unsigned long size, grow;
78500
78501+#ifdef CONFIG_PAX_SEGMEXEC
78502+ struct vm_area_struct *vma_m;
78503+
78504+ vma_m = pax_find_mirror_vma(vma);
78505+#endif
78506+
78507 size = vma->vm_end - address;
78508 grow = (vma->vm_start - address) >> PAGE_SHIFT;
78509
78510@@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_area_struct *vma,
78511 if (!error) {
78512 vma->vm_start = address;
78513 vma->vm_pgoff -= grow;
78514+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
78515+
78516+#ifdef CONFIG_PAX_SEGMEXEC
78517+ if (vma_m) {
78518+ vma_m->vm_start -= grow << PAGE_SHIFT;
78519+ vma_m->vm_pgoff -= grow;
78520+ }
78521+#endif
78522+
78523+
78524 }
78525 }
78526 }
78527 anon_vma_unlock(vma);
78528+ if (lockprev)
78529+ anon_vma_unlock(prev);
78530 return error;
78531 }
78532
78533@@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
78534 do {
78535 long nrpages = vma_pages(vma);
78536
78537+#ifdef CONFIG_PAX_SEGMEXEC
78538+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
78539+ vma = remove_vma(vma);
78540+ continue;
78541+ }
78542+#endif
78543+
78544 mm->total_vm -= nrpages;
78545 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
78546 vma = remove_vma(vma);
78547@@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
78548 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
78549 vma->vm_prev = NULL;
78550 do {
78551+
78552+#ifdef CONFIG_PAX_SEGMEXEC
78553+ if (vma->vm_mirror) {
78554+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
78555+ vma->vm_mirror->vm_mirror = NULL;
78556+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
78557+ vma->vm_mirror = NULL;
78558+ }
78559+#endif
78560+
78561 rb_erase(&vma->vm_rb, &mm->mm_rb);
78562 mm->map_count--;
78563 tail_vma = vma;
78564@@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78565 struct mempolicy *pol;
78566 struct vm_area_struct *new;
78567
78568+#ifdef CONFIG_PAX_SEGMEXEC
78569+ struct vm_area_struct *vma_m, *new_m = NULL;
78570+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
78571+#endif
78572+
78573 if (is_vm_hugetlb_page(vma) && (addr &
78574 ~(huge_page_mask(hstate_vma(vma)))))
78575 return -EINVAL;
78576
78577+#ifdef CONFIG_PAX_SEGMEXEC
78578+ vma_m = pax_find_mirror_vma(vma);
78579+
78580+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78581+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
78582+ if (mm->map_count >= sysctl_max_map_count-1)
78583+ return -ENOMEM;
78584+ } else
78585+#endif
78586+
78587 if (mm->map_count >= sysctl_max_map_count)
78588 return -ENOMEM;
78589
78590@@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78591 if (!new)
78592 return -ENOMEM;
78593
78594+#ifdef CONFIG_PAX_SEGMEXEC
78595+ if (vma_m) {
78596+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78597+ if (!new_m) {
78598+ kmem_cache_free(vm_area_cachep, new);
78599+ return -ENOMEM;
78600+ }
78601+ }
78602+#endif
78603+
78604 /* most fields are the same, copy all, and then fixup */
78605 *new = *vma;
78606
78607@@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78608 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
78609 }
78610
78611+#ifdef CONFIG_PAX_SEGMEXEC
78612+ if (vma_m) {
78613+ *new_m = *vma_m;
78614+ new_m->vm_mirror = new;
78615+ new->vm_mirror = new_m;
78616+
78617+ if (new_below)
78618+ new_m->vm_end = addr_m;
78619+ else {
78620+ new_m->vm_start = addr_m;
78621+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
78622+ }
78623+ }
78624+#endif
78625+
78626 pol = mpol_dup(vma_policy(vma));
78627 if (IS_ERR(pol)) {
78628+
78629+#ifdef CONFIG_PAX_SEGMEXEC
78630+ if (new_m)
78631+ kmem_cache_free(vm_area_cachep, new_m);
78632+#endif
78633+
78634 kmem_cache_free(vm_area_cachep, new);
78635 return PTR_ERR(pol);
78636 }
78637@@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78638 else
78639 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
78640
78641+#ifdef CONFIG_PAX_SEGMEXEC
78642+ if (vma_m) {
78643+ mpol_get(pol);
78644+ vma_set_policy(new_m, pol);
78645+
78646+ if (new_m->vm_file) {
78647+ get_file(new_m->vm_file);
78648+ if (vma_m->vm_flags & VM_EXECUTABLE)
78649+ added_exe_file_vma(mm);
78650+ }
78651+
78652+ if (new_m->vm_ops && new_m->vm_ops->open)
78653+ new_m->vm_ops->open(new_m);
78654+
78655+ if (new_below)
78656+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
78657+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
78658+ else
78659+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
78660+ }
78661+#endif
78662+
78663 return 0;
78664 }
78665
78666@@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78667 * work. This now handles partial unmappings.
78668 * Jeremy Fitzhardinge <jeremy@goop.org>
78669 */
78670+#ifdef CONFIG_PAX_SEGMEXEC
78671 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78672 {
78673+ int ret = __do_munmap(mm, start, len);
78674+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
78675+ return ret;
78676+
78677+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
78678+}
78679+
78680+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78681+#else
78682+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78683+#endif
78684+{
78685 unsigned long end;
78686 struct vm_area_struct *vma, *prev, *last;
78687
78688+ /*
78689+ * mm->mmap_sem is required to protect against another thread
78690+ * changing the mappings in case we sleep.
78691+ */
78692+ verify_mm_writelocked(mm);
78693+
78694 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
78695 return -EINVAL;
78696
78697@@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78698 /* Fix up all other VM information */
78699 remove_vma_list(mm, vma);
78700
78701+ track_exec_limit(mm, start, end, 0UL);
78702+
78703 return 0;
78704 }
78705
78706@@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
78707
78708 profile_munmap(addr);
78709
78710+#ifdef CONFIG_PAX_SEGMEXEC
78711+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
78712+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
78713+ return -EINVAL;
78714+#endif
78715+
78716 down_write(&mm->mmap_sem);
78717 ret = do_munmap(mm, addr, len);
78718 up_write(&mm->mmap_sem);
78719 return ret;
78720 }
78721
78722-static inline void verify_mm_writelocked(struct mm_struct *mm)
78723-{
78724-#ifdef CONFIG_DEBUG_VM
78725- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78726- WARN_ON(1);
78727- up_read(&mm->mmap_sem);
78728- }
78729-#endif
78730-}
78731-
78732 /*
78733 * this is really a simplified "do_mmap". it only handles
78734 * anonymous maps. eventually we may be able to do some
78735@@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78736 struct rb_node ** rb_link, * rb_parent;
78737 pgoff_t pgoff = addr >> PAGE_SHIFT;
78738 int error;
78739+ unsigned long charged;
78740
78741 len = PAGE_ALIGN(len);
78742 if (!len)
78743@@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78744
78745 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
78746
78747+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
78748+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
78749+ flags &= ~VM_EXEC;
78750+
78751+#ifdef CONFIG_PAX_MPROTECT
78752+ if (mm->pax_flags & MF_PAX_MPROTECT)
78753+ flags &= ~VM_MAYEXEC;
78754+#endif
78755+
78756+ }
78757+#endif
78758+
78759 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
78760 if (error & ~PAGE_MASK)
78761 return error;
78762
78763+ charged = len >> PAGE_SHIFT;
78764+
78765 /*
78766 * mlock MCL_FUTURE?
78767 */
78768 if (mm->def_flags & VM_LOCKED) {
78769 unsigned long locked, lock_limit;
78770- locked = len >> PAGE_SHIFT;
78771+ locked = charged;
78772 locked += mm->locked_vm;
78773 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78774 lock_limit >>= PAGE_SHIFT;
78775@@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78776 /*
78777 * Clear old maps. this also does some error checking for us
78778 */
78779- munmap_back:
78780 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78781 if (vma && vma->vm_start < addr + len) {
78782 if (do_munmap(mm, addr, len))
78783 return -ENOMEM;
78784- goto munmap_back;
78785+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78786+ BUG_ON(vma && vma->vm_start < addr + len);
78787 }
78788
78789 /* Check against address space limits *after* clearing old maps... */
78790- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
78791+ if (!may_expand_vm(mm, charged))
78792 return -ENOMEM;
78793
78794 if (mm->map_count > sysctl_max_map_count)
78795 return -ENOMEM;
78796
78797- if (security_vm_enough_memory(len >> PAGE_SHIFT))
78798+ if (security_vm_enough_memory(charged))
78799 return -ENOMEM;
78800
78801 /* Can we just expand an old private anonymous mapping? */
78802@@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78803 */
78804 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78805 if (!vma) {
78806- vm_unacct_memory(len >> PAGE_SHIFT);
78807+ vm_unacct_memory(charged);
78808 return -ENOMEM;
78809 }
78810
78811@@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78812 vma->vm_page_prot = vm_get_page_prot(flags);
78813 vma_link(mm, vma, prev, rb_link, rb_parent);
78814 out:
78815- mm->total_vm += len >> PAGE_SHIFT;
78816+ mm->total_vm += charged;
78817 if (flags & VM_LOCKED) {
78818 if (!mlock_vma_pages_range(vma, addr, addr + len))
78819- mm->locked_vm += (len >> PAGE_SHIFT);
78820+ mm->locked_vm += charged;
78821 }
78822+ track_exec_limit(mm, addr, addr + len, flags);
78823 return addr;
78824 }
78825
78826@@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
78827 * Walk the list again, actually closing and freeing it,
78828 * with preemption enabled, without holding any MM locks.
78829 */
78830- while (vma)
78831+ while (vma) {
78832+ vma->vm_mirror = NULL;
78833 vma = remove_vma(vma);
78834+ }
78835
78836 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
78837 }
78838@@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78839 struct vm_area_struct * __vma, * prev;
78840 struct rb_node ** rb_link, * rb_parent;
78841
78842+#ifdef CONFIG_PAX_SEGMEXEC
78843+ struct vm_area_struct *vma_m = NULL;
78844+#endif
78845+
78846 /*
78847 * The vm_pgoff of a purely anonymous vma should be irrelevant
78848 * until its first write fault, when page's anon_vma and index
78849@@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78850 if ((vma->vm_flags & VM_ACCOUNT) &&
78851 security_vm_enough_memory_mm(mm, vma_pages(vma)))
78852 return -ENOMEM;
78853+
78854+#ifdef CONFIG_PAX_SEGMEXEC
78855+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
78856+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78857+ if (!vma_m)
78858+ return -ENOMEM;
78859+ }
78860+#endif
78861+
78862 vma_link(mm, vma, prev, rb_link, rb_parent);
78863+
78864+#ifdef CONFIG_PAX_SEGMEXEC
78865+ if (vma_m)
78866+ pax_mirror_vma(vma_m, vma);
78867+#endif
78868+
78869 return 0;
78870 }
78871
78872@@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78873 struct rb_node **rb_link, *rb_parent;
78874 struct mempolicy *pol;
78875
78876+ BUG_ON(vma->vm_mirror);
78877+
78878 /*
78879 * If anonymous vma has not yet been faulted, update new pgoff
78880 * to match new location, to increase its chance of merging.
78881@@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78882 return new_vma;
78883 }
78884
78885+#ifdef CONFIG_PAX_SEGMEXEC
78886+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
78887+{
78888+ struct vm_area_struct *prev_m;
78889+ struct rb_node **rb_link_m, *rb_parent_m;
78890+ struct mempolicy *pol_m;
78891+
78892+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
78893+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
78894+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
78895+ *vma_m = *vma;
78896+ pol_m = vma_policy(vma_m);
78897+ mpol_get(pol_m);
78898+ vma_set_policy(vma_m, pol_m);
78899+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
78900+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
78901+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
78902+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
78903+ if (vma_m->vm_file)
78904+ get_file(vma_m->vm_file);
78905+ if (vma_m->vm_ops && vma_m->vm_ops->open)
78906+ vma_m->vm_ops->open(vma_m);
78907+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
78908+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
78909+ vma_m->vm_mirror = vma;
78910+ vma->vm_mirror = vma_m;
78911+}
78912+#endif
78913+
78914 /*
78915 * Return true if the calling process may expand its vm space by the passed
78916 * number of pages
78917@@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
78918 unsigned long lim;
78919
78920 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
78921-
78922+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
78923 if (cur + npages > lim)
78924 return 0;
78925 return 1;
78926@@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_struct *mm,
78927 vma->vm_start = addr;
78928 vma->vm_end = addr + len;
78929
78930+#ifdef CONFIG_PAX_MPROTECT
78931+ if (mm->pax_flags & MF_PAX_MPROTECT) {
78932+#ifndef CONFIG_PAX_MPROTECT_COMPAT
78933+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
78934+ return -EPERM;
78935+ if (!(vm_flags & VM_EXEC))
78936+ vm_flags &= ~VM_MAYEXEC;
78937+#else
78938+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78939+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78940+#endif
78941+ else
78942+ vm_flags &= ~VM_MAYWRITE;
78943+ }
78944+#endif
78945+
78946 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
78947 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78948
78949diff --git a/mm/mprotect.c b/mm/mprotect.c
78950index 1737c7e..c7faeb4 100644
78951--- a/mm/mprotect.c
78952+++ b/mm/mprotect.c
78953@@ -24,10 +24,16 @@
78954 #include <linux/mmu_notifier.h>
78955 #include <linux/migrate.h>
78956 #include <linux/perf_event.h>
78957+
78958+#ifdef CONFIG_PAX_MPROTECT
78959+#include <linux/elf.h>
78960+#endif
78961+
78962 #include <asm/uaccess.h>
78963 #include <asm/pgtable.h>
78964 #include <asm/cacheflush.h>
78965 #include <asm/tlbflush.h>
78966+#include <asm/mmu_context.h>
78967
78968 #ifndef pgprot_modify
78969 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
78970@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
78971 flush_tlb_range(vma, start, end);
78972 }
78973
78974+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
78975+/* called while holding the mmap semaphor for writing except stack expansion */
78976+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
78977+{
78978+ unsigned long oldlimit, newlimit = 0UL;
78979+
78980+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
78981+ return;
78982+
78983+ spin_lock(&mm->page_table_lock);
78984+ oldlimit = mm->context.user_cs_limit;
78985+ if ((prot & VM_EXEC) && oldlimit < end)
78986+ /* USER_CS limit moved up */
78987+ newlimit = end;
78988+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
78989+ /* USER_CS limit moved down */
78990+ newlimit = start;
78991+
78992+ if (newlimit) {
78993+ mm->context.user_cs_limit = newlimit;
78994+
78995+#ifdef CONFIG_SMP
78996+ wmb();
78997+ cpus_clear(mm->context.cpu_user_cs_mask);
78998+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
78999+#endif
79000+
79001+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
79002+ }
79003+ spin_unlock(&mm->page_table_lock);
79004+ if (newlimit == end) {
79005+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
79006+
79007+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
79008+ if (is_vm_hugetlb_page(vma))
79009+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
79010+ else
79011+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
79012+ }
79013+}
79014+#endif
79015+
79016 int
79017 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79018 unsigned long start, unsigned long end, unsigned long newflags)
79019@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79020 int error;
79021 int dirty_accountable = 0;
79022
79023+#ifdef CONFIG_PAX_SEGMEXEC
79024+ struct vm_area_struct *vma_m = NULL;
79025+ unsigned long start_m, end_m;
79026+
79027+ start_m = start + SEGMEXEC_TASK_SIZE;
79028+ end_m = end + SEGMEXEC_TASK_SIZE;
79029+#endif
79030+
79031 if (newflags == oldflags) {
79032 *pprev = vma;
79033 return 0;
79034 }
79035
79036+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
79037+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
79038+
79039+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
79040+ return -ENOMEM;
79041+
79042+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
79043+ return -ENOMEM;
79044+ }
79045+
79046 /*
79047 * If we make a private mapping writable we increase our commit;
79048 * but (without finer accounting) cannot reduce our commit if we
79049@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
79050 }
79051 }
79052
79053+#ifdef CONFIG_PAX_SEGMEXEC
79054+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
79055+ if (start != vma->vm_start) {
79056+ error = split_vma(mm, vma, start, 1);
79057+ if (error)
79058+ goto fail;
79059+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
79060+ *pprev = (*pprev)->vm_next;
79061+ }
79062+
79063+ if (end != vma->vm_end) {
79064+ error = split_vma(mm, vma, end, 0);
79065+ if (error)
79066+ goto fail;
79067+ }
79068+
79069+ if (pax_find_mirror_vma(vma)) {
79070+ error = __do_munmap(mm, start_m, end_m - start_m);
79071+ if (error)
79072+ goto fail;
79073+ } else {
79074+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
79075+ if (!vma_m) {
79076+ error = -ENOMEM;
79077+ goto fail;
79078+ }
79079+ vma->vm_flags = newflags;
79080+ pax_mirror_vma(vma_m, vma);
79081+ }
79082+ }
79083+#endif
79084+
79085 /*
79086 * First try to merge with previous and/or next vma.
79087 */
79088@@ -195,9 +293,21 @@ success:
79089 * vm_flags and vm_page_prot are protected by the mmap_sem
79090 * held in write mode.
79091 */
79092+
79093+#ifdef CONFIG_PAX_SEGMEXEC
79094+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
79095+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
79096+#endif
79097+
79098 vma->vm_flags = newflags;
79099+
79100+#ifdef CONFIG_PAX_MPROTECT
79101+ if (mm->binfmt && mm->binfmt->handle_mprotect)
79102+ mm->binfmt->handle_mprotect(vma, newflags);
79103+#endif
79104+
79105 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
79106- vm_get_page_prot(newflags));
79107+ vm_get_page_prot(vma->vm_flags));
79108
79109 if (vma_wants_writenotify(vma)) {
79110 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
79111@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79112 end = start + len;
79113 if (end <= start)
79114 return -ENOMEM;
79115+
79116+#ifdef CONFIG_PAX_SEGMEXEC
79117+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79118+ if (end > SEGMEXEC_TASK_SIZE)
79119+ return -EINVAL;
79120+ } else
79121+#endif
79122+
79123+ if (end > TASK_SIZE)
79124+ return -EINVAL;
79125+
79126 if (!arch_validate_prot(prot))
79127 return -EINVAL;
79128
79129@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79130 /*
79131 * Does the application expect PROT_READ to imply PROT_EXEC:
79132 */
79133- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79134+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79135 prot |= PROT_EXEC;
79136
79137 vm_flags = calc_vm_prot_bits(prot);
79138@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79139 if (start > vma->vm_start)
79140 prev = vma;
79141
79142+#ifdef CONFIG_PAX_MPROTECT
79143+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
79144+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
79145+#endif
79146+
79147 for (nstart = start ; ; ) {
79148 unsigned long newflags;
79149
79150@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79151
79152 /* newflags >> 4 shift VM_MAY% in place of VM_% */
79153 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
79154+ if (prot & (PROT_WRITE | PROT_EXEC))
79155+ gr_log_rwxmprotect(vma->vm_file);
79156+
79157+ error = -EACCES;
79158+ goto out;
79159+ }
79160+
79161+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
79162 error = -EACCES;
79163 goto out;
79164 }
79165@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79166 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
79167 if (error)
79168 goto out;
79169+
79170+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
79171+
79172 nstart = tmp;
79173
79174 if (nstart < prev->vm_end)
79175diff --git a/mm/mremap.c b/mm/mremap.c
79176index 3e98d79..1706cec 100644
79177--- a/mm/mremap.c
79178+++ b/mm/mremap.c
79179@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
79180 continue;
79181 pte = ptep_clear_flush(vma, old_addr, old_pte);
79182 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
79183+
79184+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79185+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
79186+ pte = pte_exprotect(pte);
79187+#endif
79188+
79189 set_pte_at(mm, new_addr, new_pte, pte);
79190 }
79191
79192@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
79193 if (is_vm_hugetlb_page(vma))
79194 goto Einval;
79195
79196+#ifdef CONFIG_PAX_SEGMEXEC
79197+ if (pax_find_mirror_vma(vma))
79198+ goto Einval;
79199+#endif
79200+
79201 /* We can't remap across vm area boundaries */
79202 if (old_len > vma->vm_end - addr)
79203 goto Efault;
79204@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
79205 unsigned long ret = -EINVAL;
79206 unsigned long charged = 0;
79207 unsigned long map_flags;
79208+ unsigned long pax_task_size = TASK_SIZE;
79209
79210 if (new_addr & ~PAGE_MASK)
79211 goto out;
79212
79213- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
79214+#ifdef CONFIG_PAX_SEGMEXEC
79215+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
79216+ pax_task_size = SEGMEXEC_TASK_SIZE;
79217+#endif
79218+
79219+ pax_task_size -= PAGE_SIZE;
79220+
79221+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
79222 goto out;
79223
79224 /* Check if the location we're moving into overlaps the
79225 * old location at all, and fail if it does.
79226 */
79227- if ((new_addr <= addr) && (new_addr+new_len) > addr)
79228- goto out;
79229-
79230- if ((addr <= new_addr) && (addr+old_len) > new_addr)
79231+ if (addr + old_len > new_addr && new_addr + new_len > addr)
79232 goto out;
79233
79234 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79235@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
79236 struct vm_area_struct *vma;
79237 unsigned long ret = -EINVAL;
79238 unsigned long charged = 0;
79239+ unsigned long pax_task_size = TASK_SIZE;
79240
79241 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
79242 goto out;
79243@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79244 if (!new_len)
79245 goto out;
79246
79247+#ifdef CONFIG_PAX_SEGMEXEC
79248+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
79249+ pax_task_size = SEGMEXEC_TASK_SIZE;
79250+#endif
79251+
79252+ pax_task_size -= PAGE_SIZE;
79253+
79254+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79255+ old_len > pax_task_size || addr > pax_task_size-old_len)
79256+ goto out;
79257+
79258 if (flags & MREMAP_FIXED) {
79259 if (flags & MREMAP_MAYMOVE)
79260 ret = mremap_to(addr, old_len, new_addr, new_len);
79261@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79262 addr + new_len);
79263 }
79264 ret = addr;
79265+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79266 goto out;
79267 }
79268 }
79269@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79270 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79271 if (ret)
79272 goto out;
79273+
79274+ map_flags = vma->vm_flags;
79275 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79276+ if (!(ret & ~PAGE_MASK)) {
79277+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79278+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79279+ }
79280 }
79281 out:
79282 if (ret & ~PAGE_MASK)
79283diff --git a/mm/nommu.c b/mm/nommu.c
79284index 406e8d4..53970d3 100644
79285--- a/mm/nommu.c
79286+++ b/mm/nommu.c
79287@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79288 int sysctl_overcommit_ratio = 50; /* default is 50% */
79289 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79290 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79291-int heap_stack_gap = 0;
79292
79293 atomic_long_t mmap_pages_allocated;
79294
79295@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79296 EXPORT_SYMBOL(find_vma);
79297
79298 /*
79299- * find a VMA
79300- * - we don't extend stack VMAs under NOMMU conditions
79301- */
79302-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79303-{
79304- return find_vma(mm, addr);
79305-}
79306-
79307-/*
79308 * expand a stack to a given address
79309 * - not supported under NOMMU conditions
79310 */
79311diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79312index 3ecab7e..594a471 100644
79313--- a/mm/page_alloc.c
79314+++ b/mm/page_alloc.c
79315@@ -289,7 +289,7 @@ out:
79316 * This usage means that zero-order pages may not be compound.
79317 */
79318
79319-static void free_compound_page(struct page *page)
79320+void free_compound_page(struct page *page)
79321 {
79322 __free_pages_ok(page, compound_order(page));
79323 }
79324@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79325 int bad = 0;
79326 int wasMlocked = __TestClearPageMlocked(page);
79327
79328+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79329+ unsigned long index = 1UL << order;
79330+#endif
79331+
79332 kmemcheck_free_shadow(page, order);
79333
79334 for (i = 0 ; i < (1 << order) ; ++i)
79335@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79336 debug_check_no_obj_freed(page_address(page),
79337 PAGE_SIZE << order);
79338 }
79339+
79340+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79341+ for (; index; --index)
79342+ sanitize_highpage(page + index - 1);
79343+#endif
79344+
79345 arch_free_page(page, order);
79346 kernel_map_pages(page, 1 << order, 0);
79347
79348@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
79349 arch_alloc_page(page, order);
79350 kernel_map_pages(page, 1 << order, 1);
79351
79352+#ifndef CONFIG_PAX_MEMORY_SANITIZE
79353 if (gfp_flags & __GFP_ZERO)
79354 prep_zero_page(page, order, gfp_flags);
79355+#endif
79356
79357 if (order && (gfp_flags & __GFP_COMP))
79358 prep_compound_page(page, order);
79359@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
79360 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
79361 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
79362 }
79363+
79364+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79365+ sanitize_highpage(page);
79366+#endif
79367+
79368 arch_free_page(page, 0);
79369 kernel_map_pages(page, 1, 0);
79370
79371@@ -2179,6 +2196,8 @@ void show_free_areas(void)
79372 int cpu;
79373 struct zone *zone;
79374
79375+ pax_track_stack();
79376+
79377 for_each_populated_zone(zone) {
79378 show_node(zone);
79379 printk("%s per-cpu:\n", zone->name);
79380@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
79381 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
79382 }
79383 #else
79384-static void inline setup_usemap(struct pglist_data *pgdat,
79385+static inline void setup_usemap(struct pglist_data *pgdat,
79386 struct zone *zone, unsigned long zonesize) {}
79387 #endif /* CONFIG_SPARSEMEM */
79388
79389diff --git a/mm/percpu.c b/mm/percpu.c
79390index c90614a..5f7b7b8 100644
79391--- a/mm/percpu.c
79392+++ b/mm/percpu.c
79393@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
79394 static unsigned int pcpu_high_unit_cpu __read_mostly;
79395
79396 /* the address of the first chunk which starts with the kernel static area */
79397-void *pcpu_base_addr __read_mostly;
79398+void *pcpu_base_addr __read_only;
79399 EXPORT_SYMBOL_GPL(pcpu_base_addr);
79400
79401 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
79402diff --git a/mm/rmap.c b/mm/rmap.c
79403index dd43373..d848cd7 100644
79404--- a/mm/rmap.c
79405+++ b/mm/rmap.c
79406@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
79407 /* page_table_lock to protect against threads */
79408 spin_lock(&mm->page_table_lock);
79409 if (likely(!vma->anon_vma)) {
79410+
79411+#ifdef CONFIG_PAX_SEGMEXEC
79412+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
79413+
79414+ if (vma_m) {
79415+ BUG_ON(vma_m->anon_vma);
79416+ vma_m->anon_vma = anon_vma;
79417+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
79418+ }
79419+#endif
79420+
79421 vma->anon_vma = anon_vma;
79422 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
79423 allocated = NULL;
79424diff --git a/mm/shmem.c b/mm/shmem.c
79425index 3e0005b..1d659a8 100644
79426--- a/mm/shmem.c
79427+++ b/mm/shmem.c
79428@@ -31,7 +31,7 @@
79429 #include <linux/swap.h>
79430 #include <linux/ima.h>
79431
79432-static struct vfsmount *shm_mnt;
79433+struct vfsmount *shm_mnt;
79434
79435 #ifdef CONFIG_SHMEM
79436 /*
79437@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
79438 goto unlock;
79439 }
79440 entry = shmem_swp_entry(info, index, NULL);
79441+ if (!entry)
79442+ goto unlock;
79443 if (entry->val) {
79444 /*
79445 * The more uptodate page coming down from a stacked
79446@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
79447 struct vm_area_struct pvma;
79448 struct page *page;
79449
79450+ pax_track_stack();
79451+
79452 spol = mpol_cond_copy(&mpol,
79453 mpol_shared_policy_lookup(&info->policy, idx));
79454
79455@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
79456
79457 info = SHMEM_I(inode);
79458 inode->i_size = len-1;
79459- if (len <= (char *)inode - (char *)info) {
79460+ if (len <= (char *)inode - (char *)info && len <= 64) {
79461 /* do it inline */
79462 memcpy(info, symname, len);
79463 inode->i_op = &shmem_symlink_inline_operations;
79464@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
79465 int err = -ENOMEM;
79466
79467 /* Round up to L1_CACHE_BYTES to resist false sharing */
79468- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
79469- L1_CACHE_BYTES), GFP_KERNEL);
79470+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
79471 if (!sbinfo)
79472 return -ENOMEM;
79473
79474diff --git a/mm/slab.c b/mm/slab.c
79475index c8d466a..909e01e 100644
79476--- a/mm/slab.c
79477+++ b/mm/slab.c
79478@@ -174,7 +174,7 @@
79479
79480 /* Legal flag mask for kmem_cache_create(). */
79481 #if DEBUG
79482-# define CREATE_MASK (SLAB_RED_ZONE | \
79483+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
79484 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
79485 SLAB_CACHE_DMA | \
79486 SLAB_STORE_USER | \
79487@@ -182,7 +182,7 @@
79488 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79489 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
79490 #else
79491-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
79492+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
79493 SLAB_CACHE_DMA | \
79494 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
79495 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79496@@ -308,7 +308,7 @@ struct kmem_list3 {
79497 * Need this for bootstrapping a per node allocator.
79498 */
79499 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
79500-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
79501+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
79502 #define CACHE_CACHE 0
79503 #define SIZE_AC MAX_NUMNODES
79504 #define SIZE_L3 (2 * MAX_NUMNODES)
79505@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
79506 if ((x)->max_freeable < i) \
79507 (x)->max_freeable = i; \
79508 } while (0)
79509-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
79510-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
79511-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
79512-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
79513+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
79514+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
79515+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
79516+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
79517 #else
79518 #define STATS_INC_ACTIVE(x) do { } while (0)
79519 #define STATS_DEC_ACTIVE(x) do { } while (0)
79520@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
79521 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
79522 */
79523 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
79524- const struct slab *slab, void *obj)
79525+ const struct slab *slab, const void *obj)
79526 {
79527 u32 offset = (obj - slab->s_mem);
79528 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
79529@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
79530 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
79531 sizes[INDEX_AC].cs_size,
79532 ARCH_KMALLOC_MINALIGN,
79533- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79534+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79535 NULL);
79536
79537 if (INDEX_AC != INDEX_L3) {
79538@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
79539 kmem_cache_create(names[INDEX_L3].name,
79540 sizes[INDEX_L3].cs_size,
79541 ARCH_KMALLOC_MINALIGN,
79542- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79543+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79544 NULL);
79545 }
79546
79547@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
79548 sizes->cs_cachep = kmem_cache_create(names->name,
79549 sizes->cs_size,
79550 ARCH_KMALLOC_MINALIGN,
79551- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79552+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79553 NULL);
79554 }
79555 #ifdef CONFIG_ZONE_DMA
79556@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
79557 }
79558 /* cpu stats */
79559 {
79560- unsigned long allochit = atomic_read(&cachep->allochit);
79561- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
79562- unsigned long freehit = atomic_read(&cachep->freehit);
79563- unsigned long freemiss = atomic_read(&cachep->freemiss);
79564+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
79565+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
79566+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
79567+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
79568
79569 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
79570 allochit, allocmiss, freehit, freemiss);
79571@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
79572
79573 static int __init slab_proc_init(void)
79574 {
79575- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
79576+ mode_t gr_mode = S_IRUGO;
79577+
79578+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79579+ gr_mode = S_IRUSR;
79580+#endif
79581+
79582+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
79583 #ifdef CONFIG_DEBUG_SLAB_LEAK
79584- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
79585+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
79586 #endif
79587 return 0;
79588 }
79589 module_init(slab_proc_init);
79590 #endif
79591
79592+void check_object_size(const void *ptr, unsigned long n, bool to)
79593+{
79594+
79595+#ifdef CONFIG_PAX_USERCOPY
79596+ struct page *page;
79597+ struct kmem_cache *cachep = NULL;
79598+ struct slab *slabp;
79599+ unsigned int objnr;
79600+ unsigned long offset;
79601+ const char *type;
79602+
79603+ if (!n)
79604+ return;
79605+
79606+ type = "<null>";
79607+ if (ZERO_OR_NULL_PTR(ptr))
79608+ goto report;
79609+
79610+ if (!virt_addr_valid(ptr))
79611+ return;
79612+
79613+ page = virt_to_head_page(ptr);
79614+
79615+ type = "<process stack>";
79616+ if (!PageSlab(page)) {
79617+ if (object_is_on_stack(ptr, n) == -1)
79618+ goto report;
79619+ return;
79620+ }
79621+
79622+ cachep = page_get_cache(page);
79623+ type = cachep->name;
79624+ if (!(cachep->flags & SLAB_USERCOPY))
79625+ goto report;
79626+
79627+ slabp = page_get_slab(page);
79628+ objnr = obj_to_index(cachep, slabp, ptr);
79629+ BUG_ON(objnr >= cachep->num);
79630+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
79631+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
79632+ return;
79633+
79634+report:
79635+ pax_report_usercopy(ptr, n, to, type);
79636+#endif
79637+
79638+}
79639+EXPORT_SYMBOL(check_object_size);
79640+
79641 /**
79642 * ksize - get the actual amount of memory allocated for a given object
79643 * @objp: Pointer to the object
79644diff --git a/mm/slob.c b/mm/slob.c
79645index 837ebd6..0bd23bc 100644
79646--- a/mm/slob.c
79647+++ b/mm/slob.c
79648@@ -29,7 +29,7 @@
79649 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
79650 * alloc_pages() directly, allocating compound pages so the page order
79651 * does not have to be separately tracked, and also stores the exact
79652- * allocation size in page->private so that it can be used to accurately
79653+ * allocation size in slob_page->size so that it can be used to accurately
79654 * provide ksize(). These objects are detected in kfree() because slob_page()
79655 * is false for them.
79656 *
79657@@ -58,6 +58,7 @@
79658 */
79659
79660 #include <linux/kernel.h>
79661+#include <linux/sched.h>
79662 #include <linux/slab.h>
79663 #include <linux/mm.h>
79664 #include <linux/swap.h> /* struct reclaim_state */
79665@@ -100,7 +101,8 @@ struct slob_page {
79666 unsigned long flags; /* mandatory */
79667 atomic_t _count; /* mandatory */
79668 slobidx_t units; /* free units left in page */
79669- unsigned long pad[2];
79670+ unsigned long pad[1];
79671+ unsigned long size; /* size when >=PAGE_SIZE */
79672 slob_t *free; /* first free slob_t in page */
79673 struct list_head list; /* linked list of free pages */
79674 };
79675@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
79676 */
79677 static inline int is_slob_page(struct slob_page *sp)
79678 {
79679- return PageSlab((struct page *)sp);
79680+ return PageSlab((struct page *)sp) && !sp->size;
79681 }
79682
79683 static inline void set_slob_page(struct slob_page *sp)
79684@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
79685
79686 static inline struct slob_page *slob_page(const void *addr)
79687 {
79688- return (struct slob_page *)virt_to_page(addr);
79689+ return (struct slob_page *)virt_to_head_page(addr);
79690 }
79691
79692 /*
79693@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
79694 /*
79695 * Return the size of a slob block.
79696 */
79697-static slobidx_t slob_units(slob_t *s)
79698+static slobidx_t slob_units(const slob_t *s)
79699 {
79700 if (s->units > 0)
79701 return s->units;
79702@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
79703 /*
79704 * Return the next free slob block pointer after this one.
79705 */
79706-static slob_t *slob_next(slob_t *s)
79707+static slob_t *slob_next(const slob_t *s)
79708 {
79709 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
79710 slobidx_t next;
79711@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
79712 /*
79713 * Returns true if s is the last free block in its page.
79714 */
79715-static int slob_last(slob_t *s)
79716+static int slob_last(const slob_t *s)
79717 {
79718 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
79719 }
79720@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
79721 if (!page)
79722 return NULL;
79723
79724+ set_slob_page(page);
79725 return page_address(page);
79726 }
79727
79728@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
79729 if (!b)
79730 return NULL;
79731 sp = slob_page(b);
79732- set_slob_page(sp);
79733
79734 spin_lock_irqsave(&slob_lock, flags);
79735 sp->units = SLOB_UNITS(PAGE_SIZE);
79736 sp->free = b;
79737+ sp->size = 0;
79738 INIT_LIST_HEAD(&sp->list);
79739 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
79740 set_slob_page_free(sp, slob_list);
79741@@ -475,10 +478,9 @@ out:
79742 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
79743 #endif
79744
79745-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79746+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
79747 {
79748- unsigned int *m;
79749- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79750+ slob_t *m;
79751 void *ret;
79752
79753 lockdep_trace_alloc(gfp);
79754@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79755
79756 if (!m)
79757 return NULL;
79758- *m = size;
79759+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
79760+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
79761+ m[0].units = size;
79762+ m[1].units = align;
79763 ret = (void *)m + align;
79764
79765 trace_kmalloc_node(_RET_IP_, ret,
79766@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79767
79768 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
79769 if (ret) {
79770- struct page *page;
79771- page = virt_to_page(ret);
79772- page->private = size;
79773+ struct slob_page *sp;
79774+ sp = slob_page(ret);
79775+ sp->size = size;
79776 }
79777
79778 trace_kmalloc_node(_RET_IP_, ret,
79779 size, PAGE_SIZE << order, gfp, node);
79780 }
79781
79782- kmemleak_alloc(ret, size, 1, gfp);
79783+ return ret;
79784+}
79785+
79786+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79787+{
79788+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79789+ void *ret = __kmalloc_node_align(size, gfp, node, align);
79790+
79791+ if (!ZERO_OR_NULL_PTR(ret))
79792+ kmemleak_alloc(ret, size, 1, gfp);
79793 return ret;
79794 }
79795 EXPORT_SYMBOL(__kmalloc_node);
79796@@ -528,13 +542,92 @@ void kfree(const void *block)
79797 sp = slob_page(block);
79798 if (is_slob_page(sp)) {
79799 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79800- unsigned int *m = (unsigned int *)(block - align);
79801- slob_free(m, *m + align);
79802- } else
79803+ slob_t *m = (slob_t *)(block - align);
79804+ slob_free(m, m[0].units + align);
79805+ } else {
79806+ clear_slob_page(sp);
79807+ free_slob_page(sp);
79808+ sp->size = 0;
79809 put_page(&sp->page);
79810+ }
79811 }
79812 EXPORT_SYMBOL(kfree);
79813
79814+void check_object_size(const void *ptr, unsigned long n, bool to)
79815+{
79816+
79817+#ifdef CONFIG_PAX_USERCOPY
79818+ struct slob_page *sp;
79819+ const slob_t *free;
79820+ const void *base;
79821+ unsigned long flags;
79822+ const char *type;
79823+
79824+ if (!n)
79825+ return;
79826+
79827+ type = "<null>";
79828+ if (ZERO_OR_NULL_PTR(ptr))
79829+ goto report;
79830+
79831+ if (!virt_addr_valid(ptr))
79832+ return;
79833+
79834+ type = "<process stack>";
79835+ sp = slob_page(ptr);
79836+ if (!PageSlab((struct page *)sp)) {
79837+ if (object_is_on_stack(ptr, n) == -1)
79838+ goto report;
79839+ return;
79840+ }
79841+
79842+ type = "<slob>";
79843+ if (sp->size) {
79844+ base = page_address(&sp->page);
79845+ if (base <= ptr && n <= sp->size - (ptr - base))
79846+ return;
79847+ goto report;
79848+ }
79849+
79850+ /* some tricky double walking to find the chunk */
79851+ spin_lock_irqsave(&slob_lock, flags);
79852+ base = (void *)((unsigned long)ptr & PAGE_MASK);
79853+ free = sp->free;
79854+
79855+ while (!slob_last(free) && (void *)free <= ptr) {
79856+ base = free + slob_units(free);
79857+ free = slob_next(free);
79858+ }
79859+
79860+ while (base < (void *)free) {
79861+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
79862+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
79863+ int offset;
79864+
79865+ if (ptr < base + align)
79866+ break;
79867+
79868+ offset = ptr - base - align;
79869+ if (offset >= m) {
79870+ base += size;
79871+ continue;
79872+ }
79873+
79874+ if (n > m - offset)
79875+ break;
79876+
79877+ spin_unlock_irqrestore(&slob_lock, flags);
79878+ return;
79879+ }
79880+
79881+ spin_unlock_irqrestore(&slob_lock, flags);
79882+report:
79883+ pax_report_usercopy(ptr, n, to, type);
79884+#endif
79885+
79886+}
79887+EXPORT_SYMBOL(check_object_size);
79888+
79889 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
79890 size_t ksize(const void *block)
79891 {
79892@@ -547,10 +640,10 @@ size_t ksize(const void *block)
79893 sp = slob_page(block);
79894 if (is_slob_page(sp)) {
79895 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79896- unsigned int *m = (unsigned int *)(block - align);
79897- return SLOB_UNITS(*m) * SLOB_UNIT;
79898+ slob_t *m = (slob_t *)(block - align);
79899+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
79900 } else
79901- return sp->page.private;
79902+ return sp->size;
79903 }
79904 EXPORT_SYMBOL(ksize);
79905
79906@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79907 {
79908 struct kmem_cache *c;
79909
79910+#ifdef CONFIG_PAX_USERCOPY
79911+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
79912+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
79913+#else
79914 c = slob_alloc(sizeof(struct kmem_cache),
79915 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
79916+#endif
79917
79918 if (c) {
79919 c->name = name;
79920@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
79921 {
79922 void *b;
79923
79924+#ifdef CONFIG_PAX_USERCOPY
79925+ b = __kmalloc_node_align(c->size, flags, node, c->align);
79926+#else
79927 if (c->size < PAGE_SIZE) {
79928 b = slob_alloc(c->size, flags, c->align, node);
79929 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79930 SLOB_UNITS(c->size) * SLOB_UNIT,
79931 flags, node);
79932 } else {
79933+ struct slob_page *sp;
79934+
79935 b = slob_new_pages(flags, get_order(c->size), node);
79936+ sp = slob_page(b);
79937+ sp->size = c->size;
79938 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79939 PAGE_SIZE << get_order(c->size),
79940 flags, node);
79941 }
79942+#endif
79943
79944 if (c->ctor)
79945 c->ctor(b);
79946@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
79947
79948 static void __kmem_cache_free(void *b, int size)
79949 {
79950- if (size < PAGE_SIZE)
79951+ struct slob_page *sp = slob_page(b);
79952+
79953+ if (is_slob_page(sp))
79954 slob_free(b, size);
79955- else
79956+ else {
79957+ clear_slob_page(sp);
79958+ free_slob_page(sp);
79959+ sp->size = 0;
79960 slob_free_pages(b, get_order(size));
79961+ }
79962 }
79963
79964 static void kmem_rcu_free(struct rcu_head *head)
79965@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
79966
79967 void kmem_cache_free(struct kmem_cache *c, void *b)
79968 {
79969+ int size = c->size;
79970+
79971+#ifdef CONFIG_PAX_USERCOPY
79972+ if (size + c->align < PAGE_SIZE) {
79973+ size += c->align;
79974+ b -= c->align;
79975+ }
79976+#endif
79977+
79978 kmemleak_free_recursive(b, c->flags);
79979 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
79980 struct slob_rcu *slob_rcu;
79981- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
79982+ slob_rcu = b + (size - sizeof(struct slob_rcu));
79983 INIT_RCU_HEAD(&slob_rcu->head);
79984- slob_rcu->size = c->size;
79985+ slob_rcu->size = size;
79986 call_rcu(&slob_rcu->head, kmem_rcu_free);
79987 } else {
79988- __kmem_cache_free(b, c->size);
79989+ __kmem_cache_free(b, size);
79990 }
79991
79992+#ifdef CONFIG_PAX_USERCOPY
79993+ trace_kfree(_RET_IP_, b);
79994+#else
79995 trace_kmem_cache_free(_RET_IP_, b);
79996+#endif
79997+
79998 }
79999 EXPORT_SYMBOL(kmem_cache_free);
80000
80001diff --git a/mm/slub.c b/mm/slub.c
80002index 4996fc7..87e01d0 100644
80003--- a/mm/slub.c
80004+++ b/mm/slub.c
80005@@ -201,7 +201,7 @@ struct track {
80006
80007 enum track_item { TRACK_ALLOC, TRACK_FREE };
80008
80009-#ifdef CONFIG_SLUB_DEBUG
80010+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80011 static int sysfs_slab_add(struct kmem_cache *);
80012 static int sysfs_slab_alias(struct kmem_cache *, const char *);
80013 static void sysfs_slab_remove(struct kmem_cache *);
80014@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
80015 if (!t->addr)
80016 return;
80017
80018- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
80019+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
80020 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
80021 }
80022
80023@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
80024
80025 page = virt_to_head_page(x);
80026
80027+ BUG_ON(!PageSlab(page));
80028+
80029 slab_free(s, page, x, _RET_IP_);
80030
80031 trace_kmem_cache_free(_RET_IP_, x);
80032@@ -1937,7 +1939,7 @@ static int slub_min_objects;
80033 * Merge control. If this is set then no merging of slab caches will occur.
80034 * (Could be removed. This was introduced to pacify the merge skeptics.)
80035 */
80036-static int slub_nomerge;
80037+static int slub_nomerge = 1;
80038
80039 /*
80040 * Calculate the order of allocation given an slab object size.
80041@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
80042 * list to avoid pounding the page allocator excessively.
80043 */
80044 set_min_partial(s, ilog2(s->size));
80045- s->refcount = 1;
80046+ atomic_set(&s->refcount, 1);
80047 #ifdef CONFIG_NUMA
80048 s->remote_node_defrag_ratio = 1000;
80049 #endif
80050@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
80051 void kmem_cache_destroy(struct kmem_cache *s)
80052 {
80053 down_write(&slub_lock);
80054- s->refcount--;
80055- if (!s->refcount) {
80056+ if (atomic_dec_and_test(&s->refcount)) {
80057 list_del(&s->list);
80058 up_write(&slub_lock);
80059 if (kmem_cache_close(s)) {
80060@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
80061 __setup("slub_nomerge", setup_slub_nomerge);
80062
80063 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
80064- const char *name, int size, gfp_t gfp_flags)
80065+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
80066 {
80067- unsigned int flags = 0;
80068-
80069 if (gfp_flags & SLUB_DMA)
80070- flags = SLAB_CACHE_DMA;
80071+ flags |= SLAB_CACHE_DMA;
80072
80073 /*
80074 * This function is called with IRQs disabled during early-boot on
80075@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
80076 EXPORT_SYMBOL(__kmalloc_node);
80077 #endif
80078
80079+void check_object_size(const void *ptr, unsigned long n, bool to)
80080+{
80081+
80082+#ifdef CONFIG_PAX_USERCOPY
80083+ struct page *page;
80084+ struct kmem_cache *s = NULL;
80085+ unsigned long offset;
80086+ const char *type;
80087+
80088+ if (!n)
80089+ return;
80090+
80091+ type = "<null>";
80092+ if (ZERO_OR_NULL_PTR(ptr))
80093+ goto report;
80094+
80095+ if (!virt_addr_valid(ptr))
80096+ return;
80097+
80098+ page = get_object_page(ptr);
80099+
80100+ type = "<process stack>";
80101+ if (!page) {
80102+ if (object_is_on_stack(ptr, n) == -1)
80103+ goto report;
80104+ return;
80105+ }
80106+
80107+ s = page->slab;
80108+ type = s->name;
80109+ if (!(s->flags & SLAB_USERCOPY))
80110+ goto report;
80111+
80112+ offset = (ptr - page_address(page)) % s->size;
80113+ if (offset <= s->objsize && n <= s->objsize - offset)
80114+ return;
80115+
80116+report:
80117+ pax_report_usercopy(ptr, n, to, type);
80118+#endif
80119+
80120+}
80121+EXPORT_SYMBOL(check_object_size);
80122+
80123 size_t ksize(const void *object)
80124 {
80125 struct page *page;
80126@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
80127 * kmem_cache_open for slab_state == DOWN.
80128 */
80129 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
80130- sizeof(struct kmem_cache_node), GFP_NOWAIT);
80131- kmalloc_caches[0].refcount = -1;
80132+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
80133+ atomic_set(&kmalloc_caches[0].refcount, -1);
80134 caches++;
80135
80136 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
80137@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
80138 /* Caches that are not of the two-to-the-power-of size */
80139 if (KMALLOC_MIN_SIZE <= 32) {
80140 create_kmalloc_cache(&kmalloc_caches[1],
80141- "kmalloc-96", 96, GFP_NOWAIT);
80142+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
80143 caches++;
80144 }
80145 if (KMALLOC_MIN_SIZE <= 64) {
80146 create_kmalloc_cache(&kmalloc_caches[2],
80147- "kmalloc-192", 192, GFP_NOWAIT);
80148+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
80149 caches++;
80150 }
80151
80152 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
80153 create_kmalloc_cache(&kmalloc_caches[i],
80154- "kmalloc", 1 << i, GFP_NOWAIT);
80155+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
80156 caches++;
80157 }
80158
80159@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
80160 /*
80161 * We may have set a slab to be unmergeable during bootstrap.
80162 */
80163- if (s->refcount < 0)
80164+ if (atomic_read(&s->refcount) < 0)
80165 return 1;
80166
80167 return 0;
80168@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80169 if (s) {
80170 int cpu;
80171
80172- s->refcount++;
80173+ atomic_inc(&s->refcount);
80174 /*
80175 * Adjust the object sizes so that we clear
80176 * the complete object on kzalloc.
80177@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80178
80179 if (sysfs_slab_alias(s, name)) {
80180 down_write(&slub_lock);
80181- s->refcount--;
80182+ atomic_dec(&s->refcount);
80183 up_write(&slub_lock);
80184 goto err;
80185 }
80186@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
80187
80188 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
80189 {
80190- return sprintf(buf, "%d\n", s->refcount - 1);
80191+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
80192 }
80193 SLAB_ATTR_RO(aliases);
80194
80195@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
80196 kfree(s);
80197 }
80198
80199-static struct sysfs_ops slab_sysfs_ops = {
80200+static const struct sysfs_ops slab_sysfs_ops = {
80201 .show = slab_attr_show,
80202 .store = slab_attr_store,
80203 };
80204@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
80205 return 0;
80206 }
80207
80208-static struct kset_uevent_ops slab_uevent_ops = {
80209+static const struct kset_uevent_ops slab_uevent_ops = {
80210 .filter = uevent_filter,
80211 };
80212
80213@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
80214 return name;
80215 }
80216
80217+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80218 static int sysfs_slab_add(struct kmem_cache *s)
80219 {
80220 int err;
80221@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
80222 kobject_del(&s->kobj);
80223 kobject_put(&s->kobj);
80224 }
80225+#endif
80226
80227 /*
80228 * Need to buffer aliases during bootup until sysfs becomes
80229@@ -4632,6 +4677,7 @@ struct saved_alias {
80230
80231 static struct saved_alias *alias_list;
80232
80233+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80234 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80235 {
80236 struct saved_alias *al;
80237@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80238 alias_list = al;
80239 return 0;
80240 }
80241+#endif
80242
80243 static int __init slab_sysfs_init(void)
80244 {
80245@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80246
80247 static int __init slab_proc_init(void)
80248 {
80249- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80250+ mode_t gr_mode = S_IRUGO;
80251+
80252+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80253+ gr_mode = S_IRUSR;
80254+#endif
80255+
80256+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80257 return 0;
80258 }
80259 module_init(slab_proc_init);
80260diff --git a/mm/swap.c b/mm/swap.c
80261index 308e57d..5de19c0 100644
80262--- a/mm/swap.c
80263+++ b/mm/swap.c
80264@@ -30,6 +30,7 @@
80265 #include <linux/notifier.h>
80266 #include <linux/backing-dev.h>
80267 #include <linux/memcontrol.h>
80268+#include <linux/hugetlb.h>
80269
80270 #include "internal.h"
80271
80272@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80273 compound_page_dtor *dtor;
80274
80275 dtor = get_compound_page_dtor(page);
80276+ if (!PageHuge(page))
80277+ BUG_ON(dtor != free_compound_page);
80278 (*dtor)(page);
80279 }
80280 }
80281diff --git a/mm/util.c b/mm/util.c
80282index e48b493..24a601d 100644
80283--- a/mm/util.c
80284+++ b/mm/util.c
80285@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80286 void arch_pick_mmap_layout(struct mm_struct *mm)
80287 {
80288 mm->mmap_base = TASK_UNMAPPED_BASE;
80289+
80290+#ifdef CONFIG_PAX_RANDMMAP
80291+ if (mm->pax_flags & MF_PAX_RANDMMAP)
80292+ mm->mmap_base += mm->delta_mmap;
80293+#endif
80294+
80295 mm->get_unmapped_area = arch_get_unmapped_area;
80296 mm->unmap_area = arch_unmap_area;
80297 }
80298diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80299index f34ffd0..e60c44f 100644
80300--- a/mm/vmalloc.c
80301+++ b/mm/vmalloc.c
80302@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80303
80304 pte = pte_offset_kernel(pmd, addr);
80305 do {
80306- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80307- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80308+
80309+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80310+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80311+ BUG_ON(!pte_exec(*pte));
80312+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80313+ continue;
80314+ }
80315+#endif
80316+
80317+ {
80318+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80319+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80320+ }
80321 } while (pte++, addr += PAGE_SIZE, addr != end);
80322 }
80323
80324@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80325 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80326 {
80327 pte_t *pte;
80328+ int ret = -ENOMEM;
80329
80330 /*
80331 * nr is a running index into the array which helps higher level
80332@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80333 pte = pte_alloc_kernel(pmd, addr);
80334 if (!pte)
80335 return -ENOMEM;
80336+
80337+ pax_open_kernel();
80338 do {
80339 struct page *page = pages[*nr];
80340
80341- if (WARN_ON(!pte_none(*pte)))
80342- return -EBUSY;
80343- if (WARN_ON(!page))
80344- return -ENOMEM;
80345+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80346+ if (!(pgprot_val(prot) & _PAGE_NX))
80347+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
80348+ else
80349+#endif
80350+
80351+ if (WARN_ON(!pte_none(*pte))) {
80352+ ret = -EBUSY;
80353+ goto out;
80354+ }
80355+ if (WARN_ON(!page)) {
80356+ ret = -ENOMEM;
80357+ goto out;
80358+ }
80359 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
80360 (*nr)++;
80361 } while (pte++, addr += PAGE_SIZE, addr != end);
80362- return 0;
80363+ ret = 0;
80364+out:
80365+ pax_close_kernel();
80366+ return ret;
80367 }
80368
80369 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
80370@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
80371 * and fall back on vmalloc() if that fails. Others
80372 * just put it in the vmalloc space.
80373 */
80374-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
80375+#ifdef CONFIG_MODULES
80376+#ifdef MODULES_VADDR
80377 unsigned long addr = (unsigned long)x;
80378 if (addr >= MODULES_VADDR && addr < MODULES_END)
80379 return 1;
80380 #endif
80381+
80382+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80383+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
80384+ return 1;
80385+#endif
80386+
80387+#endif
80388+
80389 return is_vmalloc_addr(x);
80390 }
80391
80392@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
80393
80394 if (!pgd_none(*pgd)) {
80395 pud_t *pud = pud_offset(pgd, addr);
80396+#ifdef CONFIG_X86
80397+ if (!pud_large(*pud))
80398+#endif
80399 if (!pud_none(*pud)) {
80400 pmd_t *pmd = pmd_offset(pud, addr);
80401+#ifdef CONFIG_X86
80402+ if (!pmd_large(*pmd))
80403+#endif
80404 if (!pmd_none(*pmd)) {
80405 pte_t *ptep, pte;
80406
80407@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
80408 struct rb_node *tmp;
80409
80410 while (*p) {
80411- struct vmap_area *tmp;
80412+ struct vmap_area *varea;
80413
80414 parent = *p;
80415- tmp = rb_entry(parent, struct vmap_area, rb_node);
80416- if (va->va_start < tmp->va_end)
80417+ varea = rb_entry(parent, struct vmap_area, rb_node);
80418+ if (va->va_start < varea->va_end)
80419 p = &(*p)->rb_left;
80420- else if (va->va_end > tmp->va_start)
80421+ else if (va->va_end > varea->va_start)
80422 p = &(*p)->rb_right;
80423 else
80424 BUG();
80425@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
80426 struct vm_struct *area;
80427
80428 BUG_ON(in_interrupt());
80429+
80430+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80431+ if (flags & VM_KERNEXEC) {
80432+ if (start != VMALLOC_START || end != VMALLOC_END)
80433+ return NULL;
80434+ start = (unsigned long)MODULES_EXEC_VADDR;
80435+ end = (unsigned long)MODULES_EXEC_END;
80436+ }
80437+#endif
80438+
80439 if (flags & VM_IOREMAP) {
80440 int bit = fls(size);
80441
80442@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
80443 if (count > totalram_pages)
80444 return NULL;
80445
80446+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80447+ if (!(pgprot_val(prot) & _PAGE_NX))
80448+ flags |= VM_KERNEXEC;
80449+#endif
80450+
80451 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
80452 __builtin_return_address(0));
80453 if (!area)
80454@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80455 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
80456 return NULL;
80457
80458+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80459+ if (!(pgprot_val(prot) & _PAGE_NX))
80460+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
80461+ VMALLOC_START, VMALLOC_END, node,
80462+ gfp_mask, caller);
80463+ else
80464+#endif
80465+
80466 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
80467 VMALLOC_START, VMALLOC_END, node,
80468 gfp_mask, caller);
80469@@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80470 return addr;
80471 }
80472
80473+#undef __vmalloc
80474 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
80475 {
80476 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
80477@@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
80478 * For tight control over page level allocator and protection flags
80479 * use __vmalloc() instead.
80480 */
80481+#undef vmalloc
80482 void *vmalloc(unsigned long size)
80483 {
80484 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80485@@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
80486 * The resulting memory area is zeroed so it can be mapped to userspace
80487 * without leaking data.
80488 */
80489+#undef vmalloc_user
80490 void *vmalloc_user(unsigned long size)
80491 {
80492 struct vm_struct *area;
80493@@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
80494 * For tight control over page level allocator and protection flags
80495 * use __vmalloc() instead.
80496 */
80497+#undef vmalloc_node
80498 void *vmalloc_node(unsigned long size, int node)
80499 {
80500 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80501@@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
80502 * For tight control over page level allocator and protection flags
80503 * use __vmalloc() instead.
80504 */
80505-
80506+#undef vmalloc_exec
80507 void *vmalloc_exec(unsigned long size)
80508 {
80509- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
80510+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
80511 -1, __builtin_return_address(0));
80512 }
80513
80514@@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
80515 * Allocate enough 32bit PA addressable pages to cover @size from the
80516 * page level allocator and map them into contiguous kernel virtual space.
80517 */
80518+#undef vmalloc_32
80519 void *vmalloc_32(unsigned long size)
80520 {
80521 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
80522@@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
80523 * The resulting memory area is 32bit addressable and zeroed so it can be
80524 * mapped to userspace without leaking data.
80525 */
80526+#undef vmalloc_32_user
80527 void *vmalloc_32_user(unsigned long size)
80528 {
80529 struct vm_struct *area;
80530@@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
80531 unsigned long uaddr = vma->vm_start;
80532 unsigned long usize = vma->vm_end - vma->vm_start;
80533
80534+ BUG_ON(vma->vm_mirror);
80535+
80536 if ((PAGE_SIZE-1) & (unsigned long)addr)
80537 return -EINVAL;
80538
80539diff --git a/mm/vmstat.c b/mm/vmstat.c
80540index 42d76c6..5643dc4 100644
80541--- a/mm/vmstat.c
80542+++ b/mm/vmstat.c
80543@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
80544 *
80545 * vm_stat contains the global counters
80546 */
80547-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80548+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80549 EXPORT_SYMBOL(vm_stat);
80550
80551 #ifdef CONFIG_SMP
80552@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
80553 v = p->vm_stat_diff[i];
80554 p->vm_stat_diff[i] = 0;
80555 local_irq_restore(flags);
80556- atomic_long_add(v, &zone->vm_stat[i]);
80557+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
80558 global_diff[i] += v;
80559 #ifdef CONFIG_NUMA
80560 /* 3 seconds idle till flush */
80561@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
80562
80563 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
80564 if (global_diff[i])
80565- atomic_long_add(global_diff[i], &vm_stat[i]);
80566+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
80567 }
80568
80569 #endif
80570@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
80571 start_cpu_timer(cpu);
80572 #endif
80573 #ifdef CONFIG_PROC_FS
80574- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
80575- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
80576- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
80577- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
80578+ {
80579+ mode_t gr_mode = S_IRUGO;
80580+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80581+ gr_mode = S_IRUSR;
80582+#endif
80583+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
80584+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
80585+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
80586+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
80587+#else
80588+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
80589+#endif
80590+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
80591+ }
80592 #endif
80593 return 0;
80594 }
80595diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
80596index a29c5ab..6143f20 100644
80597--- a/net/8021q/vlan.c
80598+++ b/net/8021q/vlan.c
80599@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
80600 err = -EPERM;
80601 if (!capable(CAP_NET_ADMIN))
80602 break;
80603- if ((args.u.name_type >= 0) &&
80604- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
80605+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
80606 struct vlan_net *vn;
80607
80608 vn = net_generic(net, vlan_net_id);
80609diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
80610index a2d2984..f9eb711 100644
80611--- a/net/9p/trans_fd.c
80612+++ b/net/9p/trans_fd.c
80613@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
80614 oldfs = get_fs();
80615 set_fs(get_ds());
80616 /* The cast to a user pointer is valid due to the set_fs() */
80617- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
80618+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
80619 set_fs(oldfs);
80620
80621 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
80622diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
80623index 02cc7e7..4514f1b 100644
80624--- a/net/atm/atm_misc.c
80625+++ b/net/atm/atm_misc.c
80626@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
80627 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
80628 return 1;
80629 atm_return(vcc,truesize);
80630- atomic_inc(&vcc->stats->rx_drop);
80631+ atomic_inc_unchecked(&vcc->stats->rx_drop);
80632 return 0;
80633 }
80634
80635@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
80636 }
80637 }
80638 atm_return(vcc,guess);
80639- atomic_inc(&vcc->stats->rx_drop);
80640+ atomic_inc_unchecked(&vcc->stats->rx_drop);
80641 return NULL;
80642 }
80643
80644@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
80645
80646 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80647 {
80648-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80649+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80650 __SONET_ITEMS
80651 #undef __HANDLE_ITEM
80652 }
80653@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80654
80655 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80656 {
80657-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
80658+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
80659 __SONET_ITEMS
80660 #undef __HANDLE_ITEM
80661 }
80662diff --git a/net/atm/lec.h b/net/atm/lec.h
80663index 9d14d19..5c145f3 100644
80664--- a/net/atm/lec.h
80665+++ b/net/atm/lec.h
80666@@ -48,7 +48,7 @@ struct lane2_ops {
80667 const u8 *tlvs, u32 sizeoftlvs);
80668 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
80669 const u8 *tlvs, u32 sizeoftlvs);
80670-};
80671+} __no_const;
80672
80673 /*
80674 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
80675diff --git a/net/atm/mpc.h b/net/atm/mpc.h
80676index 0919a88..a23d54e 100644
80677--- a/net/atm/mpc.h
80678+++ b/net/atm/mpc.h
80679@@ -33,7 +33,7 @@ struct mpoa_client {
80680 struct mpc_parameters parameters; /* parameters for this client */
80681
80682 const struct net_device_ops *old_ops;
80683- struct net_device_ops new_ops;
80684+ net_device_ops_no_const new_ops;
80685 };
80686
80687
80688diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
80689index 4504a4b..1733f1e 100644
80690--- a/net/atm/mpoa_caches.c
80691+++ b/net/atm/mpoa_caches.c
80692@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
80693 struct timeval now;
80694 struct k_message msg;
80695
80696+ pax_track_stack();
80697+
80698 do_gettimeofday(&now);
80699
80700 write_lock_irq(&client->egress_lock);
80701diff --git a/net/atm/proc.c b/net/atm/proc.c
80702index ab8419a..aa91497 100644
80703--- a/net/atm/proc.c
80704+++ b/net/atm/proc.c
80705@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
80706 const struct k_atm_aal_stats *stats)
80707 {
80708 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
80709- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
80710- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
80711- atomic_read(&stats->rx_drop));
80712+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
80713+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
80714+ atomic_read_unchecked(&stats->rx_drop));
80715 }
80716
80717 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
80718@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
80719 {
80720 struct sock *sk = sk_atm(vcc);
80721
80722+#ifdef CONFIG_GRKERNSEC_HIDESYM
80723+ seq_printf(seq, "%p ", NULL);
80724+#else
80725 seq_printf(seq, "%p ", vcc);
80726+#endif
80727+
80728 if (!vcc->dev)
80729 seq_printf(seq, "Unassigned ");
80730 else
80731@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
80732 {
80733 if (!vcc->dev)
80734 seq_printf(seq, sizeof(void *) == 4 ?
80735+#ifdef CONFIG_GRKERNSEC_HIDESYM
80736+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
80737+#else
80738 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
80739+#endif
80740 else
80741 seq_printf(seq, "%3d %3d %5d ",
80742 vcc->dev->number, vcc->vpi, vcc->vci);
80743diff --git a/net/atm/resources.c b/net/atm/resources.c
80744index 56b7322..c48b84e 100644
80745--- a/net/atm/resources.c
80746+++ b/net/atm/resources.c
80747@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
80748 static void copy_aal_stats(struct k_atm_aal_stats *from,
80749 struct atm_aal_stats *to)
80750 {
80751-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80752+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80753 __AAL_STAT_ITEMS
80754 #undef __HANDLE_ITEM
80755 }
80756@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
80757 static void subtract_aal_stats(struct k_atm_aal_stats *from,
80758 struct atm_aal_stats *to)
80759 {
80760-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
80761+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
80762 __AAL_STAT_ITEMS
80763 #undef __HANDLE_ITEM
80764 }
80765diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
80766index 8567d47..bba2292 100644
80767--- a/net/bridge/br_private.h
80768+++ b/net/bridge/br_private.h
80769@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
80770
80771 #ifdef CONFIG_SYSFS
80772 /* br_sysfs_if.c */
80773-extern struct sysfs_ops brport_sysfs_ops;
80774+extern const struct sysfs_ops brport_sysfs_ops;
80775 extern int br_sysfs_addif(struct net_bridge_port *p);
80776
80777 /* br_sysfs_br.c */
80778diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
80779index 9a52ac5..c97538e 100644
80780--- a/net/bridge/br_stp_if.c
80781+++ b/net/bridge/br_stp_if.c
80782@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
80783 char *envp[] = { NULL };
80784
80785 if (br->stp_enabled == BR_USER_STP) {
80786- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
80787+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
80788 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
80789 br->dev->name, r);
80790
80791diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
80792index 820643a..ce77fb3 100644
80793--- a/net/bridge/br_sysfs_if.c
80794+++ b/net/bridge/br_sysfs_if.c
80795@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
80796 return ret;
80797 }
80798
80799-struct sysfs_ops brport_sysfs_ops = {
80800+const struct sysfs_ops brport_sysfs_ops = {
80801 .show = brport_show,
80802 .store = brport_store,
80803 };
80804diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
80805index d73d47f..72df42a 100644
80806--- a/net/bridge/netfilter/ebtables.c
80807+++ b/net/bridge/netfilter/ebtables.c
80808@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
80809 unsigned int entries_size, nentries;
80810 char *entries;
80811
80812+ pax_track_stack();
80813+
80814 if (cmd == EBT_SO_GET_ENTRIES) {
80815 entries_size = t->private->entries_size;
80816 nentries = t->private->nentries;
80817diff --git a/net/can/bcm.c b/net/can/bcm.c
80818index 2ffd2e0..72a7486 100644
80819--- a/net/can/bcm.c
80820+++ b/net/can/bcm.c
80821@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
80822 struct bcm_sock *bo = bcm_sk(sk);
80823 struct bcm_op *op;
80824
80825+#ifdef CONFIG_GRKERNSEC_HIDESYM
80826+ seq_printf(m, ">>> socket %p", NULL);
80827+ seq_printf(m, " / sk %p", NULL);
80828+ seq_printf(m, " / bo %p", NULL);
80829+#else
80830 seq_printf(m, ">>> socket %p", sk->sk_socket);
80831 seq_printf(m, " / sk %p", sk);
80832 seq_printf(m, " / bo %p", bo);
80833+#endif
80834 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
80835 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
80836 seq_printf(m, " <<<\n");
80837diff --git a/net/compat.c b/net/compat.c
80838index 9559afc..ccd74e1 100644
80839--- a/net/compat.c
80840+++ b/net/compat.c
80841@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
80842 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
80843 __get_user(kmsg->msg_flags, &umsg->msg_flags))
80844 return -EFAULT;
80845- kmsg->msg_name = compat_ptr(tmp1);
80846- kmsg->msg_iov = compat_ptr(tmp2);
80847- kmsg->msg_control = compat_ptr(tmp3);
80848+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
80849+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
80850+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
80851 return 0;
80852 }
80853
80854@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80855 kern_msg->msg_name = NULL;
80856
80857 tot_len = iov_from_user_compat_to_kern(kern_iov,
80858- (struct compat_iovec __user *)kern_msg->msg_iov,
80859+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
80860 kern_msg->msg_iovlen);
80861 if (tot_len >= 0)
80862 kern_msg->msg_iov = kern_iov;
80863@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80864
80865 #define CMSG_COMPAT_FIRSTHDR(msg) \
80866 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
80867- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
80868+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
80869 (struct compat_cmsghdr __user *)NULL)
80870
80871 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
80872 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
80873 (ucmlen) <= (unsigned long) \
80874 ((mhdr)->msg_controllen - \
80875- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
80876+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
80877
80878 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
80879 struct compat_cmsghdr __user *cmsg, int cmsg_len)
80880 {
80881 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
80882- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
80883+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
80884 msg->msg_controllen)
80885 return NULL;
80886 return (struct compat_cmsghdr __user *)ptr;
80887@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80888 {
80889 struct compat_timeval ctv;
80890 struct compat_timespec cts[3];
80891- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80892+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80893 struct compat_cmsghdr cmhdr;
80894 int cmlen;
80895
80896@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80897
80898 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
80899 {
80900- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80901+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80902 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
80903 int fdnum = scm->fp->count;
80904 struct file **fp = scm->fp->fp;
80905@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
80906 len = sizeof(ktime);
80907 old_fs = get_fs();
80908 set_fs(KERNEL_DS);
80909- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
80910+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
80911 set_fs(old_fs);
80912
80913 if (!err) {
80914@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80915 case MCAST_JOIN_GROUP:
80916 case MCAST_LEAVE_GROUP:
80917 {
80918- struct compat_group_req __user *gr32 = (void *)optval;
80919+ struct compat_group_req __user *gr32 = (void __user *)optval;
80920 struct group_req __user *kgr =
80921 compat_alloc_user_space(sizeof(struct group_req));
80922 u32 interface;
80923@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80924 case MCAST_BLOCK_SOURCE:
80925 case MCAST_UNBLOCK_SOURCE:
80926 {
80927- struct compat_group_source_req __user *gsr32 = (void *)optval;
80928+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
80929 struct group_source_req __user *kgsr = compat_alloc_user_space(
80930 sizeof(struct group_source_req));
80931 u32 interface;
80932@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80933 }
80934 case MCAST_MSFILTER:
80935 {
80936- struct compat_group_filter __user *gf32 = (void *)optval;
80937+ struct compat_group_filter __user *gf32 = (void __user *)optval;
80938 struct group_filter __user *kgf;
80939 u32 interface, fmode, numsrc;
80940
80941diff --git a/net/core/dev.c b/net/core/dev.c
80942index 84a0705..575db4c 100644
80943--- a/net/core/dev.c
80944+++ b/net/core/dev.c
80945@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
80946 if (no_module && capable(CAP_NET_ADMIN))
80947 no_module = request_module("netdev-%s", name);
80948 if (no_module && capable(CAP_SYS_MODULE)) {
80949+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80950+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
80951+#else
80952 if (!request_module("%s", name))
80953 pr_err("Loading kernel module for a network device "
80954 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
80955 "instead\n", name);
80956+#endif
80957 }
80958 }
80959 EXPORT_SYMBOL(dev_load);
80960@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
80961
80962 struct dev_gso_cb {
80963 void (*destructor)(struct sk_buff *skb);
80964-};
80965+} __no_const;
80966
80967 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
80968
80969@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
80970 }
80971 EXPORT_SYMBOL(netif_rx_ni);
80972
80973-static void net_tx_action(struct softirq_action *h)
80974+static void net_tx_action(void)
80975 {
80976 struct softnet_data *sd = &__get_cpu_var(softnet_data);
80977
80978@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
80979 EXPORT_SYMBOL(netif_napi_del);
80980
80981
80982-static void net_rx_action(struct softirq_action *h)
80983+static void net_rx_action(void)
80984 {
80985 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
80986 unsigned long time_limit = jiffies + 2;
80987diff --git a/net/core/flow.c b/net/core/flow.c
80988index 9601587..8c4824e 100644
80989--- a/net/core/flow.c
80990+++ b/net/core/flow.c
80991@@ -35,11 +35,11 @@ struct flow_cache_entry {
80992 atomic_t *object_ref;
80993 };
80994
80995-atomic_t flow_cache_genid = ATOMIC_INIT(0);
80996+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
80997
80998 static u32 flow_hash_shift;
80999 #define flow_hash_size (1 << flow_hash_shift)
81000-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
81001+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
81002
81003 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
81004
81005@@ -52,7 +52,7 @@ struct flow_percpu_info {
81006 u32 hash_rnd;
81007 int count;
81008 };
81009-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
81010+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
81011
81012 #define flow_hash_rnd_recalc(cpu) \
81013 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
81014@@ -69,7 +69,7 @@ struct flow_flush_info {
81015 atomic_t cpuleft;
81016 struct completion completion;
81017 };
81018-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
81019+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
81020
81021 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
81022
81023@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
81024 if (fle->family == family &&
81025 fle->dir == dir &&
81026 flow_key_compare(key, &fle->key) == 0) {
81027- if (fle->genid == atomic_read(&flow_cache_genid)) {
81028+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
81029 void *ret = fle->object;
81030
81031 if (ret)
81032@@ -228,7 +228,7 @@ nocache:
81033 err = resolver(net, key, family, dir, &obj, &obj_ref);
81034
81035 if (fle && !err) {
81036- fle->genid = atomic_read(&flow_cache_genid);
81037+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
81038
81039 if (fle->object)
81040 atomic_dec(fle->object_ref);
81041@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
81042
81043 fle = flow_table(cpu)[i];
81044 for (; fle; fle = fle->next) {
81045- unsigned genid = atomic_read(&flow_cache_genid);
81046+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
81047
81048 if (!fle->object || fle->genid == genid)
81049 continue;
81050diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
81051index d4fd895..ac9b1e6 100644
81052--- a/net/core/rtnetlink.c
81053+++ b/net/core/rtnetlink.c
81054@@ -57,7 +57,7 @@ struct rtnl_link
81055 {
81056 rtnl_doit_func doit;
81057 rtnl_dumpit_func dumpit;
81058-};
81059+} __no_const;
81060
81061 static DEFINE_MUTEX(rtnl_mutex);
81062
81063diff --git a/net/core/scm.c b/net/core/scm.c
81064index d98eafc..1a190a9 100644
81065--- a/net/core/scm.c
81066+++ b/net/core/scm.c
81067@@ -191,7 +191,7 @@ error:
81068 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81069 {
81070 struct cmsghdr __user *cm
81071- = (__force struct cmsghdr __user *)msg->msg_control;
81072+ = (struct cmsghdr __force_user *)msg->msg_control;
81073 struct cmsghdr cmhdr;
81074 int cmlen = CMSG_LEN(len);
81075 int err;
81076@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
81077 err = -EFAULT;
81078 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
81079 goto out;
81080- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
81081+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
81082 goto out;
81083 cmlen = CMSG_SPACE(len);
81084 if (msg->msg_controllen < cmlen)
81085@@ -229,7 +229,7 @@ out:
81086 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81087 {
81088 struct cmsghdr __user *cm
81089- = (__force struct cmsghdr __user*)msg->msg_control;
81090+ = (struct cmsghdr __force_user *)msg->msg_control;
81091
81092 int fdmax = 0;
81093 int fdnum = scm->fp->count;
81094@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81095 if (fdnum < fdmax)
81096 fdmax = fdnum;
81097
81098- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
81099+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
81100 i++, cmfptr++)
81101 {
81102 int new_fd;
81103diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
81104index 45329d7..626aaa6 100644
81105--- a/net/core/secure_seq.c
81106+++ b/net/core/secure_seq.c
81107@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
81108 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
81109
81110 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81111- __be16 dport)
81112+ __be16 dport)
81113 {
81114 u32 secret[MD5_MESSAGE_BYTES / 4];
81115 u32 hash[MD5_DIGEST_WORDS];
81116@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81117 secret[i] = net_secret[i];
81118
81119 md5_transform(hash, secret);
81120-
81121 return hash[0];
81122 }
81123 #endif
81124diff --git a/net/core/skbuff.c b/net/core/skbuff.c
81125index 025f924..70a71c4 100644
81126--- a/net/core/skbuff.c
81127+++ b/net/core/skbuff.c
81128@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
81129 struct sk_buff *frag_iter;
81130 struct sock *sk = skb->sk;
81131
81132+ pax_track_stack();
81133+
81134 /*
81135 * __skb_splice_bits() only fails if the output has no room left,
81136 * so no point in going over the frag_list for the error case.
81137diff --git a/net/core/sock.c b/net/core/sock.c
81138index 6605e75..3acebda 100644
81139--- a/net/core/sock.c
81140+++ b/net/core/sock.c
81141@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
81142 break;
81143
81144 case SO_PEERCRED:
81145+ {
81146+ struct ucred peercred;
81147 if (len > sizeof(sk->sk_peercred))
81148 len = sizeof(sk->sk_peercred);
81149- if (copy_to_user(optval, &sk->sk_peercred, len))
81150+ peercred = sk->sk_peercred;
81151+ if (copy_to_user(optval, &peercred, len))
81152 return -EFAULT;
81153 goto lenout;
81154+ }
81155
81156 case SO_PEERNAME:
81157 {
81158@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
81159 */
81160 smp_wmb();
81161 atomic_set(&sk->sk_refcnt, 1);
81162- atomic_set(&sk->sk_drops, 0);
81163+ atomic_set_unchecked(&sk->sk_drops, 0);
81164 }
81165 EXPORT_SYMBOL(sock_init_data);
81166
81167diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
81168index 2036568..c55883d 100644
81169--- a/net/decnet/sysctl_net_decnet.c
81170+++ b/net/decnet/sysctl_net_decnet.c
81171@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
81172
81173 if (len > *lenp) len = *lenp;
81174
81175- if (copy_to_user(buffer, addr, len))
81176+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
81177 return -EFAULT;
81178
81179 *lenp = len;
81180@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
81181
81182 if (len > *lenp) len = *lenp;
81183
81184- if (copy_to_user(buffer, devname, len))
81185+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
81186 return -EFAULT;
81187
81188 *lenp = len;
81189diff --git a/net/econet/Kconfig b/net/econet/Kconfig
81190index 39a2d29..f39c0fe 100644
81191--- a/net/econet/Kconfig
81192+++ b/net/econet/Kconfig
81193@@ -4,7 +4,7 @@
81194
81195 config ECONET
81196 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
81197- depends on EXPERIMENTAL && INET
81198+ depends on EXPERIMENTAL && INET && BROKEN
81199 ---help---
81200 Econet is a fairly old and slow networking protocol mainly used by
81201 Acorn computers to access file and print servers. It uses native
81202diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
81203index a413b1b..380849c 100644
81204--- a/net/ieee802154/dgram.c
81205+++ b/net/ieee802154/dgram.c
81206@@ -318,7 +318,7 @@ out:
81207 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
81208 {
81209 if (sock_queue_rcv_skb(sk, skb) < 0) {
81210- atomic_inc(&sk->sk_drops);
81211+ atomic_inc_unchecked(&sk->sk_drops);
81212 kfree_skb(skb);
81213 return NET_RX_DROP;
81214 }
81215diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
81216index 30e74ee..bfc6ee0 100644
81217--- a/net/ieee802154/raw.c
81218+++ b/net/ieee802154/raw.c
81219@@ -206,7 +206,7 @@ out:
81220 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81221 {
81222 if (sock_queue_rcv_skb(sk, skb) < 0) {
81223- atomic_inc(&sk->sk_drops);
81224+ atomic_inc_unchecked(&sk->sk_drops);
81225 kfree_skb(skb);
81226 return NET_RX_DROP;
81227 }
81228diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
81229index dba56d2..acee5d6 100644
81230--- a/net/ipv4/inet_diag.c
81231+++ b/net/ipv4/inet_diag.c
81232@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
81233 r->idiag_retrans = 0;
81234
81235 r->id.idiag_if = sk->sk_bound_dev_if;
81236+#ifdef CONFIG_GRKERNSEC_HIDESYM
81237+ r->id.idiag_cookie[0] = 0;
81238+ r->id.idiag_cookie[1] = 0;
81239+#else
81240 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
81241 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
81242+#endif
81243
81244 r->id.idiag_sport = inet->sport;
81245 r->id.idiag_dport = inet->dport;
81246@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81247 r->idiag_family = tw->tw_family;
81248 r->idiag_retrans = 0;
81249 r->id.idiag_if = tw->tw_bound_dev_if;
81250+
81251+#ifdef CONFIG_GRKERNSEC_HIDESYM
81252+ r->id.idiag_cookie[0] = 0;
81253+ r->id.idiag_cookie[1] = 0;
81254+#else
81255 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81256 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81257+#endif
81258+
81259 r->id.idiag_sport = tw->tw_sport;
81260 r->id.idiag_dport = tw->tw_dport;
81261 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81262@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81263 if (sk == NULL)
81264 goto unlock;
81265
81266+#ifndef CONFIG_GRKERNSEC_HIDESYM
81267 err = -ESTALE;
81268 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81269 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81270 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81271 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81272 goto out;
81273+#endif
81274
81275 err = -ENOMEM;
81276 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81277@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81278 r->idiag_retrans = req->retrans;
81279
81280 r->id.idiag_if = sk->sk_bound_dev_if;
81281+
81282+#ifdef CONFIG_GRKERNSEC_HIDESYM
81283+ r->id.idiag_cookie[0] = 0;
81284+ r->id.idiag_cookie[1] = 0;
81285+#else
81286 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81287 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81288+#endif
81289
81290 tmo = req->expires - jiffies;
81291 if (tmo < 0)
81292diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81293index d717267..56de7e7 100644
81294--- a/net/ipv4/inet_hashtables.c
81295+++ b/net/ipv4/inet_hashtables.c
81296@@ -18,12 +18,15 @@
81297 #include <linux/sched.h>
81298 #include <linux/slab.h>
81299 #include <linux/wait.h>
81300+#include <linux/security.h>
81301
81302 #include <net/inet_connection_sock.h>
81303 #include <net/inet_hashtables.h>
81304 #include <net/secure_seq.h>
81305 #include <net/ip.h>
81306
81307+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81308+
81309 /*
81310 * Allocate and initialize a new local port bind bucket.
81311 * The bindhash mutex for snum's hash chain must be held here.
81312@@ -491,6 +494,8 @@ ok:
81313 }
81314 spin_unlock(&head->lock);
81315
81316+ gr_update_task_in_ip_table(current, inet_sk(sk));
81317+
81318 if (tw) {
81319 inet_twsk_deschedule(tw, death_row);
81320 inet_twsk_put(tw);
81321diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81322index 13b229f..6956484 100644
81323--- a/net/ipv4/inetpeer.c
81324+++ b/net/ipv4/inetpeer.c
81325@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81326 struct inet_peer *p, *n;
81327 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81328
81329+ pax_track_stack();
81330+
81331 /* Look up for the address quickly. */
81332 read_lock_bh(&peer_pool_lock);
81333 p = lookup(daddr, NULL);
81334@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81335 return NULL;
81336 n->v4daddr = daddr;
81337 atomic_set(&n->refcnt, 1);
81338- atomic_set(&n->rid, 0);
81339+ atomic_set_unchecked(&n->rid, 0);
81340 n->ip_id_count = secure_ip_id(daddr);
81341 n->tcp_ts_stamp = 0;
81342
81343diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81344index d3fe10b..feeafc9 100644
81345--- a/net/ipv4/ip_fragment.c
81346+++ b/net/ipv4/ip_fragment.c
81347@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
81348 return 0;
81349
81350 start = qp->rid;
81351- end = atomic_inc_return(&peer->rid);
81352+ end = atomic_inc_return_unchecked(&peer->rid);
81353 qp->rid = end;
81354
81355 rc = qp->q.fragments && (end - start) > max;
81356diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
81357index e982b5c..f079d75 100644
81358--- a/net/ipv4/ip_sockglue.c
81359+++ b/net/ipv4/ip_sockglue.c
81360@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81361 int val;
81362 int len;
81363
81364+ pax_track_stack();
81365+
81366 if (level != SOL_IP)
81367 return -EOPNOTSUPP;
81368
81369@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81370 if (sk->sk_type != SOCK_STREAM)
81371 return -ENOPROTOOPT;
81372
81373- msg.msg_control = optval;
81374+ msg.msg_control = (void __force_kernel *)optval;
81375 msg.msg_controllen = len;
81376 msg.msg_flags = 0;
81377
81378diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
81379index f8d04c2..c1188f2 100644
81380--- a/net/ipv4/ipconfig.c
81381+++ b/net/ipv4/ipconfig.c
81382@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
81383
81384 mm_segment_t oldfs = get_fs();
81385 set_fs(get_ds());
81386- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81387+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81388 set_fs(oldfs);
81389 return res;
81390 }
81391@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
81392
81393 mm_segment_t oldfs = get_fs();
81394 set_fs(get_ds());
81395- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81396+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81397 set_fs(oldfs);
81398 return res;
81399 }
81400@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
81401
81402 mm_segment_t oldfs = get_fs();
81403 set_fs(get_ds());
81404- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
81405+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
81406 set_fs(oldfs);
81407 return res;
81408 }
81409diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
81410index c8b0cc3..4da5ae2 100644
81411--- a/net/ipv4/netfilter/arp_tables.c
81412+++ b/net/ipv4/netfilter/arp_tables.c
81413@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81414 private = &tmp;
81415 }
81416 #endif
81417+ memset(&info, 0, sizeof(info));
81418 info.valid_hooks = t->valid_hooks;
81419 memcpy(info.hook_entry, private->hook_entry,
81420 sizeof(info.hook_entry));
81421diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
81422index c156db2..e772975 100644
81423--- a/net/ipv4/netfilter/ip_queue.c
81424+++ b/net/ipv4/netfilter/ip_queue.c
81425@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81426
81427 if (v->data_len < sizeof(*user_iph))
81428 return 0;
81429+ if (v->data_len > 65535)
81430+ return -EMSGSIZE;
81431+
81432 diff = v->data_len - e->skb->len;
81433 if (diff < 0) {
81434 if (pskb_trim(e->skb, v->data_len))
81435@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
81436 static inline void
81437 __ipq_rcv_skb(struct sk_buff *skb)
81438 {
81439- int status, type, pid, flags, nlmsglen, skblen;
81440+ int status, type, pid, flags;
81441+ unsigned int nlmsglen, skblen;
81442 struct nlmsghdr *nlh;
81443
81444 skblen = skb->len;
81445diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
81446index 0606db1..02e7e4c 100644
81447--- a/net/ipv4/netfilter/ip_tables.c
81448+++ b/net/ipv4/netfilter/ip_tables.c
81449@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81450 private = &tmp;
81451 }
81452 #endif
81453+ memset(&info, 0, sizeof(info));
81454 info.valid_hooks = t->valid_hooks;
81455 memcpy(info.hook_entry, private->hook_entry,
81456 sizeof(info.hook_entry));
81457diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81458index d9521f6..3c3eb25 100644
81459--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
81460+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81461@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
81462
81463 *len = 0;
81464
81465- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
81466+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
81467 if (*octets == NULL) {
81468 if (net_ratelimit())
81469 printk("OOM in bsalg (%d)\n", __LINE__);
81470diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
81471index ab996f9..3da5f96 100644
81472--- a/net/ipv4/raw.c
81473+++ b/net/ipv4/raw.c
81474@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81475 /* Charge it to the socket. */
81476
81477 if (sock_queue_rcv_skb(sk, skb) < 0) {
81478- atomic_inc(&sk->sk_drops);
81479+ atomic_inc_unchecked(&sk->sk_drops);
81480 kfree_skb(skb);
81481 return NET_RX_DROP;
81482 }
81483@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81484 int raw_rcv(struct sock *sk, struct sk_buff *skb)
81485 {
81486 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
81487- atomic_inc(&sk->sk_drops);
81488+ atomic_inc_unchecked(&sk->sk_drops);
81489 kfree_skb(skb);
81490 return NET_RX_DROP;
81491 }
81492@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
81493
81494 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
81495 {
81496+ struct icmp_filter filter;
81497+
81498+ if (optlen < 0)
81499+ return -EINVAL;
81500 if (optlen > sizeof(struct icmp_filter))
81501 optlen = sizeof(struct icmp_filter);
81502- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
81503+ if (copy_from_user(&filter, optval, optlen))
81504 return -EFAULT;
81505+ raw_sk(sk)->filter = filter;
81506+
81507 return 0;
81508 }
81509
81510 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
81511 {
81512 int len, ret = -EFAULT;
81513+ struct icmp_filter filter;
81514
81515 if (get_user(len, optlen))
81516 goto out;
81517@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
81518 if (len > sizeof(struct icmp_filter))
81519 len = sizeof(struct icmp_filter);
81520 ret = -EFAULT;
81521- if (put_user(len, optlen) ||
81522- copy_to_user(optval, &raw_sk(sk)->filter, len))
81523+ filter = raw_sk(sk)->filter;
81524+ if (put_user(len, optlen) || len > sizeof filter ||
81525+ copy_to_user(optval, &filter, len))
81526 goto out;
81527 ret = 0;
81528 out: return ret;
81529@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81530 sk_wmem_alloc_get(sp),
81531 sk_rmem_alloc_get(sp),
81532 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81533- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81534+ atomic_read(&sp->sk_refcnt),
81535+#ifdef CONFIG_GRKERNSEC_HIDESYM
81536+ NULL,
81537+#else
81538+ sp,
81539+#endif
81540+ atomic_read_unchecked(&sp->sk_drops));
81541 }
81542
81543 static int raw_seq_show(struct seq_file *seq, void *v)
81544diff --git a/net/ipv4/route.c b/net/ipv4/route.c
81545index 58f141b..b759702 100644
81546--- a/net/ipv4/route.c
81547+++ b/net/ipv4/route.c
81548@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
81549
81550 static inline int rt_genid(struct net *net)
81551 {
81552- return atomic_read(&net->ipv4.rt_genid);
81553+ return atomic_read_unchecked(&net->ipv4.rt_genid);
81554 }
81555
81556 #ifdef CONFIG_PROC_FS
81557@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
81558 unsigned char shuffle;
81559
81560 get_random_bytes(&shuffle, sizeof(shuffle));
81561- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
81562+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
81563 }
81564
81565 /*
81566@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
81567
81568 static __net_init int rt_secret_timer_init(struct net *net)
81569 {
81570- atomic_set(&net->ipv4.rt_genid,
81571+ atomic_set_unchecked(&net->ipv4.rt_genid,
81572 (int) ((num_physpages ^ (num_physpages>>8)) ^
81573 (jiffies ^ (jiffies >> 7))));
81574
81575diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
81576index f095659..adc892a 100644
81577--- a/net/ipv4/tcp.c
81578+++ b/net/ipv4/tcp.c
81579@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
81580 int val;
81581 int err = 0;
81582
81583+ pax_track_stack();
81584+
81585 /* This is a string value all the others are int's */
81586 if (optname == TCP_CONGESTION) {
81587 char name[TCP_CA_NAME_MAX];
81588@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
81589 struct tcp_sock *tp = tcp_sk(sk);
81590 int val, len;
81591
81592+ pax_track_stack();
81593+
81594 if (get_user(len, optlen))
81595 return -EFAULT;
81596
81597diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
81598index 6fc7961..33bad4a 100644
81599--- a/net/ipv4/tcp_ipv4.c
81600+++ b/net/ipv4/tcp_ipv4.c
81601@@ -85,6 +85,9 @@
81602 int sysctl_tcp_tw_reuse __read_mostly;
81603 int sysctl_tcp_low_latency __read_mostly;
81604
81605+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81606+extern int grsec_enable_blackhole;
81607+#endif
81608
81609 #ifdef CONFIG_TCP_MD5SIG
81610 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
81611@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
81612 return 0;
81613
81614 reset:
81615+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81616+ if (!grsec_enable_blackhole)
81617+#endif
81618 tcp_v4_send_reset(rsk, skb);
81619 discard:
81620 kfree_skb(skb);
81621@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
81622 TCP_SKB_CB(skb)->sacked = 0;
81623
81624 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81625- if (!sk)
81626+ if (!sk) {
81627+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81628+ ret = 1;
81629+#endif
81630 goto no_tcp_socket;
81631+ }
81632
81633 process:
81634- if (sk->sk_state == TCP_TIME_WAIT)
81635+ if (sk->sk_state == TCP_TIME_WAIT) {
81636+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81637+ ret = 2;
81638+#endif
81639 goto do_time_wait;
81640+ }
81641
81642 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
81643 goto discard_and_relse;
81644@@ -1651,6 +1665,10 @@ no_tcp_socket:
81645 bad_packet:
81646 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81647 } else {
81648+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81649+ if (!grsec_enable_blackhole || (ret == 1 &&
81650+ (skb->dev->flags & IFF_LOOPBACK)))
81651+#endif
81652 tcp_v4_send_reset(NULL, skb);
81653 }
81654
81655@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
81656 0, /* non standard timer */
81657 0, /* open_requests have no inode */
81658 atomic_read(&sk->sk_refcnt),
81659+#ifdef CONFIG_GRKERNSEC_HIDESYM
81660+ NULL,
81661+#else
81662 req,
81663+#endif
81664 len);
81665 }
81666
81667@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
81668 sock_i_uid(sk),
81669 icsk->icsk_probes_out,
81670 sock_i_ino(sk),
81671- atomic_read(&sk->sk_refcnt), sk,
81672+ atomic_read(&sk->sk_refcnt),
81673+#ifdef CONFIG_GRKERNSEC_HIDESYM
81674+ NULL,
81675+#else
81676+ sk,
81677+#endif
81678 jiffies_to_clock_t(icsk->icsk_rto),
81679 jiffies_to_clock_t(icsk->icsk_ack.ato),
81680 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
81681@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
81682 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
81683 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
81684 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
81685- atomic_read(&tw->tw_refcnt), tw, len);
81686+ atomic_read(&tw->tw_refcnt),
81687+#ifdef CONFIG_GRKERNSEC_HIDESYM
81688+ NULL,
81689+#else
81690+ tw,
81691+#endif
81692+ len);
81693 }
81694
81695 #define TMPSZ 150
81696diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
81697index 4c03598..e09a8e8 100644
81698--- a/net/ipv4/tcp_minisocks.c
81699+++ b/net/ipv4/tcp_minisocks.c
81700@@ -26,6 +26,10 @@
81701 #include <net/inet_common.h>
81702 #include <net/xfrm.h>
81703
81704+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81705+extern int grsec_enable_blackhole;
81706+#endif
81707+
81708 #ifdef CONFIG_SYSCTL
81709 #define SYNC_INIT 0 /* let the user enable it */
81710 #else
81711@@ -672,6 +676,10 @@ listen_overflow:
81712
81713 embryonic_reset:
81714 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
81715+
81716+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81717+ if (!grsec_enable_blackhole)
81718+#endif
81719 if (!(flg & TCP_FLAG_RST))
81720 req->rsk_ops->send_reset(sk, skb);
81721
81722diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
81723index af83bdf..ec91cb2 100644
81724--- a/net/ipv4/tcp_output.c
81725+++ b/net/ipv4/tcp_output.c
81726@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
81727 __u8 *md5_hash_location;
81728 int mss;
81729
81730+ pax_track_stack();
81731+
81732 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
81733 if (skb == NULL)
81734 return NULL;
81735diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
81736index 59f5b5e..193860f 100644
81737--- a/net/ipv4/tcp_probe.c
81738+++ b/net/ipv4/tcp_probe.c
81739@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
81740 if (cnt + width >= len)
81741 break;
81742
81743- if (copy_to_user(buf + cnt, tbuf, width))
81744+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
81745 return -EFAULT;
81746 cnt += width;
81747 }
81748diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
81749index 57d5501..a9ed13a 100644
81750--- a/net/ipv4/tcp_timer.c
81751+++ b/net/ipv4/tcp_timer.c
81752@@ -21,6 +21,10 @@
81753 #include <linux/module.h>
81754 #include <net/tcp.h>
81755
81756+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81757+extern int grsec_lastack_retries;
81758+#endif
81759+
81760 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
81761 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
81762 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
81763@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
81764 }
81765 }
81766
81767+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81768+ if ((sk->sk_state == TCP_LAST_ACK) &&
81769+ (grsec_lastack_retries > 0) &&
81770+ (grsec_lastack_retries < retry_until))
81771+ retry_until = grsec_lastack_retries;
81772+#endif
81773+
81774 if (retransmits_timed_out(sk, retry_until)) {
81775 /* Has it gone just too far? */
81776 tcp_write_err(sk);
81777diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
81778index 8e28770..72105c8 100644
81779--- a/net/ipv4/udp.c
81780+++ b/net/ipv4/udp.c
81781@@ -86,6 +86,7 @@
81782 #include <linux/types.h>
81783 #include <linux/fcntl.h>
81784 #include <linux/module.h>
81785+#include <linux/security.h>
81786 #include <linux/socket.h>
81787 #include <linux/sockios.h>
81788 #include <linux/igmp.h>
81789@@ -106,6 +107,10 @@
81790 #include <net/xfrm.h>
81791 #include "udp_impl.h"
81792
81793+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81794+extern int grsec_enable_blackhole;
81795+#endif
81796+
81797 struct udp_table udp_table;
81798 EXPORT_SYMBOL(udp_table);
81799
81800@@ -371,6 +376,9 @@ found:
81801 return s;
81802 }
81803
81804+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
81805+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
81806+
81807 /*
81808 * This routine is called by the ICMP module when it gets some
81809 * sort of error condition. If err < 0 then the socket should
81810@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
81811 dport = usin->sin_port;
81812 if (dport == 0)
81813 return -EINVAL;
81814+
81815+ err = gr_search_udp_sendmsg(sk, usin);
81816+ if (err)
81817+ return err;
81818 } else {
81819 if (sk->sk_state != TCP_ESTABLISHED)
81820 return -EDESTADDRREQ;
81821+
81822+ err = gr_search_udp_sendmsg(sk, NULL);
81823+ if (err)
81824+ return err;
81825+
81826 daddr = inet->daddr;
81827 dport = inet->dport;
81828 /* Open fast path for connected socket.
81829@@ -945,6 +962,10 @@ try_again:
81830 if (!skb)
81831 goto out;
81832
81833+ err = gr_search_udp_recvmsg(sk, skb);
81834+ if (err)
81835+ goto out_free;
81836+
81837 ulen = skb->len - sizeof(struct udphdr);
81838 copied = len;
81839 if (copied > ulen)
81840@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
81841 if (rc == -ENOMEM) {
81842 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
81843 is_udplite);
81844- atomic_inc(&sk->sk_drops);
81845+ atomic_inc_unchecked(&sk->sk_drops);
81846 }
81847 goto drop;
81848 }
81849@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81850 goto csum_error;
81851
81852 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
81853+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81854+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81855+#endif
81856 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
81857
81858 /*
81859@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
81860 sk_wmem_alloc_get(sp),
81861 sk_rmem_alloc_get(sp),
81862 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81863- atomic_read(&sp->sk_refcnt), sp,
81864- atomic_read(&sp->sk_drops), len);
81865+ atomic_read(&sp->sk_refcnt),
81866+#ifdef CONFIG_GRKERNSEC_HIDESYM
81867+ NULL,
81868+#else
81869+ sp,
81870+#endif
81871+ atomic_read_unchecked(&sp->sk_drops), len);
81872 }
81873
81874 int udp4_seq_show(struct seq_file *seq, void *v)
81875diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
81876index 8ac3d09..fc58c5f 100644
81877--- a/net/ipv6/addrconf.c
81878+++ b/net/ipv6/addrconf.c
81879@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
81880 p.iph.ihl = 5;
81881 p.iph.protocol = IPPROTO_IPV6;
81882 p.iph.ttl = 64;
81883- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
81884+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
81885
81886 if (ops->ndo_do_ioctl) {
81887 mm_segment_t oldfs = get_fs();
81888diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
81889index cc4797d..7cfdfcc 100644
81890--- a/net/ipv6/inet6_connection_sock.c
81891+++ b/net/ipv6/inet6_connection_sock.c
81892@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
81893 #ifdef CONFIG_XFRM
81894 {
81895 struct rt6_info *rt = (struct rt6_info *)dst;
81896- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
81897+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
81898 }
81899 #endif
81900 }
81901@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
81902 #ifdef CONFIG_XFRM
81903 if (dst) {
81904 struct rt6_info *rt = (struct rt6_info *)dst;
81905- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
81906+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
81907 sk->sk_dst_cache = NULL;
81908 dst_release(dst);
81909 dst = NULL;
81910diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
81911index 093e9b2..f72cddb 100644
81912--- a/net/ipv6/inet6_hashtables.c
81913+++ b/net/ipv6/inet6_hashtables.c
81914@@ -119,7 +119,7 @@ out:
81915 }
81916 EXPORT_SYMBOL(__inet6_lookup_established);
81917
81918-static int inline compute_score(struct sock *sk, struct net *net,
81919+static inline int compute_score(struct sock *sk, struct net *net,
81920 const unsigned short hnum,
81921 const struct in6_addr *daddr,
81922 const int dif)
81923diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
81924index 4f7aaf6..f7acf45 100644
81925--- a/net/ipv6/ipv6_sockglue.c
81926+++ b/net/ipv6/ipv6_sockglue.c
81927@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
81928 int val, valbool;
81929 int retv = -ENOPROTOOPT;
81930
81931+ pax_track_stack();
81932+
81933 if (optval == NULL)
81934 val=0;
81935 else {
81936@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81937 int len;
81938 int val;
81939
81940+ pax_track_stack();
81941+
81942 if (ip6_mroute_opt(optname))
81943 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
81944
81945@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81946 if (sk->sk_type != SOCK_STREAM)
81947 return -ENOPROTOOPT;
81948
81949- msg.msg_control = optval;
81950+ msg.msg_control = (void __force_kernel *)optval;
81951 msg.msg_controllen = len;
81952 msg.msg_flags = 0;
81953
81954diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
81955index 1cf3f0c..1d4376f 100644
81956--- a/net/ipv6/netfilter/ip6_queue.c
81957+++ b/net/ipv6/netfilter/ip6_queue.c
81958@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81959
81960 if (v->data_len < sizeof(*user_iph))
81961 return 0;
81962+ if (v->data_len > 65535)
81963+ return -EMSGSIZE;
81964+
81965 diff = v->data_len - e->skb->len;
81966 if (diff < 0) {
81967 if (pskb_trim(e->skb, v->data_len))
81968@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
81969 static inline void
81970 __ipq_rcv_skb(struct sk_buff *skb)
81971 {
81972- int status, type, pid, flags, nlmsglen, skblen;
81973+ int status, type, pid, flags;
81974+ unsigned int nlmsglen, skblen;
81975 struct nlmsghdr *nlh;
81976
81977 skblen = skb->len;
81978diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
81979index 78b5a36..7f37433 100644
81980--- a/net/ipv6/netfilter/ip6_tables.c
81981+++ b/net/ipv6/netfilter/ip6_tables.c
81982@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81983 private = &tmp;
81984 }
81985 #endif
81986+ memset(&info, 0, sizeof(info));
81987 info.valid_hooks = t->valid_hooks;
81988 memcpy(info.hook_entry, private->hook_entry,
81989 sizeof(info.hook_entry));
81990diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
81991index 4f24570..b813b34 100644
81992--- a/net/ipv6/raw.c
81993+++ b/net/ipv6/raw.c
81994@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
81995 {
81996 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
81997 skb_checksum_complete(skb)) {
81998- atomic_inc(&sk->sk_drops);
81999+ atomic_inc_unchecked(&sk->sk_drops);
82000 kfree_skb(skb);
82001 return NET_RX_DROP;
82002 }
82003
82004 /* Charge it to the socket. */
82005 if (sock_queue_rcv_skb(sk,skb)<0) {
82006- atomic_inc(&sk->sk_drops);
82007+ atomic_inc_unchecked(&sk->sk_drops);
82008 kfree_skb(skb);
82009 return NET_RX_DROP;
82010 }
82011@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82012 struct raw6_sock *rp = raw6_sk(sk);
82013
82014 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
82015- atomic_inc(&sk->sk_drops);
82016+ atomic_inc_unchecked(&sk->sk_drops);
82017 kfree_skb(skb);
82018 return NET_RX_DROP;
82019 }
82020@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
82021
82022 if (inet->hdrincl) {
82023 if (skb_checksum_complete(skb)) {
82024- atomic_inc(&sk->sk_drops);
82025+ atomic_inc_unchecked(&sk->sk_drops);
82026 kfree_skb(skb);
82027 return NET_RX_DROP;
82028 }
82029@@ -518,7 +518,7 @@ csum_copy_err:
82030 as some normal condition.
82031 */
82032 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
82033- atomic_inc(&sk->sk_drops);
82034+ atomic_inc_unchecked(&sk->sk_drops);
82035 goto out;
82036 }
82037
82038@@ -600,7 +600,7 @@ out:
82039 return err;
82040 }
82041
82042-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
82043+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
82044 struct flowi *fl, struct rt6_info *rt,
82045 unsigned int flags)
82046 {
82047@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
82048 u16 proto;
82049 int err;
82050
82051+ pax_track_stack();
82052+
82053 /* Rough check on arithmetic overflow,
82054 better check is made in ip6_append_data().
82055 */
82056@@ -916,12 +918,17 @@ do_confirm:
82057 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
82058 char __user *optval, int optlen)
82059 {
82060+ struct icmp6_filter filter;
82061+
82062 switch (optname) {
82063 case ICMPV6_FILTER:
82064+ if (optlen < 0)
82065+ return -EINVAL;
82066 if (optlen > sizeof(struct icmp6_filter))
82067 optlen = sizeof(struct icmp6_filter);
82068- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
82069+ if (copy_from_user(&filter, optval, optlen))
82070 return -EFAULT;
82071+ raw6_sk(sk)->filter = filter;
82072 return 0;
82073 default:
82074 return -ENOPROTOOPT;
82075@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82076 char __user *optval, int __user *optlen)
82077 {
82078 int len;
82079+ struct icmp6_filter filter;
82080
82081 switch (optname) {
82082 case ICMPV6_FILTER:
82083@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
82084 len = sizeof(struct icmp6_filter);
82085 if (put_user(len, optlen))
82086 return -EFAULT;
82087- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
82088+ filter = raw6_sk(sk)->filter;
82089+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
82090 return -EFAULT;
82091 return 0;
82092 default:
82093@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82094 0, 0L, 0,
82095 sock_i_uid(sp), 0,
82096 sock_i_ino(sp),
82097- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82098+ atomic_read(&sp->sk_refcnt),
82099+#ifdef CONFIG_GRKERNSEC_HIDESYM
82100+ NULL,
82101+#else
82102+ sp,
82103+#endif
82104+ atomic_read_unchecked(&sp->sk_drops));
82105 }
82106
82107 static int raw6_seq_show(struct seq_file *seq, void *v)
82108diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
82109index faae6df..d4430c1 100644
82110--- a/net/ipv6/tcp_ipv6.c
82111+++ b/net/ipv6/tcp_ipv6.c
82112@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
82113 }
82114 #endif
82115
82116+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82117+extern int grsec_enable_blackhole;
82118+#endif
82119+
82120 static void tcp_v6_hash(struct sock *sk)
82121 {
82122 if (sk->sk_state != TCP_CLOSE) {
82123@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
82124 return 0;
82125
82126 reset:
82127+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82128+ if (!grsec_enable_blackhole)
82129+#endif
82130 tcp_v6_send_reset(sk, skb);
82131 discard:
82132 if (opt_skb)
82133@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
82134 TCP_SKB_CB(skb)->sacked = 0;
82135
82136 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82137- if (!sk)
82138+ if (!sk) {
82139+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82140+ ret = 1;
82141+#endif
82142 goto no_tcp_socket;
82143+ }
82144
82145 process:
82146- if (sk->sk_state == TCP_TIME_WAIT)
82147+ if (sk->sk_state == TCP_TIME_WAIT) {
82148+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82149+ ret = 2;
82150+#endif
82151 goto do_time_wait;
82152+ }
82153
82154 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
82155 goto discard_and_relse;
82156@@ -1701,6 +1716,10 @@ no_tcp_socket:
82157 bad_packet:
82158 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82159 } else {
82160+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82161+ if (!grsec_enable_blackhole || (ret == 1 &&
82162+ (skb->dev->flags & IFF_LOOPBACK)))
82163+#endif
82164 tcp_v6_send_reset(NULL, skb);
82165 }
82166
82167@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
82168 uid,
82169 0, /* non standard timer */
82170 0, /* open_requests have no inode */
82171- 0, req);
82172+ 0,
82173+#ifdef CONFIG_GRKERNSEC_HIDESYM
82174+ NULL
82175+#else
82176+ req
82177+#endif
82178+ );
82179 }
82180
82181 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82182@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82183 sock_i_uid(sp),
82184 icsk->icsk_probes_out,
82185 sock_i_ino(sp),
82186- atomic_read(&sp->sk_refcnt), sp,
82187+ atomic_read(&sp->sk_refcnt),
82188+#ifdef CONFIG_GRKERNSEC_HIDESYM
82189+ NULL,
82190+#else
82191+ sp,
82192+#endif
82193 jiffies_to_clock_t(icsk->icsk_rto),
82194 jiffies_to_clock_t(icsk->icsk_ack.ato),
82195 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
82196@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
82197 dest->s6_addr32[2], dest->s6_addr32[3], destp,
82198 tw->tw_substate, 0, 0,
82199 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82200- atomic_read(&tw->tw_refcnt), tw);
82201+ atomic_read(&tw->tw_refcnt),
82202+#ifdef CONFIG_GRKERNSEC_HIDESYM
82203+ NULL
82204+#else
82205+ tw
82206+#endif
82207+ );
82208 }
82209
82210 static int tcp6_seq_show(struct seq_file *seq, void *v)
82211diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
82212index 9cc6289..052c521 100644
82213--- a/net/ipv6/udp.c
82214+++ b/net/ipv6/udp.c
82215@@ -49,6 +49,10 @@
82216 #include <linux/seq_file.h>
82217 #include "udp_impl.h"
82218
82219+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82220+extern int grsec_enable_blackhole;
82221+#endif
82222+
82223 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
82224 {
82225 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
82226@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
82227 if (rc == -ENOMEM) {
82228 UDP6_INC_STATS_BH(sock_net(sk),
82229 UDP_MIB_RCVBUFERRORS, is_udplite);
82230- atomic_inc(&sk->sk_drops);
82231+ atomic_inc_unchecked(&sk->sk_drops);
82232 }
82233 goto drop;
82234 }
82235@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82236 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
82237 proto == IPPROTO_UDPLITE);
82238
82239+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82240+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82241+#endif
82242 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82243
82244 kfree_skb(skb);
82245@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82246 0, 0L, 0,
82247 sock_i_uid(sp), 0,
82248 sock_i_ino(sp),
82249- atomic_read(&sp->sk_refcnt), sp,
82250- atomic_read(&sp->sk_drops));
82251+ atomic_read(&sp->sk_refcnt),
82252+#ifdef CONFIG_GRKERNSEC_HIDESYM
82253+ NULL,
82254+#else
82255+ sp,
82256+#endif
82257+ atomic_read_unchecked(&sp->sk_drops));
82258 }
82259
82260 int udp6_seq_show(struct seq_file *seq, void *v)
82261diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82262index 811984d..11f59b7 100644
82263--- a/net/irda/ircomm/ircomm_tty.c
82264+++ b/net/irda/ircomm/ircomm_tty.c
82265@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82266 add_wait_queue(&self->open_wait, &wait);
82267
82268 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82269- __FILE__,__LINE__, tty->driver->name, self->open_count );
82270+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82271
82272 /* As far as I can see, we protect open_count - Jean II */
82273 spin_lock_irqsave(&self->spinlock, flags);
82274 if (!tty_hung_up_p(filp)) {
82275 extra_count = 1;
82276- self->open_count--;
82277+ local_dec(&self->open_count);
82278 }
82279 spin_unlock_irqrestore(&self->spinlock, flags);
82280- self->blocked_open++;
82281+ local_inc(&self->blocked_open);
82282
82283 while (1) {
82284 if (tty->termios->c_cflag & CBAUD) {
82285@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82286 }
82287
82288 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82289- __FILE__,__LINE__, tty->driver->name, self->open_count );
82290+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82291
82292 schedule();
82293 }
82294@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82295 if (extra_count) {
82296 /* ++ is not atomic, so this should be protected - Jean II */
82297 spin_lock_irqsave(&self->spinlock, flags);
82298- self->open_count++;
82299+ local_inc(&self->open_count);
82300 spin_unlock_irqrestore(&self->spinlock, flags);
82301 }
82302- self->blocked_open--;
82303+ local_dec(&self->blocked_open);
82304
82305 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82306- __FILE__,__LINE__, tty->driver->name, self->open_count);
82307+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82308
82309 if (!retval)
82310 self->flags |= ASYNC_NORMAL_ACTIVE;
82311@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82312 }
82313 /* ++ is not atomic, so this should be protected - Jean II */
82314 spin_lock_irqsave(&self->spinlock, flags);
82315- self->open_count++;
82316+ local_inc(&self->open_count);
82317
82318 tty->driver_data = self;
82319 self->tty = tty;
82320 spin_unlock_irqrestore(&self->spinlock, flags);
82321
82322 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82323- self->line, self->open_count);
82324+ self->line, local_read(&self->open_count));
82325
82326 /* Not really used by us, but lets do it anyway */
82327 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82328@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82329 return;
82330 }
82331
82332- if ((tty->count == 1) && (self->open_count != 1)) {
82333+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82334 /*
82335 * Uh, oh. tty->count is 1, which means that the tty
82336 * structure will be freed. state->count should always
82337@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82338 */
82339 IRDA_DEBUG(0, "%s(), bad serial port count; "
82340 "tty->count is 1, state->count is %d\n", __func__ ,
82341- self->open_count);
82342- self->open_count = 1;
82343+ local_read(&self->open_count));
82344+ local_set(&self->open_count, 1);
82345 }
82346
82347- if (--self->open_count < 0) {
82348+ if (local_dec_return(&self->open_count) < 0) {
82349 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
82350- __func__, self->line, self->open_count);
82351- self->open_count = 0;
82352+ __func__, self->line, local_read(&self->open_count));
82353+ local_set(&self->open_count, 0);
82354 }
82355- if (self->open_count) {
82356+ if (local_read(&self->open_count)) {
82357 spin_unlock_irqrestore(&self->spinlock, flags);
82358
82359 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
82360@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82361 tty->closing = 0;
82362 self->tty = NULL;
82363
82364- if (self->blocked_open) {
82365+ if (local_read(&self->blocked_open)) {
82366 if (self->close_delay)
82367 schedule_timeout_interruptible(self->close_delay);
82368 wake_up_interruptible(&self->open_wait);
82369@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
82370 spin_lock_irqsave(&self->spinlock, flags);
82371 self->flags &= ~ASYNC_NORMAL_ACTIVE;
82372 self->tty = NULL;
82373- self->open_count = 0;
82374+ local_set(&self->open_count, 0);
82375 spin_unlock_irqrestore(&self->spinlock, flags);
82376
82377 wake_up_interruptible(&self->open_wait);
82378@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
82379 seq_putc(m, '\n');
82380
82381 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
82382- seq_printf(m, "Open count: %d\n", self->open_count);
82383+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
82384 seq_printf(m, "Max data size: %d\n", self->max_data_size);
82385 seq_printf(m, "Max header size: %d\n", self->max_header_size);
82386
82387diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
82388index bada1b9..f325943 100644
82389--- a/net/iucv/af_iucv.c
82390+++ b/net/iucv/af_iucv.c
82391@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
82392
82393 write_lock_bh(&iucv_sk_list.lock);
82394
82395- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
82396+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82397 while (__iucv_get_sock_by_name(name)) {
82398 sprintf(name, "%08x",
82399- atomic_inc_return(&iucv_sk_list.autobind_name));
82400+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82401 }
82402
82403 write_unlock_bh(&iucv_sk_list.lock);
82404diff --git a/net/key/af_key.c b/net/key/af_key.c
82405index 4e98193..439b449 100644
82406--- a/net/key/af_key.c
82407+++ b/net/key/af_key.c
82408@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
82409 struct xfrm_migrate m[XFRM_MAX_DEPTH];
82410 struct xfrm_kmaddress k;
82411
82412+ pax_track_stack();
82413+
82414 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
82415 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
82416 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
82417@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
82418 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
82419 else
82420 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
82421+#ifdef CONFIG_GRKERNSEC_HIDESYM
82422+ NULL,
82423+#else
82424 s,
82425+#endif
82426 atomic_read(&s->sk_refcnt),
82427 sk_rmem_alloc_get(s),
82428 sk_wmem_alloc_get(s),
82429diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
82430index bda96d1..c038b72 100644
82431--- a/net/lapb/lapb_iface.c
82432+++ b/net/lapb/lapb_iface.c
82433@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
82434 goto out;
82435
82436 lapb->dev = dev;
82437- lapb->callbacks = *callbacks;
82438+ lapb->callbacks = callbacks;
82439
82440 __lapb_insert_cb(lapb);
82441
82442@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
82443
82444 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
82445 {
82446- if (lapb->callbacks.connect_confirmation)
82447- lapb->callbacks.connect_confirmation(lapb->dev, reason);
82448+ if (lapb->callbacks->connect_confirmation)
82449+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
82450 }
82451
82452 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
82453 {
82454- if (lapb->callbacks.connect_indication)
82455- lapb->callbacks.connect_indication(lapb->dev, reason);
82456+ if (lapb->callbacks->connect_indication)
82457+ lapb->callbacks->connect_indication(lapb->dev, reason);
82458 }
82459
82460 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
82461 {
82462- if (lapb->callbacks.disconnect_confirmation)
82463- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
82464+ if (lapb->callbacks->disconnect_confirmation)
82465+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
82466 }
82467
82468 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
82469 {
82470- if (lapb->callbacks.disconnect_indication)
82471- lapb->callbacks.disconnect_indication(lapb->dev, reason);
82472+ if (lapb->callbacks->disconnect_indication)
82473+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
82474 }
82475
82476 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
82477 {
82478- if (lapb->callbacks.data_indication)
82479- return lapb->callbacks.data_indication(lapb->dev, skb);
82480+ if (lapb->callbacks->data_indication)
82481+ return lapb->callbacks->data_indication(lapb->dev, skb);
82482
82483 kfree_skb(skb);
82484 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
82485@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
82486 {
82487 int used = 0;
82488
82489- if (lapb->callbacks.data_transmit) {
82490- lapb->callbacks.data_transmit(lapb->dev, skb);
82491+ if (lapb->callbacks->data_transmit) {
82492+ lapb->callbacks->data_transmit(lapb->dev, skb);
82493 used = 1;
82494 }
82495
82496diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
82497index fe2d3f8..e57f683 100644
82498--- a/net/mac80211/cfg.c
82499+++ b/net/mac80211/cfg.c
82500@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
82501 return err;
82502 }
82503
82504-struct cfg80211_ops mac80211_config_ops = {
82505+const struct cfg80211_ops mac80211_config_ops = {
82506 .add_virtual_intf = ieee80211_add_iface,
82507 .del_virtual_intf = ieee80211_del_iface,
82508 .change_virtual_intf = ieee80211_change_iface,
82509diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
82510index 7d7879f..2d51f62 100644
82511--- a/net/mac80211/cfg.h
82512+++ b/net/mac80211/cfg.h
82513@@ -4,6 +4,6 @@
82514 #ifndef __CFG_H
82515 #define __CFG_H
82516
82517-extern struct cfg80211_ops mac80211_config_ops;
82518+extern const struct cfg80211_ops mac80211_config_ops;
82519
82520 #endif /* __CFG_H */
82521diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
82522index 99c7525..9cb4937 100644
82523--- a/net/mac80211/debugfs_key.c
82524+++ b/net/mac80211/debugfs_key.c
82525@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
82526 size_t count, loff_t *ppos)
82527 {
82528 struct ieee80211_key *key = file->private_data;
82529- int i, res, bufsize = 2 * key->conf.keylen + 2;
82530+ int i, bufsize = 2 * key->conf.keylen + 2;
82531 char *buf = kmalloc(bufsize, GFP_KERNEL);
82532 char *p = buf;
82533+ ssize_t res;
82534+
82535+ if (buf == NULL)
82536+ return -ENOMEM;
82537
82538 for (i = 0; i < key->conf.keylen; i++)
82539 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
82540diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
82541index 33a2e89..08650c8 100644
82542--- a/net/mac80211/debugfs_sta.c
82543+++ b/net/mac80211/debugfs_sta.c
82544@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
82545 int i;
82546 struct sta_info *sta = file->private_data;
82547
82548+ pax_track_stack();
82549+
82550 spin_lock_bh(&sta->lock);
82551 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
82552 sta->ampdu_mlme.dialog_token_allocator + 1);
82553diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
82554index ca62bfe..6657a03 100644
82555--- a/net/mac80211/ieee80211_i.h
82556+++ b/net/mac80211/ieee80211_i.h
82557@@ -25,6 +25,7 @@
82558 #include <linux/etherdevice.h>
82559 #include <net/cfg80211.h>
82560 #include <net/mac80211.h>
82561+#include <asm/local.h>
82562 #include "key.h"
82563 #include "sta_info.h"
82564
82565@@ -635,7 +636,7 @@ struct ieee80211_local {
82566 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
82567 spinlock_t queue_stop_reason_lock;
82568
82569- int open_count;
82570+ local_t open_count;
82571 int monitors, cooked_mntrs;
82572 /* number of interfaces with corresponding FIF_ flags */
82573 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
82574diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
82575index 079c500..eb3c6d4 100644
82576--- a/net/mac80211/iface.c
82577+++ b/net/mac80211/iface.c
82578@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
82579 break;
82580 }
82581
82582- if (local->open_count == 0) {
82583+ if (local_read(&local->open_count) == 0) {
82584 res = drv_start(local);
82585 if (res)
82586 goto err_del_bss;
82587@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
82588 * Validate the MAC address for this device.
82589 */
82590 if (!is_valid_ether_addr(dev->dev_addr)) {
82591- if (!local->open_count)
82592+ if (!local_read(&local->open_count))
82593 drv_stop(local);
82594 return -EADDRNOTAVAIL;
82595 }
82596@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
82597
82598 hw_reconf_flags |= __ieee80211_recalc_idle(local);
82599
82600- local->open_count++;
82601+ local_inc(&local->open_count);
82602 if (hw_reconf_flags) {
82603 ieee80211_hw_config(local, hw_reconf_flags);
82604 /*
82605@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
82606 err_del_interface:
82607 drv_remove_interface(local, &conf);
82608 err_stop:
82609- if (!local->open_count)
82610+ if (!local_read(&local->open_count))
82611 drv_stop(local);
82612 err_del_bss:
82613 sdata->bss = NULL;
82614@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
82615 WARN_ON(!list_empty(&sdata->u.ap.vlans));
82616 }
82617
82618- local->open_count--;
82619+ local_dec(&local->open_count);
82620
82621 switch (sdata->vif.type) {
82622 case NL80211_IFTYPE_AP_VLAN:
82623@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
82624
82625 ieee80211_recalc_ps(local, -1);
82626
82627- if (local->open_count == 0) {
82628+ if (local_read(&local->open_count) == 0) {
82629 ieee80211_clear_tx_pending(local);
82630 ieee80211_stop_device(local);
82631
82632diff --git a/net/mac80211/main.c b/net/mac80211/main.c
82633index 2dfe176..74e4388 100644
82634--- a/net/mac80211/main.c
82635+++ b/net/mac80211/main.c
82636@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
82637 local->hw.conf.power_level = power;
82638 }
82639
82640- if (changed && local->open_count) {
82641+ if (changed && local_read(&local->open_count)) {
82642 ret = drv_config(local, changed);
82643 /*
82644 * Goal:
82645diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
82646index e67eea7..fcc227e 100644
82647--- a/net/mac80211/mlme.c
82648+++ b/net/mac80211/mlme.c
82649@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
82650 bool have_higher_than_11mbit = false, newsta = false;
82651 u16 ap_ht_cap_flags;
82652
82653+ pax_track_stack();
82654+
82655 /*
82656 * AssocResp and ReassocResp have identical structure, so process both
82657 * of them in this function.
82658diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
82659index e535f1c..4d733d1 100644
82660--- a/net/mac80211/pm.c
82661+++ b/net/mac80211/pm.c
82662@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
82663 }
82664
82665 /* stop hardware - this must stop RX */
82666- if (local->open_count)
82667+ if (local_read(&local->open_count))
82668 ieee80211_stop_device(local);
82669
82670 local->suspended = true;
82671diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
82672index b33efc4..0a2efb6 100644
82673--- a/net/mac80211/rate.c
82674+++ b/net/mac80211/rate.c
82675@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
82676 struct rate_control_ref *ref, *old;
82677
82678 ASSERT_RTNL();
82679- if (local->open_count)
82680+ if (local_read(&local->open_count))
82681 return -EBUSY;
82682
82683 ref = rate_control_alloc(name, local);
82684diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
82685index b1d7904..57e4da7 100644
82686--- a/net/mac80211/tx.c
82687+++ b/net/mac80211/tx.c
82688@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
82689 return cpu_to_le16(dur);
82690 }
82691
82692-static int inline is_ieee80211_device(struct ieee80211_local *local,
82693+static inline int is_ieee80211_device(struct ieee80211_local *local,
82694 struct net_device *dev)
82695 {
82696 return local == wdev_priv(dev->ieee80211_ptr);
82697diff --git a/net/mac80211/util.c b/net/mac80211/util.c
82698index 31b1085..48fb26d 100644
82699--- a/net/mac80211/util.c
82700+++ b/net/mac80211/util.c
82701@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
82702 local->resuming = true;
82703
82704 /* restart hardware */
82705- if (local->open_count) {
82706+ if (local_read(&local->open_count)) {
82707 /*
82708 * Upon resume hardware can sometimes be goofy due to
82709 * various platform / driver / bus issues, so restarting
82710diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
82711index 634d14a..b35a608 100644
82712--- a/net/netfilter/Kconfig
82713+++ b/net/netfilter/Kconfig
82714@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
82715
82716 To compile it as a module, choose M here. If unsure, say N.
82717
82718+config NETFILTER_XT_MATCH_GRADM
82719+ tristate '"gradm" match support'
82720+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
82721+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
82722+ ---help---
82723+ The gradm match allows to match on grsecurity RBAC being enabled.
82724+ It is useful when iptables rules are applied early on bootup to
82725+ prevent connections to the machine (except from a trusted host)
82726+ while the RBAC system is disabled.
82727+
82728 config NETFILTER_XT_MATCH_HASHLIMIT
82729 tristate '"hashlimit" match support'
82730 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
82731diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
82732index 49f62ee..a17b2c6 100644
82733--- a/net/netfilter/Makefile
82734+++ b/net/netfilter/Makefile
82735@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
82736 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
82737 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
82738 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
82739+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
82740 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
82741 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
82742 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
82743diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
82744index 3c7e427..724043c 100644
82745--- a/net/netfilter/ipvs/ip_vs_app.c
82746+++ b/net/netfilter/ipvs/ip_vs_app.c
82747@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
82748 .open = ip_vs_app_open,
82749 .read = seq_read,
82750 .llseek = seq_lseek,
82751- .release = seq_release,
82752+ .release = seq_release_net,
82753 };
82754 #endif
82755
82756diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
82757index 95682e5..457dbac 100644
82758--- a/net/netfilter/ipvs/ip_vs_conn.c
82759+++ b/net/netfilter/ipvs/ip_vs_conn.c
82760@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
82761 /* if the connection is not template and is created
82762 * by sync, preserve the activity flag.
82763 */
82764- cp->flags |= atomic_read(&dest->conn_flags) &
82765+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
82766 (~IP_VS_CONN_F_INACTIVE);
82767 else
82768- cp->flags |= atomic_read(&dest->conn_flags);
82769+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
82770 cp->dest = dest;
82771
82772 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
82773@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
82774 atomic_set(&cp->refcnt, 1);
82775
82776 atomic_set(&cp->n_control, 0);
82777- atomic_set(&cp->in_pkts, 0);
82778+ atomic_set_unchecked(&cp->in_pkts, 0);
82779
82780 atomic_inc(&ip_vs_conn_count);
82781 if (flags & IP_VS_CONN_F_NO_CPORT)
82782@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
82783 .open = ip_vs_conn_open,
82784 .read = seq_read,
82785 .llseek = seq_lseek,
82786- .release = seq_release,
82787+ .release = seq_release_net,
82788 };
82789
82790 static const char *ip_vs_origin_name(unsigned flags)
82791@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
82792 .open = ip_vs_conn_sync_open,
82793 .read = seq_read,
82794 .llseek = seq_lseek,
82795- .release = seq_release,
82796+ .release = seq_release_net,
82797 };
82798
82799 #endif
82800@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
82801
82802 /* Don't drop the entry if its number of incoming packets is not
82803 located in [0, 8] */
82804- i = atomic_read(&cp->in_pkts);
82805+ i = atomic_read_unchecked(&cp->in_pkts);
82806 if (i > 8 || i < 0) return 0;
82807
82808 if (!todrop_rate[i]) return 0;
82809diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
82810index b95699f..5fee919 100644
82811--- a/net/netfilter/ipvs/ip_vs_core.c
82812+++ b/net/netfilter/ipvs/ip_vs_core.c
82813@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
82814 ret = cp->packet_xmit(skb, cp, pp);
82815 /* do not touch skb anymore */
82816
82817- atomic_inc(&cp->in_pkts);
82818+ atomic_inc_unchecked(&cp->in_pkts);
82819 ip_vs_conn_put(cp);
82820 return ret;
82821 }
82822@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
82823 * Sync connection if it is about to close to
82824 * encorage the standby servers to update the connections timeout
82825 */
82826- pkts = atomic_add_return(1, &cp->in_pkts);
82827+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
82828 if (af == AF_INET &&
82829 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
82830 (((cp->protocol != IPPROTO_TCP ||
82831diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
82832index 02b2610..2d89424 100644
82833--- a/net/netfilter/ipvs/ip_vs_ctl.c
82834+++ b/net/netfilter/ipvs/ip_vs_ctl.c
82835@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
82836 ip_vs_rs_hash(dest);
82837 write_unlock_bh(&__ip_vs_rs_lock);
82838 }
82839- atomic_set(&dest->conn_flags, conn_flags);
82840+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
82841
82842 /* bind the service */
82843 if (!dest->svc) {
82844@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82845 " %-7s %-6d %-10d %-10d\n",
82846 &dest->addr.in6,
82847 ntohs(dest->port),
82848- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82849+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82850 atomic_read(&dest->weight),
82851 atomic_read(&dest->activeconns),
82852 atomic_read(&dest->inactconns));
82853@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82854 "%-7s %-6d %-10d %-10d\n",
82855 ntohl(dest->addr.ip),
82856 ntohs(dest->port),
82857- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82858+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82859 atomic_read(&dest->weight),
82860 atomic_read(&dest->activeconns),
82861 atomic_read(&dest->inactconns));
82862@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
82863 .open = ip_vs_info_open,
82864 .read = seq_read,
82865 .llseek = seq_lseek,
82866- .release = seq_release_private,
82867+ .release = seq_release_net,
82868 };
82869
82870 #endif
82871@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
82872 .open = ip_vs_stats_seq_open,
82873 .read = seq_read,
82874 .llseek = seq_lseek,
82875- .release = single_release,
82876+ .release = single_release_net,
82877 };
82878
82879 #endif
82880@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
82881
82882 entry.addr = dest->addr.ip;
82883 entry.port = dest->port;
82884- entry.conn_flags = atomic_read(&dest->conn_flags);
82885+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
82886 entry.weight = atomic_read(&dest->weight);
82887 entry.u_threshold = dest->u_threshold;
82888 entry.l_threshold = dest->l_threshold;
82889@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
82890 unsigned char arg[128];
82891 int ret = 0;
82892
82893+ pax_track_stack();
82894+
82895 if (!capable(CAP_NET_ADMIN))
82896 return -EPERM;
82897
82898@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
82899 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
82900
82901 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
82902- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82903+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82904 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
82905 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
82906 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
82907diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
82908index e177f0d..55e8581 100644
82909--- a/net/netfilter/ipvs/ip_vs_sync.c
82910+++ b/net/netfilter/ipvs/ip_vs_sync.c
82911@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
82912
82913 if (opt)
82914 memcpy(&cp->in_seq, opt, sizeof(*opt));
82915- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82916+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82917 cp->state = state;
82918 cp->old_state = cp->state;
82919 /*
82920diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
82921index 30b3189..e2e4b55 100644
82922--- a/net/netfilter/ipvs/ip_vs_xmit.c
82923+++ b/net/netfilter/ipvs/ip_vs_xmit.c
82924@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
82925 else
82926 rc = NF_ACCEPT;
82927 /* do not touch skb anymore */
82928- atomic_inc(&cp->in_pkts);
82929+ atomic_inc_unchecked(&cp->in_pkts);
82930 goto out;
82931 }
82932
82933@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
82934 else
82935 rc = NF_ACCEPT;
82936 /* do not touch skb anymore */
82937- atomic_inc(&cp->in_pkts);
82938+ atomic_inc_unchecked(&cp->in_pkts);
82939 goto out;
82940 }
82941
82942diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
82943index d521718..d0fd7a1 100644
82944--- a/net/netfilter/nf_conntrack_netlink.c
82945+++ b/net/netfilter/nf_conntrack_netlink.c
82946@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
82947 static int
82948 ctnetlink_parse_tuple(const struct nlattr * const cda[],
82949 struct nf_conntrack_tuple *tuple,
82950- enum ctattr_tuple type, u_int8_t l3num)
82951+ enum ctattr_type type, u_int8_t l3num)
82952 {
82953 struct nlattr *tb[CTA_TUPLE_MAX+1];
82954 int err;
82955diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
82956index f900dc3..5e45346 100644
82957--- a/net/netfilter/nfnetlink_log.c
82958+++ b/net/netfilter/nfnetlink_log.c
82959@@ -68,7 +68,7 @@ struct nfulnl_instance {
82960 };
82961
82962 static DEFINE_RWLOCK(instances_lock);
82963-static atomic_t global_seq;
82964+static atomic_unchecked_t global_seq;
82965
82966 #define INSTANCE_BUCKETS 16
82967 static struct hlist_head instance_table[INSTANCE_BUCKETS];
82968@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
82969 /* global sequence number */
82970 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
82971 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
82972- htonl(atomic_inc_return(&global_seq)));
82973+ htonl(atomic_inc_return_unchecked(&global_seq)));
82974
82975 if (data_len) {
82976 struct nlattr *nla;
82977diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
82978new file mode 100644
82979index 0000000..b1bac76
82980--- /dev/null
82981+++ b/net/netfilter/xt_gradm.c
82982@@ -0,0 +1,51 @@
82983+/*
82984+ * gradm match for netfilter
82985