]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.2.2-2.6.32.56-201202102034.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.2.2-2.6.32.56-201202102034.patch
CommitLineData
43359f88
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..47f0daf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9+*.cis
10 *.cpio
11 *.csp
12+*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18+*.gcno
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *_MODULES
32+*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36@@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40+GPATH
41+GRTAGS
42+GSYMS
43+GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49+PERF*
50 SCCS
51 System.map*
52 TAGS
53@@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57+capability_names.h
58+capflags.c
59 classlist.h*
60+clut_vga16.c
61+common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65@@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69+config.c
70+config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74@@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78+gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90+initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103+mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110+mkpiggy
111 mkprep
112+mkregtable
113 mktables
114 mktree
115 modpost
116@@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120+piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124@@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128+regdb.c
129 relocs
130+rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152+vmlinux.bin.all
153+vmlinux.bin.bz2
154 vmlinux.lds
155+vmlinux.relocs
156+voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zoffset.h
169diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170index c840e7d..f4c451c 100644
171--- a/Documentation/kernel-parameters.txt
172+++ b/Documentation/kernel-parameters.txt
173@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178+ virtualization environments that don't cope well with the
179+ expand down segment used by UDEREF on X86-32 or the frequent
180+ page table updates on X86-64.
181+
182+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183+
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187diff --git a/Makefile b/Makefile
188index 81ad738..cbdaeb0 100644
189--- a/Makefile
190+++ b/Makefile
191@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196-HOSTCXXFLAGS = -O2
197+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207-PHONY += scripts_basic
208-scripts_basic:
209+PHONY += scripts_basic gcc-plugins
210+scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214@@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218- cscope TAGS tags help %docs check% \
219+ cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223@@ -526,6 +527,46 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227+ifndef DISABLE_PAX_PLUGINS
228+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231+endif
232+ifdef CONFIG_PAX_MEMORY_STACKLEAK
233+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235+endif
236+ifdef CONFIG_KALLOCSTAT_PLUGIN
237+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238+endif
239+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
242+endif
243+ifdef CONFIG_CHECKER_PLUGIN
244+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246+endif
247+endif
248+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250+ifeq ($(KBUILD_EXTMOD),)
251+gcc-plugins:
252+ $(Q)$(MAKE) $(build)=tools/gcc
253+else
254+gcc-plugins: ;
255+endif
256+else
257+gcc-plugins:
258+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
259+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
260+else
261+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
262+endif
263+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
264+endif
265+endif
266+
267 include $(srctree)/arch/$(SRCARCH)/Makefile
268
269 ifneq ($(CONFIG_FRAME_WARN),0)
270@@ -647,7 +688,7 @@ export mod_strip_cmd
271
272
273 ifeq ($(KBUILD_EXTMOD),)
274-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
275+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
276
277 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
278 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
279@@ -868,6 +909,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
280
281 # The actual objects are generated when descending,
282 # make sure no implicit rule kicks in
283+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
284 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
285
286 # Handle descending into subdirectories listed in $(vmlinux-dirs)
287@@ -877,7 +919,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288 # Error messages still appears in the original language
289
290 PHONY += $(vmlinux-dirs)
291-$(vmlinux-dirs): prepare scripts
292+$(vmlinux-dirs): gcc-plugins prepare scripts
293 $(Q)$(MAKE) $(build)=$@
294
295 # Build the kernel release string
296@@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
297 $(Q)$(MAKE) $(build)=. missing-syscalls
298
299 # All the preparing..
300+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
301 prepare: prepare0
302
303 # The asm symlink changes when $(ARCH) changes.
304@@ -1127,6 +1170,7 @@ all: modules
305 # using awk while concatenating to the final file.
306
307 PHONY += modules
308+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
309 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
310 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
311 @$(kecho) ' Building modules, stage 2.';
312@@ -1136,7 +1180,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
313
314 # Target to prepare building external modules
315 PHONY += modules_prepare
316-modules_prepare: prepare scripts
317+modules_prepare: gcc-plugins prepare scripts
318
319 # Target to install modules
320 PHONY += modules_install
321@@ -1201,7 +1245,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
322 include/linux/autoconf.h include/linux/version.h \
323 include/linux/utsrelease.h \
324 include/linux/bounds.h include/asm*/asm-offsets.h \
325- Module.symvers Module.markers tags TAGS cscope*
326+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
327
328 # clean - Delete most, but leave enough to build external modules
329 #
330@@ -1245,7 +1289,7 @@ distclean: mrproper
331 @find $(srctree) $(RCS_FIND_IGNORE) \
332 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
333 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
334- -o -name '.*.rej' -o -size 0 \
335+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
336 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
337 -type f -print | xargs rm -f
338
339@@ -1292,6 +1336,7 @@ help:
340 @echo ' modules_prepare - Set up for building external modules'
341 @echo ' tags/TAGS - Generate tags file for editors'
342 @echo ' cscope - Generate cscope index'
343+ @echo ' gtags - Generate GNU GLOBAL index'
344 @echo ' kernelrelease - Output the release version string'
345 @echo ' kernelversion - Output the version stored in Makefile'
346 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
347@@ -1393,6 +1438,7 @@ PHONY += $(module-dirs) modules
348 $(module-dirs): crmodverdir $(objtree)/Module.symvers
349 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
350
351+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
352 modules: $(module-dirs)
353 @$(kecho) ' Building modules, stage 2.';
354 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
355@@ -1448,7 +1494,7 @@ endif # KBUILD_EXTMOD
356 quiet_cmd_tags = GEN $@
357 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
358
359-tags TAGS cscope: FORCE
360+tags TAGS cscope gtags: FORCE
361 $(call cmd,tags)
362
363 # Scripts to check various things for consistency
364@@ -1513,17 +1559,19 @@ else
365 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
366 endif
367
368-%.s: %.c prepare scripts FORCE
369+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
370+%.s: %.c gcc-plugins prepare scripts FORCE
371 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
372 %.i: %.c prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374-%.o: %.c prepare scripts FORCE
375+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
376+%.o: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.lst: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380-%.s: %.S prepare scripts FORCE
381+%.s: %.S gcc-plugins prepare scripts FORCE
382 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
383-%.o: %.S prepare scripts FORCE
384+%.o: %.S gcc-plugins prepare scripts FORCE
385 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
386 %.symtypes: %.c prepare scripts FORCE
387 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
388@@ -1533,11 +1581,13 @@ endif
389 $(cmd_crmodverdir)
390 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
391 $(build)=$(build-dir)
392-%/: prepare scripts FORCE
393+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
394+%/: gcc-plugins prepare scripts FORCE
395 $(cmd_crmodverdir)
396 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
397 $(build)=$(build-dir)
398-%.ko: prepare scripts FORCE
399+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
400+%.ko: gcc-plugins prepare scripts FORCE
401 $(cmd_crmodverdir)
402 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
403 $(build)=$(build-dir) $(@:.ko=.o)
404diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
405index 5c75c1b..c82f878 100644
406--- a/arch/alpha/include/asm/elf.h
407+++ b/arch/alpha/include/asm/elf.h
408@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
409
410 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
411
412+#ifdef CONFIG_PAX_ASLR
413+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
414+
415+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
416+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
417+#endif
418+
419 /* $0 is set by ld.so to a pointer to a function which might be
420 registered using atexit. This provides a mean for the dynamic
421 linker to call DT_FINI functions for shared libraries that have
422diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
423index 3f0c59f..cf1e100 100644
424--- a/arch/alpha/include/asm/pgtable.h
425+++ b/arch/alpha/include/asm/pgtable.h
426@@ -101,6 +101,17 @@ struct vm_area_struct;
427 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
428 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
429 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
430+
431+#ifdef CONFIG_PAX_PAGEEXEC
432+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
433+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
434+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
435+#else
436+# define PAGE_SHARED_NOEXEC PAGE_SHARED
437+# define PAGE_COPY_NOEXEC PAGE_COPY
438+# define PAGE_READONLY_NOEXEC PAGE_READONLY
439+#endif
440+
441 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
442
443 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
444diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
445index ebc3c89..20cfa63 100644
446--- a/arch/alpha/kernel/module.c
447+++ b/arch/alpha/kernel/module.c
448@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
449
450 /* The small sections were sorted to the end of the segment.
451 The following should definitely cover them. */
452- gp = (u64)me->module_core + me->core_size - 0x8000;
453+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
454 got = sechdrs[me->arch.gotsecindex].sh_addr;
455
456 for (i = 0; i < n; i++) {
457diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
458index a94e49c..d71dd44 100644
459--- a/arch/alpha/kernel/osf_sys.c
460+++ b/arch/alpha/kernel/osf_sys.c
461@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
462 /* At this point: (!vma || addr < vma->vm_end). */
463 if (limit - len < addr)
464 return -ENOMEM;
465- if (!vma || addr + len <= vma->vm_start)
466+ if (check_heap_stack_gap(vma, addr, len))
467 return addr;
468 addr = vma->vm_end;
469 vma = vma->vm_next;
470@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
471 merely specific addresses, but regions of memory -- perhaps
472 this feature should be incorporated into all ports? */
473
474+#ifdef CONFIG_PAX_RANDMMAP
475+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
476+#endif
477+
478 if (addr) {
479 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
480 if (addr != (unsigned long) -ENOMEM)
481@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
482 }
483
484 /* Next, try allocating at TASK_UNMAPPED_BASE. */
485- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
486- len, limit);
487+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
488+
489 if (addr != (unsigned long) -ENOMEM)
490 return addr;
491
492diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
493index 00a31de..2ded0f2 100644
494--- a/arch/alpha/mm/fault.c
495+++ b/arch/alpha/mm/fault.c
496@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
497 __reload_thread(pcb);
498 }
499
500+#ifdef CONFIG_PAX_PAGEEXEC
501+/*
502+ * PaX: decide what to do with offenders (regs->pc = fault address)
503+ *
504+ * returns 1 when task should be killed
505+ * 2 when patched PLT trampoline was detected
506+ * 3 when unpatched PLT trampoline was detected
507+ */
508+static int pax_handle_fetch_fault(struct pt_regs *regs)
509+{
510+
511+#ifdef CONFIG_PAX_EMUPLT
512+ int err;
513+
514+ do { /* PaX: patched PLT emulation #1 */
515+ unsigned int ldah, ldq, jmp;
516+
517+ err = get_user(ldah, (unsigned int *)regs->pc);
518+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
519+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
520+
521+ if (err)
522+ break;
523+
524+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
525+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
526+ jmp == 0x6BFB0000U)
527+ {
528+ unsigned long r27, addr;
529+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
530+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
531+
532+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
533+ err = get_user(r27, (unsigned long *)addr);
534+ if (err)
535+ break;
536+
537+ regs->r27 = r27;
538+ regs->pc = r27;
539+ return 2;
540+ }
541+ } while (0);
542+
543+ do { /* PaX: patched PLT emulation #2 */
544+ unsigned int ldah, lda, br;
545+
546+ err = get_user(ldah, (unsigned int *)regs->pc);
547+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
548+ err |= get_user(br, (unsigned int *)(regs->pc+8));
549+
550+ if (err)
551+ break;
552+
553+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
554+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
555+ (br & 0xFFE00000U) == 0xC3E00000U)
556+ {
557+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
558+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
559+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
560+
561+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
562+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
563+ return 2;
564+ }
565+ } while (0);
566+
567+ do { /* PaX: unpatched PLT emulation */
568+ unsigned int br;
569+
570+ err = get_user(br, (unsigned int *)regs->pc);
571+
572+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
573+ unsigned int br2, ldq, nop, jmp;
574+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
575+
576+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
577+ err = get_user(br2, (unsigned int *)addr);
578+ err |= get_user(ldq, (unsigned int *)(addr+4));
579+ err |= get_user(nop, (unsigned int *)(addr+8));
580+ err |= get_user(jmp, (unsigned int *)(addr+12));
581+ err |= get_user(resolver, (unsigned long *)(addr+16));
582+
583+ if (err)
584+ break;
585+
586+ if (br2 == 0xC3600000U &&
587+ ldq == 0xA77B000CU &&
588+ nop == 0x47FF041FU &&
589+ jmp == 0x6B7B0000U)
590+ {
591+ regs->r28 = regs->pc+4;
592+ regs->r27 = addr+16;
593+ regs->pc = resolver;
594+ return 3;
595+ }
596+ }
597+ } while (0);
598+#endif
599+
600+ return 1;
601+}
602+
603+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
604+{
605+ unsigned long i;
606+
607+ printk(KERN_ERR "PAX: bytes at PC: ");
608+ for (i = 0; i < 5; i++) {
609+ unsigned int c;
610+ if (get_user(c, (unsigned int *)pc+i))
611+ printk(KERN_CONT "???????? ");
612+ else
613+ printk(KERN_CONT "%08x ", c);
614+ }
615+ printk("\n");
616+}
617+#endif
618
619 /*
620 * This routine handles page faults. It determines the address,
621@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
622 good_area:
623 si_code = SEGV_ACCERR;
624 if (cause < 0) {
625- if (!(vma->vm_flags & VM_EXEC))
626+ if (!(vma->vm_flags & VM_EXEC)) {
627+
628+#ifdef CONFIG_PAX_PAGEEXEC
629+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
630+ goto bad_area;
631+
632+ up_read(&mm->mmap_sem);
633+ switch (pax_handle_fetch_fault(regs)) {
634+
635+#ifdef CONFIG_PAX_EMUPLT
636+ case 2:
637+ case 3:
638+ return;
639+#endif
640+
641+ }
642+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
643+ do_group_exit(SIGKILL);
644+#else
645 goto bad_area;
646+#endif
647+
648+ }
649 } else if (!cause) {
650 /* Allow reads even for write-only mappings */
651 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
652diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
653index 6aac3f5..265536b 100644
654--- a/arch/arm/include/asm/elf.h
655+++ b/arch/arm/include/asm/elf.h
656@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 the loader. We need to make sure that it is out of the way of the program
658 that it will "exec", and that there is sufficient room for the brk. */
659
660-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
661+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
662+
663+#ifdef CONFIG_PAX_ASLR
664+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
665+
666+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
667+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
668+#endif
669
670 /* When the program starts, a1 contains a pointer to a function to be
671 registered with atexit, as per the SVR4 ABI. A value of 0 means we
672diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
673index c019949..388fdd1 100644
674--- a/arch/arm/include/asm/kmap_types.h
675+++ b/arch/arm/include/asm/kmap_types.h
676@@ -19,6 +19,7 @@ enum km_type {
677 KM_SOFTIRQ0,
678 KM_SOFTIRQ1,
679 KM_L2_CACHE,
680+ KM_CLEARPAGE,
681 KM_TYPE_NR
682 };
683
684diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
685index 1d6bd40..fba0cb9 100644
686--- a/arch/arm/include/asm/uaccess.h
687+++ b/arch/arm/include/asm/uaccess.h
688@@ -22,6 +22,8 @@
689 #define VERIFY_READ 0
690 #define VERIFY_WRITE 1
691
692+extern void check_object_size(const void *ptr, unsigned long n, bool to);
693+
694 /*
695 * The exception table consists of pairs of addresses: the first is the
696 * address of an instruction that is allowed to fault, and the second is
697@@ -387,8 +389,23 @@ do { \
698
699
700 #ifdef CONFIG_MMU
701-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
702-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
703+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
704+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
705+
706+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
707+{
708+ if (!__builtin_constant_p(n))
709+ check_object_size(to, n, false);
710+ return ___copy_from_user(to, from, n);
711+}
712+
713+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
714+{
715+ if (!__builtin_constant_p(n))
716+ check_object_size(from, n, true);
717+ return ___copy_to_user(to, from, n);
718+}
719+
720 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
721 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
722 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
723@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
724
725 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
726 {
727+ if ((long)n < 0)
728+ return n;
729+
730 if (access_ok(VERIFY_READ, from, n))
731 n = __copy_from_user(to, from, n);
732 else /* security hole - plug it */
733@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
734
735 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
736 {
737+ if ((long)n < 0)
738+ return n;
739+
740 if (access_ok(VERIFY_WRITE, to, n))
741 n = __copy_to_user(to, from, n);
742 return n;
743diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
744index 0e62770..e2c2cd6 100644
745--- a/arch/arm/kernel/armksyms.c
746+++ b/arch/arm/kernel/armksyms.c
747@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
748 #ifdef CONFIG_MMU
749 EXPORT_SYMBOL(copy_page);
750
751-EXPORT_SYMBOL(__copy_from_user);
752-EXPORT_SYMBOL(__copy_to_user);
753+EXPORT_SYMBOL(___copy_from_user);
754+EXPORT_SYMBOL(___copy_to_user);
755 EXPORT_SYMBOL(__clear_user);
756
757 EXPORT_SYMBOL(__get_user_1);
758diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
759index ba8ccfe..2dc34dc 100644
760--- a/arch/arm/kernel/kgdb.c
761+++ b/arch/arm/kernel/kgdb.c
762@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
763 * and we handle the normal undef case within the do_undefinstr
764 * handler.
765 */
766-struct kgdb_arch arch_kgdb_ops = {
767+const struct kgdb_arch arch_kgdb_ops = {
768 #ifndef __ARMEB__
769 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
770 #else /* ! __ARMEB__ */
771diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
772index 3f361a7..6e806e1 100644
773--- a/arch/arm/kernel/traps.c
774+++ b/arch/arm/kernel/traps.c
775@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
776
777 DEFINE_SPINLOCK(die_lock);
778
779+extern void gr_handle_kernel_exploit(void);
780+
781 /*
782 * This function is protected against re-entrancy.
783 */
784@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
785 if (panic_on_oops)
786 panic("Fatal exception");
787
788+ gr_handle_kernel_exploit();
789+
790 do_exit(SIGSEGV);
791 }
792
793diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
794index e4fe124..0fc246b 100644
795--- a/arch/arm/lib/copy_from_user.S
796+++ b/arch/arm/lib/copy_from_user.S
797@@ -16,7 +16,7 @@
798 /*
799 * Prototype:
800 *
801- * size_t __copy_from_user(void *to, const void *from, size_t n)
802+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
803 *
804 * Purpose:
805 *
806@@ -84,11 +84,11 @@
807
808 .text
809
810-ENTRY(__copy_from_user)
811+ENTRY(___copy_from_user)
812
813 #include "copy_template.S"
814
815-ENDPROC(__copy_from_user)
816+ENDPROC(___copy_from_user)
817
818 .section .fixup,"ax"
819 .align 0
820diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
821index 1a71e15..ac7b258 100644
822--- a/arch/arm/lib/copy_to_user.S
823+++ b/arch/arm/lib/copy_to_user.S
824@@ -16,7 +16,7 @@
825 /*
826 * Prototype:
827 *
828- * size_t __copy_to_user(void *to, const void *from, size_t n)
829+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
830 *
831 * Purpose:
832 *
833@@ -88,11 +88,11 @@
834 .text
835
836 ENTRY(__copy_to_user_std)
837-WEAK(__copy_to_user)
838+WEAK(___copy_to_user)
839
840 #include "copy_template.S"
841
842-ENDPROC(__copy_to_user)
843+ENDPROC(___copy_to_user)
844
845 .section .fixup,"ax"
846 .align 0
847diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
848index ffdd274..91017b6 100644
849--- a/arch/arm/lib/uaccess.S
850+++ b/arch/arm/lib/uaccess.S
851@@ -19,7 +19,7 @@
852
853 #define PAGE_SHIFT 12
854
855-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
856+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
857 * Purpose : copy a block to user memory from kernel memory
858 * Params : to - user memory
859 * : from - kernel memory
860@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
861 sub r2, r2, ip
862 b .Lc2u_dest_aligned
863
864-ENTRY(__copy_to_user)
865+ENTRY(___copy_to_user)
866 stmfd sp!, {r2, r4 - r7, lr}
867 cmp r2, #4
868 blt .Lc2u_not_enough
869@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
870 ldrgtb r3, [r1], #0
871 USER( strgtbt r3, [r0], #1) @ May fault
872 b .Lc2u_finished
873-ENDPROC(__copy_to_user)
874+ENDPROC(___copy_to_user)
875
876 .section .fixup,"ax"
877 .align 0
878 9001: ldmfd sp!, {r0, r4 - r7, pc}
879 .previous
880
881-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
882+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
883 * Purpose : copy a block from user memory to kernel memory
884 * Params : to - kernel memory
885 * : from - user memory
886@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
887 sub r2, r2, ip
888 b .Lcfu_dest_aligned
889
890-ENTRY(__copy_from_user)
891+ENTRY(___copy_from_user)
892 stmfd sp!, {r0, r2, r4 - r7, lr}
893 cmp r2, #4
894 blt .Lcfu_not_enough
895@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
896 USER( ldrgtbt r3, [r1], #1) @ May fault
897 strgtb r3, [r0], #1
898 b .Lcfu_finished
899-ENDPROC(__copy_from_user)
900+ENDPROC(___copy_from_user)
901
902 .section .fixup,"ax"
903 .align 0
904diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
905index 6b967ff..67d5b2b 100644
906--- a/arch/arm/lib/uaccess_with_memcpy.c
907+++ b/arch/arm/lib/uaccess_with_memcpy.c
908@@ -97,7 +97,7 @@ out:
909 }
910
911 unsigned long
912-__copy_to_user(void __user *to, const void *from, unsigned long n)
913+___copy_to_user(void __user *to, const void *from, unsigned long n)
914 {
915 /*
916 * This test is stubbed out of the main function above to keep
917diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
918index 4028724..beec230 100644
919--- a/arch/arm/mach-at91/pm.c
920+++ b/arch/arm/mach-at91/pm.c
921@@ -348,7 +348,7 @@ static void at91_pm_end(void)
922 }
923
924
925-static struct platform_suspend_ops at91_pm_ops ={
926+static const struct platform_suspend_ops at91_pm_ops ={
927 .valid = at91_pm_valid_state,
928 .begin = at91_pm_begin,
929 .enter = at91_pm_enter,
930diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
931index 5218943..0a34552 100644
932--- a/arch/arm/mach-omap1/pm.c
933+++ b/arch/arm/mach-omap1/pm.c
934@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
935
936
937
938-static struct platform_suspend_ops omap_pm_ops ={
939+static const struct platform_suspend_ops omap_pm_ops ={
940 .prepare = omap_pm_prepare,
941 .enter = omap_pm_enter,
942 .finish = omap_pm_finish,
943diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
944index bff5c4e..d4c649b 100644
945--- a/arch/arm/mach-omap2/pm24xx.c
946+++ b/arch/arm/mach-omap2/pm24xx.c
947@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
948 enable_hlt();
949 }
950
951-static struct platform_suspend_ops omap_pm_ops = {
952+static const struct platform_suspend_ops omap_pm_ops = {
953 .prepare = omap2_pm_prepare,
954 .enter = omap2_pm_enter,
955 .finish = omap2_pm_finish,
956diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
957index 8946319..7d3e661 100644
958--- a/arch/arm/mach-omap2/pm34xx.c
959+++ b/arch/arm/mach-omap2/pm34xx.c
960@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
961 return;
962 }
963
964-static struct platform_suspend_ops omap_pm_ops = {
965+static const struct platform_suspend_ops omap_pm_ops = {
966 .begin = omap3_pm_begin,
967 .end = omap3_pm_end,
968 .prepare = omap3_pm_prepare,
969diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
970index b3d8d53..6e68ebc 100644
971--- a/arch/arm/mach-pnx4008/pm.c
972+++ b/arch/arm/mach-pnx4008/pm.c
973@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
974 (state == PM_SUSPEND_MEM);
975 }
976
977-static struct platform_suspend_ops pnx4008_pm_ops = {
978+static const struct platform_suspend_ops pnx4008_pm_ops = {
979 .enter = pnx4008_pm_enter,
980 .valid = pnx4008_pm_valid,
981 };
982diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
983index 7693355..9beb00a 100644
984--- a/arch/arm/mach-pxa/pm.c
985+++ b/arch/arm/mach-pxa/pm.c
986@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
987 pxa_cpu_pm_fns->finish();
988 }
989
990-static struct platform_suspend_ops pxa_pm_ops = {
991+static const struct platform_suspend_ops pxa_pm_ops = {
992 .valid = pxa_pm_valid,
993 .enter = pxa_pm_enter,
994 .prepare = pxa_pm_prepare,
995diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
996index 629e05d..06be589 100644
997--- a/arch/arm/mach-pxa/sharpsl_pm.c
998+++ b/arch/arm/mach-pxa/sharpsl_pm.c
999@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1000 }
1001
1002 #ifdef CONFIG_PM
1003-static struct platform_suspend_ops sharpsl_pm_ops = {
1004+static const struct platform_suspend_ops sharpsl_pm_ops = {
1005 .prepare = pxa_pm_prepare,
1006 .finish = pxa_pm_finish,
1007 .enter = corgi_pxa_pm_enter,
1008diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1009index c83fdc8..ab9fc44 100644
1010--- a/arch/arm/mach-sa1100/pm.c
1011+++ b/arch/arm/mach-sa1100/pm.c
1012@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1013 return virt_to_phys(sp);
1014 }
1015
1016-static struct platform_suspend_ops sa11x0_pm_ops = {
1017+static const struct platform_suspend_ops sa11x0_pm_ops = {
1018 .enter = sa11x0_pm_enter,
1019 .valid = suspend_valid_only_mem,
1020 };
1021diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1022index 3191cd6..c0739db 100644
1023--- a/arch/arm/mm/fault.c
1024+++ b/arch/arm/mm/fault.c
1025@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1026 }
1027 #endif
1028
1029+#ifdef CONFIG_PAX_PAGEEXEC
1030+ if (fsr & FSR_LNX_PF) {
1031+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1032+ do_group_exit(SIGKILL);
1033+ }
1034+#endif
1035+
1036 tsk->thread.address = addr;
1037 tsk->thread.error_code = fsr;
1038 tsk->thread.trap_no = 14;
1039@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1040 }
1041 #endif /* CONFIG_MMU */
1042
1043+#ifdef CONFIG_PAX_PAGEEXEC
1044+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1045+{
1046+ long i;
1047+
1048+ printk(KERN_ERR "PAX: bytes at PC: ");
1049+ for (i = 0; i < 20; i++) {
1050+ unsigned char c;
1051+ if (get_user(c, (__force unsigned char __user *)pc+i))
1052+ printk(KERN_CONT "?? ");
1053+ else
1054+ printk(KERN_CONT "%02x ", c);
1055+ }
1056+ printk("\n");
1057+
1058+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1059+ for (i = -1; i < 20; i++) {
1060+ unsigned long c;
1061+ if (get_user(c, (__force unsigned long __user *)sp+i))
1062+ printk(KERN_CONT "???????? ");
1063+ else
1064+ printk(KERN_CONT "%08lx ", c);
1065+ }
1066+ printk("\n");
1067+}
1068+#endif
1069+
1070 /*
1071 * First Level Translation Fault Handler
1072 *
1073diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1074index f5abc51..7ec524c 100644
1075--- a/arch/arm/mm/mmap.c
1076+++ b/arch/arm/mm/mmap.c
1077@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1078 if (len > TASK_SIZE)
1079 return -ENOMEM;
1080
1081+#ifdef CONFIG_PAX_RANDMMAP
1082+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1083+#endif
1084+
1085 if (addr) {
1086 if (do_align)
1087 addr = COLOUR_ALIGN(addr, pgoff);
1088@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1089 addr = PAGE_ALIGN(addr);
1090
1091 vma = find_vma(mm, addr);
1092- if (TASK_SIZE - len >= addr &&
1093- (!vma || addr + len <= vma->vm_start))
1094+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1095 return addr;
1096 }
1097 if (len > mm->cached_hole_size) {
1098- start_addr = addr = mm->free_area_cache;
1099+ start_addr = addr = mm->free_area_cache;
1100 } else {
1101- start_addr = addr = TASK_UNMAPPED_BASE;
1102- mm->cached_hole_size = 0;
1103+ start_addr = addr = mm->mmap_base;
1104+ mm->cached_hole_size = 0;
1105 }
1106
1107 full_search:
1108@@ -94,14 +97,14 @@ full_search:
1109 * Start a new search - just in case we missed
1110 * some holes.
1111 */
1112- if (start_addr != TASK_UNMAPPED_BASE) {
1113- start_addr = addr = TASK_UNMAPPED_BASE;
1114+ if (start_addr != mm->mmap_base) {
1115+ start_addr = addr = mm->mmap_base;
1116 mm->cached_hole_size = 0;
1117 goto full_search;
1118 }
1119 return -ENOMEM;
1120 }
1121- if (!vma || addr + len <= vma->vm_start) {
1122+ if (check_heap_stack_gap(vma, addr, len)) {
1123 /*
1124 * Remember the place where we stopped the search:
1125 */
1126diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1127index 8d97db2..b66cfa5 100644
1128--- a/arch/arm/plat-s3c/pm.c
1129+++ b/arch/arm/plat-s3c/pm.c
1130@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1131 s3c_pm_check_cleanup();
1132 }
1133
1134-static struct platform_suspend_ops s3c_pm_ops = {
1135+static const struct platform_suspend_ops s3c_pm_ops = {
1136 .enter = s3c_pm_enter,
1137 .prepare = s3c_pm_prepare,
1138 .finish = s3c_pm_finish,
1139diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1140index d5d1d41..856e2ed 100644
1141--- a/arch/avr32/include/asm/elf.h
1142+++ b/arch/avr32/include/asm/elf.h
1143@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1144 the loader. We need to make sure that it is out of the way of the program
1145 that it will "exec", and that there is sufficient room for the brk. */
1146
1147-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1148+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1149
1150+#ifdef CONFIG_PAX_ASLR
1151+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1152+
1153+#define PAX_DELTA_MMAP_LEN 15
1154+#define PAX_DELTA_STACK_LEN 15
1155+#endif
1156
1157 /* This yields a mask that user programs can use to figure out what
1158 instruction set this CPU supports. This could be done in user space,
1159diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1160index b7f5c68..556135c 100644
1161--- a/arch/avr32/include/asm/kmap_types.h
1162+++ b/arch/avr32/include/asm/kmap_types.h
1163@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1164 D(11) KM_IRQ1,
1165 D(12) KM_SOFTIRQ0,
1166 D(13) KM_SOFTIRQ1,
1167-D(14) KM_TYPE_NR
1168+D(14) KM_CLEARPAGE,
1169+D(15) KM_TYPE_NR
1170 };
1171
1172 #undef D
1173diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1174index f021edf..32d680e 100644
1175--- a/arch/avr32/mach-at32ap/pm.c
1176+++ b/arch/avr32/mach-at32ap/pm.c
1177@@ -176,7 +176,7 @@ out:
1178 return 0;
1179 }
1180
1181-static struct platform_suspend_ops avr32_pm_ops = {
1182+static const struct platform_suspend_ops avr32_pm_ops = {
1183 .valid = avr32_pm_valid_state,
1184 .enter = avr32_pm_enter,
1185 };
1186diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1187index b61d86d..e292c7f 100644
1188--- a/arch/avr32/mm/fault.c
1189+++ b/arch/avr32/mm/fault.c
1190@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1191
1192 int exception_trace = 1;
1193
1194+#ifdef CONFIG_PAX_PAGEEXEC
1195+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1196+{
1197+ unsigned long i;
1198+
1199+ printk(KERN_ERR "PAX: bytes at PC: ");
1200+ for (i = 0; i < 20; i++) {
1201+ unsigned char c;
1202+ if (get_user(c, (unsigned char *)pc+i))
1203+ printk(KERN_CONT "???????? ");
1204+ else
1205+ printk(KERN_CONT "%02x ", c);
1206+ }
1207+ printk("\n");
1208+}
1209+#endif
1210+
1211 /*
1212 * This routine handles page faults. It determines the address and the
1213 * problem, and then passes it off to one of the appropriate routines.
1214@@ -157,6 +174,16 @@ bad_area:
1215 up_read(&mm->mmap_sem);
1216
1217 if (user_mode(regs)) {
1218+
1219+#ifdef CONFIG_PAX_PAGEEXEC
1220+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1221+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1222+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1223+ do_group_exit(SIGKILL);
1224+ }
1225+ }
1226+#endif
1227+
1228 if (exception_trace && printk_ratelimit())
1229 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1230 "sp %08lx ecr %lu\n",
1231diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1232index cce79d0..c406c85 100644
1233--- a/arch/blackfin/kernel/kgdb.c
1234+++ b/arch/blackfin/kernel/kgdb.c
1235@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1236 return -1; /* this means that we do not want to exit from the handler */
1237 }
1238
1239-struct kgdb_arch arch_kgdb_ops = {
1240+const struct kgdb_arch arch_kgdb_ops = {
1241 .gdb_bpt_instr = {0xa1},
1242 #ifdef CONFIG_SMP
1243 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1244diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1245index 8837be4..b2fb413 100644
1246--- a/arch/blackfin/mach-common/pm.c
1247+++ b/arch/blackfin/mach-common/pm.c
1248@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1249 return 0;
1250 }
1251
1252-struct platform_suspend_ops bfin_pm_ops = {
1253+const struct platform_suspend_ops bfin_pm_ops = {
1254 .enter = bfin_pm_enter,
1255 .valid = bfin_pm_valid,
1256 };
1257diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1258index f8e16b2..c73ff79 100644
1259--- a/arch/frv/include/asm/kmap_types.h
1260+++ b/arch/frv/include/asm/kmap_types.h
1261@@ -23,6 +23,7 @@ enum km_type {
1262 KM_IRQ1,
1263 KM_SOFTIRQ0,
1264 KM_SOFTIRQ1,
1265+ KM_CLEARPAGE,
1266 KM_TYPE_NR
1267 };
1268
1269diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1270index 385fd30..6c3d97e 100644
1271--- a/arch/frv/mm/elf-fdpic.c
1272+++ b/arch/frv/mm/elf-fdpic.c
1273@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1274 if (addr) {
1275 addr = PAGE_ALIGN(addr);
1276 vma = find_vma(current->mm, addr);
1277- if (TASK_SIZE - len >= addr &&
1278- (!vma || addr + len <= vma->vm_start))
1279+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1280 goto success;
1281 }
1282
1283@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1284 for (; vma; vma = vma->vm_next) {
1285 if (addr > limit)
1286 break;
1287- if (addr + len <= vma->vm_start)
1288+ if (check_heap_stack_gap(vma, addr, len))
1289 goto success;
1290 addr = vma->vm_end;
1291 }
1292@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1293 for (; vma; vma = vma->vm_next) {
1294 if (addr > limit)
1295 break;
1296- if (addr + len <= vma->vm_start)
1297+ if (check_heap_stack_gap(vma, addr, len))
1298 goto success;
1299 addr = vma->vm_end;
1300 }
1301diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1302index e4a80d8..11a7ea1 100644
1303--- a/arch/ia64/hp/common/hwsw_iommu.c
1304+++ b/arch/ia64/hp/common/hwsw_iommu.c
1305@@ -17,7 +17,7 @@
1306 #include <linux/swiotlb.h>
1307 #include <asm/machvec.h>
1308
1309-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1310+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1311
1312 /* swiotlb declarations & definitions: */
1313 extern int swiotlb_late_init_with_default_size (size_t size);
1314@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1315 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1316 }
1317
1318-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1319+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1320 {
1321 if (use_swiotlb(dev))
1322 return &swiotlb_dma_ops;
1323diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1324index 01ae69b..35752fd 100644
1325--- a/arch/ia64/hp/common/sba_iommu.c
1326+++ b/arch/ia64/hp/common/sba_iommu.c
1327@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1328 },
1329 };
1330
1331-extern struct dma_map_ops swiotlb_dma_ops;
1332+extern const struct dma_map_ops swiotlb_dma_ops;
1333
1334 static int __init
1335 sba_init(void)
1336@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1337
1338 __setup("sbapagesize=",sba_page_override);
1339
1340-struct dma_map_ops sba_dma_ops = {
1341+const struct dma_map_ops sba_dma_ops = {
1342 .alloc_coherent = sba_alloc_coherent,
1343 .free_coherent = sba_free_coherent,
1344 .map_page = sba_map_page,
1345diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1346index c69552b..c7122f4 100644
1347--- a/arch/ia64/ia32/binfmt_elf32.c
1348+++ b/arch/ia64/ia32/binfmt_elf32.c
1349@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1350
1351 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1352
1353+#ifdef CONFIG_PAX_ASLR
1354+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1355+
1356+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1357+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1358+#endif
1359+
1360 /* Ugly but avoids duplication */
1361 #include "../../../fs/binfmt_elf.c"
1362
1363diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1364index 0f15349..26b3429 100644
1365--- a/arch/ia64/ia32/ia32priv.h
1366+++ b/arch/ia64/ia32/ia32priv.h
1367@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1368 #define ELF_DATA ELFDATA2LSB
1369 #define ELF_ARCH EM_386
1370
1371-#define IA32_STACK_TOP IA32_PAGE_OFFSET
1372+#ifdef CONFIG_PAX_RANDUSTACK
1373+#define __IA32_DELTA_STACK (current->mm->delta_stack)
1374+#else
1375+#define __IA32_DELTA_STACK 0UL
1376+#endif
1377+
1378+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1379+
1380 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1381 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1382
1383diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1384index 8d3c79c..71b3af6 100644
1385--- a/arch/ia64/include/asm/dma-mapping.h
1386+++ b/arch/ia64/include/asm/dma-mapping.h
1387@@ -12,7 +12,7 @@
1388
1389 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1390
1391-extern struct dma_map_ops *dma_ops;
1392+extern const struct dma_map_ops *dma_ops;
1393 extern struct ia64_machine_vector ia64_mv;
1394 extern void set_iommu_machvec(void);
1395
1396@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1397 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1398 dma_addr_t *daddr, gfp_t gfp)
1399 {
1400- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1401+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1402 void *caddr;
1403
1404 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1405@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1406 static inline void dma_free_coherent(struct device *dev, size_t size,
1407 void *caddr, dma_addr_t daddr)
1408 {
1409- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1410+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1411 debug_dma_free_coherent(dev, size, caddr, daddr);
1412 ops->free_coherent(dev, size, caddr, daddr);
1413 }
1414@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1415
1416 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1417 {
1418- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1419+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1420 return ops->mapping_error(dev, daddr);
1421 }
1422
1423 static inline int dma_supported(struct device *dev, u64 mask)
1424 {
1425- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1426+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1427 return ops->dma_supported(dev, mask);
1428 }
1429
1430diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1431index 86eddee..b116bb4 100644
1432--- a/arch/ia64/include/asm/elf.h
1433+++ b/arch/ia64/include/asm/elf.h
1434@@ -43,6 +43,13 @@
1435 */
1436 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1437
1438+#ifdef CONFIG_PAX_ASLR
1439+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1440+
1441+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1442+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1443+#endif
1444+
1445 #define PT_IA_64_UNWIND 0x70000001
1446
1447 /* IA-64 relocations: */
1448diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1449index 367d299..9ad4279 100644
1450--- a/arch/ia64/include/asm/machvec.h
1451+++ b/arch/ia64/include/asm/machvec.h
1452@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1453 /* DMA-mapping interface: */
1454 typedef void ia64_mv_dma_init (void);
1455 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1456-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1457+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1458
1459 /*
1460 * WARNING: The legacy I/O space is _architected_. Platforms are
1461@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1462 # endif /* CONFIG_IA64_GENERIC */
1463
1464 extern void swiotlb_dma_init(void);
1465-extern struct dma_map_ops *dma_get_ops(struct device *);
1466+extern const struct dma_map_ops *dma_get_ops(struct device *);
1467
1468 /*
1469 * Define default versions so we can extend machvec for new platforms without having
1470diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1471index 8840a69..cdb63d9 100644
1472--- a/arch/ia64/include/asm/pgtable.h
1473+++ b/arch/ia64/include/asm/pgtable.h
1474@@ -12,7 +12,7 @@
1475 * David Mosberger-Tang <davidm@hpl.hp.com>
1476 */
1477
1478-
1479+#include <linux/const.h>
1480 #include <asm/mman.h>
1481 #include <asm/page.h>
1482 #include <asm/processor.h>
1483@@ -143,6 +143,17 @@
1484 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1485 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1486 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1487+
1488+#ifdef CONFIG_PAX_PAGEEXEC
1489+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1490+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1491+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1492+#else
1493+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1494+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1495+# define PAGE_COPY_NOEXEC PAGE_COPY
1496+#endif
1497+
1498 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1499 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1500 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1501diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1502index 239ecdc..f94170e 100644
1503--- a/arch/ia64/include/asm/spinlock.h
1504+++ b/arch/ia64/include/asm/spinlock.h
1505@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1506 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1507
1508 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1509- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1510+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1511 }
1512
1513 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1514diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1515index 449c8c0..432a3d2 100644
1516--- a/arch/ia64/include/asm/uaccess.h
1517+++ b/arch/ia64/include/asm/uaccess.h
1518@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1519 const void *__cu_from = (from); \
1520 long __cu_len = (n); \
1521 \
1522- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1523+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1524 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1525 __cu_len; \
1526 })
1527@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1528 long __cu_len = (n); \
1529 \
1530 __chk_user_ptr(__cu_from); \
1531- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1532+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1533 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1534 __cu_len; \
1535 })
1536diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1537index f2c1600..969398a 100644
1538--- a/arch/ia64/kernel/dma-mapping.c
1539+++ b/arch/ia64/kernel/dma-mapping.c
1540@@ -3,7 +3,7 @@
1541 /* Set this to 1 if there is a HW IOMMU in the system */
1542 int iommu_detected __read_mostly;
1543
1544-struct dma_map_ops *dma_ops;
1545+const struct dma_map_ops *dma_ops;
1546 EXPORT_SYMBOL(dma_ops);
1547
1548 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1549@@ -16,7 +16,7 @@ static int __init dma_init(void)
1550 }
1551 fs_initcall(dma_init);
1552
1553-struct dma_map_ops *dma_get_ops(struct device *dev)
1554+const struct dma_map_ops *dma_get_ops(struct device *dev)
1555 {
1556 return dma_ops;
1557 }
1558diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1559index 1481b0a..e7d38ff 100644
1560--- a/arch/ia64/kernel/module.c
1561+++ b/arch/ia64/kernel/module.c
1562@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1563 void
1564 module_free (struct module *mod, void *module_region)
1565 {
1566- if (mod && mod->arch.init_unw_table &&
1567- module_region == mod->module_init) {
1568+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1569 unw_remove_unwind_table(mod->arch.init_unw_table);
1570 mod->arch.init_unw_table = NULL;
1571 }
1572@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1573 }
1574
1575 static inline int
1576+in_init_rx (const struct module *mod, uint64_t addr)
1577+{
1578+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1579+}
1580+
1581+static inline int
1582+in_init_rw (const struct module *mod, uint64_t addr)
1583+{
1584+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1585+}
1586+
1587+static inline int
1588 in_init (const struct module *mod, uint64_t addr)
1589 {
1590- return addr - (uint64_t) mod->module_init < mod->init_size;
1591+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1592+}
1593+
1594+static inline int
1595+in_core_rx (const struct module *mod, uint64_t addr)
1596+{
1597+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1598+}
1599+
1600+static inline int
1601+in_core_rw (const struct module *mod, uint64_t addr)
1602+{
1603+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1604 }
1605
1606 static inline int
1607 in_core (const struct module *mod, uint64_t addr)
1608 {
1609- return addr - (uint64_t) mod->module_core < mod->core_size;
1610+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1611 }
1612
1613 static inline int
1614@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1615 break;
1616
1617 case RV_BDREL:
1618- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1619+ if (in_init_rx(mod, val))
1620+ val -= (uint64_t) mod->module_init_rx;
1621+ else if (in_init_rw(mod, val))
1622+ val -= (uint64_t) mod->module_init_rw;
1623+ else if (in_core_rx(mod, val))
1624+ val -= (uint64_t) mod->module_core_rx;
1625+ else if (in_core_rw(mod, val))
1626+ val -= (uint64_t) mod->module_core_rw;
1627 break;
1628
1629 case RV_LTV:
1630@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1631 * addresses have been selected...
1632 */
1633 uint64_t gp;
1634- if (mod->core_size > MAX_LTOFF)
1635+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1636 /*
1637 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1638 * at the end of the module.
1639 */
1640- gp = mod->core_size - MAX_LTOFF / 2;
1641+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1642 else
1643- gp = mod->core_size / 2;
1644- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1645+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1646+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1647 mod->arch.gp = gp;
1648 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1649 }
1650diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1651index f6b1ff0..de773fb 100644
1652--- a/arch/ia64/kernel/pci-dma.c
1653+++ b/arch/ia64/kernel/pci-dma.c
1654@@ -43,7 +43,7 @@ struct device fallback_dev = {
1655 .dma_mask = &fallback_dev.coherent_dma_mask,
1656 };
1657
1658-extern struct dma_map_ops intel_dma_ops;
1659+extern const struct dma_map_ops intel_dma_ops;
1660
1661 static int __init pci_iommu_init(void)
1662 {
1663@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1664 }
1665 EXPORT_SYMBOL(iommu_dma_supported);
1666
1667+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1668+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1669+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1670+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1671+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1672+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1673+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1674+
1675+static const struct dma_map_ops intel_iommu_dma_ops = {
1676+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1677+ .alloc_coherent = intel_alloc_coherent,
1678+ .free_coherent = intel_free_coherent,
1679+ .map_sg = intel_map_sg,
1680+ .unmap_sg = intel_unmap_sg,
1681+ .map_page = intel_map_page,
1682+ .unmap_page = intel_unmap_page,
1683+ .mapping_error = intel_mapping_error,
1684+
1685+ .sync_single_for_cpu = machvec_dma_sync_single,
1686+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1687+ .sync_single_for_device = machvec_dma_sync_single,
1688+ .sync_sg_for_device = machvec_dma_sync_sg,
1689+ .dma_supported = iommu_dma_supported,
1690+};
1691+
1692 void __init pci_iommu_alloc(void)
1693 {
1694- dma_ops = &intel_dma_ops;
1695-
1696- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1697- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1698- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1699- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1700- dma_ops->dma_supported = iommu_dma_supported;
1701+ dma_ops = &intel_iommu_dma_ops;
1702
1703 /*
1704 * The order of these functions is important for
1705diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1706index 285aae8..61dbab6 100644
1707--- a/arch/ia64/kernel/pci-swiotlb.c
1708+++ b/arch/ia64/kernel/pci-swiotlb.c
1709@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1710 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1711 }
1712
1713-struct dma_map_ops swiotlb_dma_ops = {
1714+const struct dma_map_ops swiotlb_dma_ops = {
1715 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1716 .free_coherent = swiotlb_free_coherent,
1717 .map_page = swiotlb_map_page,
1718diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1719index 609d500..7dde2a8 100644
1720--- a/arch/ia64/kernel/sys_ia64.c
1721+++ b/arch/ia64/kernel/sys_ia64.c
1722@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1723 if (REGION_NUMBER(addr) == RGN_HPAGE)
1724 addr = 0;
1725 #endif
1726+
1727+#ifdef CONFIG_PAX_RANDMMAP
1728+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1729+ addr = mm->free_area_cache;
1730+ else
1731+#endif
1732+
1733 if (!addr)
1734 addr = mm->free_area_cache;
1735
1736@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1737 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1738 /* At this point: (!vma || addr < vma->vm_end). */
1739 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1740- if (start_addr != TASK_UNMAPPED_BASE) {
1741+ if (start_addr != mm->mmap_base) {
1742 /* Start a new search --- just in case we missed some holes. */
1743- addr = TASK_UNMAPPED_BASE;
1744+ addr = mm->mmap_base;
1745 goto full_search;
1746 }
1747 return -ENOMEM;
1748 }
1749- if (!vma || addr + len <= vma->vm_start) {
1750+ if (check_heap_stack_gap(vma, addr, len)) {
1751 /* Remember the address where we stopped this search: */
1752 mm->free_area_cache = addr + len;
1753 return addr;
1754diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1755index 8f06035..b3a5818 100644
1756--- a/arch/ia64/kernel/topology.c
1757+++ b/arch/ia64/kernel/topology.c
1758@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1759 return ret;
1760 }
1761
1762-static struct sysfs_ops cache_sysfs_ops = {
1763+static const struct sysfs_ops cache_sysfs_ops = {
1764 .show = cache_show
1765 };
1766
1767diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1768index 0a0c77b..8e55a81 100644
1769--- a/arch/ia64/kernel/vmlinux.lds.S
1770+++ b/arch/ia64/kernel/vmlinux.lds.S
1771@@ -190,7 +190,7 @@ SECTIONS
1772 /* Per-cpu data: */
1773 . = ALIGN(PERCPU_PAGE_SIZE);
1774 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1775- __phys_per_cpu_start = __per_cpu_load;
1776+ __phys_per_cpu_start = per_cpu_load;
1777 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1778 * into percpu page size
1779 */
1780diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1781index 19261a9..1611b7a 100644
1782--- a/arch/ia64/mm/fault.c
1783+++ b/arch/ia64/mm/fault.c
1784@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1785 return pte_present(pte);
1786 }
1787
1788+#ifdef CONFIG_PAX_PAGEEXEC
1789+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1790+{
1791+ unsigned long i;
1792+
1793+ printk(KERN_ERR "PAX: bytes at PC: ");
1794+ for (i = 0; i < 8; i++) {
1795+ unsigned int c;
1796+ if (get_user(c, (unsigned int *)pc+i))
1797+ printk(KERN_CONT "???????? ");
1798+ else
1799+ printk(KERN_CONT "%08x ", c);
1800+ }
1801+ printk("\n");
1802+}
1803+#endif
1804+
1805 void __kprobes
1806 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1807 {
1808@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1809 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1810 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1811
1812- if ((vma->vm_flags & mask) != mask)
1813+ if ((vma->vm_flags & mask) != mask) {
1814+
1815+#ifdef CONFIG_PAX_PAGEEXEC
1816+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1817+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1818+ goto bad_area;
1819+
1820+ up_read(&mm->mmap_sem);
1821+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1822+ do_group_exit(SIGKILL);
1823+ }
1824+#endif
1825+
1826 goto bad_area;
1827
1828+ }
1829+
1830 survive:
1831 /*
1832 * If for any reason at all we couldn't handle the fault, make
1833diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1834index b0f6157..a082bbc 100644
1835--- a/arch/ia64/mm/hugetlbpage.c
1836+++ b/arch/ia64/mm/hugetlbpage.c
1837@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1838 /* At this point: (!vmm || addr < vmm->vm_end). */
1839 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1840 return -ENOMEM;
1841- if (!vmm || (addr + len) <= vmm->vm_start)
1842+ if (check_heap_stack_gap(vmm, addr, len))
1843 return addr;
1844 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1845 }
1846diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1847index 1857766..05cc6a3 100644
1848--- a/arch/ia64/mm/init.c
1849+++ b/arch/ia64/mm/init.c
1850@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1851 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1852 vma->vm_end = vma->vm_start + PAGE_SIZE;
1853 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1854+
1855+#ifdef CONFIG_PAX_PAGEEXEC
1856+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1857+ vma->vm_flags &= ~VM_EXEC;
1858+
1859+#ifdef CONFIG_PAX_MPROTECT
1860+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1861+ vma->vm_flags &= ~VM_MAYEXEC;
1862+#endif
1863+
1864+ }
1865+#endif
1866+
1867 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1868 down_write(&current->mm->mmap_sem);
1869 if (insert_vm_struct(current->mm, vma)) {
1870diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1871index 98b6849..8046766 100644
1872--- a/arch/ia64/sn/pci/pci_dma.c
1873+++ b/arch/ia64/sn/pci/pci_dma.c
1874@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1875 return ret;
1876 }
1877
1878-static struct dma_map_ops sn_dma_ops = {
1879+static const struct dma_map_ops sn_dma_ops = {
1880 .alloc_coherent = sn_dma_alloc_coherent,
1881 .free_coherent = sn_dma_free_coherent,
1882 .map_page = sn_dma_map_page,
1883diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1884index 82abd15..d95ae5d 100644
1885--- a/arch/m32r/lib/usercopy.c
1886+++ b/arch/m32r/lib/usercopy.c
1887@@ -14,6 +14,9 @@
1888 unsigned long
1889 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1890 {
1891+ if ((long)n < 0)
1892+ return n;
1893+
1894 prefetch(from);
1895 if (access_ok(VERIFY_WRITE, to, n))
1896 __copy_user(to,from,n);
1897@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1898 unsigned long
1899 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1900 {
1901+ if ((long)n < 0)
1902+ return n;
1903+
1904 prefetchw(to);
1905 if (access_ok(VERIFY_READ, from, n))
1906 __copy_user_zeroing(to,from,n);
1907diff --git a/arch/mips/Makefile b/arch/mips/Makefile
1908index 77f5021..2b1db8a 100644
1909--- a/arch/mips/Makefile
1910+++ b/arch/mips/Makefile
1911@@ -51,6 +51,8 @@ endif
1912 cflags-y := -ffunction-sections
1913 cflags-y += $(call cc-option, -mno-check-zero-division)
1914
1915+cflags-y += -Wno-sign-compare -Wno-extra
1916+
1917 ifdef CONFIG_32BIT
1918 ld-emul = $(32bit-emul)
1919 vmlinux-32 = vmlinux
1920diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
1921index 632f986..fd0378d 100644
1922--- a/arch/mips/alchemy/devboards/pm.c
1923+++ b/arch/mips/alchemy/devboards/pm.c
1924@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1925
1926 }
1927
1928-static struct platform_suspend_ops db1x_pm_ops = {
1929+static const struct platform_suspend_ops db1x_pm_ops = {
1930 .valid = suspend_valid_only_mem,
1931 .begin = db1x_pm_begin,
1932 .enter = db1x_pm_enter,
1933diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1934index 7990694..4e93acf 100644
1935--- a/arch/mips/include/asm/elf.h
1936+++ b/arch/mips/include/asm/elf.h
1937@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
1938 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1939 #endif
1940
1941+#ifdef CONFIG_PAX_ASLR
1942+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1943+
1944+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1945+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1946+#endif
1947+
1948 #endif /* _ASM_ELF_H */
1949diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1950index f266295..627cfff 100644
1951--- a/arch/mips/include/asm/page.h
1952+++ b/arch/mips/include/asm/page.h
1953@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1954 #ifdef CONFIG_CPU_MIPS32
1955 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1956 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1957- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1958+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1959 #else
1960 typedef struct { unsigned long long pte; } pte_t;
1961 #define pte_val(x) ((x).pte)
1962diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
1963index e48c0bf..f3acf65 100644
1964--- a/arch/mips/include/asm/reboot.h
1965+++ b/arch/mips/include/asm/reboot.h
1966@@ -9,7 +9,7 @@
1967 #ifndef _ASM_REBOOT_H
1968 #define _ASM_REBOOT_H
1969
1970-extern void (*_machine_restart)(char *command);
1971-extern void (*_machine_halt)(void);
1972+extern void (*__noreturn _machine_restart)(char *command);
1973+extern void (*__noreturn _machine_halt)(void);
1974
1975 #endif /* _ASM_REBOOT_H */
1976diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1977index 83b5509..9fa24a23 100644
1978--- a/arch/mips/include/asm/system.h
1979+++ b/arch/mips/include/asm/system.h
1980@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1981 */
1982 #define __ARCH_WANT_UNLOCKED_CTXSW
1983
1984-extern unsigned long arch_align_stack(unsigned long sp);
1985+#define arch_align_stack(x) ((x) & ~0xfUL)
1986
1987 #endif /* _ASM_SYSTEM_H */
1988diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1989index 9fdd8bc..fcf9d68 100644
1990--- a/arch/mips/kernel/binfmt_elfn32.c
1991+++ b/arch/mips/kernel/binfmt_elfn32.c
1992@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1993 #undef ELF_ET_DYN_BASE
1994 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1995
1996+#ifdef CONFIG_PAX_ASLR
1997+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1998+
1999+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2000+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2001+#endif
2002+
2003 #include <asm/processor.h>
2004 #include <linux/module.h>
2005 #include <linux/elfcore.h>
2006diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2007index ff44823..cf0b48a 100644
2008--- a/arch/mips/kernel/binfmt_elfo32.c
2009+++ b/arch/mips/kernel/binfmt_elfo32.c
2010@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2011 #undef ELF_ET_DYN_BASE
2012 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2013
2014+#ifdef CONFIG_PAX_ASLR
2015+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2016+
2017+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2018+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2019+#endif
2020+
2021 #include <asm/processor.h>
2022
2023 /*
2024diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2025index 50c9bb8..efdd5f8 100644
2026--- a/arch/mips/kernel/kgdb.c
2027+++ b/arch/mips/kernel/kgdb.c
2028@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2029 return -1;
2030 }
2031
2032+/* cannot be const */
2033 struct kgdb_arch arch_kgdb_ops;
2034
2035 /*
2036diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2037index f3d73e1..bb3f57a 100644
2038--- a/arch/mips/kernel/process.c
2039+++ b/arch/mips/kernel/process.c
2040@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2041 out:
2042 return pc;
2043 }
2044-
2045-/*
2046- * Don't forget that the stack pointer must be aligned on a 8 bytes
2047- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2048- */
2049-unsigned long arch_align_stack(unsigned long sp)
2050-{
2051- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2052- sp -= get_random_int() & ~PAGE_MASK;
2053-
2054- return sp & ALMASK;
2055-}
2056diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2057index 060563a..7fbf310 100644
2058--- a/arch/mips/kernel/reset.c
2059+++ b/arch/mips/kernel/reset.c
2060@@ -19,8 +19,8 @@
2061 * So handle all using function pointers to machine specific
2062 * functions.
2063 */
2064-void (*_machine_restart)(char *command);
2065-void (*_machine_halt)(void);
2066+void (*__noreturn _machine_restart)(char *command);
2067+void (*__noreturn _machine_halt)(void);
2068 void (*pm_power_off)(void);
2069
2070 EXPORT_SYMBOL(pm_power_off);
2071@@ -29,16 +29,19 @@ void machine_restart(char *command)
2072 {
2073 if (_machine_restart)
2074 _machine_restart(command);
2075+ BUG();
2076 }
2077
2078 void machine_halt(void)
2079 {
2080 if (_machine_halt)
2081 _machine_halt();
2082+ BUG();
2083 }
2084
2085 void machine_power_off(void)
2086 {
2087 if (pm_power_off)
2088 pm_power_off();
2089+ BUG();
2090 }
2091diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2092index 3f7f466..3abe0b5 100644
2093--- a/arch/mips/kernel/syscall.c
2094+++ b/arch/mips/kernel/syscall.c
2095@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2096 do_color_align = 0;
2097 if (filp || (flags & MAP_SHARED))
2098 do_color_align = 1;
2099+
2100+#ifdef CONFIG_PAX_RANDMMAP
2101+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2102+#endif
2103+
2104 if (addr) {
2105 if (do_color_align)
2106 addr = COLOUR_ALIGN(addr, pgoff);
2107 else
2108 addr = PAGE_ALIGN(addr);
2109 vmm = find_vma(current->mm, addr);
2110- if (task_size - len >= addr &&
2111- (!vmm || addr + len <= vmm->vm_start))
2112+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2113 return addr;
2114 }
2115- addr = TASK_UNMAPPED_BASE;
2116+ addr = current->mm->mmap_base;
2117 if (do_color_align)
2118 addr = COLOUR_ALIGN(addr, pgoff);
2119 else
2120@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2121 /* At this point: (!vmm || addr < vmm->vm_end). */
2122 if (task_size - len < addr)
2123 return -ENOMEM;
2124- if (!vmm || addr + len <= vmm->vm_start)
2125+ if (check_heap_stack_gap(vmm, addr, len))
2126 return addr;
2127 addr = vmm->vm_end;
2128 if (do_color_align)
2129diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2130index e97a7a2..f18f5b0 100644
2131--- a/arch/mips/mm/fault.c
2132+++ b/arch/mips/mm/fault.c
2133@@ -26,6 +26,23 @@
2134 #include <asm/ptrace.h>
2135 #include <asm/highmem.h> /* For VMALLOC_END */
2136
2137+#ifdef CONFIG_PAX_PAGEEXEC
2138+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2139+{
2140+ unsigned long i;
2141+
2142+ printk(KERN_ERR "PAX: bytes at PC: ");
2143+ for (i = 0; i < 5; i++) {
2144+ unsigned int c;
2145+ if (get_user(c, (unsigned int *)pc+i))
2146+ printk(KERN_CONT "???????? ");
2147+ else
2148+ printk(KERN_CONT "%08x ", c);
2149+ }
2150+ printk("\n");
2151+}
2152+#endif
2153+
2154 /*
2155 * This routine handles page faults. It determines the address,
2156 * and the problem, and then passes it off to one of the appropriate
2157diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2158index 9c802eb..0592e41 100644
2159--- a/arch/parisc/include/asm/elf.h
2160+++ b/arch/parisc/include/asm/elf.h
2161@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2162
2163 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2164
2165+#ifdef CONFIG_PAX_ASLR
2166+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2167+
2168+#define PAX_DELTA_MMAP_LEN 16
2169+#define PAX_DELTA_STACK_LEN 16
2170+#endif
2171+
2172 /* This yields a mask that user programs can use to figure out what
2173 instruction set this CPU supports. This could be done in user space,
2174 but it's not easy, and we've already done it here. */
2175diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2176index a27d2e2..18fd845 100644
2177--- a/arch/parisc/include/asm/pgtable.h
2178+++ b/arch/parisc/include/asm/pgtable.h
2179@@ -207,6 +207,17 @@
2180 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2181 #define PAGE_COPY PAGE_EXECREAD
2182 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2183+
2184+#ifdef CONFIG_PAX_PAGEEXEC
2185+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2186+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2187+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2188+#else
2189+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2190+# define PAGE_COPY_NOEXEC PAGE_COPY
2191+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2192+#endif
2193+
2194 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2195 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2196 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2197diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2198index 2120746..8d70a5e 100644
2199--- a/arch/parisc/kernel/module.c
2200+++ b/arch/parisc/kernel/module.c
2201@@ -95,16 +95,38 @@
2202
2203 /* three functions to determine where in the module core
2204 * or init pieces the location is */
2205+static inline int in_init_rx(struct module *me, void *loc)
2206+{
2207+ return (loc >= me->module_init_rx &&
2208+ loc < (me->module_init_rx + me->init_size_rx));
2209+}
2210+
2211+static inline int in_init_rw(struct module *me, void *loc)
2212+{
2213+ return (loc >= me->module_init_rw &&
2214+ loc < (me->module_init_rw + me->init_size_rw));
2215+}
2216+
2217 static inline int in_init(struct module *me, void *loc)
2218 {
2219- return (loc >= me->module_init &&
2220- loc <= (me->module_init + me->init_size));
2221+ return in_init_rx(me, loc) || in_init_rw(me, loc);
2222+}
2223+
2224+static inline int in_core_rx(struct module *me, void *loc)
2225+{
2226+ return (loc >= me->module_core_rx &&
2227+ loc < (me->module_core_rx + me->core_size_rx));
2228+}
2229+
2230+static inline int in_core_rw(struct module *me, void *loc)
2231+{
2232+ return (loc >= me->module_core_rw &&
2233+ loc < (me->module_core_rw + me->core_size_rw));
2234 }
2235
2236 static inline int in_core(struct module *me, void *loc)
2237 {
2238- return (loc >= me->module_core &&
2239- loc <= (me->module_core + me->core_size));
2240+ return in_core_rx(me, loc) || in_core_rw(me, loc);
2241 }
2242
2243 static inline int in_local(struct module *me, void *loc)
2244@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2245 }
2246
2247 /* align things a bit */
2248- me->core_size = ALIGN(me->core_size, 16);
2249- me->arch.got_offset = me->core_size;
2250- me->core_size += gots * sizeof(struct got_entry);
2251+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2252+ me->arch.got_offset = me->core_size_rw;
2253+ me->core_size_rw += gots * sizeof(struct got_entry);
2254
2255- me->core_size = ALIGN(me->core_size, 16);
2256- me->arch.fdesc_offset = me->core_size;
2257- me->core_size += fdescs * sizeof(Elf_Fdesc);
2258+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2259+ me->arch.fdesc_offset = me->core_size_rw;
2260+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2261
2262 me->arch.got_max = gots;
2263 me->arch.fdesc_max = fdescs;
2264@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2265
2266 BUG_ON(value == 0);
2267
2268- got = me->module_core + me->arch.got_offset;
2269+ got = me->module_core_rw + me->arch.got_offset;
2270 for (i = 0; got[i].addr; i++)
2271 if (got[i].addr == value)
2272 goto out;
2273@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2274 #ifdef CONFIG_64BIT
2275 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2276 {
2277- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2278+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2279
2280 if (!value) {
2281 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2282@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2283
2284 /* Create new one */
2285 fdesc->addr = value;
2286- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2287+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2288 return (Elf_Addr)fdesc;
2289 }
2290 #endif /* CONFIG_64BIT */
2291@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2292
2293 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2294 end = table + sechdrs[me->arch.unwind_section].sh_size;
2295- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2296+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2297
2298 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2299 me->arch.unwind_section, table, end, gp);
2300diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2301index 9147391..f3d949a 100644
2302--- a/arch/parisc/kernel/sys_parisc.c
2303+++ b/arch/parisc/kernel/sys_parisc.c
2304@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2305 /* At this point: (!vma || addr < vma->vm_end). */
2306 if (TASK_SIZE - len < addr)
2307 return -ENOMEM;
2308- if (!vma || addr + len <= vma->vm_start)
2309+ if (check_heap_stack_gap(vma, addr, len))
2310 return addr;
2311 addr = vma->vm_end;
2312 }
2313@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2314 /* At this point: (!vma || addr < vma->vm_end). */
2315 if (TASK_SIZE - len < addr)
2316 return -ENOMEM;
2317- if (!vma || addr + len <= vma->vm_start)
2318+ if (check_heap_stack_gap(vma, addr, len))
2319 return addr;
2320 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2321 if (addr < vma->vm_end) /* handle wraparound */
2322@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2323 if (flags & MAP_FIXED)
2324 return addr;
2325 if (!addr)
2326- addr = TASK_UNMAPPED_BASE;
2327+ addr = current->mm->mmap_base;
2328
2329 if (filp) {
2330 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2331diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2332index 8b58bf0..7afff03 100644
2333--- a/arch/parisc/kernel/traps.c
2334+++ b/arch/parisc/kernel/traps.c
2335@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2336
2337 down_read(&current->mm->mmap_sem);
2338 vma = find_vma(current->mm,regs->iaoq[0]);
2339- if (vma && (regs->iaoq[0] >= vma->vm_start)
2340- && (vma->vm_flags & VM_EXEC)) {
2341-
2342+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2343 fault_address = regs->iaoq[0];
2344 fault_space = regs->iasq[0];
2345
2346diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2347index c6afbfc..c5839f6 100644
2348--- a/arch/parisc/mm/fault.c
2349+++ b/arch/parisc/mm/fault.c
2350@@ -15,6 +15,7 @@
2351 #include <linux/sched.h>
2352 #include <linux/interrupt.h>
2353 #include <linux/module.h>
2354+#include <linux/unistd.h>
2355
2356 #include <asm/uaccess.h>
2357 #include <asm/traps.h>
2358@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2359 static unsigned long
2360 parisc_acctyp(unsigned long code, unsigned int inst)
2361 {
2362- if (code == 6 || code == 16)
2363+ if (code == 6 || code == 7 || code == 16)
2364 return VM_EXEC;
2365
2366 switch (inst & 0xf0000000) {
2367@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2368 }
2369 #endif
2370
2371+#ifdef CONFIG_PAX_PAGEEXEC
2372+/*
2373+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2374+ *
2375+ * returns 1 when task should be killed
2376+ * 2 when rt_sigreturn trampoline was detected
2377+ * 3 when unpatched PLT trampoline was detected
2378+ */
2379+static int pax_handle_fetch_fault(struct pt_regs *regs)
2380+{
2381+
2382+#ifdef CONFIG_PAX_EMUPLT
2383+ int err;
2384+
2385+ do { /* PaX: unpatched PLT emulation */
2386+ unsigned int bl, depwi;
2387+
2388+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2389+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2390+
2391+ if (err)
2392+ break;
2393+
2394+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2395+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2396+
2397+ err = get_user(ldw, (unsigned int *)addr);
2398+ err |= get_user(bv, (unsigned int *)(addr+4));
2399+ err |= get_user(ldw2, (unsigned int *)(addr+8));
2400+
2401+ if (err)
2402+ break;
2403+
2404+ if (ldw == 0x0E801096U &&
2405+ bv == 0xEAC0C000U &&
2406+ ldw2 == 0x0E881095U)
2407+ {
2408+ unsigned int resolver, map;
2409+
2410+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2411+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2412+ if (err)
2413+ break;
2414+
2415+ regs->gr[20] = instruction_pointer(regs)+8;
2416+ regs->gr[21] = map;
2417+ regs->gr[22] = resolver;
2418+ regs->iaoq[0] = resolver | 3UL;
2419+ regs->iaoq[1] = regs->iaoq[0] + 4;
2420+ return 3;
2421+ }
2422+ }
2423+ } while (0);
2424+#endif
2425+
2426+#ifdef CONFIG_PAX_EMUTRAMP
2427+
2428+#ifndef CONFIG_PAX_EMUSIGRT
2429+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2430+ return 1;
2431+#endif
2432+
2433+ do { /* PaX: rt_sigreturn emulation */
2434+ unsigned int ldi1, ldi2, bel, nop;
2435+
2436+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2437+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2438+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2439+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2440+
2441+ if (err)
2442+ break;
2443+
2444+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2445+ ldi2 == 0x3414015AU &&
2446+ bel == 0xE4008200U &&
2447+ nop == 0x08000240U)
2448+ {
2449+ regs->gr[25] = (ldi1 & 2) >> 1;
2450+ regs->gr[20] = __NR_rt_sigreturn;
2451+ regs->gr[31] = regs->iaoq[1] + 16;
2452+ regs->sr[0] = regs->iasq[1];
2453+ regs->iaoq[0] = 0x100UL;
2454+ regs->iaoq[1] = regs->iaoq[0] + 4;
2455+ regs->iasq[0] = regs->sr[2];
2456+ regs->iasq[1] = regs->sr[2];
2457+ return 2;
2458+ }
2459+ } while (0);
2460+#endif
2461+
2462+ return 1;
2463+}
2464+
2465+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2466+{
2467+ unsigned long i;
2468+
2469+ printk(KERN_ERR "PAX: bytes at PC: ");
2470+ for (i = 0; i < 5; i++) {
2471+ unsigned int c;
2472+ if (get_user(c, (unsigned int *)pc+i))
2473+ printk(KERN_CONT "???????? ");
2474+ else
2475+ printk(KERN_CONT "%08x ", c);
2476+ }
2477+ printk("\n");
2478+}
2479+#endif
2480+
2481 int fixup_exception(struct pt_regs *regs)
2482 {
2483 const struct exception_table_entry *fix;
2484@@ -192,8 +303,33 @@ good_area:
2485
2486 acc_type = parisc_acctyp(code,regs->iir);
2487
2488- if ((vma->vm_flags & acc_type) != acc_type)
2489+ if ((vma->vm_flags & acc_type) != acc_type) {
2490+
2491+#ifdef CONFIG_PAX_PAGEEXEC
2492+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2493+ (address & ~3UL) == instruction_pointer(regs))
2494+ {
2495+ up_read(&mm->mmap_sem);
2496+ switch (pax_handle_fetch_fault(regs)) {
2497+
2498+#ifdef CONFIG_PAX_EMUPLT
2499+ case 3:
2500+ return;
2501+#endif
2502+
2503+#ifdef CONFIG_PAX_EMUTRAMP
2504+ case 2:
2505+ return;
2506+#endif
2507+
2508+ }
2509+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2510+ do_group_exit(SIGKILL);
2511+ }
2512+#endif
2513+
2514 goto bad_area;
2515+ }
2516
2517 /*
2518 * If for any reason at all we couldn't handle the fault, make
2519diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2520index c107b74..409dc0f 100644
2521--- a/arch/powerpc/Makefile
2522+++ b/arch/powerpc/Makefile
2523@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2524 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2525 CPP = $(CC) -E $(KBUILD_CFLAGS)
2526
2527+cflags-y += -Wno-sign-compare -Wno-extra
2528+
2529 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2530
2531 ifeq ($(CONFIG_PPC64),y)
2532diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2533index 6d94d27..50d4cad 100644
2534--- a/arch/powerpc/include/asm/device.h
2535+++ b/arch/powerpc/include/asm/device.h
2536@@ -14,7 +14,7 @@ struct dev_archdata {
2537 struct device_node *of_node;
2538
2539 /* DMA operations on that device */
2540- struct dma_map_ops *dma_ops;
2541+ const struct dma_map_ops *dma_ops;
2542
2543 /*
2544 * When an iommu is in use, dma_data is used as a ptr to the base of the
2545diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2546index e281dae..2b8a784 100644
2547--- a/arch/powerpc/include/asm/dma-mapping.h
2548+++ b/arch/powerpc/include/asm/dma-mapping.h
2549@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2550 #ifdef CONFIG_PPC64
2551 extern struct dma_map_ops dma_iommu_ops;
2552 #endif
2553-extern struct dma_map_ops dma_direct_ops;
2554+extern const struct dma_map_ops dma_direct_ops;
2555
2556-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2557+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2558 {
2559 /* We don't handle the NULL dev case for ISA for now. We could
2560 * do it via an out of line call but it is not needed for now. The
2561@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2562 return dev->archdata.dma_ops;
2563 }
2564
2565-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2566+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2567 {
2568 dev->archdata.dma_ops = ops;
2569 }
2570@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2571
2572 static inline int dma_supported(struct device *dev, u64 mask)
2573 {
2574- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2575+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2576
2577 if (unlikely(dma_ops == NULL))
2578 return 0;
2579@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2580
2581 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2582 {
2583- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2584+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2585
2586 if (unlikely(dma_ops == NULL))
2587 return -EIO;
2588@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2589 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2590 dma_addr_t *dma_handle, gfp_t flag)
2591 {
2592- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2593+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2594 void *cpu_addr;
2595
2596 BUG_ON(!dma_ops);
2597@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2598 static inline void dma_free_coherent(struct device *dev, size_t size,
2599 void *cpu_addr, dma_addr_t dma_handle)
2600 {
2601- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2602+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2603
2604 BUG_ON(!dma_ops);
2605
2606@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2607
2608 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2609 {
2610- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2611+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2612
2613 if (dma_ops->mapping_error)
2614 return dma_ops->mapping_error(dev, dma_addr);
2615diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2616index 5698502..5db093c 100644
2617--- a/arch/powerpc/include/asm/elf.h
2618+++ b/arch/powerpc/include/asm/elf.h
2619@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2620 the loader. We need to make sure that it is out of the way of the program
2621 that it will "exec", and that there is sufficient room for the brk. */
2622
2623-extern unsigned long randomize_et_dyn(unsigned long base);
2624-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2625+#define ELF_ET_DYN_BASE (0x20000000)
2626+
2627+#ifdef CONFIG_PAX_ASLR
2628+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2629+
2630+#ifdef __powerpc64__
2631+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2632+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2633+#else
2634+#define PAX_DELTA_MMAP_LEN 15
2635+#define PAX_DELTA_STACK_LEN 15
2636+#endif
2637+#endif
2638
2639 /*
2640 * Our registers are always unsigned longs, whether we're a 32 bit
2641@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2642 (0x7ff >> (PAGE_SHIFT - 12)) : \
2643 (0x3ffff >> (PAGE_SHIFT - 12)))
2644
2645-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2646-#define arch_randomize_brk arch_randomize_brk
2647-
2648 #endif /* __KERNEL__ */
2649
2650 /*
2651diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2652index edfc980..1766f59 100644
2653--- a/arch/powerpc/include/asm/iommu.h
2654+++ b/arch/powerpc/include/asm/iommu.h
2655@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2656 extern void iommu_init_early_dart(void);
2657 extern void iommu_init_early_pasemi(void);
2658
2659+/* dma-iommu.c */
2660+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2661+
2662 #ifdef CONFIG_PCI
2663 extern void pci_iommu_init(void);
2664 extern void pci_direct_iommu_init(void);
2665diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2666index 9163695..5a00112 100644
2667--- a/arch/powerpc/include/asm/kmap_types.h
2668+++ b/arch/powerpc/include/asm/kmap_types.h
2669@@ -26,6 +26,7 @@ enum km_type {
2670 KM_SOFTIRQ1,
2671 KM_PPC_SYNC_PAGE,
2672 KM_PPC_SYNC_ICACHE,
2673+ KM_CLEARPAGE,
2674 KM_TYPE_NR
2675 };
2676
2677diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2678index ff24254..fe45b21 100644
2679--- a/arch/powerpc/include/asm/page.h
2680+++ b/arch/powerpc/include/asm/page.h
2681@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2682 * and needs to be executable. This means the whole heap ends
2683 * up being executable.
2684 */
2685-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2686- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2687+#define VM_DATA_DEFAULT_FLAGS32 \
2688+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2689+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2690
2691 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2692 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2693@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2694 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2695 #endif
2696
2697+#define ktla_ktva(addr) (addr)
2698+#define ktva_ktla(addr) (addr)
2699+
2700 #ifndef __ASSEMBLY__
2701
2702 #undef STRICT_MM_TYPECHECKS
2703diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2704index 3f17b83..1f9e766 100644
2705--- a/arch/powerpc/include/asm/page_64.h
2706+++ b/arch/powerpc/include/asm/page_64.h
2707@@ -180,15 +180,18 @@ do { \
2708 * stack by default, so in the absense of a PT_GNU_STACK program header
2709 * we turn execute permission off.
2710 */
2711-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2712- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2713+#define VM_STACK_DEFAULT_FLAGS32 \
2714+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2715+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2716
2717 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2718 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2719
2720+#ifndef CONFIG_PAX_PAGEEXEC
2721 #define VM_STACK_DEFAULT_FLAGS \
2722 (test_thread_flag(TIF_32BIT) ? \
2723 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2724+#endif
2725
2726 #include <asm-generic/getorder.h>
2727
2728diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2729index b5ea626..4030822 100644
2730--- a/arch/powerpc/include/asm/pci.h
2731+++ b/arch/powerpc/include/asm/pci.h
2732@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2733 }
2734
2735 #ifdef CONFIG_PCI
2736-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2737-extern struct dma_map_ops *get_pci_dma_ops(void);
2738+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2739+extern const struct dma_map_ops *get_pci_dma_ops(void);
2740 #else /* CONFIG_PCI */
2741 #define set_pci_dma_ops(d)
2742 #define get_pci_dma_ops() NULL
2743diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2744index 2a5da06..d65bea2 100644
2745--- a/arch/powerpc/include/asm/pgtable.h
2746+++ b/arch/powerpc/include/asm/pgtable.h
2747@@ -2,6 +2,7 @@
2748 #define _ASM_POWERPC_PGTABLE_H
2749 #ifdef __KERNEL__
2750
2751+#include <linux/const.h>
2752 #ifndef __ASSEMBLY__
2753 #include <asm/processor.h> /* For TASK_SIZE */
2754 #include <asm/mmu.h>
2755diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2756index 4aad413..85d86bf 100644
2757--- a/arch/powerpc/include/asm/pte-hash32.h
2758+++ b/arch/powerpc/include/asm/pte-hash32.h
2759@@ -21,6 +21,7 @@
2760 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2761 #define _PAGE_USER 0x004 /* usermode access allowed */
2762 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2763+#define _PAGE_EXEC _PAGE_GUARDED
2764 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2765 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2766 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2767diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2768index 8c34149..78f425a 100644
2769--- a/arch/powerpc/include/asm/ptrace.h
2770+++ b/arch/powerpc/include/asm/ptrace.h
2771@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2772 } while(0)
2773
2774 struct task_struct;
2775-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2776+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2777 extern int ptrace_put_reg(struct task_struct *task, int regno,
2778 unsigned long data);
2779
2780diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2781index 32a7c30..be3a8bb 100644
2782--- a/arch/powerpc/include/asm/reg.h
2783+++ b/arch/powerpc/include/asm/reg.h
2784@@ -191,6 +191,7 @@
2785 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2786 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2787 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2788+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2789 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2790 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2791 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2792diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2793index 8979d4c..d2fd0d3 100644
2794--- a/arch/powerpc/include/asm/swiotlb.h
2795+++ b/arch/powerpc/include/asm/swiotlb.h
2796@@ -13,7 +13,7 @@
2797
2798 #include <linux/swiotlb.h>
2799
2800-extern struct dma_map_ops swiotlb_dma_ops;
2801+extern const struct dma_map_ops swiotlb_dma_ops;
2802
2803 static inline void dma_mark_clean(void *addr, size_t size) {}
2804
2805diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2806index 094a12a..877a60a 100644
2807--- a/arch/powerpc/include/asm/system.h
2808+++ b/arch/powerpc/include/asm/system.h
2809@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2810 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2811 #endif
2812
2813-extern unsigned long arch_align_stack(unsigned long sp);
2814+#define arch_align_stack(x) ((x) & ~0xfUL)
2815
2816 /* Used in very early kernel initialization. */
2817 extern unsigned long reloc_offset(void);
2818diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2819index bd0fb84..a42a14b 100644
2820--- a/arch/powerpc/include/asm/uaccess.h
2821+++ b/arch/powerpc/include/asm/uaccess.h
2822@@ -13,6 +13,8 @@
2823 #define VERIFY_READ 0
2824 #define VERIFY_WRITE 1
2825
2826+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2827+
2828 /*
2829 * The fs value determines whether argument validity checking should be
2830 * performed or not. If get_fs() == USER_DS, checking is performed, with
2831@@ -327,52 +329,6 @@ do { \
2832 extern unsigned long __copy_tofrom_user(void __user *to,
2833 const void __user *from, unsigned long size);
2834
2835-#ifndef __powerpc64__
2836-
2837-static inline unsigned long copy_from_user(void *to,
2838- const void __user *from, unsigned long n)
2839-{
2840- unsigned long over;
2841-
2842- if (access_ok(VERIFY_READ, from, n))
2843- return __copy_tofrom_user((__force void __user *)to, from, n);
2844- if ((unsigned long)from < TASK_SIZE) {
2845- over = (unsigned long)from + n - TASK_SIZE;
2846- return __copy_tofrom_user((__force void __user *)to, from,
2847- n - over) + over;
2848- }
2849- return n;
2850-}
2851-
2852-static inline unsigned long copy_to_user(void __user *to,
2853- const void *from, unsigned long n)
2854-{
2855- unsigned long over;
2856-
2857- if (access_ok(VERIFY_WRITE, to, n))
2858- return __copy_tofrom_user(to, (__force void __user *)from, n);
2859- if ((unsigned long)to < TASK_SIZE) {
2860- over = (unsigned long)to + n - TASK_SIZE;
2861- return __copy_tofrom_user(to, (__force void __user *)from,
2862- n - over) + over;
2863- }
2864- return n;
2865-}
2866-
2867-#else /* __powerpc64__ */
2868-
2869-#define __copy_in_user(to, from, size) \
2870- __copy_tofrom_user((to), (from), (size))
2871-
2872-extern unsigned long copy_from_user(void *to, const void __user *from,
2873- unsigned long n);
2874-extern unsigned long copy_to_user(void __user *to, const void *from,
2875- unsigned long n);
2876-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2877- unsigned long n);
2878-
2879-#endif /* __powerpc64__ */
2880-
2881 static inline unsigned long __copy_from_user_inatomic(void *to,
2882 const void __user *from, unsigned long n)
2883 {
2884@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2885 if (ret == 0)
2886 return 0;
2887 }
2888+
2889+ if (!__builtin_constant_p(n))
2890+ check_object_size(to, n, false);
2891+
2892 return __copy_tofrom_user((__force void __user *)to, from, n);
2893 }
2894
2895@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2896 if (ret == 0)
2897 return 0;
2898 }
2899+
2900+ if (!__builtin_constant_p(n))
2901+ check_object_size(from, n, true);
2902+
2903 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2904 }
2905
2906@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2907 return __copy_to_user_inatomic(to, from, size);
2908 }
2909
2910+#ifndef __powerpc64__
2911+
2912+static inline unsigned long __must_check copy_from_user(void *to,
2913+ const void __user *from, unsigned long n)
2914+{
2915+ unsigned long over;
2916+
2917+ if ((long)n < 0)
2918+ return n;
2919+
2920+ if (access_ok(VERIFY_READ, from, n)) {
2921+ if (!__builtin_constant_p(n))
2922+ check_object_size(to, n, false);
2923+ return __copy_tofrom_user((__force void __user *)to, from, n);
2924+ }
2925+ if ((unsigned long)from < TASK_SIZE) {
2926+ over = (unsigned long)from + n - TASK_SIZE;
2927+ if (!__builtin_constant_p(n - over))
2928+ check_object_size(to, n - over, false);
2929+ return __copy_tofrom_user((__force void __user *)to, from,
2930+ n - over) + over;
2931+ }
2932+ return n;
2933+}
2934+
2935+static inline unsigned long __must_check copy_to_user(void __user *to,
2936+ const void *from, unsigned long n)
2937+{
2938+ unsigned long over;
2939+
2940+ if ((long)n < 0)
2941+ return n;
2942+
2943+ if (access_ok(VERIFY_WRITE, to, n)) {
2944+ if (!__builtin_constant_p(n))
2945+ check_object_size(from, n, true);
2946+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2947+ }
2948+ if ((unsigned long)to < TASK_SIZE) {
2949+ over = (unsigned long)to + n - TASK_SIZE;
2950+ if (!__builtin_constant_p(n))
2951+ check_object_size(from, n - over, true);
2952+ return __copy_tofrom_user(to, (__force void __user *)from,
2953+ n - over) + over;
2954+ }
2955+ return n;
2956+}
2957+
2958+#else /* __powerpc64__ */
2959+
2960+#define __copy_in_user(to, from, size) \
2961+ __copy_tofrom_user((to), (from), (size))
2962+
2963+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2964+{
2965+ if ((long)n < 0 || n > INT_MAX)
2966+ return n;
2967+
2968+ if (!__builtin_constant_p(n))
2969+ check_object_size(to, n, false);
2970+
2971+ if (likely(access_ok(VERIFY_READ, from, n)))
2972+ n = __copy_from_user(to, from, n);
2973+ else
2974+ memset(to, 0, n);
2975+ return n;
2976+}
2977+
2978+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2979+{
2980+ if ((long)n < 0 || n > INT_MAX)
2981+ return n;
2982+
2983+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2984+ if (!__builtin_constant_p(n))
2985+ check_object_size(from, n, true);
2986+ n = __copy_to_user(to, from, n);
2987+ }
2988+ return n;
2989+}
2990+
2991+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2992+ unsigned long n);
2993+
2994+#endif /* __powerpc64__ */
2995+
2996 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2997
2998 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2999diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3000index bb37b1d..01fe9ce 100644
3001--- a/arch/powerpc/kernel/cacheinfo.c
3002+++ b/arch/powerpc/kernel/cacheinfo.c
3003@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3004 &cache_assoc_attr,
3005 };
3006
3007-static struct sysfs_ops cache_index_ops = {
3008+static const struct sysfs_ops cache_index_ops = {
3009 .show = cache_index_show,
3010 };
3011
3012diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3013index 37771a5..648530c 100644
3014--- a/arch/powerpc/kernel/dma-iommu.c
3015+++ b/arch/powerpc/kernel/dma-iommu.c
3016@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3017 }
3018
3019 /* We support DMA to/from any memory page via the iommu */
3020-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3021+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3022 {
3023 struct iommu_table *tbl = get_iommu_table_base(dev);
3024
3025diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3026index e96cbbd..bdd6d41 100644
3027--- a/arch/powerpc/kernel/dma-swiotlb.c
3028+++ b/arch/powerpc/kernel/dma-swiotlb.c
3029@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3030 * map_page, and unmap_page on highmem, use normal dma_ops
3031 * for everything else.
3032 */
3033-struct dma_map_ops swiotlb_dma_ops = {
3034+const struct dma_map_ops swiotlb_dma_ops = {
3035 .alloc_coherent = dma_direct_alloc_coherent,
3036 .free_coherent = dma_direct_free_coherent,
3037 .map_sg = swiotlb_map_sg_attrs,
3038diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3039index 6215062..ebea59c 100644
3040--- a/arch/powerpc/kernel/dma.c
3041+++ b/arch/powerpc/kernel/dma.c
3042@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3043 }
3044 #endif
3045
3046-struct dma_map_ops dma_direct_ops = {
3047+const struct dma_map_ops dma_direct_ops = {
3048 .alloc_coherent = dma_direct_alloc_coherent,
3049 .free_coherent = dma_direct_free_coherent,
3050 .map_sg = dma_direct_map_sg,
3051diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3052index 24dcc0e..a300455 100644
3053--- a/arch/powerpc/kernel/exceptions-64e.S
3054+++ b/arch/powerpc/kernel/exceptions-64e.S
3055@@ -455,6 +455,7 @@ storage_fault_common:
3056 std r14,_DAR(r1)
3057 std r15,_DSISR(r1)
3058 addi r3,r1,STACK_FRAME_OVERHEAD
3059+ bl .save_nvgprs
3060 mr r4,r14
3061 mr r5,r15
3062 ld r14,PACA_EXGEN+EX_R14(r13)
3063@@ -464,8 +465,7 @@ storage_fault_common:
3064 cmpdi r3,0
3065 bne- 1f
3066 b .ret_from_except_lite
3067-1: bl .save_nvgprs
3068- mr r5,r3
3069+1: mr r5,r3
3070 addi r3,r1,STACK_FRAME_OVERHEAD
3071 ld r4,_DAR(r1)
3072 bl .bad_page_fault
3073diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3074index 1808876..9fd206a 100644
3075--- a/arch/powerpc/kernel/exceptions-64s.S
3076+++ b/arch/powerpc/kernel/exceptions-64s.S
3077@@ -818,10 +818,10 @@ handle_page_fault:
3078 11: ld r4,_DAR(r1)
3079 ld r5,_DSISR(r1)
3080 addi r3,r1,STACK_FRAME_OVERHEAD
3081+ bl .save_nvgprs
3082 bl .do_page_fault
3083 cmpdi r3,0
3084 beq+ 13f
3085- bl .save_nvgprs
3086 mr r5,r3
3087 addi r3,r1,STACK_FRAME_OVERHEAD
3088 lwz r4,_DAR(r1)
3089diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3090index a4c8b38..1b09ad9 100644
3091--- a/arch/powerpc/kernel/ibmebus.c
3092+++ b/arch/powerpc/kernel/ibmebus.c
3093@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3094 return 1;
3095 }
3096
3097-static struct dma_map_ops ibmebus_dma_ops = {
3098+static const struct dma_map_ops ibmebus_dma_ops = {
3099 .alloc_coherent = ibmebus_alloc_coherent,
3100 .free_coherent = ibmebus_free_coherent,
3101 .map_sg = ibmebus_map_sg,
3102diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3103index 641c74b..8339ad7 100644
3104--- a/arch/powerpc/kernel/kgdb.c
3105+++ b/arch/powerpc/kernel/kgdb.c
3106@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3107 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3108 return 0;
3109
3110- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3111+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3112 regs->nip += 4;
3113
3114 return 1;
3115@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3116 /*
3117 * Global data
3118 */
3119-struct kgdb_arch arch_kgdb_ops = {
3120+const struct kgdb_arch arch_kgdb_ops = {
3121 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3122 };
3123
3124diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3125index 477c663..4f50234 100644
3126--- a/arch/powerpc/kernel/module.c
3127+++ b/arch/powerpc/kernel/module.c
3128@@ -31,11 +31,24 @@
3129
3130 LIST_HEAD(module_bug_list);
3131
3132+#ifdef CONFIG_PAX_KERNEXEC
3133 void *module_alloc(unsigned long size)
3134 {
3135 if (size == 0)
3136 return NULL;
3137
3138+ return vmalloc(size);
3139+}
3140+
3141+void *module_alloc_exec(unsigned long size)
3142+#else
3143+void *module_alloc(unsigned long size)
3144+#endif
3145+
3146+{
3147+ if (size == 0)
3148+ return NULL;
3149+
3150 return vmalloc_exec(size);
3151 }
3152
3153@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3154 vfree(module_region);
3155 }
3156
3157+#ifdef CONFIG_PAX_KERNEXEC
3158+void module_free_exec(struct module *mod, void *module_region)
3159+{
3160+ module_free(mod, module_region);
3161+}
3162+#endif
3163+
3164 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3165 const Elf_Shdr *sechdrs,
3166 const char *name)
3167diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3168index f832773..0507238 100644
3169--- a/arch/powerpc/kernel/module_32.c
3170+++ b/arch/powerpc/kernel/module_32.c
3171@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3172 me->arch.core_plt_section = i;
3173 }
3174 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3175- printk("Module doesn't contain .plt or .init.plt sections.\n");
3176+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3177 return -ENOEXEC;
3178 }
3179
3180@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3181
3182 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3183 /* Init, or core PLT? */
3184- if (location >= mod->module_core
3185- && location < mod->module_core + mod->core_size)
3186+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3187+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3188 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3189- else
3190+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3191+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3192 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3193+ else {
3194+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3195+ return ~0UL;
3196+ }
3197
3198 /* Find this entry, or if that fails, the next avail. entry */
3199 while (entry->jump[0]) {
3200diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3201index cadbed6..b9bbb00 100644
3202--- a/arch/powerpc/kernel/pci-common.c
3203+++ b/arch/powerpc/kernel/pci-common.c
3204@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3205 unsigned int ppc_pci_flags = 0;
3206
3207
3208-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3209+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3210
3211-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3212+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3213 {
3214 pci_dma_ops = dma_ops;
3215 }
3216
3217-struct dma_map_ops *get_pci_dma_ops(void)
3218+const struct dma_map_ops *get_pci_dma_ops(void)
3219 {
3220 return pci_dma_ops;
3221 }
3222diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3223index 7b816da..8d5c277 100644
3224--- a/arch/powerpc/kernel/process.c
3225+++ b/arch/powerpc/kernel/process.c
3226@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3227 * Lookup NIP late so we have the best change of getting the
3228 * above info out without failing
3229 */
3230- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3231- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3232+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3233+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3234 #endif
3235 show_stack(current, (unsigned long *) regs->gpr[1]);
3236 if (!user_mode(regs))
3237@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3238 newsp = stack[0];
3239 ip = stack[STACK_FRAME_LR_SAVE];
3240 if (!firstframe || ip != lr) {
3241- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3242+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3243 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3244 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3245- printk(" (%pS)",
3246+ printk(" (%pA)",
3247 (void *)current->ret_stack[curr_frame].ret);
3248 curr_frame--;
3249 }
3250@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3251 struct pt_regs *regs = (struct pt_regs *)
3252 (sp + STACK_FRAME_OVERHEAD);
3253 lr = regs->link;
3254- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3255+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3256 regs->trap, (void *)regs->nip, (void *)lr);
3257 firstframe = 1;
3258 }
3259@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3260 }
3261
3262 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3263-
3264-unsigned long arch_align_stack(unsigned long sp)
3265-{
3266- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3267- sp -= get_random_int() & ~PAGE_MASK;
3268- return sp & ~0xf;
3269-}
3270-
3271-static inline unsigned long brk_rnd(void)
3272-{
3273- unsigned long rnd = 0;
3274-
3275- /* 8MB for 32bit, 1GB for 64bit */
3276- if (is_32bit_task())
3277- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3278- else
3279- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3280-
3281- return rnd << PAGE_SHIFT;
3282-}
3283-
3284-unsigned long arch_randomize_brk(struct mm_struct *mm)
3285-{
3286- unsigned long base = mm->brk;
3287- unsigned long ret;
3288-
3289-#ifdef CONFIG_PPC_STD_MMU_64
3290- /*
3291- * If we are using 1TB segments and we are allowed to randomise
3292- * the heap, we can put it above 1TB so it is backed by a 1TB
3293- * segment. Otherwise the heap will be in the bottom 1TB
3294- * which always uses 256MB segments and this may result in a
3295- * performance penalty.
3296- */
3297- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3298- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3299-#endif
3300-
3301- ret = PAGE_ALIGN(base + brk_rnd());
3302-
3303- if (ret < mm->brk)
3304- return mm->brk;
3305-
3306- return ret;
3307-}
3308-
3309-unsigned long randomize_et_dyn(unsigned long base)
3310-{
3311- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3312-
3313- if (ret < base)
3314- return base;
3315-
3316- return ret;
3317-}
3318diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3319index ef14988..856c4bc 100644
3320--- a/arch/powerpc/kernel/ptrace.c
3321+++ b/arch/powerpc/kernel/ptrace.c
3322@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3323 /*
3324 * Get contents of register REGNO in task TASK.
3325 */
3326-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3327+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3328 {
3329 if (task->thread.regs == NULL)
3330 return -EIO;
3331@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3332
3333 CHECK_FULL_REGS(child->thread.regs);
3334 if (index < PT_FPR0) {
3335- tmp = ptrace_get_reg(child, (int) index);
3336+ tmp = ptrace_get_reg(child, index);
3337 } else {
3338 flush_fp_to_thread(child);
3339 tmp = ((unsigned long *)child->thread.fpr)
3340diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3341index d670429..2bc59b2 100644
3342--- a/arch/powerpc/kernel/signal_32.c
3343+++ b/arch/powerpc/kernel/signal_32.c
3344@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3345 /* Save user registers on the stack */
3346 frame = &rt_sf->uc.uc_mcontext;
3347 addr = frame;
3348- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3349+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3350 if (save_user_regs(regs, frame, 0, 1))
3351 goto badframe;
3352 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3353diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3354index 2fe6fc6..ada0d96 100644
3355--- a/arch/powerpc/kernel/signal_64.c
3356+++ b/arch/powerpc/kernel/signal_64.c
3357@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3358 current->thread.fpscr.val = 0;
3359
3360 /* Set up to return from userspace. */
3361- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3362+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3363 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3364 } else {
3365 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3366diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3367index b97c2d6..dd01a6a 100644
3368--- a/arch/powerpc/kernel/sys_ppc32.c
3369+++ b/arch/powerpc/kernel/sys_ppc32.c
3370@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3371 if (oldlenp) {
3372 if (!error) {
3373 if (get_user(oldlen, oldlenp) ||
3374- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3375+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3376+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3377 error = -EFAULT;
3378 }
3379- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3380 }
3381 return error;
3382 }
3383diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3384index 6f0ae1a..e4b6a56 100644
3385--- a/arch/powerpc/kernel/traps.c
3386+++ b/arch/powerpc/kernel/traps.c
3387@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3388 static inline void pmac_backlight_unblank(void) { }
3389 #endif
3390
3391+extern void gr_handle_kernel_exploit(void);
3392+
3393 int die(const char *str, struct pt_regs *regs, long err)
3394 {
3395 static struct {
3396@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3397 if (panic_on_oops)
3398 panic("Fatal exception");
3399
3400+ gr_handle_kernel_exploit();
3401+
3402 oops_exit();
3403 do_exit(err);
3404
3405diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3406index 137dc22..fe57a79 100644
3407--- a/arch/powerpc/kernel/vdso.c
3408+++ b/arch/powerpc/kernel/vdso.c
3409@@ -36,6 +36,7 @@
3410 #include <asm/firmware.h>
3411 #include <asm/vdso.h>
3412 #include <asm/vdso_datapage.h>
3413+#include <asm/mman.h>
3414
3415 #include "setup.h"
3416
3417@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3418 vdso_base = VDSO32_MBASE;
3419 #endif
3420
3421- current->mm->context.vdso_base = 0;
3422+ current->mm->context.vdso_base = ~0UL;
3423
3424 /* vDSO has a problem and was disabled, just don't "enable" it for the
3425 * process
3426@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3427 vdso_base = get_unmapped_area(NULL, vdso_base,
3428 (vdso_pages << PAGE_SHIFT) +
3429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3430- 0, 0);
3431+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3432 if (IS_ERR_VALUE(vdso_base)) {
3433 rc = vdso_base;
3434 goto fail_mmapsem;
3435diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3436index 77f6421..829564a 100644
3437--- a/arch/powerpc/kernel/vio.c
3438+++ b/arch/powerpc/kernel/vio.c
3439@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3440 vio_cmo_dealloc(viodev, alloc_size);
3441 }
3442
3443-struct dma_map_ops vio_dma_mapping_ops = {
3444+static const struct dma_map_ops vio_dma_mapping_ops = {
3445 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3446 .free_coherent = vio_dma_iommu_free_coherent,
3447 .map_sg = vio_dma_iommu_map_sg,
3448 .unmap_sg = vio_dma_iommu_unmap_sg,
3449+ .dma_supported = dma_iommu_dma_supported,
3450 .map_page = vio_dma_iommu_map_page,
3451 .unmap_page = vio_dma_iommu_unmap_page,
3452
3453@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3454
3455 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3456 {
3457- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3458 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3459 }
3460
3461diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3462index 5eea6f3..5d10396 100644
3463--- a/arch/powerpc/lib/usercopy_64.c
3464+++ b/arch/powerpc/lib/usercopy_64.c
3465@@ -9,22 +9,6 @@
3466 #include <linux/module.h>
3467 #include <asm/uaccess.h>
3468
3469-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3470-{
3471- if (likely(access_ok(VERIFY_READ, from, n)))
3472- n = __copy_from_user(to, from, n);
3473- else
3474- memset(to, 0, n);
3475- return n;
3476-}
3477-
3478-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3479-{
3480- if (likely(access_ok(VERIFY_WRITE, to, n)))
3481- n = __copy_to_user(to, from, n);
3482- return n;
3483-}
3484-
3485 unsigned long copy_in_user(void __user *to, const void __user *from,
3486 unsigned long n)
3487 {
3488@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3489 return n;
3490 }
3491
3492-EXPORT_SYMBOL(copy_from_user);
3493-EXPORT_SYMBOL(copy_to_user);
3494 EXPORT_SYMBOL(copy_in_user);
3495
3496diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3497index e7dae82..877ce0d 100644
3498--- a/arch/powerpc/mm/fault.c
3499+++ b/arch/powerpc/mm/fault.c
3500@@ -30,6 +30,10 @@
3501 #include <linux/kprobes.h>
3502 #include <linux/kdebug.h>
3503 #include <linux/perf_event.h>
3504+#include <linux/slab.h>
3505+#include <linux/pagemap.h>
3506+#include <linux/compiler.h>
3507+#include <linux/unistd.h>
3508
3509 #include <asm/firmware.h>
3510 #include <asm/page.h>
3511@@ -40,6 +44,7 @@
3512 #include <asm/uaccess.h>
3513 #include <asm/tlbflush.h>
3514 #include <asm/siginfo.h>
3515+#include <asm/ptrace.h>
3516
3517
3518 #ifdef CONFIG_KPROBES
3519@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3520 }
3521 #endif
3522
3523+#ifdef CONFIG_PAX_PAGEEXEC
3524+/*
3525+ * PaX: decide what to do with offenders (regs->nip = fault address)
3526+ *
3527+ * returns 1 when task should be killed
3528+ */
3529+static int pax_handle_fetch_fault(struct pt_regs *regs)
3530+{
3531+ return 1;
3532+}
3533+
3534+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3535+{
3536+ unsigned long i;
3537+
3538+ printk(KERN_ERR "PAX: bytes at PC: ");
3539+ for (i = 0; i < 5; i++) {
3540+ unsigned int c;
3541+ if (get_user(c, (unsigned int __user *)pc+i))
3542+ printk(KERN_CONT "???????? ");
3543+ else
3544+ printk(KERN_CONT "%08x ", c);
3545+ }
3546+ printk("\n");
3547+}
3548+#endif
3549+
3550 /*
3551 * Check whether the instruction at regs->nip is a store using
3552 * an update addressing form which will update r1.
3553@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3554 * indicate errors in DSISR but can validly be set in SRR1.
3555 */
3556 if (trap == 0x400)
3557- error_code &= 0x48200000;
3558+ error_code &= 0x58200000;
3559 else
3560 is_write = error_code & DSISR_ISSTORE;
3561 #else
3562@@ -250,7 +282,7 @@ good_area:
3563 * "undefined". Of those that can be set, this is the only
3564 * one which seems bad.
3565 */
3566- if (error_code & 0x10000000)
3567+ if (error_code & DSISR_GUARDED)
3568 /* Guarded storage error. */
3569 goto bad_area;
3570 #endif /* CONFIG_8xx */
3571@@ -265,7 +297,7 @@ good_area:
3572 * processors use the same I/D cache coherency mechanism
3573 * as embedded.
3574 */
3575- if (error_code & DSISR_PROTFAULT)
3576+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3577 goto bad_area;
3578 #endif /* CONFIG_PPC_STD_MMU */
3579
3580@@ -335,6 +367,23 @@ bad_area:
3581 bad_area_nosemaphore:
3582 /* User mode accesses cause a SIGSEGV */
3583 if (user_mode(regs)) {
3584+
3585+#ifdef CONFIG_PAX_PAGEEXEC
3586+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3587+#ifdef CONFIG_PPC_STD_MMU
3588+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3589+#else
3590+ if (is_exec && regs->nip == address) {
3591+#endif
3592+ switch (pax_handle_fetch_fault(regs)) {
3593+ }
3594+
3595+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3596+ do_group_exit(SIGKILL);
3597+ }
3598+ }
3599+#endif
3600+
3601 _exception(SIGSEGV, regs, code, address);
3602 return 0;
3603 }
3604diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3605index 5973631..ad617af 100644
3606--- a/arch/powerpc/mm/mem.c
3607+++ b/arch/powerpc/mm/mem.c
3608@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3609 {
3610 unsigned long lmb_next_region_start_pfn,
3611 lmb_region_max_pfn;
3612- int i;
3613+ unsigned int i;
3614
3615 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3616 lmb_region_max_pfn =
3617diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3618index 0d957a4..26d968f 100644
3619--- a/arch/powerpc/mm/mmap_64.c
3620+++ b/arch/powerpc/mm/mmap_64.c
3621@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3622 */
3623 if (mmap_is_legacy()) {
3624 mm->mmap_base = TASK_UNMAPPED_BASE;
3625+
3626+#ifdef CONFIG_PAX_RANDMMAP
3627+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3628+ mm->mmap_base += mm->delta_mmap;
3629+#endif
3630+
3631 mm->get_unmapped_area = arch_get_unmapped_area;
3632 mm->unmap_area = arch_unmap_area;
3633 } else {
3634 mm->mmap_base = mmap_base();
3635+
3636+#ifdef CONFIG_PAX_RANDMMAP
3637+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3638+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3639+#endif
3640+
3641 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3642 mm->unmap_area = arch_unmap_area_topdown;
3643 }
3644diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3645index ba51948..23009d9 100644
3646--- a/arch/powerpc/mm/slice.c
3647+++ b/arch/powerpc/mm/slice.c
3648@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3649 if ((mm->task_size - len) < addr)
3650 return 0;
3651 vma = find_vma(mm, addr);
3652- return (!vma || (addr + len) <= vma->vm_start);
3653+ return check_heap_stack_gap(vma, addr, len);
3654 }
3655
3656 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3657@@ -256,7 +256,7 @@ full_search:
3658 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3659 continue;
3660 }
3661- if (!vma || addr + len <= vma->vm_start) {
3662+ if (check_heap_stack_gap(vma, addr, len)) {
3663 /*
3664 * Remember the place where we stopped the search:
3665 */
3666@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3667 }
3668 }
3669
3670- addr = mm->mmap_base;
3671- while (addr > len) {
3672+ if (mm->mmap_base < len)
3673+ addr = -ENOMEM;
3674+ else
3675+ addr = mm->mmap_base - len;
3676+
3677+ while (!IS_ERR_VALUE(addr)) {
3678 /* Go down by chunk size */
3679- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3680+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3681
3682 /* Check for hit with different page size */
3683 mask = slice_range_to_mask(addr, len);
3684@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3685 * return with success:
3686 */
3687 vma = find_vma(mm, addr);
3688- if (!vma || (addr + len) <= vma->vm_start) {
3689+ if (check_heap_stack_gap(vma, addr, len)) {
3690 /* remember the address as a hint for next time */
3691 if (use_cache)
3692 mm->free_area_cache = addr;
3693@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3694 mm->cached_hole_size = vma->vm_start - addr;
3695
3696 /* try just below the current vma->vm_start */
3697- addr = vma->vm_start;
3698+ addr = skip_heap_stack_gap(vma, len);
3699 }
3700
3701 /*
3702@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3703 if (fixed && addr > (mm->task_size - len))
3704 return -EINVAL;
3705
3706+#ifdef CONFIG_PAX_RANDMMAP
3707+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3708+ addr = 0;
3709+#endif
3710+
3711 /* If hint, make sure it matches our alignment restrictions */
3712 if (!fixed && addr) {
3713 addr = _ALIGN_UP(addr, 1ul << pshift);
3714diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3715index b5c753d..8f01abe 100644
3716--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3717+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3718@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3719 lite5200_pm_target_state = PM_SUSPEND_ON;
3720 }
3721
3722-static struct platform_suspend_ops lite5200_pm_ops = {
3723+static const struct platform_suspend_ops lite5200_pm_ops = {
3724 .valid = lite5200_pm_valid,
3725 .begin = lite5200_pm_begin,
3726 .prepare = lite5200_pm_prepare,
3727diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3728index a55b0b6..478c18e 100644
3729--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3730+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3731@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3732 iounmap(mbar);
3733 }
3734
3735-static struct platform_suspend_ops mpc52xx_pm_ops = {
3736+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3737 .valid = mpc52xx_pm_valid,
3738 .prepare = mpc52xx_pm_prepare,
3739 .enter = mpc52xx_pm_enter,
3740diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3741index 08e65fc..643d3ac 100644
3742--- a/arch/powerpc/platforms/83xx/suspend.c
3743+++ b/arch/powerpc/platforms/83xx/suspend.c
3744@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3745 return ret;
3746 }
3747
3748-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3749+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3750 .valid = mpc83xx_suspend_valid,
3751 .begin = mpc83xx_suspend_begin,
3752 .enter = mpc83xx_suspend_enter,
3753diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3754index ca5bfdf..1602e09 100644
3755--- a/arch/powerpc/platforms/cell/iommu.c
3756+++ b/arch/powerpc/platforms/cell/iommu.c
3757@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3758
3759 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3760
3761-struct dma_map_ops dma_iommu_fixed_ops = {
3762+const struct dma_map_ops dma_iommu_fixed_ops = {
3763 .alloc_coherent = dma_fixed_alloc_coherent,
3764 .free_coherent = dma_fixed_free_coherent,
3765 .map_sg = dma_fixed_map_sg,
3766diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3767index e34b305..20e48ec 100644
3768--- a/arch/powerpc/platforms/ps3/system-bus.c
3769+++ b/arch/powerpc/platforms/ps3/system-bus.c
3770@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3771 return mask >= DMA_BIT_MASK(32);
3772 }
3773
3774-static struct dma_map_ops ps3_sb_dma_ops = {
3775+static const struct dma_map_ops ps3_sb_dma_ops = {
3776 .alloc_coherent = ps3_alloc_coherent,
3777 .free_coherent = ps3_free_coherent,
3778 .map_sg = ps3_sb_map_sg,
3779@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3780 .unmap_page = ps3_unmap_page,
3781 };
3782
3783-static struct dma_map_ops ps3_ioc0_dma_ops = {
3784+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3785 .alloc_coherent = ps3_alloc_coherent,
3786 .free_coherent = ps3_free_coherent,
3787 .map_sg = ps3_ioc0_map_sg,
3788diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3789index f0e6f28..60d53ed 100644
3790--- a/arch/powerpc/platforms/pseries/Kconfig
3791+++ b/arch/powerpc/platforms/pseries/Kconfig
3792@@ -2,6 +2,8 @@ config PPC_PSERIES
3793 depends on PPC64 && PPC_BOOK3S
3794 bool "IBM pSeries & new (POWER5-based) iSeries"
3795 select MPIC
3796+ select PCI_MSI
3797+ select XICS
3798 select PPC_I8259
3799 select PPC_RTAS
3800 select RTAS_ERROR_LOGGING
3801diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3802index 43c0aca..42c045b 100644
3803--- a/arch/s390/Kconfig
3804+++ b/arch/s390/Kconfig
3805@@ -194,28 +194,26 @@ config AUDIT_ARCH
3806
3807 config S390_SWITCH_AMODE
3808 bool "Switch kernel/user addressing modes"
3809+ default y
3810 help
3811 This option allows to switch the addressing modes of kernel and user
3812- space. The kernel parameter switch_amode=on will enable this feature,
3813- default is disabled. Enabling this (via kernel parameter) on machines
3814- earlier than IBM System z9-109 EC/BC will reduce system performance.
3815+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3816+ will reduce system performance.
3817
3818 Note that this option will also be selected by selecting the execute
3819- protection option below. Enabling the execute protection via the
3820- noexec kernel parameter will also switch the addressing modes,
3821- independent of the switch_amode kernel parameter.
3822+ protection option below. Enabling the execute protection will also
3823+ switch the addressing modes, independent of this option.
3824
3825
3826 config S390_EXEC_PROTECT
3827 bool "Data execute protection"
3828+ default y
3829 select S390_SWITCH_AMODE
3830 help
3831 This option allows to enable a buffer overflow protection for user
3832 space programs and it also selects the addressing mode option above.
3833- The kernel parameter noexec=on will enable this feature and also
3834- switch the addressing modes, default is disabled. Enabling this (via
3835- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3836- will reduce system performance.
3837+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3838+ reduce system performance.
3839
3840 comment "Code generation options"
3841
3842diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3843index e885442..5e6c303 100644
3844--- a/arch/s390/include/asm/elf.h
3845+++ b/arch/s390/include/asm/elf.h
3846@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3847 that it will "exec", and that there is sufficient room for the brk. */
3848 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3849
3850+#ifdef CONFIG_PAX_ASLR
3851+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3852+
3853+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3854+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3855+#endif
3856+
3857 /* This yields a mask that user programs can use to figure out what
3858 instruction set this CPU supports. */
3859
3860diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
3861index e37478e..9ce0e9f 100644
3862--- a/arch/s390/include/asm/setup.h
3863+++ b/arch/s390/include/asm/setup.h
3864@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3865 void detect_memory_layout(struct mem_chunk chunk[]);
3866
3867 #ifdef CONFIG_S390_SWITCH_AMODE
3868-extern unsigned int switch_amode;
3869+#define switch_amode (1)
3870 #else
3871 #define switch_amode (0)
3872 #endif
3873
3874 #ifdef CONFIG_S390_EXEC_PROTECT
3875-extern unsigned int s390_noexec;
3876+#define s390_noexec (1)
3877 #else
3878 #define s390_noexec (0)
3879 #endif
3880diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3881index 8377e91..e28e6f1 100644
3882--- a/arch/s390/include/asm/uaccess.h
3883+++ b/arch/s390/include/asm/uaccess.h
3884@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3885 copy_to_user(void __user *to, const void *from, unsigned long n)
3886 {
3887 might_fault();
3888+
3889+ if ((long)n < 0)
3890+ return n;
3891+
3892 if (access_ok(VERIFY_WRITE, to, n))
3893 n = __copy_to_user(to, from, n);
3894 return n;
3895@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3896 static inline unsigned long __must_check
3897 __copy_from_user(void *to, const void __user *from, unsigned long n)
3898 {
3899+ if ((long)n < 0)
3900+ return n;
3901+
3902 if (__builtin_constant_p(n) && (n <= 256))
3903 return uaccess.copy_from_user_small(n, from, to);
3904 else
3905@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3906 copy_from_user(void *to, const void __user *from, unsigned long n)
3907 {
3908 might_fault();
3909+
3910+ if ((long)n < 0)
3911+ return n;
3912+
3913 if (access_ok(VERIFY_READ, from, n))
3914 n = __copy_from_user(to, from, n);
3915 else
3916diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3917index 639380a..72e3c02 100644
3918--- a/arch/s390/kernel/module.c
3919+++ b/arch/s390/kernel/module.c
3920@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3921
3922 /* Increase core size by size of got & plt and set start
3923 offsets for got and plt. */
3924- me->core_size = ALIGN(me->core_size, 4);
3925- me->arch.got_offset = me->core_size;
3926- me->core_size += me->arch.got_size;
3927- me->arch.plt_offset = me->core_size;
3928- me->core_size += me->arch.plt_size;
3929+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3930+ me->arch.got_offset = me->core_size_rw;
3931+ me->core_size_rw += me->arch.got_size;
3932+ me->arch.plt_offset = me->core_size_rx;
3933+ me->core_size_rx += me->arch.plt_size;
3934 return 0;
3935 }
3936
3937@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3938 if (info->got_initialized == 0) {
3939 Elf_Addr *gotent;
3940
3941- gotent = me->module_core + me->arch.got_offset +
3942+ gotent = me->module_core_rw + me->arch.got_offset +
3943 info->got_offset;
3944 *gotent = val;
3945 info->got_initialized = 1;
3946@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3947 else if (r_type == R_390_GOTENT ||
3948 r_type == R_390_GOTPLTENT)
3949 *(unsigned int *) loc =
3950- (val + (Elf_Addr) me->module_core - loc) >> 1;
3951+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3952 else if (r_type == R_390_GOT64 ||
3953 r_type == R_390_GOTPLT64)
3954 *(unsigned long *) loc = val;
3955@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3956 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3957 if (info->plt_initialized == 0) {
3958 unsigned int *ip;
3959- ip = me->module_core + me->arch.plt_offset +
3960+ ip = me->module_core_rx + me->arch.plt_offset +
3961 info->plt_offset;
3962 #ifndef CONFIG_64BIT
3963 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3964@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3965 val - loc + 0xffffUL < 0x1ffffeUL) ||
3966 (r_type == R_390_PLT32DBL &&
3967 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3968- val = (Elf_Addr) me->module_core +
3969+ val = (Elf_Addr) me->module_core_rx +
3970 me->arch.plt_offset +
3971 info->plt_offset;
3972 val += rela->r_addend - loc;
3973@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3974 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3975 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3976 val = val + rela->r_addend -
3977- ((Elf_Addr) me->module_core + me->arch.got_offset);
3978+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3979 if (r_type == R_390_GOTOFF16)
3980 *(unsigned short *) loc = val;
3981 else if (r_type == R_390_GOTOFF32)
3982@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3983 break;
3984 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3985 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3986- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3987+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3988 rela->r_addend - loc;
3989 if (r_type == R_390_GOTPC)
3990 *(unsigned int *) loc = val;
3991diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3992index 061479f..dbfb08c 100644
3993--- a/arch/s390/kernel/setup.c
3994+++ b/arch/s390/kernel/setup.c
3995@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
3996 early_param("mem", early_parse_mem);
3997
3998 #ifdef CONFIG_S390_SWITCH_AMODE
3999-unsigned int switch_amode = 0;
4000-EXPORT_SYMBOL_GPL(switch_amode);
4001-
4002 static int set_amode_and_uaccess(unsigned long user_amode,
4003 unsigned long user32_amode)
4004 {
4005@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4006 return 0;
4007 }
4008 }
4009-
4010-/*
4011- * Switch kernel/user addressing modes?
4012- */
4013-static int __init early_parse_switch_amode(char *p)
4014-{
4015- switch_amode = 1;
4016- return 0;
4017-}
4018-early_param("switch_amode", early_parse_switch_amode);
4019-
4020 #else /* CONFIG_S390_SWITCH_AMODE */
4021 static inline int set_amode_and_uaccess(unsigned long user_amode,
4022 unsigned long user32_amode)
4023@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4024 }
4025 #endif /* CONFIG_S390_SWITCH_AMODE */
4026
4027-#ifdef CONFIG_S390_EXEC_PROTECT
4028-unsigned int s390_noexec = 0;
4029-EXPORT_SYMBOL_GPL(s390_noexec);
4030-
4031-/*
4032- * Enable execute protection?
4033- */
4034-static int __init early_parse_noexec(char *p)
4035-{
4036- if (!strncmp(p, "off", 3))
4037- return 0;
4038- switch_amode = 1;
4039- s390_noexec = 1;
4040- return 0;
4041-}
4042-early_param("noexec", early_parse_noexec);
4043-#endif /* CONFIG_S390_EXEC_PROTECT */
4044-
4045 static void setup_addressing_mode(void)
4046 {
4047 if (s390_noexec) {
4048diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4049index f4558cc..e461f37 100644
4050--- a/arch/s390/mm/mmap.c
4051+++ b/arch/s390/mm/mmap.c
4052@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 */
4054 if (mmap_is_legacy()) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE;
4056+
4057+#ifdef CONFIG_PAX_RANDMMAP
4058+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4059+ mm->mmap_base += mm->delta_mmap;
4060+#endif
4061+
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065 mm->mmap_base = mmap_base();
4066+
4067+#ifdef CONFIG_PAX_RANDMMAP
4068+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4069+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4070+#endif
4071+
4072 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4073 mm->unmap_area = arch_unmap_area_topdown;
4074 }
4075@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4076 */
4077 if (mmap_is_legacy()) {
4078 mm->mmap_base = TASK_UNMAPPED_BASE;
4079+
4080+#ifdef CONFIG_PAX_RANDMMAP
4081+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4082+ mm->mmap_base += mm->delta_mmap;
4083+#endif
4084+
4085 mm->get_unmapped_area = s390_get_unmapped_area;
4086 mm->unmap_area = arch_unmap_area;
4087 } else {
4088 mm->mmap_base = mmap_base();
4089+
4090+#ifdef CONFIG_PAX_RANDMMAP
4091+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4092+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4093+#endif
4094+
4095 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4096 mm->unmap_area = arch_unmap_area_topdown;
4097 }
4098diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4099index 589d5c7..669e274 100644
4100--- a/arch/score/include/asm/system.h
4101+++ b/arch/score/include/asm/system.h
4102@@ -17,7 +17,7 @@ do { \
4103 #define finish_arch_switch(prev) do {} while (0)
4104
4105 typedef void (*vi_handler_t)(void);
4106-extern unsigned long arch_align_stack(unsigned long sp);
4107+#define arch_align_stack(x) (x)
4108
4109 #define mb() barrier()
4110 #define rmb() barrier()
4111diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4112index 25d0803..d6c8e36 100644
4113--- a/arch/score/kernel/process.c
4114+++ b/arch/score/kernel/process.c
4115@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4116
4117 return task_pt_regs(task)->cp0_epc;
4118 }
4119-
4120-unsigned long arch_align_stack(unsigned long sp)
4121-{
4122- return sp;
4123-}
4124diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4125index d936c1a..304a252 100644
4126--- a/arch/sh/boards/mach-hp6xx/pm.c
4127+++ b/arch/sh/boards/mach-hp6xx/pm.c
4128@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4129 return 0;
4130 }
4131
4132-static struct platform_suspend_ops hp6x0_pm_ops = {
4133+static const struct platform_suspend_ops hp6x0_pm_ops = {
4134 .enter = hp6x0_pm_enter,
4135 .valid = suspend_valid_only_mem,
4136 };
4137diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4138index 8a8a993..7b3079b 100644
4139--- a/arch/sh/kernel/cpu/sh4/sq.c
4140+++ b/arch/sh/kernel/cpu/sh4/sq.c
4141@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4142 NULL,
4143 };
4144
4145-static struct sysfs_ops sq_sysfs_ops = {
4146+static const struct sysfs_ops sq_sysfs_ops = {
4147 .show = sq_sysfs_show,
4148 .store = sq_sysfs_store,
4149 };
4150diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4151index ee3c2aa..c49cee6 100644
4152--- a/arch/sh/kernel/cpu/shmobile/pm.c
4153+++ b/arch/sh/kernel/cpu/shmobile/pm.c
4154@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4155 return 0;
4156 }
4157
4158-static struct platform_suspend_ops sh_pm_ops = {
4159+static const struct platform_suspend_ops sh_pm_ops = {
4160 .enter = sh_pm_enter,
4161 .valid = suspend_valid_only_mem,
4162 };
4163diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4164index 3e532d0..9faa306 100644
4165--- a/arch/sh/kernel/kgdb.c
4166+++ b/arch/sh/kernel/kgdb.c
4167@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4168 {
4169 }
4170
4171-struct kgdb_arch arch_kgdb_ops = {
4172+const struct kgdb_arch arch_kgdb_ops = {
4173 /* Breakpoint instruction: trapa #0x3c */
4174 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4175 .gdb_bpt_instr = { 0x3c, 0xc3 },
4176diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4177index afeb710..d1d1289 100644
4178--- a/arch/sh/mm/mmap.c
4179+++ b/arch/sh/mm/mmap.c
4180@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4181 addr = PAGE_ALIGN(addr);
4182
4183 vma = find_vma(mm, addr);
4184- if (TASK_SIZE - len >= addr &&
4185- (!vma || addr + len <= vma->vm_start))
4186+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4187 return addr;
4188 }
4189
4190@@ -106,7 +105,7 @@ full_search:
4191 }
4192 return -ENOMEM;
4193 }
4194- if (likely(!vma || addr + len <= vma->vm_start)) {
4195+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4196 /*
4197 * Remember the place where we stopped the search:
4198 */
4199@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4200 addr = PAGE_ALIGN(addr);
4201
4202 vma = find_vma(mm, addr);
4203- if (TASK_SIZE - len >= addr &&
4204- (!vma || addr + len <= vma->vm_start))
4205+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4206 return addr;
4207 }
4208
4209@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4210 /* make sure it can fit in the remaining address space */
4211 if (likely(addr > len)) {
4212 vma = find_vma(mm, addr-len);
4213- if (!vma || addr <= vma->vm_start) {
4214+ if (check_heap_stack_gap(vma, addr - len, len)) {
4215 /* remember the address as a hint for next time */
4216 return (mm->free_area_cache = addr-len);
4217 }
4218@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4219 if (unlikely(mm->mmap_base < len))
4220 goto bottomup;
4221
4222- addr = mm->mmap_base-len;
4223- if (do_colour_align)
4224- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4225+ addr = mm->mmap_base - len;
4226
4227 do {
4228+ if (do_colour_align)
4229+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4230 /*
4231 * Lookup failure means no vma is above this address,
4232 * else if new region fits below vma->vm_start,
4233 * return with success:
4234 */
4235 vma = find_vma(mm, addr);
4236- if (likely(!vma || addr+len <= vma->vm_start)) {
4237+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4238 /* remember the address as a hint for next time */
4239 return (mm->free_area_cache = addr);
4240 }
4241@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4242 mm->cached_hole_size = vma->vm_start - addr;
4243
4244 /* try just below the current vma->vm_start */
4245- addr = vma->vm_start-len;
4246- if (do_colour_align)
4247- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4248- } while (likely(len < vma->vm_start));
4249+ addr = skip_heap_stack_gap(vma, len);
4250+ } while (!IS_ERR_VALUE(addr));
4251
4252 bottomup:
4253 /*
4254diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4255index 113225b..7fd04e7 100644
4256--- a/arch/sparc/Makefile
4257+++ b/arch/sparc/Makefile
4258@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4259 # Export what is needed by arch/sparc/boot/Makefile
4260 export VMLINUX_INIT VMLINUX_MAIN
4261 VMLINUX_INIT := $(head-y) $(init-y)
4262-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4263+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4264 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4265 VMLINUX_MAIN += $(drivers-y) $(net-y)
4266
4267diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4268index f5cc06f..f858d47 100644
4269--- a/arch/sparc/include/asm/atomic_64.h
4270+++ b/arch/sparc/include/asm/atomic_64.h
4271@@ -14,18 +14,40 @@
4272 #define ATOMIC64_INIT(i) { (i) }
4273
4274 #define atomic_read(v) ((v)->counter)
4275+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4276+{
4277+ return v->counter;
4278+}
4279 #define atomic64_read(v) ((v)->counter)
4280+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4281+{
4282+ return v->counter;
4283+}
4284
4285 #define atomic_set(v, i) (((v)->counter) = i)
4286+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4287+{
4288+ v->counter = i;
4289+}
4290 #define atomic64_set(v, i) (((v)->counter) = i)
4291+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4292+{
4293+ v->counter = i;
4294+}
4295
4296 extern void atomic_add(int, atomic_t *);
4297+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4298 extern void atomic64_add(long, atomic64_t *);
4299+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4300 extern void atomic_sub(int, atomic_t *);
4301+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4302 extern void atomic64_sub(long, atomic64_t *);
4303+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4304
4305 extern int atomic_add_ret(int, atomic_t *);
4306+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4307 extern long atomic64_add_ret(long, atomic64_t *);
4308+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4309 extern int atomic_sub_ret(int, atomic_t *);
4310 extern long atomic64_sub_ret(long, atomic64_t *);
4311
4312@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4313 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4314
4315 #define atomic_inc_return(v) atomic_add_ret(1, v)
4316+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4317+{
4318+ return atomic_add_ret_unchecked(1, v);
4319+}
4320 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4321+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4322+{
4323+ return atomic64_add_ret_unchecked(1, v);
4324+}
4325
4326 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4327 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4328
4329 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4330+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4331+{
4332+ return atomic_add_ret_unchecked(i, v);
4333+}
4334 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4335+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4336+{
4337+ return atomic64_add_ret_unchecked(i, v);
4338+}
4339
4340 /*
4341 * atomic_inc_and_test - increment and test
4342@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4343 * other cases.
4344 */
4345 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4346+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4347+{
4348+ return atomic_inc_return_unchecked(v) == 0;
4349+}
4350 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4351
4352 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4353@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4354 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4355
4356 #define atomic_inc(v) atomic_add(1, v)
4357+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4358+{
4359+ atomic_add_unchecked(1, v);
4360+}
4361 #define atomic64_inc(v) atomic64_add(1, v)
4362+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4363+{
4364+ atomic64_add_unchecked(1, v);
4365+}
4366
4367 #define atomic_dec(v) atomic_sub(1, v)
4368+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4369+{
4370+ atomic_sub_unchecked(1, v);
4371+}
4372 #define atomic64_dec(v) atomic64_sub(1, v)
4373+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4374+{
4375+ atomic64_sub_unchecked(1, v);
4376+}
4377
4378 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4379 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4380
4381 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4382+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4383+{
4384+ return cmpxchg(&v->counter, old, new);
4385+}
4386 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4387+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4388+{
4389+ return xchg(&v->counter, new);
4390+}
4391
4392 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4393 {
4394- int c, old;
4395+ int c, old, new;
4396 c = atomic_read(v);
4397 for (;;) {
4398- if (unlikely(c == (u)))
4399+ if (unlikely(c == u))
4400 break;
4401- old = atomic_cmpxchg((v), c, c + (a));
4402+
4403+ asm volatile("addcc %2, %0, %0\n"
4404+
4405+#ifdef CONFIG_PAX_REFCOUNT
4406+ "tvs %%icc, 6\n"
4407+#endif
4408+
4409+ : "=r" (new)
4410+ : "0" (c), "ir" (a)
4411+ : "cc");
4412+
4413+ old = atomic_cmpxchg(v, c, new);
4414 if (likely(old == c))
4415 break;
4416 c = old;
4417 }
4418- return c != (u);
4419+ return c != u;
4420 }
4421
4422 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4423@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4424 #define atomic64_cmpxchg(v, o, n) \
4425 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4426 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4427+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4428+{
4429+ return xchg(&v->counter, new);
4430+}
4431
4432 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4433 {
4434- long c, old;
4435+ long c, old, new;
4436 c = atomic64_read(v);
4437 for (;;) {
4438- if (unlikely(c == (u)))
4439+ if (unlikely(c == u))
4440 break;
4441- old = atomic64_cmpxchg((v), c, c + (a));
4442+
4443+ asm volatile("addcc %2, %0, %0\n"
4444+
4445+#ifdef CONFIG_PAX_REFCOUNT
4446+ "tvs %%xcc, 6\n"
4447+#endif
4448+
4449+ : "=r" (new)
4450+ : "0" (c), "ir" (a)
4451+ : "cc");
4452+
4453+ old = atomic64_cmpxchg(v, c, new);
4454 if (likely(old == c))
4455 break;
4456 c = old;
4457 }
4458- return c != (u);
4459+ return c != u;
4460 }
4461
4462 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4463diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4464index 41f85ae..fb54d5e 100644
4465--- a/arch/sparc/include/asm/cache.h
4466+++ b/arch/sparc/include/asm/cache.h
4467@@ -8,7 +8,7 @@
4468 #define _SPARC_CACHE_H
4469
4470 #define L1_CACHE_SHIFT 5
4471-#define L1_CACHE_BYTES 32
4472+#define L1_CACHE_BYTES 32UL
4473 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4474
4475 #ifdef CONFIG_SPARC32
4476diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4477index 5a8c308..38def92 100644
4478--- a/arch/sparc/include/asm/dma-mapping.h
4479+++ b/arch/sparc/include/asm/dma-mapping.h
4480@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4481 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4482 #define dma_is_consistent(d, h) (1)
4483
4484-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4485+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4486 extern struct bus_type pci_bus_type;
4487
4488-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4489+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4490 {
4491 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4492 if (dev->bus == &pci_bus_type)
4493@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4494 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4495 dma_addr_t *dma_handle, gfp_t flag)
4496 {
4497- struct dma_map_ops *ops = get_dma_ops(dev);
4498+ const struct dma_map_ops *ops = get_dma_ops(dev);
4499 void *cpu_addr;
4500
4501 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4502@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4503 static inline void dma_free_coherent(struct device *dev, size_t size,
4504 void *cpu_addr, dma_addr_t dma_handle)
4505 {
4506- struct dma_map_ops *ops = get_dma_ops(dev);
4507+ const struct dma_map_ops *ops = get_dma_ops(dev);
4508
4509 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4510 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4511diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4512index 381a1b5..b97e3ff 100644
4513--- a/arch/sparc/include/asm/elf_32.h
4514+++ b/arch/sparc/include/asm/elf_32.h
4515@@ -116,6 +116,13 @@ typedef struct {
4516
4517 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4518
4519+#ifdef CONFIG_PAX_ASLR
4520+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4521+
4522+#define PAX_DELTA_MMAP_LEN 16
4523+#define PAX_DELTA_STACK_LEN 16
4524+#endif
4525+
4526 /* This yields a mask that user programs can use to figure out what
4527 instruction set this cpu supports. This can NOT be done in userspace
4528 on Sparc. */
4529diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4530index 9968085..c2106ef 100644
4531--- a/arch/sparc/include/asm/elf_64.h
4532+++ b/arch/sparc/include/asm/elf_64.h
4533@@ -163,6 +163,12 @@ typedef struct {
4534 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4535 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4536
4537+#ifdef CONFIG_PAX_ASLR
4538+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4539+
4540+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4541+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4542+#endif
4543
4544 /* This yields a mask that user programs can use to figure out what
4545 instruction set this cpu supports. */
4546diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4547index e0cabe7..efd60f1 100644
4548--- a/arch/sparc/include/asm/pgtable_32.h
4549+++ b/arch/sparc/include/asm/pgtable_32.h
4550@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4551 BTFIXUPDEF_INT(page_none)
4552 BTFIXUPDEF_INT(page_copy)
4553 BTFIXUPDEF_INT(page_readonly)
4554+
4555+#ifdef CONFIG_PAX_PAGEEXEC
4556+BTFIXUPDEF_INT(page_shared_noexec)
4557+BTFIXUPDEF_INT(page_copy_noexec)
4558+BTFIXUPDEF_INT(page_readonly_noexec)
4559+#endif
4560+
4561 BTFIXUPDEF_INT(page_kernel)
4562
4563 #define PMD_SHIFT SUN4C_PMD_SHIFT
4564@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4565 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4566 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4567
4568+#ifdef CONFIG_PAX_PAGEEXEC
4569+extern pgprot_t PAGE_SHARED_NOEXEC;
4570+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4571+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4572+#else
4573+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4574+# define PAGE_COPY_NOEXEC PAGE_COPY
4575+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4576+#endif
4577+
4578 extern unsigned long page_kernel;
4579
4580 #ifdef MODULE
4581diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4582index 1407c07..7e10231 100644
4583--- a/arch/sparc/include/asm/pgtsrmmu.h
4584+++ b/arch/sparc/include/asm/pgtsrmmu.h
4585@@ -115,6 +115,13 @@
4586 SRMMU_EXEC | SRMMU_REF)
4587 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4588 SRMMU_EXEC | SRMMU_REF)
4589+
4590+#ifdef CONFIG_PAX_PAGEEXEC
4591+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4592+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4593+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4594+#endif
4595+
4596 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4597 SRMMU_DIRTY | SRMMU_REF)
4598
4599diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4600index 43e5147..47622a1 100644
4601--- a/arch/sparc/include/asm/spinlock_64.h
4602+++ b/arch/sparc/include/asm/spinlock_64.h
4603@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4604
4605 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4606
4607-static void inline arch_read_lock(raw_rwlock_t *lock)
4608+static inline void arch_read_lock(raw_rwlock_t *lock)
4609 {
4610 unsigned long tmp1, tmp2;
4611
4612 __asm__ __volatile__ (
4613 "1: ldsw [%2], %0\n"
4614 " brlz,pn %0, 2f\n"
4615-"4: add %0, 1, %1\n"
4616+"4: addcc %0, 1, %1\n"
4617+
4618+#ifdef CONFIG_PAX_REFCOUNT
4619+" tvs %%icc, 6\n"
4620+#endif
4621+
4622 " cas [%2], %0, %1\n"
4623 " cmp %0, %1\n"
4624 " bne,pn %%icc, 1b\n"
4625@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4626 " .previous"
4627 : "=&r" (tmp1), "=&r" (tmp2)
4628 : "r" (lock)
4629- : "memory");
4630+ : "memory", "cc");
4631 }
4632
4633-static int inline arch_read_trylock(raw_rwlock_t *lock)
4634+static inline int arch_read_trylock(raw_rwlock_t *lock)
4635 {
4636 int tmp1, tmp2;
4637
4638@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4639 "1: ldsw [%2], %0\n"
4640 " brlz,a,pn %0, 2f\n"
4641 " mov 0, %0\n"
4642-" add %0, 1, %1\n"
4643+" addcc %0, 1, %1\n"
4644+
4645+#ifdef CONFIG_PAX_REFCOUNT
4646+" tvs %%icc, 6\n"
4647+#endif
4648+
4649 " cas [%2], %0, %1\n"
4650 " cmp %0, %1\n"
4651 " bne,pn %%icc, 1b\n"
4652@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4653 return tmp1;
4654 }
4655
4656-static void inline arch_read_unlock(raw_rwlock_t *lock)
4657+static inline void arch_read_unlock(raw_rwlock_t *lock)
4658 {
4659 unsigned long tmp1, tmp2;
4660
4661 __asm__ __volatile__(
4662 "1: lduw [%2], %0\n"
4663-" sub %0, 1, %1\n"
4664+" subcc %0, 1, %1\n"
4665+
4666+#ifdef CONFIG_PAX_REFCOUNT
4667+" tvs %%icc, 6\n"
4668+#endif
4669+
4670 " cas [%2], %0, %1\n"
4671 " cmp %0, %1\n"
4672 " bne,pn %%xcc, 1b\n"
4673@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4674 : "memory");
4675 }
4676
4677-static void inline arch_write_lock(raw_rwlock_t *lock)
4678+static inline void arch_write_lock(raw_rwlock_t *lock)
4679 {
4680 unsigned long mask, tmp1, tmp2;
4681
4682@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4683 : "memory");
4684 }
4685
4686-static void inline arch_write_unlock(raw_rwlock_t *lock)
4687+static inline void arch_write_unlock(raw_rwlock_t *lock)
4688 {
4689 __asm__ __volatile__(
4690 " stw %%g0, [%0]"
4691@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4692 : "memory");
4693 }
4694
4695-static int inline arch_write_trylock(raw_rwlock_t *lock)
4696+static inline int arch_write_trylock(raw_rwlock_t *lock)
4697 {
4698 unsigned long mask, tmp1, tmp2, result;
4699
4700diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4701index 844d73a..f787fb9 100644
4702--- a/arch/sparc/include/asm/thread_info_32.h
4703+++ b/arch/sparc/include/asm/thread_info_32.h
4704@@ -50,6 +50,8 @@ struct thread_info {
4705 unsigned long w_saved;
4706
4707 struct restart_block restart_block;
4708+
4709+ unsigned long lowest_stack;
4710 };
4711
4712 /*
4713diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4714index f78ad9a..9f55fc7 100644
4715--- a/arch/sparc/include/asm/thread_info_64.h
4716+++ b/arch/sparc/include/asm/thread_info_64.h
4717@@ -68,6 +68,8 @@ struct thread_info {
4718 struct pt_regs *kern_una_regs;
4719 unsigned int kern_una_insn;
4720
4721+ unsigned long lowest_stack;
4722+
4723 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4724 };
4725
4726diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4727index e88fbe5..96b0ce5 100644
4728--- a/arch/sparc/include/asm/uaccess.h
4729+++ b/arch/sparc/include/asm/uaccess.h
4730@@ -1,5 +1,13 @@
4731 #ifndef ___ASM_SPARC_UACCESS_H
4732 #define ___ASM_SPARC_UACCESS_H
4733+
4734+#ifdef __KERNEL__
4735+#ifndef __ASSEMBLY__
4736+#include <linux/types.h>
4737+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4738+#endif
4739+#endif
4740+
4741 #if defined(__sparc__) && defined(__arch64__)
4742 #include <asm/uaccess_64.h>
4743 #else
4744diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4745index 8303ac4..07f333d 100644
4746--- a/arch/sparc/include/asm/uaccess_32.h
4747+++ b/arch/sparc/include/asm/uaccess_32.h
4748@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4749
4750 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752- if (n && __access_ok((unsigned long) to, n))
4753+ if ((long)n < 0)
4754+ return n;
4755+
4756+ if (n && __access_ok((unsigned long) to, n)) {
4757+ if (!__builtin_constant_p(n))
4758+ check_object_size(from, n, true);
4759 return __copy_user(to, (__force void __user *) from, n);
4760- else
4761+ } else
4762 return n;
4763 }
4764
4765 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4766 {
4767+ if ((long)n < 0)
4768+ return n;
4769+
4770+ if (!__builtin_constant_p(n))
4771+ check_object_size(from, n, true);
4772+
4773 return __copy_user(to, (__force void __user *) from, n);
4774 }
4775
4776 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4777 {
4778- if (n && __access_ok((unsigned long) from, n))
4779+ if ((long)n < 0)
4780+ return n;
4781+
4782+ if (n && __access_ok((unsigned long) from, n)) {
4783+ if (!__builtin_constant_p(n))
4784+ check_object_size(to, n, false);
4785 return __copy_user((__force void __user *) to, from, n);
4786- else
4787+ } else
4788 return n;
4789 }
4790
4791 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4792 {
4793+ if ((long)n < 0)
4794+ return n;
4795+
4796 return __copy_user((__force void __user *) to, from, n);
4797 }
4798
4799diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4800index 9ea271e..7b8a271 100644
4801--- a/arch/sparc/include/asm/uaccess_64.h
4802+++ b/arch/sparc/include/asm/uaccess_64.h
4803@@ -9,6 +9,7 @@
4804 #include <linux/compiler.h>
4805 #include <linux/string.h>
4806 #include <linux/thread_info.h>
4807+#include <linux/kernel.h>
4808 #include <asm/asi.h>
4809 #include <asm/system.h>
4810 #include <asm/spitfire.h>
4811@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4812 static inline unsigned long __must_check
4813 copy_from_user(void *to, const void __user *from, unsigned long size)
4814 {
4815- unsigned long ret = ___copy_from_user(to, from, size);
4816+ unsigned long ret;
4817
4818+ if ((long)size < 0 || size > INT_MAX)
4819+ return size;
4820+
4821+ if (!__builtin_constant_p(size))
4822+ check_object_size(to, size, false);
4823+
4824+ ret = ___copy_from_user(to, from, size);
4825 if (unlikely(ret))
4826 ret = copy_from_user_fixup(to, from, size);
4827 return ret;
4828@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4829 static inline unsigned long __must_check
4830 copy_to_user(void __user *to, const void *from, unsigned long size)
4831 {
4832- unsigned long ret = ___copy_to_user(to, from, size);
4833+ unsigned long ret;
4834
4835+ if ((long)size < 0 || size > INT_MAX)
4836+ return size;
4837+
4838+ if (!__builtin_constant_p(size))
4839+ check_object_size(from, size, true);
4840+
4841+ ret = ___copy_to_user(to, from, size);
4842 if (unlikely(ret))
4843 ret = copy_to_user_fixup(to, from, size);
4844 return ret;
4845diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4846index 2782681..77ded84 100644
4847--- a/arch/sparc/kernel/Makefile
4848+++ b/arch/sparc/kernel/Makefile
4849@@ -3,7 +3,7 @@
4850 #
4851
4852 asflags-y := -ansi
4853-ccflags-y := -Werror
4854+#ccflags-y := -Werror
4855
4856 extra-y := head_$(BITS).o
4857 extra-y += init_task.o
4858diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
4859index 7690cc2..ece64c9 100644
4860--- a/arch/sparc/kernel/iommu.c
4861+++ b/arch/sparc/kernel/iommu.c
4862@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
4863 spin_unlock_irqrestore(&iommu->lock, flags);
4864 }
4865
4866-static struct dma_map_ops sun4u_dma_ops = {
4867+static const struct dma_map_ops sun4u_dma_ops = {
4868 .alloc_coherent = dma_4u_alloc_coherent,
4869 .free_coherent = dma_4u_free_coherent,
4870 .map_page = dma_4u_map_page,
4871@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
4872 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4873 };
4874
4875-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4876+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4877 EXPORT_SYMBOL(dma_ops);
4878
4879 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4880diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
4881index 9f61fd8..bd048db 100644
4882--- a/arch/sparc/kernel/ioport.c
4883+++ b/arch/sparc/kernel/ioport.c
4884@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
4885 BUG();
4886 }
4887
4888-struct dma_map_ops sbus_dma_ops = {
4889+const struct dma_map_ops sbus_dma_ops = {
4890 .alloc_coherent = sbus_alloc_coherent,
4891 .free_coherent = sbus_free_coherent,
4892 .map_page = sbus_map_page,
4893@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4894 .sync_sg_for_device = sbus_sync_sg_for_device,
4895 };
4896
4897-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4898+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4899 EXPORT_SYMBOL(dma_ops);
4900
4901 static int __init sparc_register_ioport(void)
4902@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
4903 }
4904 }
4905
4906-struct dma_map_ops pci32_dma_ops = {
4907+const struct dma_map_ops pci32_dma_ops = {
4908 .alloc_coherent = pci32_alloc_coherent,
4909 .free_coherent = pci32_free_coherent,
4910 .map_page = pci32_map_page,
4911diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
4912index 04df4ed..55c4b6e 100644
4913--- a/arch/sparc/kernel/kgdb_32.c
4914+++ b/arch/sparc/kernel/kgdb_32.c
4915@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4916 {
4917 }
4918
4919-struct kgdb_arch arch_kgdb_ops = {
4920+const struct kgdb_arch arch_kgdb_ops = {
4921 /* Breakpoint instruction: ta 0x7d */
4922 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4923 };
4924diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
4925index f5a0fd4..d886f71 100644
4926--- a/arch/sparc/kernel/kgdb_64.c
4927+++ b/arch/sparc/kernel/kgdb_64.c
4928@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4929 {
4930 }
4931
4932-struct kgdb_arch arch_kgdb_ops = {
4933+const struct kgdb_arch arch_kgdb_ops = {
4934 /* Breakpoint instruction: ta 0x72 */
4935 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4936 };
4937diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
4938index 23c33ff..d137fbd 100644
4939--- a/arch/sparc/kernel/pci_sun4v.c
4940+++ b/arch/sparc/kernel/pci_sun4v.c
4941@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
4942 spin_unlock_irqrestore(&iommu->lock, flags);
4943 }
4944
4945-static struct dma_map_ops sun4v_dma_ops = {
4946+static const struct dma_map_ops sun4v_dma_ops = {
4947 .alloc_coherent = dma_4v_alloc_coherent,
4948 .free_coherent = dma_4v_free_coherent,
4949 .map_page = dma_4v_map_page,
4950diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4951index c49865b..b41a81b 100644
4952--- a/arch/sparc/kernel/process_32.c
4953+++ b/arch/sparc/kernel/process_32.c
4954@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4955 rw->ins[4], rw->ins[5],
4956 rw->ins[6],
4957 rw->ins[7]);
4958- printk("%pS\n", (void *) rw->ins[7]);
4959+ printk("%pA\n", (void *) rw->ins[7]);
4960 rw = (struct reg_window32 *) rw->ins[6];
4961 }
4962 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4963@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4964
4965 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4966 r->psr, r->pc, r->npc, r->y, print_tainted());
4967- printk("PC: <%pS>\n", (void *) r->pc);
4968+ printk("PC: <%pA>\n", (void *) r->pc);
4969 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4970 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4971 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4972 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4973 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4974 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4975- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4976+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4977
4978 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4979 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4980@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4981 rw = (struct reg_window32 *) fp;
4982 pc = rw->ins[7];
4983 printk("[%08lx : ", pc);
4984- printk("%pS ] ", (void *) pc);
4985+ printk("%pA ] ", (void *) pc);
4986 fp = rw->ins[6];
4987 } while (++count < 16);
4988 printk("\n");
4989diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4990index cb70476..3d0c191 100644
4991--- a/arch/sparc/kernel/process_64.c
4992+++ b/arch/sparc/kernel/process_64.c
4993@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4994 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4995 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4996 if (regs->tstate & TSTATE_PRIV)
4997- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4998+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4999 }
5000
5001 void show_regs(struct pt_regs *regs)
5002 {
5003 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5004 regs->tpc, regs->tnpc, regs->y, print_tainted());
5005- printk("TPC: <%pS>\n", (void *) regs->tpc);
5006+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5007 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5008 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5009 regs->u_regs[3]);
5010@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5011 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5012 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5013 regs->u_regs[15]);
5014- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5015+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5016 show_regwindow(regs);
5017 }
5018
5019@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5020 ((tp && tp->task) ? tp->task->pid : -1));
5021
5022 if (gp->tstate & TSTATE_PRIV) {
5023- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5024+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5025 (void *) gp->tpc,
5026 (void *) gp->o7,
5027 (void *) gp->i7,
5028diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5029index 6edc4e5..06a69b4 100644
5030--- a/arch/sparc/kernel/sigutil_64.c
5031+++ b/arch/sparc/kernel/sigutil_64.c
5032@@ -2,6 +2,7 @@
5033 #include <linux/types.h>
5034 #include <linux/thread_info.h>
5035 #include <linux/uaccess.h>
5036+#include <linux/errno.h>
5037
5038 #include <asm/sigcontext.h>
5039 #include <asm/fpumacro.h>
5040diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5041index 3a82e65..ce0a53a 100644
5042--- a/arch/sparc/kernel/sys_sparc_32.c
5043+++ b/arch/sparc/kernel/sys_sparc_32.c
5044@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5045 if (ARCH_SUN4C && len > 0x20000000)
5046 return -ENOMEM;
5047 if (!addr)
5048- addr = TASK_UNMAPPED_BASE;
5049+ addr = current->mm->mmap_base;
5050
5051 if (flags & MAP_SHARED)
5052 addr = COLOUR_ALIGN(addr);
5053@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5054 }
5055 if (TASK_SIZE - PAGE_SIZE - len < addr)
5056 return -ENOMEM;
5057- if (!vmm || addr + len <= vmm->vm_start)
5058+ if (check_heap_stack_gap(vmm, addr, len))
5059 return addr;
5060 addr = vmm->vm_end;
5061 if (flags & MAP_SHARED)
5062diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5063index cfa0e19..98972ac 100644
5064--- a/arch/sparc/kernel/sys_sparc_64.c
5065+++ b/arch/sparc/kernel/sys_sparc_64.c
5066@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5067 /* We do not accept a shared mapping if it would violate
5068 * cache aliasing constraints.
5069 */
5070- if ((flags & MAP_SHARED) &&
5071+ if ((filp || (flags & MAP_SHARED)) &&
5072 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5073 return -EINVAL;
5074 return addr;
5075@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5076 if (filp || (flags & MAP_SHARED))
5077 do_color_align = 1;
5078
5079+#ifdef CONFIG_PAX_RANDMMAP
5080+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5081+#endif
5082+
5083 if (addr) {
5084 if (do_color_align)
5085 addr = COLOUR_ALIGN(addr, pgoff);
5086@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5087 addr = PAGE_ALIGN(addr);
5088
5089 vma = find_vma(mm, addr);
5090- if (task_size - len >= addr &&
5091- (!vma || addr + len <= vma->vm_start))
5092+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5093 return addr;
5094 }
5095
5096 if (len > mm->cached_hole_size) {
5097- start_addr = addr = mm->free_area_cache;
5098+ start_addr = addr = mm->free_area_cache;
5099 } else {
5100- start_addr = addr = TASK_UNMAPPED_BASE;
5101+ start_addr = addr = mm->mmap_base;
5102 mm->cached_hole_size = 0;
5103 }
5104
5105@@ -175,14 +178,14 @@ full_search:
5106 vma = find_vma(mm, VA_EXCLUDE_END);
5107 }
5108 if (unlikely(task_size < addr)) {
5109- if (start_addr != TASK_UNMAPPED_BASE) {
5110- start_addr = addr = TASK_UNMAPPED_BASE;
5111+ if (start_addr != mm->mmap_base) {
5112+ start_addr = addr = mm->mmap_base;
5113 mm->cached_hole_size = 0;
5114 goto full_search;
5115 }
5116 return -ENOMEM;
5117 }
5118- if (likely(!vma || addr + len <= vma->vm_start)) {
5119+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5120 /*
5121 * Remember the place where we stopped the search:
5122 */
5123@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5124 /* We do not accept a shared mapping if it would violate
5125 * cache aliasing constraints.
5126 */
5127- if ((flags & MAP_SHARED) &&
5128+ if ((filp || (flags & MAP_SHARED)) &&
5129 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5130 return -EINVAL;
5131 return addr;
5132@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5133 addr = PAGE_ALIGN(addr);
5134
5135 vma = find_vma(mm, addr);
5136- if (task_size - len >= addr &&
5137- (!vma || addr + len <= vma->vm_start))
5138+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5139 return addr;
5140 }
5141
5142@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5143 /* make sure it can fit in the remaining address space */
5144 if (likely(addr > len)) {
5145 vma = find_vma(mm, addr-len);
5146- if (!vma || addr <= vma->vm_start) {
5147+ if (check_heap_stack_gap(vma, addr - len, len)) {
5148 /* remember the address as a hint for next time */
5149 return (mm->free_area_cache = addr-len);
5150 }
5151@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5152 if (unlikely(mm->mmap_base < len))
5153 goto bottomup;
5154
5155- addr = mm->mmap_base-len;
5156- if (do_color_align)
5157- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5158+ addr = mm->mmap_base - len;
5159
5160 do {
5161+ if (do_color_align)
5162+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5163 /*
5164 * Lookup failure means no vma is above this address,
5165 * else if new region fits below vma->vm_start,
5166 * return with success:
5167 */
5168 vma = find_vma(mm, addr);
5169- if (likely(!vma || addr+len <= vma->vm_start)) {
5170+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5171 /* remember the address as a hint for next time */
5172 return (mm->free_area_cache = addr);
5173 }
5174@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5175 mm->cached_hole_size = vma->vm_start - addr;
5176
5177 /* try just below the current vma->vm_start */
5178- addr = vma->vm_start-len;
5179- if (do_color_align)
5180- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5181- } while (likely(len < vma->vm_start));
5182+ addr = skip_heap_stack_gap(vma, len);
5183+ } while (!IS_ERR_VALUE(addr));
5184
5185 bottomup:
5186 /*
5187@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5188 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5189 sysctl_legacy_va_layout) {
5190 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5191+
5192+#ifdef CONFIG_PAX_RANDMMAP
5193+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5194+ mm->mmap_base += mm->delta_mmap;
5195+#endif
5196+
5197 mm->get_unmapped_area = arch_get_unmapped_area;
5198 mm->unmap_area = arch_unmap_area;
5199 } else {
5200@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5201 gap = (task_size / 6 * 5);
5202
5203 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5204+
5205+#ifdef CONFIG_PAX_RANDMMAP
5206+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5207+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5208+#endif
5209+
5210 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5211 mm->unmap_area = arch_unmap_area_topdown;
5212 }
5213diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5214index c0490c7..84959d1 100644
5215--- a/arch/sparc/kernel/traps_32.c
5216+++ b/arch/sparc/kernel/traps_32.c
5217@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5218 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5219 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5220
5221+extern void gr_handle_kernel_exploit(void);
5222+
5223 void die_if_kernel(char *str, struct pt_regs *regs)
5224 {
5225 static int die_counter;
5226@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5227 count++ < 30 &&
5228 (((unsigned long) rw) >= PAGE_OFFSET) &&
5229 !(((unsigned long) rw) & 0x7)) {
5230- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5231+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5232 (void *) rw->ins[7]);
5233 rw = (struct reg_window32 *)rw->ins[6];
5234 }
5235 }
5236 printk("Instruction DUMP:");
5237 instruction_dump ((unsigned long *) regs->pc);
5238- if(regs->psr & PSR_PS)
5239+ if(regs->psr & PSR_PS) {
5240+ gr_handle_kernel_exploit();
5241 do_exit(SIGKILL);
5242+ }
5243 do_exit(SIGSEGV);
5244 }
5245
5246diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5247index 10f7bb9..cdb6793 100644
5248--- a/arch/sparc/kernel/traps_64.c
5249+++ b/arch/sparc/kernel/traps_64.c
5250@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5251 i + 1,
5252 p->trapstack[i].tstate, p->trapstack[i].tpc,
5253 p->trapstack[i].tnpc, p->trapstack[i].tt);
5254- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5255+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5256 }
5257 }
5258
5259@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5260
5261 lvl -= 0x100;
5262 if (regs->tstate & TSTATE_PRIV) {
5263+
5264+#ifdef CONFIG_PAX_REFCOUNT
5265+ if (lvl == 6)
5266+ pax_report_refcount_overflow(regs);
5267+#endif
5268+
5269 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5270 die_if_kernel(buffer, regs);
5271 }
5272@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5273 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5274 {
5275 char buffer[32];
5276-
5277+
5278 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5279 0, lvl, SIGTRAP) == NOTIFY_STOP)
5280 return;
5281
5282+#ifdef CONFIG_PAX_REFCOUNT
5283+ if (lvl == 6)
5284+ pax_report_refcount_overflow(regs);
5285+#endif
5286+
5287 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5288
5289 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5290@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5291 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5292 printk("%s" "ERROR(%d): ",
5293 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5294- printk("TPC<%pS>\n", (void *) regs->tpc);
5295+ printk("TPC<%pA>\n", (void *) regs->tpc);
5296 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5297 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5298 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5299@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5300 smp_processor_id(),
5301 (type & 0x1) ? 'I' : 'D',
5302 regs->tpc);
5303- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5304+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5305 panic("Irrecoverable Cheetah+ parity error.");
5306 }
5307
5308@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5309 smp_processor_id(),
5310 (type & 0x1) ? 'I' : 'D',
5311 regs->tpc);
5312- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5313+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5314 }
5315
5316 struct sun4v_error_entry {
5317@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5318
5319 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5320 regs->tpc, tl);
5321- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5322+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5323 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5324- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5325+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5326 (void *) regs->u_regs[UREG_I7]);
5327 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5328 "pte[%lx] error[%lx]\n",
5329@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5330
5331 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5332 regs->tpc, tl);
5333- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5334+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5335 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5336- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5337+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5338 (void *) regs->u_regs[UREG_I7]);
5339 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5340 "pte[%lx] error[%lx]\n",
5341@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5342 fp = (unsigned long)sf->fp + STACK_BIAS;
5343 }
5344
5345- printk(" [%016lx] %pS\n", pc, (void *) pc);
5346+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5347 } while (++count < 16);
5348 }
5349
5350@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5351 return (struct reg_window *) (fp + STACK_BIAS);
5352 }
5353
5354+extern void gr_handle_kernel_exploit(void);
5355+
5356 void die_if_kernel(char *str, struct pt_regs *regs)
5357 {
5358 static int die_counter;
5359@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5360 while (rw &&
5361 count++ < 30&&
5362 is_kernel_stack(current, rw)) {
5363- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5364+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5365 (void *) rw->ins[7]);
5366
5367 rw = kernel_stack_up(rw);
5368@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5369 }
5370 user_instruction_dump ((unsigned int __user *) regs->tpc);
5371 }
5372- if (regs->tstate & TSTATE_PRIV)
5373+ if (regs->tstate & TSTATE_PRIV) {
5374+ gr_handle_kernel_exploit();
5375 do_exit(SIGKILL);
5376+ }
5377+
5378 do_exit(SIGSEGV);
5379 }
5380 EXPORT_SYMBOL(die_if_kernel);
5381diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5382index be183fe..1c8d332 100644
5383--- a/arch/sparc/kernel/una_asm_64.S
5384+++ b/arch/sparc/kernel/una_asm_64.S
5385@@ -127,7 +127,7 @@ do_int_load:
5386 wr %o5, 0x0, %asi
5387 retl
5388 mov 0, %o0
5389- .size __do_int_load, .-__do_int_load
5390+ .size do_int_load, .-do_int_load
5391
5392 .section __ex_table,"a"
5393 .word 4b, __retl_efault
5394diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5395index 3792099..2af17d8 100644
5396--- a/arch/sparc/kernel/unaligned_64.c
5397+++ b/arch/sparc/kernel/unaligned_64.c
5398@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5399 if (count < 5) {
5400 last_time = jiffies;
5401 count++;
5402- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5403+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5404 regs->tpc, (void *) regs->tpc);
5405 }
5406 }
5407diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5408index e75faf0..24f12f9 100644
5409--- a/arch/sparc/lib/Makefile
5410+++ b/arch/sparc/lib/Makefile
5411@@ -2,7 +2,7 @@
5412 #
5413
5414 asflags-y := -ansi -DST_DIV0=0x02
5415-ccflags-y := -Werror
5416+#ccflags-y := -Werror
5417
5418 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5419 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5420diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5421index 0268210..f0291ca 100644
5422--- a/arch/sparc/lib/atomic_64.S
5423+++ b/arch/sparc/lib/atomic_64.S
5424@@ -18,7 +18,12 @@
5425 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5426 BACKOFF_SETUP(%o2)
5427 1: lduw [%o1], %g1
5428- add %g1, %o0, %g7
5429+ addcc %g1, %o0, %g7
5430+
5431+#ifdef CONFIG_PAX_REFCOUNT
5432+ tvs %icc, 6
5433+#endif
5434+
5435 cas [%o1], %g1, %g7
5436 cmp %g1, %g7
5437 bne,pn %icc, 2f
5438@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5439 2: BACKOFF_SPIN(%o2, %o3, 1b)
5440 .size atomic_add, .-atomic_add
5441
5442+ .globl atomic_add_unchecked
5443+ .type atomic_add_unchecked,#function
5444+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5445+ BACKOFF_SETUP(%o2)
5446+1: lduw [%o1], %g1
5447+ add %g1, %o0, %g7
5448+ cas [%o1], %g1, %g7
5449+ cmp %g1, %g7
5450+ bne,pn %icc, 2f
5451+ nop
5452+ retl
5453+ nop
5454+2: BACKOFF_SPIN(%o2, %o3, 1b)
5455+ .size atomic_add_unchecked, .-atomic_add_unchecked
5456+
5457 .globl atomic_sub
5458 .type atomic_sub,#function
5459 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5460 BACKOFF_SETUP(%o2)
5461 1: lduw [%o1], %g1
5462- sub %g1, %o0, %g7
5463+ subcc %g1, %o0, %g7
5464+
5465+#ifdef CONFIG_PAX_REFCOUNT
5466+ tvs %icc, 6
5467+#endif
5468+
5469 cas [%o1], %g1, %g7
5470 cmp %g1, %g7
5471 bne,pn %icc, 2f
5472@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5473 2: BACKOFF_SPIN(%o2, %o3, 1b)
5474 .size atomic_sub, .-atomic_sub
5475
5476+ .globl atomic_sub_unchecked
5477+ .type atomic_sub_unchecked,#function
5478+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5479+ BACKOFF_SETUP(%o2)
5480+1: lduw [%o1], %g1
5481+ sub %g1, %o0, %g7
5482+ cas [%o1], %g1, %g7
5483+ cmp %g1, %g7
5484+ bne,pn %icc, 2f
5485+ nop
5486+ retl
5487+ nop
5488+2: BACKOFF_SPIN(%o2, %o3, 1b)
5489+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5490+
5491 .globl atomic_add_ret
5492 .type atomic_add_ret,#function
5493 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5494 BACKOFF_SETUP(%o2)
5495 1: lduw [%o1], %g1
5496- add %g1, %o0, %g7
5497+ addcc %g1, %o0, %g7
5498+
5499+#ifdef CONFIG_PAX_REFCOUNT
5500+ tvs %icc, 6
5501+#endif
5502+
5503 cas [%o1], %g1, %g7
5504 cmp %g1, %g7
5505 bne,pn %icc, 2f
5506@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5507 2: BACKOFF_SPIN(%o2, %o3, 1b)
5508 .size atomic_add_ret, .-atomic_add_ret
5509
5510+ .globl atomic_add_ret_unchecked
5511+ .type atomic_add_ret_unchecked,#function
5512+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5513+ BACKOFF_SETUP(%o2)
5514+1: lduw [%o1], %g1
5515+ addcc %g1, %o0, %g7
5516+ cas [%o1], %g1, %g7
5517+ cmp %g1, %g7
5518+ bne,pn %icc, 2f
5519+ add %g7, %o0, %g7
5520+ sra %g7, 0, %o0
5521+ retl
5522+ nop
5523+2: BACKOFF_SPIN(%o2, %o3, 1b)
5524+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5525+
5526 .globl atomic_sub_ret
5527 .type atomic_sub_ret,#function
5528 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5529 BACKOFF_SETUP(%o2)
5530 1: lduw [%o1], %g1
5531- sub %g1, %o0, %g7
5532+ subcc %g1, %o0, %g7
5533+
5534+#ifdef CONFIG_PAX_REFCOUNT
5535+ tvs %icc, 6
5536+#endif
5537+
5538 cas [%o1], %g1, %g7
5539 cmp %g1, %g7
5540 bne,pn %icc, 2f
5541@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5542 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5543 BACKOFF_SETUP(%o2)
5544 1: ldx [%o1], %g1
5545- add %g1, %o0, %g7
5546+ addcc %g1, %o0, %g7
5547+
5548+#ifdef CONFIG_PAX_REFCOUNT
5549+ tvs %xcc, 6
5550+#endif
5551+
5552 casx [%o1], %g1, %g7
5553 cmp %g1, %g7
5554 bne,pn %xcc, 2f
5555@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5556 2: BACKOFF_SPIN(%o2, %o3, 1b)
5557 .size atomic64_add, .-atomic64_add
5558
5559+ .globl atomic64_add_unchecked
5560+ .type atomic64_add_unchecked,#function
5561+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5562+ BACKOFF_SETUP(%o2)
5563+1: ldx [%o1], %g1
5564+ addcc %g1, %o0, %g7
5565+ casx [%o1], %g1, %g7
5566+ cmp %g1, %g7
5567+ bne,pn %xcc, 2f
5568+ nop
5569+ retl
5570+ nop
5571+2: BACKOFF_SPIN(%o2, %o3, 1b)
5572+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5573+
5574 .globl atomic64_sub
5575 .type atomic64_sub,#function
5576 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5577 BACKOFF_SETUP(%o2)
5578 1: ldx [%o1], %g1
5579- sub %g1, %o0, %g7
5580+ subcc %g1, %o0, %g7
5581+
5582+#ifdef CONFIG_PAX_REFCOUNT
5583+ tvs %xcc, 6
5584+#endif
5585+
5586 casx [%o1], %g1, %g7
5587 cmp %g1, %g7
5588 bne,pn %xcc, 2f
5589@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5590 2: BACKOFF_SPIN(%o2, %o3, 1b)
5591 .size atomic64_sub, .-atomic64_sub
5592
5593+ .globl atomic64_sub_unchecked
5594+ .type atomic64_sub_unchecked,#function
5595+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5596+ BACKOFF_SETUP(%o2)
5597+1: ldx [%o1], %g1
5598+ subcc %g1, %o0, %g7
5599+ casx [%o1], %g1, %g7
5600+ cmp %g1, %g7
5601+ bne,pn %xcc, 2f
5602+ nop
5603+ retl
5604+ nop
5605+2: BACKOFF_SPIN(%o2, %o3, 1b)
5606+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5607+
5608 .globl atomic64_add_ret
5609 .type atomic64_add_ret,#function
5610 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5611 BACKOFF_SETUP(%o2)
5612 1: ldx [%o1], %g1
5613- add %g1, %o0, %g7
5614+ addcc %g1, %o0, %g7
5615+
5616+#ifdef CONFIG_PAX_REFCOUNT
5617+ tvs %xcc, 6
5618+#endif
5619+
5620 casx [%o1], %g1, %g7
5621 cmp %g1, %g7
5622 bne,pn %xcc, 2f
5623@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5624 2: BACKOFF_SPIN(%o2, %o3, 1b)
5625 .size atomic64_add_ret, .-atomic64_add_ret
5626
5627+ .globl atomic64_add_ret_unchecked
5628+ .type atomic64_add_ret_unchecked,#function
5629+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5630+ BACKOFF_SETUP(%o2)
5631+1: ldx [%o1], %g1
5632+ addcc %g1, %o0, %g7
5633+ casx [%o1], %g1, %g7
5634+ cmp %g1, %g7
5635+ bne,pn %xcc, 2f
5636+ add %g7, %o0, %g7
5637+ mov %g7, %o0
5638+ retl
5639+ nop
5640+2: BACKOFF_SPIN(%o2, %o3, 1b)
5641+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5642+
5643 .globl atomic64_sub_ret
5644 .type atomic64_sub_ret,#function
5645 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5646 BACKOFF_SETUP(%o2)
5647 1: ldx [%o1], %g1
5648- sub %g1, %o0, %g7
5649+ subcc %g1, %o0, %g7
5650+
5651+#ifdef CONFIG_PAX_REFCOUNT
5652+ tvs %xcc, 6
5653+#endif
5654+
5655 casx [%o1], %g1, %g7
5656 cmp %g1, %g7
5657 bne,pn %xcc, 2f
5658diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5659index 704b126..2e79d76 100644
5660--- a/arch/sparc/lib/ksyms.c
5661+++ b/arch/sparc/lib/ksyms.c
5662@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5663
5664 /* Atomic counter implementation. */
5665 EXPORT_SYMBOL(atomic_add);
5666+EXPORT_SYMBOL(atomic_add_unchecked);
5667 EXPORT_SYMBOL(atomic_add_ret);
5668+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5669 EXPORT_SYMBOL(atomic_sub);
5670+EXPORT_SYMBOL(atomic_sub_unchecked);
5671 EXPORT_SYMBOL(atomic_sub_ret);
5672 EXPORT_SYMBOL(atomic64_add);
5673+EXPORT_SYMBOL(atomic64_add_unchecked);
5674 EXPORT_SYMBOL(atomic64_add_ret);
5675+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5676 EXPORT_SYMBOL(atomic64_sub);
5677+EXPORT_SYMBOL(atomic64_sub_unchecked);
5678 EXPORT_SYMBOL(atomic64_sub_ret);
5679
5680 /* Atomic bit operations. */
5681diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5682index 91a7d29..ce75c29 100644
5683--- a/arch/sparc/lib/rwsem_64.S
5684+++ b/arch/sparc/lib/rwsem_64.S
5685@@ -11,7 +11,12 @@
5686 .globl __down_read
5687 __down_read:
5688 1: lduw [%o0], %g1
5689- add %g1, 1, %g7
5690+ addcc %g1, 1, %g7
5691+
5692+#ifdef CONFIG_PAX_REFCOUNT
5693+ tvs %icc, 6
5694+#endif
5695+
5696 cas [%o0], %g1, %g7
5697 cmp %g1, %g7
5698 bne,pn %icc, 1b
5699@@ -33,7 +38,12 @@ __down_read:
5700 .globl __down_read_trylock
5701 __down_read_trylock:
5702 1: lduw [%o0], %g1
5703- add %g1, 1, %g7
5704+ addcc %g1, 1, %g7
5705+
5706+#ifdef CONFIG_PAX_REFCOUNT
5707+ tvs %icc, 6
5708+#endif
5709+
5710 cmp %g7, 0
5711 bl,pn %icc, 2f
5712 mov 0, %o1
5713@@ -51,7 +61,12 @@ __down_write:
5714 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5715 1:
5716 lduw [%o0], %g3
5717- add %g3, %g1, %g7
5718+ addcc %g3, %g1, %g7
5719+
5720+#ifdef CONFIG_PAX_REFCOUNT
5721+ tvs %icc, 6
5722+#endif
5723+
5724 cas [%o0], %g3, %g7
5725 cmp %g3, %g7
5726 bne,pn %icc, 1b
5727@@ -77,7 +92,12 @@ __down_write_trylock:
5728 cmp %g3, 0
5729 bne,pn %icc, 2f
5730 mov 0, %o1
5731- add %g3, %g1, %g7
5732+ addcc %g3, %g1, %g7
5733+
5734+#ifdef CONFIG_PAX_REFCOUNT
5735+ tvs %icc, 6
5736+#endif
5737+
5738 cas [%o0], %g3, %g7
5739 cmp %g3, %g7
5740 bne,pn %icc, 1b
5741@@ -90,7 +110,12 @@ __down_write_trylock:
5742 __up_read:
5743 1:
5744 lduw [%o0], %g1
5745- sub %g1, 1, %g7
5746+ subcc %g1, 1, %g7
5747+
5748+#ifdef CONFIG_PAX_REFCOUNT
5749+ tvs %icc, 6
5750+#endif
5751+
5752 cas [%o0], %g1, %g7
5753 cmp %g1, %g7
5754 bne,pn %icc, 1b
5755@@ -118,7 +143,12 @@ __up_write:
5756 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5757 1:
5758 lduw [%o0], %g3
5759- sub %g3, %g1, %g7
5760+ subcc %g3, %g1, %g7
5761+
5762+#ifdef CONFIG_PAX_REFCOUNT
5763+ tvs %icc, 6
5764+#endif
5765+
5766 cas [%o0], %g3, %g7
5767 cmp %g3, %g7
5768 bne,pn %icc, 1b
5769@@ -143,7 +173,12 @@ __downgrade_write:
5770 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5771 1:
5772 lduw [%o0], %g3
5773- sub %g3, %g1, %g7
5774+ subcc %g3, %g1, %g7
5775+
5776+#ifdef CONFIG_PAX_REFCOUNT
5777+ tvs %icc, 6
5778+#endif
5779+
5780 cas [%o0], %g3, %g7
5781 cmp %g3, %g7
5782 bne,pn %icc, 1b
5783diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5784index 79836a7..62f47a2 100644
5785--- a/arch/sparc/mm/Makefile
5786+++ b/arch/sparc/mm/Makefile
5787@@ -2,7 +2,7 @@
5788 #
5789
5790 asflags-y := -ansi
5791-ccflags-y := -Werror
5792+#ccflags-y := -Werror
5793
5794 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5795 obj-y += fault_$(BITS).o
5796diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5797index b99f81c..3453e93 100644
5798--- a/arch/sparc/mm/fault_32.c
5799+++ b/arch/sparc/mm/fault_32.c
5800@@ -21,6 +21,9 @@
5801 #include <linux/interrupt.h>
5802 #include <linux/module.h>
5803 #include <linux/kdebug.h>
5804+#include <linux/slab.h>
5805+#include <linux/pagemap.h>
5806+#include <linux/compiler.h>
5807
5808 #include <asm/system.h>
5809 #include <asm/page.h>
5810@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5811 return safe_compute_effective_address(regs, insn);
5812 }
5813
5814+#ifdef CONFIG_PAX_PAGEEXEC
5815+#ifdef CONFIG_PAX_DLRESOLVE
5816+static void pax_emuplt_close(struct vm_area_struct *vma)
5817+{
5818+ vma->vm_mm->call_dl_resolve = 0UL;
5819+}
5820+
5821+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5822+{
5823+ unsigned int *kaddr;
5824+
5825+ vmf->page = alloc_page(GFP_HIGHUSER);
5826+ if (!vmf->page)
5827+ return VM_FAULT_OOM;
5828+
5829+ kaddr = kmap(vmf->page);
5830+ memset(kaddr, 0, PAGE_SIZE);
5831+ kaddr[0] = 0x9DE3BFA8U; /* save */
5832+ flush_dcache_page(vmf->page);
5833+ kunmap(vmf->page);
5834+ return VM_FAULT_MAJOR;
5835+}
5836+
5837+static const struct vm_operations_struct pax_vm_ops = {
5838+ .close = pax_emuplt_close,
5839+ .fault = pax_emuplt_fault
5840+};
5841+
5842+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5843+{
5844+ int ret;
5845+
5846+ vma->vm_mm = current->mm;
5847+ vma->vm_start = addr;
5848+ vma->vm_end = addr + PAGE_SIZE;
5849+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5850+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5851+ vma->vm_ops = &pax_vm_ops;
5852+
5853+ ret = insert_vm_struct(current->mm, vma);
5854+ if (ret)
5855+ return ret;
5856+
5857+ ++current->mm->total_vm;
5858+ return 0;
5859+}
5860+#endif
5861+
5862+/*
5863+ * PaX: decide what to do with offenders (regs->pc = fault address)
5864+ *
5865+ * returns 1 when task should be killed
5866+ * 2 when patched PLT trampoline was detected
5867+ * 3 when unpatched PLT trampoline was detected
5868+ */
5869+static int pax_handle_fetch_fault(struct pt_regs *regs)
5870+{
5871+
5872+#ifdef CONFIG_PAX_EMUPLT
5873+ int err;
5874+
5875+ do { /* PaX: patched PLT emulation #1 */
5876+ unsigned int sethi1, sethi2, jmpl;
5877+
5878+ err = get_user(sethi1, (unsigned int *)regs->pc);
5879+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5880+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5881+
5882+ if (err)
5883+ break;
5884+
5885+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5886+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5887+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5888+ {
5889+ unsigned int addr;
5890+
5891+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5892+ addr = regs->u_regs[UREG_G1];
5893+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5894+ regs->pc = addr;
5895+ regs->npc = addr+4;
5896+ return 2;
5897+ }
5898+ } while (0);
5899+
5900+ { /* PaX: patched PLT emulation #2 */
5901+ unsigned int ba;
5902+
5903+ err = get_user(ba, (unsigned int *)regs->pc);
5904+
5905+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5906+ unsigned int addr;
5907+
5908+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5909+ regs->pc = addr;
5910+ regs->npc = addr+4;
5911+ return 2;
5912+ }
5913+ }
5914+
5915+ do { /* PaX: patched PLT emulation #3 */
5916+ unsigned int sethi, jmpl, nop;
5917+
5918+ err = get_user(sethi, (unsigned int *)regs->pc);
5919+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5920+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5921+
5922+ if (err)
5923+ break;
5924+
5925+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5926+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5927+ nop == 0x01000000U)
5928+ {
5929+ unsigned int addr;
5930+
5931+ addr = (sethi & 0x003FFFFFU) << 10;
5932+ regs->u_regs[UREG_G1] = addr;
5933+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5934+ regs->pc = addr;
5935+ regs->npc = addr+4;
5936+ return 2;
5937+ }
5938+ } while (0);
5939+
5940+ do { /* PaX: unpatched PLT emulation step 1 */
5941+ unsigned int sethi, ba, nop;
5942+
5943+ err = get_user(sethi, (unsigned int *)regs->pc);
5944+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5945+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5946+
5947+ if (err)
5948+ break;
5949+
5950+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5951+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5952+ nop == 0x01000000U)
5953+ {
5954+ unsigned int addr, save, call;
5955+
5956+ if ((ba & 0xFFC00000U) == 0x30800000U)
5957+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5958+ else
5959+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5960+
5961+ err = get_user(save, (unsigned int *)addr);
5962+ err |= get_user(call, (unsigned int *)(addr+4));
5963+ err |= get_user(nop, (unsigned int *)(addr+8));
5964+ if (err)
5965+ break;
5966+
5967+#ifdef CONFIG_PAX_DLRESOLVE
5968+ if (save == 0x9DE3BFA8U &&
5969+ (call & 0xC0000000U) == 0x40000000U &&
5970+ nop == 0x01000000U)
5971+ {
5972+ struct vm_area_struct *vma;
5973+ unsigned long call_dl_resolve;
5974+
5975+ down_read(&current->mm->mmap_sem);
5976+ call_dl_resolve = current->mm->call_dl_resolve;
5977+ up_read(&current->mm->mmap_sem);
5978+ if (likely(call_dl_resolve))
5979+ goto emulate;
5980+
5981+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5982+
5983+ down_write(&current->mm->mmap_sem);
5984+ if (current->mm->call_dl_resolve) {
5985+ call_dl_resolve = current->mm->call_dl_resolve;
5986+ up_write(&current->mm->mmap_sem);
5987+ if (vma)
5988+ kmem_cache_free(vm_area_cachep, vma);
5989+ goto emulate;
5990+ }
5991+
5992+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5993+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5994+ up_write(&current->mm->mmap_sem);
5995+ if (vma)
5996+ kmem_cache_free(vm_area_cachep, vma);
5997+ return 1;
5998+ }
5999+
6000+ if (pax_insert_vma(vma, call_dl_resolve)) {
6001+ up_write(&current->mm->mmap_sem);
6002+ kmem_cache_free(vm_area_cachep, vma);
6003+ return 1;
6004+ }
6005+
6006+ current->mm->call_dl_resolve = call_dl_resolve;
6007+ up_write(&current->mm->mmap_sem);
6008+
6009+emulate:
6010+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6011+ regs->pc = call_dl_resolve;
6012+ regs->npc = addr+4;
6013+ return 3;
6014+ }
6015+#endif
6016+
6017+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6018+ if ((save & 0xFFC00000U) == 0x05000000U &&
6019+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6020+ nop == 0x01000000U)
6021+ {
6022+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6023+ regs->u_regs[UREG_G2] = addr + 4;
6024+ addr = (save & 0x003FFFFFU) << 10;
6025+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6026+ regs->pc = addr;
6027+ regs->npc = addr+4;
6028+ return 3;
6029+ }
6030+ }
6031+ } while (0);
6032+
6033+ do { /* PaX: unpatched PLT emulation step 2 */
6034+ unsigned int save, call, nop;
6035+
6036+ err = get_user(save, (unsigned int *)(regs->pc-4));
6037+ err |= get_user(call, (unsigned int *)regs->pc);
6038+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6039+ if (err)
6040+ break;
6041+
6042+ if (save == 0x9DE3BFA8U &&
6043+ (call & 0xC0000000U) == 0x40000000U &&
6044+ nop == 0x01000000U)
6045+ {
6046+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6047+
6048+ regs->u_regs[UREG_RETPC] = regs->pc;
6049+ regs->pc = dl_resolve;
6050+ regs->npc = dl_resolve+4;
6051+ return 3;
6052+ }
6053+ } while (0);
6054+#endif
6055+
6056+ return 1;
6057+}
6058+
6059+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6060+{
6061+ unsigned long i;
6062+
6063+ printk(KERN_ERR "PAX: bytes at PC: ");
6064+ for (i = 0; i < 8; i++) {
6065+ unsigned int c;
6066+ if (get_user(c, (unsigned int *)pc+i))
6067+ printk(KERN_CONT "???????? ");
6068+ else
6069+ printk(KERN_CONT "%08x ", c);
6070+ }
6071+ printk("\n");
6072+}
6073+#endif
6074+
6075 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6076 unsigned long address)
6077 {
6078@@ -231,6 +495,24 @@ good_area:
6079 if(!(vma->vm_flags & VM_WRITE))
6080 goto bad_area;
6081 } else {
6082+
6083+#ifdef CONFIG_PAX_PAGEEXEC
6084+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6085+ up_read(&mm->mmap_sem);
6086+ switch (pax_handle_fetch_fault(regs)) {
6087+
6088+#ifdef CONFIG_PAX_EMUPLT
6089+ case 2:
6090+ case 3:
6091+ return;
6092+#endif
6093+
6094+ }
6095+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6096+ do_group_exit(SIGKILL);
6097+ }
6098+#endif
6099+
6100 /* Allow reads even for write-only mappings */
6101 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6102 goto bad_area;
6103diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6104index 43b0da9..a0b78f9 100644
6105--- a/arch/sparc/mm/fault_64.c
6106+++ b/arch/sparc/mm/fault_64.c
6107@@ -20,6 +20,9 @@
6108 #include <linux/kprobes.h>
6109 #include <linux/kdebug.h>
6110 #include <linux/percpu.h>
6111+#include <linux/slab.h>
6112+#include <linux/pagemap.h>
6113+#include <linux/compiler.h>
6114
6115 #include <asm/page.h>
6116 #include <asm/pgtable.h>
6117@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6118 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6119 regs->tpc);
6120 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6121- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6122+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6123 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6124 dump_stack();
6125 unhandled_fault(regs->tpc, current, regs);
6126@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6127 show_regs(regs);
6128 }
6129
6130+#ifdef CONFIG_PAX_PAGEEXEC
6131+#ifdef CONFIG_PAX_DLRESOLVE
6132+static void pax_emuplt_close(struct vm_area_struct *vma)
6133+{
6134+ vma->vm_mm->call_dl_resolve = 0UL;
6135+}
6136+
6137+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6138+{
6139+ unsigned int *kaddr;
6140+
6141+ vmf->page = alloc_page(GFP_HIGHUSER);
6142+ if (!vmf->page)
6143+ return VM_FAULT_OOM;
6144+
6145+ kaddr = kmap(vmf->page);
6146+ memset(kaddr, 0, PAGE_SIZE);
6147+ kaddr[0] = 0x9DE3BFA8U; /* save */
6148+ flush_dcache_page(vmf->page);
6149+ kunmap(vmf->page);
6150+ return VM_FAULT_MAJOR;
6151+}
6152+
6153+static const struct vm_operations_struct pax_vm_ops = {
6154+ .close = pax_emuplt_close,
6155+ .fault = pax_emuplt_fault
6156+};
6157+
6158+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6159+{
6160+ int ret;
6161+
6162+ vma->vm_mm = current->mm;
6163+ vma->vm_start = addr;
6164+ vma->vm_end = addr + PAGE_SIZE;
6165+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6166+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6167+ vma->vm_ops = &pax_vm_ops;
6168+
6169+ ret = insert_vm_struct(current->mm, vma);
6170+ if (ret)
6171+ return ret;
6172+
6173+ ++current->mm->total_vm;
6174+ return 0;
6175+}
6176+#endif
6177+
6178+/*
6179+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6180+ *
6181+ * returns 1 when task should be killed
6182+ * 2 when patched PLT trampoline was detected
6183+ * 3 when unpatched PLT trampoline was detected
6184+ */
6185+static int pax_handle_fetch_fault(struct pt_regs *regs)
6186+{
6187+
6188+#ifdef CONFIG_PAX_EMUPLT
6189+ int err;
6190+
6191+ do { /* PaX: patched PLT emulation #1 */
6192+ unsigned int sethi1, sethi2, jmpl;
6193+
6194+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6195+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6196+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6197+
6198+ if (err)
6199+ break;
6200+
6201+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6202+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6203+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6204+ {
6205+ unsigned long addr;
6206+
6207+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6208+ addr = regs->u_regs[UREG_G1];
6209+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6210+
6211+ if (test_thread_flag(TIF_32BIT))
6212+ addr &= 0xFFFFFFFFUL;
6213+
6214+ regs->tpc = addr;
6215+ regs->tnpc = addr+4;
6216+ return 2;
6217+ }
6218+ } while (0);
6219+
6220+ { /* PaX: patched PLT emulation #2 */
6221+ unsigned int ba;
6222+
6223+ err = get_user(ba, (unsigned int *)regs->tpc);
6224+
6225+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6226+ unsigned long addr;
6227+
6228+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6229+
6230+ if (test_thread_flag(TIF_32BIT))
6231+ addr &= 0xFFFFFFFFUL;
6232+
6233+ regs->tpc = addr;
6234+ regs->tnpc = addr+4;
6235+ return 2;
6236+ }
6237+ }
6238+
6239+ do { /* PaX: patched PLT emulation #3 */
6240+ unsigned int sethi, jmpl, nop;
6241+
6242+ err = get_user(sethi, (unsigned int *)regs->tpc);
6243+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6244+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6245+
6246+ if (err)
6247+ break;
6248+
6249+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6250+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6251+ nop == 0x01000000U)
6252+ {
6253+ unsigned long addr;
6254+
6255+ addr = (sethi & 0x003FFFFFU) << 10;
6256+ regs->u_regs[UREG_G1] = addr;
6257+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6258+
6259+ if (test_thread_flag(TIF_32BIT))
6260+ addr &= 0xFFFFFFFFUL;
6261+
6262+ regs->tpc = addr;
6263+ regs->tnpc = addr+4;
6264+ return 2;
6265+ }
6266+ } while (0);
6267+
6268+ do { /* PaX: patched PLT emulation #4 */
6269+ unsigned int sethi, mov1, call, mov2;
6270+
6271+ err = get_user(sethi, (unsigned int *)regs->tpc);
6272+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6273+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6274+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6275+
6276+ if (err)
6277+ break;
6278+
6279+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6280+ mov1 == 0x8210000FU &&
6281+ (call & 0xC0000000U) == 0x40000000U &&
6282+ mov2 == 0x9E100001U)
6283+ {
6284+ unsigned long addr;
6285+
6286+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6287+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6288+
6289+ if (test_thread_flag(TIF_32BIT))
6290+ addr &= 0xFFFFFFFFUL;
6291+
6292+ regs->tpc = addr;
6293+ regs->tnpc = addr+4;
6294+ return 2;
6295+ }
6296+ } while (0);
6297+
6298+ do { /* PaX: patched PLT emulation #5 */
6299+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6300+
6301+ err = get_user(sethi, (unsigned int *)regs->tpc);
6302+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6303+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6304+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6305+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6306+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6307+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6308+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6309+
6310+ if (err)
6311+ break;
6312+
6313+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6314+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6315+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6316+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6317+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6318+ sllx == 0x83287020U &&
6319+ jmpl == 0x81C04005U &&
6320+ nop == 0x01000000U)
6321+ {
6322+ unsigned long addr;
6323+
6324+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6325+ regs->u_regs[UREG_G1] <<= 32;
6326+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6327+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6328+ regs->tpc = addr;
6329+ regs->tnpc = addr+4;
6330+ return 2;
6331+ }
6332+ } while (0);
6333+
6334+ do { /* PaX: patched PLT emulation #6 */
6335+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6336+
6337+ err = get_user(sethi, (unsigned int *)regs->tpc);
6338+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6339+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6340+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6341+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6342+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6343+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6344+
6345+ if (err)
6346+ break;
6347+
6348+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6349+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6350+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6351+ sllx == 0x83287020U &&
6352+ (or & 0xFFFFE000U) == 0x8A116000U &&
6353+ jmpl == 0x81C04005U &&
6354+ nop == 0x01000000U)
6355+ {
6356+ unsigned long addr;
6357+
6358+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6359+ regs->u_regs[UREG_G1] <<= 32;
6360+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6361+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6362+ regs->tpc = addr;
6363+ regs->tnpc = addr+4;
6364+ return 2;
6365+ }
6366+ } while (0);
6367+
6368+ do { /* PaX: unpatched PLT emulation step 1 */
6369+ unsigned int sethi, ba, nop;
6370+
6371+ err = get_user(sethi, (unsigned int *)regs->tpc);
6372+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6373+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6374+
6375+ if (err)
6376+ break;
6377+
6378+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6379+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6380+ nop == 0x01000000U)
6381+ {
6382+ unsigned long addr;
6383+ unsigned int save, call;
6384+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6385+
6386+ if ((ba & 0xFFC00000U) == 0x30800000U)
6387+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6388+ else
6389+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6390+
6391+ if (test_thread_flag(TIF_32BIT))
6392+ addr &= 0xFFFFFFFFUL;
6393+
6394+ err = get_user(save, (unsigned int *)addr);
6395+ err |= get_user(call, (unsigned int *)(addr+4));
6396+ err |= get_user(nop, (unsigned int *)(addr+8));
6397+ if (err)
6398+ break;
6399+
6400+#ifdef CONFIG_PAX_DLRESOLVE
6401+ if (save == 0x9DE3BFA8U &&
6402+ (call & 0xC0000000U) == 0x40000000U &&
6403+ nop == 0x01000000U)
6404+ {
6405+ struct vm_area_struct *vma;
6406+ unsigned long call_dl_resolve;
6407+
6408+ down_read(&current->mm->mmap_sem);
6409+ call_dl_resolve = current->mm->call_dl_resolve;
6410+ up_read(&current->mm->mmap_sem);
6411+ if (likely(call_dl_resolve))
6412+ goto emulate;
6413+
6414+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6415+
6416+ down_write(&current->mm->mmap_sem);
6417+ if (current->mm->call_dl_resolve) {
6418+ call_dl_resolve = current->mm->call_dl_resolve;
6419+ up_write(&current->mm->mmap_sem);
6420+ if (vma)
6421+ kmem_cache_free(vm_area_cachep, vma);
6422+ goto emulate;
6423+ }
6424+
6425+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6426+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6427+ up_write(&current->mm->mmap_sem);
6428+ if (vma)
6429+ kmem_cache_free(vm_area_cachep, vma);
6430+ return 1;
6431+ }
6432+
6433+ if (pax_insert_vma(vma, call_dl_resolve)) {
6434+ up_write(&current->mm->mmap_sem);
6435+ kmem_cache_free(vm_area_cachep, vma);
6436+ return 1;
6437+ }
6438+
6439+ current->mm->call_dl_resolve = call_dl_resolve;
6440+ up_write(&current->mm->mmap_sem);
6441+
6442+emulate:
6443+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6444+ regs->tpc = call_dl_resolve;
6445+ regs->tnpc = addr+4;
6446+ return 3;
6447+ }
6448+#endif
6449+
6450+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6451+ if ((save & 0xFFC00000U) == 0x05000000U &&
6452+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6453+ nop == 0x01000000U)
6454+ {
6455+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6456+ regs->u_regs[UREG_G2] = addr + 4;
6457+ addr = (save & 0x003FFFFFU) << 10;
6458+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6459+
6460+ if (test_thread_flag(TIF_32BIT))
6461+ addr &= 0xFFFFFFFFUL;
6462+
6463+ regs->tpc = addr;
6464+ regs->tnpc = addr+4;
6465+ return 3;
6466+ }
6467+
6468+ /* PaX: 64-bit PLT stub */
6469+ err = get_user(sethi1, (unsigned int *)addr);
6470+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6471+ err |= get_user(or1, (unsigned int *)(addr+8));
6472+ err |= get_user(or2, (unsigned int *)(addr+12));
6473+ err |= get_user(sllx, (unsigned int *)(addr+16));
6474+ err |= get_user(add, (unsigned int *)(addr+20));
6475+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6476+ err |= get_user(nop, (unsigned int *)(addr+28));
6477+ if (err)
6478+ break;
6479+
6480+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6481+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6482+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6483+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6484+ sllx == 0x89293020U &&
6485+ add == 0x8A010005U &&
6486+ jmpl == 0x89C14000U &&
6487+ nop == 0x01000000U)
6488+ {
6489+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6490+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6491+ regs->u_regs[UREG_G4] <<= 32;
6492+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6493+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6494+ regs->u_regs[UREG_G4] = addr + 24;
6495+ addr = regs->u_regs[UREG_G5];
6496+ regs->tpc = addr;
6497+ regs->tnpc = addr+4;
6498+ return 3;
6499+ }
6500+ }
6501+ } while (0);
6502+
6503+#ifdef CONFIG_PAX_DLRESOLVE
6504+ do { /* PaX: unpatched PLT emulation step 2 */
6505+ unsigned int save, call, nop;
6506+
6507+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6508+ err |= get_user(call, (unsigned int *)regs->tpc);
6509+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6510+ if (err)
6511+ break;
6512+
6513+ if (save == 0x9DE3BFA8U &&
6514+ (call & 0xC0000000U) == 0x40000000U &&
6515+ nop == 0x01000000U)
6516+ {
6517+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6518+
6519+ if (test_thread_flag(TIF_32BIT))
6520+ dl_resolve &= 0xFFFFFFFFUL;
6521+
6522+ regs->u_regs[UREG_RETPC] = regs->tpc;
6523+ regs->tpc = dl_resolve;
6524+ regs->tnpc = dl_resolve+4;
6525+ return 3;
6526+ }
6527+ } while (0);
6528+#endif
6529+
6530+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6531+ unsigned int sethi, ba, nop;
6532+
6533+ err = get_user(sethi, (unsigned int *)regs->tpc);
6534+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6535+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6536+
6537+ if (err)
6538+ break;
6539+
6540+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6541+ (ba & 0xFFF00000U) == 0x30600000U &&
6542+ nop == 0x01000000U)
6543+ {
6544+ unsigned long addr;
6545+
6546+ addr = (sethi & 0x003FFFFFU) << 10;
6547+ regs->u_regs[UREG_G1] = addr;
6548+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6549+
6550+ if (test_thread_flag(TIF_32BIT))
6551+ addr &= 0xFFFFFFFFUL;
6552+
6553+ regs->tpc = addr;
6554+ regs->tnpc = addr+4;
6555+ return 2;
6556+ }
6557+ } while (0);
6558+
6559+#endif
6560+
6561+ return 1;
6562+}
6563+
6564+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6565+{
6566+ unsigned long i;
6567+
6568+ printk(KERN_ERR "PAX: bytes at PC: ");
6569+ for (i = 0; i < 8; i++) {
6570+ unsigned int c;
6571+ if (get_user(c, (unsigned int *)pc+i))
6572+ printk(KERN_CONT "???????? ");
6573+ else
6574+ printk(KERN_CONT "%08x ", c);
6575+ }
6576+ printk("\n");
6577+}
6578+#endif
6579+
6580 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6581 {
6582 struct mm_struct *mm = current->mm;
6583@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6584 if (!vma)
6585 goto bad_area;
6586
6587+#ifdef CONFIG_PAX_PAGEEXEC
6588+ /* PaX: detect ITLB misses on non-exec pages */
6589+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6590+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6591+ {
6592+ if (address != regs->tpc)
6593+ goto good_area;
6594+
6595+ up_read(&mm->mmap_sem);
6596+ switch (pax_handle_fetch_fault(regs)) {
6597+
6598+#ifdef CONFIG_PAX_EMUPLT
6599+ case 2:
6600+ case 3:
6601+ return;
6602+#endif
6603+
6604+ }
6605+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6606+ do_group_exit(SIGKILL);
6607+ }
6608+#endif
6609+
6610 /* Pure DTLB misses do not tell us whether the fault causing
6611 * load/store/atomic was a write or not, it only says that there
6612 * was no match. So in such a case we (carefully) read the
6613diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6614index f27d103..1b06377 100644
6615--- a/arch/sparc/mm/hugetlbpage.c
6616+++ b/arch/sparc/mm/hugetlbpage.c
6617@@ -69,7 +69,7 @@ full_search:
6618 }
6619 return -ENOMEM;
6620 }
6621- if (likely(!vma || addr + len <= vma->vm_start)) {
6622+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6623 /*
6624 * Remember the place where we stopped the search:
6625 */
6626@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6627 /* make sure it can fit in the remaining address space */
6628 if (likely(addr > len)) {
6629 vma = find_vma(mm, addr-len);
6630- if (!vma || addr <= vma->vm_start) {
6631+ if (check_heap_stack_gap(vma, addr - len, len)) {
6632 /* remember the address as a hint for next time */
6633 return (mm->free_area_cache = addr-len);
6634 }
6635@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6636 if (unlikely(mm->mmap_base < len))
6637 goto bottomup;
6638
6639- addr = (mm->mmap_base-len) & HPAGE_MASK;
6640+ addr = mm->mmap_base - len;
6641
6642 do {
6643+ addr &= HPAGE_MASK;
6644 /*
6645 * Lookup failure means no vma is above this address,
6646 * else if new region fits below vma->vm_start,
6647 * return with success:
6648 */
6649 vma = find_vma(mm, addr);
6650- if (likely(!vma || addr+len <= vma->vm_start)) {
6651+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6652 /* remember the address as a hint for next time */
6653 return (mm->free_area_cache = addr);
6654 }
6655@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6656 mm->cached_hole_size = vma->vm_start - addr;
6657
6658 /* try just below the current vma->vm_start */
6659- addr = (vma->vm_start-len) & HPAGE_MASK;
6660- } while (likely(len < vma->vm_start));
6661+ addr = skip_heap_stack_gap(vma, len);
6662+ } while (!IS_ERR_VALUE(addr));
6663
6664 bottomup:
6665 /*
6666@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6667 if (addr) {
6668 addr = ALIGN(addr, HPAGE_SIZE);
6669 vma = find_vma(mm, addr);
6670- if (task_size - len >= addr &&
6671- (!vma || addr + len <= vma->vm_start))
6672+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6673 return addr;
6674 }
6675 if (mm->get_unmapped_area == arch_get_unmapped_area)
6676diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6677index dc7c3b1..34c0070 100644
6678--- a/arch/sparc/mm/init_32.c
6679+++ b/arch/sparc/mm/init_32.c
6680@@ -317,6 +317,9 @@ extern void device_scan(void);
6681 pgprot_t PAGE_SHARED __read_mostly;
6682 EXPORT_SYMBOL(PAGE_SHARED);
6683
6684+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6685+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6686+
6687 void __init paging_init(void)
6688 {
6689 switch(sparc_cpu_model) {
6690@@ -345,17 +348,17 @@ void __init paging_init(void)
6691
6692 /* Initialize the protection map with non-constant, MMU dependent values. */
6693 protection_map[0] = PAGE_NONE;
6694- protection_map[1] = PAGE_READONLY;
6695- protection_map[2] = PAGE_COPY;
6696- protection_map[3] = PAGE_COPY;
6697+ protection_map[1] = PAGE_READONLY_NOEXEC;
6698+ protection_map[2] = PAGE_COPY_NOEXEC;
6699+ protection_map[3] = PAGE_COPY_NOEXEC;
6700 protection_map[4] = PAGE_READONLY;
6701 protection_map[5] = PAGE_READONLY;
6702 protection_map[6] = PAGE_COPY;
6703 protection_map[7] = PAGE_COPY;
6704 protection_map[8] = PAGE_NONE;
6705- protection_map[9] = PAGE_READONLY;
6706- protection_map[10] = PAGE_SHARED;
6707- protection_map[11] = PAGE_SHARED;
6708+ protection_map[9] = PAGE_READONLY_NOEXEC;
6709+ protection_map[10] = PAGE_SHARED_NOEXEC;
6710+ protection_map[11] = PAGE_SHARED_NOEXEC;
6711 protection_map[12] = PAGE_READONLY;
6712 protection_map[13] = PAGE_READONLY;
6713 protection_map[14] = PAGE_SHARED;
6714diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6715index 509b1ff..bfd7118 100644
6716--- a/arch/sparc/mm/srmmu.c
6717+++ b/arch/sparc/mm/srmmu.c
6718@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6719 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6720 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6721 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6722+
6723+#ifdef CONFIG_PAX_PAGEEXEC
6724+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6725+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6726+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6727+#endif
6728+
6729 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6730 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6731
6732diff --git a/arch/um/Makefile b/arch/um/Makefile
6733index fc633db..5e1a1c2 100644
6734--- a/arch/um/Makefile
6735+++ b/arch/um/Makefile
6736@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6737 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6738 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6739
6740+ifdef CONSTIFY_PLUGIN
6741+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6742+endif
6743+
6744 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6745
6746 #This will adjust *FLAGS accordingly to the platform.
6747diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6748index 6c03acd..a5e0215 100644
6749--- a/arch/um/include/asm/kmap_types.h
6750+++ b/arch/um/include/asm/kmap_types.h
6751@@ -23,6 +23,7 @@ enum km_type {
6752 KM_IRQ1,
6753 KM_SOFTIRQ0,
6754 KM_SOFTIRQ1,
6755+ KM_CLEARPAGE,
6756 KM_TYPE_NR
6757 };
6758
6759diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6760index 4cc9b6c..02e5029 100644
6761--- a/arch/um/include/asm/page.h
6762+++ b/arch/um/include/asm/page.h
6763@@ -14,6 +14,9 @@
6764 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6765 #define PAGE_MASK (~(PAGE_SIZE-1))
6766
6767+#define ktla_ktva(addr) (addr)
6768+#define ktva_ktla(addr) (addr)
6769+
6770 #ifndef __ASSEMBLY__
6771
6772 struct page;
6773diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6774index 4a28a15..654dc2a 100644
6775--- a/arch/um/kernel/process.c
6776+++ b/arch/um/kernel/process.c
6777@@ -393,22 +393,6 @@ int singlestepping(void * t)
6778 return 2;
6779 }
6780
6781-/*
6782- * Only x86 and x86_64 have an arch_align_stack().
6783- * All other arches have "#define arch_align_stack(x) (x)"
6784- * in their asm/system.h
6785- * As this is included in UML from asm-um/system-generic.h,
6786- * we can use it to behave as the subarch does.
6787- */
6788-#ifndef arch_align_stack
6789-unsigned long arch_align_stack(unsigned long sp)
6790-{
6791- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6792- sp -= get_random_int() % 8192;
6793- return sp & ~0xf;
6794-}
6795-#endif
6796-
6797 unsigned long get_wchan(struct task_struct *p)
6798 {
6799 unsigned long stack_page, sp, ip;
6800diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
6801index d1b93c4..ae1b7fd 100644
6802--- a/arch/um/sys-i386/shared/sysdep/system.h
6803+++ b/arch/um/sys-i386/shared/sysdep/system.h
6804@@ -17,7 +17,7 @@
6805 # define AT_VECTOR_SIZE_ARCH 1
6806 #endif
6807
6808-extern unsigned long arch_align_stack(unsigned long sp);
6809+#define arch_align_stack(x) ((x) & ~0xfUL)
6810
6811 void default_idle(void);
6812
6813diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
6814index 857ca0b..9a2669d 100644
6815--- a/arch/um/sys-i386/syscalls.c
6816+++ b/arch/um/sys-i386/syscalls.c
6817@@ -11,6 +11,21 @@
6818 #include "asm/uaccess.h"
6819 #include "asm/unistd.h"
6820
6821+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6822+{
6823+ unsigned long pax_task_size = TASK_SIZE;
6824+
6825+#ifdef CONFIG_PAX_SEGMEXEC
6826+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6827+ pax_task_size = SEGMEXEC_TASK_SIZE;
6828+#endif
6829+
6830+ if (len > pax_task_size || addr > pax_task_size - len)
6831+ return -EINVAL;
6832+
6833+ return 0;
6834+}
6835+
6836 /*
6837 * Perform the select(nd, in, out, ex, tv) and mmap() system
6838 * calls. Linux/i386 didn't use to be able to handle more than
6839diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
6840index d1b93c4..ae1b7fd 100644
6841--- a/arch/um/sys-x86_64/shared/sysdep/system.h
6842+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
6843@@ -17,7 +17,7 @@
6844 # define AT_VECTOR_SIZE_ARCH 1
6845 #endif
6846
6847-extern unsigned long arch_align_stack(unsigned long sp);
6848+#define arch_align_stack(x) ((x) & ~0xfUL)
6849
6850 void default_idle(void);
6851
6852diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
6853index 73ae02a..f932de5 100644
6854--- a/arch/x86/Kconfig
6855+++ b/arch/x86/Kconfig
6856@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
6857
6858 config X86_32_LAZY_GS
6859 def_bool y
6860- depends on X86_32 && !CC_STACKPROTECTOR
6861+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
6862
6863 config KTIME_SCALAR
6864 def_bool X86_32
6865@@ -1008,7 +1008,7 @@ choice
6866
6867 config NOHIGHMEM
6868 bool "off"
6869- depends on !X86_NUMAQ
6870+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6871 ---help---
6872 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
6873 However, the address space of 32-bit x86 processors is only 4
6874@@ -1045,7 +1045,7 @@ config NOHIGHMEM
6875
6876 config HIGHMEM4G
6877 bool "4GB"
6878- depends on !X86_NUMAQ
6879+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6880 ---help---
6881 Select this if you have a 32-bit processor and between 1 and 4
6882 gigabytes of physical RAM.
6883@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
6884 hex
6885 default 0xB0000000 if VMSPLIT_3G_OPT
6886 default 0x80000000 if VMSPLIT_2G
6887- default 0x78000000 if VMSPLIT_2G_OPT
6888+ default 0x70000000 if VMSPLIT_2G_OPT
6889 default 0x40000000 if VMSPLIT_1G
6890 default 0xC0000000
6891 depends on X86_32
6892@@ -1460,6 +1460,7 @@ config SECCOMP
6893
6894 config CC_STACKPROTECTOR
6895 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
6896+ depends on X86_64 || !PAX_MEMORY_UDEREF
6897 ---help---
6898 This option turns on the -fstack-protector GCC feature. This
6899 feature puts, at the beginning of functions, a canary value on
6900@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
6901 config PHYSICAL_START
6902 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
6903 default "0x1000000"
6904+ range 0x400000 0x40000000
6905 ---help---
6906 This gives the physical address where the kernel is loaded.
6907
6908@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
6909 hex
6910 prompt "Alignment value to which kernel should be aligned" if X86_32
6911 default "0x1000000"
6912+ range 0x400000 0x1000000 if PAX_KERNEXEC
6913 range 0x2000 0x1000000
6914 ---help---
6915 This value puts the alignment restrictions on physical address
6916@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
6917 Say N if you want to disable CPU hotplug.
6918
6919 config COMPAT_VDSO
6920- def_bool y
6921+ def_bool n
6922 prompt "Compat VDSO support"
6923 depends on X86_32 || IA32_EMULATION
6924+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
6925 ---help---
6926 Map the 32-bit VDSO to the predictable old-style address too.
6927 ---help---
6928diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
6929index 0e566103..1a6b57e 100644
6930--- a/arch/x86/Kconfig.cpu
6931+++ b/arch/x86/Kconfig.cpu
6932@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
6933
6934 config X86_F00F_BUG
6935 def_bool y
6936- depends on M586MMX || M586TSC || M586 || M486 || M386
6937+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
6938
6939 config X86_WP_WORKS_OK
6940 def_bool y
6941@@ -360,7 +360,7 @@ config X86_POPAD_OK
6942
6943 config X86_ALIGNMENT_16
6944 def_bool y
6945- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6946+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6947
6948 config X86_INTEL_USERCOPY
6949 def_bool y
6950@@ -406,7 +406,7 @@ config X86_CMPXCHG64
6951 # generates cmov.
6952 config X86_CMOV
6953 def_bool y
6954- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6955+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6956
6957 config X86_MINIMUM_CPU_FAMILY
6958 int
6959diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
6960index d105f29..c928727 100644
6961--- a/arch/x86/Kconfig.debug
6962+++ b/arch/x86/Kconfig.debug
6963@@ -99,7 +99,7 @@ config X86_PTDUMP
6964 config DEBUG_RODATA
6965 bool "Write protect kernel read-only data structures"
6966 default y
6967- depends on DEBUG_KERNEL
6968+ depends on DEBUG_KERNEL && BROKEN
6969 ---help---
6970 Mark the kernel read-only data as write-protected in the pagetables,
6971 in order to catch accidental (and incorrect) writes to such const
6972diff --git a/arch/x86/Makefile b/arch/x86/Makefile
6973index d2d24c9..0f21f8d 100644
6974--- a/arch/x86/Makefile
6975+++ b/arch/x86/Makefile
6976@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
6977 else
6978 BITS := 64
6979 UTS_MACHINE := x86_64
6980+ biarch := $(call cc-option,-m64)
6981 CHECKFLAGS += -D__x86_64__ -m64
6982
6983 KBUILD_AFLAGS += -m64
6984@@ -189,3 +190,12 @@ define archhelp
6985 echo ' FDARGS="..." arguments for the booted kernel'
6986 echo ' FDINITRD=file initrd for the booted kernel'
6987 endef
6988+
6989+define OLD_LD
6990+
6991+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
6992+*** Please upgrade your binutils to 2.18 or newer
6993+endef
6994+
6995+archprepare:
6996+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
6997diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
6998index ec749c2..bbb5319 100644
6999--- a/arch/x86/boot/Makefile
7000+++ b/arch/x86/boot/Makefile
7001@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7002 $(call cc-option, -fno-stack-protector) \
7003 $(call cc-option, -mpreferred-stack-boundary=2)
7004 KBUILD_CFLAGS += $(call cc-option, -m32)
7005+ifdef CONSTIFY_PLUGIN
7006+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7007+endif
7008 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7009 GCOV_PROFILE := n
7010
7011diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7012index 878e4b9..20537ab 100644
7013--- a/arch/x86/boot/bitops.h
7014+++ b/arch/x86/boot/bitops.h
7015@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7016 u8 v;
7017 const u32 *p = (const u32 *)addr;
7018
7019- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7020+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7021 return v;
7022 }
7023
7024@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7025
7026 static inline void set_bit(int nr, void *addr)
7027 {
7028- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7029+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7030 }
7031
7032 #endif /* BOOT_BITOPS_H */
7033diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7034index 98239d2..f40214c 100644
7035--- a/arch/x86/boot/boot.h
7036+++ b/arch/x86/boot/boot.h
7037@@ -82,7 +82,7 @@ static inline void io_delay(void)
7038 static inline u16 ds(void)
7039 {
7040 u16 seg;
7041- asm("movw %%ds,%0" : "=rm" (seg));
7042+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7043 return seg;
7044 }
7045
7046@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7047 static inline int memcmp(const void *s1, const void *s2, size_t len)
7048 {
7049 u8 diff;
7050- asm("repe; cmpsb; setnz %0"
7051+ asm volatile("repe; cmpsb; setnz %0"
7052 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7053 return diff;
7054 }
7055diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7056index f8ed065..5bf5ff3 100644
7057--- a/arch/x86/boot/compressed/Makefile
7058+++ b/arch/x86/boot/compressed/Makefile
7059@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7060 KBUILD_CFLAGS += $(cflags-y)
7061 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7062 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7063+ifdef CONSTIFY_PLUGIN
7064+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7065+endif
7066
7067 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7068 GCOV_PROFILE := n
7069diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7070index f543b70..b60fba8 100644
7071--- a/arch/x86/boot/compressed/head_32.S
7072+++ b/arch/x86/boot/compressed/head_32.S
7073@@ -76,7 +76,7 @@ ENTRY(startup_32)
7074 notl %eax
7075 andl %eax, %ebx
7076 #else
7077- movl $LOAD_PHYSICAL_ADDR, %ebx
7078+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7079 #endif
7080
7081 /* Target address to relocate to for decompression */
7082@@ -149,7 +149,7 @@ relocated:
7083 * and where it was actually loaded.
7084 */
7085 movl %ebp, %ebx
7086- subl $LOAD_PHYSICAL_ADDR, %ebx
7087+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7088 jz 2f /* Nothing to be done if loaded at compiled addr. */
7089 /*
7090 * Process relocations.
7091@@ -157,8 +157,7 @@ relocated:
7092
7093 1: subl $4, %edi
7094 movl (%edi), %ecx
7095- testl %ecx, %ecx
7096- jz 2f
7097+ jecxz 2f
7098 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7099 jmp 1b
7100 2:
7101diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7102index 077e1b6..2c6b13b 100644
7103--- a/arch/x86/boot/compressed/head_64.S
7104+++ b/arch/x86/boot/compressed/head_64.S
7105@@ -91,7 +91,7 @@ ENTRY(startup_32)
7106 notl %eax
7107 andl %eax, %ebx
7108 #else
7109- movl $LOAD_PHYSICAL_ADDR, %ebx
7110+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7111 #endif
7112
7113 /* Target address to relocate to for decompression */
7114@@ -183,7 +183,7 @@ no_longmode:
7115 hlt
7116 jmp 1b
7117
7118-#include "../../kernel/verify_cpu_64.S"
7119+#include "../../kernel/verify_cpu.S"
7120
7121 /*
7122 * Be careful here startup_64 needs to be at a predictable
7123@@ -234,7 +234,7 @@ ENTRY(startup_64)
7124 notq %rax
7125 andq %rax, %rbp
7126 #else
7127- movq $LOAD_PHYSICAL_ADDR, %rbp
7128+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7129 #endif
7130
7131 /* Target address to relocate to for decompression */
7132diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7133index 842b2a3..f00178b 100644
7134--- a/arch/x86/boot/compressed/misc.c
7135+++ b/arch/x86/boot/compressed/misc.c
7136@@ -288,7 +288,7 @@ static void parse_elf(void *output)
7137 case PT_LOAD:
7138 #ifdef CONFIG_RELOCATABLE
7139 dest = output;
7140- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7141+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7142 #else
7143 dest = (void *)(phdr->p_paddr);
7144 #endif
7145@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7146 error("Destination address too large");
7147 #endif
7148 #ifndef CONFIG_RELOCATABLE
7149- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7150+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7151 error("Wrong destination address");
7152 #endif
7153
7154diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7155index bcbd36c..b1754af 100644
7156--- a/arch/x86/boot/compressed/mkpiggy.c
7157+++ b/arch/x86/boot/compressed/mkpiggy.c
7158@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7159
7160 offs = (olen > ilen) ? olen - ilen : 0;
7161 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7162- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7163+ offs += 64*1024; /* Add 64K bytes slack */
7164 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7165
7166 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7167diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7168index bbeb0c3..f5167ab 100644
7169--- a/arch/x86/boot/compressed/relocs.c
7170+++ b/arch/x86/boot/compressed/relocs.c
7171@@ -10,8 +10,11 @@
7172 #define USE_BSD
7173 #include <endian.h>
7174
7175+#include "../../../../include/linux/autoconf.h"
7176+
7177 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7178 static Elf32_Ehdr ehdr;
7179+static Elf32_Phdr *phdr;
7180 static unsigned long reloc_count, reloc_idx;
7181 static unsigned long *relocs;
7182
7183@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7184
7185 static int is_safe_abs_reloc(const char* sym_name)
7186 {
7187- int i;
7188+ unsigned int i;
7189
7190 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7191 if (!strcmp(sym_name, safe_abs_relocs[i]))
7192@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7193 }
7194 }
7195
7196+static void read_phdrs(FILE *fp)
7197+{
7198+ unsigned int i;
7199+
7200+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7201+ if (!phdr) {
7202+ die("Unable to allocate %d program headers\n",
7203+ ehdr.e_phnum);
7204+ }
7205+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7206+ die("Seek to %d failed: %s\n",
7207+ ehdr.e_phoff, strerror(errno));
7208+ }
7209+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7210+ die("Cannot read ELF program headers: %s\n",
7211+ strerror(errno));
7212+ }
7213+ for(i = 0; i < ehdr.e_phnum; i++) {
7214+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7215+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7216+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7217+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7218+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7219+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7220+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7221+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7222+ }
7223+
7224+}
7225+
7226 static void read_shdrs(FILE *fp)
7227 {
7228- int i;
7229+ unsigned int i;
7230 Elf32_Shdr shdr;
7231
7232 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7233@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7234
7235 static void read_strtabs(FILE *fp)
7236 {
7237- int i;
7238+ unsigned int i;
7239 for (i = 0; i < ehdr.e_shnum; i++) {
7240 struct section *sec = &secs[i];
7241 if (sec->shdr.sh_type != SHT_STRTAB) {
7242@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7243
7244 static void read_symtabs(FILE *fp)
7245 {
7246- int i,j;
7247+ unsigned int i,j;
7248 for (i = 0; i < ehdr.e_shnum; i++) {
7249 struct section *sec = &secs[i];
7250 if (sec->shdr.sh_type != SHT_SYMTAB) {
7251@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7252
7253 static void read_relocs(FILE *fp)
7254 {
7255- int i,j;
7256+ unsigned int i,j;
7257+ uint32_t base;
7258+
7259 for (i = 0; i < ehdr.e_shnum; i++) {
7260 struct section *sec = &secs[i];
7261 if (sec->shdr.sh_type != SHT_REL) {
7262@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7263 die("Cannot read symbol table: %s\n",
7264 strerror(errno));
7265 }
7266+ base = 0;
7267+ for (j = 0; j < ehdr.e_phnum; j++) {
7268+ if (phdr[j].p_type != PT_LOAD )
7269+ continue;
7270+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7271+ continue;
7272+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7273+ break;
7274+ }
7275 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7276 Elf32_Rel *rel = &sec->reltab[j];
7277- rel->r_offset = elf32_to_cpu(rel->r_offset);
7278+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7279 rel->r_info = elf32_to_cpu(rel->r_info);
7280 }
7281 }
7282@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7283
7284 static void print_absolute_symbols(void)
7285 {
7286- int i;
7287+ unsigned int i;
7288 printf("Absolute symbols\n");
7289 printf(" Num: Value Size Type Bind Visibility Name\n");
7290 for (i = 0; i < ehdr.e_shnum; i++) {
7291 struct section *sec = &secs[i];
7292 char *sym_strtab;
7293 Elf32_Sym *sh_symtab;
7294- int j;
7295+ unsigned int j;
7296
7297 if (sec->shdr.sh_type != SHT_SYMTAB) {
7298 continue;
7299@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7300
7301 static void print_absolute_relocs(void)
7302 {
7303- int i, printed = 0;
7304+ unsigned int i, printed = 0;
7305
7306 for (i = 0; i < ehdr.e_shnum; i++) {
7307 struct section *sec = &secs[i];
7308 struct section *sec_applies, *sec_symtab;
7309 char *sym_strtab;
7310 Elf32_Sym *sh_symtab;
7311- int j;
7312+ unsigned int j;
7313 if (sec->shdr.sh_type != SHT_REL) {
7314 continue;
7315 }
7316@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7317
7318 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7319 {
7320- int i;
7321+ unsigned int i;
7322 /* Walk through the relocations */
7323 for (i = 0; i < ehdr.e_shnum; i++) {
7324 char *sym_strtab;
7325 Elf32_Sym *sh_symtab;
7326 struct section *sec_applies, *sec_symtab;
7327- int j;
7328+ unsigned int j;
7329 struct section *sec = &secs[i];
7330
7331 if (sec->shdr.sh_type != SHT_REL) {
7332@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7333 if (sym->st_shndx == SHN_ABS) {
7334 continue;
7335 }
7336+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7337+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7338+ continue;
7339+
7340+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7341+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7342+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7343+ continue;
7344+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7345+ continue;
7346+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7347+ continue;
7348+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7349+ continue;
7350+#endif
7351 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7352 /*
7353 * NONE can be ignored and and PC relative
7354@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7355
7356 static void emit_relocs(int as_text)
7357 {
7358- int i;
7359+ unsigned int i;
7360 /* Count how many relocations I have and allocate space for them. */
7361 reloc_count = 0;
7362 walk_relocs(count_reloc);
7363@@ -634,6 +693,7 @@ int main(int argc, char **argv)
7364 fname, strerror(errno));
7365 }
7366 read_ehdr(fp);
7367+ read_phdrs(fp);
7368 read_shdrs(fp);
7369 read_strtabs(fp);
7370 read_symtabs(fp);
7371diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7372index 4d3ff03..e4972ff 100644
7373--- a/arch/x86/boot/cpucheck.c
7374+++ b/arch/x86/boot/cpucheck.c
7375@@ -74,7 +74,7 @@ static int has_fpu(void)
7376 u16 fcw = -1, fsw = -1;
7377 u32 cr0;
7378
7379- asm("movl %%cr0,%0" : "=r" (cr0));
7380+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7381 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7382 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7383 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7384@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7385 {
7386 u32 f0, f1;
7387
7388- asm("pushfl ; "
7389+ asm volatile("pushfl ; "
7390 "pushfl ; "
7391 "popl %0 ; "
7392 "movl %0,%1 ; "
7393@@ -115,7 +115,7 @@ static void get_flags(void)
7394 set_bit(X86_FEATURE_FPU, cpu.flags);
7395
7396 if (has_eflag(X86_EFLAGS_ID)) {
7397- asm("cpuid"
7398+ asm volatile("cpuid"
7399 : "=a" (max_intel_level),
7400 "=b" (cpu_vendor[0]),
7401 "=d" (cpu_vendor[1]),
7402@@ -124,7 +124,7 @@ static void get_flags(void)
7403
7404 if (max_intel_level >= 0x00000001 &&
7405 max_intel_level <= 0x0000ffff) {
7406- asm("cpuid"
7407+ asm volatile("cpuid"
7408 : "=a" (tfms),
7409 "=c" (cpu.flags[4]),
7410 "=d" (cpu.flags[0])
7411@@ -136,7 +136,7 @@ static void get_flags(void)
7412 cpu.model += ((tfms >> 16) & 0xf) << 4;
7413 }
7414
7415- asm("cpuid"
7416+ asm volatile("cpuid"
7417 : "=a" (max_amd_level)
7418 : "a" (0x80000000)
7419 : "ebx", "ecx", "edx");
7420@@ -144,7 +144,7 @@ static void get_flags(void)
7421 if (max_amd_level >= 0x80000001 &&
7422 max_amd_level <= 0x8000ffff) {
7423 u32 eax = 0x80000001;
7424- asm("cpuid"
7425+ asm volatile("cpuid"
7426 : "+a" (eax),
7427 "=c" (cpu.flags[6]),
7428 "=d" (cpu.flags[1])
7429@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7430 u32 ecx = MSR_K7_HWCR;
7431 u32 eax, edx;
7432
7433- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7434+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7435 eax &= ~(1 << 15);
7436- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7437+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7438
7439 get_flags(); /* Make sure it really did something */
7440 err = check_flags();
7441@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7442 u32 ecx = MSR_VIA_FCR;
7443 u32 eax, edx;
7444
7445- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7446+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7447 eax |= (1<<1)|(1<<7);
7448- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7449+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7450
7451 set_bit(X86_FEATURE_CX8, cpu.flags);
7452 err = check_flags();
7453@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7454 u32 eax, edx;
7455 u32 level = 1;
7456
7457- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7458- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7459- asm("cpuid"
7460+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7461+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7462+ asm volatile("cpuid"
7463 : "+a" (level), "=d" (cpu.flags[0])
7464 : : "ecx", "ebx");
7465- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7466+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7467
7468 err = check_flags();
7469 }
7470diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7471index b31cc54..8d69237 100644
7472--- a/arch/x86/boot/header.S
7473+++ b/arch/x86/boot/header.S
7474@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7475 # single linked list of
7476 # struct setup_data
7477
7478-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7479+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7480
7481 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7482 #define VO_INIT_SIZE (VO__end - VO__text)
7483diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7484index cae3feb..ff8ff2a 100644
7485--- a/arch/x86/boot/memory.c
7486+++ b/arch/x86/boot/memory.c
7487@@ -19,7 +19,7 @@
7488
7489 static int detect_memory_e820(void)
7490 {
7491- int count = 0;
7492+ unsigned int count = 0;
7493 struct biosregs ireg, oreg;
7494 struct e820entry *desc = boot_params.e820_map;
7495 static struct e820entry buf; /* static so it is zeroed */
7496diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7497index 11e8c6e..fdbb1ed 100644
7498--- a/arch/x86/boot/video-vesa.c
7499+++ b/arch/x86/boot/video-vesa.c
7500@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7501
7502 boot_params.screen_info.vesapm_seg = oreg.es;
7503 boot_params.screen_info.vesapm_off = oreg.di;
7504+ boot_params.screen_info.vesapm_size = oreg.cx;
7505 }
7506
7507 /*
7508diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7509index d42da38..787cdf3 100644
7510--- a/arch/x86/boot/video.c
7511+++ b/arch/x86/boot/video.c
7512@@ -90,7 +90,7 @@ static void store_mode_params(void)
7513 static unsigned int get_entry(void)
7514 {
7515 char entry_buf[4];
7516- int i, len = 0;
7517+ unsigned int i, len = 0;
7518 int key;
7519 unsigned int v;
7520
7521diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7522index 5b577d5..3c1fed4 100644
7523--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7524+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7525@@ -8,6 +8,8 @@
7526 * including this sentence is retained in full.
7527 */
7528
7529+#include <asm/alternative-asm.h>
7530+
7531 .extern crypto_ft_tab
7532 .extern crypto_it_tab
7533 .extern crypto_fl_tab
7534@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7535 je B192; \
7536 leaq 32(r9),r9;
7537
7538+#define ret pax_force_retaddr 0, 1; ret
7539+
7540 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7541 movq r1,r2; \
7542 movq r3,r4; \
7543diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7544index eb0566e..e3ebad8 100644
7545--- a/arch/x86/crypto/aesni-intel_asm.S
7546+++ b/arch/x86/crypto/aesni-intel_asm.S
7547@@ -16,6 +16,7 @@
7548 */
7549
7550 #include <linux/linkage.h>
7551+#include <asm/alternative-asm.h>
7552
7553 .text
7554
7555@@ -52,6 +53,7 @@ _key_expansion_256a:
7556 pxor %xmm1, %xmm0
7557 movaps %xmm0, (%rcx)
7558 add $0x10, %rcx
7559+ pax_force_retaddr_bts
7560 ret
7561
7562 _key_expansion_192a:
7563@@ -75,6 +77,7 @@ _key_expansion_192a:
7564 shufps $0b01001110, %xmm2, %xmm1
7565 movaps %xmm1, 16(%rcx)
7566 add $0x20, %rcx
7567+ pax_force_retaddr_bts
7568 ret
7569
7570 _key_expansion_192b:
7571@@ -93,6 +96,7 @@ _key_expansion_192b:
7572
7573 movaps %xmm0, (%rcx)
7574 add $0x10, %rcx
7575+ pax_force_retaddr_bts
7576 ret
7577
7578 _key_expansion_256b:
7579@@ -104,6 +108,7 @@ _key_expansion_256b:
7580 pxor %xmm1, %xmm2
7581 movaps %xmm2, (%rcx)
7582 add $0x10, %rcx
7583+ pax_force_retaddr_bts
7584 ret
7585
7586 /*
7587@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7588 cmp %rcx, %rdi
7589 jb .Ldec_key_loop
7590 xor %rax, %rax
7591+ pax_force_retaddr 0, 1
7592 ret
7593+ENDPROC(aesni_set_key)
7594
7595 /*
7596 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7597@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7598 movups (INP), STATE # input
7599 call _aesni_enc1
7600 movups STATE, (OUTP) # output
7601+ pax_force_retaddr 0, 1
7602 ret
7603+ENDPROC(aesni_enc)
7604
7605 /*
7606 * _aesni_enc1: internal ABI
7607@@ -319,6 +328,7 @@ _aesni_enc1:
7608 movaps 0x70(TKEYP), KEY
7609 # aesenclast KEY, STATE # last round
7610 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7611+ pax_force_retaddr_bts
7612 ret
7613
7614 /*
7615@@ -482,6 +492,7 @@ _aesni_enc4:
7616 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7617 # aesenclast KEY, STATE4
7618 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7619+ pax_force_retaddr_bts
7620 ret
7621
7622 /*
7623@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7624 movups (INP), STATE # input
7625 call _aesni_dec1
7626 movups STATE, (OUTP) #output
7627+ pax_force_retaddr 0, 1
7628 ret
7629+ENDPROC(aesni_dec)
7630
7631 /*
7632 * _aesni_dec1: internal ABI
7633@@ -563,6 +576,7 @@ _aesni_dec1:
7634 movaps 0x70(TKEYP), KEY
7635 # aesdeclast KEY, STATE # last round
7636 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7637+ pax_force_retaddr_bts
7638 ret
7639
7640 /*
7641@@ -726,6 +740,7 @@ _aesni_dec4:
7642 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7643 # aesdeclast KEY, STATE4
7644 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7645+ pax_force_retaddr_bts
7646 ret
7647
7648 /*
7649@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7650 cmp $16, LEN
7651 jge .Lecb_enc_loop1
7652 .Lecb_enc_ret:
7653+ pax_force_retaddr 0, 1
7654 ret
7655+ENDPROC(aesni_ecb_enc)
7656
7657 /*
7658 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7659@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7660 cmp $16, LEN
7661 jge .Lecb_dec_loop1
7662 .Lecb_dec_ret:
7663+ pax_force_retaddr 0, 1
7664 ret
7665+ENDPROC(aesni_ecb_dec)
7666
7667 /*
7668 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7669@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7670 jge .Lcbc_enc_loop
7671 movups STATE, (IVP)
7672 .Lcbc_enc_ret:
7673+ pax_force_retaddr 0, 1
7674 ret
7675+ENDPROC(aesni_cbc_enc)
7676
7677 /*
7678 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7679@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7680 .Lcbc_dec_ret:
7681 movups IV, (IVP)
7682 .Lcbc_dec_just_ret:
7683+ pax_force_retaddr 0, 1
7684 ret
7685+ENDPROC(aesni_cbc_dec)
7686diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7687index 6214a9b..1f4fc9a 100644
7688--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7689+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7690@@ -1,3 +1,5 @@
7691+#include <asm/alternative-asm.h>
7692+
7693 # enter ECRYPT_encrypt_bytes
7694 .text
7695 .p2align 5
7696@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7697 add %r11,%rsp
7698 mov %rdi,%rax
7699 mov %rsi,%rdx
7700+ pax_force_retaddr 0, 1
7701 ret
7702 # bytesatleast65:
7703 ._bytesatleast65:
7704@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7705 add %r11,%rsp
7706 mov %rdi,%rax
7707 mov %rsi,%rdx
7708+ pax_force_retaddr
7709 ret
7710 # enter ECRYPT_ivsetup
7711 .text
7712@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7713 add %r11,%rsp
7714 mov %rdi,%rax
7715 mov %rsi,%rdx
7716+ pax_force_retaddr
7717 ret
7718diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7719index 35974a5..5662ae2 100644
7720--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7721+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7722@@ -21,6 +21,7 @@
7723 .text
7724
7725 #include <asm/asm-offsets.h>
7726+#include <asm/alternative-asm.h>
7727
7728 #define a_offset 0
7729 #define b_offset 4
7730@@ -269,6 +270,7 @@ twofish_enc_blk:
7731
7732 popq R1
7733 movq $1,%rax
7734+ pax_force_retaddr 0, 1
7735 ret
7736
7737 twofish_dec_blk:
7738@@ -321,4 +323,5 @@ twofish_dec_blk:
7739
7740 popq R1
7741 movq $1,%rax
7742+ pax_force_retaddr 0, 1
7743 ret
7744diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7745index 14531ab..a89a0c0 100644
7746--- a/arch/x86/ia32/ia32_aout.c
7747+++ b/arch/x86/ia32/ia32_aout.c
7748@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7749 unsigned long dump_start, dump_size;
7750 struct user32 dump;
7751
7752+ memset(&dump, 0, sizeof(dump));
7753+
7754 fs = get_fs();
7755 set_fs(KERNEL_DS);
7756 has_dumped = 1;
7757@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7758 dump_size = dump.u_ssize << PAGE_SHIFT;
7759 DUMP_WRITE(dump_start, dump_size);
7760 }
7761- /*
7762- * Finally dump the task struct. Not be used by gdb, but
7763- * could be useful
7764- */
7765- set_fs(KERNEL_DS);
7766- DUMP_WRITE(current, sizeof(*current));
7767 end_coredump:
7768 set_fs(fs);
7769 return has_dumped;
7770diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7771index 588a7aa..a3468b0 100644
7772--- a/arch/x86/ia32/ia32_signal.c
7773+++ b/arch/x86/ia32/ia32_signal.c
7774@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7775 }
7776 seg = get_fs();
7777 set_fs(KERNEL_DS);
7778- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7779+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7780 set_fs(seg);
7781 if (ret >= 0 && uoss_ptr) {
7782 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7783@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7784 */
7785 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7786 size_t frame_size,
7787- void **fpstate)
7788+ void __user **fpstate)
7789 {
7790 unsigned long sp;
7791
7792@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7793
7794 if (used_math()) {
7795 sp = sp - sig_xstate_ia32_size;
7796- *fpstate = (struct _fpstate_ia32 *) sp;
7797+ *fpstate = (struct _fpstate_ia32 __user *) sp;
7798 if (save_i387_xstate_ia32(*fpstate) < 0)
7799 return (void __user *) -1L;
7800 }
7801@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7802 sp -= frame_size;
7803 /* Align the stack pointer according to the i386 ABI,
7804 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7805- sp = ((sp + 4) & -16ul) - 4;
7806+ sp = ((sp - 12) & -16ul) - 4;
7807 return (void __user *) sp;
7808 }
7809
7810@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
7811 * These are actually not used anymore, but left because some
7812 * gdb versions depend on them as a marker.
7813 */
7814- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7815+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7816 } put_user_catch(err);
7817
7818 if (err)
7819@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7820 0xb8,
7821 __NR_ia32_rt_sigreturn,
7822 0x80cd,
7823- 0,
7824+ 0
7825 };
7826
7827 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7828@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7829
7830 if (ka->sa.sa_flags & SA_RESTORER)
7831 restorer = ka->sa.sa_restorer;
7832+ else if (current->mm->context.vdso)
7833+ /* Return stub is in 32bit vsyscall page */
7834+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7835 else
7836- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7837- rt_sigreturn);
7838+ restorer = &frame->retcode;
7839 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7840
7841 /*
7842 * Not actually used anymore, but left because some gdb
7843 * versions need it.
7844 */
7845- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7846+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7847 } put_user_catch(err);
7848
7849 if (err)
7850diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
7851index 4edd8eb..29124b4 100644
7852--- a/arch/x86/ia32/ia32entry.S
7853+++ b/arch/x86/ia32/ia32entry.S
7854@@ -13,7 +13,9 @@
7855 #include <asm/thread_info.h>
7856 #include <asm/segment.h>
7857 #include <asm/irqflags.h>
7858+#include <asm/pgtable.h>
7859 #include <linux/linkage.h>
7860+#include <asm/alternative-asm.h>
7861
7862 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
7863 #include <linux/elf-em.h>
7864@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
7865 ENDPROC(native_irq_enable_sysexit)
7866 #endif
7867
7868+ .macro pax_enter_kernel_user
7869+ pax_set_fptr_mask
7870+#ifdef CONFIG_PAX_MEMORY_UDEREF
7871+ call pax_enter_kernel_user
7872+#endif
7873+ .endm
7874+
7875+ .macro pax_exit_kernel_user
7876+#ifdef CONFIG_PAX_MEMORY_UDEREF
7877+ call pax_exit_kernel_user
7878+#endif
7879+#ifdef CONFIG_PAX_RANDKSTACK
7880+ pushq %rax
7881+ pushq %r11
7882+ call pax_randomize_kstack
7883+ popq %r11
7884+ popq %rax
7885+#endif
7886+ .endm
7887+
7888+.macro pax_erase_kstack
7889+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
7890+ call pax_erase_kstack
7891+#endif
7892+.endm
7893+
7894 /*
7895 * 32bit SYSENTER instruction entry.
7896 *
7897@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
7898 CFI_REGISTER rsp,rbp
7899 SWAPGS_UNSAFE_STACK
7900 movq PER_CPU_VAR(kernel_stack), %rsp
7901- addq $(KERNEL_STACK_OFFSET),%rsp
7902- /*
7903- * No need to follow this irqs on/off section: the syscall
7904- * disabled irqs, here we enable it straight after entry:
7905- */
7906- ENABLE_INTERRUPTS(CLBR_NONE)
7907 movl %ebp,%ebp /* zero extension */
7908 pushq $__USER32_DS
7909 CFI_ADJUST_CFA_OFFSET 8
7910@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
7911 pushfq
7912 CFI_ADJUST_CFA_OFFSET 8
7913 /*CFI_REL_OFFSET rflags,0*/
7914- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
7915- CFI_REGISTER rip,r10
7916+ orl $X86_EFLAGS_IF,(%rsp)
7917+ GET_THREAD_INFO(%r11)
7918+ movl TI_sysenter_return(%r11), %r11d
7919+ CFI_REGISTER rip,r11
7920 pushq $__USER32_CS
7921 CFI_ADJUST_CFA_OFFSET 8
7922 /*CFI_REL_OFFSET cs,0*/
7923 movl %eax, %eax
7924- pushq %r10
7925+ pushq %r11
7926 CFI_ADJUST_CFA_OFFSET 8
7927 CFI_REL_OFFSET rip,0
7928 pushq %rax
7929 CFI_ADJUST_CFA_OFFSET 8
7930 cld
7931 SAVE_ARGS 0,0,1
7932+ pax_enter_kernel_user
7933+ /*
7934+ * No need to follow this irqs on/off section: the syscall
7935+ * disabled irqs, here we enable it straight after entry:
7936+ */
7937+ ENABLE_INTERRUPTS(CLBR_NONE)
7938 /* no need to do an access_ok check here because rbp has been
7939 32bit zero extended */
7940+
7941+#ifdef CONFIG_PAX_MEMORY_UDEREF
7942+ mov $PAX_USER_SHADOW_BASE,%r11
7943+ add %r11,%rbp
7944+#endif
7945+
7946 1: movl (%rbp),%ebp
7947 .section __ex_table,"a"
7948 .quad 1b,ia32_badarg
7949 .previous
7950- GET_THREAD_INFO(%r10)
7951- orl $TS_COMPAT,TI_status(%r10)
7952- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7953+ GET_THREAD_INFO(%r11)
7954+ orl $TS_COMPAT,TI_status(%r11)
7955+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7956 CFI_REMEMBER_STATE
7957 jnz sysenter_tracesys
7958 cmpq $(IA32_NR_syscalls-1),%rax
7959@@ -166,13 +202,15 @@ sysenter_do_call:
7960 sysenter_dispatch:
7961 call *ia32_sys_call_table(,%rax,8)
7962 movq %rax,RAX-ARGOFFSET(%rsp)
7963- GET_THREAD_INFO(%r10)
7964+ GET_THREAD_INFO(%r11)
7965 DISABLE_INTERRUPTS(CLBR_NONE)
7966 TRACE_IRQS_OFF
7967- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7968+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7969 jnz sysexit_audit
7970 sysexit_from_sys_call:
7971- andl $~TS_COMPAT,TI_status(%r10)
7972+ pax_exit_kernel_user
7973+ pax_erase_kstack
7974+ andl $~TS_COMPAT,TI_status(%r11)
7975 /* clear IF, that popfq doesn't enable interrupts early */
7976 andl $~0x200,EFLAGS-R11(%rsp)
7977 movl RIP-R11(%rsp),%edx /* User %eip */
7978@@ -200,6 +238,9 @@ sysexit_from_sys_call:
7979 movl %eax,%esi /* 2nd arg: syscall number */
7980 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7981 call audit_syscall_entry
7982+
7983+ pax_erase_kstack
7984+
7985 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7986 cmpq $(IA32_NR_syscalls-1),%rax
7987 ja ia32_badsys
7988@@ -211,7 +252,7 @@ sysexit_from_sys_call:
7989 .endm
7990
7991 .macro auditsys_exit exit
7992- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7993+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7994 jnz ia32_ret_from_sys_call
7995 TRACE_IRQS_ON
7996 sti
7997@@ -221,12 +262,12 @@ sysexit_from_sys_call:
7998 movzbl %al,%edi /* zero-extend that into %edi */
7999 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8000 call audit_syscall_exit
8001- GET_THREAD_INFO(%r10)
8002+ GET_THREAD_INFO(%r11)
8003 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8004 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8005 cli
8006 TRACE_IRQS_OFF
8007- testl %edi,TI_flags(%r10)
8008+ testl %edi,TI_flags(%r11)
8009 jz \exit
8010 CLEAR_RREGS -ARGOFFSET
8011 jmp int_with_check
8012@@ -244,7 +285,7 @@ sysexit_audit:
8013
8014 sysenter_tracesys:
8015 #ifdef CONFIG_AUDITSYSCALL
8016- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8017+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8018 jz sysenter_auditsys
8019 #endif
8020 SAVE_REST
8021@@ -252,6 +293,9 @@ sysenter_tracesys:
8022 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8023 movq %rsp,%rdi /* &pt_regs -> arg1 */
8024 call syscall_trace_enter
8025+
8026+ pax_erase_kstack
8027+
8028 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8029 RESTORE_REST
8030 cmpq $(IA32_NR_syscalls-1),%rax
8031@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8032 ENTRY(ia32_cstar_target)
8033 CFI_STARTPROC32 simple
8034 CFI_SIGNAL_FRAME
8035- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8036+ CFI_DEF_CFA rsp,0
8037 CFI_REGISTER rip,rcx
8038 /*CFI_REGISTER rflags,r11*/
8039 SWAPGS_UNSAFE_STACK
8040 movl %esp,%r8d
8041 CFI_REGISTER rsp,r8
8042 movq PER_CPU_VAR(kernel_stack),%rsp
8043+ SAVE_ARGS 8*6,1,1
8044+ pax_enter_kernel_user
8045 /*
8046 * No need to follow this irqs on/off section: the syscall
8047 * disabled irqs and here we enable it straight after entry:
8048 */
8049 ENABLE_INTERRUPTS(CLBR_NONE)
8050- SAVE_ARGS 8,1,1
8051 movl %eax,%eax /* zero extension */
8052 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8053 movq %rcx,RIP-ARGOFFSET(%rsp)
8054@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8055 /* no need to do an access_ok check here because r8 has been
8056 32bit zero extended */
8057 /* hardware stack frame is complete now */
8058+
8059+#ifdef CONFIG_PAX_MEMORY_UDEREF
8060+ mov $PAX_USER_SHADOW_BASE,%r11
8061+ add %r11,%r8
8062+#endif
8063+
8064 1: movl (%r8),%r9d
8065 .section __ex_table,"a"
8066 .quad 1b,ia32_badarg
8067 .previous
8068- GET_THREAD_INFO(%r10)
8069- orl $TS_COMPAT,TI_status(%r10)
8070- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8071+ GET_THREAD_INFO(%r11)
8072+ orl $TS_COMPAT,TI_status(%r11)
8073+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8074 CFI_REMEMBER_STATE
8075 jnz cstar_tracesys
8076 cmpq $IA32_NR_syscalls-1,%rax
8077@@ -327,13 +378,15 @@ cstar_do_call:
8078 cstar_dispatch:
8079 call *ia32_sys_call_table(,%rax,8)
8080 movq %rax,RAX-ARGOFFSET(%rsp)
8081- GET_THREAD_INFO(%r10)
8082+ GET_THREAD_INFO(%r11)
8083 DISABLE_INTERRUPTS(CLBR_NONE)
8084 TRACE_IRQS_OFF
8085- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8086+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8087 jnz sysretl_audit
8088 sysretl_from_sys_call:
8089- andl $~TS_COMPAT,TI_status(%r10)
8090+ pax_exit_kernel_user
8091+ pax_erase_kstack
8092+ andl $~TS_COMPAT,TI_status(%r11)
8093 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8094 movl RIP-ARGOFFSET(%rsp),%ecx
8095 CFI_REGISTER rip,rcx
8096@@ -361,7 +414,7 @@ sysretl_audit:
8097
8098 cstar_tracesys:
8099 #ifdef CONFIG_AUDITSYSCALL
8100- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8101+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8102 jz cstar_auditsys
8103 #endif
8104 xchgl %r9d,%ebp
8105@@ -370,6 +423,9 @@ cstar_tracesys:
8106 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8107 movq %rsp,%rdi /* &pt_regs -> arg1 */
8108 call syscall_trace_enter
8109+
8110+ pax_erase_kstack
8111+
8112 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8113 RESTORE_REST
8114 xchgl %ebp,%r9d
8115@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8116 CFI_REL_OFFSET rip,RIP-RIP
8117 PARAVIRT_ADJUST_EXCEPTION_FRAME
8118 SWAPGS
8119- /*
8120- * No need to follow this irqs on/off section: the syscall
8121- * disabled irqs and here we enable it straight after entry:
8122- */
8123- ENABLE_INTERRUPTS(CLBR_NONE)
8124 movl %eax,%eax
8125 pushq %rax
8126 CFI_ADJUST_CFA_OFFSET 8
8127@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8128 /* note the registers are not zero extended to the sf.
8129 this could be a problem. */
8130 SAVE_ARGS 0,0,1
8131- GET_THREAD_INFO(%r10)
8132- orl $TS_COMPAT,TI_status(%r10)
8133- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8134+ pax_enter_kernel_user
8135+ /*
8136+ * No need to follow this irqs on/off section: the syscall
8137+ * disabled irqs and here we enable it straight after entry:
8138+ */
8139+ ENABLE_INTERRUPTS(CLBR_NONE)
8140+ GET_THREAD_INFO(%r11)
8141+ orl $TS_COMPAT,TI_status(%r11)
8142+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8143 jnz ia32_tracesys
8144 cmpq $(IA32_NR_syscalls-1),%rax
8145 ja ia32_badsys
8146@@ -448,6 +505,9 @@ ia32_tracesys:
8147 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8148 movq %rsp,%rdi /* &pt_regs -> arg1 */
8149 call syscall_trace_enter
8150+
8151+ pax_erase_kstack
8152+
8153 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8154 RESTORE_REST
8155 cmpq $(IA32_NR_syscalls-1),%rax
8156@@ -462,6 +522,7 @@ ia32_badsys:
8157
8158 quiet_ni_syscall:
8159 movq $-ENOSYS,%rax
8160+ pax_force_retaddr
8161 ret
8162 CFI_ENDPROC
8163
8164diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8165index 016218c..47ccbdd 100644
8166--- a/arch/x86/ia32/sys_ia32.c
8167+++ b/arch/x86/ia32/sys_ia32.c
8168@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8169 */
8170 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8171 {
8172- typeof(ubuf->st_uid) uid = 0;
8173- typeof(ubuf->st_gid) gid = 0;
8174+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8175+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8176 SET_UID(uid, stat->uid);
8177 SET_GID(gid, stat->gid);
8178 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8179@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8180 }
8181 set_fs(KERNEL_DS);
8182 ret = sys_rt_sigprocmask(how,
8183- set ? (sigset_t __user *)&s : NULL,
8184- oset ? (sigset_t __user *)&s : NULL,
8185+ set ? (sigset_t __force_user *)&s : NULL,
8186+ oset ? (sigset_t __force_user *)&s : NULL,
8187 sigsetsize);
8188 set_fs(old_fs);
8189 if (ret)
8190@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8191 mm_segment_t old_fs = get_fs();
8192
8193 set_fs(KERNEL_DS);
8194- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8195+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8196 set_fs(old_fs);
8197 if (put_compat_timespec(&t, interval))
8198 return -EFAULT;
8199@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8200 mm_segment_t old_fs = get_fs();
8201
8202 set_fs(KERNEL_DS);
8203- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8204+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8205 set_fs(old_fs);
8206 if (!ret) {
8207 switch (_NSIG_WORDS) {
8208@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8209 if (copy_siginfo_from_user32(&info, uinfo))
8210 return -EFAULT;
8211 set_fs(KERNEL_DS);
8212- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8213+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8214 set_fs(old_fs);
8215 return ret;
8216 }
8217@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8218 return -EFAULT;
8219
8220 set_fs(KERNEL_DS);
8221- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8222+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8223 count);
8224 set_fs(old_fs);
8225
8226diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8227index e2077d3..b7a8919 100644
8228--- a/arch/x86/include/asm/alternative-asm.h
8229+++ b/arch/x86/include/asm/alternative-asm.h
8230@@ -8,10 +8,10 @@
8231
8232 #ifdef CONFIG_SMP
8233 .macro LOCK_PREFIX
8234-1: lock
8235+672: lock
8236 .section .smp_locks,"a"
8237 .align 4
8238- X86_ALIGN 1b
8239+ X86_ALIGN 672b
8240 .previous
8241 .endm
8242 #else
8243@@ -19,4 +19,43 @@
8244 .endm
8245 #endif
8246
8247+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
8248+ .macro pax_force_retaddr_bts rip=0
8249+ btsq $63,\rip(%rsp)
8250+ .endm
8251+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8252+ .macro pax_force_retaddr rip=0, reload=0
8253+ btsq $63,\rip(%rsp)
8254+ .endm
8255+ .macro pax_force_fptr ptr
8256+ btsq $63,\ptr
8257+ .endm
8258+ .macro pax_set_fptr_mask
8259+ .endm
8260+#endif
8261+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8262+ .macro pax_force_retaddr rip=0, reload=0
8263+ .if \reload
8264+ pax_set_fptr_mask
8265+ .endif
8266+ orq %r10,\rip(%rsp)
8267+ .endm
8268+ .macro pax_force_fptr ptr
8269+ orq %r10,\ptr
8270+ .endm
8271+ .macro pax_set_fptr_mask
8272+ movabs $0x8000000000000000,%r10
8273+ .endm
8274+#endif
8275+#else
8276+ .macro pax_force_retaddr rip=0, reload=0
8277+ .endm
8278+ .macro pax_force_fptr ptr
8279+ .endm
8280+ .macro pax_force_retaddr_bts rip=0
8281+ .endm
8282+ .macro pax_set_fptr_mask
8283+ .endm
8284+#endif
8285+
8286 #endif /* __ASSEMBLY__ */
8287diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8288index c240efc..fdfadf3 100644
8289--- a/arch/x86/include/asm/alternative.h
8290+++ b/arch/x86/include/asm/alternative.h
8291@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8292 " .byte 662b-661b\n" /* sourcelen */ \
8293 " .byte 664f-663f\n" /* replacementlen */ \
8294 ".previous\n" \
8295- ".section .altinstr_replacement, \"ax\"\n" \
8296+ ".section .altinstr_replacement, \"a\"\n" \
8297 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8298 ".previous"
8299
8300diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8301index 474d80d..1f97d58 100644
8302--- a/arch/x86/include/asm/apic.h
8303+++ b/arch/x86/include/asm/apic.h
8304@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8305
8306 #ifdef CONFIG_X86_LOCAL_APIC
8307
8308-extern unsigned int apic_verbosity;
8309+extern int apic_verbosity;
8310 extern int local_apic_timer_c2_ok;
8311
8312 extern int disable_apic;
8313diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8314index 20370c6..a2eb9b0 100644
8315--- a/arch/x86/include/asm/apm.h
8316+++ b/arch/x86/include/asm/apm.h
8317@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8318 __asm__ __volatile__(APM_DO_ZERO_SEGS
8319 "pushl %%edi\n\t"
8320 "pushl %%ebp\n\t"
8321- "lcall *%%cs:apm_bios_entry\n\t"
8322+ "lcall *%%ss:apm_bios_entry\n\t"
8323 "setc %%al\n\t"
8324 "popl %%ebp\n\t"
8325 "popl %%edi\n\t"
8326@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8327 __asm__ __volatile__(APM_DO_ZERO_SEGS
8328 "pushl %%edi\n\t"
8329 "pushl %%ebp\n\t"
8330- "lcall *%%cs:apm_bios_entry\n\t"
8331+ "lcall *%%ss:apm_bios_entry\n\t"
8332 "setc %%bl\n\t"
8333 "popl %%ebp\n\t"
8334 "popl %%edi\n\t"
8335diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8336index dc5a667..939040c 100644
8337--- a/arch/x86/include/asm/atomic_32.h
8338+++ b/arch/x86/include/asm/atomic_32.h
8339@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8340 }
8341
8342 /**
8343+ * atomic_read_unchecked - read atomic variable
8344+ * @v: pointer of type atomic_unchecked_t
8345+ *
8346+ * Atomically reads the value of @v.
8347+ */
8348+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8349+{
8350+ return v->counter;
8351+}
8352+
8353+/**
8354 * atomic_set - set atomic variable
8355 * @v: pointer of type atomic_t
8356 * @i: required value
8357@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8358 }
8359
8360 /**
8361+ * atomic_set_unchecked - set atomic variable
8362+ * @v: pointer of type atomic_unchecked_t
8363+ * @i: required value
8364+ *
8365+ * Atomically sets the value of @v to @i.
8366+ */
8367+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8368+{
8369+ v->counter = i;
8370+}
8371+
8372+/**
8373 * atomic_add - add integer to atomic variable
8374 * @i: integer value to add
8375 * @v: pointer of type atomic_t
8376@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8377 */
8378 static inline void atomic_add(int i, atomic_t *v)
8379 {
8380- asm volatile(LOCK_PREFIX "addl %1,%0"
8381+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8382+
8383+#ifdef CONFIG_PAX_REFCOUNT
8384+ "jno 0f\n"
8385+ LOCK_PREFIX "subl %1,%0\n"
8386+ "int $4\n0:\n"
8387+ _ASM_EXTABLE(0b, 0b)
8388+#endif
8389+
8390+ : "+m" (v->counter)
8391+ : "ir" (i));
8392+}
8393+
8394+/**
8395+ * atomic_add_unchecked - add integer to atomic variable
8396+ * @i: integer value to add
8397+ * @v: pointer of type atomic_unchecked_t
8398+ *
8399+ * Atomically adds @i to @v.
8400+ */
8401+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8402+{
8403+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8404 : "+m" (v->counter)
8405 : "ir" (i));
8406 }
8407@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8408 */
8409 static inline void atomic_sub(int i, atomic_t *v)
8410 {
8411- asm volatile(LOCK_PREFIX "subl %1,%0"
8412+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8413+
8414+#ifdef CONFIG_PAX_REFCOUNT
8415+ "jno 0f\n"
8416+ LOCK_PREFIX "addl %1,%0\n"
8417+ "int $4\n0:\n"
8418+ _ASM_EXTABLE(0b, 0b)
8419+#endif
8420+
8421+ : "+m" (v->counter)
8422+ : "ir" (i));
8423+}
8424+
8425+/**
8426+ * atomic_sub_unchecked - subtract integer from atomic variable
8427+ * @i: integer value to subtract
8428+ * @v: pointer of type atomic_unchecked_t
8429+ *
8430+ * Atomically subtracts @i from @v.
8431+ */
8432+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8433+{
8434+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8435 : "+m" (v->counter)
8436 : "ir" (i));
8437 }
8438@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8439 {
8440 unsigned char c;
8441
8442- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8443+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8444+
8445+#ifdef CONFIG_PAX_REFCOUNT
8446+ "jno 0f\n"
8447+ LOCK_PREFIX "addl %2,%0\n"
8448+ "int $4\n0:\n"
8449+ _ASM_EXTABLE(0b, 0b)
8450+#endif
8451+
8452+ "sete %1\n"
8453 : "+m" (v->counter), "=qm" (c)
8454 : "ir" (i) : "memory");
8455 return c;
8456@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8457 */
8458 static inline void atomic_inc(atomic_t *v)
8459 {
8460- asm volatile(LOCK_PREFIX "incl %0"
8461+ asm volatile(LOCK_PREFIX "incl %0\n"
8462+
8463+#ifdef CONFIG_PAX_REFCOUNT
8464+ "jno 0f\n"
8465+ LOCK_PREFIX "decl %0\n"
8466+ "int $4\n0:\n"
8467+ _ASM_EXTABLE(0b, 0b)
8468+#endif
8469+
8470+ : "+m" (v->counter));
8471+}
8472+
8473+/**
8474+ * atomic_inc_unchecked - increment atomic variable
8475+ * @v: pointer of type atomic_unchecked_t
8476+ *
8477+ * Atomically increments @v by 1.
8478+ */
8479+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8480+{
8481+ asm volatile(LOCK_PREFIX "incl %0\n"
8482 : "+m" (v->counter));
8483 }
8484
8485@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8486 */
8487 static inline void atomic_dec(atomic_t *v)
8488 {
8489- asm volatile(LOCK_PREFIX "decl %0"
8490+ asm volatile(LOCK_PREFIX "decl %0\n"
8491+
8492+#ifdef CONFIG_PAX_REFCOUNT
8493+ "jno 0f\n"
8494+ LOCK_PREFIX "incl %0\n"
8495+ "int $4\n0:\n"
8496+ _ASM_EXTABLE(0b, 0b)
8497+#endif
8498+
8499+ : "+m" (v->counter));
8500+}
8501+
8502+/**
8503+ * atomic_dec_unchecked - decrement atomic variable
8504+ * @v: pointer of type atomic_unchecked_t
8505+ *
8506+ * Atomically decrements @v by 1.
8507+ */
8508+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8509+{
8510+ asm volatile(LOCK_PREFIX "decl %0\n"
8511 : "+m" (v->counter));
8512 }
8513
8514@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8515 {
8516 unsigned char c;
8517
8518- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8519+ asm volatile(LOCK_PREFIX "decl %0\n"
8520+
8521+#ifdef CONFIG_PAX_REFCOUNT
8522+ "jno 0f\n"
8523+ LOCK_PREFIX "incl %0\n"
8524+ "int $4\n0:\n"
8525+ _ASM_EXTABLE(0b, 0b)
8526+#endif
8527+
8528+ "sete %1\n"
8529 : "+m" (v->counter), "=qm" (c)
8530 : : "memory");
8531 return c != 0;
8532@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8533 {
8534 unsigned char c;
8535
8536- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8537+ asm volatile(LOCK_PREFIX "incl %0\n"
8538+
8539+#ifdef CONFIG_PAX_REFCOUNT
8540+ "jno 0f\n"
8541+ LOCK_PREFIX "decl %0\n"
8542+ "into\n0:\n"
8543+ _ASM_EXTABLE(0b, 0b)
8544+#endif
8545+
8546+ "sete %1\n"
8547+ : "+m" (v->counter), "=qm" (c)
8548+ : : "memory");
8549+ return c != 0;
8550+}
8551+
8552+/**
8553+ * atomic_inc_and_test_unchecked - increment and test
8554+ * @v: pointer of type atomic_unchecked_t
8555+ *
8556+ * Atomically increments @v by 1
8557+ * and returns true if the result is zero, or false for all
8558+ * other cases.
8559+ */
8560+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8561+{
8562+ unsigned char c;
8563+
8564+ asm volatile(LOCK_PREFIX "incl %0\n"
8565+ "sete %1\n"
8566 : "+m" (v->counter), "=qm" (c)
8567 : : "memory");
8568 return c != 0;
8569@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8570 {
8571 unsigned char c;
8572
8573- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8574+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8575+
8576+#ifdef CONFIG_PAX_REFCOUNT
8577+ "jno 0f\n"
8578+ LOCK_PREFIX "subl %2,%0\n"
8579+ "int $4\n0:\n"
8580+ _ASM_EXTABLE(0b, 0b)
8581+#endif
8582+
8583+ "sets %1\n"
8584 : "+m" (v->counter), "=qm" (c)
8585 : "ir" (i) : "memory");
8586 return c;
8587@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8588 #endif
8589 /* Modern 486+ processor */
8590 __i = i;
8591- asm volatile(LOCK_PREFIX "xaddl %0, %1"
8592+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8593+
8594+#ifdef CONFIG_PAX_REFCOUNT
8595+ "jno 0f\n"
8596+ "movl %0, %1\n"
8597+ "int $4\n0:\n"
8598+ _ASM_EXTABLE(0b, 0b)
8599+#endif
8600+
8601 : "+r" (i), "+m" (v->counter)
8602 : : "memory");
8603 return i + __i;
8604@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8605 }
8606
8607 /**
8608+ * atomic_add_return_unchecked - add integer and return
8609+ * @v: pointer of type atomic_unchecked_t
8610+ * @i: integer value to add
8611+ *
8612+ * Atomically adds @i to @v and returns @i + @v
8613+ */
8614+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8615+{
8616+ int __i;
8617+#ifdef CONFIG_M386
8618+ unsigned long flags;
8619+ if (unlikely(boot_cpu_data.x86 <= 3))
8620+ goto no_xadd;
8621+#endif
8622+ /* Modern 486+ processor */
8623+ __i = i;
8624+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
8625+ : "+r" (i), "+m" (v->counter)
8626+ : : "memory");
8627+ return i + __i;
8628+
8629+#ifdef CONFIG_M386
8630+no_xadd: /* Legacy 386 processor */
8631+ local_irq_save(flags);
8632+ __i = atomic_read_unchecked(v);
8633+ atomic_set_unchecked(v, i + __i);
8634+ local_irq_restore(flags);
8635+ return i + __i;
8636+#endif
8637+}
8638+
8639+/**
8640 * atomic_sub_return - subtract integer and return
8641 * @v: pointer of type atomic_t
8642 * @i: integer value to subtract
8643@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8644 return cmpxchg(&v->counter, old, new);
8645 }
8646
8647+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8648+{
8649+ return cmpxchg(&v->counter, old, new);
8650+}
8651+
8652 static inline int atomic_xchg(atomic_t *v, int new)
8653 {
8654 return xchg(&v->counter, new);
8655 }
8656
8657+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8658+{
8659+ return xchg(&v->counter, new);
8660+}
8661+
8662 /**
8663 * atomic_add_unless - add unless the number is already a given value
8664 * @v: pointer of type atomic_t
8665@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8666 */
8667 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8668 {
8669- int c, old;
8670+ int c, old, new;
8671 c = atomic_read(v);
8672 for (;;) {
8673- if (unlikely(c == (u)))
8674+ if (unlikely(c == u))
8675 break;
8676- old = atomic_cmpxchg((v), c, c + (a));
8677+
8678+ asm volatile("addl %2,%0\n"
8679+
8680+#ifdef CONFIG_PAX_REFCOUNT
8681+ "jno 0f\n"
8682+ "subl %2,%0\n"
8683+ "int $4\n0:\n"
8684+ _ASM_EXTABLE(0b, 0b)
8685+#endif
8686+
8687+ : "=r" (new)
8688+ : "0" (c), "ir" (a));
8689+
8690+ old = atomic_cmpxchg(v, c, new);
8691 if (likely(old == c))
8692 break;
8693 c = old;
8694 }
8695- return c != (u);
8696+ return c != u;
8697 }
8698
8699 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8700
8701 #define atomic_inc_return(v) (atomic_add_return(1, v))
8702+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8703+{
8704+ return atomic_add_return_unchecked(1, v);
8705+}
8706 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8707
8708 /* These are x86-specific, used by some header files */
8709@@ -266,9 +495,18 @@ typedef struct {
8710 u64 __aligned(8) counter;
8711 } atomic64_t;
8712
8713+#ifdef CONFIG_PAX_REFCOUNT
8714+typedef struct {
8715+ u64 __aligned(8) counter;
8716+} atomic64_unchecked_t;
8717+#else
8718+typedef atomic64_t atomic64_unchecked_t;
8719+#endif
8720+
8721 #define ATOMIC64_INIT(val) { (val) }
8722
8723 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8724+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8725
8726 /**
8727 * atomic64_xchg - xchg atomic64 variable
8728@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8729 * the old value.
8730 */
8731 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8732+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8733
8734 /**
8735 * atomic64_set - set atomic64 variable
8736@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8737 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8738
8739 /**
8740+ * atomic64_unchecked_set - set atomic64 variable
8741+ * @ptr: pointer to type atomic64_unchecked_t
8742+ * @new_val: value to assign
8743+ *
8744+ * Atomically sets the value of @ptr to @new_val.
8745+ */
8746+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8747+
8748+/**
8749 * atomic64_read - read atomic64 variable
8750 * @ptr: pointer to type atomic64_t
8751 *
8752@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8753 return res;
8754 }
8755
8756-extern u64 atomic64_read(atomic64_t *ptr);
8757+/**
8758+ * atomic64_read_unchecked - read atomic64 variable
8759+ * @ptr: pointer to type atomic64_unchecked_t
8760+ *
8761+ * Atomically reads the value of @ptr and returns it.
8762+ */
8763+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8764+{
8765+ u64 res;
8766+
8767+ /*
8768+ * Note, we inline this atomic64_unchecked_t primitive because
8769+ * it only clobbers EAX/EDX and leaves the others
8770+ * untouched. We also (somewhat subtly) rely on the
8771+ * fact that cmpxchg8b returns the current 64-bit value
8772+ * of the memory location we are touching:
8773+ */
8774+ asm volatile(
8775+ "mov %%ebx, %%eax\n\t"
8776+ "mov %%ecx, %%edx\n\t"
8777+ LOCK_PREFIX "cmpxchg8b %1\n"
8778+ : "=&A" (res)
8779+ : "m" (*ptr)
8780+ );
8781+
8782+ return res;
8783+}
8784
8785 /**
8786 * atomic64_add_return - add and return
8787@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
8788 * Other variants with different arithmetic operators:
8789 */
8790 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
8791+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8792 extern u64 atomic64_inc_return(atomic64_t *ptr);
8793+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
8794 extern u64 atomic64_dec_return(atomic64_t *ptr);
8795+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
8796
8797 /**
8798 * atomic64_add - add integer to atomic64 variable
8799@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
8800 extern void atomic64_add(u64 delta, atomic64_t *ptr);
8801
8802 /**
8803+ * atomic64_add_unchecked - add integer to atomic64 variable
8804+ * @delta: integer value to add
8805+ * @ptr: pointer to type atomic64_unchecked_t
8806+ *
8807+ * Atomically adds @delta to @ptr.
8808+ */
8809+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8810+
8811+/**
8812 * atomic64_sub - subtract the atomic64 variable
8813 * @delta: integer value to subtract
8814 * @ptr: pointer to type atomic64_t
8815@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
8816 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
8817
8818 /**
8819+ * atomic64_sub_unchecked - subtract the atomic64 variable
8820+ * @delta: integer value to subtract
8821+ * @ptr: pointer to type atomic64_unchecked_t
8822+ *
8823+ * Atomically subtracts @delta from @ptr.
8824+ */
8825+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8826+
8827+/**
8828 * atomic64_sub_and_test - subtract value from variable and test result
8829 * @delta: integer value to subtract
8830 * @ptr: pointer to type atomic64_t
8831@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
8832 extern void atomic64_inc(atomic64_t *ptr);
8833
8834 /**
8835+ * atomic64_inc_unchecked - increment atomic64 variable
8836+ * @ptr: pointer to type atomic64_unchecked_t
8837+ *
8838+ * Atomically increments @ptr by 1.
8839+ */
8840+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
8841+
8842+/**
8843 * atomic64_dec - decrement atomic64 variable
8844 * @ptr: pointer to type atomic64_t
8845 *
8846@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
8847 extern void atomic64_dec(atomic64_t *ptr);
8848
8849 /**
8850+ * atomic64_dec_unchecked - decrement atomic64 variable
8851+ * @ptr: pointer to type atomic64_unchecked_t
8852+ *
8853+ * Atomically decrements @ptr by 1.
8854+ */
8855+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
8856+
8857+/**
8858 * atomic64_dec_and_test - decrement and test
8859 * @ptr: pointer to type atomic64_t
8860 *
8861diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
8862index d605dc2..fafd7bd 100644
8863--- a/arch/x86/include/asm/atomic_64.h
8864+++ b/arch/x86/include/asm/atomic_64.h
8865@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
8866 }
8867
8868 /**
8869+ * atomic_read_unchecked - read atomic variable
8870+ * @v: pointer of type atomic_unchecked_t
8871+ *
8872+ * Atomically reads the value of @v.
8873+ */
8874+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8875+{
8876+ return v->counter;
8877+}
8878+
8879+/**
8880 * atomic_set - set atomic variable
8881 * @v: pointer of type atomic_t
8882 * @i: required value
8883@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
8884 }
8885
8886 /**
8887+ * atomic_set_unchecked - set atomic variable
8888+ * @v: pointer of type atomic_unchecked_t
8889+ * @i: required value
8890+ *
8891+ * Atomically sets the value of @v to @i.
8892+ */
8893+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8894+{
8895+ v->counter = i;
8896+}
8897+
8898+/**
8899 * atomic_add - add integer to atomic variable
8900 * @i: integer value to add
8901 * @v: pointer of type atomic_t
8902@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
8903 */
8904 static inline void atomic_add(int i, atomic_t *v)
8905 {
8906- asm volatile(LOCK_PREFIX "addl %1,%0"
8907+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8908+
8909+#ifdef CONFIG_PAX_REFCOUNT
8910+ "jno 0f\n"
8911+ LOCK_PREFIX "subl %1,%0\n"
8912+ "int $4\n0:\n"
8913+ _ASM_EXTABLE(0b, 0b)
8914+#endif
8915+
8916+ : "=m" (v->counter)
8917+ : "ir" (i), "m" (v->counter));
8918+}
8919+
8920+/**
8921+ * atomic_add_unchecked - add integer to atomic variable
8922+ * @i: integer value to add
8923+ * @v: pointer of type atomic_unchecked_t
8924+ *
8925+ * Atomically adds @i to @v.
8926+ */
8927+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8928+{
8929+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8930 : "=m" (v->counter)
8931 : "ir" (i), "m" (v->counter));
8932 }
8933@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
8934 */
8935 static inline void atomic_sub(int i, atomic_t *v)
8936 {
8937- asm volatile(LOCK_PREFIX "subl %1,%0"
8938+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8939+
8940+#ifdef CONFIG_PAX_REFCOUNT
8941+ "jno 0f\n"
8942+ LOCK_PREFIX "addl %1,%0\n"
8943+ "int $4\n0:\n"
8944+ _ASM_EXTABLE(0b, 0b)
8945+#endif
8946+
8947+ : "=m" (v->counter)
8948+ : "ir" (i), "m" (v->counter));
8949+}
8950+
8951+/**
8952+ * atomic_sub_unchecked - subtract the atomic variable
8953+ * @i: integer value to subtract
8954+ * @v: pointer of type atomic_unchecked_t
8955+ *
8956+ * Atomically subtracts @i from @v.
8957+ */
8958+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8959+{
8960+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8961 : "=m" (v->counter)
8962 : "ir" (i), "m" (v->counter));
8963 }
8964@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8965 {
8966 unsigned char c;
8967
8968- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8969+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8970+
8971+#ifdef CONFIG_PAX_REFCOUNT
8972+ "jno 0f\n"
8973+ LOCK_PREFIX "addl %2,%0\n"
8974+ "int $4\n0:\n"
8975+ _ASM_EXTABLE(0b, 0b)
8976+#endif
8977+
8978+ "sete %1\n"
8979 : "=m" (v->counter), "=qm" (c)
8980 : "ir" (i), "m" (v->counter) : "memory");
8981 return c;
8982@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8983 */
8984 static inline void atomic_inc(atomic_t *v)
8985 {
8986- asm volatile(LOCK_PREFIX "incl %0"
8987+ asm volatile(LOCK_PREFIX "incl %0\n"
8988+
8989+#ifdef CONFIG_PAX_REFCOUNT
8990+ "jno 0f\n"
8991+ LOCK_PREFIX "decl %0\n"
8992+ "int $4\n0:\n"
8993+ _ASM_EXTABLE(0b, 0b)
8994+#endif
8995+
8996+ : "=m" (v->counter)
8997+ : "m" (v->counter));
8998+}
8999+
9000+/**
9001+ * atomic_inc_unchecked - increment atomic variable
9002+ * @v: pointer of type atomic_unchecked_t
9003+ *
9004+ * Atomically increments @v by 1.
9005+ */
9006+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9007+{
9008+ asm volatile(LOCK_PREFIX "incl %0\n"
9009 : "=m" (v->counter)
9010 : "m" (v->counter));
9011 }
9012@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9013 */
9014 static inline void atomic_dec(atomic_t *v)
9015 {
9016- asm volatile(LOCK_PREFIX "decl %0"
9017+ asm volatile(LOCK_PREFIX "decl %0\n"
9018+
9019+#ifdef CONFIG_PAX_REFCOUNT
9020+ "jno 0f\n"
9021+ LOCK_PREFIX "incl %0\n"
9022+ "int $4\n0:\n"
9023+ _ASM_EXTABLE(0b, 0b)
9024+#endif
9025+
9026+ : "=m" (v->counter)
9027+ : "m" (v->counter));
9028+}
9029+
9030+/**
9031+ * atomic_dec_unchecked - decrement atomic variable
9032+ * @v: pointer of type atomic_unchecked_t
9033+ *
9034+ * Atomically decrements @v by 1.
9035+ */
9036+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9037+{
9038+ asm volatile(LOCK_PREFIX "decl %0\n"
9039 : "=m" (v->counter)
9040 : "m" (v->counter));
9041 }
9042@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9043 {
9044 unsigned char c;
9045
9046- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9047+ asm volatile(LOCK_PREFIX "decl %0\n"
9048+
9049+#ifdef CONFIG_PAX_REFCOUNT
9050+ "jno 0f\n"
9051+ LOCK_PREFIX "incl %0\n"
9052+ "int $4\n0:\n"
9053+ _ASM_EXTABLE(0b, 0b)
9054+#endif
9055+
9056+ "sete %1\n"
9057 : "=m" (v->counter), "=qm" (c)
9058 : "m" (v->counter) : "memory");
9059 return c != 0;
9060@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9061 {
9062 unsigned char c;
9063
9064- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9065+ asm volatile(LOCK_PREFIX "incl %0\n"
9066+
9067+#ifdef CONFIG_PAX_REFCOUNT
9068+ "jno 0f\n"
9069+ LOCK_PREFIX "decl %0\n"
9070+ "int $4\n0:\n"
9071+ _ASM_EXTABLE(0b, 0b)
9072+#endif
9073+
9074+ "sete %1\n"
9075+ : "=m" (v->counter), "=qm" (c)
9076+ : "m" (v->counter) : "memory");
9077+ return c != 0;
9078+}
9079+
9080+/**
9081+ * atomic_inc_and_test_unchecked - increment and test
9082+ * @v: pointer of type atomic_unchecked_t
9083+ *
9084+ * Atomically increments @v by 1
9085+ * and returns true if the result is zero, or false for all
9086+ * other cases.
9087+ */
9088+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9089+{
9090+ unsigned char c;
9091+
9092+ asm volatile(LOCK_PREFIX "incl %0\n"
9093+ "sete %1\n"
9094 : "=m" (v->counter), "=qm" (c)
9095 : "m" (v->counter) : "memory");
9096 return c != 0;
9097@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9098 {
9099 unsigned char c;
9100
9101- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9102+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9103+
9104+#ifdef CONFIG_PAX_REFCOUNT
9105+ "jno 0f\n"
9106+ LOCK_PREFIX "subl %2,%0\n"
9107+ "int $4\n0:\n"
9108+ _ASM_EXTABLE(0b, 0b)
9109+#endif
9110+
9111+ "sets %1\n"
9112 : "=m" (v->counter), "=qm" (c)
9113 : "ir" (i), "m" (v->counter) : "memory");
9114 return c;
9115@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9116 static inline int atomic_add_return(int i, atomic_t *v)
9117 {
9118 int __i = i;
9119- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9120+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9121+
9122+#ifdef CONFIG_PAX_REFCOUNT
9123+ "jno 0f\n"
9124+ "movl %0, %1\n"
9125+ "int $4\n0:\n"
9126+ _ASM_EXTABLE(0b, 0b)
9127+#endif
9128+
9129+ : "+r" (i), "+m" (v->counter)
9130+ : : "memory");
9131+ return i + __i;
9132+}
9133+
9134+/**
9135+ * atomic_add_return_unchecked - add and return
9136+ * @i: integer value to add
9137+ * @v: pointer of type atomic_unchecked_t
9138+ *
9139+ * Atomically adds @i to @v and returns @i + @v
9140+ */
9141+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9142+{
9143+ int __i = i;
9144+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9145 : "+r" (i), "+m" (v->counter)
9146 : : "memory");
9147 return i + __i;
9148@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9149 }
9150
9151 #define atomic_inc_return(v) (atomic_add_return(1, v))
9152+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9153+{
9154+ return atomic_add_return_unchecked(1, v);
9155+}
9156 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9157
9158 /* The 64-bit atomic type */
9159@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9160 }
9161
9162 /**
9163+ * atomic64_read_unchecked - read atomic64 variable
9164+ * @v: pointer of type atomic64_unchecked_t
9165+ *
9166+ * Atomically reads the value of @v.
9167+ * Doesn't imply a read memory barrier.
9168+ */
9169+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9170+{
9171+ return v->counter;
9172+}
9173+
9174+/**
9175 * atomic64_set - set atomic64 variable
9176 * @v: pointer to type atomic64_t
9177 * @i: required value
9178@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9179 }
9180
9181 /**
9182+ * atomic64_set_unchecked - set atomic64 variable
9183+ * @v: pointer to type atomic64_unchecked_t
9184+ * @i: required value
9185+ *
9186+ * Atomically sets the value of @v to @i.
9187+ */
9188+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9189+{
9190+ v->counter = i;
9191+}
9192+
9193+/**
9194 * atomic64_add - add integer to atomic64 variable
9195 * @i: integer value to add
9196 * @v: pointer to type atomic64_t
9197@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9198 */
9199 static inline void atomic64_add(long i, atomic64_t *v)
9200 {
9201+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9202+
9203+#ifdef CONFIG_PAX_REFCOUNT
9204+ "jno 0f\n"
9205+ LOCK_PREFIX "subq %1,%0\n"
9206+ "int $4\n0:\n"
9207+ _ASM_EXTABLE(0b, 0b)
9208+#endif
9209+
9210+ : "=m" (v->counter)
9211+ : "er" (i), "m" (v->counter));
9212+}
9213+
9214+/**
9215+ * atomic64_add_unchecked - add integer to atomic64 variable
9216+ * @i: integer value to add
9217+ * @v: pointer to type atomic64_unchecked_t
9218+ *
9219+ * Atomically adds @i to @v.
9220+ */
9221+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9222+{
9223 asm volatile(LOCK_PREFIX "addq %1,%0"
9224 : "=m" (v->counter)
9225 : "er" (i), "m" (v->counter));
9226@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9227 */
9228 static inline void atomic64_sub(long i, atomic64_t *v)
9229 {
9230- asm volatile(LOCK_PREFIX "subq %1,%0"
9231+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9232+
9233+#ifdef CONFIG_PAX_REFCOUNT
9234+ "jno 0f\n"
9235+ LOCK_PREFIX "addq %1,%0\n"
9236+ "int $4\n0:\n"
9237+ _ASM_EXTABLE(0b, 0b)
9238+#endif
9239+
9240 : "=m" (v->counter)
9241 : "er" (i), "m" (v->counter));
9242 }
9243@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9244 {
9245 unsigned char c;
9246
9247- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9248+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9249+
9250+#ifdef CONFIG_PAX_REFCOUNT
9251+ "jno 0f\n"
9252+ LOCK_PREFIX "addq %2,%0\n"
9253+ "int $4\n0:\n"
9254+ _ASM_EXTABLE(0b, 0b)
9255+#endif
9256+
9257+ "sete %1\n"
9258 : "=m" (v->counter), "=qm" (c)
9259 : "er" (i), "m" (v->counter) : "memory");
9260 return c;
9261@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9262 */
9263 static inline void atomic64_inc(atomic64_t *v)
9264 {
9265+ asm volatile(LOCK_PREFIX "incq %0\n"
9266+
9267+#ifdef CONFIG_PAX_REFCOUNT
9268+ "jno 0f\n"
9269+ LOCK_PREFIX "decq %0\n"
9270+ "int $4\n0:\n"
9271+ _ASM_EXTABLE(0b, 0b)
9272+#endif
9273+
9274+ : "=m" (v->counter)
9275+ : "m" (v->counter));
9276+}
9277+
9278+/**
9279+ * atomic64_inc_unchecked - increment atomic64 variable
9280+ * @v: pointer to type atomic64_unchecked_t
9281+ *
9282+ * Atomically increments @v by 1.
9283+ */
9284+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9285+{
9286 asm volatile(LOCK_PREFIX "incq %0"
9287 : "=m" (v->counter)
9288 : "m" (v->counter));
9289@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9290 */
9291 static inline void atomic64_dec(atomic64_t *v)
9292 {
9293- asm volatile(LOCK_PREFIX "decq %0"
9294+ asm volatile(LOCK_PREFIX "decq %0\n"
9295+
9296+#ifdef CONFIG_PAX_REFCOUNT
9297+ "jno 0f\n"
9298+ LOCK_PREFIX "incq %0\n"
9299+ "int $4\n0:\n"
9300+ _ASM_EXTABLE(0b, 0b)
9301+#endif
9302+
9303+ : "=m" (v->counter)
9304+ : "m" (v->counter));
9305+}
9306+
9307+/**
9308+ * atomic64_dec_unchecked - decrement atomic64 variable
9309+ * @v: pointer to type atomic64_t
9310+ *
9311+ * Atomically decrements @v by 1.
9312+ */
9313+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9314+{
9315+ asm volatile(LOCK_PREFIX "decq %0\n"
9316 : "=m" (v->counter)
9317 : "m" (v->counter));
9318 }
9319@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9320 {
9321 unsigned char c;
9322
9323- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9324+ asm volatile(LOCK_PREFIX "decq %0\n"
9325+
9326+#ifdef CONFIG_PAX_REFCOUNT
9327+ "jno 0f\n"
9328+ LOCK_PREFIX "incq %0\n"
9329+ "int $4\n0:\n"
9330+ _ASM_EXTABLE(0b, 0b)
9331+#endif
9332+
9333+ "sete %1\n"
9334 : "=m" (v->counter), "=qm" (c)
9335 : "m" (v->counter) : "memory");
9336 return c != 0;
9337@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9338 {
9339 unsigned char c;
9340
9341- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9342+ asm volatile(LOCK_PREFIX "incq %0\n"
9343+
9344+#ifdef CONFIG_PAX_REFCOUNT
9345+ "jno 0f\n"
9346+ LOCK_PREFIX "decq %0\n"
9347+ "int $4\n0:\n"
9348+ _ASM_EXTABLE(0b, 0b)
9349+#endif
9350+
9351+ "sete %1\n"
9352 : "=m" (v->counter), "=qm" (c)
9353 : "m" (v->counter) : "memory");
9354 return c != 0;
9355@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9356 {
9357 unsigned char c;
9358
9359- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9360+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9361+
9362+#ifdef CONFIG_PAX_REFCOUNT
9363+ "jno 0f\n"
9364+ LOCK_PREFIX "subq %2,%0\n"
9365+ "int $4\n0:\n"
9366+ _ASM_EXTABLE(0b, 0b)
9367+#endif
9368+
9369+ "sets %1\n"
9370 : "=m" (v->counter), "=qm" (c)
9371 : "er" (i), "m" (v->counter) : "memory");
9372 return c;
9373@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9374 static inline long atomic64_add_return(long i, atomic64_t *v)
9375 {
9376 long __i = i;
9377- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9378+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9379+
9380+#ifdef CONFIG_PAX_REFCOUNT
9381+ "jno 0f\n"
9382+ "movq %0, %1\n"
9383+ "int $4\n0:\n"
9384+ _ASM_EXTABLE(0b, 0b)
9385+#endif
9386+
9387+ : "+r" (i), "+m" (v->counter)
9388+ : : "memory");
9389+ return i + __i;
9390+}
9391+
9392+/**
9393+ * atomic64_add_return_unchecked - add and return
9394+ * @i: integer value to add
9395+ * @v: pointer to type atomic64_unchecked_t
9396+ *
9397+ * Atomically adds @i to @v and returns @i + @v
9398+ */
9399+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9400+{
9401+ long __i = i;
9402+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
9403 : "+r" (i), "+m" (v->counter)
9404 : : "memory");
9405 return i + __i;
9406@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9407 }
9408
9409 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9410+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9411+{
9412+ return atomic64_add_return_unchecked(1, v);
9413+}
9414 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9415
9416 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9417@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9418 return cmpxchg(&v->counter, old, new);
9419 }
9420
9421+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9422+{
9423+ return cmpxchg(&v->counter, old, new);
9424+}
9425+
9426 static inline long atomic64_xchg(atomic64_t *v, long new)
9427 {
9428 return xchg(&v->counter, new);
9429 }
9430
9431+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9432+{
9433+ return xchg(&v->counter, new);
9434+}
9435+
9436 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9437 {
9438 return cmpxchg(&v->counter, old, new);
9439 }
9440
9441+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9442+{
9443+ return cmpxchg(&v->counter, old, new);
9444+}
9445+
9446 static inline long atomic_xchg(atomic_t *v, int new)
9447 {
9448 return xchg(&v->counter, new);
9449 }
9450
9451+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9452+{
9453+ return xchg(&v->counter, new);
9454+}
9455+
9456 /**
9457 * atomic_add_unless - add unless the number is a given value
9458 * @v: pointer of type atomic_t
9459@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9460 */
9461 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9462 {
9463- int c, old;
9464+ int c, old, new;
9465 c = atomic_read(v);
9466 for (;;) {
9467- if (unlikely(c == (u)))
9468+ if (unlikely(c == u))
9469 break;
9470- old = atomic_cmpxchg((v), c, c + (a));
9471+
9472+ asm volatile("addl %2,%0\n"
9473+
9474+#ifdef CONFIG_PAX_REFCOUNT
9475+ "jno 0f\n"
9476+ "subl %2,%0\n"
9477+ "int $4\n0:\n"
9478+ _ASM_EXTABLE(0b, 0b)
9479+#endif
9480+
9481+ : "=r" (new)
9482+ : "0" (c), "ir" (a));
9483+
9484+ old = atomic_cmpxchg(v, c, new);
9485 if (likely(old == c))
9486 break;
9487 c = old;
9488 }
9489- return c != (u);
9490+ return c != u;
9491 }
9492
9493 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9494@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9495 */
9496 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9497 {
9498- long c, old;
9499+ long c, old, new;
9500 c = atomic64_read(v);
9501 for (;;) {
9502- if (unlikely(c == (u)))
9503+ if (unlikely(c == u))
9504 break;
9505- old = atomic64_cmpxchg((v), c, c + (a));
9506+
9507+ asm volatile("addq %2,%0\n"
9508+
9509+#ifdef CONFIG_PAX_REFCOUNT
9510+ "jno 0f\n"
9511+ "subq %2,%0\n"
9512+ "int $4\n0:\n"
9513+ _ASM_EXTABLE(0b, 0b)
9514+#endif
9515+
9516+ : "=r" (new)
9517+ : "0" (c), "er" (a));
9518+
9519+ old = atomic64_cmpxchg(v, c, new);
9520 if (likely(old == c))
9521 break;
9522 c = old;
9523 }
9524- return c != (u);
9525+ return c != u;
9526 }
9527
9528 /**
9529diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9530index 02b47a6..d5c4b15 100644
9531--- a/arch/x86/include/asm/bitops.h
9532+++ b/arch/x86/include/asm/bitops.h
9533@@ -38,7 +38,7 @@
9534 * a mask operation on a byte.
9535 */
9536 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9537-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9538+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9539 #define CONST_MASK(nr) (1 << ((nr) & 7))
9540
9541 /**
9542diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9543index 7a10659..8bbf355 100644
9544--- a/arch/x86/include/asm/boot.h
9545+++ b/arch/x86/include/asm/boot.h
9546@@ -11,10 +11,15 @@
9547 #include <asm/pgtable_types.h>
9548
9549 /* Physical address where kernel should be loaded. */
9550-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9551+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9552 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9553 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9554
9555+#ifndef __ASSEMBLY__
9556+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9557+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9558+#endif
9559+
9560 /* Minimum kernel alignment, as a power of two */
9561 #ifdef CONFIG_X86_64
9562 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9563diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9564index 549860d..7d45f68 100644
9565--- a/arch/x86/include/asm/cache.h
9566+++ b/arch/x86/include/asm/cache.h
9567@@ -5,9 +5,10 @@
9568
9569 /* L1 cache line size */
9570 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9571-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9572+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9573
9574 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9575+#define __read_only __attribute__((__section__(".data.read_only")))
9576
9577 #ifdef CONFIG_X86_VSMP
9578 /* vSMP Internode cacheline shift */
9579diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9580index b54f6af..5b376a6 100644
9581--- a/arch/x86/include/asm/cacheflush.h
9582+++ b/arch/x86/include/asm/cacheflush.h
9583@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9584 static inline unsigned long get_page_memtype(struct page *pg)
9585 {
9586 if (!PageUncached(pg) && !PageWC(pg))
9587- return -1;
9588+ return ~0UL;
9589 else if (!PageUncached(pg) && PageWC(pg))
9590 return _PAGE_CACHE_WC;
9591 else if (PageUncached(pg) && !PageWC(pg))
9592@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9593 SetPageWC(pg);
9594 break;
9595 default:
9596- case -1:
9597+ case ~0UL:
9598 ClearPageUncached(pg);
9599 ClearPageWC(pg);
9600 break;
9601diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9602index 0e63c9a..ab8d972 100644
9603--- a/arch/x86/include/asm/calling.h
9604+++ b/arch/x86/include/asm/calling.h
9605@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9606 * for assembly code:
9607 */
9608
9609-#define R15 0
9610-#define R14 8
9611-#define R13 16
9612-#define R12 24
9613-#define RBP 32
9614-#define RBX 40
9615+#define R15 (0)
9616+#define R14 (8)
9617+#define R13 (16)
9618+#define R12 (24)
9619+#define RBP (32)
9620+#define RBX (40)
9621
9622 /* arguments: interrupts/non tracing syscalls only save up to here: */
9623-#define R11 48
9624-#define R10 56
9625-#define R9 64
9626-#define R8 72
9627-#define RAX 80
9628-#define RCX 88
9629-#define RDX 96
9630-#define RSI 104
9631-#define RDI 112
9632-#define ORIG_RAX 120 /* + error_code */
9633+#define R11 (48)
9634+#define R10 (56)
9635+#define R9 (64)
9636+#define R8 (72)
9637+#define RAX (80)
9638+#define RCX (88)
9639+#define RDX (96)
9640+#define RSI (104)
9641+#define RDI (112)
9642+#define ORIG_RAX (120) /* + error_code */
9643 /* end of arguments */
9644
9645 /* cpu exception frame or undefined in case of fast syscall: */
9646-#define RIP 128
9647-#define CS 136
9648-#define EFLAGS 144
9649-#define RSP 152
9650-#define SS 160
9651+#define RIP (128)
9652+#define CS (136)
9653+#define EFLAGS (144)
9654+#define RSP (152)
9655+#define SS (160)
9656
9657 #define ARGOFFSET R11
9658 #define SWFRAME ORIG_RAX
9659diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9660index 46fc474..b02b0f9 100644
9661--- a/arch/x86/include/asm/checksum_32.h
9662+++ b/arch/x86/include/asm/checksum_32.h
9663@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9664 int len, __wsum sum,
9665 int *src_err_ptr, int *dst_err_ptr);
9666
9667+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9668+ int len, __wsum sum,
9669+ int *src_err_ptr, int *dst_err_ptr);
9670+
9671+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9672+ int len, __wsum sum,
9673+ int *src_err_ptr, int *dst_err_ptr);
9674+
9675 /*
9676 * Note: when you get a NULL pointer exception here this means someone
9677 * passed in an incorrect kernel address to one of these functions.
9678@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9679 int *err_ptr)
9680 {
9681 might_sleep();
9682- return csum_partial_copy_generic((__force void *)src, dst,
9683+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9684 len, sum, err_ptr, NULL);
9685 }
9686
9687@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9688 {
9689 might_sleep();
9690 if (access_ok(VERIFY_WRITE, dst, len))
9691- return csum_partial_copy_generic(src, (__force void *)dst,
9692+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9693 len, sum, NULL, err_ptr);
9694
9695 if (len)
9696diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9697index 617bd56..7b047a1 100644
9698--- a/arch/x86/include/asm/desc.h
9699+++ b/arch/x86/include/asm/desc.h
9700@@ -4,6 +4,7 @@
9701 #include <asm/desc_defs.h>
9702 #include <asm/ldt.h>
9703 #include <asm/mmu.h>
9704+#include <asm/pgtable.h>
9705 #include <linux/smp.h>
9706
9707 static inline void fill_ldt(struct desc_struct *desc,
9708@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9709 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9710 desc->type = (info->read_exec_only ^ 1) << 1;
9711 desc->type |= info->contents << 2;
9712+ desc->type |= info->seg_not_present ^ 1;
9713 desc->s = 1;
9714 desc->dpl = 0x3;
9715 desc->p = info->seg_not_present ^ 1;
9716@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9717 }
9718
9719 extern struct desc_ptr idt_descr;
9720-extern gate_desc idt_table[];
9721-
9722-struct gdt_page {
9723- struct desc_struct gdt[GDT_ENTRIES];
9724-} __attribute__((aligned(PAGE_SIZE)));
9725-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9726+extern gate_desc idt_table[256];
9727
9728+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9729 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9730 {
9731- return per_cpu(gdt_page, cpu).gdt;
9732+ return cpu_gdt_table[cpu];
9733 }
9734
9735 #ifdef CONFIG_X86_64
9736@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9737 unsigned long base, unsigned dpl, unsigned flags,
9738 unsigned short seg)
9739 {
9740- gate->a = (seg << 16) | (base & 0xffff);
9741- gate->b = (base & 0xffff0000) |
9742- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9743+ gate->gate.offset_low = base;
9744+ gate->gate.seg = seg;
9745+ gate->gate.reserved = 0;
9746+ gate->gate.type = type;
9747+ gate->gate.s = 0;
9748+ gate->gate.dpl = dpl;
9749+ gate->gate.p = 1;
9750+ gate->gate.offset_high = base >> 16;
9751 }
9752
9753 #endif
9754@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9755 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9756 const gate_desc *gate)
9757 {
9758+ pax_open_kernel();
9759 memcpy(&idt[entry], gate, sizeof(*gate));
9760+ pax_close_kernel();
9761 }
9762
9763 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9764 const void *desc)
9765 {
9766+ pax_open_kernel();
9767 memcpy(&ldt[entry], desc, 8);
9768+ pax_close_kernel();
9769 }
9770
9771 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9772@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9773 size = sizeof(struct desc_struct);
9774 break;
9775 }
9776+
9777+ pax_open_kernel();
9778 memcpy(&gdt[entry], desc, size);
9779+ pax_close_kernel();
9780 }
9781
9782 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9783@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9784
9785 static inline void native_load_tr_desc(void)
9786 {
9787+ pax_open_kernel();
9788 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9789+ pax_close_kernel();
9790 }
9791
9792 static inline void native_load_gdt(const struct desc_ptr *dtr)
9793@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9794 unsigned int i;
9795 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9796
9797+ pax_open_kernel();
9798 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9799 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9800+ pax_close_kernel();
9801 }
9802
9803 #define _LDT_empty(info) \
9804@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9805 desc->limit = (limit >> 16) & 0xf;
9806 }
9807
9808-static inline void _set_gate(int gate, unsigned type, void *addr,
9809+static inline void _set_gate(int gate, unsigned type, const void *addr,
9810 unsigned dpl, unsigned ist, unsigned seg)
9811 {
9812 gate_desc s;
9813@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9814 * Pentium F0 0F bugfix can have resulted in the mapped
9815 * IDT being write-protected.
9816 */
9817-static inline void set_intr_gate(unsigned int n, void *addr)
9818+static inline void set_intr_gate(unsigned int n, const void *addr)
9819 {
9820 BUG_ON((unsigned)n > 0xFF);
9821 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9822@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9823 /*
9824 * This routine sets up an interrupt gate at directory privilege level 3.
9825 */
9826-static inline void set_system_intr_gate(unsigned int n, void *addr)
9827+static inline void set_system_intr_gate(unsigned int n, const void *addr)
9828 {
9829 BUG_ON((unsigned)n > 0xFF);
9830 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9831 }
9832
9833-static inline void set_system_trap_gate(unsigned int n, void *addr)
9834+static inline void set_system_trap_gate(unsigned int n, const void *addr)
9835 {
9836 BUG_ON((unsigned)n > 0xFF);
9837 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9838 }
9839
9840-static inline void set_trap_gate(unsigned int n, void *addr)
9841+static inline void set_trap_gate(unsigned int n, const void *addr)
9842 {
9843 BUG_ON((unsigned)n > 0xFF);
9844 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9845@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9846 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9847 {
9848 BUG_ON((unsigned)n > 0xFF);
9849- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9850+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9851 }
9852
9853-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9854+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9855 {
9856 BUG_ON((unsigned)n > 0xFF);
9857 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9858 }
9859
9860-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9861+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9862 {
9863 BUG_ON((unsigned)n > 0xFF);
9864 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9865 }
9866
9867+#ifdef CONFIG_X86_32
9868+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9869+{
9870+ struct desc_struct d;
9871+
9872+ if (likely(limit))
9873+ limit = (limit - 1UL) >> PAGE_SHIFT;
9874+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
9875+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9876+}
9877+#endif
9878+
9879 #endif /* _ASM_X86_DESC_H */
9880diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9881index 9d66848..6b4a691 100644
9882--- a/arch/x86/include/asm/desc_defs.h
9883+++ b/arch/x86/include/asm/desc_defs.h
9884@@ -31,6 +31,12 @@ struct desc_struct {
9885 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9886 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9887 };
9888+ struct {
9889+ u16 offset_low;
9890+ u16 seg;
9891+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9892+ unsigned offset_high: 16;
9893+ } gate;
9894 };
9895 } __attribute__((packed));
9896
9897diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
9898index cee34e9..a7c3fa2 100644
9899--- a/arch/x86/include/asm/device.h
9900+++ b/arch/x86/include/asm/device.h
9901@@ -6,7 +6,7 @@ struct dev_archdata {
9902 void *acpi_handle;
9903 #endif
9904 #ifdef CONFIG_X86_64
9905-struct dma_map_ops *dma_ops;
9906+ const struct dma_map_ops *dma_ops;
9907 #endif
9908 #ifdef CONFIG_DMAR
9909 void *iommu; /* hook for IOMMU specific extension */
9910diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
9911index 6a25d5d..786b202 100644
9912--- a/arch/x86/include/asm/dma-mapping.h
9913+++ b/arch/x86/include/asm/dma-mapping.h
9914@@ -25,9 +25,9 @@ extern int iommu_merge;
9915 extern struct device x86_dma_fallback_dev;
9916 extern int panic_on_overflow;
9917
9918-extern struct dma_map_ops *dma_ops;
9919+extern const struct dma_map_ops *dma_ops;
9920
9921-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9922+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
9923 {
9924 #ifdef CONFIG_X86_32
9925 return dma_ops;
9926@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9927 /* Make sure we keep the same behaviour */
9928 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
9929 {
9930- struct dma_map_ops *ops = get_dma_ops(dev);
9931+ const struct dma_map_ops *ops = get_dma_ops(dev);
9932 if (ops->mapping_error)
9933 return ops->mapping_error(dev, dma_addr);
9934
9935@@ -122,7 +122,7 @@ static inline void *
9936 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9937 gfp_t gfp)
9938 {
9939- struct dma_map_ops *ops = get_dma_ops(dev);
9940+ const struct dma_map_ops *ops = get_dma_ops(dev);
9941 void *memory;
9942
9943 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
9944@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9945 static inline void dma_free_coherent(struct device *dev, size_t size,
9946 void *vaddr, dma_addr_t bus)
9947 {
9948- struct dma_map_ops *ops = get_dma_ops(dev);
9949+ const struct dma_map_ops *ops = get_dma_ops(dev);
9950
9951 WARN_ON(irqs_disabled()); /* for portability */
9952
9953diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9954index 40b4e61..40d8133 100644
9955--- a/arch/x86/include/asm/e820.h
9956+++ b/arch/x86/include/asm/e820.h
9957@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
9958 #define ISA_END_ADDRESS 0x100000
9959 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
9960
9961-#define BIOS_BEGIN 0x000a0000
9962+#define BIOS_BEGIN 0x000c0000
9963 #define BIOS_END 0x00100000
9964
9965 #ifdef __KERNEL__
9966diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9967index 8ac9d9a..0a6c96e 100644
9968--- a/arch/x86/include/asm/elf.h
9969+++ b/arch/x86/include/asm/elf.h
9970@@ -257,7 +257,25 @@ extern int force_personality32;
9971 the loader. We need to make sure that it is out of the way of the program
9972 that it will "exec", and that there is sufficient room for the brk. */
9973
9974+#ifdef CONFIG_PAX_SEGMEXEC
9975+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9976+#else
9977 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9978+#endif
9979+
9980+#ifdef CONFIG_PAX_ASLR
9981+#ifdef CONFIG_X86_32
9982+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9983+
9984+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9985+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9986+#else
9987+#define PAX_ELF_ET_DYN_BASE 0x400000UL
9988+
9989+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9990+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9991+#endif
9992+#endif
9993
9994 /* This yields a mask that user programs can use to figure out what
9995 instruction set this CPU supports. This could be done in user space,
9996@@ -310,9 +328,7 @@ do { \
9997
9998 #define ARCH_DLINFO \
9999 do { \
10000- if (vdso_enabled) \
10001- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10002- (unsigned long)current->mm->context.vdso); \
10003+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10004 } while (0)
10005
10006 #define AT_SYSINFO 32
10007@@ -323,7 +339,7 @@ do { \
10008
10009 #endif /* !CONFIG_X86_32 */
10010
10011-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10012+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10013
10014 #define VDSO_ENTRY \
10015 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10016@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10017 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10018 #define compat_arch_setup_additional_pages syscall32_setup_pages
10019
10020-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10021-#define arch_randomize_brk arch_randomize_brk
10022-
10023 #endif /* _ASM_X86_ELF_H */
10024diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10025index cc70c1c..d96d011 100644
10026--- a/arch/x86/include/asm/emergency-restart.h
10027+++ b/arch/x86/include/asm/emergency-restart.h
10028@@ -15,6 +15,6 @@ enum reboot_type {
10029
10030 extern enum reboot_type reboot_type;
10031
10032-extern void machine_emergency_restart(void);
10033+extern void machine_emergency_restart(void) __noreturn;
10034
10035 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10036diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10037index 1f11ce4..7caabd1 100644
10038--- a/arch/x86/include/asm/futex.h
10039+++ b/arch/x86/include/asm/futex.h
10040@@ -12,16 +12,18 @@
10041 #include <asm/system.h>
10042
10043 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10044+ typecheck(u32 __user *, uaddr); \
10045 asm volatile("1:\t" insn "\n" \
10046 "2:\t.section .fixup,\"ax\"\n" \
10047 "3:\tmov\t%3, %1\n" \
10048 "\tjmp\t2b\n" \
10049 "\t.previous\n" \
10050 _ASM_EXTABLE(1b, 3b) \
10051- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10052+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10053 : "i" (-EFAULT), "0" (oparg), "1" (0))
10054
10055 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10056+ typecheck(u32 __user *, uaddr); \
10057 asm volatile("1:\tmovl %2, %0\n" \
10058 "\tmovl\t%0, %3\n" \
10059 "\t" insn "\n" \
10060@@ -34,10 +36,10 @@
10061 _ASM_EXTABLE(1b, 4b) \
10062 _ASM_EXTABLE(2b, 4b) \
10063 : "=&a" (oldval), "=&r" (ret), \
10064- "+m" (*uaddr), "=&r" (tem) \
10065+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10066 : "r" (oparg), "i" (-EFAULT), "1" (0))
10067
10068-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10069+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10070 {
10071 int op = (encoded_op >> 28) & 7;
10072 int cmp = (encoded_op >> 24) & 15;
10073@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10074
10075 switch (op) {
10076 case FUTEX_OP_SET:
10077- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10078+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10079 break;
10080 case FUTEX_OP_ADD:
10081- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10082+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10083 uaddr, oparg);
10084 break;
10085 case FUTEX_OP_OR:
10086@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10087 return ret;
10088 }
10089
10090-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10091+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10092 int newval)
10093 {
10094
10095@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10096 return -ENOSYS;
10097 #endif
10098
10099- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10100+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10101 return -EFAULT;
10102
10103- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10104+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10105 "2:\t.section .fixup, \"ax\"\n"
10106 "3:\tmov %2, %0\n"
10107 "\tjmp 2b\n"
10108 "\t.previous\n"
10109 _ASM_EXTABLE(1b, 3b)
10110- : "=a" (oldval), "+m" (*uaddr)
10111+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10112 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10113 : "memory"
10114 );
10115diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10116index ba180d9..3bad351 100644
10117--- a/arch/x86/include/asm/hw_irq.h
10118+++ b/arch/x86/include/asm/hw_irq.h
10119@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10120 extern void enable_IO_APIC(void);
10121
10122 /* Statistics */
10123-extern atomic_t irq_err_count;
10124-extern atomic_t irq_mis_count;
10125+extern atomic_unchecked_t irq_err_count;
10126+extern atomic_unchecked_t irq_mis_count;
10127
10128 /* EISA */
10129 extern void eisa_set_level_irq(unsigned int irq);
10130diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10131index 0b20bbb..4cb1396 100644
10132--- a/arch/x86/include/asm/i387.h
10133+++ b/arch/x86/include/asm/i387.h
10134@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10135 {
10136 int err;
10137
10138+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10139+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10140+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10141+#endif
10142+
10143 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10144 "2:\n"
10145 ".section .fixup,\"ax\"\n"
10146@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10147 {
10148 int err;
10149
10150+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10151+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10152+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10153+#endif
10154+
10155 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10156 "2:\n"
10157 ".section .fixup,\"ax\"\n"
10158@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10159 }
10160
10161 /* We need a safe address that is cheap to find and that is already
10162- in L1 during context switch. The best choices are unfortunately
10163- different for UP and SMP */
10164-#ifdef CONFIG_SMP
10165-#define safe_address (__per_cpu_offset[0])
10166-#else
10167-#define safe_address (kstat_cpu(0).cpustat.user)
10168-#endif
10169+ in L1 during context switch. */
10170+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10171
10172 /*
10173 * These must be called with preempt disabled
10174@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10175 struct thread_info *me = current_thread_info();
10176 preempt_disable();
10177 if (me->status & TS_USEDFPU)
10178- __save_init_fpu(me->task);
10179+ __save_init_fpu(current);
10180 else
10181 clts();
10182 }
10183diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10184index a299900..15c5410 100644
10185--- a/arch/x86/include/asm/io_32.h
10186+++ b/arch/x86/include/asm/io_32.h
10187@@ -3,6 +3,7 @@
10188
10189 #include <linux/string.h>
10190 #include <linux/compiler.h>
10191+#include <asm/processor.h>
10192
10193 /*
10194 * This file contains the definitions for the x86 IO instructions
10195@@ -42,6 +43,17 @@
10196
10197 #ifdef __KERNEL__
10198
10199+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10200+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10201+{
10202+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10203+}
10204+
10205+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10206+{
10207+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10208+}
10209+
10210 #include <asm-generic/iomap.h>
10211
10212 #include <linux/vmalloc.h>
10213diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10214index 2440678..c158b88 100644
10215--- a/arch/x86/include/asm/io_64.h
10216+++ b/arch/x86/include/asm/io_64.h
10217@@ -140,6 +140,17 @@ __OUTS(l)
10218
10219 #include <linux/vmalloc.h>
10220
10221+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10222+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10223+{
10224+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10225+}
10226+
10227+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10228+{
10229+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10230+}
10231+
10232 #include <asm-generic/iomap.h>
10233
10234 void __memcpy_fromio(void *, unsigned long, unsigned);
10235diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10236index fd6d21b..8b13915 100644
10237--- a/arch/x86/include/asm/iommu.h
10238+++ b/arch/x86/include/asm/iommu.h
10239@@ -3,7 +3,7 @@
10240
10241 extern void pci_iommu_shutdown(void);
10242 extern void no_iommu_init(void);
10243-extern struct dma_map_ops nommu_dma_ops;
10244+extern const struct dma_map_ops nommu_dma_ops;
10245 extern int force_iommu, no_iommu;
10246 extern int iommu_detected;
10247 extern int iommu_pass_through;
10248diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10249index 9e2b952..557206e 100644
10250--- a/arch/x86/include/asm/irqflags.h
10251+++ b/arch/x86/include/asm/irqflags.h
10252@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10253 sti; \
10254 sysexit
10255
10256+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10257+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10258+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10259+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10260+
10261 #else
10262 #define INTERRUPT_RETURN iret
10263 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10264diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10265index 4fe681d..bb6d40c 100644
10266--- a/arch/x86/include/asm/kprobes.h
10267+++ b/arch/x86/include/asm/kprobes.h
10268@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10269 #define BREAKPOINT_INSTRUCTION 0xcc
10270 #define RELATIVEJUMP_INSTRUCTION 0xe9
10271 #define MAX_INSN_SIZE 16
10272-#define MAX_STACK_SIZE 64
10273-#define MIN_STACK_SIZE(ADDR) \
10274- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10275- THREAD_SIZE - (unsigned long)(ADDR))) \
10276- ? (MAX_STACK_SIZE) \
10277- : (((unsigned long)current_thread_info()) + \
10278- THREAD_SIZE - (unsigned long)(ADDR)))
10279+#define MAX_STACK_SIZE 64UL
10280+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10281
10282 #define flush_insn_slot(p) do { } while (0)
10283
10284diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10285index 08bc2ff..2e88d1f 100644
10286--- a/arch/x86/include/asm/kvm_host.h
10287+++ b/arch/x86/include/asm/kvm_host.h
10288@@ -534,9 +534,9 @@ struct kvm_x86_ops {
10289 bool (*gb_page_enable)(void);
10290
10291 const struct trace_print_flags *exit_reasons_str;
10292-};
10293+} __do_const;
10294
10295-extern struct kvm_x86_ops *kvm_x86_ops;
10296+extern const struct kvm_x86_ops *kvm_x86_ops;
10297
10298 int kvm_mmu_module_init(void);
10299 void kvm_mmu_module_exit(void);
10300diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10301index 47b9b6f..815aaa1 100644
10302--- a/arch/x86/include/asm/local.h
10303+++ b/arch/x86/include/asm/local.h
10304@@ -18,26 +18,58 @@ typedef struct {
10305
10306 static inline void local_inc(local_t *l)
10307 {
10308- asm volatile(_ASM_INC "%0"
10309+ asm volatile(_ASM_INC "%0\n"
10310+
10311+#ifdef CONFIG_PAX_REFCOUNT
10312+ "jno 0f\n"
10313+ _ASM_DEC "%0\n"
10314+ "int $4\n0:\n"
10315+ _ASM_EXTABLE(0b, 0b)
10316+#endif
10317+
10318 : "+m" (l->a.counter));
10319 }
10320
10321 static inline void local_dec(local_t *l)
10322 {
10323- asm volatile(_ASM_DEC "%0"
10324+ asm volatile(_ASM_DEC "%0\n"
10325+
10326+#ifdef CONFIG_PAX_REFCOUNT
10327+ "jno 0f\n"
10328+ _ASM_INC "%0\n"
10329+ "int $4\n0:\n"
10330+ _ASM_EXTABLE(0b, 0b)
10331+#endif
10332+
10333 : "+m" (l->a.counter));
10334 }
10335
10336 static inline void local_add(long i, local_t *l)
10337 {
10338- asm volatile(_ASM_ADD "%1,%0"
10339+ asm volatile(_ASM_ADD "%1,%0\n"
10340+
10341+#ifdef CONFIG_PAX_REFCOUNT
10342+ "jno 0f\n"
10343+ _ASM_SUB "%1,%0\n"
10344+ "int $4\n0:\n"
10345+ _ASM_EXTABLE(0b, 0b)
10346+#endif
10347+
10348 : "+m" (l->a.counter)
10349 : "ir" (i));
10350 }
10351
10352 static inline void local_sub(long i, local_t *l)
10353 {
10354- asm volatile(_ASM_SUB "%1,%0"
10355+ asm volatile(_ASM_SUB "%1,%0\n"
10356+
10357+#ifdef CONFIG_PAX_REFCOUNT
10358+ "jno 0f\n"
10359+ _ASM_ADD "%1,%0\n"
10360+ "int $4\n0:\n"
10361+ _ASM_EXTABLE(0b, 0b)
10362+#endif
10363+
10364 : "+m" (l->a.counter)
10365 : "ir" (i));
10366 }
10367@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10368 {
10369 unsigned char c;
10370
10371- asm volatile(_ASM_SUB "%2,%0; sete %1"
10372+ asm volatile(_ASM_SUB "%2,%0\n"
10373+
10374+#ifdef CONFIG_PAX_REFCOUNT
10375+ "jno 0f\n"
10376+ _ASM_ADD "%2,%0\n"
10377+ "int $4\n0:\n"
10378+ _ASM_EXTABLE(0b, 0b)
10379+#endif
10380+
10381+ "sete %1\n"
10382 : "+m" (l->a.counter), "=qm" (c)
10383 : "ir" (i) : "memory");
10384 return c;
10385@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10386 {
10387 unsigned char c;
10388
10389- asm volatile(_ASM_DEC "%0; sete %1"
10390+ asm volatile(_ASM_DEC "%0\n"
10391+
10392+#ifdef CONFIG_PAX_REFCOUNT
10393+ "jno 0f\n"
10394+ _ASM_INC "%0\n"
10395+ "int $4\n0:\n"
10396+ _ASM_EXTABLE(0b, 0b)
10397+#endif
10398+
10399+ "sete %1\n"
10400 : "+m" (l->a.counter), "=qm" (c)
10401 : : "memory");
10402 return c != 0;
10403@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10404 {
10405 unsigned char c;
10406
10407- asm volatile(_ASM_INC "%0; sete %1"
10408+ asm volatile(_ASM_INC "%0\n"
10409+
10410+#ifdef CONFIG_PAX_REFCOUNT
10411+ "jno 0f\n"
10412+ _ASM_DEC "%0\n"
10413+ "int $4\n0:\n"
10414+ _ASM_EXTABLE(0b, 0b)
10415+#endif
10416+
10417+ "sete %1\n"
10418 : "+m" (l->a.counter), "=qm" (c)
10419 : : "memory");
10420 return c != 0;
10421@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10422 {
10423 unsigned char c;
10424
10425- asm volatile(_ASM_ADD "%2,%0; sets %1"
10426+ asm volatile(_ASM_ADD "%2,%0\n"
10427+
10428+#ifdef CONFIG_PAX_REFCOUNT
10429+ "jno 0f\n"
10430+ _ASM_SUB "%2,%0\n"
10431+ "int $4\n0:\n"
10432+ _ASM_EXTABLE(0b, 0b)
10433+#endif
10434+
10435+ "sets %1\n"
10436 : "+m" (l->a.counter), "=qm" (c)
10437 : "ir" (i) : "memory");
10438 return c;
10439@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10440 #endif
10441 /* Modern 486+ processor */
10442 __i = i;
10443- asm volatile(_ASM_XADD "%0, %1;"
10444+ asm volatile(_ASM_XADD "%0, %1\n"
10445+
10446+#ifdef CONFIG_PAX_REFCOUNT
10447+ "jno 0f\n"
10448+ _ASM_MOV "%0,%1\n"
10449+ "int $4\n0:\n"
10450+ _ASM_EXTABLE(0b, 0b)
10451+#endif
10452+
10453 : "+r" (i), "+m" (l->a.counter)
10454 : : "memory");
10455 return i + __i;
10456diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10457index ef51b50..514ba37 100644
10458--- a/arch/x86/include/asm/microcode.h
10459+++ b/arch/x86/include/asm/microcode.h
10460@@ -12,13 +12,13 @@ struct device;
10461 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10462
10463 struct microcode_ops {
10464- enum ucode_state (*request_microcode_user) (int cpu,
10465+ enum ucode_state (* const request_microcode_user) (int cpu,
10466 const void __user *buf, size_t size);
10467
10468- enum ucode_state (*request_microcode_fw) (int cpu,
10469+ enum ucode_state (* const request_microcode_fw) (int cpu,
10470 struct device *device);
10471
10472- void (*microcode_fini_cpu) (int cpu);
10473+ void (* const microcode_fini_cpu) (int cpu);
10474
10475 /*
10476 * The generic 'microcode_core' part guarantees that
10477@@ -38,18 +38,18 @@ struct ucode_cpu_info {
10478 extern struct ucode_cpu_info ucode_cpu_info[];
10479
10480 #ifdef CONFIG_MICROCODE_INTEL
10481-extern struct microcode_ops * __init init_intel_microcode(void);
10482+extern const struct microcode_ops * __init init_intel_microcode(void);
10483 #else
10484-static inline struct microcode_ops * __init init_intel_microcode(void)
10485+static inline const struct microcode_ops * __init init_intel_microcode(void)
10486 {
10487 return NULL;
10488 }
10489 #endif /* CONFIG_MICROCODE_INTEL */
10490
10491 #ifdef CONFIG_MICROCODE_AMD
10492-extern struct microcode_ops * __init init_amd_microcode(void);
10493+extern const struct microcode_ops * __init init_amd_microcode(void);
10494 #else
10495-static inline struct microcode_ops * __init init_amd_microcode(void)
10496+static inline const struct microcode_ops * __init init_amd_microcode(void)
10497 {
10498 return NULL;
10499 }
10500diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10501index 593e51d..fa69c9a 100644
10502--- a/arch/x86/include/asm/mman.h
10503+++ b/arch/x86/include/asm/mman.h
10504@@ -5,4 +5,14 @@
10505
10506 #include <asm-generic/mman.h>
10507
10508+#ifdef __KERNEL__
10509+#ifndef __ASSEMBLY__
10510+#ifdef CONFIG_X86_32
10511+#define arch_mmap_check i386_mmap_check
10512+int i386_mmap_check(unsigned long addr, unsigned long len,
10513+ unsigned long flags);
10514+#endif
10515+#endif
10516+#endif
10517+
10518 #endif /* _ASM_X86_MMAN_H */
10519diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10520index 80a1dee..239c67d 100644
10521--- a/arch/x86/include/asm/mmu.h
10522+++ b/arch/x86/include/asm/mmu.h
10523@@ -9,10 +9,23 @@
10524 * we put the segment information here.
10525 */
10526 typedef struct {
10527- void *ldt;
10528+ struct desc_struct *ldt;
10529 int size;
10530 struct mutex lock;
10531- void *vdso;
10532+ unsigned long vdso;
10533+
10534+#ifdef CONFIG_X86_32
10535+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10536+ unsigned long user_cs_base;
10537+ unsigned long user_cs_limit;
10538+
10539+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10540+ cpumask_t cpu_user_cs_mask;
10541+#endif
10542+
10543+#endif
10544+#endif
10545+
10546 } mm_context_t;
10547
10548 #ifdef CONFIG_SMP
10549diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10550index 8b5393e..8143173 100644
10551--- a/arch/x86/include/asm/mmu_context.h
10552+++ b/arch/x86/include/asm/mmu_context.h
10553@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10554
10555 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10556 {
10557+
10558+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10559+ unsigned int i;
10560+ pgd_t *pgd;
10561+
10562+ pax_open_kernel();
10563+ pgd = get_cpu_pgd(smp_processor_id());
10564+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10565+ set_pgd_batched(pgd+i, native_make_pgd(0));
10566+ pax_close_kernel();
10567+#endif
10568+
10569 #ifdef CONFIG_SMP
10570 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10571 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10572@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10573 struct task_struct *tsk)
10574 {
10575 unsigned cpu = smp_processor_id();
10576+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10577+ int tlbstate = TLBSTATE_OK;
10578+#endif
10579
10580 if (likely(prev != next)) {
10581 #ifdef CONFIG_SMP
10582+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10583+ tlbstate = percpu_read(cpu_tlbstate.state);
10584+#endif
10585 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10586 percpu_write(cpu_tlbstate.active_mm, next);
10587 #endif
10588 cpumask_set_cpu(cpu, mm_cpumask(next));
10589
10590 /* Re-load page tables */
10591+#ifdef CONFIG_PAX_PER_CPU_PGD
10592+ pax_open_kernel();
10593+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10594+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10595+ pax_close_kernel();
10596+ load_cr3(get_cpu_pgd(cpu));
10597+#else
10598 load_cr3(next->pgd);
10599+#endif
10600
10601 /* stop flush ipis for the previous mm */
10602 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10603@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10604 */
10605 if (unlikely(prev->context.ldt != next->context.ldt))
10606 load_LDT_nolock(&next->context);
10607- }
10608+
10609+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10610+ if (!nx_enabled) {
10611+ smp_mb__before_clear_bit();
10612+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10613+ smp_mb__after_clear_bit();
10614+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10615+ }
10616+#endif
10617+
10618+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10619+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10620+ prev->context.user_cs_limit != next->context.user_cs_limit))
10621+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10622 #ifdef CONFIG_SMP
10623+ else if (unlikely(tlbstate != TLBSTATE_OK))
10624+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10625+#endif
10626+#endif
10627+
10628+ }
10629 else {
10630+
10631+#ifdef CONFIG_PAX_PER_CPU_PGD
10632+ pax_open_kernel();
10633+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10634+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10635+ pax_close_kernel();
10636+ load_cr3(get_cpu_pgd(cpu));
10637+#endif
10638+
10639+#ifdef CONFIG_SMP
10640 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10641 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10642
10643@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10644 * tlb flush IPI delivery. We must reload CR3
10645 * to make sure to use no freed page tables.
10646 */
10647+
10648+#ifndef CONFIG_PAX_PER_CPU_PGD
10649 load_cr3(next->pgd);
10650+#endif
10651+
10652 load_LDT_nolock(&next->context);
10653+
10654+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10655+ if (!nx_enabled)
10656+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10657+#endif
10658+
10659+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10660+#ifdef CONFIG_PAX_PAGEEXEC
10661+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10662+#endif
10663+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10664+#endif
10665+
10666 }
10667+#endif
10668 }
10669-#endif
10670 }
10671
10672 #define activate_mm(prev, next) \
10673diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10674index 3e2ce58..caaf478 100644
10675--- a/arch/x86/include/asm/module.h
10676+++ b/arch/x86/include/asm/module.h
10677@@ -5,6 +5,7 @@
10678
10679 #ifdef CONFIG_X86_64
10680 /* X86_64 does not define MODULE_PROC_FAMILY */
10681+#define MODULE_PROC_FAMILY ""
10682 #elif defined CONFIG_M386
10683 #define MODULE_PROC_FAMILY "386 "
10684 #elif defined CONFIG_M486
10685@@ -59,13 +60,26 @@
10686 #error unknown processor family
10687 #endif
10688
10689-#ifdef CONFIG_X86_32
10690-# ifdef CONFIG_4KSTACKS
10691-# define MODULE_STACKSIZE "4KSTACKS "
10692-# else
10693-# define MODULE_STACKSIZE ""
10694-# endif
10695-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10696+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10697+#define MODULE_STACKSIZE "4KSTACKS "
10698+#else
10699+#define MODULE_STACKSIZE ""
10700 #endif
10701
10702+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10703+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10704+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10705+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10706+#else
10707+#define MODULE_PAX_KERNEXEC ""
10708+#endif
10709+
10710+#ifdef CONFIG_PAX_MEMORY_UDEREF
10711+#define MODULE_PAX_UDEREF "UDEREF "
10712+#else
10713+#define MODULE_PAX_UDEREF ""
10714+#endif
10715+
10716+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10717+
10718 #endif /* _ASM_X86_MODULE_H */
10719diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10720index 7639dbf..e08a58c 100644
10721--- a/arch/x86/include/asm/page_64_types.h
10722+++ b/arch/x86/include/asm/page_64_types.h
10723@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10724
10725 /* duplicated to the one in bootmem.h */
10726 extern unsigned long max_pfn;
10727-extern unsigned long phys_base;
10728+extern const unsigned long phys_base;
10729
10730 extern unsigned long __phys_addr(unsigned long);
10731 #define __phys_reloc_hide(x) (x)
10732diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10733index efb3899..ef30687 100644
10734--- a/arch/x86/include/asm/paravirt.h
10735+++ b/arch/x86/include/asm/paravirt.h
10736@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10737 val);
10738 }
10739
10740+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10741+{
10742+ pgdval_t val = native_pgd_val(pgd);
10743+
10744+ if (sizeof(pgdval_t) > sizeof(long))
10745+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10746+ val, (u64)val >> 32);
10747+ else
10748+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10749+ val);
10750+}
10751+
10752 static inline void pgd_clear(pgd_t *pgdp)
10753 {
10754 set_pgd(pgdp, __pgd(0));
10755@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10756 pv_mmu_ops.set_fixmap(idx, phys, flags);
10757 }
10758
10759+#ifdef CONFIG_PAX_KERNEXEC
10760+static inline unsigned long pax_open_kernel(void)
10761+{
10762+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10763+}
10764+
10765+static inline unsigned long pax_close_kernel(void)
10766+{
10767+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10768+}
10769+#else
10770+static inline unsigned long pax_open_kernel(void) { return 0; }
10771+static inline unsigned long pax_close_kernel(void) { return 0; }
10772+#endif
10773+
10774 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10775
10776 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10777@@ -945,7 +972,7 @@ extern void default_banner(void);
10778
10779 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10780 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10781-#define PARA_INDIRECT(addr) *%cs:addr
10782+#define PARA_INDIRECT(addr) *%ss:addr
10783 #endif
10784
10785 #define INTERRUPT_RETURN \
10786@@ -1022,6 +1049,21 @@ extern void default_banner(void);
10787 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10788 CLBR_NONE, \
10789 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10790+
10791+#define GET_CR0_INTO_RDI \
10792+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10793+ mov %rax,%rdi
10794+
10795+#define SET_RDI_INTO_CR0 \
10796+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10797+
10798+#define GET_CR3_INTO_RDI \
10799+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10800+ mov %rax,%rdi
10801+
10802+#define SET_RDI_INTO_CR3 \
10803+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10804+
10805 #endif /* CONFIG_X86_32 */
10806
10807 #endif /* __ASSEMBLY__ */
10808diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10809index 9357473..aeb2de5 100644
10810--- a/arch/x86/include/asm/paravirt_types.h
10811+++ b/arch/x86/include/asm/paravirt_types.h
10812@@ -78,19 +78,19 @@ struct pv_init_ops {
10813 */
10814 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10815 unsigned long addr, unsigned len);
10816-};
10817+} __no_const;
10818
10819
10820 struct pv_lazy_ops {
10821 /* Set deferred update mode, used for batching operations. */
10822 void (*enter)(void);
10823 void (*leave)(void);
10824-};
10825+} __no_const;
10826
10827 struct pv_time_ops {
10828 unsigned long long (*sched_clock)(void);
10829 unsigned long (*get_tsc_khz)(void);
10830-};
10831+} __no_const;
10832
10833 struct pv_cpu_ops {
10834 /* hooks for various privileged instructions */
10835@@ -186,7 +186,7 @@ struct pv_cpu_ops {
10836
10837 void (*start_context_switch)(struct task_struct *prev);
10838 void (*end_context_switch)(struct task_struct *next);
10839-};
10840+} __no_const;
10841
10842 struct pv_irq_ops {
10843 /*
10844@@ -217,7 +217,7 @@ struct pv_apic_ops {
10845 unsigned long start_eip,
10846 unsigned long start_esp);
10847 #endif
10848-};
10849+} __no_const;
10850
10851 struct pv_mmu_ops {
10852 unsigned long (*read_cr2)(void);
10853@@ -301,6 +301,7 @@ struct pv_mmu_ops {
10854 struct paravirt_callee_save make_pud;
10855
10856 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10857+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10858 #endif /* PAGETABLE_LEVELS == 4 */
10859 #endif /* PAGETABLE_LEVELS >= 3 */
10860
10861@@ -316,6 +317,12 @@ struct pv_mmu_ops {
10862 an mfn. We can tell which is which from the index. */
10863 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10864 phys_addr_t phys, pgprot_t flags);
10865+
10866+#ifdef CONFIG_PAX_KERNEXEC
10867+ unsigned long (*pax_open_kernel)(void);
10868+ unsigned long (*pax_close_kernel)(void);
10869+#endif
10870+
10871 };
10872
10873 struct raw_spinlock;
10874@@ -326,7 +333,7 @@ struct pv_lock_ops {
10875 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
10876 int (*spin_trylock)(struct raw_spinlock *lock);
10877 void (*spin_unlock)(struct raw_spinlock *lock);
10878-};
10879+} __no_const;
10880
10881 /* This contains all the paravirt structures: we get a convenient
10882 * number for each function using the offset which we use to indicate
10883diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
10884index b399988..3f47c38 100644
10885--- a/arch/x86/include/asm/pci_x86.h
10886+++ b/arch/x86/include/asm/pci_x86.h
10887@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
10888 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
10889
10890 struct pci_raw_ops {
10891- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10892+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10893 int reg, int len, u32 *val);
10894- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10895+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10896 int reg, int len, u32 val);
10897 };
10898
10899-extern struct pci_raw_ops *raw_pci_ops;
10900-extern struct pci_raw_ops *raw_pci_ext_ops;
10901+extern const struct pci_raw_ops *raw_pci_ops;
10902+extern const struct pci_raw_ops *raw_pci_ext_ops;
10903
10904-extern struct pci_raw_ops pci_direct_conf1;
10905+extern const struct pci_raw_ops pci_direct_conf1;
10906 extern bool port_cf9_safe;
10907
10908 /* arch_initcall level */
10909diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
10910index b65a36d..50345a4 100644
10911--- a/arch/x86/include/asm/percpu.h
10912+++ b/arch/x86/include/asm/percpu.h
10913@@ -78,6 +78,7 @@ do { \
10914 if (0) { \
10915 T__ tmp__; \
10916 tmp__ = (val); \
10917+ (void)tmp__; \
10918 } \
10919 switch (sizeof(var)) { \
10920 case 1: \
10921diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10922index 271de94..ef944d6 100644
10923--- a/arch/x86/include/asm/pgalloc.h
10924+++ b/arch/x86/include/asm/pgalloc.h
10925@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10926 pmd_t *pmd, pte_t *pte)
10927 {
10928 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10929+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10930+}
10931+
10932+static inline void pmd_populate_user(struct mm_struct *mm,
10933+ pmd_t *pmd, pte_t *pte)
10934+{
10935+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10936 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10937 }
10938
10939diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10940index 2334982..70bc412 100644
10941--- a/arch/x86/include/asm/pgtable-2level.h
10942+++ b/arch/x86/include/asm/pgtable-2level.h
10943@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10944
10945 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10946 {
10947+ pax_open_kernel();
10948 *pmdp = pmd;
10949+ pax_close_kernel();
10950 }
10951
10952 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10953diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10954index 33927d2..ccde329 100644
10955--- a/arch/x86/include/asm/pgtable-3level.h
10956+++ b/arch/x86/include/asm/pgtable-3level.h
10957@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10958
10959 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10960 {
10961+ pax_open_kernel();
10962 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10963+ pax_close_kernel();
10964 }
10965
10966 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10967 {
10968+ pax_open_kernel();
10969 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10970+ pax_close_kernel();
10971 }
10972
10973 /*
10974diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10975index af6fd36..867ff74 100644
10976--- a/arch/x86/include/asm/pgtable.h
10977+++ b/arch/x86/include/asm/pgtable.h
10978@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
10979
10980 #ifndef __PAGETABLE_PUD_FOLDED
10981 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10982+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10983 #define pgd_clear(pgd) native_pgd_clear(pgd)
10984 #endif
10985
10986@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
10987
10988 #define arch_end_context_switch(prev) do {} while(0)
10989
10990+#define pax_open_kernel() native_pax_open_kernel()
10991+#define pax_close_kernel() native_pax_close_kernel()
10992 #endif /* CONFIG_PARAVIRT */
10993
10994+#define __HAVE_ARCH_PAX_OPEN_KERNEL
10995+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10996+
10997+#ifdef CONFIG_PAX_KERNEXEC
10998+static inline unsigned long native_pax_open_kernel(void)
10999+{
11000+ unsigned long cr0;
11001+
11002+ preempt_disable();
11003+ barrier();
11004+ cr0 = read_cr0() ^ X86_CR0_WP;
11005+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11006+ write_cr0(cr0);
11007+ return cr0 ^ X86_CR0_WP;
11008+}
11009+
11010+static inline unsigned long native_pax_close_kernel(void)
11011+{
11012+ unsigned long cr0;
11013+
11014+ cr0 = read_cr0() ^ X86_CR0_WP;
11015+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11016+ write_cr0(cr0);
11017+ barrier();
11018+ preempt_enable_no_resched();
11019+ return cr0 ^ X86_CR0_WP;
11020+}
11021+#else
11022+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11023+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11024+#endif
11025+
11026 /*
11027 * The following only work if pte_present() is true.
11028 * Undefined behaviour if not..
11029 */
11030+static inline int pte_user(pte_t pte)
11031+{
11032+ return pte_val(pte) & _PAGE_USER;
11033+}
11034+
11035 static inline int pte_dirty(pte_t pte)
11036 {
11037 return pte_flags(pte) & _PAGE_DIRTY;
11038@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11039 return pte_clear_flags(pte, _PAGE_RW);
11040 }
11041
11042+static inline pte_t pte_mkread(pte_t pte)
11043+{
11044+ return __pte(pte_val(pte) | _PAGE_USER);
11045+}
11046+
11047 static inline pte_t pte_mkexec(pte_t pte)
11048 {
11049- return pte_clear_flags(pte, _PAGE_NX);
11050+#ifdef CONFIG_X86_PAE
11051+ if (__supported_pte_mask & _PAGE_NX)
11052+ return pte_clear_flags(pte, _PAGE_NX);
11053+ else
11054+#endif
11055+ return pte_set_flags(pte, _PAGE_USER);
11056+}
11057+
11058+static inline pte_t pte_exprotect(pte_t pte)
11059+{
11060+#ifdef CONFIG_X86_PAE
11061+ if (__supported_pte_mask & _PAGE_NX)
11062+ return pte_set_flags(pte, _PAGE_NX);
11063+ else
11064+#endif
11065+ return pte_clear_flags(pte, _PAGE_USER);
11066 }
11067
11068 static inline pte_t pte_mkdirty(pte_t pte)
11069@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11070 #endif
11071
11072 #ifndef __ASSEMBLY__
11073+
11074+#ifdef CONFIG_PAX_PER_CPU_PGD
11075+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11076+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11077+{
11078+ return cpu_pgd[cpu];
11079+}
11080+#endif
11081+
11082 #include <linux/mm_types.h>
11083
11084 static inline int pte_none(pte_t pte)
11085@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11086
11087 static inline int pgd_bad(pgd_t pgd)
11088 {
11089- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11090+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11091 }
11092
11093 static inline int pgd_none(pgd_t pgd)
11094@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11095 * pgd_offset() returns a (pgd_t *)
11096 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11097 */
11098-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11099+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11100+
11101+#ifdef CONFIG_PAX_PER_CPU_PGD
11102+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11103+#endif
11104+
11105 /*
11106 * a shortcut which implies the use of the kernel's pgd, instead
11107 * of a process's
11108@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11109 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11110 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11111
11112+#ifdef CONFIG_X86_32
11113+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11114+#else
11115+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11116+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11117+
11118+#ifdef CONFIG_PAX_MEMORY_UDEREF
11119+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11120+#else
11121+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11122+#endif
11123+
11124+#endif
11125+
11126 #ifndef __ASSEMBLY__
11127
11128 extern int direct_gbpages;
11129@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11130 * dst and src can be on the same page, but the range must not overlap,
11131 * and must not cross a page boundary.
11132 */
11133-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11134+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11135 {
11136- memcpy(dst, src, count * sizeof(pgd_t));
11137+ pax_open_kernel();
11138+ while (count--)
11139+ *dst++ = *src++;
11140+ pax_close_kernel();
11141 }
11142
11143+#ifdef CONFIG_PAX_PER_CPU_PGD
11144+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11145+#endif
11146+
11147+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11148+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11149+#else
11150+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11151+#endif
11152
11153 #include <asm-generic/pgtable.h>
11154 #endif /* __ASSEMBLY__ */
11155diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11156index 750f1bf..971e839 100644
11157--- a/arch/x86/include/asm/pgtable_32.h
11158+++ b/arch/x86/include/asm/pgtable_32.h
11159@@ -26,9 +26,6 @@
11160 struct mm_struct;
11161 struct vm_area_struct;
11162
11163-extern pgd_t swapper_pg_dir[1024];
11164-extern pgd_t trampoline_pg_dir[1024];
11165-
11166 static inline void pgtable_cache_init(void) { }
11167 static inline void check_pgt_cache(void) { }
11168 void paging_init(void);
11169@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11170 # include <asm/pgtable-2level.h>
11171 #endif
11172
11173+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11174+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11175+#ifdef CONFIG_X86_PAE
11176+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11177+#endif
11178+
11179 #if defined(CONFIG_HIGHPTE)
11180 #define __KM_PTE \
11181 (in_nmi() ? KM_NMI_PTE : \
11182@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11183 /* Clear a kernel PTE and flush it from the TLB */
11184 #define kpte_clear_flush(ptep, vaddr) \
11185 do { \
11186+ pax_open_kernel(); \
11187 pte_clear(&init_mm, (vaddr), (ptep)); \
11188+ pax_close_kernel(); \
11189 __flush_tlb_one((vaddr)); \
11190 } while (0)
11191
11192@@ -85,6 +90,9 @@ do { \
11193
11194 #endif /* !__ASSEMBLY__ */
11195
11196+#define HAVE_ARCH_UNMAPPED_AREA
11197+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11198+
11199 /*
11200 * kern_addr_valid() is (1) for FLATMEM and (0) for
11201 * SPARSEMEM and DISCONTIGMEM
11202diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11203index 5e67c15..12d5c47 100644
11204--- a/arch/x86/include/asm/pgtable_32_types.h
11205+++ b/arch/x86/include/asm/pgtable_32_types.h
11206@@ -8,7 +8,7 @@
11207 */
11208 #ifdef CONFIG_X86_PAE
11209 # include <asm/pgtable-3level_types.h>
11210-# define PMD_SIZE (1UL << PMD_SHIFT)
11211+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11212 # define PMD_MASK (~(PMD_SIZE - 1))
11213 #else
11214 # include <asm/pgtable-2level_types.h>
11215@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11216 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11217 #endif
11218
11219+#ifdef CONFIG_PAX_KERNEXEC
11220+#ifndef __ASSEMBLY__
11221+extern unsigned char MODULES_EXEC_VADDR[];
11222+extern unsigned char MODULES_EXEC_END[];
11223+#endif
11224+#include <asm/boot.h>
11225+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11226+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11227+#else
11228+#define ktla_ktva(addr) (addr)
11229+#define ktva_ktla(addr) (addr)
11230+#endif
11231+
11232 #define MODULES_VADDR VMALLOC_START
11233 #define MODULES_END VMALLOC_END
11234 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11235diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11236index c57a301..6b414ff 100644
11237--- a/arch/x86/include/asm/pgtable_64.h
11238+++ b/arch/x86/include/asm/pgtable_64.h
11239@@ -16,10 +16,14 @@
11240
11241 extern pud_t level3_kernel_pgt[512];
11242 extern pud_t level3_ident_pgt[512];
11243+extern pud_t level3_vmalloc_start_pgt[512];
11244+extern pud_t level3_vmalloc_end_pgt[512];
11245+extern pud_t level3_vmemmap_pgt[512];
11246+extern pud_t level2_vmemmap_pgt[512];
11247 extern pmd_t level2_kernel_pgt[512];
11248 extern pmd_t level2_fixmap_pgt[512];
11249-extern pmd_t level2_ident_pgt[512];
11250-extern pgd_t init_level4_pgt[];
11251+extern pmd_t level2_ident_pgt[512*2];
11252+extern pgd_t init_level4_pgt[512];
11253
11254 #define swapper_pg_dir init_level4_pgt
11255
11256@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11257
11258 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11259 {
11260+ pax_open_kernel();
11261 *pmdp = pmd;
11262+ pax_close_kernel();
11263 }
11264
11265 static inline void native_pmd_clear(pmd_t *pmd)
11266@@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11267
11268 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11269 {
11270+ pax_open_kernel();
11271+ *pgdp = pgd;
11272+ pax_close_kernel();
11273+}
11274+
11275+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11276+{
11277 *pgdp = pgd;
11278 }
11279
11280diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11281index 766ea16..5b96cb3 100644
11282--- a/arch/x86/include/asm/pgtable_64_types.h
11283+++ b/arch/x86/include/asm/pgtable_64_types.h
11284@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11285 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11286 #define MODULES_END _AC(0xffffffffff000000, UL)
11287 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11288+#define MODULES_EXEC_VADDR MODULES_VADDR
11289+#define MODULES_EXEC_END MODULES_END
11290+
11291+#define ktla_ktva(addr) (addr)
11292+#define ktva_ktla(addr) (addr)
11293
11294 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11295diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11296index d1f4a76..2f46ba1 100644
11297--- a/arch/x86/include/asm/pgtable_types.h
11298+++ b/arch/x86/include/asm/pgtable_types.h
11299@@ -16,12 +16,11 @@
11300 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11301 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11302 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11303-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11304+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11305 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11306 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11307 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11308-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11309-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11310+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11311 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11312
11313 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11314@@ -39,7 +38,6 @@
11315 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11316 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11317 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11318-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11319 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11320 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11321 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11322@@ -55,8 +53,10 @@
11323
11324 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11325 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11326-#else
11327+#elif defined(CONFIG_KMEMCHECK)
11328 #define _PAGE_NX (_AT(pteval_t, 0))
11329+#else
11330+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11331 #endif
11332
11333 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11334@@ -93,6 +93,9 @@
11335 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11336 _PAGE_ACCESSED)
11337
11338+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11339+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11340+
11341 #define __PAGE_KERNEL_EXEC \
11342 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11343 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11344@@ -103,8 +106,8 @@
11345 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11346 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11347 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11348-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11349-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11350+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11351+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11352 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11353 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11354 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11355@@ -163,8 +166,8 @@
11356 * bits are combined, this will alow user to access the high address mapped
11357 * VDSO in the presence of CONFIG_COMPAT_VDSO
11358 */
11359-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11360-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11361+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11362+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11363 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11364 #endif
11365
11366@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11367 {
11368 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11369 }
11370+#endif
11371
11372+#if PAGETABLE_LEVELS == 3
11373+#include <asm-generic/pgtable-nopud.h>
11374+#endif
11375+
11376+#if PAGETABLE_LEVELS == 2
11377+#include <asm-generic/pgtable-nopmd.h>
11378+#endif
11379+
11380+#ifndef __ASSEMBLY__
11381 #if PAGETABLE_LEVELS > 3
11382 typedef struct { pudval_t pud; } pud_t;
11383
11384@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11385 return pud.pud;
11386 }
11387 #else
11388-#include <asm-generic/pgtable-nopud.h>
11389-
11390 static inline pudval_t native_pud_val(pud_t pud)
11391 {
11392 return native_pgd_val(pud.pgd);
11393@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11394 return pmd.pmd;
11395 }
11396 #else
11397-#include <asm-generic/pgtable-nopmd.h>
11398-
11399 static inline pmdval_t native_pmd_val(pmd_t pmd)
11400 {
11401 return native_pgd_val(pmd.pud.pgd);
11402@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11403
11404 extern pteval_t __supported_pte_mask;
11405 extern void set_nx(void);
11406+
11407+#ifdef CONFIG_X86_32
11408+#ifdef CONFIG_X86_PAE
11409 extern int nx_enabled;
11410+#else
11411+#define nx_enabled (0)
11412+#endif
11413+#else
11414+#define nx_enabled (1)
11415+#endif
11416
11417 #define pgprot_writecombine pgprot_writecombine
11418 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11419diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11420index fa04dea..5f823fc 100644
11421--- a/arch/x86/include/asm/processor.h
11422+++ b/arch/x86/include/asm/processor.h
11423@@ -272,7 +272,7 @@ struct tss_struct {
11424
11425 } ____cacheline_aligned;
11426
11427-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11428+extern struct tss_struct init_tss[NR_CPUS];
11429
11430 /*
11431 * Save the original ist values for checking stack pointers during debugging
11432@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11433 */
11434 #define TASK_SIZE PAGE_OFFSET
11435 #define TASK_SIZE_MAX TASK_SIZE
11436+
11437+#ifdef CONFIG_PAX_SEGMEXEC
11438+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11439+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11440+#else
11441 #define STACK_TOP TASK_SIZE
11442-#define STACK_TOP_MAX STACK_TOP
11443+#endif
11444+
11445+#define STACK_TOP_MAX TASK_SIZE
11446
11447 #define INIT_THREAD { \
11448- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11449+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11450 .vm86_info = NULL, \
11451 .sysenter_cs = __KERNEL_CS, \
11452 .io_bitmap_ptr = NULL, \
11453@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11454 */
11455 #define INIT_TSS { \
11456 .x86_tss = { \
11457- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11458+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11459 .ss0 = __KERNEL_DS, \
11460 .ss1 = __KERNEL_CS, \
11461 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11462@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11463 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11464
11465 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11466-#define KSTK_TOP(info) \
11467-({ \
11468- unsigned long *__ptr = (unsigned long *)(info); \
11469- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11470-})
11471+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11472
11473 /*
11474 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11475@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11476 #define task_pt_regs(task) \
11477 ({ \
11478 struct pt_regs *__regs__; \
11479- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11480+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11481 __regs__ - 1; \
11482 })
11483
11484@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11485 /*
11486 * User space process size. 47bits minus one guard page.
11487 */
11488-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11489+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11490
11491 /* This decides where the kernel will search for a free chunk of vm
11492 * space during mmap's.
11493 */
11494 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11495- 0xc0000000 : 0xFFFFe000)
11496+ 0xc0000000 : 0xFFFFf000)
11497
11498 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11499 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11500@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11501 #define STACK_TOP_MAX TASK_SIZE_MAX
11502
11503 #define INIT_THREAD { \
11504- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11505+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11506 }
11507
11508 #define INIT_TSS { \
11509- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11510+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11511 }
11512
11513 /*
11514@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11515 */
11516 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11517
11518+#ifdef CONFIG_PAX_SEGMEXEC
11519+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11520+#endif
11521+
11522 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11523
11524 /* Get/set a process' ability to use the timestamp counter instruction */
11525diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11526index 0f0d908..f2e3da2 100644
11527--- a/arch/x86/include/asm/ptrace.h
11528+++ b/arch/x86/include/asm/ptrace.h
11529@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11530 }
11531
11532 /*
11533- * user_mode_vm(regs) determines whether a register set came from user mode.
11534+ * user_mode(regs) determines whether a register set came from user mode.
11535 * This is true if V8086 mode was enabled OR if the register set was from
11536 * protected mode with RPL-3 CS value. This tricky test checks that with
11537 * one comparison. Many places in the kernel can bypass this full check
11538- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11539+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11540+ * be used.
11541 */
11542-static inline int user_mode(struct pt_regs *regs)
11543+static inline int user_mode_novm(struct pt_regs *regs)
11544 {
11545 #ifdef CONFIG_X86_32
11546 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11547 #else
11548- return !!(regs->cs & 3);
11549+ return !!(regs->cs & SEGMENT_RPL_MASK);
11550 #endif
11551 }
11552
11553-static inline int user_mode_vm(struct pt_regs *regs)
11554+static inline int user_mode(struct pt_regs *regs)
11555 {
11556 #ifdef CONFIG_X86_32
11557 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11558 USER_RPL;
11559 #else
11560- return user_mode(regs);
11561+ return user_mode_novm(regs);
11562 #endif
11563 }
11564
11565diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11566index 562d4fd..6e39df1 100644
11567--- a/arch/x86/include/asm/reboot.h
11568+++ b/arch/x86/include/asm/reboot.h
11569@@ -6,19 +6,19 @@
11570 struct pt_regs;
11571
11572 struct machine_ops {
11573- void (*restart)(char *cmd);
11574- void (*halt)(void);
11575- void (*power_off)(void);
11576+ void (* __noreturn restart)(char *cmd);
11577+ void (* __noreturn halt)(void);
11578+ void (* __noreturn power_off)(void);
11579 void (*shutdown)(void);
11580 void (*crash_shutdown)(struct pt_regs *);
11581- void (*emergency_restart)(void);
11582-};
11583+ void (* __noreturn emergency_restart)(void);
11584+} __no_const;
11585
11586 extern struct machine_ops machine_ops;
11587
11588 void native_machine_crash_shutdown(struct pt_regs *regs);
11589 void native_machine_shutdown(void);
11590-void machine_real_restart(const unsigned char *code, int length);
11591+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11592
11593 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11594 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11595diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11596index 606ede1..dbfff37 100644
11597--- a/arch/x86/include/asm/rwsem.h
11598+++ b/arch/x86/include/asm/rwsem.h
11599@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11600 {
11601 asm volatile("# beginning down_read\n\t"
11602 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11603+
11604+#ifdef CONFIG_PAX_REFCOUNT
11605+ "jno 0f\n"
11606+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11607+ "int $4\n0:\n"
11608+ _ASM_EXTABLE(0b, 0b)
11609+#endif
11610+
11611 /* adds 0x00000001, returns the old value */
11612 " jns 1f\n"
11613 " call call_rwsem_down_read_failed\n"
11614@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11615 "1:\n\t"
11616 " mov %1,%2\n\t"
11617 " add %3,%2\n\t"
11618+
11619+#ifdef CONFIG_PAX_REFCOUNT
11620+ "jno 0f\n"
11621+ "sub %3,%2\n"
11622+ "int $4\n0:\n"
11623+ _ASM_EXTABLE(0b, 0b)
11624+#endif
11625+
11626 " jle 2f\n\t"
11627 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11628 " jnz 1b\n\t"
11629@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11630 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11631 asm volatile("# beginning down_write\n\t"
11632 LOCK_PREFIX " xadd %1,(%2)\n\t"
11633+
11634+#ifdef CONFIG_PAX_REFCOUNT
11635+ "jno 0f\n"
11636+ "mov %1,(%2)\n"
11637+ "int $4\n0:\n"
11638+ _ASM_EXTABLE(0b, 0b)
11639+#endif
11640+
11641 /* subtract 0x0000ffff, returns the old value */
11642 " test %1,%1\n\t"
11643 /* was the count 0 before? */
11644@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11645 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11646 asm volatile("# beginning __up_read\n\t"
11647 LOCK_PREFIX " xadd %1,(%2)\n\t"
11648+
11649+#ifdef CONFIG_PAX_REFCOUNT
11650+ "jno 0f\n"
11651+ "mov %1,(%2)\n"
11652+ "int $4\n0:\n"
11653+ _ASM_EXTABLE(0b, 0b)
11654+#endif
11655+
11656 /* subtracts 1, returns the old value */
11657 " jns 1f\n\t"
11658 " call call_rwsem_wake\n"
11659@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11660 rwsem_count_t tmp;
11661 asm volatile("# beginning __up_write\n\t"
11662 LOCK_PREFIX " xadd %1,(%2)\n\t"
11663+
11664+#ifdef CONFIG_PAX_REFCOUNT
11665+ "jno 0f\n"
11666+ "mov %1,(%2)\n"
11667+ "int $4\n0:\n"
11668+ _ASM_EXTABLE(0b, 0b)
11669+#endif
11670+
11671 /* tries to transition
11672 0xffff0001 -> 0x00000000 */
11673 " jz 1f\n"
11674@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11675 {
11676 asm volatile("# beginning __downgrade_write\n\t"
11677 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11678+
11679+#ifdef CONFIG_PAX_REFCOUNT
11680+ "jno 0f\n"
11681+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11682+ "int $4\n0:\n"
11683+ _ASM_EXTABLE(0b, 0b)
11684+#endif
11685+
11686 /*
11687 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11688 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11689@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11690 static inline void rwsem_atomic_add(rwsem_count_t delta,
11691 struct rw_semaphore *sem)
11692 {
11693- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11694+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11695+
11696+#ifdef CONFIG_PAX_REFCOUNT
11697+ "jno 0f\n"
11698+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11699+ "int $4\n0:\n"
11700+ _ASM_EXTABLE(0b, 0b)
11701+#endif
11702+
11703 : "+m" (sem->count)
11704 : "er" (delta));
11705 }
11706@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11707 {
11708 rwsem_count_t tmp = delta;
11709
11710- asm volatile(LOCK_PREFIX "xadd %0,%1"
11711+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11712+
11713+#ifdef CONFIG_PAX_REFCOUNT
11714+ "jno 0f\n"
11715+ "mov %0,%1\n"
11716+ "int $4\n0:\n"
11717+ _ASM_EXTABLE(0b, 0b)
11718+#endif
11719+
11720 : "+r" (tmp), "+m" (sem->count)
11721 : : "memory");
11722
11723diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11724index 14e0ed8..7f7dd5e 100644
11725--- a/arch/x86/include/asm/segment.h
11726+++ b/arch/x86/include/asm/segment.h
11727@@ -62,10 +62,15 @@
11728 * 26 - ESPFIX small SS
11729 * 27 - per-cpu [ offset to per-cpu data area ]
11730 * 28 - stack_canary-20 [ for stack protector ]
11731- * 29 - unused
11732- * 30 - unused
11733+ * 29 - PCI BIOS CS
11734+ * 30 - PCI BIOS DS
11735 * 31 - TSS for double fault handler
11736 */
11737+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11738+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11739+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11740+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11741+
11742 #define GDT_ENTRY_TLS_MIN 6
11743 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11744
11745@@ -77,6 +82,8 @@
11746
11747 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11748
11749+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11750+
11751 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11752
11753 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11754@@ -88,7 +95,7 @@
11755 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11756 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11757
11758-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11759+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11760 #ifdef CONFIG_SMP
11761 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11762 #else
11763@@ -102,6 +109,12 @@
11764 #define __KERNEL_STACK_CANARY 0
11765 #endif
11766
11767+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11768+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11769+
11770+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11771+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11772+
11773 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11774
11775 /*
11776@@ -139,7 +152,7 @@
11777 */
11778
11779 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11780-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11781+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11782
11783
11784 #else
11785@@ -163,6 +176,8 @@
11786 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11787 #define __USER32_DS __USER_DS
11788
11789+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11790+
11791 #define GDT_ENTRY_TSS 8 /* needs two entries */
11792 #define GDT_ENTRY_LDT 10 /* needs two entries */
11793 #define GDT_ENTRY_TLS_MIN 12
11794@@ -183,6 +198,7 @@
11795 #endif
11796
11797 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
11798+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
11799 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
11800 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
11801 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
11802diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11803index 4c2f63c..5685db2 100644
11804--- a/arch/x86/include/asm/smp.h
11805+++ b/arch/x86/include/asm/smp.h
11806@@ -24,7 +24,7 @@ extern unsigned int num_processors;
11807 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
11808 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11809 DECLARE_PER_CPU(u16, cpu_llc_id);
11810-DECLARE_PER_CPU(int, cpu_number);
11811+DECLARE_PER_CPU(unsigned int, cpu_number);
11812
11813 static inline struct cpumask *cpu_sibling_mask(int cpu)
11814 {
11815@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
11816 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
11817
11818 /* Static state in head.S used to set up a CPU */
11819-extern struct {
11820- void *sp;
11821- unsigned short ss;
11822-} stack_start;
11823+extern unsigned long stack_start; /* Initial stack pointer address */
11824
11825 struct smp_ops {
11826 void (*smp_prepare_boot_cpu)(void);
11827@@ -60,7 +57,7 @@ struct smp_ops {
11828
11829 void (*send_call_func_ipi)(const struct cpumask *mask);
11830 void (*send_call_func_single_ipi)(int cpu);
11831-};
11832+} __no_const;
11833
11834 /* Globals due to paravirt */
11835 extern void set_cpu_sibling_map(int cpu);
11836@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11837 extern int safe_smp_processor_id(void);
11838
11839 #elif defined(CONFIG_X86_64_SMP)
11840-#define raw_smp_processor_id() (percpu_read(cpu_number))
11841-
11842-#define stack_smp_processor_id() \
11843-({ \
11844- struct thread_info *ti; \
11845- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11846- ti->cpu; \
11847-})
11848+#define raw_smp_processor_id() (percpu_read(cpu_number))
11849+#define stack_smp_processor_id() raw_smp_processor_id()
11850 #define safe_smp_processor_id() smp_processor_id()
11851
11852 #endif
11853diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11854index 4e77853..4359783 100644
11855--- a/arch/x86/include/asm/spinlock.h
11856+++ b/arch/x86/include/asm/spinlock.h
11857@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
11858 static inline void __raw_read_lock(raw_rwlock_t *rw)
11859 {
11860 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
11861+
11862+#ifdef CONFIG_PAX_REFCOUNT
11863+ "jno 0f\n"
11864+ LOCK_PREFIX " addl $1,(%0)\n"
11865+ "int $4\n0:\n"
11866+ _ASM_EXTABLE(0b, 0b)
11867+#endif
11868+
11869 "jns 1f\n"
11870 "call __read_lock_failed\n\t"
11871 "1:\n"
11872@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
11873 static inline void __raw_write_lock(raw_rwlock_t *rw)
11874 {
11875 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
11876+
11877+#ifdef CONFIG_PAX_REFCOUNT
11878+ "jno 0f\n"
11879+ LOCK_PREFIX " addl %1,(%0)\n"
11880+ "int $4\n0:\n"
11881+ _ASM_EXTABLE(0b, 0b)
11882+#endif
11883+
11884 "jz 1f\n"
11885 "call __write_lock_failed\n\t"
11886 "1:\n"
11887@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
11888
11889 static inline void __raw_read_unlock(raw_rwlock_t *rw)
11890 {
11891- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
11892+ asm volatile(LOCK_PREFIX "incl %0\n"
11893+
11894+#ifdef CONFIG_PAX_REFCOUNT
11895+ "jno 0f\n"
11896+ LOCK_PREFIX "decl %0\n"
11897+ "int $4\n0:\n"
11898+ _ASM_EXTABLE(0b, 0b)
11899+#endif
11900+
11901+ :"+m" (rw->lock) : : "memory");
11902 }
11903
11904 static inline void __raw_write_unlock(raw_rwlock_t *rw)
11905 {
11906- asm volatile(LOCK_PREFIX "addl %1, %0"
11907+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
11908+
11909+#ifdef CONFIG_PAX_REFCOUNT
11910+ "jno 0f\n"
11911+ LOCK_PREFIX "subl %1, %0\n"
11912+ "int $4\n0:\n"
11913+ _ASM_EXTABLE(0b, 0b)
11914+#endif
11915+
11916 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
11917 }
11918
11919diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11920index 1575177..cb23f52 100644
11921--- a/arch/x86/include/asm/stackprotector.h
11922+++ b/arch/x86/include/asm/stackprotector.h
11923@@ -48,7 +48,7 @@
11924 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11925 */
11926 #define GDT_STACK_CANARY_INIT \
11927- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11928+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11929
11930 /*
11931 * Initialize the stackprotector canary value.
11932@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11933
11934 static inline void load_stack_canary_segment(void)
11935 {
11936-#ifdef CONFIG_X86_32
11937+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11938 asm volatile ("mov %0, %%gs" : : "r" (0));
11939 #endif
11940 }
11941diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11942index e0fbf29..858ef4a 100644
11943--- a/arch/x86/include/asm/system.h
11944+++ b/arch/x86/include/asm/system.h
11945@@ -132,7 +132,7 @@ do { \
11946 "thread_return:\n\t" \
11947 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11948 __switch_canary \
11949- "movq %P[thread_info](%%rsi),%%r8\n\t" \
11950+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11951 "movq %%rax,%%rdi\n\t" \
11952 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11953 "jnz ret_from_fork\n\t" \
11954@@ -143,7 +143,7 @@ do { \
11955 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11956 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11957 [_tif_fork] "i" (_TIF_FORK), \
11958- [thread_info] "i" (offsetof(struct task_struct, stack)), \
11959+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
11960 [current_task] "m" (per_cpu_var(current_task)) \
11961 __switch_canary_iparam \
11962 : "memory", "cc" __EXTRA_CLOBBER)
11963@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11964 {
11965 unsigned long __limit;
11966 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11967- return __limit + 1;
11968+ return __limit;
11969 }
11970
11971 static inline void native_clts(void)
11972@@ -340,12 +340,12 @@ void enable_hlt(void);
11973
11974 void cpu_idle_wait(void);
11975
11976-extern unsigned long arch_align_stack(unsigned long sp);
11977+#define arch_align_stack(x) ((x) & ~0xfUL)
11978 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11979
11980 void default_idle(void);
11981
11982-void stop_this_cpu(void *dummy);
11983+void stop_this_cpu(void *dummy) __noreturn;
11984
11985 /*
11986 * Force strict CPU ordering.
11987diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11988index 19c3ce4..8962535 100644
11989--- a/arch/x86/include/asm/thread_info.h
11990+++ b/arch/x86/include/asm/thread_info.h
11991@@ -10,6 +10,7 @@
11992 #include <linux/compiler.h>
11993 #include <asm/page.h>
11994 #include <asm/types.h>
11995+#include <asm/percpu.h>
11996
11997 /*
11998 * low level task data that entry.S needs immediate access to
11999@@ -24,7 +25,6 @@ struct exec_domain;
12000 #include <asm/atomic.h>
12001
12002 struct thread_info {
12003- struct task_struct *task; /* main task structure */
12004 struct exec_domain *exec_domain; /* execution domain */
12005 __u32 flags; /* low level flags */
12006 __u32 status; /* thread synchronous flags */
12007@@ -34,18 +34,12 @@ struct thread_info {
12008 mm_segment_t addr_limit;
12009 struct restart_block restart_block;
12010 void __user *sysenter_return;
12011-#ifdef CONFIG_X86_32
12012- unsigned long previous_esp; /* ESP of the previous stack in
12013- case of nested (IRQ) stacks
12014- */
12015- __u8 supervisor_stack[0];
12016-#endif
12017+ unsigned long lowest_stack;
12018 int uaccess_err;
12019 };
12020
12021-#define INIT_THREAD_INFO(tsk) \
12022+#define INIT_THREAD_INFO \
12023 { \
12024- .task = &tsk, \
12025 .exec_domain = &default_exec_domain, \
12026 .flags = 0, \
12027 .cpu = 0, \
12028@@ -56,7 +50,7 @@ struct thread_info {
12029 }, \
12030 }
12031
12032-#define init_thread_info (init_thread_union.thread_info)
12033+#define init_thread_info (init_thread_union.stack)
12034 #define init_stack (init_thread_union.stack)
12035
12036 #else /* !__ASSEMBLY__ */
12037@@ -163,45 +157,40 @@ struct thread_info {
12038 #define alloc_thread_info(tsk) \
12039 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12040
12041-#ifdef CONFIG_X86_32
12042-
12043-#define STACK_WARN (THREAD_SIZE/8)
12044-/*
12045- * macros/functions for gaining access to the thread information structure
12046- *
12047- * preempt_count needs to be 1 initially, until the scheduler is functional.
12048- */
12049-#ifndef __ASSEMBLY__
12050-
12051-
12052-/* how to get the current stack pointer from C */
12053-register unsigned long current_stack_pointer asm("esp") __used;
12054-
12055-/* how to get the thread information struct from C */
12056-static inline struct thread_info *current_thread_info(void)
12057-{
12058- return (struct thread_info *)
12059- (current_stack_pointer & ~(THREAD_SIZE - 1));
12060-}
12061-
12062-#else /* !__ASSEMBLY__ */
12063-
12064+#ifdef __ASSEMBLY__
12065 /* how to get the thread information struct from ASM */
12066 #define GET_THREAD_INFO(reg) \
12067- movl $-THREAD_SIZE, reg; \
12068- andl %esp, reg
12069+ mov PER_CPU_VAR(current_tinfo), reg
12070
12071 /* use this one if reg already contains %esp */
12072-#define GET_THREAD_INFO_WITH_ESP(reg) \
12073- andl $-THREAD_SIZE, reg
12074+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12075+#else
12076+/* how to get the thread information struct from C */
12077+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12078+
12079+static __always_inline struct thread_info *current_thread_info(void)
12080+{
12081+ return percpu_read_stable(current_tinfo);
12082+}
12083+#endif
12084+
12085+#ifdef CONFIG_X86_32
12086+
12087+#define STACK_WARN (THREAD_SIZE/8)
12088+/*
12089+ * macros/functions for gaining access to the thread information structure
12090+ *
12091+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12092+ */
12093+#ifndef __ASSEMBLY__
12094+
12095+/* how to get the current stack pointer from C */
12096+register unsigned long current_stack_pointer asm("esp") __used;
12097
12098 #endif
12099
12100 #else /* X86_32 */
12101
12102-#include <asm/percpu.h>
12103-#define KERNEL_STACK_OFFSET (5*8)
12104-
12105 /*
12106 * macros/functions for gaining access to the thread information structure
12107 * preempt_count needs to be 1 initially, until the scheduler is functional.
12108@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12109 #ifndef __ASSEMBLY__
12110 DECLARE_PER_CPU(unsigned long, kernel_stack);
12111
12112-static inline struct thread_info *current_thread_info(void)
12113-{
12114- struct thread_info *ti;
12115- ti = (void *)(percpu_read_stable(kernel_stack) +
12116- KERNEL_STACK_OFFSET - THREAD_SIZE);
12117- return ti;
12118-}
12119-
12120-#else /* !__ASSEMBLY__ */
12121-
12122-/* how to get the thread information struct from ASM */
12123-#define GET_THREAD_INFO(reg) \
12124- movq PER_CPU_VAR(kernel_stack),reg ; \
12125- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12126-
12127+/* how to get the current stack pointer from C */
12128+register unsigned long current_stack_pointer asm("rsp") __used;
12129 #endif
12130
12131 #endif /* !X86_32 */
12132@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12133 extern void free_thread_info(struct thread_info *ti);
12134 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12135 #define arch_task_cache_init arch_task_cache_init
12136+
12137+#define __HAVE_THREAD_FUNCTIONS
12138+#define task_thread_info(task) (&(task)->tinfo)
12139+#define task_stack_page(task) ((task)->stack)
12140+#define setup_thread_stack(p, org) do {} while (0)
12141+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12142+
12143+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12144+extern struct task_struct *alloc_task_struct(void);
12145+extern void free_task_struct(struct task_struct *);
12146+
12147 #endif
12148 #endif /* _ASM_X86_THREAD_INFO_H */
12149diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12150index 61c5874..8a046e9 100644
12151--- a/arch/x86/include/asm/uaccess.h
12152+++ b/arch/x86/include/asm/uaccess.h
12153@@ -8,12 +8,15 @@
12154 #include <linux/thread_info.h>
12155 #include <linux/prefetch.h>
12156 #include <linux/string.h>
12157+#include <linux/sched.h>
12158 #include <asm/asm.h>
12159 #include <asm/page.h>
12160
12161 #define VERIFY_READ 0
12162 #define VERIFY_WRITE 1
12163
12164+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12165+
12166 /*
12167 * The fs value determines whether argument validity checking should be
12168 * performed or not. If get_fs() == USER_DS, checking is performed, with
12169@@ -29,7 +32,12 @@
12170
12171 #define get_ds() (KERNEL_DS)
12172 #define get_fs() (current_thread_info()->addr_limit)
12173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12174+void __set_fs(mm_segment_t x);
12175+void set_fs(mm_segment_t x);
12176+#else
12177 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12178+#endif
12179
12180 #define segment_eq(a, b) ((a).seg == (b).seg)
12181
12182@@ -77,7 +85,33 @@
12183 * checks that the pointer is in the user space range - after calling
12184 * this function, memory access functions may still return -EFAULT.
12185 */
12186-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12187+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12188+#define access_ok(type, addr, size) \
12189+({ \
12190+ long __size = size; \
12191+ unsigned long __addr = (unsigned long)addr; \
12192+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12193+ unsigned long __end_ao = __addr + __size - 1; \
12194+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12195+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12196+ while(__addr_ao <= __end_ao) { \
12197+ char __c_ao; \
12198+ __addr_ao += PAGE_SIZE; \
12199+ if (__size > PAGE_SIZE) \
12200+ cond_resched(); \
12201+ if (__get_user(__c_ao, (char __user *)__addr)) \
12202+ break; \
12203+ if (type != VERIFY_WRITE) { \
12204+ __addr = __addr_ao; \
12205+ continue; \
12206+ } \
12207+ if (__put_user(__c_ao, (char __user *)__addr)) \
12208+ break; \
12209+ __addr = __addr_ao; \
12210+ } \
12211+ } \
12212+ __ret_ao; \
12213+})
12214
12215 /*
12216 * The exception table consists of pairs of addresses: the first is the
12217@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12218 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12219 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12220
12221-
12222+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12223+#define __copyuser_seg "gs;"
12224+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12225+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12226+#else
12227+#define __copyuser_seg
12228+#define __COPYUSER_SET_ES
12229+#define __COPYUSER_RESTORE_ES
12230+#endif
12231
12232 #ifdef CONFIG_X86_32
12233 #define __put_user_asm_u64(x, addr, err, errret) \
12234- asm volatile("1: movl %%eax,0(%2)\n" \
12235- "2: movl %%edx,4(%2)\n" \
12236+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12237+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12238 "3:\n" \
12239 ".section .fixup,\"ax\"\n" \
12240 "4: movl %3,%0\n" \
12241@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12242 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12243
12244 #define __put_user_asm_ex_u64(x, addr) \
12245- asm volatile("1: movl %%eax,0(%1)\n" \
12246- "2: movl %%edx,4(%1)\n" \
12247+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12248+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12249 "3:\n" \
12250 _ASM_EXTABLE(1b, 2b - 1b) \
12251 _ASM_EXTABLE(2b, 3b - 2b) \
12252@@ -253,7 +295,7 @@ extern void __put_user_8(void);
12253 __typeof__(*(ptr)) __pu_val; \
12254 __chk_user_ptr(ptr); \
12255 might_fault(); \
12256- __pu_val = x; \
12257+ __pu_val = (x); \
12258 switch (sizeof(*(ptr))) { \
12259 case 1: \
12260 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12261@@ -374,7 +416,7 @@ do { \
12262 } while (0)
12263
12264 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12265- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12266+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12267 "2:\n" \
12268 ".section .fixup,\"ax\"\n" \
12269 "3: mov %3,%0\n" \
12270@@ -382,7 +424,7 @@ do { \
12271 " jmp 2b\n" \
12272 ".previous\n" \
12273 _ASM_EXTABLE(1b, 3b) \
12274- : "=r" (err), ltype(x) \
12275+ : "=r" (err), ltype (x) \
12276 : "m" (__m(addr)), "i" (errret), "0" (err))
12277
12278 #define __get_user_size_ex(x, ptr, size) \
12279@@ -407,7 +449,7 @@ do { \
12280 } while (0)
12281
12282 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12283- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12284+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12285 "2:\n" \
12286 _ASM_EXTABLE(1b, 2b - 1b) \
12287 : ltype(x) : "m" (__m(addr)))
12288@@ -424,13 +466,24 @@ do { \
12289 int __gu_err; \
12290 unsigned long __gu_val; \
12291 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12292- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12293+ (x) = (__typeof__(*(ptr)))__gu_val; \
12294 __gu_err; \
12295 })
12296
12297 /* FIXME: this hack is definitely wrong -AK */
12298 struct __large_struct { unsigned long buf[100]; };
12299-#define __m(x) (*(struct __large_struct __user *)(x))
12300+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12301+#define ____m(x) \
12302+({ \
12303+ unsigned long ____x = (unsigned long)(x); \
12304+ if (____x < PAX_USER_SHADOW_BASE) \
12305+ ____x += PAX_USER_SHADOW_BASE; \
12306+ (void __user *)____x; \
12307+})
12308+#else
12309+#define ____m(x) (x)
12310+#endif
12311+#define __m(x) (*(struct __large_struct __user *)____m(x))
12312
12313 /*
12314 * Tell gcc we read from memory instead of writing: this is because
12315@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12316 * aliasing issues.
12317 */
12318 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12319- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12320+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12321 "2:\n" \
12322 ".section .fixup,\"ax\"\n" \
12323 "3: mov %3,%0\n" \
12324@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12325 ".previous\n" \
12326 _ASM_EXTABLE(1b, 3b) \
12327 : "=r"(err) \
12328- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12329+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12330
12331 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12332- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12333+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12334 "2:\n" \
12335 _ASM_EXTABLE(1b, 2b - 1b) \
12336 : : ltype(x), "m" (__m(addr)))
12337@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12338 * On error, the variable @x is set to zero.
12339 */
12340
12341+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12342+#define __get_user(x, ptr) get_user((x), (ptr))
12343+#else
12344 #define __get_user(x, ptr) \
12345 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12346+#endif
12347
12348 /**
12349 * __put_user: - Write a simple value into user space, with less checking.
12350@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12351 * Returns zero on success, or -EFAULT on error.
12352 */
12353
12354+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12355+#define __put_user(x, ptr) put_user((x), (ptr))
12356+#else
12357 #define __put_user(x, ptr) \
12358 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12359+#endif
12360
12361 #define __get_user_unaligned __get_user
12362 #define __put_user_unaligned __put_user
12363@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12364 #define get_user_ex(x, ptr) do { \
12365 unsigned long __gue_val; \
12366 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12367- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12368+ (x) = (__typeof__(*(ptr)))__gue_val; \
12369 } while (0)
12370
12371 #ifdef CONFIG_X86_WP_WORKS_OK
12372@@ -567,6 +628,7 @@ extern struct movsl_mask {
12373
12374 #define ARCH_HAS_NOCACHE_UACCESS 1
12375
12376+#define ARCH_HAS_SORT_EXTABLE
12377 #ifdef CONFIG_X86_32
12378 # include "uaccess_32.h"
12379 #else
12380diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12381index 632fb44..e30e334 100644
12382--- a/arch/x86/include/asm/uaccess_32.h
12383+++ b/arch/x86/include/asm/uaccess_32.h
12384@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12385 static __always_inline unsigned long __must_check
12386 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12387 {
12388+ pax_track_stack();
12389+
12390+ if ((long)n < 0)
12391+ return n;
12392+
12393 if (__builtin_constant_p(n)) {
12394 unsigned long ret;
12395
12396@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12397 return ret;
12398 }
12399 }
12400+ if (!__builtin_constant_p(n))
12401+ check_object_size(from, n, true);
12402 return __copy_to_user_ll(to, from, n);
12403 }
12404
12405@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12406 __copy_to_user(void __user *to, const void *from, unsigned long n)
12407 {
12408 might_fault();
12409+
12410 return __copy_to_user_inatomic(to, from, n);
12411 }
12412
12413 static __always_inline unsigned long
12414 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12415 {
12416+ if ((long)n < 0)
12417+ return n;
12418+
12419 /* Avoid zeroing the tail if the copy fails..
12420 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12421 * but as the zeroing behaviour is only significant when n is not
12422@@ -138,6 +149,12 @@ static __always_inline unsigned long
12423 __copy_from_user(void *to, const void __user *from, unsigned long n)
12424 {
12425 might_fault();
12426+
12427+ pax_track_stack();
12428+
12429+ if ((long)n < 0)
12430+ return n;
12431+
12432 if (__builtin_constant_p(n)) {
12433 unsigned long ret;
12434
12435@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12436 return ret;
12437 }
12438 }
12439+ if (!__builtin_constant_p(n))
12440+ check_object_size(to, n, false);
12441 return __copy_from_user_ll(to, from, n);
12442 }
12443
12444@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12445 const void __user *from, unsigned long n)
12446 {
12447 might_fault();
12448+
12449+ if ((long)n < 0)
12450+ return n;
12451+
12452 if (__builtin_constant_p(n)) {
12453 unsigned long ret;
12454
12455@@ -182,14 +205,62 @@ static __always_inline unsigned long
12456 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12457 unsigned long n)
12458 {
12459- return __copy_from_user_ll_nocache_nozero(to, from, n);
12460+ if ((long)n < 0)
12461+ return n;
12462+
12463+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12464+}
12465+
12466+/**
12467+ * copy_to_user: - Copy a block of data into user space.
12468+ * @to: Destination address, in user space.
12469+ * @from: Source address, in kernel space.
12470+ * @n: Number of bytes to copy.
12471+ *
12472+ * Context: User context only. This function may sleep.
12473+ *
12474+ * Copy data from kernel space to user space.
12475+ *
12476+ * Returns number of bytes that could not be copied.
12477+ * On success, this will be zero.
12478+ */
12479+static __always_inline unsigned long __must_check
12480+copy_to_user(void __user *to, const void *from, unsigned long n)
12481+{
12482+ if (access_ok(VERIFY_WRITE, to, n))
12483+ n = __copy_to_user(to, from, n);
12484+ return n;
12485+}
12486+
12487+/**
12488+ * copy_from_user: - Copy a block of data from user space.
12489+ * @to: Destination address, in kernel space.
12490+ * @from: Source address, in user space.
12491+ * @n: Number of bytes to copy.
12492+ *
12493+ * Context: User context only. This function may sleep.
12494+ *
12495+ * Copy data from user space to kernel space.
12496+ *
12497+ * Returns number of bytes that could not be copied.
12498+ * On success, this will be zero.
12499+ *
12500+ * If some data could not be copied, this function will pad the copied
12501+ * data to the requested size using zero bytes.
12502+ */
12503+static __always_inline unsigned long __must_check
12504+copy_from_user(void *to, const void __user *from, unsigned long n)
12505+{
12506+ if (access_ok(VERIFY_READ, from, n))
12507+ n = __copy_from_user(to, from, n);
12508+ else if ((long)n > 0) {
12509+ if (!__builtin_constant_p(n))
12510+ check_object_size(to, n, false);
12511+ memset(to, 0, n);
12512+ }
12513+ return n;
12514 }
12515
12516-unsigned long __must_check copy_to_user(void __user *to,
12517- const void *from, unsigned long n);
12518-unsigned long __must_check copy_from_user(void *to,
12519- const void __user *from,
12520- unsigned long n);
12521 long __must_check strncpy_from_user(char *dst, const char __user *src,
12522 long count);
12523 long __must_check __strncpy_from_user(char *dst,
12524diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12525index db24b21..f595ae7 100644
12526--- a/arch/x86/include/asm/uaccess_64.h
12527+++ b/arch/x86/include/asm/uaccess_64.h
12528@@ -9,6 +9,9 @@
12529 #include <linux/prefetch.h>
12530 #include <linux/lockdep.h>
12531 #include <asm/page.h>
12532+#include <asm/pgtable.h>
12533+
12534+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12535
12536 /*
12537 * Copy To/From Userspace
12538@@ -16,116 +19,205 @@
12539
12540 /* Handles exceptions in both to and from, but doesn't do access_ok */
12541 __must_check unsigned long
12542-copy_user_generic(void *to, const void *from, unsigned len);
12543+copy_user_generic(void *to, const void *from, unsigned long len);
12544
12545 __must_check unsigned long
12546-copy_to_user(void __user *to, const void *from, unsigned len);
12547-__must_check unsigned long
12548-copy_from_user(void *to, const void __user *from, unsigned len);
12549-__must_check unsigned long
12550-copy_in_user(void __user *to, const void __user *from, unsigned len);
12551+copy_in_user(void __user *to, const void __user *from, unsigned long len);
12552
12553 static __always_inline __must_check
12554-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12555+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12556 {
12557- int ret = 0;
12558+ unsigned ret = 0;
12559
12560 might_fault();
12561- if (!__builtin_constant_p(size))
12562- return copy_user_generic(dst, (__force void *)src, size);
12563+
12564+ if (size > INT_MAX)
12565+ return size;
12566+
12567+#ifdef CONFIG_PAX_MEMORY_UDEREF
12568+ if (!__access_ok(VERIFY_READ, src, size))
12569+ return size;
12570+#endif
12571+
12572+ if (!__builtin_constant_p(size)) {
12573+ check_object_size(dst, size, false);
12574+
12575+#ifdef CONFIG_PAX_MEMORY_UDEREF
12576+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12577+ src += PAX_USER_SHADOW_BASE;
12578+#endif
12579+
12580+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12581+ }
12582 switch (size) {
12583- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12584+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12585 ret, "b", "b", "=q", 1);
12586 return ret;
12587- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12588+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12589 ret, "w", "w", "=r", 2);
12590 return ret;
12591- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12592+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12593 ret, "l", "k", "=r", 4);
12594 return ret;
12595- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12596+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12597 ret, "q", "", "=r", 8);
12598 return ret;
12599 case 10:
12600- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12601+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12602 ret, "q", "", "=r", 10);
12603 if (unlikely(ret))
12604 return ret;
12605 __get_user_asm(*(u16 *)(8 + (char *)dst),
12606- (u16 __user *)(8 + (char __user *)src),
12607+ (const u16 __user *)(8 + (const char __user *)src),
12608 ret, "w", "w", "=r", 2);
12609 return ret;
12610 case 16:
12611- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12612+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12613 ret, "q", "", "=r", 16);
12614 if (unlikely(ret))
12615 return ret;
12616 __get_user_asm(*(u64 *)(8 + (char *)dst),
12617- (u64 __user *)(8 + (char __user *)src),
12618+ (const u64 __user *)(8 + (const char __user *)src),
12619 ret, "q", "", "=r", 8);
12620 return ret;
12621 default:
12622- return copy_user_generic(dst, (__force void *)src, size);
12623+
12624+#ifdef CONFIG_PAX_MEMORY_UDEREF
12625+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12626+ src += PAX_USER_SHADOW_BASE;
12627+#endif
12628+
12629+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12630 }
12631 }
12632
12633 static __always_inline __must_check
12634-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12635+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12636 {
12637- int ret = 0;
12638+ unsigned ret = 0;
12639
12640 might_fault();
12641- if (!__builtin_constant_p(size))
12642- return copy_user_generic((__force void *)dst, src, size);
12643+
12644+ pax_track_stack();
12645+
12646+ if (size > INT_MAX)
12647+ return size;
12648+
12649+#ifdef CONFIG_PAX_MEMORY_UDEREF
12650+ if (!__access_ok(VERIFY_WRITE, dst, size))
12651+ return size;
12652+#endif
12653+
12654+ if (!__builtin_constant_p(size)) {
12655+ check_object_size(src, size, true);
12656+
12657+#ifdef CONFIG_PAX_MEMORY_UDEREF
12658+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12659+ dst += PAX_USER_SHADOW_BASE;
12660+#endif
12661+
12662+ return copy_user_generic((__force_kernel void *)dst, src, size);
12663+ }
12664 switch (size) {
12665- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12666+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12667 ret, "b", "b", "iq", 1);
12668 return ret;
12669- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12670+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12671 ret, "w", "w", "ir", 2);
12672 return ret;
12673- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12674+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12675 ret, "l", "k", "ir", 4);
12676 return ret;
12677- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12678+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12679 ret, "q", "", "er", 8);
12680 return ret;
12681 case 10:
12682- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12683+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12684 ret, "q", "", "er", 10);
12685 if (unlikely(ret))
12686 return ret;
12687 asm("":::"memory");
12688- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12689+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12690 ret, "w", "w", "ir", 2);
12691 return ret;
12692 case 16:
12693- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12694+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12695 ret, "q", "", "er", 16);
12696 if (unlikely(ret))
12697 return ret;
12698 asm("":::"memory");
12699- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12700+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12701 ret, "q", "", "er", 8);
12702 return ret;
12703 default:
12704- return copy_user_generic((__force void *)dst, src, size);
12705+
12706+#ifdef CONFIG_PAX_MEMORY_UDEREF
12707+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12708+ dst += PAX_USER_SHADOW_BASE;
12709+#endif
12710+
12711+ return copy_user_generic((__force_kernel void *)dst, src, size);
12712+ }
12713+}
12714+
12715+static __always_inline __must_check
12716+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12717+{
12718+ if (access_ok(VERIFY_WRITE, to, len))
12719+ len = __copy_to_user(to, from, len);
12720+ return len;
12721+}
12722+
12723+static __always_inline __must_check
12724+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12725+{
12726+ might_fault();
12727+
12728+ if (access_ok(VERIFY_READ, from, len))
12729+ len = __copy_from_user(to, from, len);
12730+ else if (len < INT_MAX) {
12731+ if (!__builtin_constant_p(len))
12732+ check_object_size(to, len, false);
12733+ memset(to, 0, len);
12734 }
12735+ return len;
12736 }
12737
12738 static __always_inline __must_check
12739-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12740+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12741 {
12742- int ret = 0;
12743+ unsigned ret = 0;
12744
12745 might_fault();
12746- if (!__builtin_constant_p(size))
12747- return copy_user_generic((__force void *)dst,
12748- (__force void *)src, size);
12749+
12750+ pax_track_stack();
12751+
12752+ if (size > INT_MAX)
12753+ return size;
12754+
12755+#ifdef CONFIG_PAX_MEMORY_UDEREF
12756+ if (!__access_ok(VERIFY_READ, src, size))
12757+ return size;
12758+ if (!__access_ok(VERIFY_WRITE, dst, size))
12759+ return size;
12760+#endif
12761+
12762+ if (!__builtin_constant_p(size)) {
12763+
12764+#ifdef CONFIG_PAX_MEMORY_UDEREF
12765+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12766+ src += PAX_USER_SHADOW_BASE;
12767+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12768+ dst += PAX_USER_SHADOW_BASE;
12769+#endif
12770+
12771+ return copy_user_generic((__force_kernel void *)dst,
12772+ (__force_kernel const void *)src, size);
12773+ }
12774 switch (size) {
12775 case 1: {
12776 u8 tmp;
12777- __get_user_asm(tmp, (u8 __user *)src,
12778+ __get_user_asm(tmp, (const u8 __user *)src,
12779 ret, "b", "b", "=q", 1);
12780 if (likely(!ret))
12781 __put_user_asm(tmp, (u8 __user *)dst,
12782@@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12783 }
12784 case 2: {
12785 u16 tmp;
12786- __get_user_asm(tmp, (u16 __user *)src,
12787+ __get_user_asm(tmp, (const u16 __user *)src,
12788 ret, "w", "w", "=r", 2);
12789 if (likely(!ret))
12790 __put_user_asm(tmp, (u16 __user *)dst,
12791@@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12792
12793 case 4: {
12794 u32 tmp;
12795- __get_user_asm(tmp, (u32 __user *)src,
12796+ __get_user_asm(tmp, (const u32 __user *)src,
12797 ret, "l", "k", "=r", 4);
12798 if (likely(!ret))
12799 __put_user_asm(tmp, (u32 __user *)dst,
12800@@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12801 }
12802 case 8: {
12803 u64 tmp;
12804- __get_user_asm(tmp, (u64 __user *)src,
12805+ __get_user_asm(tmp, (const u64 __user *)src,
12806 ret, "q", "", "=r", 8);
12807 if (likely(!ret))
12808 __put_user_asm(tmp, (u64 __user *)dst,
12809@@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12810 return ret;
12811 }
12812 default:
12813- return copy_user_generic((__force void *)dst,
12814- (__force void *)src, size);
12815+
12816+#ifdef CONFIG_PAX_MEMORY_UDEREF
12817+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12818+ src += PAX_USER_SHADOW_BASE;
12819+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12820+ dst += PAX_USER_SHADOW_BASE;
12821+#endif
12822+
12823+ return copy_user_generic((__force_kernel void *)dst,
12824+ (__force_kernel const void *)src, size);
12825 }
12826 }
12827
12828@@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
12829 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12830 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12831
12832-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
12833- unsigned size);
12834+static __must_check __always_inline unsigned long
12835+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12836+{
12837+ pax_track_stack();
12838+
12839+ if (size > INT_MAX)
12840+ return size;
12841+
12842+#ifdef CONFIG_PAX_MEMORY_UDEREF
12843+ if (!__access_ok(VERIFY_READ, src, size))
12844+ return size;
12845
12846-static __must_check __always_inline int
12847-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12848+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12849+ src += PAX_USER_SHADOW_BASE;
12850+#endif
12851+
12852+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12853+}
12854+
12855+static __must_check __always_inline unsigned long
12856+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12857 {
12858- return copy_user_generic((__force void *)dst, src, size);
12859+ if (size > INT_MAX)
12860+ return size;
12861+
12862+#ifdef CONFIG_PAX_MEMORY_UDEREF
12863+ if (!__access_ok(VERIFY_WRITE, dst, size))
12864+ return size;
12865+
12866+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12867+ dst += PAX_USER_SHADOW_BASE;
12868+#endif
12869+
12870+ return copy_user_generic((__force_kernel void *)dst, src, size);
12871 }
12872
12873-extern long __copy_user_nocache(void *dst, const void __user *src,
12874- unsigned size, int zerorest);
12875+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12876+ unsigned long size, int zerorest);
12877
12878-static inline int
12879-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12880+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12881 {
12882 might_sleep();
12883+
12884+ if (size > INT_MAX)
12885+ return size;
12886+
12887+#ifdef CONFIG_PAX_MEMORY_UDEREF
12888+ if (!__access_ok(VERIFY_READ, src, size))
12889+ return size;
12890+#endif
12891+
12892 return __copy_user_nocache(dst, src, size, 1);
12893 }
12894
12895-static inline int
12896-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12897- unsigned size)
12898+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12899+ unsigned long size)
12900 {
12901+ if (size > INT_MAX)
12902+ return size;
12903+
12904+#ifdef CONFIG_PAX_MEMORY_UDEREF
12905+ if (!__access_ok(VERIFY_READ, src, size))
12906+ return size;
12907+#endif
12908+
12909 return __copy_user_nocache(dst, src, size, 0);
12910 }
12911
12912-unsigned long
12913-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12914+extern unsigned long
12915+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12916
12917 #endif /* _ASM_X86_UACCESS_64_H */
12918diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12919index 9064052..786cfbc 100644
12920--- a/arch/x86/include/asm/vdso.h
12921+++ b/arch/x86/include/asm/vdso.h
12922@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
12923 #define VDSO32_SYMBOL(base, name) \
12924 ({ \
12925 extern const char VDSO32_##name[]; \
12926- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12927+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12928 })
12929 #endif
12930
12931diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
12932index 3d61e20..9507180 100644
12933--- a/arch/x86/include/asm/vgtod.h
12934+++ b/arch/x86/include/asm/vgtod.h
12935@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
12936 int sysctl_enabled;
12937 struct timezone sys_tz;
12938 struct { /* extract of a clocksource struct */
12939+ char name[8];
12940 cycle_t (*vread)(void);
12941 cycle_t cycle_last;
12942 cycle_t mask;
12943diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
12944index 61e08c0..b0da582 100644
12945--- a/arch/x86/include/asm/vmi.h
12946+++ b/arch/x86/include/asm/vmi.h
12947@@ -191,6 +191,7 @@ struct vrom_header {
12948 u8 reserved[96]; /* Reserved for headers */
12949 char vmi_init[8]; /* VMI_Init jump point */
12950 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
12951+ char rom_data[8048]; /* rest of the option ROM */
12952 } __attribute__((packed));
12953
12954 struct pnp_header {
12955diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
12956index c6e0bee..fcb9f74 100644
12957--- a/arch/x86/include/asm/vmi_time.h
12958+++ b/arch/x86/include/asm/vmi_time.h
12959@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
12960 int (*wallclock_updated)(void);
12961 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
12962 void (*cancel_alarm)(u32 flags);
12963-} vmi_timer_ops;
12964+} __no_const vmi_timer_ops;
12965
12966 /* Prototypes */
12967 extern void __init vmi_time_init(void);
12968diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
12969index d0983d2..1f7c9e9 100644
12970--- a/arch/x86/include/asm/vsyscall.h
12971+++ b/arch/x86/include/asm/vsyscall.h
12972@@ -15,9 +15,10 @@ enum vsyscall_num {
12973
12974 #ifdef __KERNEL__
12975 #include <linux/seqlock.h>
12976+#include <linux/getcpu.h>
12977+#include <linux/time.h>
12978
12979 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
12980-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
12981
12982 /* Definitions for CONFIG_GENERIC_TIME definitions */
12983 #define __section_vsyscall_gtod_data __attribute__ \
12984@@ -31,7 +32,6 @@ enum vsyscall_num {
12985 #define VGETCPU_LSL 2
12986
12987 extern int __vgetcpu_mode;
12988-extern volatile unsigned long __jiffies;
12989
12990 /* kernel space (writeable) */
12991 extern int vgetcpu_mode;
12992@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
12993
12994 extern void map_vsyscall(void);
12995
12996+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
12997+extern time_t vtime(time_t *t);
12998+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
12999 #endif /* __KERNEL__ */
13000
13001 #endif /* _ASM_X86_VSYSCALL_H */
13002diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13003index 2c756fd..3377e37 100644
13004--- a/arch/x86/include/asm/x86_init.h
13005+++ b/arch/x86/include/asm/x86_init.h
13006@@ -28,7 +28,7 @@ struct x86_init_mpparse {
13007 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13008 void (*find_smp_config)(unsigned int reserve);
13009 void (*get_smp_config)(unsigned int early);
13010-};
13011+} __no_const;
13012
13013 /**
13014 * struct x86_init_resources - platform specific resource related ops
13015@@ -42,7 +42,7 @@ struct x86_init_resources {
13016 void (*probe_roms)(void);
13017 void (*reserve_resources)(void);
13018 char *(*memory_setup)(void);
13019-};
13020+} __no_const;
13021
13022 /**
13023 * struct x86_init_irqs - platform specific interrupt setup
13024@@ -55,7 +55,7 @@ struct x86_init_irqs {
13025 void (*pre_vector_init)(void);
13026 void (*intr_init)(void);
13027 void (*trap_init)(void);
13028-};
13029+} __no_const;
13030
13031 /**
13032 * struct x86_init_oem - oem platform specific customizing functions
13033@@ -65,7 +65,7 @@ struct x86_init_irqs {
13034 struct x86_init_oem {
13035 void (*arch_setup)(void);
13036 void (*banner)(void);
13037-};
13038+} __no_const;
13039
13040 /**
13041 * struct x86_init_paging - platform specific paging functions
13042@@ -75,7 +75,7 @@ struct x86_init_oem {
13043 struct x86_init_paging {
13044 void (*pagetable_setup_start)(pgd_t *base);
13045 void (*pagetable_setup_done)(pgd_t *base);
13046-};
13047+} __no_const;
13048
13049 /**
13050 * struct x86_init_timers - platform specific timer setup
13051@@ -88,7 +88,7 @@ struct x86_init_timers {
13052 void (*setup_percpu_clockev)(void);
13053 void (*tsc_pre_init)(void);
13054 void (*timer_init)(void);
13055-};
13056+} __no_const;
13057
13058 /**
13059 * struct x86_init_ops - functions for platform specific setup
13060@@ -101,7 +101,7 @@ struct x86_init_ops {
13061 struct x86_init_oem oem;
13062 struct x86_init_paging paging;
13063 struct x86_init_timers timers;
13064-};
13065+} __no_const;
13066
13067 /**
13068 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13069@@ -109,7 +109,7 @@ struct x86_init_ops {
13070 */
13071 struct x86_cpuinit_ops {
13072 void (*setup_percpu_clockev)(void);
13073-};
13074+} __no_const;
13075
13076 /**
13077 * struct x86_platform_ops - platform specific runtime functions
13078@@ -121,7 +121,7 @@ struct x86_platform_ops {
13079 unsigned long (*calibrate_tsc)(void);
13080 unsigned long (*get_wallclock)(void);
13081 int (*set_wallclock)(unsigned long nowtime);
13082-};
13083+} __no_const;
13084
13085 extern struct x86_init_ops x86_init;
13086 extern struct x86_cpuinit_ops x86_cpuinit;
13087diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13088index 727acc1..554f3eb 100644
13089--- a/arch/x86/include/asm/xsave.h
13090+++ b/arch/x86/include/asm/xsave.h
13091@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13092 static inline int xsave_user(struct xsave_struct __user *buf)
13093 {
13094 int err;
13095+
13096+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13097+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13098+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13099+#endif
13100+
13101 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13102 "2:\n"
13103 ".section .fixup,\"ax\"\n"
13104@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13105 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13106 {
13107 int err;
13108- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13109+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13110 u32 lmask = mask;
13111 u32 hmask = mask >> 32;
13112
13113+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13114+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13115+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13116+#endif
13117+
13118 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13119 "2:\n"
13120 ".section .fixup,\"ax\"\n"
13121diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13122index 6a564ac..9b1340c 100644
13123--- a/arch/x86/kernel/acpi/realmode/Makefile
13124+++ b/arch/x86/kernel/acpi/realmode/Makefile
13125@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13126 $(call cc-option, -fno-stack-protector) \
13127 $(call cc-option, -mpreferred-stack-boundary=2)
13128 KBUILD_CFLAGS += $(call cc-option, -m32)
13129+ifdef CONSTIFY_PLUGIN
13130+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13131+endif
13132 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13133 GCOV_PROFILE := n
13134
13135diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13136index 580b4e2..d4129e4 100644
13137--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13138+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13139@@ -91,6 +91,9 @@ _start:
13140 /* Do any other stuff... */
13141
13142 #ifndef CONFIG_64BIT
13143+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
13144+ call verify_cpu
13145+
13146 /* This could also be done in C code... */
13147 movl pmode_cr3, %eax
13148 movl %eax, %cr3
13149@@ -104,7 +107,7 @@ _start:
13150 movl %eax, %ecx
13151 orl %edx, %ecx
13152 jz 1f
13153- movl $0xc0000080, %ecx
13154+ mov $MSR_EFER, %ecx
13155 wrmsr
13156 1:
13157
13158@@ -114,6 +117,7 @@ _start:
13159 movl pmode_cr0, %eax
13160 movl %eax, %cr0
13161 jmp pmode_return
13162+# include "../../verify_cpu.S"
13163 #else
13164 pushw $0
13165 pushw trampoline_segment
13166diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13167index ca93638..7042f24 100644
13168--- a/arch/x86/kernel/acpi/sleep.c
13169+++ b/arch/x86/kernel/acpi/sleep.c
13170@@ -11,11 +11,12 @@
13171 #include <linux/cpumask.h>
13172 #include <asm/segment.h>
13173 #include <asm/desc.h>
13174+#include <asm/e820.h>
13175
13176 #include "realmode/wakeup.h"
13177 #include "sleep.h"
13178
13179-unsigned long acpi_wakeup_address;
13180+unsigned long acpi_wakeup_address = 0x2000;
13181 unsigned long acpi_realmode_flags;
13182
13183 /* address in low memory of the wakeup routine. */
13184@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13185 #else /* CONFIG_64BIT */
13186 header->trampoline_segment = setup_trampoline() >> 4;
13187 #ifdef CONFIG_SMP
13188- stack_start.sp = temp_stack + sizeof(temp_stack);
13189+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13190+
13191+ pax_open_kernel();
13192 early_gdt_descr.address =
13193 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13194+ pax_close_kernel();
13195+
13196 initial_gs = per_cpu_offset(smp_processor_id());
13197 #endif
13198 initial_code = (unsigned long)wakeup_long64;
13199@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13200 return;
13201 }
13202
13203- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13204-
13205- if (!acpi_realmode) {
13206- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13207- return;
13208- }
13209-
13210- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13211+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13212+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13213 }
13214
13215
13216diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13217index 8ded418..079961e 100644
13218--- a/arch/x86/kernel/acpi/wakeup_32.S
13219+++ b/arch/x86/kernel/acpi/wakeup_32.S
13220@@ -30,13 +30,11 @@ wakeup_pmode_return:
13221 # and restore the stack ... but you need gdt for this to work
13222 movl saved_context_esp, %esp
13223
13224- movl %cs:saved_magic, %eax
13225- cmpl $0x12345678, %eax
13226+ cmpl $0x12345678, saved_magic
13227 jne bogus_magic
13228
13229 # jump to place where we left off
13230- movl saved_eip, %eax
13231- jmp *%eax
13232+ jmp *(saved_eip)
13233
13234 bogus_magic:
13235 jmp bogus_magic
13236diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13237index de7353c..075da5f 100644
13238--- a/arch/x86/kernel/alternative.c
13239+++ b/arch/x86/kernel/alternative.c
13240@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13241
13242 BUG_ON(p->len > MAX_PATCH_LEN);
13243 /* prep the buffer with the original instructions */
13244- memcpy(insnbuf, p->instr, p->len);
13245+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13246 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13247 (unsigned long)p->instr, p->len);
13248
13249@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13250 if (smp_alt_once)
13251 free_init_pages("SMP alternatives",
13252 (unsigned long)__smp_locks,
13253- (unsigned long)__smp_locks_end);
13254+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13255
13256 restart_nmi();
13257 }
13258@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13259 * instructions. And on the local CPU you need to be protected again NMI or MCE
13260 * handlers seeing an inconsistent instruction while you patch.
13261 */
13262-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13263+static void *__kprobes text_poke_early(void *addr, const void *opcode,
13264 size_t len)
13265 {
13266 unsigned long flags;
13267 local_irq_save(flags);
13268- memcpy(addr, opcode, len);
13269+
13270+ pax_open_kernel();
13271+ memcpy(ktla_ktva(addr), opcode, len);
13272 sync_core();
13273+ pax_close_kernel();
13274+
13275 local_irq_restore(flags);
13276 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13277 that causes hangs on some VIA CPUs. */
13278@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13279 */
13280 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13281 {
13282- unsigned long flags;
13283- char *vaddr;
13284+ unsigned char *vaddr = ktla_ktva(addr);
13285 struct page *pages[2];
13286- int i;
13287+ size_t i;
13288
13289 if (!core_kernel_text((unsigned long)addr)) {
13290- pages[0] = vmalloc_to_page(addr);
13291- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13292+ pages[0] = vmalloc_to_page(vaddr);
13293+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13294 } else {
13295- pages[0] = virt_to_page(addr);
13296+ pages[0] = virt_to_page(vaddr);
13297 WARN_ON(!PageReserved(pages[0]));
13298- pages[1] = virt_to_page(addr + PAGE_SIZE);
13299+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13300 }
13301 BUG_ON(!pages[0]);
13302- local_irq_save(flags);
13303- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13304- if (pages[1])
13305- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13306- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13307- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13308- clear_fixmap(FIX_TEXT_POKE0);
13309- if (pages[1])
13310- clear_fixmap(FIX_TEXT_POKE1);
13311- local_flush_tlb();
13312- sync_core();
13313- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13314- that causes hangs on some VIA CPUs. */
13315+ text_poke_early(addr, opcode, len);
13316 for (i = 0; i < len; i++)
13317- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13318- local_irq_restore(flags);
13319+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13320 return addr;
13321 }
13322diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13323index 3a44b75..1601800 100644
13324--- a/arch/x86/kernel/amd_iommu.c
13325+++ b/arch/x86/kernel/amd_iommu.c
13326@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13327 }
13328 }
13329
13330-static struct dma_map_ops amd_iommu_dma_ops = {
13331+static const struct dma_map_ops amd_iommu_dma_ops = {
13332 .alloc_coherent = alloc_coherent,
13333 .free_coherent = free_coherent,
13334 .map_page = map_page,
13335diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13336index 1d2d670..8e3f477 100644
13337--- a/arch/x86/kernel/apic/apic.c
13338+++ b/arch/x86/kernel/apic/apic.c
13339@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13340 /*
13341 * Debug level, exported for io_apic.c
13342 */
13343-unsigned int apic_verbosity;
13344+int apic_verbosity;
13345
13346 int pic_mode;
13347
13348@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13349 apic_write(APIC_ESR, 0);
13350 v1 = apic_read(APIC_ESR);
13351 ack_APIC_irq();
13352- atomic_inc(&irq_err_count);
13353+ atomic_inc_unchecked(&irq_err_count);
13354
13355 /*
13356 * Here is what the APIC error bits mean:
13357@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13358 u16 *bios_cpu_apicid;
13359 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13360
13361+ pax_track_stack();
13362+
13363 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13364 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13365
13366diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13367index 8928d97..f799cea 100644
13368--- a/arch/x86/kernel/apic/io_apic.c
13369+++ b/arch/x86/kernel/apic/io_apic.c
13370@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13371 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13372 GFP_ATOMIC);
13373 if (!ioapic_entries)
13374- return 0;
13375+ return NULL;
13376
13377 for (apic = 0; apic < nr_ioapics; apic++) {
13378 ioapic_entries[apic] =
13379@@ -733,7 +733,7 @@ nomem:
13380 kfree(ioapic_entries[apic]);
13381 kfree(ioapic_entries);
13382
13383- return 0;
13384+ return NULL;
13385 }
13386
13387 /*
13388@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13389 }
13390 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13391
13392-void lock_vector_lock(void)
13393+void lock_vector_lock(void) __acquires(vector_lock)
13394 {
13395 /* Used to the online set of cpus does not change
13396 * during assign_irq_vector.
13397@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13398 spin_lock(&vector_lock);
13399 }
13400
13401-void unlock_vector_lock(void)
13402+void unlock_vector_lock(void) __releases(vector_lock)
13403 {
13404 spin_unlock(&vector_lock);
13405 }
13406@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13407 ack_APIC_irq();
13408 }
13409
13410-atomic_t irq_mis_count;
13411+atomic_unchecked_t irq_mis_count;
13412
13413 static void ack_apic_level(unsigned int irq)
13414 {
13415@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13416
13417 /* Tail end of version 0x11 I/O APIC bug workaround */
13418 if (!(v & (1 << (i & 0x1f)))) {
13419- atomic_inc(&irq_mis_count);
13420+ atomic_inc_unchecked(&irq_mis_count);
13421 spin_lock(&ioapic_lock);
13422 __mask_and_edge_IO_APIC_irq(cfg);
13423 __unmask_and_level_IO_APIC_irq(cfg);
13424diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13425index 151ace6..f317474 100644
13426--- a/arch/x86/kernel/apm_32.c
13427+++ b/arch/x86/kernel/apm_32.c
13428@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13429 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13430 * even though they are called in protected mode.
13431 */
13432-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13433+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13434 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13435
13436 static const char driver_version[] = "1.16ac"; /* no spaces */
13437@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13438 BUG_ON(cpu != 0);
13439 gdt = get_cpu_gdt_table(cpu);
13440 save_desc_40 = gdt[0x40 / 8];
13441+
13442+ pax_open_kernel();
13443 gdt[0x40 / 8] = bad_bios_desc;
13444+ pax_close_kernel();
13445
13446 apm_irq_save(flags);
13447 APM_DO_SAVE_SEGS;
13448@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13449 &call->esi);
13450 APM_DO_RESTORE_SEGS;
13451 apm_irq_restore(flags);
13452+
13453+ pax_open_kernel();
13454 gdt[0x40 / 8] = save_desc_40;
13455+ pax_close_kernel();
13456+
13457 put_cpu();
13458
13459 return call->eax & 0xff;
13460@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13461 BUG_ON(cpu != 0);
13462 gdt = get_cpu_gdt_table(cpu);
13463 save_desc_40 = gdt[0x40 / 8];
13464+
13465+ pax_open_kernel();
13466 gdt[0x40 / 8] = bad_bios_desc;
13467+ pax_close_kernel();
13468
13469 apm_irq_save(flags);
13470 APM_DO_SAVE_SEGS;
13471@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13472 &call->eax);
13473 APM_DO_RESTORE_SEGS;
13474 apm_irq_restore(flags);
13475+
13476+ pax_open_kernel();
13477 gdt[0x40 / 8] = save_desc_40;
13478+ pax_close_kernel();
13479+
13480 put_cpu();
13481 return error;
13482 }
13483@@ -975,7 +989,7 @@ recalc:
13484
13485 static void apm_power_off(void)
13486 {
13487- unsigned char po_bios_call[] = {
13488+ const unsigned char po_bios_call[] = {
13489 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13490 0x8e, 0xd0, /* movw ax,ss */
13491 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13492@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13493 * code to that CPU.
13494 */
13495 gdt = get_cpu_gdt_table(0);
13496+
13497+ pax_open_kernel();
13498 set_desc_base(&gdt[APM_CS >> 3],
13499 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13500 set_desc_base(&gdt[APM_CS_16 >> 3],
13501 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13502 set_desc_base(&gdt[APM_DS >> 3],
13503 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13504+ pax_close_kernel();
13505
13506 proc_create("apm", 0, NULL, &apm_file_ops);
13507
13508diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13509index dfdbf64..9b2b6ce 100644
13510--- a/arch/x86/kernel/asm-offsets_32.c
13511+++ b/arch/x86/kernel/asm-offsets_32.c
13512@@ -51,7 +51,6 @@ void foo(void)
13513 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13514 BLANK();
13515
13516- OFFSET(TI_task, thread_info, task);
13517 OFFSET(TI_exec_domain, thread_info, exec_domain);
13518 OFFSET(TI_flags, thread_info, flags);
13519 OFFSET(TI_status, thread_info, status);
13520@@ -60,6 +59,8 @@ void foo(void)
13521 OFFSET(TI_restart_block, thread_info, restart_block);
13522 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13523 OFFSET(TI_cpu, thread_info, cpu);
13524+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13525+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13526 BLANK();
13527
13528 OFFSET(GDS_size, desc_ptr, size);
13529@@ -99,6 +100,7 @@ void foo(void)
13530
13531 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13532 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13533+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13534 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13535 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13536 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13537@@ -115,6 +117,11 @@ void foo(void)
13538 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13539 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13540 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13541+
13542+#ifdef CONFIG_PAX_KERNEXEC
13543+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13544+#endif
13545+
13546 #endif
13547
13548 #ifdef CONFIG_XEN
13549diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13550index 4a6aeed..371de20 100644
13551--- a/arch/x86/kernel/asm-offsets_64.c
13552+++ b/arch/x86/kernel/asm-offsets_64.c
13553@@ -44,6 +44,8 @@ int main(void)
13554 ENTRY(addr_limit);
13555 ENTRY(preempt_count);
13556 ENTRY(status);
13557+ ENTRY(lowest_stack);
13558+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13559 #ifdef CONFIG_IA32_EMULATION
13560 ENTRY(sysenter_return);
13561 #endif
13562@@ -63,6 +65,18 @@ int main(void)
13563 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13564 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13565 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13566+
13567+#ifdef CONFIG_PAX_KERNEXEC
13568+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13569+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13570+#endif
13571+
13572+#ifdef CONFIG_PAX_MEMORY_UDEREF
13573+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13574+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13575+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13576+#endif
13577+
13578 #endif
13579
13580
13581@@ -115,6 +129,7 @@ int main(void)
13582 ENTRY(cr8);
13583 BLANK();
13584 #undef ENTRY
13585+ DEFINE(TSS_size, sizeof(struct tss_struct));
13586 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13587 BLANK();
13588 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13589@@ -130,6 +145,7 @@ int main(void)
13590
13591 BLANK();
13592 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13593+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13594 #ifdef CONFIG_XEN
13595 BLANK();
13596 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13597diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13598index ff502cc..dc5133e 100644
13599--- a/arch/x86/kernel/cpu/Makefile
13600+++ b/arch/x86/kernel/cpu/Makefile
13601@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13602 CFLAGS_REMOVE_common.o = -pg
13603 endif
13604
13605-# Make sure load_percpu_segment has no stackprotector
13606-nostackp := $(call cc-option, -fno-stack-protector)
13607-CFLAGS_common.o := $(nostackp)
13608-
13609 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13610 obj-y += proc.o capflags.o powerflags.o common.o
13611 obj-y += vmware.o hypervisor.o sched.o
13612diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13613index 6e082dc..a0b5f36 100644
13614--- a/arch/x86/kernel/cpu/amd.c
13615+++ b/arch/x86/kernel/cpu/amd.c
13616@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13617 unsigned int size)
13618 {
13619 /* AMD errata T13 (order #21922) */
13620- if ((c->x86 == 6)) {
13621+ if (c->x86 == 6) {
13622 /* Duron Rev A0 */
13623 if (c->x86_model == 3 && c->x86_mask == 0)
13624 size = 64;
13625diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13626index 4e34d10..ba6bc97 100644
13627--- a/arch/x86/kernel/cpu/common.c
13628+++ b/arch/x86/kernel/cpu/common.c
13629@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13630
13631 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13632
13633-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13634-#ifdef CONFIG_X86_64
13635- /*
13636- * We need valid kernel segments for data and code in long mode too
13637- * IRET will check the segment types kkeil 2000/10/28
13638- * Also sysret mandates a special GDT layout
13639- *
13640- * TLS descriptors are currently at a different place compared to i386.
13641- * Hopefully nobody expects them at a fixed place (Wine?)
13642- */
13643- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13644- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13645- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13646- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13647- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13648- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13649-#else
13650- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13651- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13652- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13653- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13654- /*
13655- * Segments used for calling PnP BIOS have byte granularity.
13656- * They code segments and data segments have fixed 64k limits,
13657- * the transfer segment sizes are set at run time.
13658- */
13659- /* 32-bit code */
13660- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13661- /* 16-bit code */
13662- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13663- /* 16-bit data */
13664- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13665- /* 16-bit data */
13666- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13667- /* 16-bit data */
13668- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13669- /*
13670- * The APM segments have byte granularity and their bases
13671- * are set at run time. All have 64k limits.
13672- */
13673- /* 32-bit code */
13674- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13675- /* 16-bit code */
13676- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13677- /* data */
13678- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13679-
13680- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13681- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13682- GDT_STACK_CANARY_INIT
13683-#endif
13684-} };
13685-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13686-
13687 static int __init x86_xsave_setup(char *s)
13688 {
13689 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13690@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13691 {
13692 struct desc_ptr gdt_descr;
13693
13694- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13695+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13696 gdt_descr.size = GDT_SIZE - 1;
13697 load_gdt(&gdt_descr);
13698 /* Reload the per-cpu base */
13699@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13700 /* Filter out anything that depends on CPUID levels we don't have */
13701 filter_cpuid_features(c, true);
13702
13703+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13704+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13705+#endif
13706+
13707 /* If the model name is still unset, do table lookup. */
13708 if (!c->x86_model_id[0]) {
13709 const char *p;
13710@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13711 }
13712 __setup("clearcpuid=", setup_disablecpuid);
13713
13714+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13715+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13716+
13717 #ifdef CONFIG_X86_64
13718 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13719
13720@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13721 EXPORT_PER_CPU_SYMBOL(current_task);
13722
13723 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13724- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13725+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13726 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13727
13728 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13729@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13730 {
13731 memset(regs, 0, sizeof(struct pt_regs));
13732 regs->fs = __KERNEL_PERCPU;
13733- regs->gs = __KERNEL_STACK_CANARY;
13734+ savesegment(gs, regs->gs);
13735
13736 return regs;
13737 }
13738@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13739 int i;
13740
13741 cpu = stack_smp_processor_id();
13742- t = &per_cpu(init_tss, cpu);
13743+ t = init_tss + cpu;
13744 orig_ist = &per_cpu(orig_ist, cpu);
13745
13746 #ifdef CONFIG_NUMA
13747@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13748 switch_to_new_gdt(cpu);
13749 loadsegment(fs, 0);
13750
13751- load_idt((const struct desc_ptr *)&idt_descr);
13752+ load_idt(&idt_descr);
13753
13754 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13755 syscall_init();
13756@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13757 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13758 barrier();
13759
13760- check_efer();
13761 if (cpu != 0)
13762 enable_x2apic();
13763
13764@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13765 {
13766 int cpu = smp_processor_id();
13767 struct task_struct *curr = current;
13768- struct tss_struct *t = &per_cpu(init_tss, cpu);
13769+ struct tss_struct *t = init_tss + cpu;
13770 struct thread_struct *thread = &curr->thread;
13771
13772 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13773diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13774index 6a77cca..4f4fca0 100644
13775--- a/arch/x86/kernel/cpu/intel.c
13776+++ b/arch/x86/kernel/cpu/intel.c
13777@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13778 * Update the IDT descriptor and reload the IDT so that
13779 * it uses the read-only mapped virtual address.
13780 */
13781- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13782+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13783 load_idt(&idt_descr);
13784 }
13785 #endif
13786diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13787index 417990f..96dc36b 100644
13788--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
13789+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
13790@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13791 return ret;
13792 }
13793
13794-static struct sysfs_ops sysfs_ops = {
13795+static const struct sysfs_ops sysfs_ops = {
13796 .show = show,
13797 .store = store,
13798 };
13799diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13800index 472763d..9831e11 100644
13801--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13802+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13803@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13804 static int inject_init(void)
13805 {
13806 printk(KERN_INFO "Machine check injector initialized\n");
13807- mce_chrdev_ops.write = mce_write;
13808+ pax_open_kernel();
13809+ *(void **)&mce_chrdev_ops.write = mce_write;
13810+ pax_close_kernel();
13811 register_die_notifier(&mce_raise_nb);
13812 return 0;
13813 }
13814diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13815index 0f16a2b..21740f5 100644
13816--- a/arch/x86/kernel/cpu/mcheck/mce.c
13817+++ b/arch/x86/kernel/cpu/mcheck/mce.c
13818@@ -43,6 +43,7 @@
13819 #include <asm/ipi.h>
13820 #include <asm/mce.h>
13821 #include <asm/msr.h>
13822+#include <asm/local.h>
13823
13824 #include "mce-internal.h"
13825
13826@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
13827 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13828 m->cs, m->ip);
13829
13830- if (m->cs == __KERNEL_CS)
13831+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13832 print_symbol("{%s}", m->ip);
13833 pr_cont("\n");
13834 }
13835@@ -221,10 +222,10 @@ static void print_mce_tail(void)
13836
13837 #define PANIC_TIMEOUT 5 /* 5 seconds */
13838
13839-static atomic_t mce_paniced;
13840+static atomic_unchecked_t mce_paniced;
13841
13842 static int fake_panic;
13843-static atomic_t mce_fake_paniced;
13844+static atomic_unchecked_t mce_fake_paniced;
13845
13846 /* Panic in progress. Enable interrupts and wait for final IPI */
13847 static void wait_for_panic(void)
13848@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13849 /*
13850 * Make sure only one CPU runs in machine check panic
13851 */
13852- if (atomic_inc_return(&mce_paniced) > 1)
13853+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13854 wait_for_panic();
13855 barrier();
13856
13857@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13858 console_verbose();
13859 } else {
13860 /* Don't log too much for fake panic */
13861- if (atomic_inc_return(&mce_fake_paniced) > 1)
13862+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13863 return;
13864 }
13865 print_mce_head();
13866@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
13867 * might have been modified by someone else.
13868 */
13869 rmb();
13870- if (atomic_read(&mce_paniced))
13871+ if (atomic_read_unchecked(&mce_paniced))
13872 wait_for_panic();
13873 if (!monarch_timeout)
13874 goto out;
13875@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13876 }
13877
13878 /* Call the installed machine check handler for this CPU setup. */
13879-void (*machine_check_vector)(struct pt_regs *, long error_code) =
13880+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13881 unexpected_machine_check;
13882
13883 /*
13884@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13885 return;
13886 }
13887
13888+ pax_open_kernel();
13889 machine_check_vector = do_machine_check;
13890+ pax_close_kernel();
13891
13892 mce_init();
13893 mce_cpu_features(c);
13894@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13895 */
13896
13897 static DEFINE_SPINLOCK(mce_state_lock);
13898-static int open_count; /* #times opened */
13899+static local_t open_count; /* #times opened */
13900 static int open_exclu; /* already open exclusive? */
13901
13902 static int mce_open(struct inode *inode, struct file *file)
13903 {
13904 spin_lock(&mce_state_lock);
13905
13906- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
13907+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
13908 spin_unlock(&mce_state_lock);
13909
13910 return -EBUSY;
13911@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
13912
13913 if (file->f_flags & O_EXCL)
13914 open_exclu = 1;
13915- open_count++;
13916+ local_inc(&open_count);
13917
13918 spin_unlock(&mce_state_lock);
13919
13920@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
13921 {
13922 spin_lock(&mce_state_lock);
13923
13924- open_count--;
13925+ local_dec(&open_count);
13926 open_exclu = 0;
13927
13928 spin_unlock(&mce_state_lock);
13929@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
13930 static void mce_reset(void)
13931 {
13932 cpu_missing = 0;
13933- atomic_set(&mce_fake_paniced, 0);
13934+ atomic_set_unchecked(&mce_fake_paniced, 0);
13935 atomic_set(&mce_executing, 0);
13936 atomic_set(&mce_callin, 0);
13937 atomic_set(&global_nwo, 0);
13938diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13939index ef3cd31..9d2f6ab 100644
13940--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
13941+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13942@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13943 return ret;
13944 }
13945
13946-static struct sysfs_ops threshold_ops = {
13947+static const struct sysfs_ops threshold_ops = {
13948 .show = show,
13949 .store = store,
13950 };
13951diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13952index 5c0e653..0882b0a 100644
13953--- a/arch/x86/kernel/cpu/mcheck/p5.c
13954+++ b/arch/x86/kernel/cpu/mcheck/p5.c
13955@@ -12,6 +12,7 @@
13956 #include <asm/system.h>
13957 #include <asm/mce.h>
13958 #include <asm/msr.h>
13959+#include <asm/pgtable.h>
13960
13961 /* By default disabled */
13962 int mce_p5_enabled __read_mostly;
13963@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13964 if (!cpu_has(c, X86_FEATURE_MCE))
13965 return;
13966
13967+ pax_open_kernel();
13968 machine_check_vector = pentium_machine_check;
13969+ pax_close_kernel();
13970 /* Make sure the vector pointer is visible before we enable MCEs: */
13971 wmb();
13972
13973diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13974index 54060f5..c1a7577 100644
13975--- a/arch/x86/kernel/cpu/mcheck/winchip.c
13976+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13977@@ -11,6 +11,7 @@
13978 #include <asm/system.h>
13979 #include <asm/mce.h>
13980 #include <asm/msr.h>
13981+#include <asm/pgtable.h>
13982
13983 /* Machine check handler for WinChip C6: */
13984 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13985@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13986 {
13987 u32 lo, hi;
13988
13989+ pax_open_kernel();
13990 machine_check_vector = winchip_machine_check;
13991+ pax_close_kernel();
13992 /* Make sure the vector pointer is visible before we enable MCEs: */
13993 wmb();
13994
13995diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
13996index 33af141..92ba9cd 100644
13997--- a/arch/x86/kernel/cpu/mtrr/amd.c
13998+++ b/arch/x86/kernel/cpu/mtrr/amd.c
13999@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14000 return 0;
14001 }
14002
14003-static struct mtrr_ops amd_mtrr_ops = {
14004+static const struct mtrr_ops amd_mtrr_ops = {
14005 .vendor = X86_VENDOR_AMD,
14006 .set = amd_set_mtrr,
14007 .get = amd_get_mtrr,
14008diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14009index de89f14..316fe3e 100644
14010--- a/arch/x86/kernel/cpu/mtrr/centaur.c
14011+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14012@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14013 return 0;
14014 }
14015
14016-static struct mtrr_ops centaur_mtrr_ops = {
14017+static const struct mtrr_ops centaur_mtrr_ops = {
14018 .vendor = X86_VENDOR_CENTAUR,
14019 .set = centaur_set_mcr,
14020 .get = centaur_get_mcr,
14021diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14022index 228d982..68a3343 100644
14023--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14024+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14025@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14026 post_set();
14027 }
14028
14029-static struct mtrr_ops cyrix_mtrr_ops = {
14030+static const struct mtrr_ops cyrix_mtrr_ops = {
14031 .vendor = X86_VENDOR_CYRIX,
14032 .set_all = cyrix_set_all,
14033 .set = cyrix_set_arr,
14034diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14035index 55da0c5..4d75584 100644
14036--- a/arch/x86/kernel/cpu/mtrr/generic.c
14037+++ b/arch/x86/kernel/cpu/mtrr/generic.c
14038@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14039 /*
14040 * Generic structure...
14041 */
14042-struct mtrr_ops generic_mtrr_ops = {
14043+const struct mtrr_ops generic_mtrr_ops = {
14044 .use_intel_if = 1,
14045 .set_all = generic_set_all,
14046 .get = generic_get_mtrr,
14047diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14048index fd60f09..c94ef52 100644
14049--- a/arch/x86/kernel/cpu/mtrr/main.c
14050+++ b/arch/x86/kernel/cpu/mtrr/main.c
14051@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14052 u64 size_or_mask, size_and_mask;
14053 static bool mtrr_aps_delayed_init;
14054
14055-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14056+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14057
14058-struct mtrr_ops *mtrr_if;
14059+const struct mtrr_ops *mtrr_if;
14060
14061 static void set_mtrr(unsigned int reg, unsigned long base,
14062 unsigned long size, mtrr_type type);
14063
14064-void set_mtrr_ops(struct mtrr_ops *ops)
14065+void set_mtrr_ops(const struct mtrr_ops *ops)
14066 {
14067 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14068 mtrr_ops[ops->vendor] = ops;
14069diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14070index a501dee..816c719 100644
14071--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14072+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14073@@ -25,14 +25,14 @@ struct mtrr_ops {
14074 int (*validate_add_page)(unsigned long base, unsigned long size,
14075 unsigned int type);
14076 int (*have_wrcomb)(void);
14077-};
14078+} __do_const;
14079
14080 extern int generic_get_free_region(unsigned long base, unsigned long size,
14081 int replace_reg);
14082 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14083 unsigned int type);
14084
14085-extern struct mtrr_ops generic_mtrr_ops;
14086+extern const struct mtrr_ops generic_mtrr_ops;
14087
14088 extern int positive_have_wrcomb(void);
14089
14090@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14091 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14092 void get_mtrr_state(void);
14093
14094-extern void set_mtrr_ops(struct mtrr_ops *ops);
14095+extern void set_mtrr_ops(const struct mtrr_ops *ops);
14096
14097 extern u64 size_or_mask, size_and_mask;
14098-extern struct mtrr_ops *mtrr_if;
14099+extern const struct mtrr_ops *mtrr_if;
14100
14101 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14102 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14103diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14104index 0ff02ca..fc49a60 100644
14105--- a/arch/x86/kernel/cpu/perf_event.c
14106+++ b/arch/x86/kernel/cpu/perf_event.c
14107@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14108 * count to the generic event atomically:
14109 */
14110 again:
14111- prev_raw_count = atomic64_read(&hwc->prev_count);
14112+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14113 rdmsrl(hwc->event_base + idx, new_raw_count);
14114
14115- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14116+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14117 new_raw_count) != prev_raw_count)
14118 goto again;
14119
14120@@ -741,7 +741,7 @@ again:
14121 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14122 delta >>= shift;
14123
14124- atomic64_add(delta, &event->count);
14125+ atomic64_add_unchecked(delta, &event->count);
14126 atomic64_sub(delta, &hwc->period_left);
14127
14128 return new_raw_count;
14129@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14130 * The hw event starts counting from this event offset,
14131 * mark it to be able to extra future deltas:
14132 */
14133- atomic64_set(&hwc->prev_count, (u64)-left);
14134+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14135
14136 err = checking_wrmsrl(hwc->event_base + idx,
14137 (u64)(-left) & x86_pmu.event_mask);
14138@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14139 break;
14140
14141 callchain_store(entry, frame.return_address);
14142- fp = frame.next_frame;
14143+ fp = (__force const void __user *)frame.next_frame;
14144 }
14145 }
14146
14147diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14148index 898df97..9e82503 100644
14149--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14150+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14151@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14152
14153 /* Interface defining a CPU specific perfctr watchdog */
14154 struct wd_ops {
14155- int (*reserve)(void);
14156- void (*unreserve)(void);
14157- int (*setup)(unsigned nmi_hz);
14158- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14159- void (*stop)(void);
14160+ int (* const reserve)(void);
14161+ void (* const unreserve)(void);
14162+ int (* const setup)(unsigned nmi_hz);
14163+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14164+ void (* const stop)(void);
14165 unsigned perfctr;
14166 unsigned evntsel;
14167 u64 checkbit;
14168@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14169 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14170 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14171
14172+/* cannot be const */
14173 static struct wd_ops intel_arch_wd_ops;
14174
14175 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14176@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14177 return 1;
14178 }
14179
14180+/* cannot be const */
14181 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14182 .reserve = single_msr_reserve,
14183 .unreserve = single_msr_unreserve,
14184diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14185index ff95824..2ffdcb5 100644
14186--- a/arch/x86/kernel/crash.c
14187+++ b/arch/x86/kernel/crash.c
14188@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14189 regs = args->regs;
14190
14191 #ifdef CONFIG_X86_32
14192- if (!user_mode_vm(regs)) {
14193+ if (!user_mode(regs)) {
14194 crash_fixup_ss_esp(&fixed_regs, regs);
14195 regs = &fixed_regs;
14196 }
14197diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14198index 37250fe..bf2ec74 100644
14199--- a/arch/x86/kernel/doublefault_32.c
14200+++ b/arch/x86/kernel/doublefault_32.c
14201@@ -11,7 +11,7 @@
14202
14203 #define DOUBLEFAULT_STACKSIZE (1024)
14204 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14205-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14206+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14207
14208 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14209
14210@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14211 unsigned long gdt, tss;
14212
14213 store_gdt(&gdt_desc);
14214- gdt = gdt_desc.address;
14215+ gdt = (unsigned long)gdt_desc.address;
14216
14217 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14218
14219@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14220 /* 0x2 bit is always set */
14221 .flags = X86_EFLAGS_SF | 0x2,
14222 .sp = STACK_START,
14223- .es = __USER_DS,
14224+ .es = __KERNEL_DS,
14225 .cs = __KERNEL_CS,
14226 .ss = __KERNEL_DS,
14227- .ds = __USER_DS,
14228+ .ds = __KERNEL_DS,
14229 .fs = __KERNEL_PERCPU,
14230
14231 .__cr3 = __pa_nodebug(swapper_pg_dir),
14232diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14233index 2d8a371..4fa6ae6 100644
14234--- a/arch/x86/kernel/dumpstack.c
14235+++ b/arch/x86/kernel/dumpstack.c
14236@@ -2,6 +2,9 @@
14237 * Copyright (C) 1991, 1992 Linus Torvalds
14238 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14239 */
14240+#ifdef CONFIG_GRKERNSEC_HIDESYM
14241+#define __INCLUDED_BY_HIDESYM 1
14242+#endif
14243 #include <linux/kallsyms.h>
14244 #include <linux/kprobes.h>
14245 #include <linux/uaccess.h>
14246@@ -28,7 +31,7 @@ static int die_counter;
14247
14248 void printk_address(unsigned long address, int reliable)
14249 {
14250- printk(" [<%p>] %s%pS\n", (void *) address,
14251+ printk(" [<%p>] %s%pA\n", (void *) address,
14252 reliable ? "" : "? ", (void *) address);
14253 }
14254
14255@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14256 static void
14257 print_ftrace_graph_addr(unsigned long addr, void *data,
14258 const struct stacktrace_ops *ops,
14259- struct thread_info *tinfo, int *graph)
14260+ struct task_struct *task, int *graph)
14261 {
14262- struct task_struct *task = tinfo->task;
14263 unsigned long ret_addr;
14264 int index = task->curr_ret_stack;
14265
14266@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14267 static inline void
14268 print_ftrace_graph_addr(unsigned long addr, void *data,
14269 const struct stacktrace_ops *ops,
14270- struct thread_info *tinfo, int *graph)
14271+ struct task_struct *task, int *graph)
14272 { }
14273 #endif
14274
14275@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14276 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14277 */
14278
14279-static inline int valid_stack_ptr(struct thread_info *tinfo,
14280- void *p, unsigned int size, void *end)
14281+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14282 {
14283- void *t = tinfo;
14284 if (end) {
14285 if (p < end && p >= (end-THREAD_SIZE))
14286 return 1;
14287@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14288 }
14289
14290 unsigned long
14291-print_context_stack(struct thread_info *tinfo,
14292+print_context_stack(struct task_struct *task, void *stack_start,
14293 unsigned long *stack, unsigned long bp,
14294 const struct stacktrace_ops *ops, void *data,
14295 unsigned long *end, int *graph)
14296 {
14297 struct stack_frame *frame = (struct stack_frame *)bp;
14298
14299- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14300+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14301 unsigned long addr;
14302
14303 addr = *stack;
14304@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14305 } else {
14306 ops->address(data, addr, 0);
14307 }
14308- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14309+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14310 }
14311 stack++;
14312 }
14313@@ -180,7 +180,7 @@ void dump_stack(void)
14314 #endif
14315
14316 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14317- current->pid, current->comm, print_tainted(),
14318+ task_pid_nr(current), current->comm, print_tainted(),
14319 init_utsname()->release,
14320 (int)strcspn(init_utsname()->version, " "),
14321 init_utsname()->version);
14322@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14323 return flags;
14324 }
14325
14326+extern void gr_handle_kernel_exploit(void);
14327+
14328 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14329 {
14330 if (regs && kexec_should_crash(current))
14331@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14332 panic("Fatal exception in interrupt");
14333 if (panic_on_oops)
14334 panic("Fatal exception");
14335- do_exit(signr);
14336+
14337+ gr_handle_kernel_exploit();
14338+
14339+ do_group_exit(signr);
14340 }
14341
14342 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14343@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14344 unsigned long flags = oops_begin();
14345 int sig = SIGSEGV;
14346
14347- if (!user_mode_vm(regs))
14348+ if (!user_mode(regs))
14349 report_bug(regs->ip, regs);
14350
14351 if (__die(str, regs, err))
14352diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14353index 81086c2..13e8b17 100644
14354--- a/arch/x86/kernel/dumpstack.h
14355+++ b/arch/x86/kernel/dumpstack.h
14356@@ -15,7 +15,7 @@
14357 #endif
14358
14359 extern unsigned long
14360-print_context_stack(struct thread_info *tinfo,
14361+print_context_stack(struct task_struct *task, void *stack_start,
14362 unsigned long *stack, unsigned long bp,
14363 const struct stacktrace_ops *ops, void *data,
14364 unsigned long *end, int *graph);
14365diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14366index f7dd2a7..504f53b 100644
14367--- a/arch/x86/kernel/dumpstack_32.c
14368+++ b/arch/x86/kernel/dumpstack_32.c
14369@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14370 #endif
14371
14372 for (;;) {
14373- struct thread_info *context;
14374+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14375+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14376
14377- context = (struct thread_info *)
14378- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14379- bp = print_context_stack(context, stack, bp, ops,
14380- data, NULL, &graph);
14381-
14382- stack = (unsigned long *)context->previous_esp;
14383- if (!stack)
14384+ if (stack_start == task_stack_page(task))
14385 break;
14386+ stack = *(unsigned long **)stack_start;
14387 if (ops->stack(data, "IRQ") < 0)
14388 break;
14389 touch_nmi_watchdog();
14390@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14391 * When in-kernel, we also print out the stack and code at the
14392 * time of the fault..
14393 */
14394- if (!user_mode_vm(regs)) {
14395+ if (!user_mode(regs)) {
14396 unsigned int code_prologue = code_bytes * 43 / 64;
14397 unsigned int code_len = code_bytes;
14398 unsigned char c;
14399 u8 *ip;
14400+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14401
14402 printk(KERN_EMERG "Stack:\n");
14403 show_stack_log_lvl(NULL, regs, &regs->sp,
14404@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14405
14406 printk(KERN_EMERG "Code: ");
14407
14408- ip = (u8 *)regs->ip - code_prologue;
14409+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14410 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14411 /* try starting at IP */
14412- ip = (u8 *)regs->ip;
14413+ ip = (u8 *)regs->ip + cs_base;
14414 code_len = code_len - code_prologue + 1;
14415 }
14416 for (i = 0; i < code_len; i++, ip++) {
14417@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14418 printk(" Bad EIP value.");
14419 break;
14420 }
14421- if (ip == (u8 *)regs->ip)
14422+ if (ip == (u8 *)regs->ip + cs_base)
14423 printk("<%02x> ", c);
14424 else
14425 printk("%02x ", c);
14426@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14427 printk("\n");
14428 }
14429
14430+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14431+void pax_check_alloca(unsigned long size)
14432+{
14433+ unsigned long sp = (unsigned long)&sp, stack_left;
14434+
14435+ /* all kernel stacks are of the same size */
14436+ stack_left = sp & (THREAD_SIZE - 1);
14437+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14438+}
14439+EXPORT_SYMBOL(pax_check_alloca);
14440+#endif
14441+
14442 int is_valid_bugaddr(unsigned long ip)
14443 {
14444 unsigned short ud2;
14445
14446+ ip = ktla_ktva(ip);
14447 if (ip < PAGE_OFFSET)
14448 return 0;
14449 if (probe_kernel_address((unsigned short *)ip, ud2))
14450diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14451index a071e6b..36cd585 100644
14452--- a/arch/x86/kernel/dumpstack_64.c
14453+++ b/arch/x86/kernel/dumpstack_64.c
14454@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14455 unsigned long *irq_stack_end =
14456 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14457 unsigned used = 0;
14458- struct thread_info *tinfo;
14459 int graph = 0;
14460+ void *stack_start;
14461
14462 if (!task)
14463 task = current;
14464@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14465 * current stack address. If the stacks consist of nested
14466 * exceptions
14467 */
14468- tinfo = task_thread_info(task);
14469 for (;;) {
14470 char *id;
14471 unsigned long *estack_end;
14472+
14473 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14474 &used, &id);
14475
14476@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14477 if (ops->stack(data, id) < 0)
14478 break;
14479
14480- bp = print_context_stack(tinfo, stack, bp, ops,
14481+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14482 data, estack_end, &graph);
14483 ops->stack(data, "<EOE>");
14484 /*
14485@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14486 if (stack >= irq_stack && stack < irq_stack_end) {
14487 if (ops->stack(data, "IRQ") < 0)
14488 break;
14489- bp = print_context_stack(tinfo, stack, bp,
14490+ bp = print_context_stack(task, irq_stack, stack, bp,
14491 ops, data, irq_stack_end, &graph);
14492 /*
14493 * We link to the next stack (which would be
14494@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14495 /*
14496 * This handles the process stack:
14497 */
14498- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14499+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14500+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14501 put_cpu();
14502 }
14503 EXPORT_SYMBOL(dump_trace);
14504@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14505 return ud2 == 0x0b0f;
14506 }
14507
14508+
14509+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14510+void pax_check_alloca(unsigned long size)
14511+{
14512+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14513+ unsigned cpu, used;
14514+ char *id;
14515+
14516+ /* check the process stack first */
14517+ stack_start = (unsigned long)task_stack_page(current);
14518+ stack_end = stack_start + THREAD_SIZE;
14519+ if (likely(stack_start <= sp && sp < stack_end)) {
14520+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14521+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14522+ return;
14523+ }
14524+
14525+ cpu = get_cpu();
14526+
14527+ /* check the irq stacks */
14528+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14529+ stack_start = stack_end - IRQ_STACK_SIZE;
14530+ if (stack_start <= sp && sp < stack_end) {
14531+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14532+ put_cpu();
14533+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14534+ return;
14535+ }
14536+
14537+ /* check the exception stacks */
14538+ used = 0;
14539+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14540+ stack_start = stack_end - EXCEPTION_STKSZ;
14541+ if (stack_end && stack_start <= sp && sp < stack_end) {
14542+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14543+ put_cpu();
14544+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14545+ return;
14546+ }
14547+
14548+ put_cpu();
14549+
14550+ /* unknown stack */
14551+ BUG();
14552+}
14553+EXPORT_SYMBOL(pax_check_alloca);
14554+#endif
14555diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14556index a89739a..95e0c48 100644
14557--- a/arch/x86/kernel/e820.c
14558+++ b/arch/x86/kernel/e820.c
14559@@ -733,7 +733,7 @@ struct early_res {
14560 };
14561 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14562 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14563- {}
14564+ { 0, 0, {0}, 0 }
14565 };
14566
14567 static int __init find_overlapped_early(u64 start, u64 end)
14568diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14569index b9c830c..1e41a96 100644
14570--- a/arch/x86/kernel/early_printk.c
14571+++ b/arch/x86/kernel/early_printk.c
14572@@ -7,6 +7,7 @@
14573 #include <linux/pci_regs.h>
14574 #include <linux/pci_ids.h>
14575 #include <linux/errno.h>
14576+#include <linux/sched.h>
14577 #include <asm/io.h>
14578 #include <asm/processor.h>
14579 #include <asm/fcntl.h>
14580@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14581 int n;
14582 va_list ap;
14583
14584+ pax_track_stack();
14585+
14586 va_start(ap, fmt);
14587 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14588 early_console->write(early_console, buf, n);
14589diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14590index 5cab48e..b025f9b 100644
14591--- a/arch/x86/kernel/efi_32.c
14592+++ b/arch/x86/kernel/efi_32.c
14593@@ -38,70 +38,56 @@
14594 */
14595
14596 static unsigned long efi_rt_eflags;
14597-static pgd_t efi_bak_pg_dir_pointer[2];
14598+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14599
14600-void efi_call_phys_prelog(void)
14601+void __init efi_call_phys_prelog(void)
14602 {
14603- unsigned long cr4;
14604- unsigned long temp;
14605 struct desc_ptr gdt_descr;
14606
14607+#ifdef CONFIG_PAX_KERNEXEC
14608+ struct desc_struct d;
14609+#endif
14610+
14611 local_irq_save(efi_rt_eflags);
14612
14613- /*
14614- * If I don't have PAE, I should just duplicate two entries in page
14615- * directory. If I have PAE, I just need to duplicate one entry in
14616- * page directory.
14617- */
14618- cr4 = read_cr4_safe();
14619-
14620- if (cr4 & X86_CR4_PAE) {
14621- efi_bak_pg_dir_pointer[0].pgd =
14622- swapper_pg_dir[pgd_index(0)].pgd;
14623- swapper_pg_dir[0].pgd =
14624- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14625- } else {
14626- efi_bak_pg_dir_pointer[0].pgd =
14627- swapper_pg_dir[pgd_index(0)].pgd;
14628- efi_bak_pg_dir_pointer[1].pgd =
14629- swapper_pg_dir[pgd_index(0x400000)].pgd;
14630- swapper_pg_dir[pgd_index(0)].pgd =
14631- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14632- temp = PAGE_OFFSET + 0x400000;
14633- swapper_pg_dir[pgd_index(0x400000)].pgd =
14634- swapper_pg_dir[pgd_index(temp)].pgd;
14635- }
14636+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14637+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14638+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14639
14640 /*
14641 * After the lock is released, the original page table is restored.
14642 */
14643 __flush_tlb_all();
14644
14645+#ifdef CONFIG_PAX_KERNEXEC
14646+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14647+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14648+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14649+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14650+#endif
14651+
14652 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14653 gdt_descr.size = GDT_SIZE - 1;
14654 load_gdt(&gdt_descr);
14655 }
14656
14657-void efi_call_phys_epilog(void)
14658+void __init efi_call_phys_epilog(void)
14659 {
14660- unsigned long cr4;
14661 struct desc_ptr gdt_descr;
14662
14663+#ifdef CONFIG_PAX_KERNEXEC
14664+ struct desc_struct d;
14665+
14666+ memset(&d, 0, sizeof d);
14667+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14668+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14669+#endif
14670+
14671 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14672 gdt_descr.size = GDT_SIZE - 1;
14673 load_gdt(&gdt_descr);
14674
14675- cr4 = read_cr4_safe();
14676-
14677- if (cr4 & X86_CR4_PAE) {
14678- swapper_pg_dir[pgd_index(0)].pgd =
14679- efi_bak_pg_dir_pointer[0].pgd;
14680- } else {
14681- swapper_pg_dir[pgd_index(0)].pgd =
14682- efi_bak_pg_dir_pointer[0].pgd;
14683- swapper_pg_dir[pgd_index(0x400000)].pgd =
14684- efi_bak_pg_dir_pointer[1].pgd;
14685- }
14686+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14687
14688 /*
14689 * After the lock is released, the original page table is restored.
14690diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14691index fbe66e6..c5c0dd2 100644
14692--- a/arch/x86/kernel/efi_stub_32.S
14693+++ b/arch/x86/kernel/efi_stub_32.S
14694@@ -6,7 +6,9 @@
14695 */
14696
14697 #include <linux/linkage.h>
14698+#include <linux/init.h>
14699 #include <asm/page_types.h>
14700+#include <asm/segment.h>
14701
14702 /*
14703 * efi_call_phys(void *, ...) is a function with variable parameters.
14704@@ -20,7 +22,7 @@
14705 * service functions will comply with gcc calling convention, too.
14706 */
14707
14708-.text
14709+__INIT
14710 ENTRY(efi_call_phys)
14711 /*
14712 * 0. The function can only be called in Linux kernel. So CS has been
14713@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14714 * The mapping of lower virtual memory has been created in prelog and
14715 * epilog.
14716 */
14717- movl $1f, %edx
14718- subl $__PAGE_OFFSET, %edx
14719- jmp *%edx
14720+ movl $(__KERNEXEC_EFI_DS), %edx
14721+ mov %edx, %ds
14722+ mov %edx, %es
14723+ mov %edx, %ss
14724+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14725 1:
14726
14727 /*
14728@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14729 * parameter 2, ..., param n. To make things easy, we save the return
14730 * address of efi_call_phys in a global variable.
14731 */
14732- popl %edx
14733- movl %edx, saved_return_addr
14734- /* get the function pointer into ECX*/
14735- popl %ecx
14736- movl %ecx, efi_rt_function_ptr
14737- movl $2f, %edx
14738- subl $__PAGE_OFFSET, %edx
14739- pushl %edx
14740+ popl (saved_return_addr)
14741+ popl (efi_rt_function_ptr)
14742
14743 /*
14744 * 3. Clear PG bit in %CR0.
14745@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14746 /*
14747 * 5. Call the physical function.
14748 */
14749- jmp *%ecx
14750+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
14751
14752-2:
14753 /*
14754 * 6. After EFI runtime service returns, control will return to
14755 * following instruction. We'd better readjust stack pointer first.
14756@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14757 movl %cr0, %edx
14758 orl $0x80000000, %edx
14759 movl %edx, %cr0
14760- jmp 1f
14761-1:
14762+
14763 /*
14764 * 8. Now restore the virtual mode from flat mode by
14765 * adding EIP with PAGE_OFFSET.
14766 */
14767- movl $1f, %edx
14768- jmp *%edx
14769+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14770 1:
14771+ movl $(__KERNEL_DS), %edx
14772+ mov %edx, %ds
14773+ mov %edx, %es
14774+ mov %edx, %ss
14775
14776 /*
14777 * 9. Balance the stack. And because EAX contain the return value,
14778 * we'd better not clobber it.
14779 */
14780- leal efi_rt_function_ptr, %edx
14781- movl (%edx), %ecx
14782- pushl %ecx
14783+ pushl (efi_rt_function_ptr)
14784
14785 /*
14786- * 10. Push the saved return address onto the stack and return.
14787+ * 10. Return to the saved return address.
14788 */
14789- leal saved_return_addr, %edx
14790- movl (%edx), %ecx
14791- pushl %ecx
14792- ret
14793+ jmpl *(saved_return_addr)
14794 ENDPROC(efi_call_phys)
14795 .previous
14796
14797-.data
14798+__INITDATA
14799 saved_return_addr:
14800 .long 0
14801 efi_rt_function_ptr:
14802diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
14803index 4c07cca..2c8427d 100644
14804--- a/arch/x86/kernel/efi_stub_64.S
14805+++ b/arch/x86/kernel/efi_stub_64.S
14806@@ -7,6 +7,7 @@
14807 */
14808
14809 #include <linux/linkage.h>
14810+#include <asm/alternative-asm.h>
14811
14812 #define SAVE_XMM \
14813 mov %rsp, %rax; \
14814@@ -40,6 +41,7 @@ ENTRY(efi_call0)
14815 call *%rdi
14816 addq $32, %rsp
14817 RESTORE_XMM
14818+ pax_force_retaddr 0, 1
14819 ret
14820 ENDPROC(efi_call0)
14821
14822@@ -50,6 +52,7 @@ ENTRY(efi_call1)
14823 call *%rdi
14824 addq $32, %rsp
14825 RESTORE_XMM
14826+ pax_force_retaddr 0, 1
14827 ret
14828 ENDPROC(efi_call1)
14829
14830@@ -60,6 +63,7 @@ ENTRY(efi_call2)
14831 call *%rdi
14832 addq $32, %rsp
14833 RESTORE_XMM
14834+ pax_force_retaddr 0, 1
14835 ret
14836 ENDPROC(efi_call2)
14837
14838@@ -71,6 +75,7 @@ ENTRY(efi_call3)
14839 call *%rdi
14840 addq $32, %rsp
14841 RESTORE_XMM
14842+ pax_force_retaddr 0, 1
14843 ret
14844 ENDPROC(efi_call3)
14845
14846@@ -83,6 +88,7 @@ ENTRY(efi_call4)
14847 call *%rdi
14848 addq $32, %rsp
14849 RESTORE_XMM
14850+ pax_force_retaddr 0, 1
14851 ret
14852 ENDPROC(efi_call4)
14853
14854@@ -96,6 +102,7 @@ ENTRY(efi_call5)
14855 call *%rdi
14856 addq $48, %rsp
14857 RESTORE_XMM
14858+ pax_force_retaddr 0, 1
14859 ret
14860 ENDPROC(efi_call5)
14861
14862@@ -112,5 +119,6 @@ ENTRY(efi_call6)
14863 call *%rdi
14864 addq $48, %rsp
14865 RESTORE_XMM
14866+ pax_force_retaddr 0, 1
14867 ret
14868 ENDPROC(efi_call6)
14869diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14870index c097e7d..c689cf4 100644
14871--- a/arch/x86/kernel/entry_32.S
14872+++ b/arch/x86/kernel/entry_32.S
14873@@ -185,13 +185,146 @@
14874 /*CFI_REL_OFFSET gs, PT_GS*/
14875 .endm
14876 .macro SET_KERNEL_GS reg
14877+
14878+#ifdef CONFIG_CC_STACKPROTECTOR
14879 movl $(__KERNEL_STACK_CANARY), \reg
14880+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14881+ movl $(__USER_DS), \reg
14882+#else
14883+ xorl \reg, \reg
14884+#endif
14885+
14886 movl \reg, %gs
14887 .endm
14888
14889 #endif /* CONFIG_X86_32_LAZY_GS */
14890
14891-.macro SAVE_ALL
14892+.macro pax_enter_kernel
14893+#ifdef CONFIG_PAX_KERNEXEC
14894+ call pax_enter_kernel
14895+#endif
14896+.endm
14897+
14898+.macro pax_exit_kernel
14899+#ifdef CONFIG_PAX_KERNEXEC
14900+ call pax_exit_kernel
14901+#endif
14902+.endm
14903+
14904+#ifdef CONFIG_PAX_KERNEXEC
14905+ENTRY(pax_enter_kernel)
14906+#ifdef CONFIG_PARAVIRT
14907+ pushl %eax
14908+ pushl %ecx
14909+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14910+ mov %eax, %esi
14911+#else
14912+ mov %cr0, %esi
14913+#endif
14914+ bts $16, %esi
14915+ jnc 1f
14916+ mov %cs, %esi
14917+ cmp $__KERNEL_CS, %esi
14918+ jz 3f
14919+ ljmp $__KERNEL_CS, $3f
14920+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14921+2:
14922+#ifdef CONFIG_PARAVIRT
14923+ mov %esi, %eax
14924+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14925+#else
14926+ mov %esi, %cr0
14927+#endif
14928+3:
14929+#ifdef CONFIG_PARAVIRT
14930+ popl %ecx
14931+ popl %eax
14932+#endif
14933+ ret
14934+ENDPROC(pax_enter_kernel)
14935+
14936+ENTRY(pax_exit_kernel)
14937+#ifdef CONFIG_PARAVIRT
14938+ pushl %eax
14939+ pushl %ecx
14940+#endif
14941+ mov %cs, %esi
14942+ cmp $__KERNEXEC_KERNEL_CS, %esi
14943+ jnz 2f
14944+#ifdef CONFIG_PARAVIRT
14945+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14946+ mov %eax, %esi
14947+#else
14948+ mov %cr0, %esi
14949+#endif
14950+ btr $16, %esi
14951+ ljmp $__KERNEL_CS, $1f
14952+1:
14953+#ifdef CONFIG_PARAVIRT
14954+ mov %esi, %eax
14955+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14956+#else
14957+ mov %esi, %cr0
14958+#endif
14959+2:
14960+#ifdef CONFIG_PARAVIRT
14961+ popl %ecx
14962+ popl %eax
14963+#endif
14964+ ret
14965+ENDPROC(pax_exit_kernel)
14966+#endif
14967+
14968+.macro pax_erase_kstack
14969+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14970+ call pax_erase_kstack
14971+#endif
14972+.endm
14973+
14974+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14975+/*
14976+ * ebp: thread_info
14977+ * ecx, edx: can be clobbered
14978+ */
14979+ENTRY(pax_erase_kstack)
14980+ pushl %edi
14981+ pushl %eax
14982+
14983+ mov TI_lowest_stack(%ebp), %edi
14984+ mov $-0xBEEF, %eax
14985+ std
14986+
14987+1: mov %edi, %ecx
14988+ and $THREAD_SIZE_asm - 1, %ecx
14989+ shr $2, %ecx
14990+ repne scasl
14991+ jecxz 2f
14992+
14993+ cmp $2*16, %ecx
14994+ jc 2f
14995+
14996+ mov $2*16, %ecx
14997+ repe scasl
14998+ jecxz 2f
14999+ jne 1b
15000+
15001+2: cld
15002+ mov %esp, %ecx
15003+ sub %edi, %ecx
15004+ shr $2, %ecx
15005+ rep stosl
15006+
15007+ mov TI_task_thread_sp0(%ebp), %edi
15008+ sub $128, %edi
15009+ mov %edi, TI_lowest_stack(%ebp)
15010+
15011+ popl %eax
15012+ popl %edi
15013+ ret
15014+ENDPROC(pax_erase_kstack)
15015+#endif
15016+
15017+.macro __SAVE_ALL _DS
15018 cld
15019 PUSH_GS
15020 pushl %fs
15021@@ -224,7 +357,7 @@
15022 pushl %ebx
15023 CFI_ADJUST_CFA_OFFSET 4
15024 CFI_REL_OFFSET ebx, 0
15025- movl $(__USER_DS), %edx
15026+ movl $\_DS, %edx
15027 movl %edx, %ds
15028 movl %edx, %es
15029 movl $(__KERNEL_PERCPU), %edx
15030@@ -232,6 +365,15 @@
15031 SET_KERNEL_GS %edx
15032 .endm
15033
15034+.macro SAVE_ALL
15035+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15036+ __SAVE_ALL __KERNEL_DS
15037+ pax_enter_kernel
15038+#else
15039+ __SAVE_ALL __USER_DS
15040+#endif
15041+.endm
15042+
15043 .macro RESTORE_INT_REGS
15044 popl %ebx
15045 CFI_ADJUST_CFA_OFFSET -4
15046@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15047 CFI_ADJUST_CFA_OFFSET -4
15048 jmp syscall_exit
15049 CFI_ENDPROC
15050-END(ret_from_fork)
15051+ENDPROC(ret_from_fork)
15052
15053 /*
15054 * Return to user mode is not as complex as all this looks,
15055@@ -352,7 +494,15 @@ check_userspace:
15056 movb PT_CS(%esp), %al
15057 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15058 cmpl $USER_RPL, %eax
15059+
15060+#ifdef CONFIG_PAX_KERNEXEC
15061+ jae resume_userspace
15062+
15063+ PAX_EXIT_KERNEL
15064+ jmp resume_kernel
15065+#else
15066 jb resume_kernel # not returning to v8086 or userspace
15067+#endif
15068
15069 ENTRY(resume_userspace)
15070 LOCKDEP_SYS_EXIT
15071@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15072 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15073 # int/exception return?
15074 jne work_pending
15075- jmp restore_all
15076-END(ret_from_exception)
15077+ jmp restore_all_pax
15078+ENDPROC(ret_from_exception)
15079
15080 #ifdef CONFIG_PREEMPT
15081 ENTRY(resume_kernel)
15082@@ -380,7 +530,7 @@ need_resched:
15083 jz restore_all
15084 call preempt_schedule_irq
15085 jmp need_resched
15086-END(resume_kernel)
15087+ENDPROC(resume_kernel)
15088 #endif
15089 CFI_ENDPROC
15090
15091@@ -414,25 +564,36 @@ sysenter_past_esp:
15092 /*CFI_REL_OFFSET cs, 0*/
15093 /*
15094 * Push current_thread_info()->sysenter_return to the stack.
15095- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15096- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15097 */
15098- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15099+ pushl $0
15100 CFI_ADJUST_CFA_OFFSET 4
15101 CFI_REL_OFFSET eip, 0
15102
15103 pushl %eax
15104 CFI_ADJUST_CFA_OFFSET 4
15105 SAVE_ALL
15106+ GET_THREAD_INFO(%ebp)
15107+ movl TI_sysenter_return(%ebp),%ebp
15108+ movl %ebp,PT_EIP(%esp)
15109 ENABLE_INTERRUPTS(CLBR_NONE)
15110
15111 /*
15112 * Load the potential sixth argument from user stack.
15113 * Careful about security.
15114 */
15115+ movl PT_OLDESP(%esp),%ebp
15116+
15117+#ifdef CONFIG_PAX_MEMORY_UDEREF
15118+ mov PT_OLDSS(%esp),%ds
15119+1: movl %ds:(%ebp),%ebp
15120+ push %ss
15121+ pop %ds
15122+#else
15123 cmpl $__PAGE_OFFSET-3,%ebp
15124 jae syscall_fault
15125 1: movl (%ebp),%ebp
15126+#endif
15127+
15128 movl %ebp,PT_EBP(%esp)
15129 .section __ex_table,"a"
15130 .align 4
15131@@ -455,12 +616,24 @@ sysenter_do_call:
15132 testl $_TIF_ALLWORK_MASK, %ecx
15133 jne sysexit_audit
15134 sysenter_exit:
15135+
15136+#ifdef CONFIG_PAX_RANDKSTACK
15137+ pushl_cfi %eax
15138+ movl %esp, %eax
15139+ call pax_randomize_kstack
15140+ popl_cfi %eax
15141+#endif
15142+
15143+ pax_erase_kstack
15144+
15145 /* if something modifies registers it must also disable sysexit */
15146 movl PT_EIP(%esp), %edx
15147 movl PT_OLDESP(%esp), %ecx
15148 xorl %ebp,%ebp
15149 TRACE_IRQS_ON
15150 1: mov PT_FS(%esp), %fs
15151+2: mov PT_DS(%esp), %ds
15152+3: mov PT_ES(%esp), %es
15153 PTGS_TO_GS
15154 ENABLE_INTERRUPTS_SYSEXIT
15155
15156@@ -477,6 +650,9 @@ sysenter_audit:
15157 movl %eax,%edx /* 2nd arg: syscall number */
15158 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15159 call audit_syscall_entry
15160+
15161+ pax_erase_kstack
15162+
15163 pushl %ebx
15164 CFI_ADJUST_CFA_OFFSET 4
15165 movl PT_EAX(%esp),%eax /* reload syscall number */
15166@@ -504,11 +680,17 @@ sysexit_audit:
15167
15168 CFI_ENDPROC
15169 .pushsection .fixup,"ax"
15170-2: movl $0,PT_FS(%esp)
15171+4: movl $0,PT_FS(%esp)
15172+ jmp 1b
15173+5: movl $0,PT_DS(%esp)
15174+ jmp 1b
15175+6: movl $0,PT_ES(%esp)
15176 jmp 1b
15177 .section __ex_table,"a"
15178 .align 4
15179- .long 1b,2b
15180+ .long 1b,4b
15181+ .long 2b,5b
15182+ .long 3b,6b
15183 .popsection
15184 PTGS_TO_GS_EX
15185 ENDPROC(ia32_sysenter_target)
15186@@ -538,6 +720,15 @@ syscall_exit:
15187 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15188 jne syscall_exit_work
15189
15190+restore_all_pax:
15191+
15192+#ifdef CONFIG_PAX_RANDKSTACK
15193+ movl %esp, %eax
15194+ call pax_randomize_kstack
15195+#endif
15196+
15197+ pax_erase_kstack
15198+
15199 restore_all:
15200 TRACE_IRQS_IRET
15201 restore_all_notrace:
15202@@ -602,10 +793,29 @@ ldt_ss:
15203 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15204 mov %dx, %ax /* eax: new kernel esp */
15205 sub %eax, %edx /* offset (low word is 0) */
15206- PER_CPU(gdt_page, %ebx)
15207+#ifdef CONFIG_SMP
15208+ movl PER_CPU_VAR(cpu_number), %ebx
15209+ shll $PAGE_SHIFT_asm, %ebx
15210+ addl $cpu_gdt_table, %ebx
15211+#else
15212+ movl $cpu_gdt_table, %ebx
15213+#endif
15214 shr $16, %edx
15215+
15216+#ifdef CONFIG_PAX_KERNEXEC
15217+ mov %cr0, %esi
15218+ btr $16, %esi
15219+ mov %esi, %cr0
15220+#endif
15221+
15222 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15223 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15224+
15225+#ifdef CONFIG_PAX_KERNEXEC
15226+ bts $16, %esi
15227+ mov %esi, %cr0
15228+#endif
15229+
15230 pushl $__ESPFIX_SS
15231 CFI_ADJUST_CFA_OFFSET 4
15232 push %eax /* new kernel esp */
15233@@ -636,36 +846,30 @@ work_resched:
15234 movl TI_flags(%ebp), %ecx
15235 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15236 # than syscall tracing?
15237- jz restore_all
15238+ jz restore_all_pax
15239 testb $_TIF_NEED_RESCHED, %cl
15240 jnz work_resched
15241
15242 work_notifysig: # deal with pending signals and
15243 # notify-resume requests
15244+ movl %esp, %eax
15245 #ifdef CONFIG_VM86
15246 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15247- movl %esp, %eax
15248- jne work_notifysig_v86 # returning to kernel-space or
15249+ jz 1f # returning to kernel-space or
15250 # vm86-space
15251- xorl %edx, %edx
15252- call do_notify_resume
15253- jmp resume_userspace_sig
15254
15255- ALIGN
15256-work_notifysig_v86:
15257 pushl %ecx # save ti_flags for do_notify_resume
15258 CFI_ADJUST_CFA_OFFSET 4
15259 call save_v86_state # %eax contains pt_regs pointer
15260 popl %ecx
15261 CFI_ADJUST_CFA_OFFSET -4
15262 movl %eax, %esp
15263-#else
15264- movl %esp, %eax
15265+1:
15266 #endif
15267 xorl %edx, %edx
15268 call do_notify_resume
15269 jmp resume_userspace_sig
15270-END(work_pending)
15271+ENDPROC(work_pending)
15272
15273 # perform syscall exit tracing
15274 ALIGN
15275@@ -673,11 +877,14 @@ syscall_trace_entry:
15276 movl $-ENOSYS,PT_EAX(%esp)
15277 movl %esp, %eax
15278 call syscall_trace_enter
15279+
15280+ pax_erase_kstack
15281+
15282 /* What it returned is what we'll actually use. */
15283 cmpl $(nr_syscalls), %eax
15284 jnae syscall_call
15285 jmp syscall_exit
15286-END(syscall_trace_entry)
15287+ENDPROC(syscall_trace_entry)
15288
15289 # perform syscall exit tracing
15290 ALIGN
15291@@ -690,20 +897,24 @@ syscall_exit_work:
15292 movl %esp, %eax
15293 call syscall_trace_leave
15294 jmp resume_userspace
15295-END(syscall_exit_work)
15296+ENDPROC(syscall_exit_work)
15297 CFI_ENDPROC
15298
15299 RING0_INT_FRAME # can't unwind into user space anyway
15300 syscall_fault:
15301+#ifdef CONFIG_PAX_MEMORY_UDEREF
15302+ push %ss
15303+ pop %ds
15304+#endif
15305 GET_THREAD_INFO(%ebp)
15306 movl $-EFAULT,PT_EAX(%esp)
15307 jmp resume_userspace
15308-END(syscall_fault)
15309+ENDPROC(syscall_fault)
15310
15311 syscall_badsys:
15312 movl $-ENOSYS,PT_EAX(%esp)
15313 jmp resume_userspace
15314-END(syscall_badsys)
15315+ENDPROC(syscall_badsys)
15316 CFI_ENDPROC
15317
15318 /*
15319@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15320 PTREGSCALL(vm86)
15321 PTREGSCALL(vm86old)
15322
15323+ ALIGN;
15324+ENTRY(kernel_execve)
15325+ push %ebp
15326+ sub $PT_OLDSS+4,%esp
15327+ push %edi
15328+ push %ecx
15329+ push %eax
15330+ lea 3*4(%esp),%edi
15331+ mov $PT_OLDSS/4+1,%ecx
15332+ xorl %eax,%eax
15333+ rep stosl
15334+ pop %eax
15335+ pop %ecx
15336+ pop %edi
15337+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15338+ mov %eax,PT_EBX(%esp)
15339+ mov %edx,PT_ECX(%esp)
15340+ mov %ecx,PT_EDX(%esp)
15341+ mov %esp,%eax
15342+ call sys_execve
15343+ GET_THREAD_INFO(%ebp)
15344+ test %eax,%eax
15345+ jz syscall_exit
15346+ add $PT_OLDSS+4,%esp
15347+ pop %ebp
15348+ ret
15349+
15350 .macro FIXUP_ESPFIX_STACK
15351 /*
15352 * Switch back for ESPFIX stack to the normal zerobased stack
15353@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15354 * normal stack and adjusts ESP with the matching offset.
15355 */
15356 /* fixup the stack */
15357- PER_CPU(gdt_page, %ebx)
15358+#ifdef CONFIG_SMP
15359+ movl PER_CPU_VAR(cpu_number), %ebx
15360+ shll $PAGE_SHIFT_asm, %ebx
15361+ addl $cpu_gdt_table, %ebx
15362+#else
15363+ movl $cpu_gdt_table, %ebx
15364+#endif
15365 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15366 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15367 shl $16, %eax
15368@@ -793,7 +1037,7 @@ vector=vector+1
15369 .endr
15370 2: jmp common_interrupt
15371 .endr
15372-END(irq_entries_start)
15373+ENDPROC(irq_entries_start)
15374
15375 .previous
15376 END(interrupt)
15377@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15378 CFI_ADJUST_CFA_OFFSET 4
15379 jmp error_code
15380 CFI_ENDPROC
15381-END(coprocessor_error)
15382+ENDPROC(coprocessor_error)
15383
15384 ENTRY(simd_coprocessor_error)
15385 RING0_INT_FRAME
15386@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15387 CFI_ADJUST_CFA_OFFSET 4
15388 jmp error_code
15389 CFI_ENDPROC
15390-END(simd_coprocessor_error)
15391+ENDPROC(simd_coprocessor_error)
15392
15393 ENTRY(device_not_available)
15394 RING0_INT_FRAME
15395@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15396 CFI_ADJUST_CFA_OFFSET 4
15397 jmp error_code
15398 CFI_ENDPROC
15399-END(device_not_available)
15400+ENDPROC(device_not_available)
15401
15402 #ifdef CONFIG_PARAVIRT
15403 ENTRY(native_iret)
15404@@ -869,12 +1113,12 @@ ENTRY(native_iret)
15405 .align 4
15406 .long native_iret, iret_exc
15407 .previous
15408-END(native_iret)
15409+ENDPROC(native_iret)
15410
15411 ENTRY(native_irq_enable_sysexit)
15412 sti
15413 sysexit
15414-END(native_irq_enable_sysexit)
15415+ENDPROC(native_irq_enable_sysexit)
15416 #endif
15417
15418 ENTRY(overflow)
15419@@ -885,7 +1129,7 @@ ENTRY(overflow)
15420 CFI_ADJUST_CFA_OFFSET 4
15421 jmp error_code
15422 CFI_ENDPROC
15423-END(overflow)
15424+ENDPROC(overflow)
15425
15426 ENTRY(bounds)
15427 RING0_INT_FRAME
15428@@ -895,7 +1139,7 @@ ENTRY(bounds)
15429 CFI_ADJUST_CFA_OFFSET 4
15430 jmp error_code
15431 CFI_ENDPROC
15432-END(bounds)
15433+ENDPROC(bounds)
15434
15435 ENTRY(invalid_op)
15436 RING0_INT_FRAME
15437@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15438 CFI_ADJUST_CFA_OFFSET 4
15439 jmp error_code
15440 CFI_ENDPROC
15441-END(invalid_op)
15442+ENDPROC(invalid_op)
15443
15444 ENTRY(coprocessor_segment_overrun)
15445 RING0_INT_FRAME
15446@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15447 CFI_ADJUST_CFA_OFFSET 4
15448 jmp error_code
15449 CFI_ENDPROC
15450-END(coprocessor_segment_overrun)
15451+ENDPROC(coprocessor_segment_overrun)
15452
15453 ENTRY(invalid_TSS)
15454 RING0_EC_FRAME
15455@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15456 CFI_ADJUST_CFA_OFFSET 4
15457 jmp error_code
15458 CFI_ENDPROC
15459-END(invalid_TSS)
15460+ENDPROC(invalid_TSS)
15461
15462 ENTRY(segment_not_present)
15463 RING0_EC_FRAME
15464@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15465 CFI_ADJUST_CFA_OFFSET 4
15466 jmp error_code
15467 CFI_ENDPROC
15468-END(segment_not_present)
15469+ENDPROC(segment_not_present)
15470
15471 ENTRY(stack_segment)
15472 RING0_EC_FRAME
15473@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15474 CFI_ADJUST_CFA_OFFSET 4
15475 jmp error_code
15476 CFI_ENDPROC
15477-END(stack_segment)
15478+ENDPROC(stack_segment)
15479
15480 ENTRY(alignment_check)
15481 RING0_EC_FRAME
15482@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15483 CFI_ADJUST_CFA_OFFSET 4
15484 jmp error_code
15485 CFI_ENDPROC
15486-END(alignment_check)
15487+ENDPROC(alignment_check)
15488
15489 ENTRY(divide_error)
15490 RING0_INT_FRAME
15491@@ -957,7 +1201,7 @@ ENTRY(divide_error)
15492 CFI_ADJUST_CFA_OFFSET 4
15493 jmp error_code
15494 CFI_ENDPROC
15495-END(divide_error)
15496+ENDPROC(divide_error)
15497
15498 #ifdef CONFIG_X86_MCE
15499 ENTRY(machine_check)
15500@@ -968,7 +1212,7 @@ ENTRY(machine_check)
15501 CFI_ADJUST_CFA_OFFSET 4
15502 jmp error_code
15503 CFI_ENDPROC
15504-END(machine_check)
15505+ENDPROC(machine_check)
15506 #endif
15507
15508 ENTRY(spurious_interrupt_bug)
15509@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15510 CFI_ADJUST_CFA_OFFSET 4
15511 jmp error_code
15512 CFI_ENDPROC
15513-END(spurious_interrupt_bug)
15514+ENDPROC(spurious_interrupt_bug)
15515
15516 ENTRY(kernel_thread_helper)
15517 pushl $0 # fake return address for unwinder
15518@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15519
15520 ENTRY(mcount)
15521 ret
15522-END(mcount)
15523+ENDPROC(mcount)
15524
15525 ENTRY(ftrace_caller)
15526 cmpl $0, function_trace_stop
15527@@ -1124,7 +1368,7 @@ ftrace_graph_call:
15528 .globl ftrace_stub
15529 ftrace_stub:
15530 ret
15531-END(ftrace_caller)
15532+ENDPROC(ftrace_caller)
15533
15534 #else /* ! CONFIG_DYNAMIC_FTRACE */
15535
15536@@ -1160,7 +1404,7 @@ trace:
15537 popl %ecx
15538 popl %eax
15539 jmp ftrace_stub
15540-END(mcount)
15541+ENDPROC(mcount)
15542 #endif /* CONFIG_DYNAMIC_FTRACE */
15543 #endif /* CONFIG_FUNCTION_TRACER */
15544
15545@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15546 popl %ecx
15547 popl %eax
15548 ret
15549-END(ftrace_graph_caller)
15550+ENDPROC(ftrace_graph_caller)
15551
15552 .globl return_to_handler
15553 return_to_handler:
15554@@ -1198,7 +1442,6 @@ return_to_handler:
15555 ret
15556 #endif
15557
15558-.section .rodata,"a"
15559 #include "syscall_table_32.S"
15560
15561 syscall_table_size=(.-sys_call_table)
15562@@ -1255,15 +1498,18 @@ error_code:
15563 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15564 REG_TO_PTGS %ecx
15565 SET_KERNEL_GS %ecx
15566- movl $(__USER_DS), %ecx
15567+ movl $(__KERNEL_DS), %ecx
15568 movl %ecx, %ds
15569 movl %ecx, %es
15570+
15571+ pax_enter_kernel
15572+
15573 TRACE_IRQS_OFF
15574 movl %esp,%eax # pt_regs pointer
15575 call *%edi
15576 jmp ret_from_exception
15577 CFI_ENDPROC
15578-END(page_fault)
15579+ENDPROC(page_fault)
15580
15581 /*
15582 * Debug traps and NMI can happen at the one SYSENTER instruction
15583@@ -1309,7 +1555,7 @@ debug_stack_correct:
15584 call do_debug
15585 jmp ret_from_exception
15586 CFI_ENDPROC
15587-END(debug)
15588+ENDPROC(debug)
15589
15590 /*
15591 * NMI is doubly nasty. It can happen _while_ we're handling
15592@@ -1351,6 +1597,9 @@ nmi_stack_correct:
15593 xorl %edx,%edx # zero error code
15594 movl %esp,%eax # pt_regs pointer
15595 call do_nmi
15596+
15597+ pax_exit_kernel
15598+
15599 jmp restore_all_notrace
15600 CFI_ENDPROC
15601
15602@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15603 FIXUP_ESPFIX_STACK # %eax == %esp
15604 xorl %edx,%edx # zero error code
15605 call do_nmi
15606+
15607+ pax_exit_kernel
15608+
15609 RESTORE_REGS
15610 lss 12+4(%esp), %esp # back to espfix stack
15611 CFI_ADJUST_CFA_OFFSET -24
15612 jmp irq_return
15613 CFI_ENDPROC
15614-END(nmi)
15615+ENDPROC(nmi)
15616
15617 ENTRY(int3)
15618 RING0_INT_FRAME
15619@@ -1409,7 +1661,7 @@ ENTRY(int3)
15620 call do_int3
15621 jmp ret_from_exception
15622 CFI_ENDPROC
15623-END(int3)
15624+ENDPROC(int3)
15625
15626 ENTRY(general_protection)
15627 RING0_EC_FRAME
15628@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15629 CFI_ADJUST_CFA_OFFSET 4
15630 jmp error_code
15631 CFI_ENDPROC
15632-END(general_protection)
15633+ENDPROC(general_protection)
15634
15635 /*
15636 * End of kprobes section
15637diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15638index 34a56a9..4aa5c8b 100644
15639--- a/arch/x86/kernel/entry_64.S
15640+++ b/arch/x86/kernel/entry_64.S
15641@@ -53,6 +53,8 @@
15642 #include <asm/paravirt.h>
15643 #include <asm/ftrace.h>
15644 #include <asm/percpu.h>
15645+#include <asm/pgtable.h>
15646+#include <asm/alternative-asm.h>
15647
15648 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15649 #include <linux/elf-em.h>
15650@@ -64,8 +66,9 @@
15651 #ifdef CONFIG_FUNCTION_TRACER
15652 #ifdef CONFIG_DYNAMIC_FTRACE
15653 ENTRY(mcount)
15654+ pax_force_retaddr
15655 retq
15656-END(mcount)
15657+ENDPROC(mcount)
15658
15659 ENTRY(ftrace_caller)
15660 cmpl $0, function_trace_stop
15661@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15662 #endif
15663
15664 GLOBAL(ftrace_stub)
15665+ pax_force_retaddr
15666 retq
15667-END(ftrace_caller)
15668+ENDPROC(ftrace_caller)
15669
15670 #else /* ! CONFIG_DYNAMIC_FTRACE */
15671 ENTRY(mcount)
15672@@ -108,6 +112,7 @@ ENTRY(mcount)
15673 #endif
15674
15675 GLOBAL(ftrace_stub)
15676+ pax_force_retaddr
15677 retq
15678
15679 trace:
15680@@ -117,12 +122,13 @@ trace:
15681 movq 8(%rbp), %rsi
15682 subq $MCOUNT_INSN_SIZE, %rdi
15683
15684+ pax_force_fptr ftrace_trace_function
15685 call *ftrace_trace_function
15686
15687 MCOUNT_RESTORE_FRAME
15688
15689 jmp ftrace_stub
15690-END(mcount)
15691+ENDPROC(mcount)
15692 #endif /* CONFIG_DYNAMIC_FTRACE */
15693 #endif /* CONFIG_FUNCTION_TRACER */
15694
15695@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15696
15697 MCOUNT_RESTORE_FRAME
15698
15699+ pax_force_retaddr
15700 retq
15701-END(ftrace_graph_caller)
15702+ENDPROC(ftrace_graph_caller)
15703
15704 GLOBAL(return_to_handler)
15705 subq $24, %rsp
15706@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15707 movq 8(%rsp), %rdx
15708 movq (%rsp), %rax
15709 addq $16, %rsp
15710+ pax_force_retaddr
15711 retq
15712 #endif
15713
15714@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15715 ENDPROC(native_usergs_sysret64)
15716 #endif /* CONFIG_PARAVIRT */
15717
15718+ .macro ljmpq sel, off
15719+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15720+ .byte 0x48; ljmp *1234f(%rip)
15721+ .pushsection .rodata
15722+ .align 16
15723+ 1234: .quad \off; .word \sel
15724+ .popsection
15725+#else
15726+ pushq $\sel
15727+ pushq $\off
15728+ lretq
15729+#endif
15730+ .endm
15731+
15732+ .macro pax_enter_kernel
15733+ pax_set_fptr_mask
15734+#ifdef CONFIG_PAX_KERNEXEC
15735+ call pax_enter_kernel
15736+#endif
15737+ .endm
15738+
15739+ .macro pax_exit_kernel
15740+#ifdef CONFIG_PAX_KERNEXEC
15741+ call pax_exit_kernel
15742+#endif
15743+ .endm
15744+
15745+#ifdef CONFIG_PAX_KERNEXEC
15746+ENTRY(pax_enter_kernel)
15747+ pushq %rdi
15748+
15749+#ifdef CONFIG_PARAVIRT
15750+ PV_SAVE_REGS(CLBR_RDI)
15751+#endif
15752+
15753+ GET_CR0_INTO_RDI
15754+ bts $16,%rdi
15755+ jnc 3f
15756+ mov %cs,%edi
15757+ cmp $__KERNEL_CS,%edi
15758+ jnz 2f
15759+1:
15760+
15761+#ifdef CONFIG_PARAVIRT
15762+ PV_RESTORE_REGS(CLBR_RDI)
15763+#endif
15764+
15765+ popq %rdi
15766+ pax_force_retaddr
15767+ retq
15768+
15769+2: ljmpq __KERNEL_CS,1f
15770+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15771+4: SET_RDI_INTO_CR0
15772+ jmp 1b
15773+ENDPROC(pax_enter_kernel)
15774+
15775+ENTRY(pax_exit_kernel)
15776+ pushq %rdi
15777+
15778+#ifdef CONFIG_PARAVIRT
15779+ PV_SAVE_REGS(CLBR_RDI)
15780+#endif
15781+
15782+ mov %cs,%rdi
15783+ cmp $__KERNEXEC_KERNEL_CS,%edi
15784+ jz 2f
15785+1:
15786+
15787+#ifdef CONFIG_PARAVIRT
15788+ PV_RESTORE_REGS(CLBR_RDI);
15789+#endif
15790+
15791+ popq %rdi
15792+ pax_force_retaddr
15793+ retq
15794+
15795+2: GET_CR0_INTO_RDI
15796+ btr $16,%rdi
15797+ ljmpq __KERNEL_CS,3f
15798+3: SET_RDI_INTO_CR0
15799+ jmp 1b
15800+#ifdef CONFIG_PARAVIRT
15801+ PV_RESTORE_REGS(CLBR_RDI);
15802+#endif
15803+
15804+ popq %rdi
15805+ pax_force_retaddr
15806+ retq
15807+ENDPROC(pax_exit_kernel)
15808+#endif
15809+
15810+ .macro pax_enter_kernel_user
15811+ pax_set_fptr_mask
15812+#ifdef CONFIG_PAX_MEMORY_UDEREF
15813+ call pax_enter_kernel_user
15814+#endif
15815+ .endm
15816+
15817+ .macro pax_exit_kernel_user
15818+#ifdef CONFIG_PAX_MEMORY_UDEREF
15819+ call pax_exit_kernel_user
15820+#endif
15821+#ifdef CONFIG_PAX_RANDKSTACK
15822+ pushq %rax
15823+ call pax_randomize_kstack
15824+ popq %rax
15825+#endif
15826+ .endm
15827+
15828+#ifdef CONFIG_PAX_MEMORY_UDEREF
15829+ENTRY(pax_enter_kernel_user)
15830+ pushq %rdi
15831+ pushq %rbx
15832+
15833+#ifdef CONFIG_PARAVIRT
15834+ PV_SAVE_REGS(CLBR_RDI)
15835+#endif
15836+
15837+ GET_CR3_INTO_RDI
15838+ mov %rdi,%rbx
15839+ add $__START_KERNEL_map,%rbx
15840+ sub phys_base(%rip),%rbx
15841+
15842+#ifdef CONFIG_PARAVIRT
15843+ pushq %rdi
15844+ cmpl $0, pv_info+PARAVIRT_enabled
15845+ jz 1f
15846+ i = 0
15847+ .rept USER_PGD_PTRS
15848+ mov i*8(%rbx),%rsi
15849+ mov $0,%sil
15850+ lea i*8(%rbx),%rdi
15851+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15852+ i = i + 1
15853+ .endr
15854+ jmp 2f
15855+1:
15856+#endif
15857+
15858+ i = 0
15859+ .rept USER_PGD_PTRS
15860+ movb $0,i*8(%rbx)
15861+ i = i + 1
15862+ .endr
15863+
15864+#ifdef CONFIG_PARAVIRT
15865+2: popq %rdi
15866+#endif
15867+ SET_RDI_INTO_CR3
15868+
15869+#ifdef CONFIG_PAX_KERNEXEC
15870+ GET_CR0_INTO_RDI
15871+ bts $16,%rdi
15872+ SET_RDI_INTO_CR0
15873+#endif
15874+
15875+#ifdef CONFIG_PARAVIRT
15876+ PV_RESTORE_REGS(CLBR_RDI)
15877+#endif
15878+
15879+ popq %rbx
15880+ popq %rdi
15881+ pax_force_retaddr
15882+ retq
15883+ENDPROC(pax_enter_kernel_user)
15884+
15885+ENTRY(pax_exit_kernel_user)
15886+ push %rdi
15887+
15888+#ifdef CONFIG_PARAVIRT
15889+ pushq %rbx
15890+ PV_SAVE_REGS(CLBR_RDI)
15891+#endif
15892+
15893+#ifdef CONFIG_PAX_KERNEXEC
15894+ GET_CR0_INTO_RDI
15895+ btr $16,%rdi
15896+ SET_RDI_INTO_CR0
15897+#endif
15898+
15899+ GET_CR3_INTO_RDI
15900+ add $__START_KERNEL_map,%rdi
15901+ sub phys_base(%rip),%rdi
15902+
15903+#ifdef CONFIG_PARAVIRT
15904+ cmpl $0, pv_info+PARAVIRT_enabled
15905+ jz 1f
15906+ mov %rdi,%rbx
15907+ i = 0
15908+ .rept USER_PGD_PTRS
15909+ mov i*8(%rbx),%rsi
15910+ mov $0x67,%sil
15911+ lea i*8(%rbx),%rdi
15912+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15913+ i = i + 1
15914+ .endr
15915+ jmp 2f
15916+1:
15917+#endif
15918+
15919+ i = 0
15920+ .rept USER_PGD_PTRS
15921+ movb $0x67,i*8(%rdi)
15922+ i = i + 1
15923+ .endr
15924+
15925+#ifdef CONFIG_PARAVIRT
15926+2: PV_RESTORE_REGS(CLBR_RDI)
15927+ popq %rbx
15928+#endif
15929+
15930+ popq %rdi
15931+ pax_force_retaddr
15932+ retq
15933+ENDPROC(pax_exit_kernel_user)
15934+#endif
15935+
15936+.macro pax_erase_kstack
15937+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15938+ call pax_erase_kstack
15939+#endif
15940+.endm
15941+
15942+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15943+/*
15944+ * r11: thread_info
15945+ * rcx, rdx: can be clobbered
15946+ */
15947+ENTRY(pax_erase_kstack)
15948+ pushq %rdi
15949+ pushq %rax
15950+ pushq %r11
15951+
15952+ GET_THREAD_INFO(%r11)
15953+ mov TI_lowest_stack(%r11), %rdi
15954+ mov $-0xBEEF, %rax
15955+ std
15956+
15957+1: mov %edi, %ecx
15958+ and $THREAD_SIZE_asm - 1, %ecx
15959+ shr $3, %ecx
15960+ repne scasq
15961+ jecxz 2f
15962+
15963+ cmp $2*8, %ecx
15964+ jc 2f
15965+
15966+ mov $2*8, %ecx
15967+ repe scasq
15968+ jecxz 2f
15969+ jne 1b
15970+
15971+2: cld
15972+ mov %esp, %ecx
15973+ sub %edi, %ecx
15974+
15975+ cmp $THREAD_SIZE_asm, %rcx
15976+ jb 3f
15977+ ud2
15978+3:
15979+
15980+ shr $3, %ecx
15981+ rep stosq
15982+
15983+ mov TI_task_thread_sp0(%r11), %rdi
15984+ sub $256, %rdi
15985+ mov %rdi, TI_lowest_stack(%r11)
15986+
15987+ popq %r11
15988+ popq %rax
15989+ popq %rdi
15990+ pax_force_retaddr
15991+ ret
15992+ENDPROC(pax_erase_kstack)
15993+#endif
15994
15995 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15996 #ifdef CONFIG_TRACE_IRQFLAGS
15997@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
15998 .endm
15999
16000 .macro UNFAKE_STACK_FRAME
16001- addq $8*6, %rsp
16002- CFI_ADJUST_CFA_OFFSET -(6*8)
16003+ addq $8*6 + ARG_SKIP, %rsp
16004+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16005 .endm
16006
16007 /*
16008@@ -317,7 +601,7 @@ ENTRY(save_args)
16009 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16010 movq_cfi rbp, 8 /* push %rbp */
16011 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16012- testl $3, CS(%rdi)
16013+ testb $3, CS(%rdi)
16014 je 1f
16015 SWAPGS
16016 /*
16017@@ -337,9 +621,10 @@ ENTRY(save_args)
16018 * We entered an interrupt context - irqs are off:
16019 */
16020 2: TRACE_IRQS_OFF
16021+ pax_force_retaddr
16022 ret
16023 CFI_ENDPROC
16024-END(save_args)
16025+ENDPROC(save_args)
16026
16027 ENTRY(save_rest)
16028 PARTIAL_FRAME 1 REST_SKIP+8
16029@@ -352,9 +637,10 @@ ENTRY(save_rest)
16030 movq_cfi r15, R15+16
16031 movq %r11, 8(%rsp) /* return address */
16032 FIXUP_TOP_OF_STACK %r11, 16
16033+ pax_force_retaddr
16034 ret
16035 CFI_ENDPROC
16036-END(save_rest)
16037+ENDPROC(save_rest)
16038
16039 /* save complete stack frame */
16040 .pushsection .kprobes.text, "ax"
16041@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16042 js 1f /* negative -> in kernel */
16043 SWAPGS
16044 xorl %ebx,%ebx
16045-1: ret
16046+1: pax_force_retaddr_bts
16047+ ret
16048 CFI_ENDPROC
16049-END(save_paranoid)
16050+ENDPROC(save_paranoid)
16051 .popsection
16052
16053 /*
16054@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16055
16056 RESTORE_REST
16057
16058- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16059+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16060 je int_ret_from_sys_call
16061
16062 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16063@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16064 jmp ret_from_sys_call # go to the SYSRET fastpath
16065
16066 CFI_ENDPROC
16067-END(ret_from_fork)
16068+ENDPROC(ret_from_fork)
16069
16070 /*
16071 * System call entry. Upto 6 arguments in registers are supported.
16072@@ -455,7 +742,7 @@ END(ret_from_fork)
16073 ENTRY(system_call)
16074 CFI_STARTPROC simple
16075 CFI_SIGNAL_FRAME
16076- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16077+ CFI_DEF_CFA rsp,0
16078 CFI_REGISTER rip,rcx
16079 /*CFI_REGISTER rflags,r11*/
16080 SWAPGS_UNSAFE_STACK
16081@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16082
16083 movq %rsp,PER_CPU_VAR(old_rsp)
16084 movq PER_CPU_VAR(kernel_stack),%rsp
16085+ SAVE_ARGS 8*6,1
16086+ pax_enter_kernel_user
16087 /*
16088 * No need to follow this irqs off/on section - it's straight
16089 * and short:
16090 */
16091 ENABLE_INTERRUPTS(CLBR_NONE)
16092- SAVE_ARGS 8,1
16093 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16094 movq %rcx,RIP-ARGOFFSET(%rsp)
16095 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16096@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16097 system_call_fastpath:
16098 cmpq $__NR_syscall_max,%rax
16099 ja badsys
16100- movq %r10,%rcx
16101+ movq R10-ARGOFFSET(%rsp),%rcx
16102 call *sys_call_table(,%rax,8) # XXX: rip relative
16103 movq %rax,RAX-ARGOFFSET(%rsp)
16104 /*
16105@@ -502,6 +790,8 @@ sysret_check:
16106 andl %edi,%edx
16107 jnz sysret_careful
16108 CFI_REMEMBER_STATE
16109+ pax_exit_kernel_user
16110+ pax_erase_kstack
16111 /*
16112 * sysretq will re-enable interrupts:
16113 */
16114@@ -555,14 +845,18 @@ badsys:
16115 * jump back to the normal fast path.
16116 */
16117 auditsys:
16118- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16119+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16120 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16121 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16122 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16123 movq %rax,%rsi /* 2nd arg: syscall number */
16124 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16125 call audit_syscall_entry
16126+
16127+ pax_erase_kstack
16128+
16129 LOAD_ARGS 0 /* reload call-clobbered registers */
16130+ pax_set_fptr_mask
16131 jmp system_call_fastpath
16132
16133 /*
16134@@ -592,16 +886,20 @@ tracesys:
16135 FIXUP_TOP_OF_STACK %rdi
16136 movq %rsp,%rdi
16137 call syscall_trace_enter
16138+
16139+ pax_erase_kstack
16140+
16141 /*
16142 * Reload arg registers from stack in case ptrace changed them.
16143 * We don't reload %rax because syscall_trace_enter() returned
16144 * the value it wants us to use in the table lookup.
16145 */
16146 LOAD_ARGS ARGOFFSET, 1
16147+ pax_set_fptr_mask
16148 RESTORE_REST
16149 cmpq $__NR_syscall_max,%rax
16150 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16151- movq %r10,%rcx /* fixup for C */
16152+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16153 call *sys_call_table(,%rax,8)
16154 movq %rax,RAX-ARGOFFSET(%rsp)
16155 /* Use IRET because user could have changed frame */
16156@@ -613,7 +911,7 @@ tracesys:
16157 GLOBAL(int_ret_from_sys_call)
16158 DISABLE_INTERRUPTS(CLBR_NONE)
16159 TRACE_IRQS_OFF
16160- testl $3,CS-ARGOFFSET(%rsp)
16161+ testb $3,CS-ARGOFFSET(%rsp)
16162 je retint_restore_args
16163 movl $_TIF_ALLWORK_MASK,%edi
16164 /* edi: mask to check */
16165@@ -674,7 +972,7 @@ int_restore_rest:
16166 TRACE_IRQS_OFF
16167 jmp int_with_check
16168 CFI_ENDPROC
16169-END(system_call)
16170+ENDPROC(system_call)
16171
16172 /*
16173 * Certain special system calls that need to save a complete full stack frame.
16174@@ -690,7 +988,7 @@ ENTRY(\label)
16175 call \func
16176 jmp ptregscall_common
16177 CFI_ENDPROC
16178-END(\label)
16179+ENDPROC(\label)
16180 .endm
16181
16182 PTREGSCALL stub_clone, sys_clone, %r8
16183@@ -708,9 +1006,10 @@ ENTRY(ptregscall_common)
16184 movq_cfi_restore R12+8, r12
16185 movq_cfi_restore RBP+8, rbp
16186 movq_cfi_restore RBX+8, rbx
16187+ pax_force_retaddr
16188 ret $REST_SKIP /* pop extended registers */
16189 CFI_ENDPROC
16190-END(ptregscall_common)
16191+ENDPROC(ptregscall_common)
16192
16193 ENTRY(stub_execve)
16194 CFI_STARTPROC
16195@@ -726,7 +1025,7 @@ ENTRY(stub_execve)
16196 RESTORE_REST
16197 jmp int_ret_from_sys_call
16198 CFI_ENDPROC
16199-END(stub_execve)
16200+ENDPROC(stub_execve)
16201
16202 /*
16203 * sigreturn is special because it needs to restore all registers on return.
16204@@ -744,7 +1043,7 @@ ENTRY(stub_rt_sigreturn)
16205 RESTORE_REST
16206 jmp int_ret_from_sys_call
16207 CFI_ENDPROC
16208-END(stub_rt_sigreturn)
16209+ENDPROC(stub_rt_sigreturn)
16210
16211 /*
16212 * Build the entry stubs and pointer table with some assembler magic.
16213@@ -780,7 +1079,7 @@ vector=vector+1
16214 2: jmp common_interrupt
16215 .endr
16216 CFI_ENDPROC
16217-END(irq_entries_start)
16218+ENDPROC(irq_entries_start)
16219
16220 .previous
16221 END(interrupt)
16222@@ -800,6 +1099,16 @@ END(interrupt)
16223 CFI_ADJUST_CFA_OFFSET 10*8
16224 call save_args
16225 PARTIAL_FRAME 0
16226+#ifdef CONFIG_PAX_MEMORY_UDEREF
16227+ testb $3, CS(%rdi)
16228+ jnz 1f
16229+ pax_enter_kernel
16230+ jmp 2f
16231+1: pax_enter_kernel_user
16232+2:
16233+#else
16234+ pax_enter_kernel
16235+#endif
16236 call \func
16237 .endm
16238
16239@@ -822,7 +1131,7 @@ ret_from_intr:
16240 CFI_ADJUST_CFA_OFFSET -8
16241 exit_intr:
16242 GET_THREAD_INFO(%rcx)
16243- testl $3,CS-ARGOFFSET(%rsp)
16244+ testb $3,CS-ARGOFFSET(%rsp)
16245 je retint_kernel
16246
16247 /* Interrupt came from user space */
16248@@ -844,12 +1153,16 @@ retint_swapgs: /* return to user-space */
16249 * The iretq could re-enable interrupts:
16250 */
16251 DISABLE_INTERRUPTS(CLBR_ANY)
16252+ pax_exit_kernel_user
16253+ pax_erase_kstack
16254 TRACE_IRQS_IRETQ
16255 SWAPGS
16256 jmp restore_args
16257
16258 retint_restore_args: /* return to kernel space */
16259 DISABLE_INTERRUPTS(CLBR_ANY)
16260+ pax_exit_kernel
16261+ pax_force_retaddr RIP-ARGOFFSET
16262 /*
16263 * The iretq could re-enable interrupts:
16264 */
16265@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16266 #endif
16267
16268 CFI_ENDPROC
16269-END(common_interrupt)
16270+ENDPROC(common_interrupt)
16271
16272 /*
16273 * APIC interrupts.
16274@@ -953,7 +1266,7 @@ ENTRY(\sym)
16275 interrupt \do_sym
16276 jmp ret_from_intr
16277 CFI_ENDPROC
16278-END(\sym)
16279+ENDPROC(\sym)
16280 .endm
16281
16282 #ifdef CONFIG_SMP
16283@@ -1032,12 +1345,22 @@ ENTRY(\sym)
16284 CFI_ADJUST_CFA_OFFSET 15*8
16285 call error_entry
16286 DEFAULT_FRAME 0
16287+#ifdef CONFIG_PAX_MEMORY_UDEREF
16288+ testb $3, CS(%rsp)
16289+ jnz 1f
16290+ pax_enter_kernel
16291+ jmp 2f
16292+1: pax_enter_kernel_user
16293+2:
16294+#else
16295+ pax_enter_kernel
16296+#endif
16297 movq %rsp,%rdi /* pt_regs pointer */
16298 xorl %esi,%esi /* no error code */
16299 call \do_sym
16300 jmp error_exit /* %ebx: no swapgs flag */
16301 CFI_ENDPROC
16302-END(\sym)
16303+ENDPROC(\sym)
16304 .endm
16305
16306 .macro paranoidzeroentry sym do_sym
16307@@ -1049,12 +1372,22 @@ ENTRY(\sym)
16308 subq $15*8, %rsp
16309 call save_paranoid
16310 TRACE_IRQS_OFF
16311+#ifdef CONFIG_PAX_MEMORY_UDEREF
16312+ testb $3, CS(%rsp)
16313+ jnz 1f
16314+ pax_enter_kernel
16315+ jmp 2f
16316+1: pax_enter_kernel_user
16317+2:
16318+#else
16319+ pax_enter_kernel
16320+#endif
16321 movq %rsp,%rdi /* pt_regs pointer */
16322 xorl %esi,%esi /* no error code */
16323 call \do_sym
16324 jmp paranoid_exit /* %ebx: no swapgs flag */
16325 CFI_ENDPROC
16326-END(\sym)
16327+ENDPROC(\sym)
16328 .endm
16329
16330 .macro paranoidzeroentry_ist sym do_sym ist
16331@@ -1066,15 +1399,30 @@ ENTRY(\sym)
16332 subq $15*8, %rsp
16333 call save_paranoid
16334 TRACE_IRQS_OFF
16335+#ifdef CONFIG_PAX_MEMORY_UDEREF
16336+ testb $3, CS(%rsp)
16337+ jnz 1f
16338+ pax_enter_kernel
16339+ jmp 2f
16340+1: pax_enter_kernel_user
16341+2:
16342+#else
16343+ pax_enter_kernel
16344+#endif
16345 movq %rsp,%rdi /* pt_regs pointer */
16346 xorl %esi,%esi /* no error code */
16347- PER_CPU(init_tss, %rbp)
16348+#ifdef CONFIG_SMP
16349+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16350+ lea init_tss(%rbp), %rbp
16351+#else
16352+ lea init_tss(%rip), %rbp
16353+#endif
16354 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16355 call \do_sym
16356 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16357 jmp paranoid_exit /* %ebx: no swapgs flag */
16358 CFI_ENDPROC
16359-END(\sym)
16360+ENDPROC(\sym)
16361 .endm
16362
16363 .macro errorentry sym do_sym
16364@@ -1085,13 +1433,23 @@ ENTRY(\sym)
16365 CFI_ADJUST_CFA_OFFSET 15*8
16366 call error_entry
16367 DEFAULT_FRAME 0
16368+#ifdef CONFIG_PAX_MEMORY_UDEREF
16369+ testb $3, CS(%rsp)
16370+ jnz 1f
16371+ pax_enter_kernel
16372+ jmp 2f
16373+1: pax_enter_kernel_user
16374+2:
16375+#else
16376+ pax_enter_kernel
16377+#endif
16378 movq %rsp,%rdi /* pt_regs pointer */
16379 movq ORIG_RAX(%rsp),%rsi /* get error code */
16380 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16381 call \do_sym
16382 jmp error_exit /* %ebx: no swapgs flag */
16383 CFI_ENDPROC
16384-END(\sym)
16385+ENDPROC(\sym)
16386 .endm
16387
16388 /* error code is on the stack already */
16389@@ -1104,13 +1462,23 @@ ENTRY(\sym)
16390 call save_paranoid
16391 DEFAULT_FRAME 0
16392 TRACE_IRQS_OFF
16393+#ifdef CONFIG_PAX_MEMORY_UDEREF
16394+ testb $3, CS(%rsp)
16395+ jnz 1f
16396+ pax_enter_kernel
16397+ jmp 2f
16398+1: pax_enter_kernel_user
16399+2:
16400+#else
16401+ pax_enter_kernel
16402+#endif
16403 movq %rsp,%rdi /* pt_regs pointer */
16404 movq ORIG_RAX(%rsp),%rsi /* get error code */
16405 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16406 call \do_sym
16407 jmp paranoid_exit /* %ebx: no swapgs flag */
16408 CFI_ENDPROC
16409-END(\sym)
16410+ENDPROC(\sym)
16411 .endm
16412
16413 zeroentry divide_error do_divide_error
16414@@ -1141,9 +1509,10 @@ gs_change:
16415 SWAPGS
16416 popf
16417 CFI_ADJUST_CFA_OFFSET -8
16418+ pax_force_retaddr
16419 ret
16420 CFI_ENDPROC
16421-END(native_load_gs_index)
16422+ENDPROC(native_load_gs_index)
16423
16424 .section __ex_table,"a"
16425 .align 8
16426@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16427 * of hacks for example to fork off the per-CPU idle tasks.
16428 * [Hopefully no generic code relies on the reschedule -AK]
16429 */
16430- RESTORE_ALL
16431+ RESTORE_REST
16432 UNFAKE_STACK_FRAME
16433+ pax_force_retaddr
16434 ret
16435 CFI_ENDPROC
16436-END(kernel_thread)
16437+ENDPROC(kernel_thread)
16438
16439 ENTRY(child_rip)
16440 pushq $0 # fake return address
16441@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16442 */
16443 movq %rdi, %rax
16444 movq %rsi, %rdi
16445+ pax_force_fptr %rax
16446 call *%rax
16447 # exit
16448 mov %eax, %edi
16449 call do_exit
16450 ud2 # padding for call trace
16451 CFI_ENDPROC
16452-END(child_rip)
16453+ENDPROC(child_rip)
16454
16455 /*
16456 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16457@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16458 RESTORE_REST
16459 testq %rax,%rax
16460 je int_ret_from_sys_call
16461- RESTORE_ARGS
16462 UNFAKE_STACK_FRAME
16463+ pax_force_retaddr
16464 ret
16465 CFI_ENDPROC
16466-END(kernel_execve)
16467+ENDPROC(kernel_execve)
16468
16469 /* Call softirq on interrupt stack. Interrupts are off. */
16470 ENTRY(call_softirq)
16471@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16472 CFI_DEF_CFA_REGISTER rsp
16473 CFI_ADJUST_CFA_OFFSET -8
16474 decl PER_CPU_VAR(irq_count)
16475+ pax_force_retaddr
16476 ret
16477 CFI_ENDPROC
16478-END(call_softirq)
16479+ENDPROC(call_softirq)
16480
16481 #ifdef CONFIG_XEN
16482 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16483@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16484 decl PER_CPU_VAR(irq_count)
16485 jmp error_exit
16486 CFI_ENDPROC
16487-END(xen_do_hypervisor_callback)
16488+ENDPROC(xen_do_hypervisor_callback)
16489
16490 /*
16491 * Hypervisor uses this for application faults while it executes.
16492@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16493 SAVE_ALL
16494 jmp error_exit
16495 CFI_ENDPROC
16496-END(xen_failsafe_callback)
16497+ENDPROC(xen_failsafe_callback)
16498
16499 #endif /* CONFIG_XEN */
16500
16501@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16502 TRACE_IRQS_OFF
16503 testl %ebx,%ebx /* swapgs needed? */
16504 jnz paranoid_restore
16505- testl $3,CS(%rsp)
16506+ testb $3,CS(%rsp)
16507 jnz paranoid_userspace
16508+#ifdef CONFIG_PAX_MEMORY_UDEREF
16509+ pax_exit_kernel
16510+ TRACE_IRQS_IRETQ 0
16511+ SWAPGS_UNSAFE_STACK
16512+ RESTORE_ALL 8
16513+ pax_force_retaddr_bts
16514+ jmp irq_return
16515+#endif
16516 paranoid_swapgs:
16517+#ifdef CONFIG_PAX_MEMORY_UDEREF
16518+ pax_exit_kernel_user
16519+#else
16520+ pax_exit_kernel
16521+#endif
16522 TRACE_IRQS_IRETQ 0
16523 SWAPGS_UNSAFE_STACK
16524 RESTORE_ALL 8
16525 jmp irq_return
16526 paranoid_restore:
16527+ pax_exit_kernel
16528 TRACE_IRQS_IRETQ 0
16529 RESTORE_ALL 8
16530+ pax_force_retaddr_bts
16531 jmp irq_return
16532 paranoid_userspace:
16533 GET_THREAD_INFO(%rcx)
16534@@ -1443,7 +1830,7 @@ paranoid_schedule:
16535 TRACE_IRQS_OFF
16536 jmp paranoid_userspace
16537 CFI_ENDPROC
16538-END(paranoid_exit)
16539+ENDPROC(paranoid_exit)
16540
16541 /*
16542 * Exception entry point. This expects an error code/orig_rax on the stack.
16543@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16544 movq_cfi r14, R14+8
16545 movq_cfi r15, R15+8
16546 xorl %ebx,%ebx
16547- testl $3,CS+8(%rsp)
16548+ testb $3,CS+8(%rsp)
16549 je error_kernelspace
16550 error_swapgs:
16551 SWAPGS
16552 error_sti:
16553 TRACE_IRQS_OFF
16554+ pax_force_retaddr_bts
16555 ret
16556 CFI_ENDPROC
16557
16558@@ -1497,7 +1885,7 @@ error_kernelspace:
16559 cmpq $gs_change,RIP+8(%rsp)
16560 je error_swapgs
16561 jmp error_sti
16562-END(error_entry)
16563+ENDPROC(error_entry)
16564
16565
16566 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16567@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16568 jnz retint_careful
16569 jmp retint_swapgs
16570 CFI_ENDPROC
16571-END(error_exit)
16572+ENDPROC(error_exit)
16573
16574
16575 /* runs on exception stack */
16576@@ -1529,6 +1917,16 @@ ENTRY(nmi)
16577 CFI_ADJUST_CFA_OFFSET 15*8
16578 call save_paranoid
16579 DEFAULT_FRAME 0
16580+#ifdef CONFIG_PAX_MEMORY_UDEREF
16581+ testb $3, CS(%rsp)
16582+ jnz 1f
16583+ pax_enter_kernel
16584+ jmp 2f
16585+1: pax_enter_kernel_user
16586+2:
16587+#else
16588+ pax_enter_kernel
16589+#endif
16590 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16591 movq %rsp,%rdi
16592 movq $-1,%rsi
16593@@ -1539,12 +1937,28 @@ ENTRY(nmi)
16594 DISABLE_INTERRUPTS(CLBR_NONE)
16595 testl %ebx,%ebx /* swapgs needed? */
16596 jnz nmi_restore
16597- testl $3,CS(%rsp)
16598+ testb $3,CS(%rsp)
16599 jnz nmi_userspace
16600+#ifdef CONFIG_PAX_MEMORY_UDEREF
16601+ pax_exit_kernel
16602+ SWAPGS_UNSAFE_STACK
16603+ RESTORE_ALL 8
16604+ pax_force_retaddr_bts
16605+ jmp irq_return
16606+#endif
16607 nmi_swapgs:
16608+#ifdef CONFIG_PAX_MEMORY_UDEREF
16609+ pax_exit_kernel_user
16610+#else
16611+ pax_exit_kernel
16612+#endif
16613 SWAPGS_UNSAFE_STACK
16614+ RESTORE_ALL 8
16615+ jmp irq_return
16616 nmi_restore:
16617+ pax_exit_kernel
16618 RESTORE_ALL 8
16619+ pax_force_retaddr_bts
16620 jmp irq_return
16621 nmi_userspace:
16622 GET_THREAD_INFO(%rcx)
16623@@ -1573,14 +1987,14 @@ nmi_schedule:
16624 jmp paranoid_exit
16625 CFI_ENDPROC
16626 #endif
16627-END(nmi)
16628+ENDPROC(nmi)
16629
16630 ENTRY(ignore_sysret)
16631 CFI_STARTPROC
16632 mov $-ENOSYS,%eax
16633 sysret
16634 CFI_ENDPROC
16635-END(ignore_sysret)
16636+ENDPROC(ignore_sysret)
16637
16638 /*
16639 * End of kprobes section
16640diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16641index 9dbb527..7b3615a 100644
16642--- a/arch/x86/kernel/ftrace.c
16643+++ b/arch/x86/kernel/ftrace.c
16644@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16645 static void *mod_code_newcode; /* holds the text to write to the IP */
16646
16647 static unsigned nmi_wait_count;
16648-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16649+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16650
16651 int ftrace_arch_read_dyn_info(char *buf, int size)
16652 {
16653@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16654
16655 r = snprintf(buf, size, "%u %u",
16656 nmi_wait_count,
16657- atomic_read(&nmi_update_count));
16658+ atomic_read_unchecked(&nmi_update_count));
16659 return r;
16660 }
16661
16662@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16663 {
16664 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16665 smp_rmb();
16666+ pax_open_kernel();
16667 ftrace_mod_code();
16668- atomic_inc(&nmi_update_count);
16669+ pax_close_kernel();
16670+ atomic_inc_unchecked(&nmi_update_count);
16671 }
16672 /* Must have previous changes seen before executions */
16673 smp_mb();
16674@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16675
16676
16677
16678-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16679+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16680
16681 static unsigned char *ftrace_nop_replace(void)
16682 {
16683@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16684 {
16685 unsigned char replaced[MCOUNT_INSN_SIZE];
16686
16687+ ip = ktla_ktva(ip);
16688+
16689 /*
16690 * Note: Due to modules and __init, code can
16691 * disappear and change, we need to protect against faulting
16692@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16693 unsigned char old[MCOUNT_INSN_SIZE], *new;
16694 int ret;
16695
16696- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16697+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16698 new = ftrace_call_replace(ip, (unsigned long)func);
16699 ret = ftrace_modify_code(ip, old, new);
16700
16701@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16702 switch (faulted) {
16703 case 0:
16704 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16705- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16706+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16707 break;
16708 case 1:
16709 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16710- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16711+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16712 break;
16713 case 2:
16714 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16715- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16716+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16717 break;
16718 }
16719
16720@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16721 {
16722 unsigned char code[MCOUNT_INSN_SIZE];
16723
16724+ ip = ktla_ktva(ip);
16725+
16726 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16727 return -EFAULT;
16728
16729diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16730index 4f8e250..df24706 100644
16731--- a/arch/x86/kernel/head32.c
16732+++ b/arch/x86/kernel/head32.c
16733@@ -16,6 +16,7 @@
16734 #include <asm/apic.h>
16735 #include <asm/io_apic.h>
16736 #include <asm/bios_ebda.h>
16737+#include <asm/boot.h>
16738
16739 static void __init i386_default_early_setup(void)
16740 {
16741@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16742 {
16743 reserve_trampoline_memory();
16744
16745- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16746+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16747
16748 #ifdef CONFIG_BLK_DEV_INITRD
16749 /* Reserve INITRD */
16750diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16751index 34c3308..6fc4e76 100644
16752--- a/arch/x86/kernel/head_32.S
16753+++ b/arch/x86/kernel/head_32.S
16754@@ -19,10 +19,17 @@
16755 #include <asm/setup.h>
16756 #include <asm/processor-flags.h>
16757 #include <asm/percpu.h>
16758+#include <asm/msr-index.h>
16759
16760 /* Physical address */
16761 #define pa(X) ((X) - __PAGE_OFFSET)
16762
16763+#ifdef CONFIG_PAX_KERNEXEC
16764+#define ta(X) (X)
16765+#else
16766+#define ta(X) ((X) - __PAGE_OFFSET)
16767+#endif
16768+
16769 /*
16770 * References to members of the new_cpu_data structure.
16771 */
16772@@ -52,11 +59,7 @@
16773 * and small than max_low_pfn, otherwise will waste some page table entries
16774 */
16775
16776-#if PTRS_PER_PMD > 1
16777-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16778-#else
16779-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16780-#endif
16781+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16782
16783 /* Enough space to fit pagetables for the low memory linear map */
16784 MAPPING_BEYOND_END = \
16785@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
16786 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16787
16788 /*
16789+ * Real beginning of normal "text" segment
16790+ */
16791+ENTRY(stext)
16792+ENTRY(_stext)
16793+
16794+/*
16795 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16796 * %esi points to the real-mode code as a 32-bit pointer.
16797 * CS and DS must be 4 GB flat segments, but we don't depend on
16798@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16799 * can.
16800 */
16801 __HEAD
16802+
16803+#ifdef CONFIG_PAX_KERNEXEC
16804+ jmp startup_32
16805+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16806+.fill PAGE_SIZE-5,1,0xcc
16807+#endif
16808+
16809 ENTRY(startup_32)
16810+ movl pa(stack_start),%ecx
16811+
16812 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
16813 us to not reload segments */
16814 testb $(1<<6), BP_loadflags(%esi)
16815@@ -95,7 +113,60 @@ ENTRY(startup_32)
16816 movl %eax,%es
16817 movl %eax,%fs
16818 movl %eax,%gs
16819+ movl %eax,%ss
16820 2:
16821+ leal -__PAGE_OFFSET(%ecx),%esp
16822+
16823+#ifdef CONFIG_SMP
16824+ movl $pa(cpu_gdt_table),%edi
16825+ movl $__per_cpu_load,%eax
16826+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16827+ rorl $16,%eax
16828+ movb %al,__KERNEL_PERCPU + 4(%edi)
16829+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16830+ movl $__per_cpu_end - 1,%eax
16831+ subl $__per_cpu_start,%eax
16832+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16833+#endif
16834+
16835+#ifdef CONFIG_PAX_MEMORY_UDEREF
16836+ movl $NR_CPUS,%ecx
16837+ movl $pa(cpu_gdt_table),%edi
16838+1:
16839+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16840+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16841+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16842+ addl $PAGE_SIZE_asm,%edi
16843+ loop 1b
16844+#endif
16845+
16846+#ifdef CONFIG_PAX_KERNEXEC
16847+ movl $pa(boot_gdt),%edi
16848+ movl $__LOAD_PHYSICAL_ADDR,%eax
16849+ movw %ax,__BOOT_CS + 2(%edi)
16850+ rorl $16,%eax
16851+ movb %al,__BOOT_CS + 4(%edi)
16852+ movb %ah,__BOOT_CS + 7(%edi)
16853+ rorl $16,%eax
16854+
16855+ ljmp $(__BOOT_CS),$1f
16856+1:
16857+
16858+ movl $NR_CPUS,%ecx
16859+ movl $pa(cpu_gdt_table),%edi
16860+ addl $__PAGE_OFFSET,%eax
16861+1:
16862+ movw %ax,__KERNEL_CS + 2(%edi)
16863+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16864+ rorl $16,%eax
16865+ movb %al,__KERNEL_CS + 4(%edi)
16866+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16867+ movb %ah,__KERNEL_CS + 7(%edi)
16868+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16869+ rorl $16,%eax
16870+ addl $PAGE_SIZE_asm,%edi
16871+ loop 1b
16872+#endif
16873
16874 /*
16875 * Clear BSS first so that there are no surprises...
16876@@ -140,9 +211,7 @@ ENTRY(startup_32)
16877 cmpl $num_subarch_entries, %eax
16878 jae bad_subarch
16879
16880- movl pa(subarch_entries)(,%eax,4), %eax
16881- subl $__PAGE_OFFSET, %eax
16882- jmp *%eax
16883+ jmp *pa(subarch_entries)(,%eax,4)
16884
16885 bad_subarch:
16886 WEAK(lguest_entry)
16887@@ -154,10 +223,10 @@ WEAK(xen_entry)
16888 __INITDATA
16889
16890 subarch_entries:
16891- .long default_entry /* normal x86/PC */
16892- .long lguest_entry /* lguest hypervisor */
16893- .long xen_entry /* Xen hypervisor */
16894- .long default_entry /* Moorestown MID */
16895+ .long ta(default_entry) /* normal x86/PC */
16896+ .long ta(lguest_entry) /* lguest hypervisor */
16897+ .long ta(xen_entry) /* Xen hypervisor */
16898+ .long ta(default_entry) /* Moorestown MID */
16899 num_subarch_entries = (. - subarch_entries) / 4
16900 .previous
16901 #endif /* CONFIG_PARAVIRT */
16902@@ -218,8 +287,11 @@ default_entry:
16903 movl %eax, pa(max_pfn_mapped)
16904
16905 /* Do early initialization of the fixmap area */
16906- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16907- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16908+#ifdef CONFIG_COMPAT_VDSO
16909+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16910+#else
16911+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16912+#endif
16913 #else /* Not PAE */
16914
16915 page_pde_offset = (__PAGE_OFFSET >> 20);
16916@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16917 movl %eax, pa(max_pfn_mapped)
16918
16919 /* Do early initialization of the fixmap area */
16920- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16921- movl %eax,pa(swapper_pg_dir+0xffc)
16922+#ifdef CONFIG_COMPAT_VDSO
16923+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
16924+#else
16925+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
16926+#endif
16927 #endif
16928 jmp 3f
16929 /*
16930@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
16931 movl %eax,%es
16932 movl %eax,%fs
16933 movl %eax,%gs
16934+ movl pa(stack_start),%ecx
16935+ movl %eax,%ss
16936+ leal -__PAGE_OFFSET(%ecx),%esp
16937 #endif /* CONFIG_SMP */
16938 3:
16939
16940@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
16941 orl %edx,%eax
16942 movl %eax,%cr4
16943
16944+#ifdef CONFIG_X86_PAE
16945 btl $5, %eax # check if PAE is enabled
16946 jnc 6f
16947
16948@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
16949 cpuid
16950 cmpl $0x80000000, %eax
16951 jbe 6f
16952+
16953+ /* Clear bogus XD_DISABLE bits */
16954+ call verify_cpu
16955+
16956 mov $0x80000001, %eax
16957 cpuid
16958 /* Execute Disable bit supported? */
16959@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
16960 jnc 6f
16961
16962 /* Setup EFER (Extended Feature Enable Register) */
16963- movl $0xc0000080, %ecx
16964+ movl $MSR_EFER, %ecx
16965 rdmsr
16966
16967 btsl $11, %eax
16968 /* Make changes effective */
16969 wrmsr
16970
16971+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16972+ movl $1,pa(nx_enabled)
16973+#endif
16974+
16975 6:
16976
16977 /*
16978@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
16979 movl %eax,%cr0 /* ..and set paging (PG) bit */
16980 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
16981 1:
16982- /* Set up the stack pointer */
16983- lss stack_start,%esp
16984+ /* Shift the stack pointer to a virtual address */
16985+ addl $__PAGE_OFFSET, %esp
16986
16987 /*
16988 * Initialize eflags. Some BIOS's leave bits like NT set. This would
16989@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
16990
16991 #ifdef CONFIG_SMP
16992 cmpb $0, ready
16993- jz 1f /* Initial CPU cleans BSS */
16994- jmp checkCPUtype
16995-1:
16996+ jnz checkCPUtype
16997 #endif /* CONFIG_SMP */
16998
16999 /*
17000@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17001 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17002 movl %eax,%ss # after changing gdt.
17003
17004- movl $(__USER_DS),%eax # DS/ES contains default USER segment
17005+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17006 movl %eax,%ds
17007 movl %eax,%es
17008
17009@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17010 */
17011 cmpb $0,ready
17012 jne 1f
17013- movl $per_cpu__gdt_page,%eax
17014+ movl $cpu_gdt_table,%eax
17015 movl $per_cpu__stack_canary,%ecx
17016+#ifdef CONFIG_SMP
17017+ addl $__per_cpu_load,%ecx
17018+#endif
17019 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17020 shrl $16, %ecx
17021 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17022 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17023 1:
17024-#endif
17025 movl $(__KERNEL_STACK_CANARY),%eax
17026+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17027+ movl $(__USER_DS),%eax
17028+#else
17029+ xorl %eax,%eax
17030+#endif
17031 movl %eax,%gs
17032
17033 xorl %eax,%eax # Clear LDT
17034@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17035
17036 cld # gcc2 wants the direction flag cleared at all times
17037 pushl $0 # fake return address for unwinder
17038-#ifdef CONFIG_SMP
17039- movb ready, %cl
17040 movb $1, ready
17041- cmpb $0,%cl # the first CPU calls start_kernel
17042- je 1f
17043- movl (stack_start), %esp
17044-1:
17045-#endif /* CONFIG_SMP */
17046 jmp *(initial_code)
17047
17048 /*
17049@@ -546,22 +631,22 @@ early_page_fault:
17050 jmp early_fault
17051
17052 early_fault:
17053- cld
17054 #ifdef CONFIG_PRINTK
17055+ cmpl $1,%ss:early_recursion_flag
17056+ je hlt_loop
17057+ incl %ss:early_recursion_flag
17058+ cld
17059 pusha
17060 movl $(__KERNEL_DS),%eax
17061 movl %eax,%ds
17062 movl %eax,%es
17063- cmpl $2,early_recursion_flag
17064- je hlt_loop
17065- incl early_recursion_flag
17066 movl %cr2,%eax
17067 pushl %eax
17068 pushl %edx /* trapno */
17069 pushl $fault_msg
17070 call printk
17071+; call dump_stack
17072 #endif
17073- call dump_stack
17074 hlt_loop:
17075 hlt
17076 jmp hlt_loop
17077@@ -569,8 +654,11 @@ hlt_loop:
17078 /* This is the default interrupt "handler" :-) */
17079 ALIGN
17080 ignore_int:
17081- cld
17082 #ifdef CONFIG_PRINTK
17083+ cmpl $2,%ss:early_recursion_flag
17084+ je hlt_loop
17085+ incl %ss:early_recursion_flag
17086+ cld
17087 pushl %eax
17088 pushl %ecx
17089 pushl %edx
17090@@ -579,9 +667,6 @@ ignore_int:
17091 movl $(__KERNEL_DS),%eax
17092 movl %eax,%ds
17093 movl %eax,%es
17094- cmpl $2,early_recursion_flag
17095- je hlt_loop
17096- incl early_recursion_flag
17097 pushl 16(%esp)
17098 pushl 24(%esp)
17099 pushl 32(%esp)
17100@@ -600,6 +685,8 @@ ignore_int:
17101 #endif
17102 iret
17103
17104+#include "verify_cpu.S"
17105+
17106 __REFDATA
17107 .align 4
17108 ENTRY(initial_code)
17109@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17110 /*
17111 * BSS section
17112 */
17113-__PAGE_ALIGNED_BSS
17114- .align PAGE_SIZE_asm
17115 #ifdef CONFIG_X86_PAE
17116+.section .swapper_pg_pmd,"a",@progbits
17117 swapper_pg_pmd:
17118 .fill 1024*KPMDS,4,0
17119 #else
17120+.section .swapper_pg_dir,"a",@progbits
17121 ENTRY(swapper_pg_dir)
17122 .fill 1024,4,0
17123 #endif
17124+.section .swapper_pg_fixmap,"a",@progbits
17125 swapper_pg_fixmap:
17126 .fill 1024,4,0
17127 #ifdef CONFIG_X86_TRAMPOLINE
17128+.section .trampoline_pg_dir,"a",@progbits
17129 ENTRY(trampoline_pg_dir)
17130+#ifdef CONFIG_X86_PAE
17131+ .fill 4,8,0
17132+#else
17133 .fill 1024,4,0
17134 #endif
17135+#endif
17136+
17137+.section .empty_zero_page,"a",@progbits
17138 ENTRY(empty_zero_page)
17139 .fill 4096,1,0
17140
17141 /*
17142+ * The IDT has to be page-aligned to simplify the Pentium
17143+ * F0 0F bug workaround.. We have a special link segment
17144+ * for this.
17145+ */
17146+.section .idt,"a",@progbits
17147+ENTRY(idt_table)
17148+ .fill 256,8,0
17149+
17150+/*
17151 * This starts the data section.
17152 */
17153 #ifdef CONFIG_X86_PAE
17154-__PAGE_ALIGNED_DATA
17155- /* Page-aligned for the benefit of paravirt? */
17156- .align PAGE_SIZE_asm
17157+.section .swapper_pg_dir,"a",@progbits
17158+
17159 ENTRY(swapper_pg_dir)
17160 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17161 # if KPMDS == 3
17162@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17163 # error "Kernel PMDs should be 1, 2 or 3"
17164 # endif
17165 .align PAGE_SIZE_asm /* needs to be page-sized too */
17166+
17167+#ifdef CONFIG_PAX_PER_CPU_PGD
17168+ENTRY(cpu_pgd)
17169+ .rept NR_CPUS
17170+ .fill 4,8,0
17171+ .endr
17172+#endif
17173+
17174 #endif
17175
17176 .data
17177+.balign 4
17178 ENTRY(stack_start)
17179- .long init_thread_union+THREAD_SIZE
17180- .long __BOOT_DS
17181+ .long init_thread_union+THREAD_SIZE-8
17182
17183 ready: .byte 0
17184
17185+.section .rodata,"a",@progbits
17186 early_recursion_flag:
17187 .long 0
17188
17189@@ -697,7 +809,7 @@ fault_msg:
17190 .word 0 # 32 bit align gdt_desc.address
17191 boot_gdt_descr:
17192 .word __BOOT_DS+7
17193- .long boot_gdt - __PAGE_OFFSET
17194+ .long pa(boot_gdt)
17195
17196 .word 0 # 32-bit align idt_desc.address
17197 idt_descr:
17198@@ -708,7 +820,7 @@ idt_descr:
17199 .word 0 # 32 bit align gdt_desc.address
17200 ENTRY(early_gdt_descr)
17201 .word GDT_ENTRIES*8-1
17202- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17203+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17204
17205 /*
17206 * The boot_gdt must mirror the equivalent in setup.S and is
17207@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17208 .align L1_CACHE_BYTES
17209 ENTRY(boot_gdt)
17210 .fill GDT_ENTRY_BOOT_CS,8,0
17211- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17212- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17213+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17214+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17215+
17216+ .align PAGE_SIZE_asm
17217+ENTRY(cpu_gdt_table)
17218+ .rept NR_CPUS
17219+ .quad 0x0000000000000000 /* NULL descriptor */
17220+ .quad 0x0000000000000000 /* 0x0b reserved */
17221+ .quad 0x0000000000000000 /* 0x13 reserved */
17222+ .quad 0x0000000000000000 /* 0x1b reserved */
17223+
17224+#ifdef CONFIG_PAX_KERNEXEC
17225+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17226+#else
17227+ .quad 0x0000000000000000 /* 0x20 unused */
17228+#endif
17229+
17230+ .quad 0x0000000000000000 /* 0x28 unused */
17231+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17232+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17233+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17234+ .quad 0x0000000000000000 /* 0x4b reserved */
17235+ .quad 0x0000000000000000 /* 0x53 reserved */
17236+ .quad 0x0000000000000000 /* 0x5b reserved */
17237+
17238+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17239+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17240+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17241+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17242+
17243+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17244+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17245+
17246+ /*
17247+ * Segments used for calling PnP BIOS have byte granularity.
17248+ * The code segments and data segments have fixed 64k limits,
17249+ * the transfer segment sizes are set at run time.
17250+ */
17251+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17252+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17253+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17254+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17255+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17256+
17257+ /*
17258+ * The APM segments have byte granularity and their bases
17259+ * are set at run time. All have 64k limits.
17260+ */
17261+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17262+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17263+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17264+
17265+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17266+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17267+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17268+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17269+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17270+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17271+
17272+ /* Be sure this is zeroed to avoid false validations in Xen */
17273+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17274+ .endr
17275diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17276index 780cd92..758b2a6 100644
17277--- a/arch/x86/kernel/head_64.S
17278+++ b/arch/x86/kernel/head_64.S
17279@@ -19,6 +19,8 @@
17280 #include <asm/cache.h>
17281 #include <asm/processor-flags.h>
17282 #include <asm/percpu.h>
17283+#include <asm/cpufeature.h>
17284+#include <asm/alternative-asm.h>
17285
17286 #ifdef CONFIG_PARAVIRT
17287 #include <asm/asm-offsets.h>
17288@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17289 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17290 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17291 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17292+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17293+L3_VMALLOC_START = pud_index(VMALLOC_START)
17294+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17295+L3_VMALLOC_END = pud_index(VMALLOC_END)
17296+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17297+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17298
17299 .text
17300 __HEAD
17301@@ -85,35 +93,23 @@ startup_64:
17302 */
17303 addq %rbp, init_level4_pgt + 0(%rip)
17304 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17305+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17306+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17307+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17308 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17309
17310 addq %rbp, level3_ident_pgt + 0(%rip)
17311+#ifndef CONFIG_XEN
17312+ addq %rbp, level3_ident_pgt + 8(%rip)
17313+#endif
17314
17315- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17316- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17317+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17318+
17319+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17320+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17321
17322 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17323-
17324- /* Add an Identity mapping if I am above 1G */
17325- leaq _text(%rip), %rdi
17326- andq $PMD_PAGE_MASK, %rdi
17327-
17328- movq %rdi, %rax
17329- shrq $PUD_SHIFT, %rax
17330- andq $(PTRS_PER_PUD - 1), %rax
17331- jz ident_complete
17332-
17333- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17334- leaq level3_ident_pgt(%rip), %rbx
17335- movq %rdx, 0(%rbx, %rax, 8)
17336-
17337- movq %rdi, %rax
17338- shrq $PMD_SHIFT, %rax
17339- andq $(PTRS_PER_PMD - 1), %rax
17340- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17341- leaq level2_spare_pgt(%rip), %rbx
17342- movq %rdx, 0(%rbx, %rax, 8)
17343-ident_complete:
17344+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17345
17346 /*
17347 * Fixup the kernel text+data virtual addresses. Note that
17348@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17349 * after the boot processor executes this code.
17350 */
17351
17352- /* Enable PAE mode and PGE */
17353- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17354+ /* Enable PAE mode and PSE/PGE */
17355+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17356 movq %rax, %cr4
17357
17358 /* Setup early boot stage 4 level pagetables. */
17359@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17360 movl $MSR_EFER, %ecx
17361 rdmsr
17362 btsl $_EFER_SCE, %eax /* Enable System Call */
17363- btl $20,%edi /* No Execute supported? */
17364+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17365 jnc 1f
17366 btsl $_EFER_NX, %eax
17367+ leaq init_level4_pgt(%rip), %rdi
17368+#ifndef CONFIG_EFI
17369+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17370+#endif
17371+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17372+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17373+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17374 1: wrmsr /* Make changes effective */
17375
17376 /* Setup cr0 */
17377@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17378 * jump. In addition we need to ensure %cs is set so we make this
17379 * a far return.
17380 */
17381+ pax_set_fptr_mask
17382 movq initial_code(%rip),%rax
17383 pushq $0 # fake return address to stop unwinder
17384 pushq $__KERNEL_CS # set correct cs
17385@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17386 .quad x86_64_start_kernel
17387 ENTRY(initial_gs)
17388 .quad INIT_PER_CPU_VAR(irq_stack_union)
17389- __FINITDATA
17390
17391 ENTRY(stack_start)
17392 .quad init_thread_union+THREAD_SIZE-8
17393 .word 0
17394+ __FINITDATA
17395
17396 bad_address:
17397 jmp bad_address
17398
17399- .section ".init.text","ax"
17400+ __INIT
17401 #ifdef CONFIG_EARLY_PRINTK
17402 .globl early_idt_handlers
17403 early_idt_handlers:
17404@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17405 #endif /* EARLY_PRINTK */
17406 1: hlt
17407 jmp 1b
17408+ .previous
17409
17410 #ifdef CONFIG_EARLY_PRINTK
17411+ __INITDATA
17412 early_recursion_flag:
17413 .long 0
17414+ .previous
17415
17416+ .section .rodata,"a",@progbits
17417 early_idt_msg:
17418 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17419 early_idt_ripmsg:
17420 .asciz "RIP %s\n"
17421+ .previous
17422 #endif /* CONFIG_EARLY_PRINTK */
17423- .previous
17424
17425+ .section .rodata,"a",@progbits
17426 #define NEXT_PAGE(name) \
17427 .balign PAGE_SIZE; \
17428 ENTRY(name)
17429@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17430 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17431 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17432 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17433+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17434+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17435+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17436+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17437+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17438+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17439 .org init_level4_pgt + L4_START_KERNEL*8, 0
17440 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17441 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17442
17443+#ifdef CONFIG_PAX_PER_CPU_PGD
17444+NEXT_PAGE(cpu_pgd)
17445+ .rept NR_CPUS
17446+ .fill 512,8,0
17447+ .endr
17448+#endif
17449+
17450 NEXT_PAGE(level3_ident_pgt)
17451 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17452+#ifdef CONFIG_XEN
17453 .fill 511,8,0
17454+#else
17455+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17456+ .fill 510,8,0
17457+#endif
17458+
17459+NEXT_PAGE(level3_vmalloc_start_pgt)
17460+ .fill 512,8,0
17461+
17462+NEXT_PAGE(level3_vmalloc_end_pgt)
17463+ .fill 512,8,0
17464+
17465+NEXT_PAGE(level3_vmemmap_pgt)
17466+ .fill L3_VMEMMAP_START,8,0
17467+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17468
17469 NEXT_PAGE(level3_kernel_pgt)
17470 .fill L3_START_KERNEL,8,0
17471@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17472 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17473 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17474
17475+NEXT_PAGE(level2_vmemmap_pgt)
17476+ .fill 512,8,0
17477+
17478 NEXT_PAGE(level2_fixmap_pgt)
17479- .fill 506,8,0
17480- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17481- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17482- .fill 5,8,0
17483+ .fill 507,8,0
17484+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17485+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17486+ .fill 4,8,0
17487
17488-NEXT_PAGE(level1_fixmap_pgt)
17489+NEXT_PAGE(level1_vsyscall_pgt)
17490 .fill 512,8,0
17491
17492-NEXT_PAGE(level2_ident_pgt)
17493- /* Since I easily can, map the first 1G.
17494+ /* Since I easily can, map the first 2G.
17495 * Don't set NX because code runs from these pages.
17496 */
17497- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17498+NEXT_PAGE(level2_ident_pgt)
17499+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17500
17501 NEXT_PAGE(level2_kernel_pgt)
17502 /*
17503@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17504 * If you want to increase this then increase MODULES_VADDR
17505 * too.)
17506 */
17507- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17508- KERNEL_IMAGE_SIZE/PMD_SIZE)
17509-
17510-NEXT_PAGE(level2_spare_pgt)
17511- .fill 512, 8, 0
17512+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17513
17514 #undef PMDS
17515 #undef NEXT_PAGE
17516
17517- .data
17518+ .align PAGE_SIZE
17519+ENTRY(cpu_gdt_table)
17520+ .rept NR_CPUS
17521+ .quad 0x0000000000000000 /* NULL descriptor */
17522+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17523+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17524+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17525+ .quad 0x00cffb000000ffff /* __USER32_CS */
17526+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17527+ .quad 0x00affb000000ffff /* __USER_CS */
17528+
17529+#ifdef CONFIG_PAX_KERNEXEC
17530+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17531+#else
17532+ .quad 0x0 /* unused */
17533+#endif
17534+
17535+ .quad 0,0 /* TSS */
17536+ .quad 0,0 /* LDT */
17537+ .quad 0,0,0 /* three TLS descriptors */
17538+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17539+ /* asm/segment.h:GDT_ENTRIES must match this */
17540+
17541+ /* zero the remaining page */
17542+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17543+ .endr
17544+
17545 .align 16
17546 .globl early_gdt_descr
17547 early_gdt_descr:
17548 .word GDT_ENTRIES*8-1
17549 early_gdt_descr_base:
17550- .quad INIT_PER_CPU_VAR(gdt_page)
17551+ .quad cpu_gdt_table
17552
17553 ENTRY(phys_base)
17554 /* This must match the first entry in level2_kernel_pgt */
17555 .quad 0x0000000000000000
17556
17557 #include "../../x86/xen/xen-head.S"
17558-
17559- .section .bss, "aw", @nobits
17560+
17561+ .section .rodata,"a",@progbits
17562 .align L1_CACHE_BYTES
17563 ENTRY(idt_table)
17564- .skip IDT_ENTRIES * 16
17565+ .fill 512,8,0
17566
17567 __PAGE_ALIGNED_BSS
17568 .align PAGE_SIZE
17569diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17570index 9c3bd4a..e1d9b35 100644
17571--- a/arch/x86/kernel/i386_ksyms_32.c
17572+++ b/arch/x86/kernel/i386_ksyms_32.c
17573@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17574 EXPORT_SYMBOL(cmpxchg8b_emu);
17575 #endif
17576
17577+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17578+
17579 /* Networking helper routines. */
17580 EXPORT_SYMBOL(csum_partial_copy_generic);
17581+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17582+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17583
17584 EXPORT_SYMBOL(__get_user_1);
17585 EXPORT_SYMBOL(__get_user_2);
17586@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17587
17588 EXPORT_SYMBOL(csum_partial);
17589 EXPORT_SYMBOL(empty_zero_page);
17590+
17591+#ifdef CONFIG_PAX_KERNEXEC
17592+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17593+#endif
17594diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17595index df89102..a244320 100644
17596--- a/arch/x86/kernel/i8259.c
17597+++ b/arch/x86/kernel/i8259.c
17598@@ -208,7 +208,7 @@ spurious_8259A_irq:
17599 "spurious 8259A interrupt: IRQ%d.\n", irq);
17600 spurious_irq_mask |= irqmask;
17601 }
17602- atomic_inc(&irq_err_count);
17603+ atomic_inc_unchecked(&irq_err_count);
17604 /*
17605 * Theoretically we do not have to handle this IRQ,
17606 * but in Linux this does not cause problems and is
17607diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17608index 3a54dcb..1c22348 100644
17609--- a/arch/x86/kernel/init_task.c
17610+++ b/arch/x86/kernel/init_task.c
17611@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17612 * way process stacks are handled. This is done by having a special
17613 * "init_task" linker map entry..
17614 */
17615-union thread_union init_thread_union __init_task_data =
17616- { INIT_THREAD_INFO(init_task) };
17617+union thread_union init_thread_union __init_task_data;
17618
17619 /*
17620 * Initial task structure.
17621@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17622 * section. Since TSS's are completely CPU-local, we want them
17623 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17624 */
17625-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17626-
17627+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17628+EXPORT_SYMBOL(init_tss);
17629diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17630index 99c4d30..74c84e9 100644
17631--- a/arch/x86/kernel/ioport.c
17632+++ b/arch/x86/kernel/ioport.c
17633@@ -6,6 +6,7 @@
17634 #include <linux/sched.h>
17635 #include <linux/kernel.h>
17636 #include <linux/capability.h>
17637+#include <linux/security.h>
17638 #include <linux/errno.h>
17639 #include <linux/types.h>
17640 #include <linux/ioport.h>
17641@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17642
17643 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17644 return -EINVAL;
17645+#ifdef CONFIG_GRKERNSEC_IO
17646+ if (turn_on && grsec_disable_privio) {
17647+ gr_handle_ioperm();
17648+ return -EPERM;
17649+ }
17650+#endif
17651 if (turn_on && !capable(CAP_SYS_RAWIO))
17652 return -EPERM;
17653
17654@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17655 * because the ->io_bitmap_max value must match the bitmap
17656 * contents:
17657 */
17658- tss = &per_cpu(init_tss, get_cpu());
17659+ tss = init_tss + get_cpu();
17660
17661 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17662
17663@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17664 return -EINVAL;
17665 /* Trying to gain more privileges? */
17666 if (level > old) {
17667+#ifdef CONFIG_GRKERNSEC_IO
17668+ if (grsec_disable_privio) {
17669+ gr_handle_iopl();
17670+ return -EPERM;
17671+ }
17672+#endif
17673 if (!capable(CAP_SYS_RAWIO))
17674 return -EPERM;
17675 }
17676diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17677index 04bbd52..83a07d9 100644
17678--- a/arch/x86/kernel/irq.c
17679+++ b/arch/x86/kernel/irq.c
17680@@ -15,7 +15,7 @@
17681 #include <asm/mce.h>
17682 #include <asm/hw_irq.h>
17683
17684-atomic_t irq_err_count;
17685+atomic_unchecked_t irq_err_count;
17686
17687 /* Function pointer for generic interrupt vector handling */
17688 void (*generic_interrupt_extension)(void) = NULL;
17689@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17690 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17691 seq_printf(p, " Machine check polls\n");
17692 #endif
17693- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17694+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17695 #if defined(CONFIG_X86_IO_APIC)
17696- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17697+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17698 #endif
17699 return 0;
17700 }
17701@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17702
17703 u64 arch_irq_stat(void)
17704 {
17705- u64 sum = atomic_read(&irq_err_count);
17706+ u64 sum = atomic_read_unchecked(&irq_err_count);
17707
17708 #ifdef CONFIG_X86_IO_APIC
17709- sum += atomic_read(&irq_mis_count);
17710+ sum += atomic_read_unchecked(&irq_mis_count);
17711 #endif
17712 return sum;
17713 }
17714diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17715index 7d35d0f..03f1d52 100644
17716--- a/arch/x86/kernel/irq_32.c
17717+++ b/arch/x86/kernel/irq_32.c
17718@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17719 __asm__ __volatile__("andl %%esp,%0" :
17720 "=r" (sp) : "0" (THREAD_SIZE - 1));
17721
17722- return sp < (sizeof(struct thread_info) + STACK_WARN);
17723+ return sp < STACK_WARN;
17724 }
17725
17726 static void print_stack_overflow(void)
17727@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17728 * per-CPU IRQ handling contexts (thread information and stack)
17729 */
17730 union irq_ctx {
17731- struct thread_info tinfo;
17732- u32 stack[THREAD_SIZE/sizeof(u32)];
17733-} __attribute__((aligned(PAGE_SIZE)));
17734+ unsigned long previous_esp;
17735+ u32 stack[THREAD_SIZE/sizeof(u32)];
17736+} __attribute__((aligned(THREAD_SIZE)));
17737
17738 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17739 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17740@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17741 static inline int
17742 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17743 {
17744- union irq_ctx *curctx, *irqctx;
17745+ union irq_ctx *irqctx;
17746 u32 *isp, arg1, arg2;
17747
17748- curctx = (union irq_ctx *) current_thread_info();
17749 irqctx = __get_cpu_var(hardirq_ctx);
17750
17751 /*
17752@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17753 * handler) we can't do that and just have to keep using the
17754 * current stack (which is the irq stack already after all)
17755 */
17756- if (unlikely(curctx == irqctx))
17757+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17758 return 0;
17759
17760 /* build the stack frame on the IRQ stack */
17761- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17762- irqctx->tinfo.task = curctx->tinfo.task;
17763- irqctx->tinfo.previous_esp = current_stack_pointer;
17764+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17765+ irqctx->previous_esp = current_stack_pointer;
17766
17767- /*
17768- * Copy the softirq bits in preempt_count so that the
17769- * softirq checks work in the hardirq context.
17770- */
17771- irqctx->tinfo.preempt_count =
17772- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17773- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17774+#ifdef CONFIG_PAX_MEMORY_UDEREF
17775+ __set_fs(MAKE_MM_SEG(0));
17776+#endif
17777
17778 if (unlikely(overflow))
17779 call_on_stack(print_stack_overflow, isp);
17780@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17781 : "0" (irq), "1" (desc), "2" (isp),
17782 "D" (desc->handle_irq)
17783 : "memory", "cc", "ecx");
17784+
17785+#ifdef CONFIG_PAX_MEMORY_UDEREF
17786+ __set_fs(current_thread_info()->addr_limit);
17787+#endif
17788+
17789 return 1;
17790 }
17791
17792@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17793 */
17794 void __cpuinit irq_ctx_init(int cpu)
17795 {
17796- union irq_ctx *irqctx;
17797-
17798 if (per_cpu(hardirq_ctx, cpu))
17799 return;
17800
17801- irqctx = &per_cpu(hardirq_stack, cpu);
17802- irqctx->tinfo.task = NULL;
17803- irqctx->tinfo.exec_domain = NULL;
17804- irqctx->tinfo.cpu = cpu;
17805- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17806- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17807-
17808- per_cpu(hardirq_ctx, cpu) = irqctx;
17809-
17810- irqctx = &per_cpu(softirq_stack, cpu);
17811- irqctx->tinfo.task = NULL;
17812- irqctx->tinfo.exec_domain = NULL;
17813- irqctx->tinfo.cpu = cpu;
17814- irqctx->tinfo.preempt_count = 0;
17815- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17816-
17817- per_cpu(softirq_ctx, cpu) = irqctx;
17818+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
17819+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
17820
17821 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17822 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17823@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
17824 asmlinkage void do_softirq(void)
17825 {
17826 unsigned long flags;
17827- struct thread_info *curctx;
17828 union irq_ctx *irqctx;
17829 u32 *isp;
17830
17831@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
17832 local_irq_save(flags);
17833
17834 if (local_softirq_pending()) {
17835- curctx = current_thread_info();
17836 irqctx = __get_cpu_var(softirq_ctx);
17837- irqctx->tinfo.task = curctx->task;
17838- irqctx->tinfo.previous_esp = current_stack_pointer;
17839+ irqctx->previous_esp = current_stack_pointer;
17840
17841 /* build the stack frame on the softirq stack */
17842- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17843+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17844+
17845+#ifdef CONFIG_PAX_MEMORY_UDEREF
17846+ __set_fs(MAKE_MM_SEG(0));
17847+#endif
17848
17849 call_on_stack(__do_softirq, isp);
17850+
17851+#ifdef CONFIG_PAX_MEMORY_UDEREF
17852+ __set_fs(current_thread_info()->addr_limit);
17853+#endif
17854+
17855 /*
17856 * Shouldnt happen, we returned above if in_interrupt():
17857 */
17858diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17859index 8d82a77..0baf312 100644
17860--- a/arch/x86/kernel/kgdb.c
17861+++ b/arch/x86/kernel/kgdb.c
17862@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17863
17864 /* clear the trace bit */
17865 linux_regs->flags &= ~X86_EFLAGS_TF;
17866- atomic_set(&kgdb_cpu_doing_single_step, -1);
17867+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17868
17869 /* set the trace bit if we're stepping */
17870 if (remcomInBuffer[0] == 's') {
17871 linux_regs->flags |= X86_EFLAGS_TF;
17872 kgdb_single_step = 1;
17873- atomic_set(&kgdb_cpu_doing_single_step,
17874+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17875 raw_smp_processor_id());
17876 }
17877
17878@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17879 break;
17880
17881 case DIE_DEBUG:
17882- if (atomic_read(&kgdb_cpu_doing_single_step) ==
17883+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
17884 raw_smp_processor_id()) {
17885 if (user_mode(regs))
17886 return single_step_cont(regs, args);
17887@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
17888 return instruction_pointer(regs);
17889 }
17890
17891-struct kgdb_arch arch_kgdb_ops = {
17892+const struct kgdb_arch arch_kgdb_ops = {
17893 /* Breakpoint instruction: */
17894 .gdb_bpt_instr = { 0xcc },
17895 .flags = KGDB_HW_BREAKPOINT,
17896diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17897index 7a67820..8d15b75 100644
17898--- a/arch/x86/kernel/kprobes.c
17899+++ b/arch/x86/kernel/kprobes.c
17900@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
17901 char op;
17902 s32 raddr;
17903 } __attribute__((packed)) * jop;
17904- jop = (struct __arch_jmp_op *)from;
17905+
17906+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
17907+
17908+ pax_open_kernel();
17909 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
17910 jop->op = RELATIVEJUMP_INSTRUCTION;
17911+ pax_close_kernel();
17912 }
17913
17914 /*
17915@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17916 kprobe_opcode_t opcode;
17917 kprobe_opcode_t *orig_opcodes = opcodes;
17918
17919- if (search_exception_tables((unsigned long)opcodes))
17920+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17921 return 0; /* Page fault may occur on this address. */
17922
17923 retry:
17924@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
17925 disp = (u8 *) p->addr + *((s32 *) insn) -
17926 (u8 *) p->ainsn.insn;
17927 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
17928+ pax_open_kernel();
17929 *(s32 *)insn = (s32) disp;
17930+ pax_close_kernel();
17931 }
17932 }
17933 #endif
17934@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
17935
17936 static void __kprobes arch_copy_kprobe(struct kprobe *p)
17937 {
17938- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17939+ pax_open_kernel();
17940+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17941+ pax_close_kernel();
17942
17943 fix_riprel(p);
17944
17945- if (can_boost(p->addr))
17946+ if (can_boost(ktla_ktva(p->addr)))
17947 p->ainsn.boostable = 0;
17948 else
17949 p->ainsn.boostable = -1;
17950
17951- p->opcode = *p->addr;
17952+ p->opcode = *(ktla_ktva(p->addr));
17953 }
17954
17955 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17956@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
17957 if (p->opcode == BREAKPOINT_INSTRUCTION)
17958 regs->ip = (unsigned long)p->addr;
17959 else
17960- regs->ip = (unsigned long)p->ainsn.insn;
17961+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17962 }
17963
17964 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
17965@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17966 if (p->ainsn.boostable == 1 && !p->post_handler) {
17967 /* Boost up -- we can execute copied instructions directly */
17968 reset_current_kprobe();
17969- regs->ip = (unsigned long)p->ainsn.insn;
17970+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17971 preempt_enable_no_resched();
17972 return;
17973 }
17974@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17975 struct kprobe_ctlblk *kcb;
17976
17977 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
17978- if (*addr != BREAKPOINT_INSTRUCTION) {
17979+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17980 /*
17981 * The breakpoint instruction was removed right
17982 * after we hit it. Another cpu has removed
17983@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17984 /* Skip orig_ax, ip, cs */
17985 " addq $24, %rsp\n"
17986 " popfq\n"
17987+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17988+ " btsq $63,(%rsp)\n"
17989+#endif
17990 #else
17991 " pushf\n"
17992 /*
17993@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17994 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17995 {
17996 unsigned long *tos = stack_addr(regs);
17997- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17998+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17999 unsigned long orig_ip = (unsigned long)p->addr;
18000 kprobe_opcode_t *insn = p->ainsn.insn;
18001
18002@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18003 struct die_args *args = data;
18004 int ret = NOTIFY_DONE;
18005
18006- if (args->regs && user_mode_vm(args->regs))
18007+ if (args->regs && user_mode(args->regs))
18008 return ret;
18009
18010 switch (val) {
18011diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18012index 63b0ec8..6d92227 100644
18013--- a/arch/x86/kernel/kvm.c
18014+++ b/arch/x86/kernel/kvm.c
18015@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18016 pv_mmu_ops.set_pud = kvm_set_pud;
18017 #if PAGETABLE_LEVELS == 4
18018 pv_mmu_ops.set_pgd = kvm_set_pgd;
18019+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18020 #endif
18021 #endif
18022 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18023diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18024index ec6ef60..ab2c824 100644
18025--- a/arch/x86/kernel/ldt.c
18026+++ b/arch/x86/kernel/ldt.c
18027@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18028 if (reload) {
18029 #ifdef CONFIG_SMP
18030 preempt_disable();
18031- load_LDT(pc);
18032+ load_LDT_nolock(pc);
18033 if (!cpumask_equal(mm_cpumask(current->mm),
18034 cpumask_of(smp_processor_id())))
18035 smp_call_function(flush_ldt, current->mm, 1);
18036 preempt_enable();
18037 #else
18038- load_LDT(pc);
18039+ load_LDT_nolock(pc);
18040 #endif
18041 }
18042 if (oldsize) {
18043@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18044 return err;
18045
18046 for (i = 0; i < old->size; i++)
18047- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18048+ write_ldt_entry(new->ldt, i, old->ldt + i);
18049 return 0;
18050 }
18051
18052@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18053 retval = copy_ldt(&mm->context, &old_mm->context);
18054 mutex_unlock(&old_mm->context.lock);
18055 }
18056+
18057+ if (tsk == current) {
18058+ mm->context.vdso = 0;
18059+
18060+#ifdef CONFIG_X86_32
18061+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18062+ mm->context.user_cs_base = 0UL;
18063+ mm->context.user_cs_limit = ~0UL;
18064+
18065+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18066+ cpus_clear(mm->context.cpu_user_cs_mask);
18067+#endif
18068+
18069+#endif
18070+#endif
18071+
18072+ }
18073+
18074 return retval;
18075 }
18076
18077@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18078 }
18079 }
18080
18081+#ifdef CONFIG_PAX_SEGMEXEC
18082+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18083+ error = -EINVAL;
18084+ goto out_unlock;
18085+ }
18086+#endif
18087+
18088 fill_ldt(&ldt, &ldt_info);
18089 if (oldmode)
18090 ldt.avl = 0;
18091diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18092index c1c429d..f02eaf9 100644
18093--- a/arch/x86/kernel/machine_kexec_32.c
18094+++ b/arch/x86/kernel/machine_kexec_32.c
18095@@ -26,7 +26,7 @@
18096 #include <asm/system.h>
18097 #include <asm/cacheflush.h>
18098
18099-static void set_idt(void *newidt, __u16 limit)
18100+static void set_idt(struct desc_struct *newidt, __u16 limit)
18101 {
18102 struct desc_ptr curidt;
18103
18104@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18105 }
18106
18107
18108-static void set_gdt(void *newgdt, __u16 limit)
18109+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18110 {
18111 struct desc_ptr curgdt;
18112
18113@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18114 }
18115
18116 control_page = page_address(image->control_code_page);
18117- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18118+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18119
18120 relocate_kernel_ptr = control_page;
18121 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18122diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18123index 1e47679..e73449d 100644
18124--- a/arch/x86/kernel/microcode_amd.c
18125+++ b/arch/x86/kernel/microcode_amd.c
18126@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18127 uci->mc = NULL;
18128 }
18129
18130-static struct microcode_ops microcode_amd_ops = {
18131+static const struct microcode_ops microcode_amd_ops = {
18132 .request_microcode_user = request_microcode_user,
18133 .request_microcode_fw = request_microcode_fw,
18134 .collect_cpu_info = collect_cpu_info_amd,
18135@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18136 .microcode_fini_cpu = microcode_fini_cpu_amd,
18137 };
18138
18139-struct microcode_ops * __init init_amd_microcode(void)
18140+const struct microcode_ops * __init init_amd_microcode(void)
18141 {
18142 return &microcode_amd_ops;
18143 }
18144diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18145index 378e9a8..b5a6ea9 100644
18146--- a/arch/x86/kernel/microcode_core.c
18147+++ b/arch/x86/kernel/microcode_core.c
18148@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18149
18150 #define MICROCODE_VERSION "2.00"
18151
18152-static struct microcode_ops *microcode_ops;
18153+static const struct microcode_ops *microcode_ops;
18154
18155 /*
18156 * Synchronization.
18157diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18158index 0d334dd..14cedaf 100644
18159--- a/arch/x86/kernel/microcode_intel.c
18160+++ b/arch/x86/kernel/microcode_intel.c
18161@@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18162
18163 static int get_ucode_user(void *to, const void *from, size_t n)
18164 {
18165- return copy_from_user(to, from, n);
18166+ return copy_from_user(to, (const void __force_user *)from, n);
18167 }
18168
18169 static enum ucode_state
18170 request_microcode_user(int cpu, const void __user *buf, size_t size)
18171 {
18172- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18173+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18174 }
18175
18176 static void microcode_fini_cpu(int cpu)
18177@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18178 uci->mc = NULL;
18179 }
18180
18181-static struct microcode_ops microcode_intel_ops = {
18182+static const struct microcode_ops microcode_intel_ops = {
18183 .request_microcode_user = request_microcode_user,
18184 .request_microcode_fw = request_microcode_fw,
18185 .collect_cpu_info = collect_cpu_info,
18186@@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18187 .microcode_fini_cpu = microcode_fini_cpu,
18188 };
18189
18190-struct microcode_ops * __init init_intel_microcode(void)
18191+const struct microcode_ops * __init init_intel_microcode(void)
18192 {
18193 return &microcode_intel_ops;
18194 }
18195diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18196index 89f386f..9028f51 100644
18197--- a/arch/x86/kernel/module.c
18198+++ b/arch/x86/kernel/module.c
18199@@ -34,7 +34,7 @@
18200 #define DEBUGP(fmt...)
18201 #endif
18202
18203-void *module_alloc(unsigned long size)
18204+static void *__module_alloc(unsigned long size, pgprot_t prot)
18205 {
18206 struct vm_struct *area;
18207
18208@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18209 if (!area)
18210 return NULL;
18211
18212- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18213- PAGE_KERNEL_EXEC);
18214+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18215+}
18216+
18217+void *module_alloc(unsigned long size)
18218+{
18219+
18220+#ifdef CONFIG_PAX_KERNEXEC
18221+ return __module_alloc(size, PAGE_KERNEL);
18222+#else
18223+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18224+#endif
18225+
18226 }
18227
18228 /* Free memory returned from module_alloc */
18229@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18230 vfree(module_region);
18231 }
18232
18233+#ifdef CONFIG_PAX_KERNEXEC
18234+#ifdef CONFIG_X86_32
18235+void *module_alloc_exec(unsigned long size)
18236+{
18237+ struct vm_struct *area;
18238+
18239+ if (size == 0)
18240+ return NULL;
18241+
18242+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18243+ return area ? area->addr : NULL;
18244+}
18245+EXPORT_SYMBOL(module_alloc_exec);
18246+
18247+void module_free_exec(struct module *mod, void *module_region)
18248+{
18249+ vunmap(module_region);
18250+}
18251+EXPORT_SYMBOL(module_free_exec);
18252+#else
18253+void module_free_exec(struct module *mod, void *module_region)
18254+{
18255+ module_free(mod, module_region);
18256+}
18257+EXPORT_SYMBOL(module_free_exec);
18258+
18259+void *module_alloc_exec(unsigned long size)
18260+{
18261+ return __module_alloc(size, PAGE_KERNEL_RX);
18262+}
18263+EXPORT_SYMBOL(module_alloc_exec);
18264+#endif
18265+#endif
18266+
18267 /* We don't need anything special. */
18268 int module_frob_arch_sections(Elf_Ehdr *hdr,
18269 Elf_Shdr *sechdrs,
18270@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18271 unsigned int i;
18272 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18273 Elf32_Sym *sym;
18274- uint32_t *location;
18275+ uint32_t *plocation, location;
18276
18277 DEBUGP("Applying relocate section %u to %u\n", relsec,
18278 sechdrs[relsec].sh_info);
18279 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18280 /* This is where to make the change */
18281- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18282- + rel[i].r_offset;
18283+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18284+ location = (uint32_t)plocation;
18285+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18286+ plocation = ktla_ktva((void *)plocation);
18287 /* This is the symbol it is referring to. Note that all
18288 undefined symbols have been resolved. */
18289 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18290@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18291 switch (ELF32_R_TYPE(rel[i].r_info)) {
18292 case R_386_32:
18293 /* We add the value into the location given */
18294- *location += sym->st_value;
18295+ pax_open_kernel();
18296+ *plocation += sym->st_value;
18297+ pax_close_kernel();
18298 break;
18299 case R_386_PC32:
18300 /* Add the value, subtract its postition */
18301- *location += sym->st_value - (uint32_t)location;
18302+ pax_open_kernel();
18303+ *plocation += sym->st_value - location;
18304+ pax_close_kernel();
18305 break;
18306 default:
18307 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18308@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18309 case R_X86_64_NONE:
18310 break;
18311 case R_X86_64_64:
18312+ pax_open_kernel();
18313 *(u64 *)loc = val;
18314+ pax_close_kernel();
18315 break;
18316 case R_X86_64_32:
18317+ pax_open_kernel();
18318 *(u32 *)loc = val;
18319+ pax_close_kernel();
18320 if (val != *(u32 *)loc)
18321 goto overflow;
18322 break;
18323 case R_X86_64_32S:
18324+ pax_open_kernel();
18325 *(s32 *)loc = val;
18326+ pax_close_kernel();
18327 if ((s64)val != *(s32 *)loc)
18328 goto overflow;
18329 break;
18330 case R_X86_64_PC32:
18331 val -= (u64)loc;
18332+ pax_open_kernel();
18333 *(u32 *)loc = val;
18334+ pax_close_kernel();
18335+
18336 #if 0
18337 if ((s64)val != *(s32 *)loc)
18338 goto overflow;
18339diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18340index 3a7c5a4..9191528 100644
18341--- a/arch/x86/kernel/paravirt-spinlocks.c
18342+++ b/arch/x86/kernel/paravirt-spinlocks.c
18343@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18344 __raw_spin_lock(lock);
18345 }
18346
18347-struct pv_lock_ops pv_lock_ops = {
18348+struct pv_lock_ops pv_lock_ops __read_only = {
18349 #ifdef CONFIG_SMP
18350 .spin_is_locked = __ticket_spin_is_locked,
18351 .spin_is_contended = __ticket_spin_is_contended,
18352diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18353index 1b1739d..dea6077 100644
18354--- a/arch/x86/kernel/paravirt.c
18355+++ b/arch/x86/kernel/paravirt.c
18356@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18357 {
18358 return x;
18359 }
18360+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18361+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18362+#endif
18363
18364 void __init default_banner(void)
18365 {
18366@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18367 * corresponding structure. */
18368 static void *get_call_destination(u8 type)
18369 {
18370- struct paravirt_patch_template tmpl = {
18371+ const struct paravirt_patch_template tmpl = {
18372 .pv_init_ops = pv_init_ops,
18373 .pv_time_ops = pv_time_ops,
18374 .pv_cpu_ops = pv_cpu_ops,
18375@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18376 .pv_lock_ops = pv_lock_ops,
18377 #endif
18378 };
18379+
18380+ pax_track_stack();
18381 return *((void **)&tmpl + type);
18382 }
18383
18384@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18385 if (opfunc == NULL)
18386 /* If there's no function, patch it with a ud2a (BUG) */
18387 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18388- else if (opfunc == _paravirt_nop)
18389+ else if (opfunc == (void *)_paravirt_nop)
18390 /* If the operation is a nop, then nop the callsite */
18391 ret = paravirt_patch_nop();
18392
18393 /* identity functions just return their single argument */
18394- else if (opfunc == _paravirt_ident_32)
18395+ else if (opfunc == (void *)_paravirt_ident_32)
18396 ret = paravirt_patch_ident_32(insnbuf, len);
18397- else if (opfunc == _paravirt_ident_64)
18398+ else if (opfunc == (void *)_paravirt_ident_64)
18399 ret = paravirt_patch_ident_64(insnbuf, len);
18400+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18401+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18402+ ret = paravirt_patch_ident_64(insnbuf, len);
18403+#endif
18404
18405 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18406 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18407@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18408 if (insn_len > len || start == NULL)
18409 insn_len = len;
18410 else
18411- memcpy(insnbuf, start, insn_len);
18412+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18413
18414 return insn_len;
18415 }
18416@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18417 preempt_enable();
18418 }
18419
18420-struct pv_info pv_info = {
18421+struct pv_info pv_info __read_only = {
18422 .name = "bare hardware",
18423 .paravirt_enabled = 0,
18424 .kernel_rpl = 0,
18425 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18426 };
18427
18428-struct pv_init_ops pv_init_ops = {
18429+struct pv_init_ops pv_init_ops __read_only = {
18430 .patch = native_patch,
18431 };
18432
18433-struct pv_time_ops pv_time_ops = {
18434+struct pv_time_ops pv_time_ops __read_only = {
18435 .sched_clock = native_sched_clock,
18436 };
18437
18438-struct pv_irq_ops pv_irq_ops = {
18439+struct pv_irq_ops pv_irq_ops __read_only = {
18440 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18441 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18442 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18443@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18444 #endif
18445 };
18446
18447-struct pv_cpu_ops pv_cpu_ops = {
18448+struct pv_cpu_ops pv_cpu_ops __read_only = {
18449 .cpuid = native_cpuid,
18450 .get_debugreg = native_get_debugreg,
18451 .set_debugreg = native_set_debugreg,
18452@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18453 .end_context_switch = paravirt_nop,
18454 };
18455
18456-struct pv_apic_ops pv_apic_ops = {
18457+struct pv_apic_ops pv_apic_ops __read_only = {
18458 #ifdef CONFIG_X86_LOCAL_APIC
18459 .startup_ipi_hook = paravirt_nop,
18460 #endif
18461 };
18462
18463-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18464+#ifdef CONFIG_X86_32
18465+#ifdef CONFIG_X86_PAE
18466+/* 64-bit pagetable entries */
18467+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18468+#else
18469 /* 32-bit pagetable entries */
18470 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18471+#endif
18472 #else
18473 /* 64-bit pagetable entries */
18474 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18475 #endif
18476
18477-struct pv_mmu_ops pv_mmu_ops = {
18478+struct pv_mmu_ops pv_mmu_ops __read_only = {
18479
18480 .read_cr2 = native_read_cr2,
18481 .write_cr2 = native_write_cr2,
18482@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18483 .make_pud = PTE_IDENT,
18484
18485 .set_pgd = native_set_pgd,
18486+ .set_pgd_batched = native_set_pgd_batched,
18487 #endif
18488 #endif /* PAGETABLE_LEVELS >= 3 */
18489
18490@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18491 },
18492
18493 .set_fixmap = native_set_fixmap,
18494+
18495+#ifdef CONFIG_PAX_KERNEXEC
18496+ .pax_open_kernel = native_pax_open_kernel,
18497+ .pax_close_kernel = native_pax_close_kernel,
18498+#endif
18499+
18500 };
18501
18502 EXPORT_SYMBOL_GPL(pv_time_ops);
18503diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18504index 1a2d4b1..6a0dd55 100644
18505--- a/arch/x86/kernel/pci-calgary_64.c
18506+++ b/arch/x86/kernel/pci-calgary_64.c
18507@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18508 free_pages((unsigned long)vaddr, get_order(size));
18509 }
18510
18511-static struct dma_map_ops calgary_dma_ops = {
18512+static const struct dma_map_ops calgary_dma_ops = {
18513 .alloc_coherent = calgary_alloc_coherent,
18514 .free_coherent = calgary_free_coherent,
18515 .map_sg = calgary_map_sg,
18516diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18517index 6ac3931..42b4414 100644
18518--- a/arch/x86/kernel/pci-dma.c
18519+++ b/arch/x86/kernel/pci-dma.c
18520@@ -14,7 +14,7 @@
18521
18522 static int forbid_dac __read_mostly;
18523
18524-struct dma_map_ops *dma_ops;
18525+const struct dma_map_ops *dma_ops;
18526 EXPORT_SYMBOL(dma_ops);
18527
18528 static int iommu_sac_force __read_mostly;
18529@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18530
18531 int dma_supported(struct device *dev, u64 mask)
18532 {
18533- struct dma_map_ops *ops = get_dma_ops(dev);
18534+ const struct dma_map_ops *ops = get_dma_ops(dev);
18535
18536 #ifdef CONFIG_PCI
18537 if (mask > 0xffffffff && forbid_dac > 0) {
18538diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18539index 1c76691..e3632db 100644
18540--- a/arch/x86/kernel/pci-gart_64.c
18541+++ b/arch/x86/kernel/pci-gart_64.c
18542@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18543 return -1;
18544 }
18545
18546-static struct dma_map_ops gart_dma_ops = {
18547+static const struct dma_map_ops gart_dma_ops = {
18548 .map_sg = gart_map_sg,
18549 .unmap_sg = gart_unmap_sg,
18550 .map_page = gart_map_page,
18551diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18552index a3933d4..c898869 100644
18553--- a/arch/x86/kernel/pci-nommu.c
18554+++ b/arch/x86/kernel/pci-nommu.c
18555@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18556 flush_write_buffers();
18557 }
18558
18559-struct dma_map_ops nommu_dma_ops = {
18560+const struct dma_map_ops nommu_dma_ops = {
18561 .alloc_coherent = dma_generic_alloc_coherent,
18562 .free_coherent = nommu_free_coherent,
18563 .map_sg = nommu_map_sg,
18564diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18565index aaa6b78..4de1881 100644
18566--- a/arch/x86/kernel/pci-swiotlb.c
18567+++ b/arch/x86/kernel/pci-swiotlb.c
18568@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18569 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18570 }
18571
18572-static struct dma_map_ops swiotlb_dma_ops = {
18573+static const struct dma_map_ops swiotlb_dma_ops = {
18574 .mapping_error = swiotlb_dma_mapping_error,
18575 .alloc_coherent = x86_swiotlb_alloc_coherent,
18576 .free_coherent = swiotlb_free_coherent,
18577diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18578index fc6c84d..0312ca2 100644
18579--- a/arch/x86/kernel/process.c
18580+++ b/arch/x86/kernel/process.c
18581@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18582
18583 void free_thread_info(struct thread_info *ti)
18584 {
18585- free_thread_xstate(ti->task);
18586 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18587 }
18588
18589+static struct kmem_cache *task_struct_cachep;
18590+
18591 void arch_task_cache_init(void)
18592 {
18593- task_xstate_cachep =
18594- kmem_cache_create("task_xstate", xstate_size,
18595+ /* create a slab on which task_structs can be allocated */
18596+ task_struct_cachep =
18597+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18598+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18599+
18600+ task_xstate_cachep =
18601+ kmem_cache_create("task_xstate", xstate_size,
18602 __alignof__(union thread_xstate),
18603- SLAB_PANIC | SLAB_NOTRACK, NULL);
18604+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18605+}
18606+
18607+struct task_struct *alloc_task_struct(void)
18608+{
18609+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18610+}
18611+
18612+void free_task_struct(struct task_struct *task)
18613+{
18614+ free_thread_xstate(task);
18615+ kmem_cache_free(task_struct_cachep, task);
18616 }
18617
18618 /*
18619@@ -73,7 +90,7 @@ void exit_thread(void)
18620 unsigned long *bp = t->io_bitmap_ptr;
18621
18622 if (bp) {
18623- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18624+ struct tss_struct *tss = init_tss + get_cpu();
18625
18626 t->io_bitmap_ptr = NULL;
18627 clear_thread_flag(TIF_IO_BITMAP);
18628@@ -93,6 +110,9 @@ void flush_thread(void)
18629
18630 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18631
18632+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18633+ loadsegment(gs, 0);
18634+#endif
18635 tsk->thread.debugreg0 = 0;
18636 tsk->thread.debugreg1 = 0;
18637 tsk->thread.debugreg2 = 0;
18638@@ -307,7 +327,7 @@ void default_idle(void)
18639 EXPORT_SYMBOL(default_idle);
18640 #endif
18641
18642-void stop_this_cpu(void *dummy)
18643+__noreturn void stop_this_cpu(void *dummy)
18644 {
18645 local_irq_disable();
18646 /*
18647@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18648 }
18649 early_param("idle", idle_setup);
18650
18651-unsigned long arch_align_stack(unsigned long sp)
18652+#ifdef CONFIG_PAX_RANDKSTACK
18653+void pax_randomize_kstack(struct pt_regs *regs)
18654 {
18655- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18656- sp -= get_random_int() % 8192;
18657- return sp & ~0xf;
18658-}
18659+ struct thread_struct *thread = &current->thread;
18660+ unsigned long time;
18661
18662-unsigned long arch_randomize_brk(struct mm_struct *mm)
18663-{
18664- unsigned long range_end = mm->brk + 0x02000000;
18665- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18666+ if (!randomize_va_space)
18667+ return;
18668+
18669+ if (v8086_mode(regs))
18670+ return;
18671+
18672+ rdtscl(time);
18673+
18674+ /* P4 seems to return a 0 LSB, ignore it */
18675+#ifdef CONFIG_MPENTIUM4
18676+ time &= 0x3EUL;
18677+ time <<= 2;
18678+#elif defined(CONFIG_X86_64)
18679+ time &= 0xFUL;
18680+ time <<= 4;
18681+#else
18682+ time &= 0x1FUL;
18683+ time <<= 3;
18684+#endif
18685+
18686+ thread->sp0 ^= time;
18687+ load_sp0(init_tss + smp_processor_id(), thread);
18688+
18689+#ifdef CONFIG_X86_64
18690+ percpu_write(kernel_stack, thread->sp0);
18691+#endif
18692 }
18693+#endif
18694
18695diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18696index c40c432..6e1df72 100644
18697--- a/arch/x86/kernel/process_32.c
18698+++ b/arch/x86/kernel/process_32.c
18699@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18700 unsigned long thread_saved_pc(struct task_struct *tsk)
18701 {
18702 return ((unsigned long *)tsk->thread.sp)[3];
18703+//XXX return tsk->thread.eip;
18704 }
18705
18706 #ifndef CONFIG_SMP
18707@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18708 unsigned short ss, gs;
18709 const char *board;
18710
18711- if (user_mode_vm(regs)) {
18712+ if (user_mode(regs)) {
18713 sp = regs->sp;
18714 ss = regs->ss & 0xffff;
18715- gs = get_user_gs(regs);
18716 } else {
18717 sp = (unsigned long) (&regs->sp);
18718 savesegment(ss, ss);
18719- savesegment(gs, gs);
18720 }
18721+ gs = get_user_gs(regs);
18722
18723 printk("\n");
18724
18725@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18726 regs.bx = (unsigned long) fn;
18727 regs.dx = (unsigned long) arg;
18728
18729- regs.ds = __USER_DS;
18730- regs.es = __USER_DS;
18731+ regs.ds = __KERNEL_DS;
18732+ regs.es = __KERNEL_DS;
18733 regs.fs = __KERNEL_PERCPU;
18734- regs.gs = __KERNEL_STACK_CANARY;
18735+ savesegment(gs, regs.gs);
18736 regs.orig_ax = -1;
18737 regs.ip = (unsigned long) kernel_thread_helper;
18738 regs.cs = __KERNEL_CS | get_kernel_rpl();
18739@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18740 struct task_struct *tsk;
18741 int err;
18742
18743- childregs = task_pt_regs(p);
18744+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18745 *childregs = *regs;
18746 childregs->ax = 0;
18747 childregs->sp = sp;
18748
18749 p->thread.sp = (unsigned long) childregs;
18750 p->thread.sp0 = (unsigned long) (childregs+1);
18751+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18752
18753 p->thread.ip = (unsigned long) ret_from_fork;
18754
18755@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18756 struct thread_struct *prev = &prev_p->thread,
18757 *next = &next_p->thread;
18758 int cpu = smp_processor_id();
18759- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18760+ struct tss_struct *tss = init_tss + cpu;
18761 bool preload_fpu;
18762
18763 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18764@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18765 */
18766 lazy_save_gs(prev->gs);
18767
18768+#ifdef CONFIG_PAX_MEMORY_UDEREF
18769+ __set_fs(task_thread_info(next_p)->addr_limit);
18770+#endif
18771+
18772 /*
18773 * Load the per-thread Thread-Local Storage descriptor.
18774 */
18775@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18776 */
18777 arch_end_context_switch(next_p);
18778
18779+ percpu_write(current_task, next_p);
18780+ percpu_write(current_tinfo, &next_p->tinfo);
18781+
18782 if (preload_fpu)
18783 __math_state_restore();
18784
18785@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18786 if (prev->gs | next->gs)
18787 lazy_load_gs(next->gs);
18788
18789- percpu_write(current_task, next_p);
18790-
18791 return prev_p;
18792 }
18793
18794@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
18795 } while (count++ < 16);
18796 return 0;
18797 }
18798-
18799diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18800index 39493bc..196816d 100644
18801--- a/arch/x86/kernel/process_64.c
18802+++ b/arch/x86/kernel/process_64.c
18803@@ -91,7 +91,7 @@ static void __exit_idle(void)
18804 void exit_idle(void)
18805 {
18806 /* idle loop has pid 0 */
18807- if (current->pid)
18808+ if (task_pid_nr(current))
18809 return;
18810 __exit_idle();
18811 }
18812@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
18813 if (!board)
18814 board = "";
18815 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
18816- current->pid, current->comm, print_tainted(),
18817+ task_pid_nr(current), current->comm, print_tainted(),
18818 init_utsname()->release,
18819 (int)strcspn(init_utsname()->version, " "),
18820 init_utsname()->version, board);
18821@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18822 struct pt_regs *childregs;
18823 struct task_struct *me = current;
18824
18825- childregs = ((struct pt_regs *)
18826- (THREAD_SIZE + task_stack_page(p))) - 1;
18827+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18828 *childregs = *regs;
18829
18830 childregs->ax = 0;
18831@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18832 p->thread.sp = (unsigned long) childregs;
18833 p->thread.sp0 = (unsigned long) (childregs+1);
18834 p->thread.usersp = me->thread.usersp;
18835+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18836
18837 set_tsk_thread_flag(p, TIF_FORK);
18838
18839@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18840 struct thread_struct *prev = &prev_p->thread;
18841 struct thread_struct *next = &next_p->thread;
18842 int cpu = smp_processor_id();
18843- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18844+ struct tss_struct *tss = init_tss + cpu;
18845 unsigned fsindex, gsindex;
18846 bool preload_fpu;
18847
18848@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18849 prev->usersp = percpu_read(old_rsp);
18850 percpu_write(old_rsp, next->usersp);
18851 percpu_write(current_task, next_p);
18852+ percpu_write(current_tinfo, &next_p->tinfo);
18853
18854- percpu_write(kernel_stack,
18855- (unsigned long)task_stack_page(next_p) +
18856- THREAD_SIZE - KERNEL_STACK_OFFSET);
18857+ percpu_write(kernel_stack, next->sp0);
18858
18859 /*
18860 * Now maybe reload the debug registers and handle I/O bitmaps
18861@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
18862 if (!p || p == current || p->state == TASK_RUNNING)
18863 return 0;
18864 stack = (unsigned long)task_stack_page(p);
18865- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18866+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18867 return 0;
18868 fp = *(u64 *)(p->thread.sp);
18869 do {
18870- if (fp < (unsigned long)stack ||
18871- fp >= (unsigned long)stack+THREAD_SIZE)
18872+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18873 return 0;
18874 ip = *(u64 *)(fp+8);
18875 if (!in_sched_functions(ip))
18876diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18877index c06acdd..3f5fff5 100644
18878--- a/arch/x86/kernel/ptrace.c
18879+++ b/arch/x86/kernel/ptrace.c
18880@@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
18881 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18882 {
18883 int ret;
18884- unsigned long __user *datap = (unsigned long __user *)data;
18885+ unsigned long __user *datap = (__force unsigned long __user *)data;
18886
18887 switch (request) {
18888 /* read the word at location addr in the USER area. */
18889@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18890 if (addr < 0)
18891 return -EIO;
18892 ret = do_get_thread_area(child, addr,
18893- (struct user_desc __user *) data);
18894+ (__force struct user_desc __user *) data);
18895 break;
18896
18897 case PTRACE_SET_THREAD_AREA:
18898 if (addr < 0)
18899 return -EIO;
18900 ret = do_set_thread_area(child, addr,
18901- (struct user_desc __user *) data, 0);
18902+ (__force struct user_desc __user *) data, 0);
18903 break;
18904 #endif
18905
18906@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18907 #ifdef CONFIG_X86_PTRACE_BTS
18908 case PTRACE_BTS_CONFIG:
18909 ret = ptrace_bts_config
18910- (child, data, (struct ptrace_bts_config __user *)addr);
18911+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18912 break;
18913
18914 case PTRACE_BTS_STATUS:
18915 ret = ptrace_bts_status
18916- (child, data, (struct ptrace_bts_config __user *)addr);
18917+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18918 break;
18919
18920 case PTRACE_BTS_SIZE:
18921@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18922
18923 case PTRACE_BTS_GET:
18924 ret = ptrace_bts_read_record
18925- (child, data, (struct bts_struct __user *) addr);
18926+ (child, data, (__force struct bts_struct __user *) addr);
18927 break;
18928
18929 case PTRACE_BTS_CLEAR:
18930@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18931
18932 case PTRACE_BTS_DRAIN:
18933 ret = ptrace_bts_drain
18934- (child, data, (struct bts_struct __user *) addr);
18935+ (child, data, (__force struct bts_struct __user *) addr);
18936 break;
18937 #endif /* CONFIG_X86_PTRACE_BTS */
18938
18939@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18940 info.si_code = si_code;
18941
18942 /* User-mode ip? */
18943- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
18944+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
18945
18946 /* Send us the fake SIGTRAP */
18947 force_sig_info(SIGTRAP, &info, tsk);
18948@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18949 * We must return the syscall number to actually look up in the table.
18950 * This can be -1L to skip running any syscall at all.
18951 */
18952-asmregparm long syscall_trace_enter(struct pt_regs *regs)
18953+long syscall_trace_enter(struct pt_regs *regs)
18954 {
18955 long ret = 0;
18956
18957@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
18958 return ret ?: regs->orig_ax;
18959 }
18960
18961-asmregparm void syscall_trace_leave(struct pt_regs *regs)
18962+void syscall_trace_leave(struct pt_regs *regs)
18963 {
18964 if (unlikely(current->audit_context))
18965 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
18966diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18967index cf98100..e76e03d 100644
18968--- a/arch/x86/kernel/reboot.c
18969+++ b/arch/x86/kernel/reboot.c
18970@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
18971 EXPORT_SYMBOL(pm_power_off);
18972
18973 static const struct desc_ptr no_idt = {};
18974-static int reboot_mode;
18975+static unsigned short reboot_mode;
18976 enum reboot_type reboot_type = BOOT_KBD;
18977 int reboot_force;
18978
18979@@ -292,12 +292,12 @@ core_initcall(reboot_init);
18980 controller to pulse the CPU reset line, which is more thorough, but
18981 doesn't work with at least one type of 486 motherboard. It is easy
18982 to stop this code working; hence the copious comments. */
18983-static const unsigned long long
18984-real_mode_gdt_entries [3] =
18985+static struct desc_struct
18986+real_mode_gdt_entries [3] __read_only =
18987 {
18988- 0x0000000000000000ULL, /* Null descriptor */
18989- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
18990- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
18991+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
18992+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
18993+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
18994 };
18995
18996 static const struct desc_ptr
18997@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
18998 * specified by the code and length parameters.
18999 * We assume that length will aways be less that 100!
19000 */
19001-void machine_real_restart(const unsigned char *code, int length)
19002+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19003 {
19004 local_irq_disable();
19005
19006@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19007 /* Remap the kernel at virtual address zero, as well as offset zero
19008 from the kernel segment. This assumes the kernel segment starts at
19009 virtual address PAGE_OFFSET. */
19010- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19011- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19012+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19013+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19014
19015 /*
19016 * Use `swapper_pg_dir' as our page directory.
19017@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19018 boot)". This seems like a fairly standard thing that gets set by
19019 REBOOT.COM programs, and the previous reset routine did this
19020 too. */
19021- *((unsigned short *)0x472) = reboot_mode;
19022+ *(unsigned short *)(__va(0x472)) = reboot_mode;
19023
19024 /* For the switch to real mode, copy some code to low memory. It has
19025 to be in the first 64k because it is running in 16-bit mode, and it
19026 has to have the same physical and virtual address, because it turns
19027 off paging. Copy it near the end of the first page, out of the way
19028 of BIOS variables. */
19029- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19030- real_mode_switch, sizeof (real_mode_switch));
19031- memcpy((void *)(0x1000 - 100), code, length);
19032+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19033+ memcpy(__va(0x1000 - 100), code, length);
19034
19035 /* Set up the IDT for real mode. */
19036 load_idt(&real_mode_idt);
19037@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19038 __asm__ __volatile__ ("ljmp $0x0008,%0"
19039 :
19040 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19041+ do { } while (1);
19042 }
19043 #ifdef CONFIG_APM_MODULE
19044 EXPORT_SYMBOL(machine_real_restart);
19045@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19046 {
19047 }
19048
19049-static void native_machine_emergency_restart(void)
19050+__noreturn static void native_machine_emergency_restart(void)
19051 {
19052 int i;
19053
19054@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19055 #endif
19056 }
19057
19058-static void __machine_emergency_restart(int emergency)
19059+static __noreturn void __machine_emergency_restart(int emergency)
19060 {
19061 reboot_emergency = emergency;
19062 machine_ops.emergency_restart();
19063 }
19064
19065-static void native_machine_restart(char *__unused)
19066+static __noreturn void native_machine_restart(char *__unused)
19067 {
19068 printk("machine restart\n");
19069
19070@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19071 __machine_emergency_restart(0);
19072 }
19073
19074-static void native_machine_halt(void)
19075+static __noreturn void native_machine_halt(void)
19076 {
19077 /* stop other cpus and apics */
19078 machine_shutdown();
19079@@ -685,7 +685,7 @@ static void native_machine_halt(void)
19080 stop_this_cpu(NULL);
19081 }
19082
19083-static void native_machine_power_off(void)
19084+__noreturn static void native_machine_power_off(void)
19085 {
19086 if (pm_power_off) {
19087 if (!reboot_force)
19088@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19089 }
19090 /* a fallback in case there is no PM info available */
19091 tboot_shutdown(TB_SHUTDOWN_HALT);
19092+ do { } while (1);
19093 }
19094
19095 struct machine_ops machine_ops = {
19096diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19097index 7a6f3b3..976a959 100644
19098--- a/arch/x86/kernel/relocate_kernel_64.S
19099+++ b/arch/x86/kernel/relocate_kernel_64.S
19100@@ -11,6 +11,7 @@
19101 #include <asm/kexec.h>
19102 #include <asm/processor-flags.h>
19103 #include <asm/pgtable_types.h>
19104+#include <asm/alternative-asm.h>
19105
19106 /*
19107 * Must be relocatable PIC code callable as a C function
19108@@ -167,6 +168,7 @@ identity_mapped:
19109 xorq %r14, %r14
19110 xorq %r15, %r15
19111
19112+ pax_force_retaddr 0, 1
19113 ret
19114
19115 1:
19116diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19117index 5449a26..0b6c759 100644
19118--- a/arch/x86/kernel/setup.c
19119+++ b/arch/x86/kernel/setup.c
19120@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19121
19122 if (!boot_params.hdr.root_flags)
19123 root_mountflags &= ~MS_RDONLY;
19124- init_mm.start_code = (unsigned long) _text;
19125- init_mm.end_code = (unsigned long) _etext;
19126+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19127+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19128 init_mm.end_data = (unsigned long) _edata;
19129 init_mm.brk = _brk_end;
19130
19131- code_resource.start = virt_to_phys(_text);
19132- code_resource.end = virt_to_phys(_etext)-1;
19133- data_resource.start = virt_to_phys(_etext);
19134+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19135+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19136+ data_resource.start = virt_to_phys(_sdata);
19137 data_resource.end = virt_to_phys(_edata)-1;
19138 bss_resource.start = virt_to_phys(&__bss_start);
19139 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19140diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19141index d559af9..524c6ad 100644
19142--- a/arch/x86/kernel/setup_percpu.c
19143+++ b/arch/x86/kernel/setup_percpu.c
19144@@ -25,19 +25,17 @@
19145 # define DBG(x...)
19146 #endif
19147
19148-DEFINE_PER_CPU(int, cpu_number);
19149+#ifdef CONFIG_SMP
19150+DEFINE_PER_CPU(unsigned int, cpu_number);
19151 EXPORT_PER_CPU_SYMBOL(cpu_number);
19152+#endif
19153
19154-#ifdef CONFIG_X86_64
19155 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19156-#else
19157-#define BOOT_PERCPU_OFFSET 0
19158-#endif
19159
19160 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19161 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19162
19163-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19164+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19165 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19166 };
19167 EXPORT_SYMBOL(__per_cpu_offset);
19168@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19169 {
19170 #ifdef CONFIG_X86_32
19171 struct desc_struct gdt;
19172+ unsigned long base = per_cpu_offset(cpu);
19173
19174- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19175- 0x2 | DESCTYPE_S, 0x8);
19176- gdt.s = 1;
19177+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19178+ 0x83 | DESCTYPE_S, 0xC);
19179 write_gdt_entry(get_cpu_gdt_table(cpu),
19180 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19181 #endif
19182@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19183 /* alrighty, percpu areas up and running */
19184 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19185 for_each_possible_cpu(cpu) {
19186+#ifdef CONFIG_CC_STACKPROTECTOR
19187+#ifdef CONFIG_X86_32
19188+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19189+#endif
19190+#endif
19191 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19192 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19193 per_cpu(cpu_number, cpu) = cpu;
19194@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19195 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19196 #endif
19197 #endif
19198+#ifdef CONFIG_CC_STACKPROTECTOR
19199+#ifdef CONFIG_X86_32
19200+ if (!cpu)
19201+ per_cpu(stack_canary.canary, cpu) = canary;
19202+#endif
19203+#endif
19204 /*
19205 * Up to this point, the boot CPU has been using .data.init
19206 * area. Reload any changed state for the boot CPU.
19207diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19208index 6a44a76..a9287a1 100644
19209--- a/arch/x86/kernel/signal.c
19210+++ b/arch/x86/kernel/signal.c
19211@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19212 * Align the stack pointer according to the i386 ABI,
19213 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19214 */
19215- sp = ((sp + 4) & -16ul) - 4;
19216+ sp = ((sp - 12) & -16ul) - 4;
19217 #else /* !CONFIG_X86_32 */
19218 sp = round_down(sp, 16) - 8;
19219 #endif
19220@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19221 * Return an always-bogus address instead so we will die with SIGSEGV.
19222 */
19223 if (onsigstack && !likely(on_sig_stack(sp)))
19224- return (void __user *)-1L;
19225+ return (__force void __user *)-1L;
19226
19227 /* save i387 state */
19228 if (used_math() && save_i387_xstate(*fpstate) < 0)
19229- return (void __user *)-1L;
19230+ return (__force void __user *)-1L;
19231
19232 return (void __user *)sp;
19233 }
19234@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19235 }
19236
19237 if (current->mm->context.vdso)
19238- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19239+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19240 else
19241- restorer = &frame->retcode;
19242+ restorer = (void __user *)&frame->retcode;
19243 if (ka->sa.sa_flags & SA_RESTORER)
19244 restorer = ka->sa.sa_restorer;
19245
19246@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19247 * reasons and because gdb uses it as a signature to notice
19248 * signal handler stack frames.
19249 */
19250- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19251+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19252
19253 if (err)
19254 return -EFAULT;
19255@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19256 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19257
19258 /* Set up to return from userspace. */
19259- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19260+ if (current->mm->context.vdso)
19261+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19262+ else
19263+ restorer = (void __user *)&frame->retcode;
19264 if (ka->sa.sa_flags & SA_RESTORER)
19265 restorer = ka->sa.sa_restorer;
19266 put_user_ex(restorer, &frame->pretcode);
19267@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19268 * reasons and because gdb uses it as a signature to notice
19269 * signal handler stack frames.
19270 */
19271- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19272+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19273 } put_user_catch(err);
19274
19275 if (err)
19276@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19277 int signr;
19278 sigset_t *oldset;
19279
19280+ pax_track_stack();
19281+
19282 /*
19283 * We want the common case to go fast, which is why we may in certain
19284 * cases get here from kernel mode. Just return without doing anything
19285@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19286 * X86_32: vm86 regs switched out by assembly code before reaching
19287 * here, so testing against kernel CS suffices.
19288 */
19289- if (!user_mode(regs))
19290+ if (!user_mode_novm(regs))
19291 return;
19292
19293 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19294diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19295index 7e8e905..64d5c32 100644
19296--- a/arch/x86/kernel/smpboot.c
19297+++ b/arch/x86/kernel/smpboot.c
19298@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19299 */
19300 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19301
19302-void cpu_hotplug_driver_lock()
19303+void cpu_hotplug_driver_lock(void)
19304 {
19305- mutex_lock(&x86_cpu_hotplug_driver_mutex);
19306+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
19307 }
19308
19309-void cpu_hotplug_driver_unlock()
19310+void cpu_hotplug_driver_unlock(void)
19311 {
19312- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19313+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19314 }
19315
19316 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19317@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19318 * target processor state.
19319 */
19320 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19321- (unsigned long)stack_start.sp);
19322+ stack_start);
19323
19324 /*
19325 * Run STARTUP IPI loop.
19326@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19327 set_idle_for_cpu(cpu, c_idle.idle);
19328 do_rest:
19329 per_cpu(current_task, cpu) = c_idle.idle;
19330+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19331 #ifdef CONFIG_X86_32
19332 /* Stack for startup_32 can be just as for start_secondary onwards */
19333 irq_ctx_init(cpu);
19334@@ -750,13 +751,15 @@ do_rest:
19335 #else
19336 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19337 initial_gs = per_cpu_offset(cpu);
19338- per_cpu(kernel_stack, cpu) =
19339- (unsigned long)task_stack_page(c_idle.idle) -
19340- KERNEL_STACK_OFFSET + THREAD_SIZE;
19341+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19342 #endif
19343+
19344+ pax_open_kernel();
19345 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19346+ pax_close_kernel();
19347+
19348 initial_code = (unsigned long)start_secondary;
19349- stack_start.sp = (void *) c_idle.idle->thread.sp;
19350+ stack_start = c_idle.idle->thread.sp;
19351
19352 /* start_ip had better be page-aligned! */
19353 start_ip = setup_trampoline();
19354@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19355
19356 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19357
19358+#ifdef CONFIG_PAX_PER_CPU_PGD
19359+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19360+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19361+ KERNEL_PGD_PTRS);
19362+#endif
19363+
19364 err = do_boot_cpu(apicid, cpu);
19365
19366 if (err) {
19367diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19368index 3149032..14f1053 100644
19369--- a/arch/x86/kernel/step.c
19370+++ b/arch/x86/kernel/step.c
19371@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19372 struct desc_struct *desc;
19373 unsigned long base;
19374
19375- seg &= ~7UL;
19376+ seg >>= 3;
19377
19378 mutex_lock(&child->mm->context.lock);
19379- if (unlikely((seg >> 3) >= child->mm->context.size))
19380+ if (unlikely(seg >= child->mm->context.size))
19381 addr = -1L; /* bogus selector, access would fault */
19382 else {
19383 desc = child->mm->context.ldt + seg;
19384@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19385 addr += base;
19386 }
19387 mutex_unlock(&child->mm->context.lock);
19388- }
19389+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19390+ addr = ktla_ktva(addr);
19391
19392 return addr;
19393 }
19394@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19395 unsigned char opcode[15];
19396 unsigned long addr = convert_ip_to_linear(child, regs);
19397
19398+ if (addr == -EINVAL)
19399+ return 0;
19400+
19401 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19402 for (i = 0; i < copied; i++) {
19403 switch (opcode[i]) {
19404@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19405
19406 #ifdef CONFIG_X86_64
19407 case 0x40 ... 0x4f:
19408- if (regs->cs != __USER_CS)
19409+ if ((regs->cs & 0xffff) != __USER_CS)
19410 /* 32-bit mode: register increment */
19411 return 0;
19412 /* 64-bit mode: REX prefix */
19413diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19414index dee1ff7..a397f7f 100644
19415--- a/arch/x86/kernel/sys_i386_32.c
19416+++ b/arch/x86/kernel/sys_i386_32.c
19417@@ -24,6 +24,21 @@
19418
19419 #include <asm/syscalls.h>
19420
19421+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19422+{
19423+ unsigned long pax_task_size = TASK_SIZE;
19424+
19425+#ifdef CONFIG_PAX_SEGMEXEC
19426+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19427+ pax_task_size = SEGMEXEC_TASK_SIZE;
19428+#endif
19429+
19430+ if (len > pax_task_size || addr > pax_task_size - len)
19431+ return -EINVAL;
19432+
19433+ return 0;
19434+}
19435+
19436 /*
19437 * Perform the select(nd, in, out, ex, tv) and mmap() system
19438 * calls. Linux/i386 didn't use to be able to handle more than
19439@@ -58,6 +73,212 @@ out:
19440 return err;
19441 }
19442
19443+unsigned long
19444+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19445+ unsigned long len, unsigned long pgoff, unsigned long flags)
19446+{
19447+ struct mm_struct *mm = current->mm;
19448+ struct vm_area_struct *vma;
19449+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19450+
19451+#ifdef CONFIG_PAX_SEGMEXEC
19452+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19453+ pax_task_size = SEGMEXEC_TASK_SIZE;
19454+#endif
19455+
19456+ pax_task_size -= PAGE_SIZE;
19457+
19458+ if (len > pax_task_size)
19459+ return -ENOMEM;
19460+
19461+ if (flags & MAP_FIXED)
19462+ return addr;
19463+
19464+#ifdef CONFIG_PAX_RANDMMAP
19465+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19466+#endif
19467+
19468+ if (addr) {
19469+ addr = PAGE_ALIGN(addr);
19470+ if (pax_task_size - len >= addr) {
19471+ vma = find_vma(mm, addr);
19472+ if (check_heap_stack_gap(vma, addr, len))
19473+ return addr;
19474+ }
19475+ }
19476+ if (len > mm->cached_hole_size) {
19477+ start_addr = addr = mm->free_area_cache;
19478+ } else {
19479+ start_addr = addr = mm->mmap_base;
19480+ mm->cached_hole_size = 0;
19481+ }
19482+
19483+#ifdef CONFIG_PAX_PAGEEXEC
19484+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19485+ start_addr = 0x00110000UL;
19486+
19487+#ifdef CONFIG_PAX_RANDMMAP
19488+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19489+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19490+#endif
19491+
19492+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19493+ start_addr = addr = mm->mmap_base;
19494+ else
19495+ addr = start_addr;
19496+ }
19497+#endif
19498+
19499+full_search:
19500+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19501+ /* At this point: (!vma || addr < vma->vm_end). */
19502+ if (pax_task_size - len < addr) {
19503+ /*
19504+ * Start a new search - just in case we missed
19505+ * some holes.
19506+ */
19507+ if (start_addr != mm->mmap_base) {
19508+ start_addr = addr = mm->mmap_base;
19509+ mm->cached_hole_size = 0;
19510+ goto full_search;
19511+ }
19512+ return -ENOMEM;
19513+ }
19514+ if (check_heap_stack_gap(vma, addr, len))
19515+ break;
19516+ if (addr + mm->cached_hole_size < vma->vm_start)
19517+ mm->cached_hole_size = vma->vm_start - addr;
19518+ addr = vma->vm_end;
19519+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19520+ start_addr = addr = mm->mmap_base;
19521+ mm->cached_hole_size = 0;
19522+ goto full_search;
19523+ }
19524+ }
19525+
19526+ /*
19527+ * Remember the place where we stopped the search:
19528+ */
19529+ mm->free_area_cache = addr + len;
19530+ return addr;
19531+}
19532+
19533+unsigned long
19534+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19535+ const unsigned long len, const unsigned long pgoff,
19536+ const unsigned long flags)
19537+{
19538+ struct vm_area_struct *vma;
19539+ struct mm_struct *mm = current->mm;
19540+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19541+
19542+#ifdef CONFIG_PAX_SEGMEXEC
19543+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19544+ pax_task_size = SEGMEXEC_TASK_SIZE;
19545+#endif
19546+
19547+ pax_task_size -= PAGE_SIZE;
19548+
19549+ /* requested length too big for entire address space */
19550+ if (len > pax_task_size)
19551+ return -ENOMEM;
19552+
19553+ if (flags & MAP_FIXED)
19554+ return addr;
19555+
19556+#ifdef CONFIG_PAX_PAGEEXEC
19557+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19558+ goto bottomup;
19559+#endif
19560+
19561+#ifdef CONFIG_PAX_RANDMMAP
19562+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19563+#endif
19564+
19565+ /* requesting a specific address */
19566+ if (addr) {
19567+ addr = PAGE_ALIGN(addr);
19568+ if (pax_task_size - len >= addr) {
19569+ vma = find_vma(mm, addr);
19570+ if (check_heap_stack_gap(vma, addr, len))
19571+ return addr;
19572+ }
19573+ }
19574+
19575+ /* check if free_area_cache is useful for us */
19576+ if (len <= mm->cached_hole_size) {
19577+ mm->cached_hole_size = 0;
19578+ mm->free_area_cache = mm->mmap_base;
19579+ }
19580+
19581+ /* either no address requested or can't fit in requested address hole */
19582+ addr = mm->free_area_cache;
19583+
19584+ /* make sure it can fit in the remaining address space */
19585+ if (addr > len) {
19586+ vma = find_vma(mm, addr-len);
19587+ if (check_heap_stack_gap(vma, addr - len, len))
19588+ /* remember the address as a hint for next time */
19589+ return (mm->free_area_cache = addr-len);
19590+ }
19591+
19592+ if (mm->mmap_base < len)
19593+ goto bottomup;
19594+
19595+ addr = mm->mmap_base-len;
19596+
19597+ do {
19598+ /*
19599+ * Lookup failure means no vma is above this address,
19600+ * else if new region fits below vma->vm_start,
19601+ * return with success:
19602+ */
19603+ vma = find_vma(mm, addr);
19604+ if (check_heap_stack_gap(vma, addr, len))
19605+ /* remember the address as a hint for next time */
19606+ return (mm->free_area_cache = addr);
19607+
19608+ /* remember the largest hole we saw so far */
19609+ if (addr + mm->cached_hole_size < vma->vm_start)
19610+ mm->cached_hole_size = vma->vm_start - addr;
19611+
19612+ /* try just below the current vma->vm_start */
19613+ addr = skip_heap_stack_gap(vma, len);
19614+ } while (!IS_ERR_VALUE(addr));
19615+
19616+bottomup:
19617+ /*
19618+ * A failed mmap() very likely causes application failure,
19619+ * so fall back to the bottom-up function here. This scenario
19620+ * can happen with large stack limits and large mmap()
19621+ * allocations.
19622+ */
19623+
19624+#ifdef CONFIG_PAX_SEGMEXEC
19625+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19626+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19627+ else
19628+#endif
19629+
19630+ mm->mmap_base = TASK_UNMAPPED_BASE;
19631+
19632+#ifdef CONFIG_PAX_RANDMMAP
19633+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19634+ mm->mmap_base += mm->delta_mmap;
19635+#endif
19636+
19637+ mm->free_area_cache = mm->mmap_base;
19638+ mm->cached_hole_size = ~0UL;
19639+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19640+ /*
19641+ * Restore the topdown base:
19642+ */
19643+ mm->mmap_base = base;
19644+ mm->free_area_cache = base;
19645+ mm->cached_hole_size = ~0UL;
19646+
19647+ return addr;
19648+}
19649
19650 struct sel_arg_struct {
19651 unsigned long n;
19652@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19653 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19654 case SEMTIMEDOP:
19655 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19656- (const struct timespec __user *)fifth);
19657+ (__force const struct timespec __user *)fifth);
19658
19659 case SEMGET:
19660 return sys_semget(first, second, third);
19661@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19662 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19663 if (ret)
19664 return ret;
19665- return put_user(raddr, (ulong __user *) third);
19666+ return put_user(raddr, (__force ulong __user *) third);
19667 }
19668 case 1: /* iBCS2 emulator entry point */
19669 if (!segment_eq(get_fs(), get_ds()))
19670@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19671
19672 return error;
19673 }
19674-
19675-
19676-/*
19677- * Do a system call from kernel instead of calling sys_execve so we
19678- * end up with proper pt_regs.
19679- */
19680-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19681-{
19682- long __res;
19683- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19684- : "=a" (__res)
19685- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19686- return __res;
19687-}
19688diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19689index 8aa2057..b604bc1 100644
19690--- a/arch/x86/kernel/sys_x86_64.c
19691+++ b/arch/x86/kernel/sys_x86_64.c
19692@@ -32,8 +32,8 @@ out:
19693 return error;
19694 }
19695
19696-static void find_start_end(unsigned long flags, unsigned long *begin,
19697- unsigned long *end)
19698+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19699+ unsigned long *begin, unsigned long *end)
19700 {
19701 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19702 unsigned long new_begin;
19703@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19704 *begin = new_begin;
19705 }
19706 } else {
19707- *begin = TASK_UNMAPPED_BASE;
19708+ *begin = mm->mmap_base;
19709 *end = TASK_SIZE;
19710 }
19711 }
19712@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19713 if (flags & MAP_FIXED)
19714 return addr;
19715
19716- find_start_end(flags, &begin, &end);
19717+ find_start_end(mm, flags, &begin, &end);
19718
19719 if (len > end)
19720 return -ENOMEM;
19721
19722+#ifdef CONFIG_PAX_RANDMMAP
19723+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19724+#endif
19725+
19726 if (addr) {
19727 addr = PAGE_ALIGN(addr);
19728 vma = find_vma(mm, addr);
19729- if (end - len >= addr &&
19730- (!vma || addr + len <= vma->vm_start))
19731+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19732 return addr;
19733 }
19734 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19735@@ -106,7 +109,7 @@ full_search:
19736 }
19737 return -ENOMEM;
19738 }
19739- if (!vma || addr + len <= vma->vm_start) {
19740+ if (check_heap_stack_gap(vma, addr, len)) {
19741 /*
19742 * Remember the place where we stopped the search:
19743 */
19744@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19745 {
19746 struct vm_area_struct *vma;
19747 struct mm_struct *mm = current->mm;
19748- unsigned long addr = addr0;
19749+ unsigned long base = mm->mmap_base, addr = addr0;
19750
19751 /* requested length too big for entire address space */
19752 if (len > TASK_SIZE)
19753@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19754 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19755 goto bottomup;
19756
19757+#ifdef CONFIG_PAX_RANDMMAP
19758+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19759+#endif
19760+
19761 /* requesting a specific address */
19762 if (addr) {
19763 addr = PAGE_ALIGN(addr);
19764- vma = find_vma(mm, addr);
19765- if (TASK_SIZE - len >= addr &&
19766- (!vma || addr + len <= vma->vm_start))
19767- return addr;
19768+ if (TASK_SIZE - len >= addr) {
19769+ vma = find_vma(mm, addr);
19770+ if (check_heap_stack_gap(vma, addr, len))
19771+ return addr;
19772+ }
19773 }
19774
19775 /* check if free_area_cache is useful for us */
19776@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19777 /* make sure it can fit in the remaining address space */
19778 if (addr > len) {
19779 vma = find_vma(mm, addr-len);
19780- if (!vma || addr <= vma->vm_start)
19781+ if (check_heap_stack_gap(vma, addr - len, len))
19782 /* remember the address as a hint for next time */
19783 return mm->free_area_cache = addr-len;
19784 }
19785@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19786 * return with success:
19787 */
19788 vma = find_vma(mm, addr);
19789- if (!vma || addr+len <= vma->vm_start)
19790+ if (check_heap_stack_gap(vma, addr, len))
19791 /* remember the address as a hint for next time */
19792 return mm->free_area_cache = addr;
19793
19794@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19795 mm->cached_hole_size = vma->vm_start - addr;
19796
19797 /* try just below the current vma->vm_start */
19798- addr = vma->vm_start-len;
19799- } while (len < vma->vm_start);
19800+ addr = skip_heap_stack_gap(vma, len);
19801+ } while (!IS_ERR_VALUE(addr));
19802
19803 bottomup:
19804 /*
19805@@ -198,13 +206,21 @@ bottomup:
19806 * can happen with large stack limits and large mmap()
19807 * allocations.
19808 */
19809+ mm->mmap_base = TASK_UNMAPPED_BASE;
19810+
19811+#ifdef CONFIG_PAX_RANDMMAP
19812+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19813+ mm->mmap_base += mm->delta_mmap;
19814+#endif
19815+
19816+ mm->free_area_cache = mm->mmap_base;
19817 mm->cached_hole_size = ~0UL;
19818- mm->free_area_cache = TASK_UNMAPPED_BASE;
19819 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19820 /*
19821 * Restore the topdown base:
19822 */
19823- mm->free_area_cache = mm->mmap_base;
19824+ mm->mmap_base = base;
19825+ mm->free_area_cache = base;
19826 mm->cached_hole_size = ~0UL;
19827
19828 return addr;
19829diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
19830index 76d70a4..4c94a44 100644
19831--- a/arch/x86/kernel/syscall_table_32.S
19832+++ b/arch/x86/kernel/syscall_table_32.S
19833@@ -1,3 +1,4 @@
19834+.section .rodata,"a",@progbits
19835 ENTRY(sys_call_table)
19836 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
19837 .long sys_exit
19838diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19839index 46b8277..3349d55 100644
19840--- a/arch/x86/kernel/tboot.c
19841+++ b/arch/x86/kernel/tboot.c
19842@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
19843
19844 void tboot_shutdown(u32 shutdown_type)
19845 {
19846- void (*shutdown)(void);
19847+ void (* __noreturn shutdown)(void);
19848
19849 if (!tboot_enabled())
19850 return;
19851@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
19852
19853 switch_to_tboot_pt();
19854
19855- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19856+ shutdown = (void *)tboot->shutdown_entry;
19857 shutdown();
19858
19859 /* should not reach here */
19860@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19861 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19862 }
19863
19864-static atomic_t ap_wfs_count;
19865+static atomic_unchecked_t ap_wfs_count;
19866
19867 static int tboot_wait_for_aps(int num_aps)
19868 {
19869@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19870 {
19871 switch (action) {
19872 case CPU_DYING:
19873- atomic_inc(&ap_wfs_count);
19874+ atomic_inc_unchecked(&ap_wfs_count);
19875 if (num_online_cpus() == 1)
19876- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19877+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19878 return NOTIFY_BAD;
19879 break;
19880 }
19881@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
19882
19883 tboot_create_trampoline();
19884
19885- atomic_set(&ap_wfs_count, 0);
19886+ atomic_set_unchecked(&ap_wfs_count, 0);
19887 register_hotcpu_notifier(&tboot_cpu_notifier);
19888 return 0;
19889 }
19890diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19891index be25734..87fe232 100644
19892--- a/arch/x86/kernel/time.c
19893+++ b/arch/x86/kernel/time.c
19894@@ -26,17 +26,13 @@
19895 int timer_ack;
19896 #endif
19897
19898-#ifdef CONFIG_X86_64
19899-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
19900-#endif
19901-
19902 unsigned long profile_pc(struct pt_regs *regs)
19903 {
19904 unsigned long pc = instruction_pointer(regs);
19905
19906- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19907+ if (!user_mode(regs) && in_lock_functions(pc)) {
19908 #ifdef CONFIG_FRAME_POINTER
19909- return *(unsigned long *)(regs->bp + sizeof(long));
19910+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19911 #else
19912 unsigned long *sp =
19913 (unsigned long *)kernel_stack_pointer(regs);
19914@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19915 * or above a saved flags. Eflags has bits 22-31 zero,
19916 * kernel addresses don't.
19917 */
19918+
19919+#ifdef CONFIG_PAX_KERNEXEC
19920+ return ktla_ktva(sp[0]);
19921+#else
19922 if (sp[0] >> 22)
19923 return sp[0];
19924 if (sp[1] >> 22)
19925 return sp[1];
19926 #endif
19927+
19928+#endif
19929 }
19930 return pc;
19931 }
19932diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19933index 6bb7b85..dd853e1 100644
19934--- a/arch/x86/kernel/tls.c
19935+++ b/arch/x86/kernel/tls.c
19936@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19937 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19938 return -EINVAL;
19939
19940+#ifdef CONFIG_PAX_SEGMEXEC
19941+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19942+ return -EINVAL;
19943+#endif
19944+
19945 set_tls_desc(p, idx, &info, 1);
19946
19947 return 0;
19948diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19949index 8508237..229b664 100644
19950--- a/arch/x86/kernel/trampoline_32.S
19951+++ b/arch/x86/kernel/trampoline_32.S
19952@@ -32,6 +32,12 @@
19953 #include <asm/segment.h>
19954 #include <asm/page_types.h>
19955
19956+#ifdef CONFIG_PAX_KERNEXEC
19957+#define ta(X) (X)
19958+#else
19959+#define ta(X) ((X) - __PAGE_OFFSET)
19960+#endif
19961+
19962 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
19963 __CPUINITRODATA
19964 .code16
19965@@ -60,7 +66,7 @@ r_base = .
19966 inc %ax # protected mode (PE) bit
19967 lmsw %ax # into protected mode
19968 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19969- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19970+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19971
19972 # These need to be in the same 64K segment as the above;
19973 # hence we don't use the boot_gdt_descr defined in head.S
19974diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19975index 3af2dff..ba8aa49 100644
19976--- a/arch/x86/kernel/trampoline_64.S
19977+++ b/arch/x86/kernel/trampoline_64.S
19978@@ -91,7 +91,7 @@ startup_32:
19979 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19980 movl %eax, %ds
19981
19982- movl $X86_CR4_PAE, %eax
19983+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19984 movl %eax, %cr4 # Enable PAE mode
19985
19986 # Setup trampoline 4 level pagetables
19987@@ -127,7 +127,7 @@ startup_64:
19988 no_longmode:
19989 hlt
19990 jmp no_longmode
19991-#include "verify_cpu_64.S"
19992+#include "verify_cpu.S"
19993
19994 # Careful these need to be in the same 64K segment as the above;
19995 tidt:
19996@@ -138,7 +138,7 @@ tidt:
19997 # so the kernel can live anywhere
19998 .balign 4
19999 tgdt:
20000- .short tgdt_end - tgdt # gdt limit
20001+ .short tgdt_end - tgdt - 1 # gdt limit
20002 .long tgdt - r_base
20003 .short 0
20004 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20005diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20006index 7e37dce..ec3f8e5 100644
20007--- a/arch/x86/kernel/traps.c
20008+++ b/arch/x86/kernel/traps.c
20009@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20010
20011 /* Do we ignore FPU interrupts ? */
20012 char ignore_fpu_irq;
20013-
20014-/*
20015- * The IDT has to be page-aligned to simplify the Pentium
20016- * F0 0F bug workaround.
20017- */
20018-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20019 #endif
20020
20021 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20022@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20023 static inline void
20024 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20025 {
20026- if (!user_mode_vm(regs))
20027+ if (!user_mode(regs))
20028 die(str, regs, err);
20029 }
20030 #endif
20031
20032 static void __kprobes
20033-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20034+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20035 long error_code, siginfo_t *info)
20036 {
20037 struct task_struct *tsk = current;
20038
20039 #ifdef CONFIG_X86_32
20040- if (regs->flags & X86_VM_MASK) {
20041+ if (v8086_mode(regs)) {
20042 /*
20043 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20044 * On nmi (interrupt 2), do_trap should not be called.
20045@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20046 }
20047 #endif
20048
20049- if (!user_mode(regs))
20050+ if (!user_mode_novm(regs))
20051 goto kernel_trap;
20052
20053 #ifdef CONFIG_X86_32
20054@@ -158,7 +152,7 @@ trap_signal:
20055 printk_ratelimit()) {
20056 printk(KERN_INFO
20057 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20058- tsk->comm, tsk->pid, str,
20059+ tsk->comm, task_pid_nr(tsk), str,
20060 regs->ip, regs->sp, error_code);
20061 print_vma_addr(" in ", regs->ip);
20062 printk("\n");
20063@@ -175,8 +169,20 @@ kernel_trap:
20064 if (!fixup_exception(regs)) {
20065 tsk->thread.error_code = error_code;
20066 tsk->thread.trap_no = trapnr;
20067+
20068+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20069+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20070+ str = "PAX: suspicious stack segment fault";
20071+#endif
20072+
20073 die(str, regs, error_code);
20074 }
20075+
20076+#ifdef CONFIG_PAX_REFCOUNT
20077+ if (trapnr == 4)
20078+ pax_report_refcount_overflow(regs);
20079+#endif
20080+
20081 return;
20082
20083 #ifdef CONFIG_X86_32
20084@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20085 conditional_sti(regs);
20086
20087 #ifdef CONFIG_X86_32
20088- if (regs->flags & X86_VM_MASK)
20089+ if (v8086_mode(regs))
20090 goto gp_in_vm86;
20091 #endif
20092
20093 tsk = current;
20094- if (!user_mode(regs))
20095+ if (!user_mode_novm(regs))
20096 goto gp_in_kernel;
20097
20098+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20099+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20100+ struct mm_struct *mm = tsk->mm;
20101+ unsigned long limit;
20102+
20103+ down_write(&mm->mmap_sem);
20104+ limit = mm->context.user_cs_limit;
20105+ if (limit < TASK_SIZE) {
20106+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20107+ up_write(&mm->mmap_sem);
20108+ return;
20109+ }
20110+ up_write(&mm->mmap_sem);
20111+ }
20112+#endif
20113+
20114 tsk->thread.error_code = error_code;
20115 tsk->thread.trap_no = 13;
20116
20117@@ -305,6 +327,13 @@ gp_in_kernel:
20118 if (notify_die(DIE_GPF, "general protection fault", regs,
20119 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20120 return;
20121+
20122+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20123+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20124+ die("PAX: suspicious general protection fault", regs, error_code);
20125+ else
20126+#endif
20127+
20128 die("general protection fault", regs, error_code);
20129 }
20130
20131@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20132 dotraplinkage notrace __kprobes void
20133 do_nmi(struct pt_regs *regs, long error_code)
20134 {
20135+
20136+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20137+ if (!user_mode(regs)) {
20138+ unsigned long cs = regs->cs & 0xFFFF;
20139+ unsigned long ip = ktva_ktla(regs->ip);
20140+
20141+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20142+ regs->ip = ip;
20143+ }
20144+#endif
20145+
20146 nmi_enter();
20147
20148 inc_irq_stat(__nmi_count);
20149@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20150 }
20151
20152 #ifdef CONFIG_X86_32
20153- if (regs->flags & X86_VM_MASK)
20154+ if (v8086_mode(regs))
20155 goto debug_vm86;
20156 #endif
20157
20158@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20159 * kernel space (but re-enable TF when returning to user mode).
20160 */
20161 if (condition & DR_STEP) {
20162- if (!user_mode(regs))
20163+ if (!user_mode_novm(regs))
20164 goto clear_TF_reenable;
20165 }
20166
20167@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20168 * Handle strange cache flush from user space exception
20169 * in all other cases. This is undocumented behaviour.
20170 */
20171- if (regs->flags & X86_VM_MASK) {
20172+ if (v8086_mode(regs)) {
20173 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20174 return;
20175 }
20176@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20177 void __math_state_restore(void)
20178 {
20179 struct thread_info *thread = current_thread_info();
20180- struct task_struct *tsk = thread->task;
20181+ struct task_struct *tsk = current;
20182
20183 /*
20184 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20185@@ -825,8 +865,7 @@ void __math_state_restore(void)
20186 */
20187 asmlinkage void math_state_restore(void)
20188 {
20189- struct thread_info *thread = current_thread_info();
20190- struct task_struct *tsk = thread->task;
20191+ struct task_struct *tsk = current;
20192
20193 if (!tsk_used_math(tsk)) {
20194 local_irq_enable();
20195diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20196new file mode 100644
20197index 0000000..50c5edd
20198--- /dev/null
20199+++ b/arch/x86/kernel/verify_cpu.S
20200@@ -0,0 +1,140 @@
20201+/*
20202+ *
20203+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
20204+ * code has been borrowed from boot/setup.S and was introduced by
20205+ * Andi Kleen.
20206+ *
20207+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20208+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20209+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20210+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20211+ *
20212+ * This source code is licensed under the GNU General Public License,
20213+ * Version 2. See the file COPYING for more details.
20214+ *
20215+ * This is a common code for verification whether CPU supports
20216+ * long mode and SSE or not. It is not called directly instead this
20217+ * file is included at various places and compiled in that context.
20218+ * This file is expected to run in 32bit code. Currently:
20219+ *
20220+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20221+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
20222+ * arch/x86/kernel/head_32.S: processor startup
20223+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20224+ *
20225+ * verify_cpu, returns the status of longmode and SSE in register %eax.
20226+ * 0: Success 1: Failure
20227+ *
20228+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20229+ *
20230+ * The caller needs to check for the error code and take the action
20231+ * appropriately. Either display a message or halt.
20232+ */
20233+
20234+#include <asm/cpufeature.h>
20235+#include <asm/msr-index.h>
20236+
20237+verify_cpu:
20238+ pushfl # Save caller passed flags
20239+ pushl $0 # Kill any dangerous flags
20240+ popfl
20241+
20242+ pushfl # standard way to check for cpuid
20243+ popl %eax
20244+ movl %eax,%ebx
20245+ xorl $0x200000,%eax
20246+ pushl %eax
20247+ popfl
20248+ pushfl
20249+ popl %eax
20250+ cmpl %eax,%ebx
20251+ jz verify_cpu_no_longmode # cpu has no cpuid
20252+
20253+ movl $0x0,%eax # See if cpuid 1 is implemented
20254+ cpuid
20255+ cmpl $0x1,%eax
20256+ jb verify_cpu_no_longmode # no cpuid 1
20257+
20258+ xor %di,%di
20259+ cmpl $0x68747541,%ebx # AuthenticAMD
20260+ jnz verify_cpu_noamd
20261+ cmpl $0x69746e65,%edx
20262+ jnz verify_cpu_noamd
20263+ cmpl $0x444d4163,%ecx
20264+ jnz verify_cpu_noamd
20265+ mov $1,%di # cpu is from AMD
20266+ jmp verify_cpu_check
20267+
20268+verify_cpu_noamd:
20269+ cmpl $0x756e6547,%ebx # GenuineIntel?
20270+ jnz verify_cpu_check
20271+ cmpl $0x49656e69,%edx
20272+ jnz verify_cpu_check
20273+ cmpl $0x6c65746e,%ecx
20274+ jnz verify_cpu_check
20275+
20276+ # only call IA32_MISC_ENABLE when:
20277+ # family > 6 || (family == 6 && model >= 0xd)
20278+ movl $0x1, %eax # check CPU family and model
20279+ cpuid
20280+ movl %eax, %ecx
20281+
20282+ andl $0x0ff00f00, %eax # mask family and extended family
20283+ shrl $8, %eax
20284+ cmpl $6, %eax
20285+ ja verify_cpu_clear_xd # family > 6, ok
20286+ jb verify_cpu_check # family < 6, skip
20287+
20288+ andl $0x000f00f0, %ecx # mask model and extended model
20289+ shrl $4, %ecx
20290+ cmpl $0xd, %ecx
20291+ jb verify_cpu_check # family == 6, model < 0xd, skip
20292+
20293+verify_cpu_clear_xd:
20294+ movl $MSR_IA32_MISC_ENABLE, %ecx
20295+ rdmsr
20296+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20297+ jnc verify_cpu_check # only write MSR if bit was changed
20298+ wrmsr
20299+
20300+verify_cpu_check:
20301+ movl $0x1,%eax # Does the cpu have what it takes
20302+ cpuid
20303+ andl $REQUIRED_MASK0,%edx
20304+ xorl $REQUIRED_MASK0,%edx
20305+ jnz verify_cpu_no_longmode
20306+
20307+ movl $0x80000000,%eax # See if extended cpuid is implemented
20308+ cpuid
20309+ cmpl $0x80000001,%eax
20310+ jb verify_cpu_no_longmode # no extended cpuid
20311+
20312+ movl $0x80000001,%eax # Does the cpu have what it takes
20313+ cpuid
20314+ andl $REQUIRED_MASK1,%edx
20315+ xorl $REQUIRED_MASK1,%edx
20316+ jnz verify_cpu_no_longmode
20317+
20318+verify_cpu_sse_test:
20319+ movl $1,%eax
20320+ cpuid
20321+ andl $SSE_MASK,%edx
20322+ cmpl $SSE_MASK,%edx
20323+ je verify_cpu_sse_ok
20324+ test %di,%di
20325+ jz verify_cpu_no_longmode # only try to force SSE on AMD
20326+ movl $MSR_K7_HWCR,%ecx
20327+ rdmsr
20328+ btr $15,%eax # enable SSE
20329+ wrmsr
20330+ xor %di,%di # don't loop
20331+ jmp verify_cpu_sse_test # try again
20332+
20333+verify_cpu_no_longmode:
20334+ popfl # Restore caller passed flags
20335+ movl $1,%eax
20336+ ret
20337+verify_cpu_sse_ok:
20338+ popfl # Restore caller passed flags
20339+ xorl %eax, %eax
20340+ ret
20341diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20342deleted file mode 100644
20343index 45b6f8a..0000000
20344--- a/arch/x86/kernel/verify_cpu_64.S
20345+++ /dev/null
20346@@ -1,105 +0,0 @@
20347-/*
20348- *
20349- * verify_cpu.S - Code for cpu long mode and SSE verification. This
20350- * code has been borrowed from boot/setup.S and was introduced by
20351- * Andi Kleen.
20352- *
20353- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20354- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20355- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20356- *
20357- * This source code is licensed under the GNU General Public License,
20358- * Version 2. See the file COPYING for more details.
20359- *
20360- * This is a common code for verification whether CPU supports
20361- * long mode and SSE or not. It is not called directly instead this
20362- * file is included at various places and compiled in that context.
20363- * Following are the current usage.
20364- *
20365- * This file is included by both 16bit and 32bit code.
20366- *
20367- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20368- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20369- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20370- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20371- *
20372- * verify_cpu, returns the status of cpu check in register %eax.
20373- * 0: Success 1: Failure
20374- *
20375- * The caller needs to check for the error code and take the action
20376- * appropriately. Either display a message or halt.
20377- */
20378-
20379-#include <asm/cpufeature.h>
20380-
20381-verify_cpu:
20382- pushfl # Save caller passed flags
20383- pushl $0 # Kill any dangerous flags
20384- popfl
20385-
20386- pushfl # standard way to check for cpuid
20387- popl %eax
20388- movl %eax,%ebx
20389- xorl $0x200000,%eax
20390- pushl %eax
20391- popfl
20392- pushfl
20393- popl %eax
20394- cmpl %eax,%ebx
20395- jz verify_cpu_no_longmode # cpu has no cpuid
20396-
20397- movl $0x0,%eax # See if cpuid 1 is implemented
20398- cpuid
20399- cmpl $0x1,%eax
20400- jb verify_cpu_no_longmode # no cpuid 1
20401-
20402- xor %di,%di
20403- cmpl $0x68747541,%ebx # AuthenticAMD
20404- jnz verify_cpu_noamd
20405- cmpl $0x69746e65,%edx
20406- jnz verify_cpu_noamd
20407- cmpl $0x444d4163,%ecx
20408- jnz verify_cpu_noamd
20409- mov $1,%di # cpu is from AMD
20410-
20411-verify_cpu_noamd:
20412- movl $0x1,%eax # Does the cpu have what it takes
20413- cpuid
20414- andl $REQUIRED_MASK0,%edx
20415- xorl $REQUIRED_MASK0,%edx
20416- jnz verify_cpu_no_longmode
20417-
20418- movl $0x80000000,%eax # See if extended cpuid is implemented
20419- cpuid
20420- cmpl $0x80000001,%eax
20421- jb verify_cpu_no_longmode # no extended cpuid
20422-
20423- movl $0x80000001,%eax # Does the cpu have what it takes
20424- cpuid
20425- andl $REQUIRED_MASK1,%edx
20426- xorl $REQUIRED_MASK1,%edx
20427- jnz verify_cpu_no_longmode
20428-
20429-verify_cpu_sse_test:
20430- movl $1,%eax
20431- cpuid
20432- andl $SSE_MASK,%edx
20433- cmpl $SSE_MASK,%edx
20434- je verify_cpu_sse_ok
20435- test %di,%di
20436- jz verify_cpu_no_longmode # only try to force SSE on AMD
20437- movl $0xc0010015,%ecx # HWCR
20438- rdmsr
20439- btr $15,%eax # enable SSE
20440- wrmsr
20441- xor %di,%di # don't loop
20442- jmp verify_cpu_sse_test # try again
20443-
20444-verify_cpu_no_longmode:
20445- popfl # Restore caller passed flags
20446- movl $1,%eax
20447- ret
20448-verify_cpu_sse_ok:
20449- popfl # Restore caller passed flags
20450- xorl %eax, %eax
20451- ret
20452diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20453index 9c4e625..c992817 100644
20454--- a/arch/x86/kernel/vm86_32.c
20455+++ b/arch/x86/kernel/vm86_32.c
20456@@ -41,6 +41,7 @@
20457 #include <linux/ptrace.h>
20458 #include <linux/audit.h>
20459 #include <linux/stddef.h>
20460+#include <linux/grsecurity.h>
20461
20462 #include <asm/uaccess.h>
20463 #include <asm/io.h>
20464@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20465 do_exit(SIGSEGV);
20466 }
20467
20468- tss = &per_cpu(init_tss, get_cpu());
20469+ tss = init_tss + get_cpu();
20470 current->thread.sp0 = current->thread.saved_sp0;
20471 current->thread.sysenter_cs = __KERNEL_CS;
20472 load_sp0(tss, &current->thread);
20473@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20474 struct task_struct *tsk;
20475 int tmp, ret = -EPERM;
20476
20477+#ifdef CONFIG_GRKERNSEC_VM86
20478+ if (!capable(CAP_SYS_RAWIO)) {
20479+ gr_handle_vm86();
20480+ goto out;
20481+ }
20482+#endif
20483+
20484 tsk = current;
20485 if (tsk->thread.saved_sp0)
20486 goto out;
20487@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20488 int tmp, ret;
20489 struct vm86plus_struct __user *v86;
20490
20491+#ifdef CONFIG_GRKERNSEC_VM86
20492+ if (!capable(CAP_SYS_RAWIO)) {
20493+ gr_handle_vm86();
20494+ ret = -EPERM;
20495+ goto out;
20496+ }
20497+#endif
20498+
20499 tsk = current;
20500 switch (regs->bx) {
20501 case VM86_REQUEST_IRQ:
20502@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20503 tsk->thread.saved_fs = info->regs32->fs;
20504 tsk->thread.saved_gs = get_user_gs(info->regs32);
20505
20506- tss = &per_cpu(init_tss, get_cpu());
20507+ tss = init_tss + get_cpu();
20508 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20509 if (cpu_has_sep)
20510 tsk->thread.sysenter_cs = 0;
20511@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20512 goto cannot_handle;
20513 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20514 goto cannot_handle;
20515- intr_ptr = (unsigned long __user *) (i << 2);
20516+ intr_ptr = (__force unsigned long __user *) (i << 2);
20517 if (get_user(segoffs, intr_ptr))
20518 goto cannot_handle;
20519 if ((segoffs >> 16) == BIOSSEG)
20520diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20521index d430e4c..831f817 100644
20522--- a/arch/x86/kernel/vmi_32.c
20523+++ b/arch/x86/kernel/vmi_32.c
20524@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20525 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20526
20527 #define call_vrom_func(rom,func) \
20528- (((VROMFUNC *)(rom->func))())
20529+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
20530
20531 #define call_vrom_long_func(rom,func,arg) \
20532- (((VROMLONGFUNC *)(rom->func)) (arg))
20533+({\
20534+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20535+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20536+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20537+ __reloc;\
20538+})
20539
20540-static struct vrom_header *vmi_rom;
20541+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20542 static int disable_pge;
20543 static int disable_pse;
20544 static int disable_sep;
20545@@ -76,10 +81,10 @@ static struct {
20546 void (*set_initial_ap_state)(int, int);
20547 void (*halt)(void);
20548 void (*set_lazy_mode)(int mode);
20549-} vmi_ops;
20550+} __no_const vmi_ops __read_only;
20551
20552 /* Cached VMI operations */
20553-struct vmi_timer_ops vmi_timer_ops;
20554+struct vmi_timer_ops vmi_timer_ops __read_only;
20555
20556 /*
20557 * VMI patching routines.
20558@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20559 static inline void patch_offset(void *insnbuf,
20560 unsigned long ip, unsigned long dest)
20561 {
20562- *(unsigned long *)(insnbuf+1) = dest-ip-5;
20563+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
20564 }
20565
20566 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20567@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20568 {
20569 u64 reloc;
20570 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20571+
20572 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20573 switch(rel->type) {
20574 case VMI_RELOCATION_CALL_REL:
20575@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20576
20577 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20578 {
20579- const pte_t pte = { .pte = 0 };
20580+ const pte_t pte = __pte(0ULL);
20581 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20582 }
20583
20584 static void vmi_pmd_clear(pmd_t *pmd)
20585 {
20586- const pte_t pte = { .pte = 0 };
20587+ const pte_t pte = __pte(0ULL);
20588 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20589 }
20590 #endif
20591@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20592 ap.ss = __KERNEL_DS;
20593 ap.esp = (unsigned long) start_esp;
20594
20595- ap.ds = __USER_DS;
20596- ap.es = __USER_DS;
20597+ ap.ds = __KERNEL_DS;
20598+ ap.es = __KERNEL_DS;
20599 ap.fs = __KERNEL_PERCPU;
20600- ap.gs = __KERNEL_STACK_CANARY;
20601+ savesegment(gs, ap.gs);
20602
20603 ap.eflags = 0;
20604
20605@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20606 paravirt_leave_lazy_mmu();
20607 }
20608
20609+#ifdef CONFIG_PAX_KERNEXEC
20610+static unsigned long vmi_pax_open_kernel(void)
20611+{
20612+ return 0;
20613+}
20614+
20615+static unsigned long vmi_pax_close_kernel(void)
20616+{
20617+ return 0;
20618+}
20619+#endif
20620+
20621 static inline int __init check_vmi_rom(struct vrom_header *rom)
20622 {
20623 struct pci_header *pci;
20624@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20625 return 0;
20626 if (rom->vrom_signature != VMI_SIGNATURE)
20627 return 0;
20628+ if (rom->rom_length * 512 > sizeof(*rom)) {
20629+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20630+ return 0;
20631+ }
20632 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20633 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20634 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20635@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20636 struct vrom_header *romstart;
20637 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20638 if (check_vmi_rom(romstart)) {
20639- vmi_rom = romstart;
20640+ vmi_rom = *romstart;
20641 return 1;
20642 }
20643 }
20644@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20645
20646 para_fill(pv_irq_ops.safe_halt, Halt);
20647
20648+#ifdef CONFIG_PAX_KERNEXEC
20649+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20650+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20651+#endif
20652+
20653 /*
20654 * Alternative instruction rewriting doesn't happen soon enough
20655 * to convert VMI_IRET to a call instead of a jump; so we have
20656@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20657
20658 void __init vmi_init(void)
20659 {
20660- if (!vmi_rom)
20661+ if (!vmi_rom.rom_signature)
20662 probe_vmi_rom();
20663 else
20664- check_vmi_rom(vmi_rom);
20665+ check_vmi_rom(&vmi_rom);
20666
20667 /* In case probing for or validating the ROM failed, basil */
20668- if (!vmi_rom)
20669+ if (!vmi_rom.rom_signature)
20670 return;
20671
20672- reserve_top_address(-vmi_rom->virtual_top);
20673+ reserve_top_address(-vmi_rom.virtual_top);
20674
20675 #ifdef CONFIG_X86_IO_APIC
20676 /* This is virtual hardware; timer routing is wired correctly */
20677@@ -874,7 +901,7 @@ void __init vmi_activate(void)
20678 {
20679 unsigned long flags;
20680
20681- if (!vmi_rom)
20682+ if (!vmi_rom.rom_signature)
20683 return;
20684
20685 local_irq_save(flags);
20686diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20687index 3c68fe2..12c8280 100644
20688--- a/arch/x86/kernel/vmlinux.lds.S
20689+++ b/arch/x86/kernel/vmlinux.lds.S
20690@@ -26,6 +26,13 @@
20691 #include <asm/page_types.h>
20692 #include <asm/cache.h>
20693 #include <asm/boot.h>
20694+#include <asm/segment.h>
20695+
20696+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20697+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20698+#else
20699+#define __KERNEL_TEXT_OFFSET 0
20700+#endif
20701
20702 #undef i386 /* in case the preprocessor is a 32bit one */
20703
20704@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20705 #ifdef CONFIG_X86_32
20706 OUTPUT_ARCH(i386)
20707 ENTRY(phys_startup_32)
20708-jiffies = jiffies_64;
20709 #else
20710 OUTPUT_ARCH(i386:x86-64)
20711 ENTRY(phys_startup_64)
20712-jiffies_64 = jiffies;
20713 #endif
20714
20715 PHDRS {
20716 text PT_LOAD FLAGS(5); /* R_E */
20717- data PT_LOAD FLAGS(7); /* RWE */
20718+#ifdef CONFIG_X86_32
20719+ module PT_LOAD FLAGS(5); /* R_E */
20720+#endif
20721+#ifdef CONFIG_XEN
20722+ rodata PT_LOAD FLAGS(5); /* R_E */
20723+#else
20724+ rodata PT_LOAD FLAGS(4); /* R__ */
20725+#endif
20726+ data PT_LOAD FLAGS(6); /* RW_ */
20727 #ifdef CONFIG_X86_64
20728 user PT_LOAD FLAGS(5); /* R_E */
20729+#endif
20730+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20731 #ifdef CONFIG_SMP
20732 percpu PT_LOAD FLAGS(6); /* RW_ */
20733 #endif
20734+ text.init PT_LOAD FLAGS(5); /* R_E */
20735+ text.exit PT_LOAD FLAGS(5); /* R_E */
20736 init PT_LOAD FLAGS(7); /* RWE */
20737-#endif
20738 note PT_NOTE FLAGS(0); /* ___ */
20739 }
20740
20741 SECTIONS
20742 {
20743 #ifdef CONFIG_X86_32
20744- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20745- phys_startup_32 = startup_32 - LOAD_OFFSET;
20746+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20747 #else
20748- . = __START_KERNEL;
20749- phys_startup_64 = startup_64 - LOAD_OFFSET;
20750+ . = __START_KERNEL;
20751 #endif
20752
20753 /* Text and read-only data */
20754- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20755- _text = .;
20756+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20757 /* bootstrapping code */
20758+#ifdef CONFIG_X86_32
20759+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20760+#else
20761+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20762+#endif
20763+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20764+ _text = .;
20765 HEAD_TEXT
20766 #ifdef CONFIG_X86_32
20767 . = ALIGN(PAGE_SIZE);
20768@@ -82,28 +102,71 @@ SECTIONS
20769 IRQENTRY_TEXT
20770 *(.fixup)
20771 *(.gnu.warning)
20772- /* End of text section */
20773- _etext = .;
20774 } :text = 0x9090
20775
20776- NOTES :text :note
20777+ . += __KERNEL_TEXT_OFFSET;
20778
20779- EXCEPTION_TABLE(16) :text = 0x9090
20780+#ifdef CONFIG_X86_32
20781+ . = ALIGN(PAGE_SIZE);
20782+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
20783+ *(.vmi.rom)
20784+ } :module
20785+
20786+ . = ALIGN(PAGE_SIZE);
20787+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20788+
20789+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20790+ MODULES_EXEC_VADDR = .;
20791+ BYTE(0)
20792+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20793+ . = ALIGN(HPAGE_SIZE);
20794+ MODULES_EXEC_END = . - 1;
20795+#endif
20796+
20797+ } :module
20798+#endif
20799+
20800+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20801+ /* End of text section */
20802+ _etext = . - __KERNEL_TEXT_OFFSET;
20803+ }
20804+
20805+#ifdef CONFIG_X86_32
20806+ . = ALIGN(PAGE_SIZE);
20807+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20808+ *(.idt)
20809+ . = ALIGN(PAGE_SIZE);
20810+ *(.empty_zero_page)
20811+ *(.swapper_pg_fixmap)
20812+ *(.swapper_pg_pmd)
20813+ *(.swapper_pg_dir)
20814+ *(.trampoline_pg_dir)
20815+ } :rodata
20816+#endif
20817+
20818+ . = ALIGN(PAGE_SIZE);
20819+ NOTES :rodata :note
20820+
20821+ EXCEPTION_TABLE(16) :rodata
20822
20823 RO_DATA(PAGE_SIZE)
20824
20825 /* Data */
20826 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20827+
20828+#ifdef CONFIG_PAX_KERNEXEC
20829+ . = ALIGN(HPAGE_SIZE);
20830+#else
20831+ . = ALIGN(PAGE_SIZE);
20832+#endif
20833+
20834 /* Start of data section */
20835 _sdata = .;
20836
20837 /* init_task */
20838 INIT_TASK_DATA(THREAD_SIZE)
20839
20840-#ifdef CONFIG_X86_32
20841- /* 32 bit has nosave before _edata */
20842 NOSAVE_DATA
20843-#endif
20844
20845 PAGE_ALIGNED_DATA(PAGE_SIZE)
20846
20847@@ -112,6 +175,8 @@ SECTIONS
20848 DATA_DATA
20849 CONSTRUCTORS
20850
20851+ jiffies = jiffies_64;
20852+
20853 /* rarely changed data like cpu maps */
20854 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
20855
20856@@ -166,12 +231,6 @@ SECTIONS
20857 }
20858 vgetcpu_mode = VVIRT(.vgetcpu_mode);
20859
20860- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
20861- .jiffies : AT(VLOAD(.jiffies)) {
20862- *(.jiffies)
20863- }
20864- jiffies = VVIRT(.jiffies);
20865-
20866 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
20867 *(.vsyscall_3)
20868 }
20869@@ -187,12 +246,19 @@ SECTIONS
20870 #endif /* CONFIG_X86_64 */
20871
20872 /* Init code and data - will be freed after init */
20873- . = ALIGN(PAGE_SIZE);
20874 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20875+ BYTE(0)
20876+
20877+#ifdef CONFIG_PAX_KERNEXEC
20878+ . = ALIGN(HPAGE_SIZE);
20879+#else
20880+ . = ALIGN(PAGE_SIZE);
20881+#endif
20882+
20883 __init_begin = .; /* paired with __init_end */
20884- }
20885+ } :init.begin
20886
20887-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20888+#ifdef CONFIG_SMP
20889 /*
20890 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20891 * output PHDR, so the next output section - .init.text - should
20892@@ -201,12 +267,27 @@ SECTIONS
20893 PERCPU_VADDR(0, :percpu)
20894 #endif
20895
20896- INIT_TEXT_SECTION(PAGE_SIZE)
20897-#ifdef CONFIG_X86_64
20898- :init
20899-#endif
20900+ . = ALIGN(PAGE_SIZE);
20901+ init_begin = .;
20902+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20903+ VMLINUX_SYMBOL(_sinittext) = .;
20904+ INIT_TEXT
20905+ VMLINUX_SYMBOL(_einittext) = .;
20906+ . = ALIGN(PAGE_SIZE);
20907+ } :text.init
20908
20909- INIT_DATA_SECTION(16)
20910+ /*
20911+ * .exit.text is discard at runtime, not link time, to deal with
20912+ * references from .altinstructions and .eh_frame
20913+ */
20914+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20915+ EXIT_TEXT
20916+ . = ALIGN(16);
20917+ } :text.exit
20918+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20919+
20920+ . = ALIGN(PAGE_SIZE);
20921+ INIT_DATA_SECTION(16) :init
20922
20923 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20924 __x86_cpu_dev_start = .;
20925@@ -232,19 +313,11 @@ SECTIONS
20926 *(.altinstr_replacement)
20927 }
20928
20929- /*
20930- * .exit.text is discard at runtime, not link time, to deal with
20931- * references from .altinstructions and .eh_frame
20932- */
20933- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20934- EXIT_TEXT
20935- }
20936-
20937 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20938 EXIT_DATA
20939 }
20940
20941-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20942+#ifndef CONFIG_SMP
20943 PERCPU(PAGE_SIZE)
20944 #endif
20945
20946@@ -267,12 +340,6 @@ SECTIONS
20947 . = ALIGN(PAGE_SIZE);
20948 }
20949
20950-#ifdef CONFIG_X86_64
20951- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20952- NOSAVE_DATA
20953- }
20954-#endif
20955-
20956 /* BSS */
20957 . = ALIGN(PAGE_SIZE);
20958 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20959@@ -288,6 +355,7 @@ SECTIONS
20960 __brk_base = .;
20961 . += 64 * 1024; /* 64k alignment slop space */
20962 *(.brk_reservation) /* areas brk users have reserved */
20963+ . = ALIGN(HPAGE_SIZE);
20964 __brk_limit = .;
20965 }
20966
20967@@ -316,13 +384,12 @@ SECTIONS
20968 * for the boot processor.
20969 */
20970 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
20971-INIT_PER_CPU(gdt_page);
20972 INIT_PER_CPU(irq_stack_union);
20973
20974 /*
20975 * Build-time check on the image size:
20976 */
20977-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20978+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20979 "kernel image bigger than KERNEL_IMAGE_SIZE");
20980
20981 #ifdef CONFIG_SMP
20982diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20983index 62f39d7..3bc46a1 100644
20984--- a/arch/x86/kernel/vsyscall_64.c
20985+++ b/arch/x86/kernel/vsyscall_64.c
20986@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
20987
20988 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
20989 /* copy vsyscall data */
20990+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
20991 vsyscall_gtod_data.clock.vread = clock->vread;
20992 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
20993 vsyscall_gtod_data.clock.mask = clock->mask;
20994@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
20995 We do this here because otherwise user space would do it on
20996 its own in a likely inferior way (no access to jiffies).
20997 If you don't like it pass NULL. */
20998- if (tcache && tcache->blob[0] == (j = __jiffies)) {
20999+ if (tcache && tcache->blob[0] == (j = jiffies)) {
21000 p = tcache->blob[1];
21001 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21002 /* Load per CPU data from RDTSCP */
21003diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21004index 3909e3b..5433a97 100644
21005--- a/arch/x86/kernel/x8664_ksyms_64.c
21006+++ b/arch/x86/kernel/x8664_ksyms_64.c
21007@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21008
21009 EXPORT_SYMBOL(copy_user_generic);
21010 EXPORT_SYMBOL(__copy_user_nocache);
21011-EXPORT_SYMBOL(copy_from_user);
21012-EXPORT_SYMBOL(copy_to_user);
21013 EXPORT_SYMBOL(__copy_from_user_inatomic);
21014
21015 EXPORT_SYMBOL(copy_page);
21016diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21017index c5ee17e..d63218f 100644
21018--- a/arch/x86/kernel/xsave.c
21019+++ b/arch/x86/kernel/xsave.c
21020@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21021 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21022 return -1;
21023
21024- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21025+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21026 fx_sw_user->extended_size -
21027 FP_XSTATE_MAGIC2_SIZE));
21028 /*
21029@@ -196,7 +196,7 @@ fx_only:
21030 * the other extended state.
21031 */
21032 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21033- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21034+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21035 }
21036
21037 /*
21038@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21039 if (task_thread_info(tsk)->status & TS_XSAVE)
21040 err = restore_user_xstate(buf);
21041 else
21042- err = fxrstor_checking((__force struct i387_fxsave_struct *)
21043+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
21044 buf);
21045 if (unlikely(err)) {
21046 /*
21047diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21048index 1350e43..a94b011 100644
21049--- a/arch/x86/kvm/emulate.c
21050+++ b/arch/x86/kvm/emulate.c
21051@@ -81,8 +81,8 @@
21052 #define Src2CL (1<<29)
21053 #define Src2ImmByte (2<<29)
21054 #define Src2One (3<<29)
21055-#define Src2Imm16 (4<<29)
21056-#define Src2Mask (7<<29)
21057+#define Src2Imm16 (4U<<29)
21058+#define Src2Mask (7U<<29)
21059
21060 enum {
21061 Group1_80, Group1_81, Group1_82, Group1_83,
21062@@ -411,6 +411,7 @@ static u32 group2_table[] = {
21063
21064 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21065 do { \
21066+ unsigned long _tmp; \
21067 __asm__ __volatile__ ( \
21068 _PRE_EFLAGS("0", "4", "2") \
21069 _op _suffix " %"_x"3,%1; " \
21070@@ -424,8 +425,6 @@ static u32 group2_table[] = {
21071 /* Raw emulation: instruction has two explicit operands. */
21072 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21073 do { \
21074- unsigned long _tmp; \
21075- \
21076 switch ((_dst).bytes) { \
21077 case 2: \
21078 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21079@@ -441,7 +440,6 @@ static u32 group2_table[] = {
21080
21081 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21082 do { \
21083- unsigned long _tmp; \
21084 switch ((_dst).bytes) { \
21085 case 1: \
21086 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21087diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21088index 8dfeaaa..4daa395 100644
21089--- a/arch/x86/kvm/lapic.c
21090+++ b/arch/x86/kvm/lapic.c
21091@@ -52,7 +52,7 @@
21092 #define APIC_BUS_CYCLE_NS 1
21093
21094 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21095-#define apic_debug(fmt, arg...)
21096+#define apic_debug(fmt, arg...) do {} while (0)
21097
21098 #define APIC_LVT_NUM 6
21099 /* 14 is the version for Xeon and Pentium 8.4.8*/
21100diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21101index 3bc2707..dd157e2 100644
21102--- a/arch/x86/kvm/paging_tmpl.h
21103+++ b/arch/x86/kvm/paging_tmpl.h
21104@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21105 int level = PT_PAGE_TABLE_LEVEL;
21106 unsigned long mmu_seq;
21107
21108+ pax_track_stack();
21109+
21110 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21111 kvm_mmu_audit(vcpu, "pre page fault");
21112
21113@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21114 kvm_mmu_free_some_pages(vcpu);
21115 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21116 level, &write_pt, pfn);
21117+ (void)sptep;
21118 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21119 sptep, *sptep, write_pt);
21120
21121diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21122index 7c6e63e..c5d92c1 100644
21123--- a/arch/x86/kvm/svm.c
21124+++ b/arch/x86/kvm/svm.c
21125@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21126 int cpu = raw_smp_processor_id();
21127
21128 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21129+
21130+ pax_open_kernel();
21131 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21132+ pax_close_kernel();
21133+
21134 load_TR_desc();
21135 }
21136
21137@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21138 return true;
21139 }
21140
21141-static struct kvm_x86_ops svm_x86_ops = {
21142+static const struct kvm_x86_ops svm_x86_ops = {
21143 .cpu_has_kvm_support = has_svm,
21144 .disabled_by_bios = is_disabled,
21145 .hardware_setup = svm_hardware_setup,
21146diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21147index e6d925f..e7a4af8 100644
21148--- a/arch/x86/kvm/vmx.c
21149+++ b/arch/x86/kvm/vmx.c
21150@@ -570,7 +570,11 @@ static void reload_tss(void)
21151
21152 kvm_get_gdt(&gdt);
21153 descs = (void *)gdt.base;
21154+
21155+ pax_open_kernel();
21156 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21157+ pax_close_kernel();
21158+
21159 load_TR_desc();
21160 }
21161
21162@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21163 if (!cpu_has_vmx_flexpriority())
21164 flexpriority_enabled = 0;
21165
21166- if (!cpu_has_vmx_tpr_shadow())
21167- kvm_x86_ops->update_cr8_intercept = NULL;
21168+ if (!cpu_has_vmx_tpr_shadow()) {
21169+ pax_open_kernel();
21170+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21171+ pax_close_kernel();
21172+ }
21173
21174 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21175 kvm_disable_largepages();
21176@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21177 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21178
21179 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21180- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21181+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21182 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21183 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21184 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21185@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21186 "jmp .Lkvm_vmx_return \n\t"
21187 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21188 ".Lkvm_vmx_return: "
21189+
21190+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21191+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21192+ ".Lkvm_vmx_return2: "
21193+#endif
21194+
21195 /* Save guest registers, load host registers, keep flags */
21196 "xchg %0, (%%"R"sp) \n\t"
21197 "mov %%"R"ax, %c[rax](%0) \n\t"
21198@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21199 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21200 #endif
21201 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21202+
21203+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21204+ ,[cs]"i"(__KERNEL_CS)
21205+#endif
21206+
21207 : "cc", "memory"
21208- , R"bx", R"di", R"si"
21209+ , R"ax", R"bx", R"di", R"si"
21210 #ifdef CONFIG_X86_64
21211 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21212 #endif
21213@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21214 if (vmx->rmode.irq.pending)
21215 fixup_rmode_irq(vmx);
21216
21217- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21218+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21219+
21220+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21221+ loadsegment(fs, __KERNEL_PERCPU);
21222+#endif
21223+
21224+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21225+ __set_fs(current_thread_info()->addr_limit);
21226+#endif
21227+
21228 vmx->launched = 1;
21229
21230 vmx_complete_interrupts(vmx);
21231@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21232 return false;
21233 }
21234
21235-static struct kvm_x86_ops vmx_x86_ops = {
21236+static const struct kvm_x86_ops vmx_x86_ops = {
21237 .cpu_has_kvm_support = cpu_has_kvm_support,
21238 .disabled_by_bios = vmx_disabled_by_bios,
21239 .hardware_setup = hardware_setup,
21240diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21241index df1cefb..5e882ad 100644
21242--- a/arch/x86/kvm/x86.c
21243+++ b/arch/x86/kvm/x86.c
21244@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21245 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21246 struct kvm_cpuid_entry2 __user *entries);
21247
21248-struct kvm_x86_ops *kvm_x86_ops;
21249+const struct kvm_x86_ops *kvm_x86_ops;
21250 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21251
21252 int ignore_msrs = 0;
21253@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21254 struct kvm_cpuid2 *cpuid,
21255 struct kvm_cpuid_entry2 __user *entries)
21256 {
21257- int r;
21258+ int r, i;
21259
21260 r = -E2BIG;
21261 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21262 goto out;
21263 r = -EFAULT;
21264- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21265- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21266+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21267 goto out;
21268+ for (i = 0; i < cpuid->nent; ++i) {
21269+ struct kvm_cpuid_entry2 cpuid_entry;
21270+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21271+ goto out;
21272+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21273+ }
21274 vcpu->arch.cpuid_nent = cpuid->nent;
21275 kvm_apic_set_version(vcpu);
21276 return 0;
21277@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21278 struct kvm_cpuid2 *cpuid,
21279 struct kvm_cpuid_entry2 __user *entries)
21280 {
21281- int r;
21282+ int r, i;
21283
21284 vcpu_load(vcpu);
21285 r = -E2BIG;
21286 if (cpuid->nent < vcpu->arch.cpuid_nent)
21287 goto out;
21288 r = -EFAULT;
21289- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21290- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21291+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21292 goto out;
21293+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21294+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21295+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21296+ goto out;
21297+ }
21298 return 0;
21299
21300 out:
21301@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21302 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21303 struct kvm_interrupt *irq)
21304 {
21305- if (irq->irq < 0 || irq->irq >= 256)
21306+ if (irq->irq >= 256)
21307 return -EINVAL;
21308 if (irqchip_in_kernel(vcpu->kvm))
21309 return -ENXIO;
21310@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21311 .notifier_call = kvmclock_cpufreq_notifier
21312 };
21313
21314-int kvm_arch_init(void *opaque)
21315+int kvm_arch_init(const void *opaque)
21316 {
21317 int r, cpu;
21318- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21319+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21320
21321 if (kvm_x86_ops) {
21322 printk(KERN_ERR "kvm: already loaded the other module\n");
21323diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21324index 7e59dc1..b88c98f 100644
21325--- a/arch/x86/lguest/boot.c
21326+++ b/arch/x86/lguest/boot.c
21327@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21328 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21329 * Launcher to reboot us.
21330 */
21331-static void lguest_restart(char *reason)
21332+static __noreturn void lguest_restart(char *reason)
21333 {
21334 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21335+ BUG();
21336 }
21337
21338 /*G:050
21339diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21340index 824fa0b..c619e96 100644
21341--- a/arch/x86/lib/atomic64_32.c
21342+++ b/arch/x86/lib/atomic64_32.c
21343@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21344 }
21345 EXPORT_SYMBOL(atomic64_cmpxchg);
21346
21347+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21348+{
21349+ return cmpxchg8b(&ptr->counter, old_val, new_val);
21350+}
21351+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21352+
21353 /**
21354 * atomic64_xchg - xchg atomic64 variable
21355 * @ptr: pointer to type atomic64_t
21356@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21357 EXPORT_SYMBOL(atomic64_xchg);
21358
21359 /**
21360+ * atomic64_xchg_unchecked - xchg atomic64 variable
21361+ * @ptr: pointer to type atomic64_unchecked_t
21362+ * @new_val: value to assign
21363+ *
21364+ * Atomically xchgs the value of @ptr to @new_val and returns
21365+ * the old value.
21366+ */
21367+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21368+{
21369+ /*
21370+ * Try first with a (possibly incorrect) assumption about
21371+ * what we have there. We'll do two loops most likely,
21372+ * but we'll get an ownership MESI transaction straight away
21373+ * instead of a read transaction followed by a
21374+ * flush-for-ownership transaction:
21375+ */
21376+ u64 old_val, real_val = 0;
21377+
21378+ do {
21379+ old_val = real_val;
21380+
21381+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21382+
21383+ } while (real_val != old_val);
21384+
21385+ return old_val;
21386+}
21387+EXPORT_SYMBOL(atomic64_xchg_unchecked);
21388+
21389+/**
21390 * atomic64_set - set atomic64 variable
21391 * @ptr: pointer to type atomic64_t
21392 * @new_val: value to assign
21393@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21394 EXPORT_SYMBOL(atomic64_set);
21395
21396 /**
21397-EXPORT_SYMBOL(atomic64_read);
21398+ * atomic64_unchecked_set - set atomic64 variable
21399+ * @ptr: pointer to type atomic64_unchecked_t
21400+ * @new_val: value to assign
21401+ *
21402+ * Atomically sets the value of @ptr to @new_val.
21403+ */
21404+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21405+{
21406+ atomic64_xchg_unchecked(ptr, new_val);
21407+}
21408+EXPORT_SYMBOL(atomic64_set_unchecked);
21409+
21410+/**
21411 * atomic64_add_return - add and return
21412 * @delta: integer value to add
21413 * @ptr: pointer to type atomic64_t
21414@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21415 }
21416 EXPORT_SYMBOL(atomic64_add_return);
21417
21418+/**
21419+ * atomic64_add_return_unchecked - add and return
21420+ * @delta: integer value to add
21421+ * @ptr: pointer to type atomic64_unchecked_t
21422+ *
21423+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
21424+ */
21425+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21426+{
21427+ /*
21428+ * Try first with a (possibly incorrect) assumption about
21429+ * what we have there. We'll do two loops most likely,
21430+ * but we'll get an ownership MESI transaction straight away
21431+ * instead of a read transaction followed by a
21432+ * flush-for-ownership transaction:
21433+ */
21434+ u64 old_val, new_val, real_val = 0;
21435+
21436+ do {
21437+ old_val = real_val;
21438+ new_val = old_val + delta;
21439+
21440+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21441+
21442+ } while (real_val != old_val);
21443+
21444+ return new_val;
21445+}
21446+EXPORT_SYMBOL(atomic64_add_return_unchecked);
21447+
21448 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21449 {
21450 return atomic64_add_return(-delta, ptr);
21451 }
21452 EXPORT_SYMBOL(atomic64_sub_return);
21453
21454+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21455+{
21456+ return atomic64_add_return_unchecked(-delta, ptr);
21457+}
21458+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21459+
21460 u64 atomic64_inc_return(atomic64_t *ptr)
21461 {
21462 return atomic64_add_return(1, ptr);
21463 }
21464 EXPORT_SYMBOL(atomic64_inc_return);
21465
21466+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21467+{
21468+ return atomic64_add_return_unchecked(1, ptr);
21469+}
21470+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21471+
21472 u64 atomic64_dec_return(atomic64_t *ptr)
21473 {
21474 return atomic64_sub_return(1, ptr);
21475 }
21476 EXPORT_SYMBOL(atomic64_dec_return);
21477
21478+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21479+{
21480+ return atomic64_sub_return_unchecked(1, ptr);
21481+}
21482+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21483+
21484 /**
21485 * atomic64_add - add integer to atomic64 variable
21486 * @delta: integer value to add
21487@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21488 EXPORT_SYMBOL(atomic64_add);
21489
21490 /**
21491+ * atomic64_add_unchecked - add integer to atomic64 variable
21492+ * @delta: integer value to add
21493+ * @ptr: pointer to type atomic64_unchecked_t
21494+ *
21495+ * Atomically adds @delta to @ptr.
21496+ */
21497+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21498+{
21499+ atomic64_add_return_unchecked(delta, ptr);
21500+}
21501+EXPORT_SYMBOL(atomic64_add_unchecked);
21502+
21503+/**
21504 * atomic64_sub - subtract the atomic64 variable
21505 * @delta: integer value to subtract
21506 * @ptr: pointer to type atomic64_t
21507@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21508 EXPORT_SYMBOL(atomic64_sub);
21509
21510 /**
21511+ * atomic64_sub_unchecked - subtract the atomic64 variable
21512+ * @delta: integer value to subtract
21513+ * @ptr: pointer to type atomic64_unchecked_t
21514+ *
21515+ * Atomically subtracts @delta from @ptr.
21516+ */
21517+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21518+{
21519+ atomic64_add_unchecked(-delta, ptr);
21520+}
21521+EXPORT_SYMBOL(atomic64_sub_unchecked);
21522+
21523+/**
21524 * atomic64_sub_and_test - subtract value from variable and test result
21525 * @delta: integer value to subtract
21526 * @ptr: pointer to type atomic64_t
21527@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21528 EXPORT_SYMBOL(atomic64_inc);
21529
21530 /**
21531+ * atomic64_inc_unchecked - increment atomic64 variable
21532+ * @ptr: pointer to type atomic64_unchecked_t
21533+ *
21534+ * Atomically increments @ptr by 1.
21535+ */
21536+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21537+{
21538+ atomic64_add_unchecked(1, ptr);
21539+}
21540+EXPORT_SYMBOL(atomic64_inc_unchecked);
21541+
21542+/**
21543 * atomic64_dec - decrement atomic64 variable
21544 * @ptr: pointer to type atomic64_t
21545 *
21546@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21547 EXPORT_SYMBOL(atomic64_dec);
21548
21549 /**
21550+ * atomic64_dec_unchecked - decrement atomic64 variable
21551+ * @ptr: pointer to type atomic64_unchecked_t
21552+ *
21553+ * Atomically decrements @ptr by 1.
21554+ */
21555+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21556+{
21557+ atomic64_sub_unchecked(1, ptr);
21558+}
21559+EXPORT_SYMBOL(atomic64_dec_unchecked);
21560+
21561+/**
21562 * atomic64_dec_and_test - decrement and test
21563 * @ptr: pointer to type atomic64_t
21564 *
21565diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21566index adbccd0..98f96c8 100644
21567--- a/arch/x86/lib/checksum_32.S
21568+++ b/arch/x86/lib/checksum_32.S
21569@@ -28,7 +28,8 @@
21570 #include <linux/linkage.h>
21571 #include <asm/dwarf2.h>
21572 #include <asm/errno.h>
21573-
21574+#include <asm/segment.h>
21575+
21576 /*
21577 * computes a partial checksum, e.g. for TCP/UDP fragments
21578 */
21579@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21580
21581 #define ARGBASE 16
21582 #define FP 12
21583-
21584-ENTRY(csum_partial_copy_generic)
21585+
21586+ENTRY(csum_partial_copy_generic_to_user)
21587 CFI_STARTPROC
21588+
21589+#ifdef CONFIG_PAX_MEMORY_UDEREF
21590+ pushl %gs
21591+ CFI_ADJUST_CFA_OFFSET 4
21592+ popl %es
21593+ CFI_ADJUST_CFA_OFFSET -4
21594+ jmp csum_partial_copy_generic
21595+#endif
21596+
21597+ENTRY(csum_partial_copy_generic_from_user)
21598+
21599+#ifdef CONFIG_PAX_MEMORY_UDEREF
21600+ pushl %gs
21601+ CFI_ADJUST_CFA_OFFSET 4
21602+ popl %ds
21603+ CFI_ADJUST_CFA_OFFSET -4
21604+#endif
21605+
21606+ENTRY(csum_partial_copy_generic)
21607 subl $4,%esp
21608 CFI_ADJUST_CFA_OFFSET 4
21609 pushl %edi
21610@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21611 jmp 4f
21612 SRC(1: movw (%esi), %bx )
21613 addl $2, %esi
21614-DST( movw %bx, (%edi) )
21615+DST( movw %bx, %es:(%edi) )
21616 addl $2, %edi
21617 addw %bx, %ax
21618 adcl $0, %eax
21619@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21620 SRC(1: movl (%esi), %ebx )
21621 SRC( movl 4(%esi), %edx )
21622 adcl %ebx, %eax
21623-DST( movl %ebx, (%edi) )
21624+DST( movl %ebx, %es:(%edi) )
21625 adcl %edx, %eax
21626-DST( movl %edx, 4(%edi) )
21627+DST( movl %edx, %es:4(%edi) )
21628
21629 SRC( movl 8(%esi), %ebx )
21630 SRC( movl 12(%esi), %edx )
21631 adcl %ebx, %eax
21632-DST( movl %ebx, 8(%edi) )
21633+DST( movl %ebx, %es:8(%edi) )
21634 adcl %edx, %eax
21635-DST( movl %edx, 12(%edi) )
21636+DST( movl %edx, %es:12(%edi) )
21637
21638 SRC( movl 16(%esi), %ebx )
21639 SRC( movl 20(%esi), %edx )
21640 adcl %ebx, %eax
21641-DST( movl %ebx, 16(%edi) )
21642+DST( movl %ebx, %es:16(%edi) )
21643 adcl %edx, %eax
21644-DST( movl %edx, 20(%edi) )
21645+DST( movl %edx, %es:20(%edi) )
21646
21647 SRC( movl 24(%esi), %ebx )
21648 SRC( movl 28(%esi), %edx )
21649 adcl %ebx, %eax
21650-DST( movl %ebx, 24(%edi) )
21651+DST( movl %ebx, %es:24(%edi) )
21652 adcl %edx, %eax
21653-DST( movl %edx, 28(%edi) )
21654+DST( movl %edx, %es:28(%edi) )
21655
21656 lea 32(%esi), %esi
21657 lea 32(%edi), %edi
21658@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21659 shrl $2, %edx # This clears CF
21660 SRC(3: movl (%esi), %ebx )
21661 adcl %ebx, %eax
21662-DST( movl %ebx, (%edi) )
21663+DST( movl %ebx, %es:(%edi) )
21664 lea 4(%esi), %esi
21665 lea 4(%edi), %edi
21666 dec %edx
21667@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21668 jb 5f
21669 SRC( movw (%esi), %cx )
21670 leal 2(%esi), %esi
21671-DST( movw %cx, (%edi) )
21672+DST( movw %cx, %es:(%edi) )
21673 leal 2(%edi), %edi
21674 je 6f
21675 shll $16,%ecx
21676 SRC(5: movb (%esi), %cl )
21677-DST( movb %cl, (%edi) )
21678+DST( movb %cl, %es:(%edi) )
21679 6: addl %ecx, %eax
21680 adcl $0, %eax
21681 7:
21682@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21683
21684 6001:
21685 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21686- movl $-EFAULT, (%ebx)
21687+ movl $-EFAULT, %ss:(%ebx)
21688
21689 # zero the complete destination - computing the rest
21690 # is too much work
21691@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21692
21693 6002:
21694 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21695- movl $-EFAULT,(%ebx)
21696+ movl $-EFAULT,%ss:(%ebx)
21697 jmp 5000b
21698
21699 .previous
21700
21701+ pushl %ss
21702+ CFI_ADJUST_CFA_OFFSET 4
21703+ popl %ds
21704+ CFI_ADJUST_CFA_OFFSET -4
21705+ pushl %ss
21706+ CFI_ADJUST_CFA_OFFSET 4
21707+ popl %es
21708+ CFI_ADJUST_CFA_OFFSET -4
21709 popl %ebx
21710 CFI_ADJUST_CFA_OFFSET -4
21711 CFI_RESTORE ebx
21712@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21713 CFI_ADJUST_CFA_OFFSET -4
21714 ret
21715 CFI_ENDPROC
21716-ENDPROC(csum_partial_copy_generic)
21717+ENDPROC(csum_partial_copy_generic_to_user)
21718
21719 #else
21720
21721 /* Version for PentiumII/PPro */
21722
21723 #define ROUND1(x) \
21724+ nop; nop; nop; \
21725 SRC(movl x(%esi), %ebx ) ; \
21726 addl %ebx, %eax ; \
21727- DST(movl %ebx, x(%edi) ) ;
21728+ DST(movl %ebx, %es:x(%edi)) ;
21729
21730 #define ROUND(x) \
21731+ nop; nop; nop; \
21732 SRC(movl x(%esi), %ebx ) ; \
21733 adcl %ebx, %eax ; \
21734- DST(movl %ebx, x(%edi) ) ;
21735+ DST(movl %ebx, %es:x(%edi)) ;
21736
21737 #define ARGBASE 12
21738-
21739-ENTRY(csum_partial_copy_generic)
21740+
21741+ENTRY(csum_partial_copy_generic_to_user)
21742 CFI_STARTPROC
21743+
21744+#ifdef CONFIG_PAX_MEMORY_UDEREF
21745+ pushl %gs
21746+ CFI_ADJUST_CFA_OFFSET 4
21747+ popl %es
21748+ CFI_ADJUST_CFA_OFFSET -4
21749+ jmp csum_partial_copy_generic
21750+#endif
21751+
21752+ENTRY(csum_partial_copy_generic_from_user)
21753+
21754+#ifdef CONFIG_PAX_MEMORY_UDEREF
21755+ pushl %gs
21756+ CFI_ADJUST_CFA_OFFSET 4
21757+ popl %ds
21758+ CFI_ADJUST_CFA_OFFSET -4
21759+#endif
21760+
21761+ENTRY(csum_partial_copy_generic)
21762 pushl %ebx
21763 CFI_ADJUST_CFA_OFFSET 4
21764 CFI_REL_OFFSET ebx, 0
21765@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21766 subl %ebx, %edi
21767 lea -1(%esi),%edx
21768 andl $-32,%edx
21769- lea 3f(%ebx,%ebx), %ebx
21770+ lea 3f(%ebx,%ebx,2), %ebx
21771 testl %esi, %esi
21772 jmp *%ebx
21773 1: addl $64,%esi
21774@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21775 jb 5f
21776 SRC( movw (%esi), %dx )
21777 leal 2(%esi), %esi
21778-DST( movw %dx, (%edi) )
21779+DST( movw %dx, %es:(%edi) )
21780 leal 2(%edi), %edi
21781 je 6f
21782 shll $16,%edx
21783 5:
21784 SRC( movb (%esi), %dl )
21785-DST( movb %dl, (%edi) )
21786+DST( movb %dl, %es:(%edi) )
21787 6: addl %edx, %eax
21788 adcl $0, %eax
21789 7:
21790 .section .fixup, "ax"
21791 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21792- movl $-EFAULT, (%ebx)
21793+ movl $-EFAULT, %ss:(%ebx)
21794 # zero the complete destination (computing the rest is too much work)
21795 movl ARGBASE+8(%esp),%edi # dst
21796 movl ARGBASE+12(%esp),%ecx # len
21797@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
21798 rep; stosb
21799 jmp 7b
21800 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21801- movl $-EFAULT, (%ebx)
21802+ movl $-EFAULT, %ss:(%ebx)
21803 jmp 7b
21804 .previous
21805
21806+#ifdef CONFIG_PAX_MEMORY_UDEREF
21807+ pushl %ss
21808+ CFI_ADJUST_CFA_OFFSET 4
21809+ popl %ds
21810+ CFI_ADJUST_CFA_OFFSET -4
21811+ pushl %ss
21812+ CFI_ADJUST_CFA_OFFSET 4
21813+ popl %es
21814+ CFI_ADJUST_CFA_OFFSET -4
21815+#endif
21816+
21817 popl %esi
21818 CFI_ADJUST_CFA_OFFSET -4
21819 CFI_RESTORE esi
21820@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
21821 CFI_RESTORE ebx
21822 ret
21823 CFI_ENDPROC
21824-ENDPROC(csum_partial_copy_generic)
21825+ENDPROC(csum_partial_copy_generic_to_user)
21826
21827 #undef ROUND
21828 #undef ROUND1
21829diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21830index ebeafcc..1e3a402 100644
21831--- a/arch/x86/lib/clear_page_64.S
21832+++ b/arch/x86/lib/clear_page_64.S
21833@@ -1,5 +1,6 @@
21834 #include <linux/linkage.h>
21835 #include <asm/dwarf2.h>
21836+#include <asm/alternative-asm.h>
21837
21838 /*
21839 * Zero a page.
21840@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
21841 movl $4096/8,%ecx
21842 xorl %eax,%eax
21843 rep stosq
21844+ pax_force_retaddr
21845 ret
21846 CFI_ENDPROC
21847 ENDPROC(clear_page_c)
21848@@ -33,6 +35,7 @@ ENTRY(clear_page)
21849 leaq 64(%rdi),%rdi
21850 jnz .Lloop
21851 nop
21852+ pax_force_retaddr
21853 ret
21854 CFI_ENDPROC
21855 .Lclear_page_end:
21856@@ -43,7 +46,7 @@ ENDPROC(clear_page)
21857
21858 #include <asm/cpufeature.h>
21859
21860- .section .altinstr_replacement,"ax"
21861+ .section .altinstr_replacement,"a"
21862 1: .byte 0xeb /* jmp <disp8> */
21863 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21864 2:
21865diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21866index 727a5d4..333818a 100644
21867--- a/arch/x86/lib/copy_page_64.S
21868+++ b/arch/x86/lib/copy_page_64.S
21869@@ -2,12 +2,14 @@
21870
21871 #include <linux/linkage.h>
21872 #include <asm/dwarf2.h>
21873+#include <asm/alternative-asm.h>
21874
21875 ALIGN
21876 copy_page_c:
21877 CFI_STARTPROC
21878 movl $4096/8,%ecx
21879 rep movsq
21880+ pax_force_retaddr
21881 ret
21882 CFI_ENDPROC
21883 ENDPROC(copy_page_c)
21884@@ -38,7 +40,7 @@ ENTRY(copy_page)
21885 movq 16 (%rsi), %rdx
21886 movq 24 (%rsi), %r8
21887 movq 32 (%rsi), %r9
21888- movq 40 (%rsi), %r10
21889+ movq 40 (%rsi), %r13
21890 movq 48 (%rsi), %r11
21891 movq 56 (%rsi), %r12
21892
21893@@ -49,7 +51,7 @@ ENTRY(copy_page)
21894 movq %rdx, 16 (%rdi)
21895 movq %r8, 24 (%rdi)
21896 movq %r9, 32 (%rdi)
21897- movq %r10, 40 (%rdi)
21898+ movq %r13, 40 (%rdi)
21899 movq %r11, 48 (%rdi)
21900 movq %r12, 56 (%rdi)
21901
21902@@ -68,7 +70,7 @@ ENTRY(copy_page)
21903 movq 16 (%rsi), %rdx
21904 movq 24 (%rsi), %r8
21905 movq 32 (%rsi), %r9
21906- movq 40 (%rsi), %r10
21907+ movq 40 (%rsi), %r13
21908 movq 48 (%rsi), %r11
21909 movq 56 (%rsi), %r12
21910
21911@@ -77,7 +79,7 @@ ENTRY(copy_page)
21912 movq %rdx, 16 (%rdi)
21913 movq %r8, 24 (%rdi)
21914 movq %r9, 32 (%rdi)
21915- movq %r10, 40 (%rdi)
21916+ movq %r13, 40 (%rdi)
21917 movq %r11, 48 (%rdi)
21918 movq %r12, 56 (%rdi)
21919
21920@@ -94,6 +96,7 @@ ENTRY(copy_page)
21921 CFI_RESTORE r13
21922 addq $3*8,%rsp
21923 CFI_ADJUST_CFA_OFFSET -3*8
21924+ pax_force_retaddr
21925 ret
21926 .Lcopy_page_end:
21927 CFI_ENDPROC
21928@@ -104,7 +107,7 @@ ENDPROC(copy_page)
21929
21930 #include <asm/cpufeature.h>
21931
21932- .section .altinstr_replacement,"ax"
21933+ .section .altinstr_replacement,"a"
21934 1: .byte 0xeb /* jmp <disp8> */
21935 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21936 2:
21937diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21938index af8debd..40c75f3 100644
21939--- a/arch/x86/lib/copy_user_64.S
21940+++ b/arch/x86/lib/copy_user_64.S
21941@@ -15,13 +15,15 @@
21942 #include <asm/asm-offsets.h>
21943 #include <asm/thread_info.h>
21944 #include <asm/cpufeature.h>
21945+#include <asm/pgtable.h>
21946+#include <asm/alternative-asm.h>
21947
21948 .macro ALTERNATIVE_JUMP feature,orig,alt
21949 0:
21950 .byte 0xe9 /* 32bit jump */
21951 .long \orig-1f /* by default jump to orig */
21952 1:
21953- .section .altinstr_replacement,"ax"
21954+ .section .altinstr_replacement,"a"
21955 2: .byte 0xe9 /* near jump with 32bit immediate */
21956 .long \alt-1b /* offset */ /* or alternatively to alt */
21957 .previous
21958@@ -64,55 +66,26 @@
21959 #endif
21960 .endm
21961
21962-/* Standard copy_to_user with segment limit checking */
21963-ENTRY(copy_to_user)
21964- CFI_STARTPROC
21965- GET_THREAD_INFO(%rax)
21966- movq %rdi,%rcx
21967- addq %rdx,%rcx
21968- jc bad_to_user
21969- cmpq TI_addr_limit(%rax),%rcx
21970- ja bad_to_user
21971- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21972- CFI_ENDPROC
21973-ENDPROC(copy_to_user)
21974-
21975-/* Standard copy_from_user with segment limit checking */
21976-ENTRY(copy_from_user)
21977- CFI_STARTPROC
21978- GET_THREAD_INFO(%rax)
21979- movq %rsi,%rcx
21980- addq %rdx,%rcx
21981- jc bad_from_user
21982- cmpq TI_addr_limit(%rax),%rcx
21983- ja bad_from_user
21984- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21985- CFI_ENDPROC
21986-ENDPROC(copy_from_user)
21987-
21988 ENTRY(copy_user_generic)
21989 CFI_STARTPROC
21990 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21991 CFI_ENDPROC
21992 ENDPROC(copy_user_generic)
21993
21994-ENTRY(__copy_from_user_inatomic)
21995- CFI_STARTPROC
21996- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21997- CFI_ENDPROC
21998-ENDPROC(__copy_from_user_inatomic)
21999-
22000 .section .fixup,"ax"
22001 /* must zero dest */
22002 ENTRY(bad_from_user)
22003 bad_from_user:
22004 CFI_STARTPROC
22005+ testl %edx,%edx
22006+ js bad_to_user
22007 movl %edx,%ecx
22008 xorl %eax,%eax
22009 rep
22010 stosb
22011 bad_to_user:
22012 movl %edx,%eax
22013+ pax_force_retaddr
22014 ret
22015 CFI_ENDPROC
22016 ENDPROC(bad_from_user)
22017@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22018 jz 17f
22019 1: movq (%rsi),%r8
22020 2: movq 1*8(%rsi),%r9
22021-3: movq 2*8(%rsi),%r10
22022+3: movq 2*8(%rsi),%rax
22023 4: movq 3*8(%rsi),%r11
22024 5: movq %r8,(%rdi)
22025 6: movq %r9,1*8(%rdi)
22026-7: movq %r10,2*8(%rdi)
22027+7: movq %rax,2*8(%rdi)
22028 8: movq %r11,3*8(%rdi)
22029 9: movq 4*8(%rsi),%r8
22030 10: movq 5*8(%rsi),%r9
22031-11: movq 6*8(%rsi),%r10
22032+11: movq 6*8(%rsi),%rax
22033 12: movq 7*8(%rsi),%r11
22034 13: movq %r8,4*8(%rdi)
22035 14: movq %r9,5*8(%rdi)
22036-15: movq %r10,6*8(%rdi)
22037+15: movq %rax,6*8(%rdi)
22038 16: movq %r11,7*8(%rdi)
22039 leaq 64(%rsi),%rsi
22040 leaq 64(%rdi),%rdi
22041@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22042 decl %ecx
22043 jnz 21b
22044 23: xor %eax,%eax
22045+ pax_force_retaddr
22046 ret
22047
22048 .section .fixup,"ax"
22049@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22050 3: rep
22051 movsb
22052 4: xorl %eax,%eax
22053+ pax_force_retaddr
22054 ret
22055
22056 .section .fixup,"ax"
22057diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22058index cb0c112..e3a6895 100644
22059--- a/arch/x86/lib/copy_user_nocache_64.S
22060+++ b/arch/x86/lib/copy_user_nocache_64.S
22061@@ -8,12 +8,14 @@
22062
22063 #include <linux/linkage.h>
22064 #include <asm/dwarf2.h>
22065+#include <asm/alternative-asm.h>
22066
22067 #define FIX_ALIGNMENT 1
22068
22069 #include <asm/current.h>
22070 #include <asm/asm-offsets.h>
22071 #include <asm/thread_info.h>
22072+#include <asm/pgtable.h>
22073
22074 .macro ALIGN_DESTINATION
22075 #ifdef FIX_ALIGNMENT
22076@@ -50,6 +52,15 @@
22077 */
22078 ENTRY(__copy_user_nocache)
22079 CFI_STARTPROC
22080+
22081+#ifdef CONFIG_PAX_MEMORY_UDEREF
22082+ mov $PAX_USER_SHADOW_BASE,%rcx
22083+ cmp %rcx,%rsi
22084+ jae 1f
22085+ add %rcx,%rsi
22086+1:
22087+#endif
22088+
22089 cmpl $8,%edx
22090 jb 20f /* less then 8 bytes, go to byte copy loop */
22091 ALIGN_DESTINATION
22092@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22093 jz 17f
22094 1: movq (%rsi),%r8
22095 2: movq 1*8(%rsi),%r9
22096-3: movq 2*8(%rsi),%r10
22097+3: movq 2*8(%rsi),%rax
22098 4: movq 3*8(%rsi),%r11
22099 5: movnti %r8,(%rdi)
22100 6: movnti %r9,1*8(%rdi)
22101-7: movnti %r10,2*8(%rdi)
22102+7: movnti %rax,2*8(%rdi)
22103 8: movnti %r11,3*8(%rdi)
22104 9: movq 4*8(%rsi),%r8
22105 10: movq 5*8(%rsi),%r9
22106-11: movq 6*8(%rsi),%r10
22107+11: movq 6*8(%rsi),%rax
22108 12: movq 7*8(%rsi),%r11
22109 13: movnti %r8,4*8(%rdi)
22110 14: movnti %r9,5*8(%rdi)
22111-15: movnti %r10,6*8(%rdi)
22112+15: movnti %rax,6*8(%rdi)
22113 16: movnti %r11,7*8(%rdi)
22114 leaq 64(%rsi),%rsi
22115 leaq 64(%rdi),%rdi
22116@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22117 jnz 21b
22118 23: xorl %eax,%eax
22119 sfence
22120+ pax_force_retaddr
22121 ret
22122
22123 .section .fixup,"ax"
22124diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22125index f0dba36..48cb4d6 100644
22126--- a/arch/x86/lib/csum-copy_64.S
22127+++ b/arch/x86/lib/csum-copy_64.S
22128@@ -8,6 +8,7 @@
22129 #include <linux/linkage.h>
22130 #include <asm/dwarf2.h>
22131 #include <asm/errno.h>
22132+#include <asm/alternative-asm.h>
22133
22134 /*
22135 * Checksum copy with exception handling.
22136@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22137 CFI_RESTORE rbp
22138 addq $7*8,%rsp
22139 CFI_ADJUST_CFA_OFFSET -7*8
22140+ pax_force_retaddr 0, 1
22141 ret
22142 CFI_RESTORE_STATE
22143
22144diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22145index 459b58a..9570bc7 100644
22146--- a/arch/x86/lib/csum-wrappers_64.c
22147+++ b/arch/x86/lib/csum-wrappers_64.c
22148@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22149 len -= 2;
22150 }
22151 }
22152- isum = csum_partial_copy_generic((__force const void *)src,
22153+
22154+#ifdef CONFIG_PAX_MEMORY_UDEREF
22155+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22156+ src += PAX_USER_SHADOW_BASE;
22157+#endif
22158+
22159+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22160 dst, len, isum, errp, NULL);
22161 if (unlikely(*errp))
22162 goto out_err;
22163@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22164 }
22165
22166 *errp = 0;
22167- return csum_partial_copy_generic(src, (void __force *)dst,
22168+
22169+#ifdef CONFIG_PAX_MEMORY_UDEREF
22170+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22171+ dst += PAX_USER_SHADOW_BASE;
22172+#endif
22173+
22174+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22175 len, isum, NULL, errp);
22176 }
22177 EXPORT_SYMBOL(csum_partial_copy_to_user);
22178diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22179index 51f1504..ddac4c1 100644
22180--- a/arch/x86/lib/getuser.S
22181+++ b/arch/x86/lib/getuser.S
22182@@ -33,15 +33,38 @@
22183 #include <asm/asm-offsets.h>
22184 #include <asm/thread_info.h>
22185 #include <asm/asm.h>
22186+#include <asm/segment.h>
22187+#include <asm/pgtable.h>
22188+#include <asm/alternative-asm.h>
22189+
22190+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22191+#define __copyuser_seg gs;
22192+#else
22193+#define __copyuser_seg
22194+#endif
22195
22196 .text
22197 ENTRY(__get_user_1)
22198 CFI_STARTPROC
22199+
22200+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22201 GET_THREAD_INFO(%_ASM_DX)
22202 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22203 jae bad_get_user
22204-1: movzb (%_ASM_AX),%edx
22205+
22206+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22207+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22208+ cmp %_ASM_DX,%_ASM_AX
22209+ jae 1234f
22210+ add %_ASM_DX,%_ASM_AX
22211+1234:
22212+#endif
22213+
22214+#endif
22215+
22216+1: __copyuser_seg movzb (%_ASM_AX),%edx
22217 xor %eax,%eax
22218+ pax_force_retaddr
22219 ret
22220 CFI_ENDPROC
22221 ENDPROC(__get_user_1)
22222@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22223 ENTRY(__get_user_2)
22224 CFI_STARTPROC
22225 add $1,%_ASM_AX
22226+
22227+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22228 jc bad_get_user
22229 GET_THREAD_INFO(%_ASM_DX)
22230 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22231 jae bad_get_user
22232-2: movzwl -1(%_ASM_AX),%edx
22233+
22234+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22235+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22236+ cmp %_ASM_DX,%_ASM_AX
22237+ jae 1234f
22238+ add %_ASM_DX,%_ASM_AX
22239+1234:
22240+#endif
22241+
22242+#endif
22243+
22244+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22245 xor %eax,%eax
22246+ pax_force_retaddr
22247 ret
22248 CFI_ENDPROC
22249 ENDPROC(__get_user_2)
22250@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22251 ENTRY(__get_user_4)
22252 CFI_STARTPROC
22253 add $3,%_ASM_AX
22254+
22255+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22256 jc bad_get_user
22257 GET_THREAD_INFO(%_ASM_DX)
22258 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22259 jae bad_get_user
22260-3: mov -3(%_ASM_AX),%edx
22261+
22262+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22263+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22264+ cmp %_ASM_DX,%_ASM_AX
22265+ jae 1234f
22266+ add %_ASM_DX,%_ASM_AX
22267+1234:
22268+#endif
22269+
22270+#endif
22271+
22272+3: __copyuser_seg mov -3(%_ASM_AX),%edx
22273 xor %eax,%eax
22274+ pax_force_retaddr
22275 ret
22276 CFI_ENDPROC
22277 ENDPROC(__get_user_4)
22278@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22279 GET_THREAD_INFO(%_ASM_DX)
22280 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22281 jae bad_get_user
22282+
22283+#ifdef CONFIG_PAX_MEMORY_UDEREF
22284+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22285+ cmp %_ASM_DX,%_ASM_AX
22286+ jae 1234f
22287+ add %_ASM_DX,%_ASM_AX
22288+1234:
22289+#endif
22290+
22291 4: movq -7(%_ASM_AX),%_ASM_DX
22292 xor %eax,%eax
22293+ pax_force_retaddr
22294 ret
22295 CFI_ENDPROC
22296 ENDPROC(__get_user_8)
22297@@ -91,6 +152,7 @@ bad_get_user:
22298 CFI_STARTPROC
22299 xor %edx,%edx
22300 mov $(-EFAULT),%_ASM_AX
22301+ pax_force_retaddr
22302 ret
22303 CFI_ENDPROC
22304 END(bad_get_user)
22305diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22306index 05a95e7..326f2fa 100644
22307--- a/arch/x86/lib/iomap_copy_64.S
22308+++ b/arch/x86/lib/iomap_copy_64.S
22309@@ -17,6 +17,7 @@
22310
22311 #include <linux/linkage.h>
22312 #include <asm/dwarf2.h>
22313+#include <asm/alternative-asm.h>
22314
22315 /*
22316 * override generic version in lib/iomap_copy.c
22317@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22318 CFI_STARTPROC
22319 movl %edx,%ecx
22320 rep movsd
22321+ pax_force_retaddr
22322 ret
22323 CFI_ENDPROC
22324 ENDPROC(__iowrite32_copy)
22325diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22326index ad5441e..610e351 100644
22327--- a/arch/x86/lib/memcpy_64.S
22328+++ b/arch/x86/lib/memcpy_64.S
22329@@ -4,6 +4,7 @@
22330
22331 #include <asm/cpufeature.h>
22332 #include <asm/dwarf2.h>
22333+#include <asm/alternative-asm.h>
22334
22335 /*
22336 * memcpy - Copy a memory block.
22337@@ -34,6 +35,7 @@ memcpy_c:
22338 rep movsq
22339 movl %edx, %ecx
22340 rep movsb
22341+ pax_force_retaddr
22342 ret
22343 CFI_ENDPROC
22344 ENDPROC(memcpy_c)
22345@@ -118,6 +120,7 @@ ENTRY(memcpy)
22346 jnz .Lloop_1
22347
22348 .Lend:
22349+ pax_force_retaddr 0, 1
22350 ret
22351 CFI_ENDPROC
22352 ENDPROC(memcpy)
22353@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22354 * It is also a lot simpler. Use this when possible:
22355 */
22356
22357- .section .altinstr_replacement, "ax"
22358+ .section .altinstr_replacement, "a"
22359 1: .byte 0xeb /* jmp <disp8> */
22360 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22361 2:
22362diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22363index 2c59481..7e9ba4e 100644
22364--- a/arch/x86/lib/memset_64.S
22365+++ b/arch/x86/lib/memset_64.S
22366@@ -2,6 +2,7 @@
22367
22368 #include <linux/linkage.h>
22369 #include <asm/dwarf2.h>
22370+#include <asm/alternative-asm.h>
22371
22372 /*
22373 * ISO C memset - set a memory block to a byte value.
22374@@ -28,6 +29,7 @@ memset_c:
22375 movl %r8d,%ecx
22376 rep stosb
22377 movq %r9,%rax
22378+ pax_force_retaddr
22379 ret
22380 CFI_ENDPROC
22381 ENDPROC(memset_c)
22382@@ -35,13 +37,13 @@ ENDPROC(memset_c)
22383 ENTRY(memset)
22384 ENTRY(__memset)
22385 CFI_STARTPROC
22386- movq %rdi,%r10
22387 movq %rdx,%r11
22388
22389 /* expand byte value */
22390 movzbl %sil,%ecx
22391 movabs $0x0101010101010101,%rax
22392 mul %rcx /* with rax, clobbers rdx */
22393+ movq %rdi,%rdx
22394
22395 /* align dst */
22396 movl %edi,%r9d
22397@@ -95,7 +97,8 @@ ENTRY(__memset)
22398 jnz .Lloop_1
22399
22400 .Lende:
22401- movq %r10,%rax
22402+ movq %rdx,%rax
22403+ pax_force_retaddr
22404 ret
22405
22406 CFI_RESTORE_STATE
22407@@ -118,7 +121,7 @@ ENDPROC(__memset)
22408
22409 #include <asm/cpufeature.h>
22410
22411- .section .altinstr_replacement,"ax"
22412+ .section .altinstr_replacement,"a"
22413 1: .byte 0xeb /* jmp <disp8> */
22414 .byte (memset_c - memset) - (2f - 1b) /* offset */
22415 2:
22416diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22417index c9f2d9b..e7fd2c0 100644
22418--- a/arch/x86/lib/mmx_32.c
22419+++ b/arch/x86/lib/mmx_32.c
22420@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22421 {
22422 void *p;
22423 int i;
22424+ unsigned long cr0;
22425
22426 if (unlikely(in_interrupt()))
22427 return __memcpy(to, from, len);
22428@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22429 kernel_fpu_begin();
22430
22431 __asm__ __volatile__ (
22432- "1: prefetch (%0)\n" /* This set is 28 bytes */
22433- " prefetch 64(%0)\n"
22434- " prefetch 128(%0)\n"
22435- " prefetch 192(%0)\n"
22436- " prefetch 256(%0)\n"
22437+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22438+ " prefetch 64(%1)\n"
22439+ " prefetch 128(%1)\n"
22440+ " prefetch 192(%1)\n"
22441+ " prefetch 256(%1)\n"
22442 "2: \n"
22443 ".section .fixup, \"ax\"\n"
22444- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22445+ "3: \n"
22446+
22447+#ifdef CONFIG_PAX_KERNEXEC
22448+ " movl %%cr0, %0\n"
22449+ " movl %0, %%eax\n"
22450+ " andl $0xFFFEFFFF, %%eax\n"
22451+ " movl %%eax, %%cr0\n"
22452+#endif
22453+
22454+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22455+
22456+#ifdef CONFIG_PAX_KERNEXEC
22457+ " movl %0, %%cr0\n"
22458+#endif
22459+
22460 " jmp 2b\n"
22461 ".previous\n"
22462 _ASM_EXTABLE(1b, 3b)
22463- : : "r" (from));
22464+ : "=&r" (cr0) : "r" (from) : "ax");
22465
22466 for ( ; i > 5; i--) {
22467 __asm__ __volatile__ (
22468- "1: prefetch 320(%0)\n"
22469- "2: movq (%0), %%mm0\n"
22470- " movq 8(%0), %%mm1\n"
22471- " movq 16(%0), %%mm2\n"
22472- " movq 24(%0), %%mm3\n"
22473- " movq %%mm0, (%1)\n"
22474- " movq %%mm1, 8(%1)\n"
22475- " movq %%mm2, 16(%1)\n"
22476- " movq %%mm3, 24(%1)\n"
22477- " movq 32(%0), %%mm0\n"
22478- " movq 40(%0), %%mm1\n"
22479- " movq 48(%0), %%mm2\n"
22480- " movq 56(%0), %%mm3\n"
22481- " movq %%mm0, 32(%1)\n"
22482- " movq %%mm1, 40(%1)\n"
22483- " movq %%mm2, 48(%1)\n"
22484- " movq %%mm3, 56(%1)\n"
22485+ "1: prefetch 320(%1)\n"
22486+ "2: movq (%1), %%mm0\n"
22487+ " movq 8(%1), %%mm1\n"
22488+ " movq 16(%1), %%mm2\n"
22489+ " movq 24(%1), %%mm3\n"
22490+ " movq %%mm0, (%2)\n"
22491+ " movq %%mm1, 8(%2)\n"
22492+ " movq %%mm2, 16(%2)\n"
22493+ " movq %%mm3, 24(%2)\n"
22494+ " movq 32(%1), %%mm0\n"
22495+ " movq 40(%1), %%mm1\n"
22496+ " movq 48(%1), %%mm2\n"
22497+ " movq 56(%1), %%mm3\n"
22498+ " movq %%mm0, 32(%2)\n"
22499+ " movq %%mm1, 40(%2)\n"
22500+ " movq %%mm2, 48(%2)\n"
22501+ " movq %%mm3, 56(%2)\n"
22502 ".section .fixup, \"ax\"\n"
22503- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22504+ "3:\n"
22505+
22506+#ifdef CONFIG_PAX_KERNEXEC
22507+ " movl %%cr0, %0\n"
22508+ " movl %0, %%eax\n"
22509+ " andl $0xFFFEFFFF, %%eax\n"
22510+ " movl %%eax, %%cr0\n"
22511+#endif
22512+
22513+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22514+
22515+#ifdef CONFIG_PAX_KERNEXEC
22516+ " movl %0, %%cr0\n"
22517+#endif
22518+
22519 " jmp 2b\n"
22520 ".previous\n"
22521 _ASM_EXTABLE(1b, 3b)
22522- : : "r" (from), "r" (to) : "memory");
22523+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22524
22525 from += 64;
22526 to += 64;
22527@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22528 static void fast_copy_page(void *to, void *from)
22529 {
22530 int i;
22531+ unsigned long cr0;
22532
22533 kernel_fpu_begin();
22534
22535@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22536 * but that is for later. -AV
22537 */
22538 __asm__ __volatile__(
22539- "1: prefetch (%0)\n"
22540- " prefetch 64(%0)\n"
22541- " prefetch 128(%0)\n"
22542- " prefetch 192(%0)\n"
22543- " prefetch 256(%0)\n"
22544+ "1: prefetch (%1)\n"
22545+ " prefetch 64(%1)\n"
22546+ " prefetch 128(%1)\n"
22547+ " prefetch 192(%1)\n"
22548+ " prefetch 256(%1)\n"
22549 "2: \n"
22550 ".section .fixup, \"ax\"\n"
22551- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22552+ "3: \n"
22553+
22554+#ifdef CONFIG_PAX_KERNEXEC
22555+ " movl %%cr0, %0\n"
22556+ " movl %0, %%eax\n"
22557+ " andl $0xFFFEFFFF, %%eax\n"
22558+ " movl %%eax, %%cr0\n"
22559+#endif
22560+
22561+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22562+
22563+#ifdef CONFIG_PAX_KERNEXEC
22564+ " movl %0, %%cr0\n"
22565+#endif
22566+
22567 " jmp 2b\n"
22568 ".previous\n"
22569- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22570+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22571
22572 for (i = 0; i < (4096-320)/64; i++) {
22573 __asm__ __volatile__ (
22574- "1: prefetch 320(%0)\n"
22575- "2: movq (%0), %%mm0\n"
22576- " movntq %%mm0, (%1)\n"
22577- " movq 8(%0), %%mm1\n"
22578- " movntq %%mm1, 8(%1)\n"
22579- " movq 16(%0), %%mm2\n"
22580- " movntq %%mm2, 16(%1)\n"
22581- " movq 24(%0), %%mm3\n"
22582- " movntq %%mm3, 24(%1)\n"
22583- " movq 32(%0), %%mm4\n"
22584- " movntq %%mm4, 32(%1)\n"
22585- " movq 40(%0), %%mm5\n"
22586- " movntq %%mm5, 40(%1)\n"
22587- " movq 48(%0), %%mm6\n"
22588- " movntq %%mm6, 48(%1)\n"
22589- " movq 56(%0), %%mm7\n"
22590- " movntq %%mm7, 56(%1)\n"
22591+ "1: prefetch 320(%1)\n"
22592+ "2: movq (%1), %%mm0\n"
22593+ " movntq %%mm0, (%2)\n"
22594+ " movq 8(%1), %%mm1\n"
22595+ " movntq %%mm1, 8(%2)\n"
22596+ " movq 16(%1), %%mm2\n"
22597+ " movntq %%mm2, 16(%2)\n"
22598+ " movq 24(%1), %%mm3\n"
22599+ " movntq %%mm3, 24(%2)\n"
22600+ " movq 32(%1), %%mm4\n"
22601+ " movntq %%mm4, 32(%2)\n"
22602+ " movq 40(%1), %%mm5\n"
22603+ " movntq %%mm5, 40(%2)\n"
22604+ " movq 48(%1), %%mm6\n"
22605+ " movntq %%mm6, 48(%2)\n"
22606+ " movq 56(%1), %%mm7\n"
22607+ " movntq %%mm7, 56(%2)\n"
22608 ".section .fixup, \"ax\"\n"
22609- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22610+ "3:\n"
22611+
22612+#ifdef CONFIG_PAX_KERNEXEC
22613+ " movl %%cr0, %0\n"
22614+ " movl %0, %%eax\n"
22615+ " andl $0xFFFEFFFF, %%eax\n"
22616+ " movl %%eax, %%cr0\n"
22617+#endif
22618+
22619+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22620+
22621+#ifdef CONFIG_PAX_KERNEXEC
22622+ " movl %0, %%cr0\n"
22623+#endif
22624+
22625 " jmp 2b\n"
22626 ".previous\n"
22627- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22628+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22629
22630 from += 64;
22631 to += 64;
22632@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22633 static void fast_copy_page(void *to, void *from)
22634 {
22635 int i;
22636+ unsigned long cr0;
22637
22638 kernel_fpu_begin();
22639
22640 __asm__ __volatile__ (
22641- "1: prefetch (%0)\n"
22642- " prefetch 64(%0)\n"
22643- " prefetch 128(%0)\n"
22644- " prefetch 192(%0)\n"
22645- " prefetch 256(%0)\n"
22646+ "1: prefetch (%1)\n"
22647+ " prefetch 64(%1)\n"
22648+ " prefetch 128(%1)\n"
22649+ " prefetch 192(%1)\n"
22650+ " prefetch 256(%1)\n"
22651 "2: \n"
22652 ".section .fixup, \"ax\"\n"
22653- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22654+ "3: \n"
22655+
22656+#ifdef CONFIG_PAX_KERNEXEC
22657+ " movl %%cr0, %0\n"
22658+ " movl %0, %%eax\n"
22659+ " andl $0xFFFEFFFF, %%eax\n"
22660+ " movl %%eax, %%cr0\n"
22661+#endif
22662+
22663+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22664+
22665+#ifdef CONFIG_PAX_KERNEXEC
22666+ " movl %0, %%cr0\n"
22667+#endif
22668+
22669 " jmp 2b\n"
22670 ".previous\n"
22671- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22672+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22673
22674 for (i = 0; i < 4096/64; i++) {
22675 __asm__ __volatile__ (
22676- "1: prefetch 320(%0)\n"
22677- "2: movq (%0), %%mm0\n"
22678- " movq 8(%0), %%mm1\n"
22679- " movq 16(%0), %%mm2\n"
22680- " movq 24(%0), %%mm3\n"
22681- " movq %%mm0, (%1)\n"
22682- " movq %%mm1, 8(%1)\n"
22683- " movq %%mm2, 16(%1)\n"
22684- " movq %%mm3, 24(%1)\n"
22685- " movq 32(%0), %%mm0\n"
22686- " movq 40(%0), %%mm1\n"
22687- " movq 48(%0), %%mm2\n"
22688- " movq 56(%0), %%mm3\n"
22689- " movq %%mm0, 32(%1)\n"
22690- " movq %%mm1, 40(%1)\n"
22691- " movq %%mm2, 48(%1)\n"
22692- " movq %%mm3, 56(%1)\n"
22693+ "1: prefetch 320(%1)\n"
22694+ "2: movq (%1), %%mm0\n"
22695+ " movq 8(%1), %%mm1\n"
22696+ " movq 16(%1), %%mm2\n"
22697+ " movq 24(%1), %%mm3\n"
22698+ " movq %%mm0, (%2)\n"
22699+ " movq %%mm1, 8(%2)\n"
22700+ " movq %%mm2, 16(%2)\n"
22701+ " movq %%mm3, 24(%2)\n"
22702+ " movq 32(%1), %%mm0\n"
22703+ " movq 40(%1), %%mm1\n"
22704+ " movq 48(%1), %%mm2\n"
22705+ " movq 56(%1), %%mm3\n"
22706+ " movq %%mm0, 32(%2)\n"
22707+ " movq %%mm1, 40(%2)\n"
22708+ " movq %%mm2, 48(%2)\n"
22709+ " movq %%mm3, 56(%2)\n"
22710 ".section .fixup, \"ax\"\n"
22711- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22712+ "3:\n"
22713+
22714+#ifdef CONFIG_PAX_KERNEXEC
22715+ " movl %%cr0, %0\n"
22716+ " movl %0, %%eax\n"
22717+ " andl $0xFFFEFFFF, %%eax\n"
22718+ " movl %%eax, %%cr0\n"
22719+#endif
22720+
22721+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22722+
22723+#ifdef CONFIG_PAX_KERNEXEC
22724+ " movl %0, %%cr0\n"
22725+#endif
22726+
22727 " jmp 2b\n"
22728 ".previous\n"
22729 _ASM_EXTABLE(1b, 3b)
22730- : : "r" (from), "r" (to) : "memory");
22731+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22732
22733 from += 64;
22734 to += 64;
22735diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22736index 69fa106..adda88b 100644
22737--- a/arch/x86/lib/msr-reg.S
22738+++ b/arch/x86/lib/msr-reg.S
22739@@ -3,6 +3,7 @@
22740 #include <asm/dwarf2.h>
22741 #include <asm/asm.h>
22742 #include <asm/msr.h>
22743+#include <asm/alternative-asm.h>
22744
22745 #ifdef CONFIG_X86_64
22746 /*
22747@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22748 CFI_STARTPROC
22749 pushq_cfi %rbx
22750 pushq_cfi %rbp
22751- movq %rdi, %r10 /* Save pointer */
22752+ movq %rdi, %r9 /* Save pointer */
22753 xorl %r11d, %r11d /* Return value */
22754 movl (%rdi), %eax
22755 movl 4(%rdi), %ecx
22756@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22757 movl 28(%rdi), %edi
22758 CFI_REMEMBER_STATE
22759 1: \op
22760-2: movl %eax, (%r10)
22761+2: movl %eax, (%r9)
22762 movl %r11d, %eax /* Return value */
22763- movl %ecx, 4(%r10)
22764- movl %edx, 8(%r10)
22765- movl %ebx, 12(%r10)
22766- movl %ebp, 20(%r10)
22767- movl %esi, 24(%r10)
22768- movl %edi, 28(%r10)
22769+ movl %ecx, 4(%r9)
22770+ movl %edx, 8(%r9)
22771+ movl %ebx, 12(%r9)
22772+ movl %ebp, 20(%r9)
22773+ movl %esi, 24(%r9)
22774+ movl %edi, 28(%r9)
22775 popq_cfi %rbp
22776 popq_cfi %rbx
22777+ pax_force_retaddr
22778 ret
22779 3:
22780 CFI_RESTORE_STATE
22781diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22782index 36b0d15..d381858 100644
22783--- a/arch/x86/lib/putuser.S
22784+++ b/arch/x86/lib/putuser.S
22785@@ -15,7 +15,9 @@
22786 #include <asm/thread_info.h>
22787 #include <asm/errno.h>
22788 #include <asm/asm.h>
22789-
22790+#include <asm/segment.h>
22791+#include <asm/pgtable.h>
22792+#include <asm/alternative-asm.h>
22793
22794 /*
22795 * __put_user_X
22796@@ -29,52 +31,119 @@
22797 * as they get called from within inline assembly.
22798 */
22799
22800-#define ENTER CFI_STARTPROC ; \
22801- GET_THREAD_INFO(%_ASM_BX)
22802-#define EXIT ret ; \
22803+#define ENTER CFI_STARTPROC
22804+#define EXIT pax_force_retaddr; ret ; \
22805 CFI_ENDPROC
22806
22807+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22808+#define _DEST %_ASM_CX,%_ASM_BX
22809+#else
22810+#define _DEST %_ASM_CX
22811+#endif
22812+
22813+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22814+#define __copyuser_seg gs;
22815+#else
22816+#define __copyuser_seg
22817+#endif
22818+
22819 .text
22820 ENTRY(__put_user_1)
22821 ENTER
22822+
22823+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22824+ GET_THREAD_INFO(%_ASM_BX)
22825 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22826 jae bad_put_user
22827-1: movb %al,(%_ASM_CX)
22828+
22829+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22830+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22831+ cmp %_ASM_BX,%_ASM_CX
22832+ jb 1234f
22833+ xor %ebx,%ebx
22834+1234:
22835+#endif
22836+
22837+#endif
22838+
22839+1: __copyuser_seg movb %al,(_DEST)
22840 xor %eax,%eax
22841 EXIT
22842 ENDPROC(__put_user_1)
22843
22844 ENTRY(__put_user_2)
22845 ENTER
22846+
22847+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22848+ GET_THREAD_INFO(%_ASM_BX)
22849 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22850 sub $1,%_ASM_BX
22851 cmp %_ASM_BX,%_ASM_CX
22852 jae bad_put_user
22853-2: movw %ax,(%_ASM_CX)
22854+
22855+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22856+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22857+ cmp %_ASM_BX,%_ASM_CX
22858+ jb 1234f
22859+ xor %ebx,%ebx
22860+1234:
22861+#endif
22862+
22863+#endif
22864+
22865+2: __copyuser_seg movw %ax,(_DEST)
22866 xor %eax,%eax
22867 EXIT
22868 ENDPROC(__put_user_2)
22869
22870 ENTRY(__put_user_4)
22871 ENTER
22872+
22873+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22874+ GET_THREAD_INFO(%_ASM_BX)
22875 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22876 sub $3,%_ASM_BX
22877 cmp %_ASM_BX,%_ASM_CX
22878 jae bad_put_user
22879-3: movl %eax,(%_ASM_CX)
22880+
22881+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22882+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22883+ cmp %_ASM_BX,%_ASM_CX
22884+ jb 1234f
22885+ xor %ebx,%ebx
22886+1234:
22887+#endif
22888+
22889+#endif
22890+
22891+3: __copyuser_seg movl %eax,(_DEST)
22892 xor %eax,%eax
22893 EXIT
22894 ENDPROC(__put_user_4)
22895
22896 ENTRY(__put_user_8)
22897 ENTER
22898+
22899+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22900+ GET_THREAD_INFO(%_ASM_BX)
22901 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22902 sub $7,%_ASM_BX
22903 cmp %_ASM_BX,%_ASM_CX
22904 jae bad_put_user
22905-4: mov %_ASM_AX,(%_ASM_CX)
22906+
22907+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22908+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22909+ cmp %_ASM_BX,%_ASM_CX
22910+ jb 1234f
22911+ xor %ebx,%ebx
22912+1234:
22913+#endif
22914+
22915+#endif
22916+
22917+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22918 #ifdef CONFIG_X86_32
22919-5: movl %edx,4(%_ASM_CX)
22920+5: __copyuser_seg movl %edx,4(_DEST)
22921 #endif
22922 xor %eax,%eax
22923 EXIT
22924diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
22925index 05ea55f..6345b9a 100644
22926--- a/arch/x86/lib/rwlock_64.S
22927+++ b/arch/x86/lib/rwlock_64.S
22928@@ -2,6 +2,7 @@
22929
22930 #include <linux/linkage.h>
22931 #include <asm/rwlock.h>
22932+#include <asm/asm.h>
22933 #include <asm/alternative-asm.h>
22934 #include <asm/dwarf2.h>
22935
22936@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
22937 CFI_STARTPROC
22938 LOCK_PREFIX
22939 addl $RW_LOCK_BIAS,(%rdi)
22940+
22941+#ifdef CONFIG_PAX_REFCOUNT
22942+ jno 1234f
22943+ LOCK_PREFIX
22944+ subl $RW_LOCK_BIAS,(%rdi)
22945+ int $4
22946+1234:
22947+ _ASM_EXTABLE(1234b, 1234b)
22948+#endif
22949+
22950 1: rep
22951 nop
22952 cmpl $RW_LOCK_BIAS,(%rdi)
22953 jne 1b
22954 LOCK_PREFIX
22955 subl $RW_LOCK_BIAS,(%rdi)
22956+
22957+#ifdef CONFIG_PAX_REFCOUNT
22958+ jno 1234f
22959+ LOCK_PREFIX
22960+ addl $RW_LOCK_BIAS,(%rdi)
22961+ int $4
22962+1234:
22963+ _ASM_EXTABLE(1234b, 1234b)
22964+#endif
22965+
22966 jnz __write_lock_failed
22967+ pax_force_retaddr
22968 ret
22969 CFI_ENDPROC
22970 END(__write_lock_failed)
22971@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
22972 CFI_STARTPROC
22973 LOCK_PREFIX
22974 incl (%rdi)
22975+
22976+#ifdef CONFIG_PAX_REFCOUNT
22977+ jno 1234f
22978+ LOCK_PREFIX
22979+ decl (%rdi)
22980+ int $4
22981+1234:
22982+ _ASM_EXTABLE(1234b, 1234b)
22983+#endif
22984+
22985 1: rep
22986 nop
22987 cmpl $1,(%rdi)
22988 js 1b
22989 LOCK_PREFIX
22990 decl (%rdi)
22991+
22992+#ifdef CONFIG_PAX_REFCOUNT
22993+ jno 1234f
22994+ LOCK_PREFIX
22995+ incl (%rdi)
22996+ int $4
22997+1234:
22998+ _ASM_EXTABLE(1234b, 1234b)
22999+#endif
23000+
23001 js __read_lock_failed
23002+ pax_force_retaddr
23003 ret
23004 CFI_ENDPROC
23005 END(__read_lock_failed)
23006diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23007index 15acecf..f768b10 100644
23008--- a/arch/x86/lib/rwsem_64.S
23009+++ b/arch/x86/lib/rwsem_64.S
23010@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23011 call rwsem_down_read_failed
23012 popq %rdx
23013 restore_common_regs
23014+ pax_force_retaddr
23015 ret
23016 ENDPROC(call_rwsem_down_read_failed)
23017
23018@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23019 movq %rax,%rdi
23020 call rwsem_down_write_failed
23021 restore_common_regs
23022+ pax_force_retaddr
23023 ret
23024 ENDPROC(call_rwsem_down_write_failed)
23025
23026@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23027 movq %rax,%rdi
23028 call rwsem_wake
23029 restore_common_regs
23030-1: ret
23031+1: pax_force_retaddr
23032+ ret
23033 ENDPROC(call_rwsem_wake)
23034
23035 /* Fix up special calling conventions */
23036@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23037 call rwsem_downgrade_wake
23038 popq %rdx
23039 restore_common_regs
23040+ pax_force_retaddr
23041 ret
23042 ENDPROC(call_rwsem_downgrade_wake)
23043diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23044index bf9a7d5..fb06ab5 100644
23045--- a/arch/x86/lib/thunk_64.S
23046+++ b/arch/x86/lib/thunk_64.S
23047@@ -10,7 +10,8 @@
23048 #include <asm/dwarf2.h>
23049 #include <asm/calling.h>
23050 #include <asm/rwlock.h>
23051-
23052+ #include <asm/alternative-asm.h>
23053+
23054 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23055 .macro thunk name,func
23056 .globl \name
23057@@ -70,6 +71,7 @@
23058 SAVE_ARGS
23059 restore:
23060 RESTORE_ARGS
23061+ pax_force_retaddr
23062 ret
23063 CFI_ENDPROC
23064
23065@@ -77,5 +79,6 @@ restore:
23066 SAVE_ARGS
23067 restore_norax:
23068 RESTORE_ARGS 1
23069+ pax_force_retaddr
23070 ret
23071 CFI_ENDPROC
23072diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23073index 1f118d4..ec4a953 100644
23074--- a/arch/x86/lib/usercopy_32.c
23075+++ b/arch/x86/lib/usercopy_32.c
23076@@ -43,7 +43,7 @@ do { \
23077 __asm__ __volatile__( \
23078 " testl %1,%1\n" \
23079 " jz 2f\n" \
23080- "0: lodsb\n" \
23081+ "0: "__copyuser_seg"lodsb\n" \
23082 " stosb\n" \
23083 " testb %%al,%%al\n" \
23084 " jz 1f\n" \
23085@@ -128,10 +128,12 @@ do { \
23086 int __d0; \
23087 might_fault(); \
23088 __asm__ __volatile__( \
23089+ __COPYUSER_SET_ES \
23090 "0: rep; stosl\n" \
23091 " movl %2,%0\n" \
23092 "1: rep; stosb\n" \
23093 "2:\n" \
23094+ __COPYUSER_RESTORE_ES \
23095 ".section .fixup,\"ax\"\n" \
23096 "3: lea 0(%2,%0,4),%0\n" \
23097 " jmp 2b\n" \
23098@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23099 might_fault();
23100
23101 __asm__ __volatile__(
23102+ __COPYUSER_SET_ES
23103 " testl %0, %0\n"
23104 " jz 3f\n"
23105 " andl %0,%%ecx\n"
23106@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23107 " subl %%ecx,%0\n"
23108 " addl %0,%%eax\n"
23109 "1:\n"
23110+ __COPYUSER_RESTORE_ES
23111 ".section .fixup,\"ax\"\n"
23112 "2: xorl %%eax,%%eax\n"
23113 " jmp 1b\n"
23114@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23115
23116 #ifdef CONFIG_X86_INTEL_USERCOPY
23117 static unsigned long
23118-__copy_user_intel(void __user *to, const void *from, unsigned long size)
23119+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23120 {
23121 int d0, d1;
23122 __asm__ __volatile__(
23123@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23124 " .align 2,0x90\n"
23125 "3: movl 0(%4), %%eax\n"
23126 "4: movl 4(%4), %%edx\n"
23127- "5: movl %%eax, 0(%3)\n"
23128- "6: movl %%edx, 4(%3)\n"
23129+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23130+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23131 "7: movl 8(%4), %%eax\n"
23132 "8: movl 12(%4),%%edx\n"
23133- "9: movl %%eax, 8(%3)\n"
23134- "10: movl %%edx, 12(%3)\n"
23135+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23136+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23137 "11: movl 16(%4), %%eax\n"
23138 "12: movl 20(%4), %%edx\n"
23139- "13: movl %%eax, 16(%3)\n"
23140- "14: movl %%edx, 20(%3)\n"
23141+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23142+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23143 "15: movl 24(%4), %%eax\n"
23144 "16: movl 28(%4), %%edx\n"
23145- "17: movl %%eax, 24(%3)\n"
23146- "18: movl %%edx, 28(%3)\n"
23147+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23148+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23149 "19: movl 32(%4), %%eax\n"
23150 "20: movl 36(%4), %%edx\n"
23151- "21: movl %%eax, 32(%3)\n"
23152- "22: movl %%edx, 36(%3)\n"
23153+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23154+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23155 "23: movl 40(%4), %%eax\n"
23156 "24: movl 44(%4), %%edx\n"
23157- "25: movl %%eax, 40(%3)\n"
23158- "26: movl %%edx, 44(%3)\n"
23159+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23160+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23161 "27: movl 48(%4), %%eax\n"
23162 "28: movl 52(%4), %%edx\n"
23163- "29: movl %%eax, 48(%3)\n"
23164- "30: movl %%edx, 52(%3)\n"
23165+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23166+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23167 "31: movl 56(%4), %%eax\n"
23168 "32: movl 60(%4), %%edx\n"
23169- "33: movl %%eax, 56(%3)\n"
23170- "34: movl %%edx, 60(%3)\n"
23171+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23172+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23173 " addl $-64, %0\n"
23174 " addl $64, %4\n"
23175 " addl $64, %3\n"
23176@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23177 " shrl $2, %0\n"
23178 " andl $3, %%eax\n"
23179 " cld\n"
23180+ __COPYUSER_SET_ES
23181 "99: rep; movsl\n"
23182 "36: movl %%eax, %0\n"
23183 "37: rep; movsb\n"
23184 "100:\n"
23185+ __COPYUSER_RESTORE_ES
23186+ ".section .fixup,\"ax\"\n"
23187+ "101: lea 0(%%eax,%0,4),%0\n"
23188+ " jmp 100b\n"
23189+ ".previous\n"
23190+ ".section __ex_table,\"a\"\n"
23191+ " .align 4\n"
23192+ " .long 1b,100b\n"
23193+ " .long 2b,100b\n"
23194+ " .long 3b,100b\n"
23195+ " .long 4b,100b\n"
23196+ " .long 5b,100b\n"
23197+ " .long 6b,100b\n"
23198+ " .long 7b,100b\n"
23199+ " .long 8b,100b\n"
23200+ " .long 9b,100b\n"
23201+ " .long 10b,100b\n"
23202+ " .long 11b,100b\n"
23203+ " .long 12b,100b\n"
23204+ " .long 13b,100b\n"
23205+ " .long 14b,100b\n"
23206+ " .long 15b,100b\n"
23207+ " .long 16b,100b\n"
23208+ " .long 17b,100b\n"
23209+ " .long 18b,100b\n"
23210+ " .long 19b,100b\n"
23211+ " .long 20b,100b\n"
23212+ " .long 21b,100b\n"
23213+ " .long 22b,100b\n"
23214+ " .long 23b,100b\n"
23215+ " .long 24b,100b\n"
23216+ " .long 25b,100b\n"
23217+ " .long 26b,100b\n"
23218+ " .long 27b,100b\n"
23219+ " .long 28b,100b\n"
23220+ " .long 29b,100b\n"
23221+ " .long 30b,100b\n"
23222+ " .long 31b,100b\n"
23223+ " .long 32b,100b\n"
23224+ " .long 33b,100b\n"
23225+ " .long 34b,100b\n"
23226+ " .long 35b,100b\n"
23227+ " .long 36b,100b\n"
23228+ " .long 37b,100b\n"
23229+ " .long 99b,101b\n"
23230+ ".previous"
23231+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23232+ : "1"(to), "2"(from), "0"(size)
23233+ : "eax", "edx", "memory");
23234+ return size;
23235+}
23236+
23237+static unsigned long
23238+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23239+{
23240+ int d0, d1;
23241+ __asm__ __volatile__(
23242+ " .align 2,0x90\n"
23243+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23244+ " cmpl $67, %0\n"
23245+ " jbe 3f\n"
23246+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23247+ " .align 2,0x90\n"
23248+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23249+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23250+ "5: movl %%eax, 0(%3)\n"
23251+ "6: movl %%edx, 4(%3)\n"
23252+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23253+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23254+ "9: movl %%eax, 8(%3)\n"
23255+ "10: movl %%edx, 12(%3)\n"
23256+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23257+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23258+ "13: movl %%eax, 16(%3)\n"
23259+ "14: movl %%edx, 20(%3)\n"
23260+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23261+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23262+ "17: movl %%eax, 24(%3)\n"
23263+ "18: movl %%edx, 28(%3)\n"
23264+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23265+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23266+ "21: movl %%eax, 32(%3)\n"
23267+ "22: movl %%edx, 36(%3)\n"
23268+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23269+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23270+ "25: movl %%eax, 40(%3)\n"
23271+ "26: movl %%edx, 44(%3)\n"
23272+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23273+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23274+ "29: movl %%eax, 48(%3)\n"
23275+ "30: movl %%edx, 52(%3)\n"
23276+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23277+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23278+ "33: movl %%eax, 56(%3)\n"
23279+ "34: movl %%edx, 60(%3)\n"
23280+ " addl $-64, %0\n"
23281+ " addl $64, %4\n"
23282+ " addl $64, %3\n"
23283+ " cmpl $63, %0\n"
23284+ " ja 1b\n"
23285+ "35: movl %0, %%eax\n"
23286+ " shrl $2, %0\n"
23287+ " andl $3, %%eax\n"
23288+ " cld\n"
23289+ "99: rep; "__copyuser_seg" movsl\n"
23290+ "36: movl %%eax, %0\n"
23291+ "37: rep; "__copyuser_seg" movsb\n"
23292+ "100:\n"
23293 ".section .fixup,\"ax\"\n"
23294 "101: lea 0(%%eax,%0,4),%0\n"
23295 " jmp 100b\n"
23296@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23297 int d0, d1;
23298 __asm__ __volatile__(
23299 " .align 2,0x90\n"
23300- "0: movl 32(%4), %%eax\n"
23301+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23302 " cmpl $67, %0\n"
23303 " jbe 2f\n"
23304- "1: movl 64(%4), %%eax\n"
23305+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23306 " .align 2,0x90\n"
23307- "2: movl 0(%4), %%eax\n"
23308- "21: movl 4(%4), %%edx\n"
23309+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23310+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23311 " movl %%eax, 0(%3)\n"
23312 " movl %%edx, 4(%3)\n"
23313- "3: movl 8(%4), %%eax\n"
23314- "31: movl 12(%4),%%edx\n"
23315+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23316+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23317 " movl %%eax, 8(%3)\n"
23318 " movl %%edx, 12(%3)\n"
23319- "4: movl 16(%4), %%eax\n"
23320- "41: movl 20(%4), %%edx\n"
23321+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23322+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23323 " movl %%eax, 16(%3)\n"
23324 " movl %%edx, 20(%3)\n"
23325- "10: movl 24(%4), %%eax\n"
23326- "51: movl 28(%4), %%edx\n"
23327+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23328+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23329 " movl %%eax, 24(%3)\n"
23330 " movl %%edx, 28(%3)\n"
23331- "11: movl 32(%4), %%eax\n"
23332- "61: movl 36(%4), %%edx\n"
23333+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23334+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23335 " movl %%eax, 32(%3)\n"
23336 " movl %%edx, 36(%3)\n"
23337- "12: movl 40(%4), %%eax\n"
23338- "71: movl 44(%4), %%edx\n"
23339+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23340+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23341 " movl %%eax, 40(%3)\n"
23342 " movl %%edx, 44(%3)\n"
23343- "13: movl 48(%4), %%eax\n"
23344- "81: movl 52(%4), %%edx\n"
23345+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23346+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23347 " movl %%eax, 48(%3)\n"
23348 " movl %%edx, 52(%3)\n"
23349- "14: movl 56(%4), %%eax\n"
23350- "91: movl 60(%4), %%edx\n"
23351+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23352+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23353 " movl %%eax, 56(%3)\n"
23354 " movl %%edx, 60(%3)\n"
23355 " addl $-64, %0\n"
23356@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23357 " shrl $2, %0\n"
23358 " andl $3, %%eax\n"
23359 " cld\n"
23360- "6: rep; movsl\n"
23361+ "6: rep; "__copyuser_seg" movsl\n"
23362 " movl %%eax,%0\n"
23363- "7: rep; movsb\n"
23364+ "7: rep; "__copyuser_seg" movsb\n"
23365 "8:\n"
23366 ".section .fixup,\"ax\"\n"
23367 "9: lea 0(%%eax,%0,4),%0\n"
23368@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23369
23370 __asm__ __volatile__(
23371 " .align 2,0x90\n"
23372- "0: movl 32(%4), %%eax\n"
23373+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23374 " cmpl $67, %0\n"
23375 " jbe 2f\n"
23376- "1: movl 64(%4), %%eax\n"
23377+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23378 " .align 2,0x90\n"
23379- "2: movl 0(%4), %%eax\n"
23380- "21: movl 4(%4), %%edx\n"
23381+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23382+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23383 " movnti %%eax, 0(%3)\n"
23384 " movnti %%edx, 4(%3)\n"
23385- "3: movl 8(%4), %%eax\n"
23386- "31: movl 12(%4),%%edx\n"
23387+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23388+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23389 " movnti %%eax, 8(%3)\n"
23390 " movnti %%edx, 12(%3)\n"
23391- "4: movl 16(%4), %%eax\n"
23392- "41: movl 20(%4), %%edx\n"
23393+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23394+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23395 " movnti %%eax, 16(%3)\n"
23396 " movnti %%edx, 20(%3)\n"
23397- "10: movl 24(%4), %%eax\n"
23398- "51: movl 28(%4), %%edx\n"
23399+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23400+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23401 " movnti %%eax, 24(%3)\n"
23402 " movnti %%edx, 28(%3)\n"
23403- "11: movl 32(%4), %%eax\n"
23404- "61: movl 36(%4), %%edx\n"
23405+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23406+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23407 " movnti %%eax, 32(%3)\n"
23408 " movnti %%edx, 36(%3)\n"
23409- "12: movl 40(%4), %%eax\n"
23410- "71: movl 44(%4), %%edx\n"
23411+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23412+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23413 " movnti %%eax, 40(%3)\n"
23414 " movnti %%edx, 44(%3)\n"
23415- "13: movl 48(%4), %%eax\n"
23416- "81: movl 52(%4), %%edx\n"
23417+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23418+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23419 " movnti %%eax, 48(%3)\n"
23420 " movnti %%edx, 52(%3)\n"
23421- "14: movl 56(%4), %%eax\n"
23422- "91: movl 60(%4), %%edx\n"
23423+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23424+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23425 " movnti %%eax, 56(%3)\n"
23426 " movnti %%edx, 60(%3)\n"
23427 " addl $-64, %0\n"
23428@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23429 " shrl $2, %0\n"
23430 " andl $3, %%eax\n"
23431 " cld\n"
23432- "6: rep; movsl\n"
23433+ "6: rep; "__copyuser_seg" movsl\n"
23434 " movl %%eax,%0\n"
23435- "7: rep; movsb\n"
23436+ "7: rep; "__copyuser_seg" movsb\n"
23437 "8:\n"
23438 ".section .fixup,\"ax\"\n"
23439 "9: lea 0(%%eax,%0,4),%0\n"
23440@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23441
23442 __asm__ __volatile__(
23443 " .align 2,0x90\n"
23444- "0: movl 32(%4), %%eax\n"
23445+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23446 " cmpl $67, %0\n"
23447 " jbe 2f\n"
23448- "1: movl 64(%4), %%eax\n"
23449+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23450 " .align 2,0x90\n"
23451- "2: movl 0(%4), %%eax\n"
23452- "21: movl 4(%4), %%edx\n"
23453+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23454+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23455 " movnti %%eax, 0(%3)\n"
23456 " movnti %%edx, 4(%3)\n"
23457- "3: movl 8(%4), %%eax\n"
23458- "31: movl 12(%4),%%edx\n"
23459+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23460+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23461 " movnti %%eax, 8(%3)\n"
23462 " movnti %%edx, 12(%3)\n"
23463- "4: movl 16(%4), %%eax\n"
23464- "41: movl 20(%4), %%edx\n"
23465+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23466+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23467 " movnti %%eax, 16(%3)\n"
23468 " movnti %%edx, 20(%3)\n"
23469- "10: movl 24(%4), %%eax\n"
23470- "51: movl 28(%4), %%edx\n"
23471+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23472+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23473 " movnti %%eax, 24(%3)\n"
23474 " movnti %%edx, 28(%3)\n"
23475- "11: movl 32(%4), %%eax\n"
23476- "61: movl 36(%4), %%edx\n"
23477+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23478+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23479 " movnti %%eax, 32(%3)\n"
23480 " movnti %%edx, 36(%3)\n"
23481- "12: movl 40(%4), %%eax\n"
23482- "71: movl 44(%4), %%edx\n"
23483+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23484+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23485 " movnti %%eax, 40(%3)\n"
23486 " movnti %%edx, 44(%3)\n"
23487- "13: movl 48(%4), %%eax\n"
23488- "81: movl 52(%4), %%edx\n"
23489+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23490+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23491 " movnti %%eax, 48(%3)\n"
23492 " movnti %%edx, 52(%3)\n"
23493- "14: movl 56(%4), %%eax\n"
23494- "91: movl 60(%4), %%edx\n"
23495+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23496+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23497 " movnti %%eax, 56(%3)\n"
23498 " movnti %%edx, 60(%3)\n"
23499 " addl $-64, %0\n"
23500@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23501 " shrl $2, %0\n"
23502 " andl $3, %%eax\n"
23503 " cld\n"
23504- "6: rep; movsl\n"
23505+ "6: rep; "__copyuser_seg" movsl\n"
23506 " movl %%eax,%0\n"
23507- "7: rep; movsb\n"
23508+ "7: rep; "__copyuser_seg" movsb\n"
23509 "8:\n"
23510 ".section .fixup,\"ax\"\n"
23511 "9: lea 0(%%eax,%0,4),%0\n"
23512@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23513 */
23514 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23515 unsigned long size);
23516-unsigned long __copy_user_intel(void __user *to, const void *from,
23517+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23518+ unsigned long size);
23519+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23520 unsigned long size);
23521 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23522 const void __user *from, unsigned long size);
23523 #endif /* CONFIG_X86_INTEL_USERCOPY */
23524
23525 /* Generic arbitrary sized copy. */
23526-#define __copy_user(to, from, size) \
23527+#define __copy_user(to, from, size, prefix, set, restore) \
23528 do { \
23529 int __d0, __d1, __d2; \
23530 __asm__ __volatile__( \
23531+ set \
23532 " cmp $7,%0\n" \
23533 " jbe 1f\n" \
23534 " movl %1,%0\n" \
23535 " negl %0\n" \
23536 " andl $7,%0\n" \
23537 " subl %0,%3\n" \
23538- "4: rep; movsb\n" \
23539+ "4: rep; "prefix"movsb\n" \
23540 " movl %3,%0\n" \
23541 " shrl $2,%0\n" \
23542 " andl $3,%3\n" \
23543 " .align 2,0x90\n" \
23544- "0: rep; movsl\n" \
23545+ "0: rep; "prefix"movsl\n" \
23546 " movl %3,%0\n" \
23547- "1: rep; movsb\n" \
23548+ "1: rep; "prefix"movsb\n" \
23549 "2:\n" \
23550+ restore \
23551 ".section .fixup,\"ax\"\n" \
23552 "5: addl %3,%0\n" \
23553 " jmp 2b\n" \
23554@@ -682,14 +799,14 @@ do { \
23555 " negl %0\n" \
23556 " andl $7,%0\n" \
23557 " subl %0,%3\n" \
23558- "4: rep; movsb\n" \
23559+ "4: rep; "__copyuser_seg"movsb\n" \
23560 " movl %3,%0\n" \
23561 " shrl $2,%0\n" \
23562 " andl $3,%3\n" \
23563 " .align 2,0x90\n" \
23564- "0: rep; movsl\n" \
23565+ "0: rep; "__copyuser_seg"movsl\n" \
23566 " movl %3,%0\n" \
23567- "1: rep; movsb\n" \
23568+ "1: rep; "__copyuser_seg"movsb\n" \
23569 "2:\n" \
23570 ".section .fixup,\"ax\"\n" \
23571 "5: addl %3,%0\n" \
23572@@ -775,9 +892,9 @@ survive:
23573 }
23574 #endif
23575 if (movsl_is_ok(to, from, n))
23576- __copy_user(to, from, n);
23577+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23578 else
23579- n = __copy_user_intel(to, from, n);
23580+ n = __generic_copy_to_user_intel(to, from, n);
23581 return n;
23582 }
23583 EXPORT_SYMBOL(__copy_to_user_ll);
23584@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23585 unsigned long n)
23586 {
23587 if (movsl_is_ok(to, from, n))
23588- __copy_user(to, from, n);
23589+ __copy_user(to, from, n, __copyuser_seg, "", "");
23590 else
23591- n = __copy_user_intel((void __user *)to,
23592- (const void *)from, n);
23593+ n = __generic_copy_from_user_intel(to, from, n);
23594 return n;
23595 }
23596 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23597@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23598 if (n > 64 && cpu_has_xmm2)
23599 n = __copy_user_intel_nocache(to, from, n);
23600 else
23601- __copy_user(to, from, n);
23602+ __copy_user(to, from, n, __copyuser_seg, "", "");
23603 #else
23604- __copy_user(to, from, n);
23605+ __copy_user(to, from, n, __copyuser_seg, "", "");
23606 #endif
23607 return n;
23608 }
23609 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23610
23611-/**
23612- * copy_to_user: - Copy a block of data into user space.
23613- * @to: Destination address, in user space.
23614- * @from: Source address, in kernel space.
23615- * @n: Number of bytes to copy.
23616- *
23617- * Context: User context only. This function may sleep.
23618- *
23619- * Copy data from kernel space to user space.
23620- *
23621- * Returns number of bytes that could not be copied.
23622- * On success, this will be zero.
23623- */
23624-unsigned long
23625-copy_to_user(void __user *to, const void *from, unsigned long n)
23626+#ifdef CONFIG_PAX_MEMORY_UDEREF
23627+void __set_fs(mm_segment_t x)
23628 {
23629- if (access_ok(VERIFY_WRITE, to, n))
23630- n = __copy_to_user(to, from, n);
23631- return n;
23632+ switch (x.seg) {
23633+ case 0:
23634+ loadsegment(gs, 0);
23635+ break;
23636+ case TASK_SIZE_MAX:
23637+ loadsegment(gs, __USER_DS);
23638+ break;
23639+ case -1UL:
23640+ loadsegment(gs, __KERNEL_DS);
23641+ break;
23642+ default:
23643+ BUG();
23644+ }
23645+ return;
23646 }
23647-EXPORT_SYMBOL(copy_to_user);
23648+EXPORT_SYMBOL(__set_fs);
23649
23650-/**
23651- * copy_from_user: - Copy a block of data from user space.
23652- * @to: Destination address, in kernel space.
23653- * @from: Source address, in user space.
23654- * @n: Number of bytes to copy.
23655- *
23656- * Context: User context only. This function may sleep.
23657- *
23658- * Copy data from user space to kernel space.
23659- *
23660- * Returns number of bytes that could not be copied.
23661- * On success, this will be zero.
23662- *
23663- * If some data could not be copied, this function will pad the copied
23664- * data to the requested size using zero bytes.
23665- */
23666-unsigned long
23667-copy_from_user(void *to, const void __user *from, unsigned long n)
23668+void set_fs(mm_segment_t x)
23669 {
23670- if (access_ok(VERIFY_READ, from, n))
23671- n = __copy_from_user(to, from, n);
23672- else
23673- memset(to, 0, n);
23674- return n;
23675+ current_thread_info()->addr_limit = x;
23676+ __set_fs(x);
23677 }
23678-EXPORT_SYMBOL(copy_from_user);
23679+EXPORT_SYMBOL(set_fs);
23680+#endif
23681diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23682index b7c2849..8633ad8 100644
23683--- a/arch/x86/lib/usercopy_64.c
23684+++ b/arch/x86/lib/usercopy_64.c
23685@@ -42,6 +42,12 @@ long
23686 __strncpy_from_user(char *dst, const char __user *src, long count)
23687 {
23688 long res;
23689+
23690+#ifdef CONFIG_PAX_MEMORY_UDEREF
23691+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23692+ src += PAX_USER_SHADOW_BASE;
23693+#endif
23694+
23695 __do_strncpy_from_user(dst, src, count, res);
23696 return res;
23697 }
23698@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23699 {
23700 long __d0;
23701 might_fault();
23702+
23703+#ifdef CONFIG_PAX_MEMORY_UDEREF
23704+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23705+ addr += PAX_USER_SHADOW_BASE;
23706+#endif
23707+
23708 /* no memory constraint because it doesn't change any memory gcc knows
23709 about */
23710 asm volatile(
23711@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23712 }
23713 EXPORT_SYMBOL(strlen_user);
23714
23715-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23716+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23717 {
23718- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23719- return copy_user_generic((__force void *)to, (__force void *)from, len);
23720- }
23721- return len;
23722+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23723+
23724+#ifdef CONFIG_PAX_MEMORY_UDEREF
23725+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23726+ to += PAX_USER_SHADOW_BASE;
23727+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23728+ from += PAX_USER_SHADOW_BASE;
23729+#endif
23730+
23731+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23732+ }
23733+ return len;
23734 }
23735 EXPORT_SYMBOL(copy_in_user);
23736
23737@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23738 * it is not necessary to optimize tail handling.
23739 */
23740 unsigned long
23741-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23742+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23743 {
23744 char c;
23745 unsigned zero_len;
23746diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23747index 61b41ca..5fef66a 100644
23748--- a/arch/x86/mm/extable.c
23749+++ b/arch/x86/mm/extable.c
23750@@ -1,14 +1,71 @@
23751 #include <linux/module.h>
23752 #include <linux/spinlock.h>
23753+#include <linux/sort.h>
23754 #include <asm/uaccess.h>
23755+#include <asm/pgtable.h>
23756
23757+/*
23758+ * The exception table needs to be sorted so that the binary
23759+ * search that we use to find entries in it works properly.
23760+ * This is used both for the kernel exception table and for
23761+ * the exception tables of modules that get loaded.
23762+ */
23763+static int cmp_ex(const void *a, const void *b)
23764+{
23765+ const struct exception_table_entry *x = a, *y = b;
23766+
23767+ /* avoid overflow */
23768+ if (x->insn > y->insn)
23769+ return 1;
23770+ if (x->insn < y->insn)
23771+ return -1;
23772+ return 0;
23773+}
23774+
23775+static void swap_ex(void *a, void *b, int size)
23776+{
23777+ struct exception_table_entry t, *x = a, *y = b;
23778+
23779+ t = *x;
23780+
23781+ pax_open_kernel();
23782+ *x = *y;
23783+ *y = t;
23784+ pax_close_kernel();
23785+}
23786+
23787+void sort_extable(struct exception_table_entry *start,
23788+ struct exception_table_entry *finish)
23789+{
23790+ sort(start, finish - start, sizeof(struct exception_table_entry),
23791+ cmp_ex, swap_ex);
23792+}
23793+
23794+#ifdef CONFIG_MODULES
23795+/*
23796+ * If the exception table is sorted, any referring to the module init
23797+ * will be at the beginning or the end.
23798+ */
23799+void trim_init_extable(struct module *m)
23800+{
23801+ /*trim the beginning*/
23802+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
23803+ m->extable++;
23804+ m->num_exentries--;
23805+ }
23806+ /*trim the end*/
23807+ while (m->num_exentries &&
23808+ within_module_init(m->extable[m->num_exentries-1].insn, m))
23809+ m->num_exentries--;
23810+}
23811+#endif /* CONFIG_MODULES */
23812
23813 int fixup_exception(struct pt_regs *regs)
23814 {
23815 const struct exception_table_entry *fixup;
23816
23817 #ifdef CONFIG_PNPBIOS
23818- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23819+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23820 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23821 extern u32 pnp_bios_is_utter_crap;
23822 pnp_bios_is_utter_crap = 1;
23823diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23824index 8ac0d76..87899a4 100644
23825--- a/arch/x86/mm/fault.c
23826+++ b/arch/x86/mm/fault.c
23827@@ -11,10 +11,19 @@
23828 #include <linux/kprobes.h> /* __kprobes, ... */
23829 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
23830 #include <linux/perf_event.h> /* perf_sw_event */
23831+#include <linux/unistd.h>
23832+#include <linux/compiler.h>
23833
23834 #include <asm/traps.h> /* dotraplinkage, ... */
23835 #include <asm/pgalloc.h> /* pgd_*(), ... */
23836 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23837+#include <asm/vsyscall.h>
23838+#include <asm/tlbflush.h>
23839+
23840+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23841+#include <asm/stacktrace.h>
23842+#include "../kernel/dumpstack.h"
23843+#endif
23844
23845 /*
23846 * Page fault error code bits:
23847@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
23848 int ret = 0;
23849
23850 /* kprobe_running() needs smp_processor_id() */
23851- if (kprobes_built_in() && !user_mode_vm(regs)) {
23852+ if (kprobes_built_in() && !user_mode(regs)) {
23853 preempt_disable();
23854 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23855 ret = 1;
23856@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23857 return !instr_lo || (instr_lo>>1) == 1;
23858 case 0x00:
23859 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23860- if (probe_kernel_address(instr, opcode))
23861+ if (user_mode(regs)) {
23862+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23863+ return 0;
23864+ } else if (probe_kernel_address(instr, opcode))
23865 return 0;
23866
23867 *prefetch = (instr_lo == 0xF) &&
23868@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23869 while (instr < max_instr) {
23870 unsigned char opcode;
23871
23872- if (probe_kernel_address(instr, opcode))
23873+ if (user_mode(regs)) {
23874+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23875+ break;
23876+ } else if (probe_kernel_address(instr, opcode))
23877 break;
23878
23879 instr++;
23880@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23881 force_sig_info(si_signo, &info, tsk);
23882 }
23883
23884+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23885+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23886+#endif
23887+
23888+#ifdef CONFIG_PAX_EMUTRAMP
23889+static int pax_handle_fetch_fault(struct pt_regs *regs);
23890+#endif
23891+
23892+#ifdef CONFIG_PAX_PAGEEXEC
23893+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23894+{
23895+ pgd_t *pgd;
23896+ pud_t *pud;
23897+ pmd_t *pmd;
23898+
23899+ pgd = pgd_offset(mm, address);
23900+ if (!pgd_present(*pgd))
23901+ return NULL;
23902+ pud = pud_offset(pgd, address);
23903+ if (!pud_present(*pud))
23904+ return NULL;
23905+ pmd = pmd_offset(pud, address);
23906+ if (!pmd_present(*pmd))
23907+ return NULL;
23908+ return pmd;
23909+}
23910+#endif
23911+
23912 DEFINE_SPINLOCK(pgd_lock);
23913 LIST_HEAD(pgd_list);
23914
23915@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
23916 address += PMD_SIZE) {
23917
23918 unsigned long flags;
23919+
23920+#ifdef CONFIG_PAX_PER_CPU_PGD
23921+ unsigned long cpu;
23922+#else
23923 struct page *page;
23924+#endif
23925
23926 spin_lock_irqsave(&pgd_lock, flags);
23927+
23928+#ifdef CONFIG_PAX_PER_CPU_PGD
23929+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23930+ pgd_t *pgd = get_cpu_pgd(cpu);
23931+#else
23932 list_for_each_entry(page, &pgd_list, lru) {
23933- if (!vmalloc_sync_one(page_address(page), address))
23934+ pgd_t *pgd = page_address(page);
23935+#endif
23936+
23937+ if (!vmalloc_sync_one(pgd, address))
23938 break;
23939 }
23940 spin_unlock_irqrestore(&pgd_lock, flags);
23941@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
23942 * an interrupt in the middle of a task switch..
23943 */
23944 pgd_paddr = read_cr3();
23945+
23946+#ifdef CONFIG_PAX_PER_CPU_PGD
23947+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23948+#endif
23949+
23950 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23951 if (!pmd_k)
23952 return -1;
23953@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
23954
23955 const pgd_t *pgd_ref = pgd_offset_k(address);
23956 unsigned long flags;
23957+
23958+#ifdef CONFIG_PAX_PER_CPU_PGD
23959+ unsigned long cpu;
23960+#else
23961 struct page *page;
23962+#endif
23963
23964 if (pgd_none(*pgd_ref))
23965 continue;
23966
23967 spin_lock_irqsave(&pgd_lock, flags);
23968+
23969+#ifdef CONFIG_PAX_PER_CPU_PGD
23970+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23971+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
23972+#else
23973 list_for_each_entry(page, &pgd_list, lru) {
23974 pgd_t *pgd;
23975 pgd = (pgd_t *)page_address(page) + pgd_index(address);
23976+#endif
23977+
23978 if (pgd_none(*pgd))
23979 set_pgd(pgd, *pgd_ref);
23980 else
23981@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
23982 * happen within a race in page table update. In the later
23983 * case just flush:
23984 */
23985+
23986+#ifdef CONFIG_PAX_PER_CPU_PGD
23987+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23988+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23989+#else
23990 pgd = pgd_offset(current->active_mm, address);
23991+#endif
23992+
23993 pgd_ref = pgd_offset_k(address);
23994 if (pgd_none(*pgd_ref))
23995 return -1;
23996@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23997 static int is_errata100(struct pt_regs *regs, unsigned long address)
23998 {
23999 #ifdef CONFIG_X86_64
24000- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24001+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24002 return 1;
24003 #endif
24004 return 0;
24005@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24006 }
24007
24008 static const char nx_warning[] = KERN_CRIT
24009-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24010+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24011
24012 static void
24013 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24014@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24015 if (!oops_may_print())
24016 return;
24017
24018- if (error_code & PF_INSTR) {
24019+ if (nx_enabled && (error_code & PF_INSTR)) {
24020 unsigned int level;
24021
24022 pte_t *pte = lookup_address(address, &level);
24023
24024 if (pte && pte_present(*pte) && !pte_exec(*pte))
24025- printk(nx_warning, current_uid());
24026+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24027 }
24028
24029+#ifdef CONFIG_PAX_KERNEXEC
24030+ if (init_mm.start_code <= address && address < init_mm.end_code) {
24031+ if (current->signal->curr_ip)
24032+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24033+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24034+ else
24035+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24036+ current->comm, task_pid_nr(current), current_uid(), current_euid());
24037+ }
24038+#endif
24039+
24040 printk(KERN_ALERT "BUG: unable to handle kernel ");
24041 if (address < PAGE_SIZE)
24042 printk(KERN_CONT "NULL pointer dereference");
24043@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24044 {
24045 struct task_struct *tsk = current;
24046
24047+#ifdef CONFIG_X86_64
24048+ struct mm_struct *mm = tsk->mm;
24049+
24050+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24051+ if (regs->ip == (unsigned long)vgettimeofday) {
24052+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24053+ return;
24054+ } else if (regs->ip == (unsigned long)vtime) {
24055+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24056+ return;
24057+ } else if (regs->ip == (unsigned long)vgetcpu) {
24058+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24059+ return;
24060+ }
24061+ }
24062+#endif
24063+
24064 /* User mode accesses just cause a SIGSEGV */
24065 if (error_code & PF_USER) {
24066 /*
24067@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24068 if (is_errata100(regs, address))
24069 return;
24070
24071+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24072+ if (pax_is_fetch_fault(regs, error_code, address)) {
24073+
24074+#ifdef CONFIG_PAX_EMUTRAMP
24075+ switch (pax_handle_fetch_fault(regs)) {
24076+ case 2:
24077+ return;
24078+ }
24079+#endif
24080+
24081+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24082+ do_group_exit(SIGKILL);
24083+ }
24084+#endif
24085+
24086 if (unlikely(show_unhandled_signals))
24087 show_signal_msg(regs, error_code, address, tsk);
24088
24089@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24090 if (fault & VM_FAULT_HWPOISON) {
24091 printk(KERN_ERR
24092 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24093- tsk->comm, tsk->pid, address);
24094+ tsk->comm, task_pid_nr(tsk), address);
24095 code = BUS_MCEERR_AR;
24096 }
24097 #endif
24098@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24099 return 1;
24100 }
24101
24102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24103+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24104+{
24105+ pte_t *pte;
24106+ pmd_t *pmd;
24107+ spinlock_t *ptl;
24108+ unsigned char pte_mask;
24109+
24110+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24111+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
24112+ return 0;
24113+
24114+ /* PaX: it's our fault, let's handle it if we can */
24115+
24116+ /* PaX: take a look at read faults before acquiring any locks */
24117+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24118+ /* instruction fetch attempt from a protected page in user mode */
24119+ up_read(&mm->mmap_sem);
24120+
24121+#ifdef CONFIG_PAX_EMUTRAMP
24122+ switch (pax_handle_fetch_fault(regs)) {
24123+ case 2:
24124+ return 1;
24125+ }
24126+#endif
24127+
24128+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24129+ do_group_exit(SIGKILL);
24130+ }
24131+
24132+ pmd = pax_get_pmd(mm, address);
24133+ if (unlikely(!pmd))
24134+ return 0;
24135+
24136+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24137+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24138+ pte_unmap_unlock(pte, ptl);
24139+ return 0;
24140+ }
24141+
24142+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24143+ /* write attempt to a protected page in user mode */
24144+ pte_unmap_unlock(pte, ptl);
24145+ return 0;
24146+ }
24147+
24148+#ifdef CONFIG_SMP
24149+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24150+#else
24151+ if (likely(address > get_limit(regs->cs)))
24152+#endif
24153+ {
24154+ set_pte(pte, pte_mkread(*pte));
24155+ __flush_tlb_one(address);
24156+ pte_unmap_unlock(pte, ptl);
24157+ up_read(&mm->mmap_sem);
24158+ return 1;
24159+ }
24160+
24161+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24162+
24163+ /*
24164+ * PaX: fill DTLB with user rights and retry
24165+ */
24166+ __asm__ __volatile__ (
24167+ "orb %2,(%1)\n"
24168+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24169+/*
24170+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24171+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24172+ * page fault when examined during a TLB load attempt. this is true not only
24173+ * for PTEs holding a non-present entry but also present entries that will
24174+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24175+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24176+ * for our target pages since their PTEs are simply not in the TLBs at all.
24177+
24178+ * the best thing in omitting it is that we gain around 15-20% speed in the
24179+ * fast path of the page fault handler and can get rid of tracing since we
24180+ * can no longer flush unintended entries.
24181+ */
24182+ "invlpg (%0)\n"
24183+#endif
24184+ __copyuser_seg"testb $0,(%0)\n"
24185+ "xorb %3,(%1)\n"
24186+ :
24187+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24188+ : "memory", "cc");
24189+ pte_unmap_unlock(pte, ptl);
24190+ up_read(&mm->mmap_sem);
24191+ return 1;
24192+}
24193+#endif
24194+
24195 /*
24196 * Handle a spurious fault caused by a stale TLB entry.
24197 *
24198@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24199 static inline int
24200 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24201 {
24202+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24203+ return 1;
24204+
24205 if (write) {
24206 /* write, present and write, not present: */
24207 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24208@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24209 {
24210 struct vm_area_struct *vma;
24211 struct task_struct *tsk;
24212- unsigned long address;
24213 struct mm_struct *mm;
24214 int write;
24215 int fault;
24216
24217- tsk = current;
24218- mm = tsk->mm;
24219-
24220 /* Get the faulting address: */
24221- address = read_cr2();
24222+ unsigned long address = read_cr2();
24223+
24224+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24225+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24226+ if (!search_exception_tables(regs->ip)) {
24227+ bad_area_nosemaphore(regs, error_code, address);
24228+ return;
24229+ }
24230+ if (address < PAX_USER_SHADOW_BASE) {
24231+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24232+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24233+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24234+ } else
24235+ address -= PAX_USER_SHADOW_BASE;
24236+ }
24237+#endif
24238+
24239+ tsk = current;
24240+ mm = tsk->mm;
24241
24242 /*
24243 * Detect and handle instructions that would cause a page fault for
24244@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24245 * User-mode registers count as a user access even for any
24246 * potential system fault or CPU buglet:
24247 */
24248- if (user_mode_vm(regs)) {
24249+ if (user_mode(regs)) {
24250 local_irq_enable();
24251 error_code |= PF_USER;
24252 } else {
24253@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24254 might_sleep();
24255 }
24256
24257+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24258+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24259+ return;
24260+#endif
24261+
24262 vma = find_vma(mm, address);
24263 if (unlikely(!vma)) {
24264 bad_area(regs, error_code, address);
24265@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24266 bad_area(regs, error_code, address);
24267 return;
24268 }
24269- if (error_code & PF_USER) {
24270- /*
24271- * Accessing the stack below %sp is always a bug.
24272- * The large cushion allows instructions like enter
24273- * and pusha to work. ("enter $65535, $31" pushes
24274- * 32 pointers and then decrements %sp by 65535.)
24275- */
24276- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24277- bad_area(regs, error_code, address);
24278- return;
24279- }
24280+ /*
24281+ * Accessing the stack below %sp is always a bug.
24282+ * The large cushion allows instructions like enter
24283+ * and pusha to work. ("enter $65535, $31" pushes
24284+ * 32 pointers and then decrements %sp by 65535.)
24285+ */
24286+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24287+ bad_area(regs, error_code, address);
24288+ return;
24289 }
24290+
24291+#ifdef CONFIG_PAX_SEGMEXEC
24292+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24293+ bad_area(regs, error_code, address);
24294+ return;
24295+ }
24296+#endif
24297+
24298 if (unlikely(expand_stack(vma, address))) {
24299 bad_area(regs, error_code, address);
24300 return;
24301@@ -1146,3 +1390,292 @@ good_area:
24302
24303 up_read(&mm->mmap_sem);
24304 }
24305+
24306+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24307+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24308+{
24309+ struct mm_struct *mm = current->mm;
24310+ unsigned long ip = regs->ip;
24311+
24312+ if (v8086_mode(regs))
24313+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24314+
24315+#ifdef CONFIG_PAX_PAGEEXEC
24316+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24317+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24318+ return true;
24319+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24320+ return true;
24321+ return false;
24322+ }
24323+#endif
24324+
24325+#ifdef CONFIG_PAX_SEGMEXEC
24326+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24327+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24328+ return true;
24329+ return false;
24330+ }
24331+#endif
24332+
24333+ return false;
24334+}
24335+#endif
24336+
24337+#ifdef CONFIG_PAX_EMUTRAMP
24338+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24339+{
24340+ int err;
24341+
24342+ do { /* PaX: libffi trampoline emulation */
24343+ unsigned char mov, jmp;
24344+ unsigned int addr1, addr2;
24345+
24346+#ifdef CONFIG_X86_64
24347+ if ((regs->ip + 9) >> 32)
24348+ break;
24349+#endif
24350+
24351+ err = get_user(mov, (unsigned char __user *)regs->ip);
24352+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24353+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24354+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24355+
24356+ if (err)
24357+ break;
24358+
24359+ if (mov == 0xB8 && jmp == 0xE9) {
24360+ regs->ax = addr1;
24361+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24362+ return 2;
24363+ }
24364+ } while (0);
24365+
24366+ do { /* PaX: gcc trampoline emulation #1 */
24367+ unsigned char mov1, mov2;
24368+ unsigned short jmp;
24369+ unsigned int addr1, addr2;
24370+
24371+#ifdef CONFIG_X86_64
24372+ if ((regs->ip + 11) >> 32)
24373+ break;
24374+#endif
24375+
24376+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24377+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24378+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24379+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24380+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24381+
24382+ if (err)
24383+ break;
24384+
24385+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24386+ regs->cx = addr1;
24387+ regs->ax = addr2;
24388+ regs->ip = addr2;
24389+ return 2;
24390+ }
24391+ } while (0);
24392+
24393+ do { /* PaX: gcc trampoline emulation #2 */
24394+ unsigned char mov, jmp;
24395+ unsigned int addr1, addr2;
24396+
24397+#ifdef CONFIG_X86_64
24398+ if ((regs->ip + 9) >> 32)
24399+ break;
24400+#endif
24401+
24402+ err = get_user(mov, (unsigned char __user *)regs->ip);
24403+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24404+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24405+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24406+
24407+ if (err)
24408+ break;
24409+
24410+ if (mov == 0xB9 && jmp == 0xE9) {
24411+ regs->cx = addr1;
24412+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24413+ return 2;
24414+ }
24415+ } while (0);
24416+
24417+ return 1; /* PaX in action */
24418+}
24419+
24420+#ifdef CONFIG_X86_64
24421+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24422+{
24423+ int err;
24424+
24425+ do { /* PaX: libffi trampoline emulation */
24426+ unsigned short mov1, mov2, jmp1;
24427+ unsigned char stcclc, jmp2;
24428+ unsigned long addr1, addr2;
24429+
24430+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24431+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24432+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24433+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24434+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24435+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24436+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24437+
24438+ if (err)
24439+ break;
24440+
24441+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24442+ regs->r11 = addr1;
24443+ regs->r10 = addr2;
24444+ if (stcclc == 0xF8)
24445+ regs->flags &= ~X86_EFLAGS_CF;
24446+ else
24447+ regs->flags |= X86_EFLAGS_CF;
24448+ regs->ip = addr1;
24449+ return 2;
24450+ }
24451+ } while (0);
24452+
24453+ do { /* PaX: gcc trampoline emulation #1 */
24454+ unsigned short mov1, mov2, jmp1;
24455+ unsigned char jmp2;
24456+ unsigned int addr1;
24457+ unsigned long addr2;
24458+
24459+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24460+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24461+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24462+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24463+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24464+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24465+
24466+ if (err)
24467+ break;
24468+
24469+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24470+ regs->r11 = addr1;
24471+ regs->r10 = addr2;
24472+ regs->ip = addr1;
24473+ return 2;
24474+ }
24475+ } while (0);
24476+
24477+ do { /* PaX: gcc trampoline emulation #2 */
24478+ unsigned short mov1, mov2, jmp1;
24479+ unsigned char jmp2;
24480+ unsigned long addr1, addr2;
24481+
24482+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24483+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24484+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24485+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24486+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24487+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24488+
24489+ if (err)
24490+ break;
24491+
24492+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24493+ regs->r11 = addr1;
24494+ regs->r10 = addr2;
24495+ regs->ip = addr1;
24496+ return 2;
24497+ }
24498+ } while (0);
24499+
24500+ return 1; /* PaX in action */
24501+}
24502+#endif
24503+
24504+/*
24505+ * PaX: decide what to do with offenders (regs->ip = fault address)
24506+ *
24507+ * returns 1 when task should be killed
24508+ * 2 when gcc trampoline was detected
24509+ */
24510+static int pax_handle_fetch_fault(struct pt_regs *regs)
24511+{
24512+ if (v8086_mode(regs))
24513+ return 1;
24514+
24515+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24516+ return 1;
24517+
24518+#ifdef CONFIG_X86_32
24519+ return pax_handle_fetch_fault_32(regs);
24520+#else
24521+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24522+ return pax_handle_fetch_fault_32(regs);
24523+ else
24524+ return pax_handle_fetch_fault_64(regs);
24525+#endif
24526+}
24527+#endif
24528+
24529+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24530+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24531+{
24532+ long i;
24533+
24534+ printk(KERN_ERR "PAX: bytes at PC: ");
24535+ for (i = 0; i < 20; i++) {
24536+ unsigned char c;
24537+ if (get_user(c, (unsigned char __force_user *)pc+i))
24538+ printk(KERN_CONT "?? ");
24539+ else
24540+ printk(KERN_CONT "%02x ", c);
24541+ }
24542+ printk("\n");
24543+
24544+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24545+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24546+ unsigned long c;
24547+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24548+#ifdef CONFIG_X86_32
24549+ printk(KERN_CONT "???????? ");
24550+#else
24551+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24552+ printk(KERN_CONT "???????? ???????? ");
24553+ else
24554+ printk(KERN_CONT "???????????????? ");
24555+#endif
24556+ } else {
24557+#ifdef CONFIG_X86_64
24558+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24559+ printk(KERN_CONT "%08x ", (unsigned int)c);
24560+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24561+ } else
24562+#endif
24563+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24564+ }
24565+ }
24566+ printk("\n");
24567+}
24568+#endif
24569+
24570+/**
24571+ * probe_kernel_write(): safely attempt to write to a location
24572+ * @dst: address to write to
24573+ * @src: pointer to the data that shall be written
24574+ * @size: size of the data chunk
24575+ *
24576+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24577+ * happens, handle that and return -EFAULT.
24578+ */
24579+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24580+{
24581+ long ret;
24582+ mm_segment_t old_fs = get_fs();
24583+
24584+ set_fs(KERNEL_DS);
24585+ pagefault_disable();
24586+ pax_open_kernel();
24587+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24588+ pax_close_kernel();
24589+ pagefault_enable();
24590+ set_fs(old_fs);
24591+
24592+ return ret ? -EFAULT : 0;
24593+}
24594diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24595index 71da1bc..7a16bf4 100644
24596--- a/arch/x86/mm/gup.c
24597+++ b/arch/x86/mm/gup.c
24598@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24599 addr = start;
24600 len = (unsigned long) nr_pages << PAGE_SHIFT;
24601 end = start + len;
24602- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24603+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24604 (void __user *)start, len)))
24605 return 0;
24606
24607diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24608index 63a6ba6..79abd7a 100644
24609--- a/arch/x86/mm/highmem_32.c
24610+++ b/arch/x86/mm/highmem_32.c
24611@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24612 idx = type + KM_TYPE_NR*smp_processor_id();
24613 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24614 BUG_ON(!pte_none(*(kmap_pte-idx)));
24615+
24616+ pax_open_kernel();
24617 set_pte(kmap_pte-idx, mk_pte(page, prot));
24618+ pax_close_kernel();
24619
24620 return (void *)vaddr;
24621 }
24622diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24623index f46c340..6ff9a26 100644
24624--- a/arch/x86/mm/hugetlbpage.c
24625+++ b/arch/x86/mm/hugetlbpage.c
24626@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24627 struct hstate *h = hstate_file(file);
24628 struct mm_struct *mm = current->mm;
24629 struct vm_area_struct *vma;
24630- unsigned long start_addr;
24631+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24632+
24633+#ifdef CONFIG_PAX_SEGMEXEC
24634+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24635+ pax_task_size = SEGMEXEC_TASK_SIZE;
24636+#endif
24637+
24638+ pax_task_size -= PAGE_SIZE;
24639
24640 if (len > mm->cached_hole_size) {
24641- start_addr = mm->free_area_cache;
24642+ start_addr = mm->free_area_cache;
24643 } else {
24644- start_addr = TASK_UNMAPPED_BASE;
24645- mm->cached_hole_size = 0;
24646+ start_addr = mm->mmap_base;
24647+ mm->cached_hole_size = 0;
24648 }
24649
24650 full_search:
24651@@ -281,26 +288,27 @@ full_search:
24652
24653 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24654 /* At this point: (!vma || addr < vma->vm_end). */
24655- if (TASK_SIZE - len < addr) {
24656+ if (pax_task_size - len < addr) {
24657 /*
24658 * Start a new search - just in case we missed
24659 * some holes.
24660 */
24661- if (start_addr != TASK_UNMAPPED_BASE) {
24662- start_addr = TASK_UNMAPPED_BASE;
24663+ if (start_addr != mm->mmap_base) {
24664+ start_addr = mm->mmap_base;
24665 mm->cached_hole_size = 0;
24666 goto full_search;
24667 }
24668 return -ENOMEM;
24669 }
24670- if (!vma || addr + len <= vma->vm_start) {
24671- mm->free_area_cache = addr + len;
24672- return addr;
24673- }
24674+ if (check_heap_stack_gap(vma, addr, len))
24675+ break;
24676 if (addr + mm->cached_hole_size < vma->vm_start)
24677 mm->cached_hole_size = vma->vm_start - addr;
24678 addr = ALIGN(vma->vm_end, huge_page_size(h));
24679 }
24680+
24681+ mm->free_area_cache = addr + len;
24682+ return addr;
24683 }
24684
24685 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24686@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24687 {
24688 struct hstate *h = hstate_file(file);
24689 struct mm_struct *mm = current->mm;
24690- struct vm_area_struct *vma, *prev_vma;
24691- unsigned long base = mm->mmap_base, addr = addr0;
24692+ struct vm_area_struct *vma;
24693+ unsigned long base = mm->mmap_base, addr;
24694 unsigned long largest_hole = mm->cached_hole_size;
24695- int first_time = 1;
24696
24697 /* don't allow allocations above current base */
24698 if (mm->free_area_cache > base)
24699@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24700 largest_hole = 0;
24701 mm->free_area_cache = base;
24702 }
24703-try_again:
24704+
24705 /* make sure it can fit in the remaining address space */
24706 if (mm->free_area_cache < len)
24707 goto fail;
24708
24709 /* either no address requested or cant fit in requested address hole */
24710- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24711+ addr = (mm->free_area_cache - len);
24712 do {
24713+ addr &= huge_page_mask(h);
24714+ vma = find_vma(mm, addr);
24715 /*
24716 * Lookup failure means no vma is above this address,
24717 * i.e. return with success:
24718- */
24719- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24720- return addr;
24721-
24722- /*
24723 * new region fits between prev_vma->vm_end and
24724 * vma->vm_start, use it:
24725 */
24726- if (addr + len <= vma->vm_start &&
24727- (!prev_vma || (addr >= prev_vma->vm_end))) {
24728+ if (check_heap_stack_gap(vma, addr, len)) {
24729 /* remember the address as a hint for next time */
24730- mm->cached_hole_size = largest_hole;
24731- return (mm->free_area_cache = addr);
24732- } else {
24733- /* pull free_area_cache down to the first hole */
24734- if (mm->free_area_cache == vma->vm_end) {
24735- mm->free_area_cache = vma->vm_start;
24736- mm->cached_hole_size = largest_hole;
24737- }
24738+ mm->cached_hole_size = largest_hole;
24739+ return (mm->free_area_cache = addr);
24740+ }
24741+ /* pull free_area_cache down to the first hole */
24742+ if (mm->free_area_cache == vma->vm_end) {
24743+ mm->free_area_cache = vma->vm_start;
24744+ mm->cached_hole_size = largest_hole;
24745 }
24746
24747 /* remember the largest hole we saw so far */
24748 if (addr + largest_hole < vma->vm_start)
24749- largest_hole = vma->vm_start - addr;
24750+ largest_hole = vma->vm_start - addr;
24751
24752 /* try just below the current vma->vm_start */
24753- addr = (vma->vm_start - len) & huge_page_mask(h);
24754- } while (len <= vma->vm_start);
24755+ addr = skip_heap_stack_gap(vma, len);
24756+ } while (!IS_ERR_VALUE(addr));
24757
24758 fail:
24759 /*
24760- * if hint left us with no space for the requested
24761- * mapping then try again:
24762- */
24763- if (first_time) {
24764- mm->free_area_cache = base;
24765- largest_hole = 0;
24766- first_time = 0;
24767- goto try_again;
24768- }
24769- /*
24770 * A failed mmap() very likely causes application failure,
24771 * so fall back to the bottom-up function here. This scenario
24772 * can happen with large stack limits and large mmap()
24773 * allocations.
24774 */
24775- mm->free_area_cache = TASK_UNMAPPED_BASE;
24776+
24777+#ifdef CONFIG_PAX_SEGMEXEC
24778+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24779+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24780+ else
24781+#endif
24782+
24783+ mm->mmap_base = TASK_UNMAPPED_BASE;
24784+
24785+#ifdef CONFIG_PAX_RANDMMAP
24786+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24787+ mm->mmap_base += mm->delta_mmap;
24788+#endif
24789+
24790+ mm->free_area_cache = mm->mmap_base;
24791 mm->cached_hole_size = ~0UL;
24792 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24793 len, pgoff, flags);
24794@@ -387,6 +393,7 @@ fail:
24795 /*
24796 * Restore the topdown base:
24797 */
24798+ mm->mmap_base = base;
24799 mm->free_area_cache = base;
24800 mm->cached_hole_size = ~0UL;
24801
24802@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24803 struct hstate *h = hstate_file(file);
24804 struct mm_struct *mm = current->mm;
24805 struct vm_area_struct *vma;
24806+ unsigned long pax_task_size = TASK_SIZE;
24807
24808 if (len & ~huge_page_mask(h))
24809 return -EINVAL;
24810- if (len > TASK_SIZE)
24811+
24812+#ifdef CONFIG_PAX_SEGMEXEC
24813+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24814+ pax_task_size = SEGMEXEC_TASK_SIZE;
24815+#endif
24816+
24817+ pax_task_size -= PAGE_SIZE;
24818+
24819+ if (len > pax_task_size)
24820 return -ENOMEM;
24821
24822 if (flags & MAP_FIXED) {
24823@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24824 if (addr) {
24825 addr = ALIGN(addr, huge_page_size(h));
24826 vma = find_vma(mm, addr);
24827- if (TASK_SIZE - len >= addr &&
24828- (!vma || addr + len <= vma->vm_start))
24829+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24830 return addr;
24831 }
24832 if (mm->get_unmapped_area == arch_get_unmapped_area)
24833diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24834index 73ffd55..ad78676 100644
24835--- a/arch/x86/mm/init.c
24836+++ b/arch/x86/mm/init.c
24837@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
24838 * cause a hotspot and fill up ZONE_DMA. The page tables
24839 * need roughly 0.5KB per GB.
24840 */
24841-#ifdef CONFIG_X86_32
24842- start = 0x7000;
24843-#else
24844- start = 0x8000;
24845-#endif
24846+ start = 0x100000;
24847 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
24848 tables, PAGE_SIZE);
24849 if (e820_table_start == -1UL)
24850@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24851 #endif
24852
24853 set_nx();
24854- if (nx_enabled)
24855+ if (nx_enabled && cpu_has_nx)
24856 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
24857
24858 /* Enable PSE if available */
24859@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24860 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24861 * mmio resources as well as potential bios/acpi data regions.
24862 */
24863+
24864 int devmem_is_allowed(unsigned long pagenr)
24865 {
24866+#ifdef CONFIG_GRKERNSEC_KMEM
24867+ /* allow BDA */
24868+ if (!pagenr)
24869+ return 1;
24870+ /* allow EBDA */
24871+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24872+ return 1;
24873+ /* allow ISA/video mem */
24874+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24875+ return 1;
24876+ /* throw out everything else below 1MB */
24877+ if (pagenr <= 256)
24878+ return 0;
24879+#else
24880 if (pagenr <= 256)
24881 return 1;
24882+#endif
24883+
24884 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24885 return 0;
24886 if (!page_is_ram(pagenr))
24887@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24888
24889 void free_initmem(void)
24890 {
24891+
24892+#ifdef CONFIG_PAX_KERNEXEC
24893+#ifdef CONFIG_X86_32
24894+ /* PaX: limit KERNEL_CS to actual size */
24895+ unsigned long addr, limit;
24896+ struct desc_struct d;
24897+ int cpu;
24898+
24899+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24900+ limit = (limit - 1UL) >> PAGE_SHIFT;
24901+
24902+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24903+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
24904+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24905+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24906+ }
24907+
24908+ /* PaX: make KERNEL_CS read-only */
24909+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24910+ if (!paravirt_enabled())
24911+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24912+/*
24913+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24914+ pgd = pgd_offset_k(addr);
24915+ pud = pud_offset(pgd, addr);
24916+ pmd = pmd_offset(pud, addr);
24917+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24918+ }
24919+*/
24920+#ifdef CONFIG_X86_PAE
24921+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24922+/*
24923+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24924+ pgd = pgd_offset_k(addr);
24925+ pud = pud_offset(pgd, addr);
24926+ pmd = pmd_offset(pud, addr);
24927+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24928+ }
24929+*/
24930+#endif
24931+
24932+#ifdef CONFIG_MODULES
24933+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24934+#endif
24935+
24936+#else
24937+ pgd_t *pgd;
24938+ pud_t *pud;
24939+ pmd_t *pmd;
24940+ unsigned long addr, end;
24941+
24942+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24943+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24944+ pgd = pgd_offset_k(addr);
24945+ pud = pud_offset(pgd, addr);
24946+ pmd = pmd_offset(pud, addr);
24947+ if (!pmd_present(*pmd))
24948+ continue;
24949+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24950+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24951+ else
24952+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24953+ }
24954+
24955+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24956+ end = addr + KERNEL_IMAGE_SIZE;
24957+ for (; addr < end; addr += PMD_SIZE) {
24958+ pgd = pgd_offset_k(addr);
24959+ pud = pud_offset(pgd, addr);
24960+ pmd = pmd_offset(pud, addr);
24961+ if (!pmd_present(*pmd))
24962+ continue;
24963+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24964+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24965+ }
24966+#endif
24967+
24968+ flush_tlb_all();
24969+#endif
24970+
24971 free_init_pages("unused kernel memory",
24972 (unsigned long)(&__init_begin),
24973 (unsigned long)(&__init_end));
24974diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24975index 30938c1..bda3d5d 100644
24976--- a/arch/x86/mm/init_32.c
24977+++ b/arch/x86/mm/init_32.c
24978@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
24979 }
24980
24981 /*
24982- * Creates a middle page table and puts a pointer to it in the
24983- * given global directory entry. This only returns the gd entry
24984- * in non-PAE compilation mode, since the middle layer is folded.
24985- */
24986-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24987-{
24988- pud_t *pud;
24989- pmd_t *pmd_table;
24990-
24991-#ifdef CONFIG_X86_PAE
24992- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24993- if (after_bootmem)
24994- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24995- else
24996- pmd_table = (pmd_t *)alloc_low_page();
24997- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24998- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24999- pud = pud_offset(pgd, 0);
25000- BUG_ON(pmd_table != pmd_offset(pud, 0));
25001-
25002- return pmd_table;
25003- }
25004-#endif
25005- pud = pud_offset(pgd, 0);
25006- pmd_table = pmd_offset(pud, 0);
25007-
25008- return pmd_table;
25009-}
25010-
25011-/*
25012 * Create a page table and place a pointer to it in a middle page
25013 * directory entry:
25014 */
25015@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25016 page_table = (pte_t *)alloc_low_page();
25017
25018 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25019+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25020+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25021+#else
25022 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25023+#endif
25024 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25025 }
25026
25027 return pte_offset_kernel(pmd, 0);
25028 }
25029
25030+static pmd_t * __init one_md_table_init(pgd_t *pgd)
25031+{
25032+ pud_t *pud;
25033+ pmd_t *pmd_table;
25034+
25035+ pud = pud_offset(pgd, 0);
25036+ pmd_table = pmd_offset(pud, 0);
25037+
25038+ return pmd_table;
25039+}
25040+
25041 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25042 {
25043 int pgd_idx = pgd_index(vaddr);
25044@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25045 int pgd_idx, pmd_idx;
25046 unsigned long vaddr;
25047 pgd_t *pgd;
25048+ pud_t *pud;
25049 pmd_t *pmd;
25050 pte_t *pte = NULL;
25051
25052@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25053 pgd = pgd_base + pgd_idx;
25054
25055 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25056- pmd = one_md_table_init(pgd);
25057- pmd = pmd + pmd_index(vaddr);
25058+ pud = pud_offset(pgd, vaddr);
25059+ pmd = pmd_offset(pud, vaddr);
25060+
25061+#ifdef CONFIG_X86_PAE
25062+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25063+#endif
25064+
25065 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25066 pmd++, pmd_idx++) {
25067 pte = page_table_kmap_check(one_page_table_init(pmd),
25068@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25069 }
25070 }
25071
25072-static inline int is_kernel_text(unsigned long addr)
25073+static inline int is_kernel_text(unsigned long start, unsigned long end)
25074 {
25075- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25076- return 1;
25077- return 0;
25078+ if ((start > ktla_ktva((unsigned long)_etext) ||
25079+ end <= ktla_ktva((unsigned long)_stext)) &&
25080+ (start > ktla_ktva((unsigned long)_einittext) ||
25081+ end <= ktla_ktva((unsigned long)_sinittext)) &&
25082+
25083+#ifdef CONFIG_ACPI_SLEEP
25084+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25085+#endif
25086+
25087+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25088+ return 0;
25089+ return 1;
25090 }
25091
25092 /*
25093@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25094 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25095 unsigned long start_pfn, end_pfn;
25096 pgd_t *pgd_base = swapper_pg_dir;
25097- int pgd_idx, pmd_idx, pte_ofs;
25098+ unsigned int pgd_idx, pmd_idx, pte_ofs;
25099 unsigned long pfn;
25100 pgd_t *pgd;
25101+ pud_t *pud;
25102 pmd_t *pmd;
25103 pte_t *pte;
25104 unsigned pages_2m, pages_4k;
25105@@ -278,8 +279,13 @@ repeat:
25106 pfn = start_pfn;
25107 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25108 pgd = pgd_base + pgd_idx;
25109- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25110- pmd = one_md_table_init(pgd);
25111+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25112+ pud = pud_offset(pgd, 0);
25113+ pmd = pmd_offset(pud, 0);
25114+
25115+#ifdef CONFIG_X86_PAE
25116+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25117+#endif
25118
25119 if (pfn >= end_pfn)
25120 continue;
25121@@ -291,14 +297,13 @@ repeat:
25122 #endif
25123 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25124 pmd++, pmd_idx++) {
25125- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25126+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25127
25128 /*
25129 * Map with big pages if possible, otherwise
25130 * create normal page tables:
25131 */
25132 if (use_pse) {
25133- unsigned int addr2;
25134 pgprot_t prot = PAGE_KERNEL_LARGE;
25135 /*
25136 * first pass will use the same initial
25137@@ -308,11 +313,7 @@ repeat:
25138 __pgprot(PTE_IDENT_ATTR |
25139 _PAGE_PSE);
25140
25141- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25142- PAGE_OFFSET + PAGE_SIZE-1;
25143-
25144- if (is_kernel_text(addr) ||
25145- is_kernel_text(addr2))
25146+ if (is_kernel_text(address, address + PMD_SIZE))
25147 prot = PAGE_KERNEL_LARGE_EXEC;
25148
25149 pages_2m++;
25150@@ -329,7 +330,7 @@ repeat:
25151 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25152 pte += pte_ofs;
25153 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25154- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25155+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25156 pgprot_t prot = PAGE_KERNEL;
25157 /*
25158 * first pass will use the same initial
25159@@ -337,7 +338,7 @@ repeat:
25160 */
25161 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25162
25163- if (is_kernel_text(addr))
25164+ if (is_kernel_text(address, address + PAGE_SIZE))
25165 prot = PAGE_KERNEL_EXEC;
25166
25167 pages_4k++;
25168@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25169
25170 pud = pud_offset(pgd, va);
25171 pmd = pmd_offset(pud, va);
25172- if (!pmd_present(*pmd))
25173+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
25174 break;
25175
25176 pte = pte_offset_kernel(pmd, va);
25177@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25178
25179 static void __init pagetable_init(void)
25180 {
25181- pgd_t *pgd_base = swapper_pg_dir;
25182-
25183- permanent_kmaps_init(pgd_base);
25184+ permanent_kmaps_init(swapper_pg_dir);
25185 }
25186
25187 #ifdef CONFIG_ACPI_SLEEP
25188@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25189 * ACPI suspend needs this for resume, because things like the intel-agp
25190 * driver might have split up a kernel 4MB mapping.
25191 */
25192-char swsusp_pg_dir[PAGE_SIZE]
25193+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25194 __attribute__ ((aligned(PAGE_SIZE)));
25195
25196 static inline void save_pg_dir(void)
25197 {
25198- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25199+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25200 }
25201 #else /* !CONFIG_ACPI_SLEEP */
25202 static inline void save_pg_dir(void)
25203@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25204 flush_tlb_all();
25205 }
25206
25207-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25208+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25209 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25210
25211 /* user-defined highmem size */
25212@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25213 * Initialize the boot-time allocator (with low memory only):
25214 */
25215 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25216- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25217+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25218 PAGE_SIZE);
25219 if (bootmap == -1L)
25220 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25221@@ -864,6 +863,12 @@ void __init mem_init(void)
25222
25223 pci_iommu_alloc();
25224
25225+#ifdef CONFIG_PAX_PER_CPU_PGD
25226+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25227+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25228+ KERNEL_PGD_PTRS);
25229+#endif
25230+
25231 #ifdef CONFIG_FLATMEM
25232 BUG_ON(!mem_map);
25233 #endif
25234@@ -881,7 +886,7 @@ void __init mem_init(void)
25235 set_highmem_pages_init();
25236
25237 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25238- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25239+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25240 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25241
25242 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25243@@ -923,10 +928,10 @@ void __init mem_init(void)
25244 ((unsigned long)&__init_end -
25245 (unsigned long)&__init_begin) >> 10,
25246
25247- (unsigned long)&_etext, (unsigned long)&_edata,
25248- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25249+ (unsigned long)&_sdata, (unsigned long)&_edata,
25250+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25251
25252- (unsigned long)&_text, (unsigned long)&_etext,
25253+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25254 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25255
25256 /*
25257@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25258 if (!kernel_set_to_readonly)
25259 return;
25260
25261+ start = ktla_ktva(start);
25262 pr_debug("Set kernel text: %lx - %lx for read write\n",
25263 start, start+size);
25264
25265@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25266 if (!kernel_set_to_readonly)
25267 return;
25268
25269+ start = ktla_ktva(start);
25270 pr_debug("Set kernel text: %lx - %lx for read only\n",
25271 start, start+size);
25272
25273@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25274 unsigned long start = PFN_ALIGN(_text);
25275 unsigned long size = PFN_ALIGN(_etext) - start;
25276
25277+ start = ktla_ktva(start);
25278 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25279 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25280 size >> 10);
25281diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25282index 7d095ad..25d2549 100644
25283--- a/arch/x86/mm/init_64.c
25284+++ b/arch/x86/mm/init_64.c
25285@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25286 pmd = fill_pmd(pud, vaddr);
25287 pte = fill_pte(pmd, vaddr);
25288
25289+ pax_open_kernel();
25290 set_pte(pte, new_pte);
25291+ pax_close_kernel();
25292
25293 /*
25294 * It's enough to flush this one mapping.
25295@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25296 pgd = pgd_offset_k((unsigned long)__va(phys));
25297 if (pgd_none(*pgd)) {
25298 pud = (pud_t *) spp_getpage();
25299- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25300- _PAGE_USER));
25301+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25302 }
25303 pud = pud_offset(pgd, (unsigned long)__va(phys));
25304 if (pud_none(*pud)) {
25305 pmd = (pmd_t *) spp_getpage();
25306- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25307- _PAGE_USER));
25308+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25309 }
25310 pmd = pmd_offset(pud, phys);
25311 BUG_ON(!pmd_none(*pmd));
25312@@ -675,6 +675,12 @@ void __init mem_init(void)
25313
25314 pci_iommu_alloc();
25315
25316+#ifdef CONFIG_PAX_PER_CPU_PGD
25317+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25318+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25319+ KERNEL_PGD_PTRS);
25320+#endif
25321+
25322 /* clear_bss() already clear the empty_zero_page */
25323
25324 reservedpages = 0;
25325@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25326 static struct vm_area_struct gate_vma = {
25327 .vm_start = VSYSCALL_START,
25328 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25329- .vm_page_prot = PAGE_READONLY_EXEC,
25330- .vm_flags = VM_READ | VM_EXEC
25331+ .vm_page_prot = PAGE_READONLY,
25332+ .vm_flags = VM_READ
25333 };
25334
25335 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25336@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25337
25338 const char *arch_vma_name(struct vm_area_struct *vma)
25339 {
25340- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25341+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25342 return "[vdso]";
25343 if (vma == &gate_vma)
25344 return "[vsyscall]";
25345diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25346index 84e236c..69bd3f6 100644
25347--- a/arch/x86/mm/iomap_32.c
25348+++ b/arch/x86/mm/iomap_32.c
25349@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25350 debug_kmap_atomic(type);
25351 idx = type + KM_TYPE_NR * smp_processor_id();
25352 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25353+
25354+ pax_open_kernel();
25355 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25356+ pax_close_kernel();
25357+
25358 arch_flush_lazy_mmu_mode();
25359
25360 return (void *)vaddr;
25361diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25362index 2feb9bd..ab91e7b 100644
25363--- a/arch/x86/mm/ioremap.c
25364+++ b/arch/x86/mm/ioremap.c
25365@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25366 * Second special case: Some BIOSen report the PC BIOS
25367 * area (640->1Mb) as ram even though it is not.
25368 */
25369- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25370- pagenr < (BIOS_END >> PAGE_SHIFT))
25371+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25372+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25373 return 0;
25374
25375 for (i = 0; i < e820.nr_map; i++) {
25376@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25377 /*
25378 * Don't allow anybody to remap normal RAM that we're using..
25379 */
25380- for (pfn = phys_addr >> PAGE_SHIFT;
25381- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25382- pfn++) {
25383-
25384+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25385 int is_ram = page_is_ram(pfn);
25386
25387- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25388+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25389 return NULL;
25390 WARN_ON_ONCE(is_ram);
25391 }
25392@@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25393
25394 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25395 if (page_is_ram(start >> PAGE_SHIFT))
25396+#ifdef CONFIG_HIGHMEM
25397+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25398+#endif
25399 return __va(phys);
25400
25401 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
25402@@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
25403 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25404
25405 static __initdata int after_paging_init;
25406-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25407+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25408
25409 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25410 {
25411@@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
25412 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25413
25414 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25415- memset(bm_pte, 0, sizeof(bm_pte));
25416- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25417+ pmd_populate_user(&init_mm, pmd, bm_pte);
25418
25419 /*
25420 * The boot-ioremap range spans multiple pmds, for which
25421diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25422index 8cc1833..1abbc5b 100644
25423--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25424+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25425@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25426 * memory (e.g. tracked pages)? For now, we need this to avoid
25427 * invoking kmemcheck for PnP BIOS calls.
25428 */
25429- if (regs->flags & X86_VM_MASK)
25430+ if (v8086_mode(regs))
25431 return false;
25432- if (regs->cs != __KERNEL_CS)
25433+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25434 return false;
25435
25436 pte = kmemcheck_pte_lookup(address);
25437diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25438index c9e57af..07a321b 100644
25439--- a/arch/x86/mm/mmap.c
25440+++ b/arch/x86/mm/mmap.c
25441@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25442 * Leave an at least ~128 MB hole with possible stack randomization.
25443 */
25444 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25445-#define MAX_GAP (TASK_SIZE/6*5)
25446+#define MAX_GAP (pax_task_size/6*5)
25447
25448 /*
25449 * True on X86_32 or when emulating IA32 on X86_64
25450@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25451 return rnd << PAGE_SHIFT;
25452 }
25453
25454-static unsigned long mmap_base(void)
25455+static unsigned long mmap_base(struct mm_struct *mm)
25456 {
25457 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25458+ unsigned long pax_task_size = TASK_SIZE;
25459+
25460+#ifdef CONFIG_PAX_SEGMEXEC
25461+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25462+ pax_task_size = SEGMEXEC_TASK_SIZE;
25463+#endif
25464
25465 if (gap < MIN_GAP)
25466 gap = MIN_GAP;
25467 else if (gap > MAX_GAP)
25468 gap = MAX_GAP;
25469
25470- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25471+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25472 }
25473
25474 /*
25475 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25476 * does, but not when emulating X86_32
25477 */
25478-static unsigned long mmap_legacy_base(void)
25479+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25480 {
25481- if (mmap_is_ia32())
25482+ if (mmap_is_ia32()) {
25483+
25484+#ifdef CONFIG_PAX_SEGMEXEC
25485+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25486+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25487+ else
25488+#endif
25489+
25490 return TASK_UNMAPPED_BASE;
25491- else
25492+ } else
25493 return TASK_UNMAPPED_BASE + mmap_rnd();
25494 }
25495
25496@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25497 void arch_pick_mmap_layout(struct mm_struct *mm)
25498 {
25499 if (mmap_is_legacy()) {
25500- mm->mmap_base = mmap_legacy_base();
25501+ mm->mmap_base = mmap_legacy_base(mm);
25502+
25503+#ifdef CONFIG_PAX_RANDMMAP
25504+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25505+ mm->mmap_base += mm->delta_mmap;
25506+#endif
25507+
25508 mm->get_unmapped_area = arch_get_unmapped_area;
25509 mm->unmap_area = arch_unmap_area;
25510 } else {
25511- mm->mmap_base = mmap_base();
25512+ mm->mmap_base = mmap_base(mm);
25513+
25514+#ifdef CONFIG_PAX_RANDMMAP
25515+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25516+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25517+#endif
25518+
25519 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25520 mm->unmap_area = arch_unmap_area_topdown;
25521 }
25522diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25523index 132772a..b961f11 100644
25524--- a/arch/x86/mm/mmio-mod.c
25525+++ b/arch/x86/mm/mmio-mod.c
25526@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25527 break;
25528 default:
25529 {
25530- unsigned char *ip = (unsigned char *)instptr;
25531+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25532 my_trace->opcode = MMIO_UNKNOWN_OP;
25533 my_trace->width = 0;
25534 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25535@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25536 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25537 void __iomem *addr)
25538 {
25539- static atomic_t next_id;
25540+ static atomic_unchecked_t next_id;
25541 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25542 /* These are page-unaligned. */
25543 struct mmiotrace_map map = {
25544@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25545 .private = trace
25546 },
25547 .phys = offset,
25548- .id = atomic_inc_return(&next_id)
25549+ .id = atomic_inc_return_unchecked(&next_id)
25550 };
25551 map.map_id = trace->id;
25552
25553diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25554index d253006..e56dd6a 100644
25555--- a/arch/x86/mm/numa_32.c
25556+++ b/arch/x86/mm/numa_32.c
25557@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25558 }
25559 #endif
25560
25561-extern unsigned long find_max_low_pfn(void);
25562 extern unsigned long highend_pfn, highstart_pfn;
25563
25564 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25565diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25566index e1d1069..2251ff3 100644
25567--- a/arch/x86/mm/pageattr-test.c
25568+++ b/arch/x86/mm/pageattr-test.c
25569@@ -36,7 +36,7 @@ enum {
25570
25571 static int pte_testbit(pte_t pte)
25572 {
25573- return pte_flags(pte) & _PAGE_UNUSED1;
25574+ return pte_flags(pte) & _PAGE_CPA_TEST;
25575 }
25576
25577 struct split_state {
25578diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25579index dd38bfb..8c12306 100644
25580--- a/arch/x86/mm/pageattr.c
25581+++ b/arch/x86/mm/pageattr.c
25582@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25583 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25584 */
25585 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25586- pgprot_val(forbidden) |= _PAGE_NX;
25587+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25588
25589 /*
25590 * The kernel text needs to be executable for obvious reasons
25591 * Does not cover __inittext since that is gone later on. On
25592 * 64bit we do not enforce !NX on the low mapping
25593 */
25594- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25595- pgprot_val(forbidden) |= _PAGE_NX;
25596+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25597+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25598
25599+#ifdef CONFIG_DEBUG_RODATA
25600 /*
25601 * The .rodata section needs to be read-only. Using the pfn
25602 * catches all aliases.
25603@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25604 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25605 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25606 pgprot_val(forbidden) |= _PAGE_RW;
25607+#endif
25608+
25609+#ifdef CONFIG_PAX_KERNEXEC
25610+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25611+ pgprot_val(forbidden) |= _PAGE_RW;
25612+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25613+ }
25614+#endif
25615
25616 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25617
25618@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25619 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25620 {
25621 /* change init_mm */
25622+ pax_open_kernel();
25623 set_pte_atomic(kpte, pte);
25624+
25625 #ifdef CONFIG_X86_32
25626 if (!SHARED_KERNEL_PMD) {
25627+
25628+#ifdef CONFIG_PAX_PER_CPU_PGD
25629+ unsigned long cpu;
25630+#else
25631 struct page *page;
25632+#endif
25633
25634+#ifdef CONFIG_PAX_PER_CPU_PGD
25635+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
25636+ pgd_t *pgd = get_cpu_pgd(cpu);
25637+#else
25638 list_for_each_entry(page, &pgd_list, lru) {
25639- pgd_t *pgd;
25640+ pgd_t *pgd = (pgd_t *)page_address(page);
25641+#endif
25642+
25643 pud_t *pud;
25644 pmd_t *pmd;
25645
25646- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25647+ pgd += pgd_index(address);
25648 pud = pud_offset(pgd, address);
25649 pmd = pmd_offset(pud, address);
25650 set_pte_atomic((pte_t *)pmd, pte);
25651 }
25652 }
25653 #endif
25654+ pax_close_kernel();
25655 }
25656
25657 static int
25658diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25659index e78cd0e..de0a817 100644
25660--- a/arch/x86/mm/pat.c
25661+++ b/arch/x86/mm/pat.c
25662@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25663
25664 conflict:
25665 printk(KERN_INFO "%s:%d conflicting memory types "
25666- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25667+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25668 new->end, cattr_name(new->type), cattr_name(entry->type));
25669 return -EBUSY;
25670 }
25671@@ -559,7 +559,7 @@ unlock_ret:
25672
25673 if (err) {
25674 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25675- current->comm, current->pid, start, end);
25676+ current->comm, task_pid_nr(current), start, end);
25677 }
25678
25679 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25680@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25681 while (cursor < to) {
25682 if (!devmem_is_allowed(pfn)) {
25683 printk(KERN_INFO
25684- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25685- current->comm, from, to);
25686+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25687+ current->comm, from, to, cursor);
25688 return 0;
25689 }
25690 cursor += PAGE_SIZE;
25691@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25692 printk(KERN_INFO
25693 "%s:%d ioremap_change_attr failed %s "
25694 "for %Lx-%Lx\n",
25695- current->comm, current->pid,
25696+ current->comm, task_pid_nr(current),
25697 cattr_name(flags),
25698 base, (unsigned long long)(base + size));
25699 return -EINVAL;
25700@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25701 free_memtype(paddr, paddr + size);
25702 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25703 " for %Lx-%Lx, got %s\n",
25704- current->comm, current->pid,
25705+ current->comm, task_pid_nr(current),
25706 cattr_name(want_flags),
25707 (unsigned long long)paddr,
25708 (unsigned long long)(paddr + size),
25709diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25710index df3d5c8..c2223e1 100644
25711--- a/arch/x86/mm/pf_in.c
25712+++ b/arch/x86/mm/pf_in.c
25713@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25714 int i;
25715 enum reason_type rv = OTHERS;
25716
25717- p = (unsigned char *)ins_addr;
25718+ p = (unsigned char *)ktla_ktva(ins_addr);
25719 p += skip_prefix(p, &prf);
25720 p += get_opcode(p, &opcode);
25721
25722@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25723 struct prefix_bits prf;
25724 int i;
25725
25726- p = (unsigned char *)ins_addr;
25727+ p = (unsigned char *)ktla_ktva(ins_addr);
25728 p += skip_prefix(p, &prf);
25729 p += get_opcode(p, &opcode);
25730
25731@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25732 struct prefix_bits prf;
25733 int i;
25734
25735- p = (unsigned char *)ins_addr;
25736+ p = (unsigned char *)ktla_ktva(ins_addr);
25737 p += skip_prefix(p, &prf);
25738 p += get_opcode(p, &opcode);
25739
25740@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25741 int i;
25742 unsigned long rv;
25743
25744- p = (unsigned char *)ins_addr;
25745+ p = (unsigned char *)ktla_ktva(ins_addr);
25746 p += skip_prefix(p, &prf);
25747 p += get_opcode(p, &opcode);
25748 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25749@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25750 int i;
25751 unsigned long rv;
25752
25753- p = (unsigned char *)ins_addr;
25754+ p = (unsigned char *)ktla_ktva(ins_addr);
25755 p += skip_prefix(p, &prf);
25756 p += get_opcode(p, &opcode);
25757 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25758diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25759index e0e6fad..6b90017 100644
25760--- a/arch/x86/mm/pgtable.c
25761+++ b/arch/x86/mm/pgtable.c
25762@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25763 list_del(&page->lru);
25764 }
25765
25766-#define UNSHARED_PTRS_PER_PGD \
25767- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25768+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25769+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25770
25771+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25772+{
25773+ while (count--)
25774+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25775+}
25776+#endif
25777+
25778+#ifdef CONFIG_PAX_PER_CPU_PGD
25779+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25780+{
25781+ while (count--)
25782+
25783+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25784+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
25785+#else
25786+ *dst++ = *src++;
25787+#endif
25788+
25789+}
25790+#endif
25791+
25792+#ifdef CONFIG_X86_64
25793+#define pxd_t pud_t
25794+#define pyd_t pgd_t
25795+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25796+#define pxd_free(mm, pud) pud_free((mm), (pud))
25797+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25798+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
25799+#define PYD_SIZE PGDIR_SIZE
25800+#else
25801+#define pxd_t pmd_t
25802+#define pyd_t pud_t
25803+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25804+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25805+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25806+#define pyd_offset(mm ,address) pud_offset((mm), (address))
25807+#define PYD_SIZE PUD_SIZE
25808+#endif
25809+
25810+#ifdef CONFIG_PAX_PER_CPU_PGD
25811+static inline void pgd_ctor(pgd_t *pgd) {}
25812+static inline void pgd_dtor(pgd_t *pgd) {}
25813+#else
25814 static void pgd_ctor(pgd_t *pgd)
25815 {
25816 /* If the pgd points to a shared pagetable level (either the
25817@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
25818 pgd_list_del(pgd);
25819 spin_unlock_irqrestore(&pgd_lock, flags);
25820 }
25821+#endif
25822
25823 /*
25824 * List of all pgd's needed for non-PAE so it can invalidate entries
25825@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
25826 * -- wli
25827 */
25828
25829-#ifdef CONFIG_X86_PAE
25830+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25831 /*
25832 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25833 * updating the top-level pagetable entries to guarantee the
25834@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25835 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25836 * and initialize the kernel pmds here.
25837 */
25838-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25839+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25840
25841 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25842 {
25843@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25844 */
25845 flush_tlb_mm(mm);
25846 }
25847+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25848+#define PREALLOCATED_PXDS USER_PGD_PTRS
25849 #else /* !CONFIG_X86_PAE */
25850
25851 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25852-#define PREALLOCATED_PMDS 0
25853+#define PREALLOCATED_PXDS 0
25854
25855 #endif /* CONFIG_X86_PAE */
25856
25857-static void free_pmds(pmd_t *pmds[])
25858+static void free_pxds(pxd_t *pxds[])
25859 {
25860 int i;
25861
25862- for(i = 0; i < PREALLOCATED_PMDS; i++)
25863- if (pmds[i])
25864- free_page((unsigned long)pmds[i]);
25865+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25866+ if (pxds[i])
25867+ free_page((unsigned long)pxds[i]);
25868 }
25869
25870-static int preallocate_pmds(pmd_t *pmds[])
25871+static int preallocate_pxds(pxd_t *pxds[])
25872 {
25873 int i;
25874 bool failed = false;
25875
25876- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25877- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25878- if (pmd == NULL)
25879+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25880+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25881+ if (pxd == NULL)
25882 failed = true;
25883- pmds[i] = pmd;
25884+ pxds[i] = pxd;
25885 }
25886
25887 if (failed) {
25888- free_pmds(pmds);
25889+ free_pxds(pxds);
25890 return -ENOMEM;
25891 }
25892
25893@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
25894 * preallocate which never got a corresponding vma will need to be
25895 * freed manually.
25896 */
25897-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25898+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25899 {
25900 int i;
25901
25902- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25903+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25904 pgd_t pgd = pgdp[i];
25905
25906 if (pgd_val(pgd) != 0) {
25907- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25908+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25909
25910- pgdp[i] = native_make_pgd(0);
25911+ set_pgd(pgdp + i, native_make_pgd(0));
25912
25913- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25914- pmd_free(mm, pmd);
25915+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25916+ pxd_free(mm, pxd);
25917 }
25918 }
25919 }
25920
25921-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25922+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25923 {
25924- pud_t *pud;
25925+ pyd_t *pyd;
25926 unsigned long addr;
25927 int i;
25928
25929- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25930+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25931 return;
25932
25933- pud = pud_offset(pgd, 0);
25934+#ifdef CONFIG_X86_64
25935+ pyd = pyd_offset(mm, 0L);
25936+#else
25937+ pyd = pyd_offset(pgd, 0L);
25938+#endif
25939
25940- for (addr = i = 0; i < PREALLOCATED_PMDS;
25941- i++, pud++, addr += PUD_SIZE) {
25942- pmd_t *pmd = pmds[i];
25943+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25944+ i++, pyd++, addr += PYD_SIZE) {
25945+ pxd_t *pxd = pxds[i];
25946
25947 if (i >= KERNEL_PGD_BOUNDARY)
25948- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25949- sizeof(pmd_t) * PTRS_PER_PMD);
25950+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25951+ sizeof(pxd_t) * PTRS_PER_PMD);
25952
25953- pud_populate(mm, pud, pmd);
25954+ pyd_populate(mm, pyd, pxd);
25955 }
25956 }
25957
25958 pgd_t *pgd_alloc(struct mm_struct *mm)
25959 {
25960 pgd_t *pgd;
25961- pmd_t *pmds[PREALLOCATED_PMDS];
25962+ pxd_t *pxds[PREALLOCATED_PXDS];
25963+
25964 unsigned long flags;
25965
25966 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25967@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25968
25969 mm->pgd = pgd;
25970
25971- if (preallocate_pmds(pmds) != 0)
25972+ if (preallocate_pxds(pxds) != 0)
25973 goto out_free_pgd;
25974
25975 if (paravirt_pgd_alloc(mm) != 0)
25976- goto out_free_pmds;
25977+ goto out_free_pxds;
25978
25979 /*
25980 * Make sure that pre-populating the pmds is atomic with
25981@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25982 spin_lock_irqsave(&pgd_lock, flags);
25983
25984 pgd_ctor(pgd);
25985- pgd_prepopulate_pmd(mm, pgd, pmds);
25986+ pgd_prepopulate_pxd(mm, pgd, pxds);
25987
25988 spin_unlock_irqrestore(&pgd_lock, flags);
25989
25990 return pgd;
25991
25992-out_free_pmds:
25993- free_pmds(pmds);
25994+out_free_pxds:
25995+ free_pxds(pxds);
25996 out_free_pgd:
25997 free_page((unsigned long)pgd);
25998 out:
25999@@ -287,7 +338,7 @@ out:
26000
26001 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26002 {
26003- pgd_mop_up_pmds(mm, pgd);
26004+ pgd_mop_up_pxds(mm, pgd);
26005 pgd_dtor(pgd);
26006 paravirt_pgd_free(mm, pgd);
26007 free_page((unsigned long)pgd);
26008diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26009index 46c8834..fcab43d 100644
26010--- a/arch/x86/mm/pgtable_32.c
26011+++ b/arch/x86/mm/pgtable_32.c
26012@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26013 return;
26014 }
26015 pte = pte_offset_kernel(pmd, vaddr);
26016+
26017+ pax_open_kernel();
26018 if (pte_val(pteval))
26019 set_pte_at(&init_mm, vaddr, pte, pteval);
26020 else
26021 pte_clear(&init_mm, vaddr, pte);
26022+ pax_close_kernel();
26023
26024 /*
26025 * It's enough to flush this one mapping.
26026diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26027index 513d8ed..978c161 100644
26028--- a/arch/x86/mm/setup_nx.c
26029+++ b/arch/x86/mm/setup_nx.c
26030@@ -4,11 +4,10 @@
26031
26032 #include <asm/pgtable.h>
26033
26034+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26035 int nx_enabled;
26036
26037-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26038-static int disable_nx __cpuinitdata;
26039-
26040+#ifndef CONFIG_PAX_PAGEEXEC
26041 /*
26042 * noexec = on|off
26043 *
26044@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26045 if (!str)
26046 return -EINVAL;
26047 if (!strncmp(str, "on", 2)) {
26048- __supported_pte_mask |= _PAGE_NX;
26049- disable_nx = 0;
26050+ nx_enabled = 1;
26051 } else if (!strncmp(str, "off", 3)) {
26052- disable_nx = 1;
26053- __supported_pte_mask &= ~_PAGE_NX;
26054+ nx_enabled = 0;
26055 }
26056 return 0;
26057 }
26058 early_param("noexec", noexec_setup);
26059 #endif
26060+#endif
26061
26062 #ifdef CONFIG_X86_PAE
26063 void __init set_nx(void)
26064 {
26065- unsigned int v[4], l, h;
26066+ if (!nx_enabled && cpu_has_nx) {
26067+ unsigned l, h;
26068
26069- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26070- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26071-
26072- if ((v[3] & (1 << 20)) && !disable_nx) {
26073- rdmsr(MSR_EFER, l, h);
26074- l |= EFER_NX;
26075- wrmsr(MSR_EFER, l, h);
26076- nx_enabled = 1;
26077- __supported_pte_mask |= _PAGE_NX;
26078- }
26079+ __supported_pte_mask &= ~_PAGE_NX;
26080+ rdmsr(MSR_EFER, l, h);
26081+ l &= ~EFER_NX;
26082+ wrmsr(MSR_EFER, l, h);
26083 }
26084 }
26085 #else
26086@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26087 unsigned long efer;
26088
26089 rdmsrl(MSR_EFER, efer);
26090- if (!(efer & EFER_NX) || disable_nx)
26091+ if (!(efer & EFER_NX) || !nx_enabled)
26092 __supported_pte_mask &= ~_PAGE_NX;
26093 }
26094 #endif
26095diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26096index 36fe08e..b123d3a 100644
26097--- a/arch/x86/mm/tlb.c
26098+++ b/arch/x86/mm/tlb.c
26099@@ -61,7 +61,11 @@ void leave_mm(int cpu)
26100 BUG();
26101 cpumask_clear_cpu(cpu,
26102 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26103+
26104+#ifndef CONFIG_PAX_PER_CPU_PGD
26105 load_cr3(swapper_pg_dir);
26106+#endif
26107+
26108 }
26109 EXPORT_SYMBOL_GPL(leave_mm);
26110
26111diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26112index 829edf0..672adb3 100644
26113--- a/arch/x86/oprofile/backtrace.c
26114+++ b/arch/x86/oprofile/backtrace.c
26115@@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26116 {
26117 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26118
26119- if (!user_mode_vm(regs)) {
26120+ if (!user_mode(regs)) {
26121 unsigned long stack = kernel_stack_pointer(regs);
26122 if (depth)
26123 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26124diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26125index e6a160a..36deff6 100644
26126--- a/arch/x86/oprofile/op_model_p4.c
26127+++ b/arch/x86/oprofile/op_model_p4.c
26128@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26129 #endif
26130 }
26131
26132-static int inline addr_increment(void)
26133+static inline int addr_increment(void)
26134 {
26135 #ifdef CONFIG_SMP
26136 return smp_num_siblings == 2 ? 2 : 1;
26137diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26138index 1331fcf..03901b2 100644
26139--- a/arch/x86/pci/common.c
26140+++ b/arch/x86/pci/common.c
26141@@ -31,8 +31,8 @@ int noioapicreroute = 1;
26142 int pcibios_last_bus = -1;
26143 unsigned long pirq_table_addr;
26144 struct pci_bus *pci_root_bus;
26145-struct pci_raw_ops *raw_pci_ops;
26146-struct pci_raw_ops *raw_pci_ext_ops;
26147+const struct pci_raw_ops *raw_pci_ops;
26148+const struct pci_raw_ops *raw_pci_ext_ops;
26149
26150 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26151 int reg, int len, u32 *val)
26152diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26153index 347d882..4baf6b6 100644
26154--- a/arch/x86/pci/direct.c
26155+++ b/arch/x86/pci/direct.c
26156@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26157
26158 #undef PCI_CONF1_ADDRESS
26159
26160-struct pci_raw_ops pci_direct_conf1 = {
26161+const struct pci_raw_ops pci_direct_conf1 = {
26162 .read = pci_conf1_read,
26163 .write = pci_conf1_write,
26164 };
26165@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26166
26167 #undef PCI_CONF2_ADDRESS
26168
26169-struct pci_raw_ops pci_direct_conf2 = {
26170+const struct pci_raw_ops pci_direct_conf2 = {
26171 .read = pci_conf2_read,
26172 .write = pci_conf2_write,
26173 };
26174@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26175 * This should be close to trivial, but it isn't, because there are buggy
26176 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26177 */
26178-static int __init pci_sanity_check(struct pci_raw_ops *o)
26179+static int __init pci_sanity_check(const struct pci_raw_ops *o)
26180 {
26181 u32 x = 0;
26182 int year, devfn;
26183diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26184index f10a7e9..0425342 100644
26185--- a/arch/x86/pci/mmconfig_32.c
26186+++ b/arch/x86/pci/mmconfig_32.c
26187@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26188 return 0;
26189 }
26190
26191-static struct pci_raw_ops pci_mmcfg = {
26192+static const struct pci_raw_ops pci_mmcfg = {
26193 .read = pci_mmcfg_read,
26194 .write = pci_mmcfg_write,
26195 };
26196diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26197index 94349f8..41600a7 100644
26198--- a/arch/x86/pci/mmconfig_64.c
26199+++ b/arch/x86/pci/mmconfig_64.c
26200@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26201 return 0;
26202 }
26203
26204-static struct pci_raw_ops pci_mmcfg = {
26205+static const struct pci_raw_ops pci_mmcfg = {
26206 .read = pci_mmcfg_read,
26207 .write = pci_mmcfg_write,
26208 };
26209diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26210index 8eb295e..86bd657 100644
26211--- a/arch/x86/pci/numaq_32.c
26212+++ b/arch/x86/pci/numaq_32.c
26213@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26214
26215 #undef PCI_CONF1_MQ_ADDRESS
26216
26217-static struct pci_raw_ops pci_direct_conf1_mq = {
26218+static const struct pci_raw_ops pci_direct_conf1_mq = {
26219 .read = pci_conf1_mq_read,
26220 .write = pci_conf1_mq_write
26221 };
26222diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26223index b889d82..5a58a0a 100644
26224--- a/arch/x86/pci/olpc.c
26225+++ b/arch/x86/pci/olpc.c
26226@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26227 return 0;
26228 }
26229
26230-static struct pci_raw_ops pci_olpc_conf = {
26231+static const struct pci_raw_ops pci_olpc_conf = {
26232 .read = pci_olpc_read,
26233 .write = pci_olpc_write,
26234 };
26235diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26236index 1c975cc..ffd0536 100644
26237--- a/arch/x86/pci/pcbios.c
26238+++ b/arch/x86/pci/pcbios.c
26239@@ -56,50 +56,93 @@ union bios32 {
26240 static struct {
26241 unsigned long address;
26242 unsigned short segment;
26243-} bios32_indirect = { 0, __KERNEL_CS };
26244+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26245
26246 /*
26247 * Returns the entry point for the given service, NULL on error
26248 */
26249
26250-static unsigned long bios32_service(unsigned long service)
26251+static unsigned long __devinit bios32_service(unsigned long service)
26252 {
26253 unsigned char return_code; /* %al */
26254 unsigned long address; /* %ebx */
26255 unsigned long length; /* %ecx */
26256 unsigned long entry; /* %edx */
26257 unsigned long flags;
26258+ struct desc_struct d, *gdt;
26259
26260 local_irq_save(flags);
26261- __asm__("lcall *(%%edi); cld"
26262+
26263+ gdt = get_cpu_gdt_table(smp_processor_id());
26264+
26265+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26266+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26267+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26268+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26269+
26270+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26271 : "=a" (return_code),
26272 "=b" (address),
26273 "=c" (length),
26274 "=d" (entry)
26275 : "0" (service),
26276 "1" (0),
26277- "D" (&bios32_indirect));
26278+ "D" (&bios32_indirect),
26279+ "r"(__PCIBIOS_DS)
26280+ : "memory");
26281+
26282+ pax_open_kernel();
26283+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26284+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26285+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26286+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26287+ pax_close_kernel();
26288+
26289 local_irq_restore(flags);
26290
26291 switch (return_code) {
26292- case 0:
26293- return address + entry;
26294- case 0x80: /* Not present */
26295- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26296- return 0;
26297- default: /* Shouldn't happen */
26298- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26299- service, return_code);
26300+ case 0: {
26301+ int cpu;
26302+ unsigned char flags;
26303+
26304+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26305+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26306+ printk(KERN_WARNING "bios32_service: not valid\n");
26307 return 0;
26308+ }
26309+ address = address + PAGE_OFFSET;
26310+ length += 16UL; /* some BIOSs underreport this... */
26311+ flags = 4;
26312+ if (length >= 64*1024*1024) {
26313+ length >>= PAGE_SHIFT;
26314+ flags |= 8;
26315+ }
26316+
26317+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
26318+ gdt = get_cpu_gdt_table(cpu);
26319+ pack_descriptor(&d, address, length, 0x9b, flags);
26320+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26321+ pack_descriptor(&d, address, length, 0x93, flags);
26322+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26323+ }
26324+ return entry;
26325+ }
26326+ case 0x80: /* Not present */
26327+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26328+ return 0;
26329+ default: /* Shouldn't happen */
26330+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26331+ service, return_code);
26332+ return 0;
26333 }
26334 }
26335
26336 static struct {
26337 unsigned long address;
26338 unsigned short segment;
26339-} pci_indirect = { 0, __KERNEL_CS };
26340+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26341
26342-static int pci_bios_present;
26343+static int pci_bios_present __read_only;
26344
26345 static int __devinit check_pcibios(void)
26346 {
26347@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26348 unsigned long flags, pcibios_entry;
26349
26350 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26351- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26352+ pci_indirect.address = pcibios_entry;
26353
26354 local_irq_save(flags);
26355- __asm__(
26356- "lcall *(%%edi); cld\n\t"
26357+ __asm__("movw %w6, %%ds\n\t"
26358+ "lcall *%%ss:(%%edi); cld\n\t"
26359+ "push %%ss\n\t"
26360+ "pop %%ds\n\t"
26361 "jc 1f\n\t"
26362 "xor %%ah, %%ah\n"
26363 "1:"
26364@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26365 "=b" (ebx),
26366 "=c" (ecx)
26367 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26368- "D" (&pci_indirect)
26369+ "D" (&pci_indirect),
26370+ "r" (__PCIBIOS_DS)
26371 : "memory");
26372 local_irq_restore(flags);
26373
26374@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26375
26376 switch (len) {
26377 case 1:
26378- __asm__("lcall *(%%esi); cld\n\t"
26379+ __asm__("movw %w6, %%ds\n\t"
26380+ "lcall *%%ss:(%%esi); cld\n\t"
26381+ "push %%ss\n\t"
26382+ "pop %%ds\n\t"
26383 "jc 1f\n\t"
26384 "xor %%ah, %%ah\n"
26385 "1:"
26386@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26387 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26388 "b" (bx),
26389 "D" ((long)reg),
26390- "S" (&pci_indirect));
26391+ "S" (&pci_indirect),
26392+ "r" (__PCIBIOS_DS));
26393 /*
26394 * Zero-extend the result beyond 8 bits, do not trust the
26395 * BIOS having done it:
26396@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26397 *value &= 0xff;
26398 break;
26399 case 2:
26400- __asm__("lcall *(%%esi); cld\n\t"
26401+ __asm__("movw %w6, %%ds\n\t"
26402+ "lcall *%%ss:(%%esi); cld\n\t"
26403+ "push %%ss\n\t"
26404+ "pop %%ds\n\t"
26405 "jc 1f\n\t"
26406 "xor %%ah, %%ah\n"
26407 "1:"
26408@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26409 : "1" (PCIBIOS_READ_CONFIG_WORD),
26410 "b" (bx),
26411 "D" ((long)reg),
26412- "S" (&pci_indirect));
26413+ "S" (&pci_indirect),
26414+ "r" (__PCIBIOS_DS));
26415 /*
26416 * Zero-extend the result beyond 16 bits, do not trust the
26417 * BIOS having done it:
26418@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26419 *value &= 0xffff;
26420 break;
26421 case 4:
26422- __asm__("lcall *(%%esi); cld\n\t"
26423+ __asm__("movw %w6, %%ds\n\t"
26424+ "lcall *%%ss:(%%esi); cld\n\t"
26425+ "push %%ss\n\t"
26426+ "pop %%ds\n\t"
26427 "jc 1f\n\t"
26428 "xor %%ah, %%ah\n"
26429 "1:"
26430@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26431 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26432 "b" (bx),
26433 "D" ((long)reg),
26434- "S" (&pci_indirect));
26435+ "S" (&pci_indirect),
26436+ "r" (__PCIBIOS_DS));
26437 break;
26438 }
26439
26440@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26441
26442 switch (len) {
26443 case 1:
26444- __asm__("lcall *(%%esi); cld\n\t"
26445+ __asm__("movw %w6, %%ds\n\t"
26446+ "lcall *%%ss:(%%esi); cld\n\t"
26447+ "push %%ss\n\t"
26448+ "pop %%ds\n\t"
26449 "jc 1f\n\t"
26450 "xor %%ah, %%ah\n"
26451 "1:"
26452@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26453 "c" (value),
26454 "b" (bx),
26455 "D" ((long)reg),
26456- "S" (&pci_indirect));
26457+ "S" (&pci_indirect),
26458+ "r" (__PCIBIOS_DS));
26459 break;
26460 case 2:
26461- __asm__("lcall *(%%esi); cld\n\t"
26462+ __asm__("movw %w6, %%ds\n\t"
26463+ "lcall *%%ss:(%%esi); cld\n\t"
26464+ "push %%ss\n\t"
26465+ "pop %%ds\n\t"
26466 "jc 1f\n\t"
26467 "xor %%ah, %%ah\n"
26468 "1:"
26469@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26470 "c" (value),
26471 "b" (bx),
26472 "D" ((long)reg),
26473- "S" (&pci_indirect));
26474+ "S" (&pci_indirect),
26475+ "r" (__PCIBIOS_DS));
26476 break;
26477 case 4:
26478- __asm__("lcall *(%%esi); cld\n\t"
26479+ __asm__("movw %w6, %%ds\n\t"
26480+ "lcall *%%ss:(%%esi); cld\n\t"
26481+ "push %%ss\n\t"
26482+ "pop %%ds\n\t"
26483 "jc 1f\n\t"
26484 "xor %%ah, %%ah\n"
26485 "1:"
26486@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26487 "c" (value),
26488 "b" (bx),
26489 "D" ((long)reg),
26490- "S" (&pci_indirect));
26491+ "S" (&pci_indirect),
26492+ "r" (__PCIBIOS_DS));
26493 break;
26494 }
26495
26496@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26497 * Function table for BIOS32 access
26498 */
26499
26500-static struct pci_raw_ops pci_bios_access = {
26501+static const struct pci_raw_ops pci_bios_access = {
26502 .read = pci_bios_read,
26503 .write = pci_bios_write
26504 };
26505@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26506 * Try to find PCI BIOS.
26507 */
26508
26509-static struct pci_raw_ops * __devinit pci_find_bios(void)
26510+static const struct pci_raw_ops * __devinit pci_find_bios(void)
26511 {
26512 union bios32 *check;
26513 unsigned char sum;
26514@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26515
26516 DBG("PCI: Fetching IRQ routing table... ");
26517 __asm__("push %%es\n\t"
26518+ "movw %w8, %%ds\n\t"
26519 "push %%ds\n\t"
26520 "pop %%es\n\t"
26521- "lcall *(%%esi); cld\n\t"
26522+ "lcall *%%ss:(%%esi); cld\n\t"
26523 "pop %%es\n\t"
26524+ "push %%ss\n\t"
26525+ "pop %%ds\n"
26526 "jc 1f\n\t"
26527 "xor %%ah, %%ah\n"
26528 "1:"
26529@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26530 "1" (0),
26531 "D" ((long) &opt),
26532 "S" (&pci_indirect),
26533- "m" (opt)
26534+ "m" (opt),
26535+ "r" (__PCIBIOS_DS)
26536 : "memory");
26537 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26538 if (ret & 0xff00)
26539@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26540 {
26541 int ret;
26542
26543- __asm__("lcall *(%%esi); cld\n\t"
26544+ __asm__("movw %w5, %%ds\n\t"
26545+ "lcall *%%ss:(%%esi); cld\n\t"
26546+ "push %%ss\n\t"
26547+ "pop %%ds\n"
26548 "jc 1f\n\t"
26549 "xor %%ah, %%ah\n"
26550 "1:"
26551@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26552 : "0" (PCIBIOS_SET_PCI_HW_INT),
26553 "b" ((dev->bus->number << 8) | dev->devfn),
26554 "c" ((irq << 8) | (pin + 10)),
26555- "S" (&pci_indirect));
26556+ "S" (&pci_indirect),
26557+ "r" (__PCIBIOS_DS));
26558 return !(ret & 0xff00);
26559 }
26560 EXPORT_SYMBOL(pcibios_set_irq_routing);
26561diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26562index fa0f651..9d8f3d9 100644
26563--- a/arch/x86/power/cpu.c
26564+++ b/arch/x86/power/cpu.c
26565@@ -129,7 +129,7 @@ static void do_fpu_end(void)
26566 static void fix_processor_context(void)
26567 {
26568 int cpu = smp_processor_id();
26569- struct tss_struct *t = &per_cpu(init_tss, cpu);
26570+ struct tss_struct *t = init_tss + cpu;
26571
26572 set_tss_desc(cpu, t); /*
26573 * This just modifies memory; should not be
26574@@ -139,7 +139,9 @@ static void fix_processor_context(void)
26575 */
26576
26577 #ifdef CONFIG_X86_64
26578+ pax_open_kernel();
26579 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26580+ pax_close_kernel();
26581
26582 syscall_init(); /* This sets MSR_*STAR and related */
26583 #endif
26584diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26585index dd78ef6..f9d928d 100644
26586--- a/arch/x86/vdso/Makefile
26587+++ b/arch/x86/vdso/Makefile
26588@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26589 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26590 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26591
26592-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26593+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26594 GCOV_PROFILE := n
26595
26596 #
26597diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26598index ee55754..0013b2e 100644
26599--- a/arch/x86/vdso/vclock_gettime.c
26600+++ b/arch/x86/vdso/vclock_gettime.c
26601@@ -22,24 +22,48 @@
26602 #include <asm/hpet.h>
26603 #include <asm/unistd.h>
26604 #include <asm/io.h>
26605+#include <asm/fixmap.h>
26606 #include "vextern.h"
26607
26608 #define gtod vdso_vsyscall_gtod_data
26609
26610+notrace noinline long __vdso_fallback_time(long *t)
26611+{
26612+ long secs;
26613+ asm volatile("syscall"
26614+ : "=a" (secs)
26615+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26616+ return secs;
26617+}
26618+
26619 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26620 {
26621 long ret;
26622 asm("syscall" : "=a" (ret) :
26623- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26624+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26625 return ret;
26626 }
26627
26628+notrace static inline cycle_t __vdso_vread_hpet(void)
26629+{
26630+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26631+}
26632+
26633+notrace static inline cycle_t __vdso_vread_tsc(void)
26634+{
26635+ cycle_t ret = (cycle_t)vget_cycles();
26636+
26637+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26638+}
26639+
26640 notrace static inline long vgetns(void)
26641 {
26642 long v;
26643- cycles_t (*vread)(void);
26644- vread = gtod->clock.vread;
26645- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26646+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26647+ v = __vdso_vread_tsc();
26648+ else
26649+ v = __vdso_vread_hpet();
26650+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26651 return (v * gtod->clock.mult) >> gtod->clock.shift;
26652 }
26653
26654@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26655
26656 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26657 {
26658- if (likely(gtod->sysctl_enabled))
26659+ if (likely(gtod->sysctl_enabled &&
26660+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26661+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26662 switch (clock) {
26663 case CLOCK_REALTIME:
26664 if (likely(gtod->clock.vread))
26665@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26666 int clock_gettime(clockid_t, struct timespec *)
26667 __attribute__((weak, alias("__vdso_clock_gettime")));
26668
26669+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26670+{
26671+ long ret;
26672+ asm("syscall" : "=a" (ret) :
26673+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26674+ return ret;
26675+}
26676+
26677 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26678 {
26679- long ret;
26680- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26681+ if (likely(gtod->sysctl_enabled &&
26682+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26683+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26684+ {
26685 if (likely(tv != NULL)) {
26686 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26687 offsetof(struct timespec, tv_nsec) ||
26688@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26689 }
26690 return 0;
26691 }
26692- asm("syscall" : "=a" (ret) :
26693- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26694- return ret;
26695+ return __vdso_fallback_gettimeofday(tv, tz);
26696 }
26697 int gettimeofday(struct timeval *, struct timezone *)
26698 __attribute__((weak, alias("__vdso_gettimeofday")));
26699diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26700index 4e5dd3b..00ba15e 100644
26701--- a/arch/x86/vdso/vdso.lds.S
26702+++ b/arch/x86/vdso/vdso.lds.S
26703@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26704 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26705 #include "vextern.h"
26706 #undef VEXTERN
26707+
26708+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26709+VEXTERN(fallback_gettimeofday)
26710+VEXTERN(fallback_time)
26711+VEXTERN(getcpu)
26712+#undef VEXTERN
26713diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26714index 58bc00f..d53fb48 100644
26715--- a/arch/x86/vdso/vdso32-setup.c
26716+++ b/arch/x86/vdso/vdso32-setup.c
26717@@ -25,6 +25,7 @@
26718 #include <asm/tlbflush.h>
26719 #include <asm/vdso.h>
26720 #include <asm/proto.h>
26721+#include <asm/mman.h>
26722
26723 enum {
26724 VDSO_DISABLED = 0,
26725@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26726 void enable_sep_cpu(void)
26727 {
26728 int cpu = get_cpu();
26729- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26730+ struct tss_struct *tss = init_tss + cpu;
26731
26732 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26733 put_cpu();
26734@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26735 gate_vma.vm_start = FIXADDR_USER_START;
26736 gate_vma.vm_end = FIXADDR_USER_END;
26737 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26738- gate_vma.vm_page_prot = __P101;
26739+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26740 /*
26741 * Make sure the vDSO gets into every core dump.
26742 * Dumping its contents makes post-mortem fully interpretable later
26743@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26744 if (compat)
26745 addr = VDSO_HIGH_BASE;
26746 else {
26747- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26748+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26749 if (IS_ERR_VALUE(addr)) {
26750 ret = addr;
26751 goto up_fail;
26752 }
26753 }
26754
26755- current->mm->context.vdso = (void *)addr;
26756+ current->mm->context.vdso = addr;
26757
26758 if (compat_uses_vma || !compat) {
26759 /*
26760@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26761 }
26762
26763 current_thread_info()->sysenter_return =
26764- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26765+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26766
26767 up_fail:
26768 if (ret)
26769- current->mm->context.vdso = NULL;
26770+ current->mm->context.vdso = 0;
26771
26772 up_write(&mm->mmap_sem);
26773
26774@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26775
26776 const char *arch_vma_name(struct vm_area_struct *vma)
26777 {
26778- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26779+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26780 return "[vdso]";
26781+
26782+#ifdef CONFIG_PAX_SEGMEXEC
26783+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26784+ return "[vdso]";
26785+#endif
26786+
26787 return NULL;
26788 }
26789
26790@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26791 struct mm_struct *mm = tsk->mm;
26792
26793 /* Check to see if this task was created in compat vdso mode */
26794- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26795+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26796 return &gate_vma;
26797 return NULL;
26798 }
26799diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
26800index 1683ba2..48d07f3 100644
26801--- a/arch/x86/vdso/vextern.h
26802+++ b/arch/x86/vdso/vextern.h
26803@@ -11,6 +11,5 @@
26804 put into vextern.h and be referenced as a pointer with vdso prefix.
26805 The main kernel later fills in the values. */
26806
26807-VEXTERN(jiffies)
26808 VEXTERN(vgetcpu_mode)
26809 VEXTERN(vsyscall_gtod_data)
26810diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26811index 21e1aeb..2c0b3c4 100644
26812--- a/arch/x86/vdso/vma.c
26813+++ b/arch/x86/vdso/vma.c
26814@@ -17,8 +17,6 @@
26815 #include "vextern.h" /* Just for VMAGIC. */
26816 #undef VEXTERN
26817
26818-unsigned int __read_mostly vdso_enabled = 1;
26819-
26820 extern char vdso_start[], vdso_end[];
26821 extern unsigned short vdso_sync_cpuid;
26822
26823@@ -27,10 +25,8 @@ static unsigned vdso_size;
26824
26825 static inline void *var_ref(void *p, char *name)
26826 {
26827- if (*(void **)p != (void *)VMAGIC) {
26828- printk("VDSO: variable %s broken\n", name);
26829- vdso_enabled = 0;
26830- }
26831+ if (*(void **)p != (void *)VMAGIC)
26832+ panic("VDSO: variable %s broken\n", name);
26833 return p;
26834 }
26835
26836@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
26837 if (!vbase)
26838 goto oom;
26839
26840- if (memcmp(vbase, "\177ELF", 4)) {
26841- printk("VDSO: I'm broken; not ELF\n");
26842- vdso_enabled = 0;
26843- }
26844+ if (memcmp(vbase, ELFMAG, SELFMAG))
26845+ panic("VDSO: I'm broken; not ELF\n");
26846
26847 #define VEXTERN(x) \
26848 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
26849 #include "vextern.h"
26850 #undef VEXTERN
26851+ vunmap(vbase);
26852 return 0;
26853
26854 oom:
26855- printk("Cannot allocate vdso\n");
26856- vdso_enabled = 0;
26857- return -ENOMEM;
26858+ panic("Cannot allocate vdso\n");
26859 }
26860 __initcall(init_vdso_vars);
26861
26862@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26863 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26864 {
26865 struct mm_struct *mm = current->mm;
26866- unsigned long addr;
26867+ unsigned long addr = 0;
26868 int ret;
26869
26870- if (!vdso_enabled)
26871- return 0;
26872-
26873 down_write(&mm->mmap_sem);
26874+
26875+#ifdef CONFIG_PAX_RANDMMAP
26876+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26877+#endif
26878+
26879 addr = vdso_addr(mm->start_stack, vdso_size);
26880 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26881 if (IS_ERR_VALUE(addr)) {
26882@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26883 goto up_fail;
26884 }
26885
26886- current->mm->context.vdso = (void *)addr;
26887+ current->mm->context.vdso = addr;
26888
26889 ret = install_special_mapping(mm, addr, vdso_size,
26890 VM_READ|VM_EXEC|
26891@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26892 VM_ALWAYSDUMP,
26893 vdso_pages);
26894 if (ret) {
26895- current->mm->context.vdso = NULL;
26896+ current->mm->context.vdso = 0;
26897 goto up_fail;
26898 }
26899
26900@@ -132,10 +127,3 @@ up_fail:
26901 up_write(&mm->mmap_sem);
26902 return ret;
26903 }
26904-
26905-static __init int vdso_setup(char *s)
26906-{
26907- vdso_enabled = simple_strtoul(s, NULL, 0);
26908- return 0;
26909-}
26910-__setup("vdso=", vdso_setup);
26911diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26912index 0087b00..eecb34f 100644
26913--- a/arch/x86/xen/enlighten.c
26914+++ b/arch/x86/xen/enlighten.c
26915@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26916
26917 struct shared_info xen_dummy_shared_info;
26918
26919-void *xen_initial_gdt;
26920-
26921 /*
26922 * Point at some empty memory to start with. We map the real shared_info
26923 * page as soon as fixmap is up and running.
26924@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
26925
26926 preempt_disable();
26927
26928- start = __get_cpu_var(idt_desc).address;
26929+ start = (unsigned long)__get_cpu_var(idt_desc).address;
26930 end = start + __get_cpu_var(idt_desc).size + 1;
26931
26932 xen_mc_flush();
26933@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
26934 #endif
26935 };
26936
26937-static void xen_reboot(int reason)
26938+static __noreturn void xen_reboot(int reason)
26939 {
26940 struct sched_shutdown r = { .reason = reason };
26941
26942@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
26943 BUG();
26944 }
26945
26946-static void xen_restart(char *msg)
26947+static __noreturn void xen_restart(char *msg)
26948 {
26949 xen_reboot(SHUTDOWN_reboot);
26950 }
26951
26952-static void xen_emergency_restart(void)
26953+static __noreturn void xen_emergency_restart(void)
26954 {
26955 xen_reboot(SHUTDOWN_reboot);
26956 }
26957
26958-static void xen_machine_halt(void)
26959+static __noreturn void xen_machine_halt(void)
26960 {
26961 xen_reboot(SHUTDOWN_poweroff);
26962 }
26963@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
26964 */
26965 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26966
26967-#ifdef CONFIG_X86_64
26968 /* Work out if we support NX */
26969- check_efer();
26970+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26971+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26972+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26973+ unsigned l, h;
26974+
26975+#ifdef CONFIG_X86_PAE
26976+ nx_enabled = 1;
26977+#endif
26978+ __supported_pte_mask |= _PAGE_NX;
26979+ rdmsr(MSR_EFER, l, h);
26980+ l |= EFER_NX;
26981+ wrmsr(MSR_EFER, l, h);
26982+ }
26983 #endif
26984
26985 xen_setup_features();
26986@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
26987
26988 machine_ops = xen_machine_ops;
26989
26990- /*
26991- * The only reliable way to retain the initial address of the
26992- * percpu gdt_page is to remember it here, so we can go and
26993- * mark it RW later, when the initial percpu area is freed.
26994- */
26995- xen_initial_gdt = &per_cpu(gdt_page, 0);
26996-
26997 xen_smp_init();
26998
26999 pgd = (pgd_t *)xen_start_info->pt_base;
27000diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27001index 3f90a2c..2c2ad84 100644
27002--- a/arch/x86/xen/mmu.c
27003+++ b/arch/x86/xen/mmu.c
27004@@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27005 convert_pfn_mfn(init_level4_pgt);
27006 convert_pfn_mfn(level3_ident_pgt);
27007 convert_pfn_mfn(level3_kernel_pgt);
27008+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27009+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27010+ convert_pfn_mfn(level3_vmemmap_pgt);
27011
27012 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27013 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27014@@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27015 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27016 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27017 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27018+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27019+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27020+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27021 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27022+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27023 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27024 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27025
27026@@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27027 pv_mmu_ops.set_pud = xen_set_pud;
27028 #if PAGETABLE_LEVELS == 4
27029 pv_mmu_ops.set_pgd = xen_set_pgd;
27030+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27031 #endif
27032
27033 /* This will work as long as patching hasn't happened yet
27034@@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27035 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27036 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27037 .set_pgd = xen_set_pgd_hyper,
27038+ .set_pgd_batched = xen_set_pgd_hyper,
27039
27040 .alloc_pud = xen_alloc_pmd_init,
27041 .release_pud = xen_release_pmd_init,
27042diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27043index a96204a..fca9b8e 100644
27044--- a/arch/x86/xen/smp.c
27045+++ b/arch/x86/xen/smp.c
27046@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27047 {
27048 BUG_ON(smp_processor_id() != 0);
27049 native_smp_prepare_boot_cpu();
27050-
27051- /* We've switched to the "real" per-cpu gdt, so make sure the
27052- old memory can be recycled */
27053- make_lowmem_page_readwrite(xen_initial_gdt);
27054-
27055 xen_setup_vcpu_info_placement();
27056 }
27057
27058@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27059 gdt = get_cpu_gdt_table(cpu);
27060
27061 ctxt->flags = VGCF_IN_KERNEL;
27062- ctxt->user_regs.ds = __USER_DS;
27063- ctxt->user_regs.es = __USER_DS;
27064+ ctxt->user_regs.ds = __KERNEL_DS;
27065+ ctxt->user_regs.es = __KERNEL_DS;
27066 ctxt->user_regs.ss = __KERNEL_DS;
27067 #ifdef CONFIG_X86_32
27068 ctxt->user_regs.fs = __KERNEL_PERCPU;
27069- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27070+ savesegment(gs, ctxt->user_regs.gs);
27071 #else
27072 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27073 #endif
27074@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27075 int rc;
27076
27077 per_cpu(current_task, cpu) = idle;
27078+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27079 #ifdef CONFIG_X86_32
27080 irq_ctx_init(cpu);
27081 #else
27082 clear_tsk_thread_flag(idle, TIF_FORK);
27083- per_cpu(kernel_stack, cpu) =
27084- (unsigned long)task_stack_page(idle) -
27085- KERNEL_STACK_OFFSET + THREAD_SIZE;
27086+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27087 #endif
27088 xen_setup_runstate_info(cpu);
27089 xen_setup_timer(cpu);
27090diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27091index 9a95a9c..4f39e774 100644
27092--- a/arch/x86/xen/xen-asm_32.S
27093+++ b/arch/x86/xen/xen-asm_32.S
27094@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27095 ESP_OFFSET=4 # bytes pushed onto stack
27096
27097 /*
27098- * Store vcpu_info pointer for easy access. Do it this way to
27099- * avoid having to reload %fs
27100+ * Store vcpu_info pointer for easy access.
27101 */
27102 #ifdef CONFIG_SMP
27103- GET_THREAD_INFO(%eax)
27104- movl TI_cpu(%eax), %eax
27105- movl __per_cpu_offset(,%eax,4), %eax
27106- mov per_cpu__xen_vcpu(%eax), %eax
27107+ push %fs
27108+ mov $(__KERNEL_PERCPU), %eax
27109+ mov %eax, %fs
27110+ mov PER_CPU_VAR(xen_vcpu), %eax
27111+ pop %fs
27112 #else
27113 movl per_cpu__xen_vcpu, %eax
27114 #endif
27115diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27116index 1a5ff24..a187d40 100644
27117--- a/arch/x86/xen/xen-head.S
27118+++ b/arch/x86/xen/xen-head.S
27119@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27120 #ifdef CONFIG_X86_32
27121 mov %esi,xen_start_info
27122 mov $init_thread_union+THREAD_SIZE,%esp
27123+#ifdef CONFIG_SMP
27124+ movl $cpu_gdt_table,%edi
27125+ movl $__per_cpu_load,%eax
27126+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27127+ rorl $16,%eax
27128+ movb %al,__KERNEL_PERCPU + 4(%edi)
27129+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27130+ movl $__per_cpu_end - 1,%eax
27131+ subl $__per_cpu_start,%eax
27132+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27133+#endif
27134 #else
27135 mov %rsi,xen_start_info
27136 mov $init_thread_union+THREAD_SIZE,%rsp
27137diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27138index f9153a3..51eab3d 100644
27139--- a/arch/x86/xen/xen-ops.h
27140+++ b/arch/x86/xen/xen-ops.h
27141@@ -10,8 +10,6 @@
27142 extern const char xen_hypervisor_callback[];
27143 extern const char xen_failsafe_callback[];
27144
27145-extern void *xen_initial_gdt;
27146-
27147 struct trap_info;
27148 void xen_copy_trap_info(struct trap_info *traps);
27149
27150diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27151index 15c6308..96e83c2 100644
27152--- a/block/blk-integrity.c
27153+++ b/block/blk-integrity.c
27154@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27155 NULL,
27156 };
27157
27158-static struct sysfs_ops integrity_ops = {
27159+static const struct sysfs_ops integrity_ops = {
27160 .show = &integrity_attr_show,
27161 .store = &integrity_attr_store,
27162 };
27163diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27164index ca56420..f2fc409 100644
27165--- a/block/blk-iopoll.c
27166+++ b/block/blk-iopoll.c
27167@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27168 }
27169 EXPORT_SYMBOL(blk_iopoll_complete);
27170
27171-static void blk_iopoll_softirq(struct softirq_action *h)
27172+static void blk_iopoll_softirq(void)
27173 {
27174 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27175 int rearm = 0, budget = blk_iopoll_budget;
27176diff --git a/block/blk-map.c b/block/blk-map.c
27177index 30a7e51..0aeec6a 100644
27178--- a/block/blk-map.c
27179+++ b/block/blk-map.c
27180@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27181 * direct dma. else, set up kernel bounce buffers
27182 */
27183 uaddr = (unsigned long) ubuf;
27184- if (blk_rq_aligned(q, ubuf, len) && !map_data)
27185+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27186 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27187 else
27188 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27189@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27190 for (i = 0; i < iov_count; i++) {
27191 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27192
27193+ if (!iov[i].iov_len)
27194+ return -EINVAL;
27195+
27196 if (uaddr & queue_dma_alignment(q)) {
27197 unaligned = 1;
27198 break;
27199 }
27200- if (!iov[i].iov_len)
27201- return -EINVAL;
27202 }
27203
27204 if (unaligned || (q->dma_pad_mask & len) || map_data)
27205@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27206 if (!len || !kbuf)
27207 return -EINVAL;
27208
27209- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27210+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27211 if (do_copy)
27212 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27213 else
27214diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27215index ee9c216..58d410a 100644
27216--- a/block/blk-softirq.c
27217+++ b/block/blk-softirq.c
27218@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27219 * Softirq action handler - move entries to local list and loop over them
27220 * while passing them to the queue registered handler.
27221 */
27222-static void blk_done_softirq(struct softirq_action *h)
27223+static void blk_done_softirq(void)
27224 {
27225 struct list_head *cpu_list, local_list;
27226
27227diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27228index bb9c5ea..5330d48 100644
27229--- a/block/blk-sysfs.c
27230+++ b/block/blk-sysfs.c
27231@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27232 kmem_cache_free(blk_requestq_cachep, q);
27233 }
27234
27235-static struct sysfs_ops queue_sysfs_ops = {
27236+static const struct sysfs_ops queue_sysfs_ops = {
27237 .show = queue_attr_show,
27238 .store = queue_attr_store,
27239 };
27240diff --git a/block/bsg.c b/block/bsg.c
27241index 7154a7a..08ac2f0 100644
27242--- a/block/bsg.c
27243+++ b/block/bsg.c
27244@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27245 struct sg_io_v4 *hdr, struct bsg_device *bd,
27246 fmode_t has_write_perm)
27247 {
27248+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27249+ unsigned char *cmdptr;
27250+
27251 if (hdr->request_len > BLK_MAX_CDB) {
27252 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27253 if (!rq->cmd)
27254 return -ENOMEM;
27255- }
27256+ cmdptr = rq->cmd;
27257+ } else
27258+ cmdptr = tmpcmd;
27259
27260- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27261+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27262 hdr->request_len))
27263 return -EFAULT;
27264
27265+ if (cmdptr != rq->cmd)
27266+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27267+
27268 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27269 if (blk_verify_command(rq->cmd, has_write_perm))
27270 return -EPERM;
27271@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27272 rq->next_rq = next_rq;
27273 next_rq->cmd_type = rq->cmd_type;
27274
27275- dxferp = (void*)(unsigned long)hdr->din_xferp;
27276+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27277 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27278 hdr->din_xfer_len, GFP_KERNEL);
27279 if (ret)
27280@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27281
27282 if (hdr->dout_xfer_len) {
27283 dxfer_len = hdr->dout_xfer_len;
27284- dxferp = (void*)(unsigned long)hdr->dout_xferp;
27285+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27286 } else if (hdr->din_xfer_len) {
27287 dxfer_len = hdr->din_xfer_len;
27288- dxferp = (void*)(unsigned long)hdr->din_xferp;
27289+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27290 } else
27291 dxfer_len = 0;
27292
27293@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27294 int len = min_t(unsigned int, hdr->max_response_len,
27295 rq->sense_len);
27296
27297- ret = copy_to_user((void*)(unsigned long)hdr->response,
27298+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27299 rq->sense, len);
27300 if (!ret)
27301 hdr->response_len = len;
27302diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27303index 9bd086c..ca1fc22 100644
27304--- a/block/compat_ioctl.c
27305+++ b/block/compat_ioctl.c
27306@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27307 err |= __get_user(f->spec1, &uf->spec1);
27308 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27309 err |= __get_user(name, &uf->name);
27310- f->name = compat_ptr(name);
27311+ f->name = (void __force_kernel *)compat_ptr(name);
27312 if (err) {
27313 err = -EFAULT;
27314 goto out;
27315diff --git a/block/elevator.c b/block/elevator.c
27316index a847046..75a1746 100644
27317--- a/block/elevator.c
27318+++ b/block/elevator.c
27319@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27320 return error;
27321 }
27322
27323-static struct sysfs_ops elv_sysfs_ops = {
27324+static const struct sysfs_ops elv_sysfs_ops = {
27325 .show = elv_attr_show,
27326 .store = elv_attr_store,
27327 };
27328diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27329index 2be0a97..bded3fd 100644
27330--- a/block/scsi_ioctl.c
27331+++ b/block/scsi_ioctl.c
27332@@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27333 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27334 struct sg_io_hdr *hdr, fmode_t mode)
27335 {
27336- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27337+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27338+ unsigned char *cmdptr;
27339+
27340+ if (rq->cmd != rq->__cmd)
27341+ cmdptr = rq->cmd;
27342+ else
27343+ cmdptr = tmpcmd;
27344+
27345+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27346 return -EFAULT;
27347+
27348+ if (cmdptr != rq->cmd)
27349+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27350+
27351 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27352 return -EPERM;
27353
27354@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27355 int err;
27356 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27357 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27358+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27359+ unsigned char *cmdptr;
27360
27361 if (!sic)
27362 return -EINVAL;
27363@@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27364 */
27365 err = -EFAULT;
27366 rq->cmd_len = cmdlen;
27367- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27368+
27369+ if (rq->cmd != rq->__cmd)
27370+ cmdptr = rq->cmd;
27371+ else
27372+ cmdptr = tmpcmd;
27373+
27374+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27375 goto error;
27376
27377+ if (rq->cmd != cmdptr)
27378+ memcpy(rq->cmd, cmdptr, cmdlen);
27379+
27380 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27381 goto error;
27382
27383diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27384index 3533582..f143117 100644
27385--- a/crypto/cryptd.c
27386+++ b/crypto/cryptd.c
27387@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27388
27389 struct cryptd_blkcipher_request_ctx {
27390 crypto_completion_t complete;
27391-};
27392+} __no_const;
27393
27394 struct cryptd_hash_ctx {
27395 struct crypto_shash *child;
27396diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27397index a90d260..7a9765e 100644
27398--- a/crypto/gf128mul.c
27399+++ b/crypto/gf128mul.c
27400@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27401 for (i = 0; i < 7; ++i)
27402 gf128mul_x_lle(&p[i + 1], &p[i]);
27403
27404- memset(r, 0, sizeof(r));
27405+ memset(r, 0, sizeof(*r));
27406 for (i = 0;;) {
27407 u8 ch = ((u8 *)b)[15 - i];
27408
27409@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27410 for (i = 0; i < 7; ++i)
27411 gf128mul_x_bbe(&p[i + 1], &p[i]);
27412
27413- memset(r, 0, sizeof(r));
27414+ memset(r, 0, sizeof(*r));
27415 for (i = 0;;) {
27416 u8 ch = ((u8 *)b)[i];
27417
27418diff --git a/crypto/serpent.c b/crypto/serpent.c
27419index b651a55..023297d 100644
27420--- a/crypto/serpent.c
27421+++ b/crypto/serpent.c
27422@@ -21,6 +21,7 @@
27423 #include <asm/byteorder.h>
27424 #include <linux/crypto.h>
27425 #include <linux/types.h>
27426+#include <linux/sched.h>
27427
27428 /* Key is padded to the maximum of 256 bits before round key generation.
27429 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27430@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27431 u32 r0,r1,r2,r3,r4;
27432 int i;
27433
27434+ pax_track_stack();
27435+
27436 /* Copy key, add padding */
27437
27438 for (i = 0; i < keylen; ++i)
27439diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27440index 0d2cdb8..d8de48d 100644
27441--- a/drivers/acpi/acpi_pad.c
27442+++ b/drivers/acpi/acpi_pad.c
27443@@ -30,7 +30,7 @@
27444 #include <acpi/acpi_bus.h>
27445 #include <acpi/acpi_drivers.h>
27446
27447-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27448+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27449 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27450 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27451 static DEFINE_MUTEX(isolated_cpus_lock);
27452diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27453index 3f4602b..2e41d36 100644
27454--- a/drivers/acpi/battery.c
27455+++ b/drivers/acpi/battery.c
27456@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27457 }
27458
27459 static struct battery_file {
27460- struct file_operations ops;
27461+ const struct file_operations ops;
27462 mode_t mode;
27463 const char *name;
27464 } acpi_battery_file[] = {
27465diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27466index 7338b6a..82f0257 100644
27467--- a/drivers/acpi/dock.c
27468+++ b/drivers/acpi/dock.c
27469@@ -77,7 +77,7 @@ struct dock_dependent_device {
27470 struct list_head list;
27471 struct list_head hotplug_list;
27472 acpi_handle handle;
27473- struct acpi_dock_ops *ops;
27474+ const struct acpi_dock_ops *ops;
27475 void *context;
27476 };
27477
27478@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27479 * the dock driver after _DCK is executed.
27480 */
27481 int
27482-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27483+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27484 void *context)
27485 {
27486 struct dock_dependent_device *dd;
27487diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27488index 7c1c59e..2993595 100644
27489--- a/drivers/acpi/osl.c
27490+++ b/drivers/acpi/osl.c
27491@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27492 void __iomem *virt_addr;
27493
27494 virt_addr = ioremap(phys_addr, width);
27495+ if (!virt_addr)
27496+ return AE_NO_MEMORY;
27497 if (!value)
27498 value = &dummy;
27499
27500@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27501 void __iomem *virt_addr;
27502
27503 virt_addr = ioremap(phys_addr, width);
27504+ if (!virt_addr)
27505+ return AE_NO_MEMORY;
27506
27507 switch (width) {
27508 case 8:
27509diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27510index c216062..eec10d2 100644
27511--- a/drivers/acpi/power_meter.c
27512+++ b/drivers/acpi/power_meter.c
27513@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27514 return res;
27515
27516 temp /= 1000;
27517- if (temp < 0)
27518- return -EINVAL;
27519
27520 mutex_lock(&resource->lock);
27521 resource->trip[attr->index - 7] = temp;
27522diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27523index d0d25e2..961643d 100644
27524--- a/drivers/acpi/proc.c
27525+++ b/drivers/acpi/proc.c
27526@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27527 size_t count, loff_t * ppos)
27528 {
27529 struct list_head *node, *next;
27530- char strbuf[5];
27531- char str[5] = "";
27532- unsigned int len = count;
27533+ char strbuf[5] = {0};
27534 struct acpi_device *found_dev = NULL;
27535
27536- if (len > 4)
27537- len = 4;
27538- if (len < 0)
27539- return -EFAULT;
27540+ if (count > 4)
27541+ count = 4;
27542
27543- if (copy_from_user(strbuf, buffer, len))
27544+ if (copy_from_user(strbuf, buffer, count))
27545 return -EFAULT;
27546- strbuf[len] = '\0';
27547- sscanf(strbuf, "%s", str);
27548+ strbuf[count] = '\0';
27549
27550 mutex_lock(&acpi_device_lock);
27551 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27552@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27553 if (!dev->wakeup.flags.valid)
27554 continue;
27555
27556- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27557+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27558 dev->wakeup.state.enabled =
27559 dev->wakeup.state.enabled ? 0 : 1;
27560 found_dev = dev;
27561diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27562index 7102474..de8ad22 100644
27563--- a/drivers/acpi/processor_core.c
27564+++ b/drivers/acpi/processor_core.c
27565@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27566 return 0;
27567 }
27568
27569- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27570+ BUG_ON(pr->id >= nr_cpu_ids);
27571
27572 /*
27573 * Buggy BIOS check
27574diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27575index d933980..5761f13 100644
27576--- a/drivers/acpi/sbshc.c
27577+++ b/drivers/acpi/sbshc.c
27578@@ -17,7 +17,7 @@
27579
27580 #define PREFIX "ACPI: "
27581
27582-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27583+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27584 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27585
27586 struct acpi_smb_hc {
27587diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27588index 0458094..6978e7b 100644
27589--- a/drivers/acpi/sleep.c
27590+++ b/drivers/acpi/sleep.c
27591@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27592 }
27593 }
27594
27595-static struct platform_suspend_ops acpi_suspend_ops = {
27596+static const struct platform_suspend_ops acpi_suspend_ops = {
27597 .valid = acpi_suspend_state_valid,
27598 .begin = acpi_suspend_begin,
27599 .prepare_late = acpi_pm_prepare,
27600@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27601 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27602 * been requested.
27603 */
27604-static struct platform_suspend_ops acpi_suspend_ops_old = {
27605+static const struct platform_suspend_ops acpi_suspend_ops_old = {
27606 .valid = acpi_suspend_state_valid,
27607 .begin = acpi_suspend_begin_old,
27608 .prepare_late = acpi_pm_disable_gpes,
27609@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27610 acpi_enable_all_runtime_gpes();
27611 }
27612
27613-static struct platform_hibernation_ops acpi_hibernation_ops = {
27614+static const struct platform_hibernation_ops acpi_hibernation_ops = {
27615 .begin = acpi_hibernation_begin,
27616 .end = acpi_pm_end,
27617 .pre_snapshot = acpi_hibernation_pre_snapshot,
27618@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27619 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27620 * been requested.
27621 */
27622-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27623+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27624 .begin = acpi_hibernation_begin_old,
27625 .end = acpi_pm_end,
27626 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27627diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27628index 05dff63..b662ab7 100644
27629--- a/drivers/acpi/video.c
27630+++ b/drivers/acpi/video.c
27631@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27632 vd->brightness->levels[request_level]);
27633 }
27634
27635-static struct backlight_ops acpi_backlight_ops = {
27636+static const struct backlight_ops acpi_backlight_ops = {
27637 .get_brightness = acpi_video_get_brightness,
27638 .update_status = acpi_video_set_brightness,
27639 };
27640diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27641index 6787aab..23ffb0e 100644
27642--- a/drivers/ata/ahci.c
27643+++ b/drivers/ata/ahci.c
27644@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27645 .sdev_attrs = ahci_sdev_attrs,
27646 };
27647
27648-static struct ata_port_operations ahci_ops = {
27649+static const struct ata_port_operations ahci_ops = {
27650 .inherits = &sata_pmp_port_ops,
27651
27652 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27653@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27654 .port_stop = ahci_port_stop,
27655 };
27656
27657-static struct ata_port_operations ahci_vt8251_ops = {
27658+static const struct ata_port_operations ahci_vt8251_ops = {
27659 .inherits = &ahci_ops,
27660 .hardreset = ahci_vt8251_hardreset,
27661 };
27662
27663-static struct ata_port_operations ahci_p5wdh_ops = {
27664+static const struct ata_port_operations ahci_p5wdh_ops = {
27665 .inherits = &ahci_ops,
27666 .hardreset = ahci_p5wdh_hardreset,
27667 };
27668
27669-static struct ata_port_operations ahci_sb600_ops = {
27670+static const struct ata_port_operations ahci_sb600_ops = {
27671 .inherits = &ahci_ops,
27672 .softreset = ahci_sb600_softreset,
27673 .pmp_softreset = ahci_sb600_softreset,
27674diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27675index 99e7196..4968c77 100644
27676--- a/drivers/ata/ata_generic.c
27677+++ b/drivers/ata/ata_generic.c
27678@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27679 ATA_BMDMA_SHT(DRV_NAME),
27680 };
27681
27682-static struct ata_port_operations generic_port_ops = {
27683+static const struct ata_port_operations generic_port_ops = {
27684 .inherits = &ata_bmdma_port_ops,
27685 .cable_detect = ata_cable_unknown,
27686 .set_mode = generic_set_mode,
27687diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27688index c33591d..000c121 100644
27689--- a/drivers/ata/ata_piix.c
27690+++ b/drivers/ata/ata_piix.c
27691@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27692 ATA_BMDMA_SHT(DRV_NAME),
27693 };
27694
27695-static struct ata_port_operations piix_pata_ops = {
27696+static const struct ata_port_operations piix_pata_ops = {
27697 .inherits = &ata_bmdma32_port_ops,
27698 .cable_detect = ata_cable_40wire,
27699 .set_piomode = piix_set_piomode,
27700@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27701 .prereset = piix_pata_prereset,
27702 };
27703
27704-static struct ata_port_operations piix_vmw_ops = {
27705+static const struct ata_port_operations piix_vmw_ops = {
27706 .inherits = &piix_pata_ops,
27707 .bmdma_status = piix_vmw_bmdma_status,
27708 };
27709
27710-static struct ata_port_operations ich_pata_ops = {
27711+static const struct ata_port_operations ich_pata_ops = {
27712 .inherits = &piix_pata_ops,
27713 .cable_detect = ich_pata_cable_detect,
27714 .set_dmamode = ich_set_dmamode,
27715 };
27716
27717-static struct ata_port_operations piix_sata_ops = {
27718+static const struct ata_port_operations piix_sata_ops = {
27719 .inherits = &ata_bmdma_port_ops,
27720 };
27721
27722-static struct ata_port_operations piix_sidpr_sata_ops = {
27723+static const struct ata_port_operations piix_sidpr_sata_ops = {
27724 .inherits = &piix_sata_ops,
27725 .hardreset = sata_std_hardreset,
27726 .scr_read = piix_sidpr_scr_read,
27727diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27728index b0882cd..c295d65 100644
27729--- a/drivers/ata/libata-acpi.c
27730+++ b/drivers/ata/libata-acpi.c
27731@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27732 ata_acpi_uevent(dev->link->ap, dev, event);
27733 }
27734
27735-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27736+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27737 .handler = ata_acpi_dev_notify_dock,
27738 .uevent = ata_acpi_dev_uevent,
27739 };
27740
27741-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27742+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27743 .handler = ata_acpi_ap_notify_dock,
27744 .uevent = ata_acpi_ap_uevent,
27745 };
27746diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27747index d4f7f99..94f603e 100644
27748--- a/drivers/ata/libata-core.c
27749+++ b/drivers/ata/libata-core.c
27750@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27751 struct ata_port *ap;
27752 unsigned int tag;
27753
27754- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27755+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27756 ap = qc->ap;
27757
27758 qc->flags = 0;
27759@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27760 struct ata_port *ap;
27761 struct ata_link *link;
27762
27763- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27764+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27765 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27766 ap = qc->ap;
27767 link = qc->dev->link;
27768@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27769 * LOCKING:
27770 * None.
27771 */
27772-static void ata_finalize_port_ops(struct ata_port_operations *ops)
27773+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27774 {
27775 static DEFINE_SPINLOCK(lock);
27776 const struct ata_port_operations *cur;
27777@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27778 return;
27779
27780 spin_lock(&lock);
27781+ pax_open_kernel();
27782
27783 for (cur = ops->inherits; cur; cur = cur->inherits) {
27784 void **inherit = (void **)cur;
27785@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27786 if (IS_ERR(*pp))
27787 *pp = NULL;
27788
27789- ops->inherits = NULL;
27790+ *(struct ata_port_operations **)&ops->inherits = NULL;
27791
27792+ pax_close_kernel();
27793 spin_unlock(&lock);
27794 }
27795
27796@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
27797 */
27798 /* KILLME - the only user left is ipr */
27799 void ata_host_init(struct ata_host *host, struct device *dev,
27800- unsigned long flags, struct ata_port_operations *ops)
27801+ unsigned long flags, const struct ata_port_operations *ops)
27802 {
27803 spin_lock_init(&host->lock);
27804 host->dev = dev;
27805@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
27806 /* truly dummy */
27807 }
27808
27809-struct ata_port_operations ata_dummy_port_ops = {
27810+const struct ata_port_operations ata_dummy_port_ops = {
27811 .qc_prep = ata_noop_qc_prep,
27812 .qc_issue = ata_dummy_qc_issue,
27813 .error_handler = ata_dummy_error_handler,
27814diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
27815index e5bdb9b..45a8e72 100644
27816--- a/drivers/ata/libata-eh.c
27817+++ b/drivers/ata/libata-eh.c
27818@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
27819 {
27820 struct ata_link *link;
27821
27822+ pax_track_stack();
27823+
27824 ata_for_each_link(link, ap, HOST_FIRST)
27825 ata_eh_link_report(link);
27826 }
27827@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
27828 */
27829 void ata_std_error_handler(struct ata_port *ap)
27830 {
27831- struct ata_port_operations *ops = ap->ops;
27832+ const struct ata_port_operations *ops = ap->ops;
27833 ata_reset_fn_t hardreset = ops->hardreset;
27834
27835 /* ignore built-in hardreset if SCR access is not available */
27836diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
27837index 51f0ffb..19ce3e3 100644
27838--- a/drivers/ata/libata-pmp.c
27839+++ b/drivers/ata/libata-pmp.c
27840@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
27841 */
27842 static int sata_pmp_eh_recover(struct ata_port *ap)
27843 {
27844- struct ata_port_operations *ops = ap->ops;
27845+ const struct ata_port_operations *ops = ap->ops;
27846 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
27847 struct ata_link *pmp_link = &ap->link;
27848 struct ata_device *pmp_dev = pmp_link->device;
27849diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
27850index d8f35fe..288180a 100644
27851--- a/drivers/ata/pata_acpi.c
27852+++ b/drivers/ata/pata_acpi.c
27853@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
27854 ATA_BMDMA_SHT(DRV_NAME),
27855 };
27856
27857-static struct ata_port_operations pacpi_ops = {
27858+static const struct ata_port_operations pacpi_ops = {
27859 .inherits = &ata_bmdma_port_ops,
27860 .qc_issue = pacpi_qc_issue,
27861 .cable_detect = pacpi_cable_detect,
27862diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
27863index 9434114..1f2f364 100644
27864--- a/drivers/ata/pata_ali.c
27865+++ b/drivers/ata/pata_ali.c
27866@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
27867 * Port operations for PIO only ALi
27868 */
27869
27870-static struct ata_port_operations ali_early_port_ops = {
27871+static const struct ata_port_operations ali_early_port_ops = {
27872 .inherits = &ata_sff_port_ops,
27873 .cable_detect = ata_cable_40wire,
27874 .set_piomode = ali_set_piomode,
27875@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
27876 * Port operations for DMA capable ALi without cable
27877 * detect
27878 */
27879-static struct ata_port_operations ali_20_port_ops = {
27880+static const struct ata_port_operations ali_20_port_ops = {
27881 .inherits = &ali_dma_base_ops,
27882 .cable_detect = ata_cable_40wire,
27883 .mode_filter = ali_20_filter,
27884@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
27885 /*
27886 * Port operations for DMA capable ALi with cable detect
27887 */
27888-static struct ata_port_operations ali_c2_port_ops = {
27889+static const struct ata_port_operations ali_c2_port_ops = {
27890 .inherits = &ali_dma_base_ops,
27891 .check_atapi_dma = ali_check_atapi_dma,
27892 .cable_detect = ali_c2_cable_detect,
27893@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
27894 /*
27895 * Port operations for DMA capable ALi with cable detect
27896 */
27897-static struct ata_port_operations ali_c4_port_ops = {
27898+static const struct ata_port_operations ali_c4_port_ops = {
27899 .inherits = &ali_dma_base_ops,
27900 .check_atapi_dma = ali_check_atapi_dma,
27901 .cable_detect = ali_c2_cable_detect,
27902@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
27903 /*
27904 * Port operations for DMA capable ALi with cable detect and LBA48
27905 */
27906-static struct ata_port_operations ali_c5_port_ops = {
27907+static const struct ata_port_operations ali_c5_port_ops = {
27908 .inherits = &ali_dma_base_ops,
27909 .check_atapi_dma = ali_check_atapi_dma,
27910 .dev_config = ali_warn_atapi_dma,
27911diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
27912index 567f3f7..c8ee0da 100644
27913--- a/drivers/ata/pata_amd.c
27914+++ b/drivers/ata/pata_amd.c
27915@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
27916 .prereset = amd_pre_reset,
27917 };
27918
27919-static struct ata_port_operations amd33_port_ops = {
27920+static const struct ata_port_operations amd33_port_ops = {
27921 .inherits = &amd_base_port_ops,
27922 .cable_detect = ata_cable_40wire,
27923 .set_piomode = amd33_set_piomode,
27924 .set_dmamode = amd33_set_dmamode,
27925 };
27926
27927-static struct ata_port_operations amd66_port_ops = {
27928+static const struct ata_port_operations amd66_port_ops = {
27929 .inherits = &amd_base_port_ops,
27930 .cable_detect = ata_cable_unknown,
27931 .set_piomode = amd66_set_piomode,
27932 .set_dmamode = amd66_set_dmamode,
27933 };
27934
27935-static struct ata_port_operations amd100_port_ops = {
27936+static const struct ata_port_operations amd100_port_ops = {
27937 .inherits = &amd_base_port_ops,
27938 .cable_detect = ata_cable_unknown,
27939 .set_piomode = amd100_set_piomode,
27940 .set_dmamode = amd100_set_dmamode,
27941 };
27942
27943-static struct ata_port_operations amd133_port_ops = {
27944+static const struct ata_port_operations amd133_port_ops = {
27945 .inherits = &amd_base_port_ops,
27946 .cable_detect = amd_cable_detect,
27947 .set_piomode = amd133_set_piomode,
27948@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
27949 .host_stop = nv_host_stop,
27950 };
27951
27952-static struct ata_port_operations nv100_port_ops = {
27953+static const struct ata_port_operations nv100_port_ops = {
27954 .inherits = &nv_base_port_ops,
27955 .set_piomode = nv100_set_piomode,
27956 .set_dmamode = nv100_set_dmamode,
27957 };
27958
27959-static struct ata_port_operations nv133_port_ops = {
27960+static const struct ata_port_operations nv133_port_ops = {
27961 .inherits = &nv_base_port_ops,
27962 .set_piomode = nv133_set_piomode,
27963 .set_dmamode = nv133_set_dmamode,
27964diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
27965index d332cfd..4b7eaae 100644
27966--- a/drivers/ata/pata_artop.c
27967+++ b/drivers/ata/pata_artop.c
27968@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
27969 ATA_BMDMA_SHT(DRV_NAME),
27970 };
27971
27972-static struct ata_port_operations artop6210_ops = {
27973+static const struct ata_port_operations artop6210_ops = {
27974 .inherits = &ata_bmdma_port_ops,
27975 .cable_detect = ata_cable_40wire,
27976 .set_piomode = artop6210_set_piomode,
27977@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
27978 .qc_defer = artop6210_qc_defer,
27979 };
27980
27981-static struct ata_port_operations artop6260_ops = {
27982+static const struct ata_port_operations artop6260_ops = {
27983 .inherits = &ata_bmdma_port_ops,
27984 .cable_detect = artop6260_cable_detect,
27985 .set_piomode = artop6260_set_piomode,
27986diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
27987index 5c129f9..7bb7ccb 100644
27988--- a/drivers/ata/pata_at32.c
27989+++ b/drivers/ata/pata_at32.c
27990@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
27991 ATA_PIO_SHT(DRV_NAME),
27992 };
27993
27994-static struct ata_port_operations at32_port_ops = {
27995+static const struct ata_port_operations at32_port_ops = {
27996 .inherits = &ata_sff_port_ops,
27997 .cable_detect = ata_cable_40wire,
27998 .set_piomode = pata_at32_set_piomode,
27999diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28000index 41c94b1..829006d 100644
28001--- a/drivers/ata/pata_at91.c
28002+++ b/drivers/ata/pata_at91.c
28003@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28004 ATA_PIO_SHT(DRV_NAME),
28005 };
28006
28007-static struct ata_port_operations pata_at91_port_ops = {
28008+static const struct ata_port_operations pata_at91_port_ops = {
28009 .inherits = &ata_sff_port_ops,
28010
28011 .sff_data_xfer = pata_at91_data_xfer_noirq,
28012diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28013index ae4454d..d391eb4 100644
28014--- a/drivers/ata/pata_atiixp.c
28015+++ b/drivers/ata/pata_atiixp.c
28016@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28017 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28018 };
28019
28020-static struct ata_port_operations atiixp_port_ops = {
28021+static const struct ata_port_operations atiixp_port_ops = {
28022 .inherits = &ata_bmdma_port_ops,
28023
28024 .qc_prep = ata_sff_dumb_qc_prep,
28025diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28026index 6fe7ded..2a425dc 100644
28027--- a/drivers/ata/pata_atp867x.c
28028+++ b/drivers/ata/pata_atp867x.c
28029@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28030 ATA_BMDMA_SHT(DRV_NAME),
28031 };
28032
28033-static struct ata_port_operations atp867x_ops = {
28034+static const struct ata_port_operations atp867x_ops = {
28035 .inherits = &ata_bmdma_port_ops,
28036 .cable_detect = atp867x_cable_detect,
28037 .set_piomode = atp867x_set_piomode,
28038diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28039index c4b47a3..b27a367 100644
28040--- a/drivers/ata/pata_bf54x.c
28041+++ b/drivers/ata/pata_bf54x.c
28042@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28043 .dma_boundary = ATA_DMA_BOUNDARY,
28044 };
28045
28046-static struct ata_port_operations bfin_pata_ops = {
28047+static const struct ata_port_operations bfin_pata_ops = {
28048 .inherits = &ata_sff_port_ops,
28049
28050 .set_piomode = bfin_set_piomode,
28051diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28052index 5acf9fa..84248be 100644
28053--- a/drivers/ata/pata_cmd640.c
28054+++ b/drivers/ata/pata_cmd640.c
28055@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28056 ATA_BMDMA_SHT(DRV_NAME),
28057 };
28058
28059-static struct ata_port_operations cmd640_port_ops = {
28060+static const struct ata_port_operations cmd640_port_ops = {
28061 .inherits = &ata_bmdma_port_ops,
28062 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28063 .sff_data_xfer = ata_sff_data_xfer_noirq,
28064diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28065index ccd2694..c869c3d 100644
28066--- a/drivers/ata/pata_cmd64x.c
28067+++ b/drivers/ata/pata_cmd64x.c
28068@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28069 .set_dmamode = cmd64x_set_dmamode,
28070 };
28071
28072-static struct ata_port_operations cmd64x_port_ops = {
28073+static const struct ata_port_operations cmd64x_port_ops = {
28074 .inherits = &cmd64x_base_ops,
28075 .cable_detect = ata_cable_40wire,
28076 };
28077
28078-static struct ata_port_operations cmd646r1_port_ops = {
28079+static const struct ata_port_operations cmd646r1_port_ops = {
28080 .inherits = &cmd64x_base_ops,
28081 .bmdma_stop = cmd646r1_bmdma_stop,
28082 .cable_detect = ata_cable_40wire,
28083 };
28084
28085-static struct ata_port_operations cmd648_port_ops = {
28086+static const struct ata_port_operations cmd648_port_ops = {
28087 .inherits = &cmd64x_base_ops,
28088 .bmdma_stop = cmd648_bmdma_stop,
28089 .cable_detect = cmd648_cable_detect,
28090diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28091index 0df83cf..d7595b0 100644
28092--- a/drivers/ata/pata_cs5520.c
28093+++ b/drivers/ata/pata_cs5520.c
28094@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28095 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28096 };
28097
28098-static struct ata_port_operations cs5520_port_ops = {
28099+static const struct ata_port_operations cs5520_port_ops = {
28100 .inherits = &ata_bmdma_port_ops,
28101 .qc_prep = ata_sff_dumb_qc_prep,
28102 .cable_detect = ata_cable_40wire,
28103diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28104index c974b05..6d26b11 100644
28105--- a/drivers/ata/pata_cs5530.c
28106+++ b/drivers/ata/pata_cs5530.c
28107@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28108 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28109 };
28110
28111-static struct ata_port_operations cs5530_port_ops = {
28112+static const struct ata_port_operations cs5530_port_ops = {
28113 .inherits = &ata_bmdma_port_ops,
28114
28115 .qc_prep = ata_sff_dumb_qc_prep,
28116diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28117index 403f561..aacd26b 100644
28118--- a/drivers/ata/pata_cs5535.c
28119+++ b/drivers/ata/pata_cs5535.c
28120@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28121 ATA_BMDMA_SHT(DRV_NAME),
28122 };
28123
28124-static struct ata_port_operations cs5535_port_ops = {
28125+static const struct ata_port_operations cs5535_port_ops = {
28126 .inherits = &ata_bmdma_port_ops,
28127 .cable_detect = cs5535_cable_detect,
28128 .set_piomode = cs5535_set_piomode,
28129diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28130index 6da4cb4..de24a25 100644
28131--- a/drivers/ata/pata_cs5536.c
28132+++ b/drivers/ata/pata_cs5536.c
28133@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28134 ATA_BMDMA_SHT(DRV_NAME),
28135 };
28136
28137-static struct ata_port_operations cs5536_port_ops = {
28138+static const struct ata_port_operations cs5536_port_ops = {
28139 .inherits = &ata_bmdma_port_ops,
28140 .cable_detect = cs5536_cable_detect,
28141 .set_piomode = cs5536_set_piomode,
28142diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28143index 8fb040b..b16a9c9 100644
28144--- a/drivers/ata/pata_cypress.c
28145+++ b/drivers/ata/pata_cypress.c
28146@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28147 ATA_BMDMA_SHT(DRV_NAME),
28148 };
28149
28150-static struct ata_port_operations cy82c693_port_ops = {
28151+static const struct ata_port_operations cy82c693_port_ops = {
28152 .inherits = &ata_bmdma_port_ops,
28153 .cable_detect = ata_cable_40wire,
28154 .set_piomode = cy82c693_set_piomode,
28155diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28156index 2a6412f..555ee11 100644
28157--- a/drivers/ata/pata_efar.c
28158+++ b/drivers/ata/pata_efar.c
28159@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28160 ATA_BMDMA_SHT(DRV_NAME),
28161 };
28162
28163-static struct ata_port_operations efar_ops = {
28164+static const struct ata_port_operations efar_ops = {
28165 .inherits = &ata_bmdma_port_ops,
28166 .cable_detect = efar_cable_detect,
28167 .set_piomode = efar_set_piomode,
28168diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28169index b9d8836..0b92030 100644
28170--- a/drivers/ata/pata_hpt366.c
28171+++ b/drivers/ata/pata_hpt366.c
28172@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28173 * Configuration for HPT366/68
28174 */
28175
28176-static struct ata_port_operations hpt366_port_ops = {
28177+static const struct ata_port_operations hpt366_port_ops = {
28178 .inherits = &ata_bmdma_port_ops,
28179 .cable_detect = hpt36x_cable_detect,
28180 .mode_filter = hpt366_filter,
28181diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28182index 5af7f19..00c4980 100644
28183--- a/drivers/ata/pata_hpt37x.c
28184+++ b/drivers/ata/pata_hpt37x.c
28185@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28186 * Configuration for HPT370
28187 */
28188
28189-static struct ata_port_operations hpt370_port_ops = {
28190+static const struct ata_port_operations hpt370_port_ops = {
28191 .inherits = &ata_bmdma_port_ops,
28192
28193 .bmdma_stop = hpt370_bmdma_stop,
28194@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28195 * Configuration for HPT370A. Close to 370 but less filters
28196 */
28197
28198-static struct ata_port_operations hpt370a_port_ops = {
28199+static const struct ata_port_operations hpt370a_port_ops = {
28200 .inherits = &hpt370_port_ops,
28201 .mode_filter = hpt370a_filter,
28202 };
28203@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28204 * and DMA mode setting functionality.
28205 */
28206
28207-static struct ata_port_operations hpt372_port_ops = {
28208+static const struct ata_port_operations hpt372_port_ops = {
28209 .inherits = &ata_bmdma_port_ops,
28210
28211 .bmdma_stop = hpt37x_bmdma_stop,
28212@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28213 * but we have a different cable detection procedure for function 1.
28214 */
28215
28216-static struct ata_port_operations hpt374_fn1_port_ops = {
28217+static const struct ata_port_operations hpt374_fn1_port_ops = {
28218 .inherits = &hpt372_port_ops,
28219 .prereset = hpt374_fn1_pre_reset,
28220 };
28221diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28222index 100f227..2e39382 100644
28223--- a/drivers/ata/pata_hpt3x2n.c
28224+++ b/drivers/ata/pata_hpt3x2n.c
28225@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28226 * Configuration for HPT3x2n.
28227 */
28228
28229-static struct ata_port_operations hpt3x2n_port_ops = {
28230+static const struct ata_port_operations hpt3x2n_port_ops = {
28231 .inherits = &ata_bmdma_port_ops,
28232
28233 .bmdma_stop = hpt3x2n_bmdma_stop,
28234diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28235index 7e31025..6fca8f4 100644
28236--- a/drivers/ata/pata_hpt3x3.c
28237+++ b/drivers/ata/pata_hpt3x3.c
28238@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28239 ATA_BMDMA_SHT(DRV_NAME),
28240 };
28241
28242-static struct ata_port_operations hpt3x3_port_ops = {
28243+static const struct ata_port_operations hpt3x3_port_ops = {
28244 .inherits = &ata_bmdma_port_ops,
28245 .cable_detect = ata_cable_40wire,
28246 .set_piomode = hpt3x3_set_piomode,
28247diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28248index b663b7f..9a26c2a 100644
28249--- a/drivers/ata/pata_icside.c
28250+++ b/drivers/ata/pata_icside.c
28251@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28252 }
28253 }
28254
28255-static struct ata_port_operations pata_icside_port_ops = {
28256+static const struct ata_port_operations pata_icside_port_ops = {
28257 .inherits = &ata_sff_port_ops,
28258 /* no need to build any PRD tables for DMA */
28259 .qc_prep = ata_noop_qc_prep,
28260diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28261index 4bceb88..457dfb6 100644
28262--- a/drivers/ata/pata_isapnp.c
28263+++ b/drivers/ata/pata_isapnp.c
28264@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28265 ATA_PIO_SHT(DRV_NAME),
28266 };
28267
28268-static struct ata_port_operations isapnp_port_ops = {
28269+static const struct ata_port_operations isapnp_port_ops = {
28270 .inherits = &ata_sff_port_ops,
28271 .cable_detect = ata_cable_40wire,
28272 };
28273
28274-static struct ata_port_operations isapnp_noalt_port_ops = {
28275+static const struct ata_port_operations isapnp_noalt_port_ops = {
28276 .inherits = &ata_sff_port_ops,
28277 .cable_detect = ata_cable_40wire,
28278 /* No altstatus so we don't want to use the lost interrupt poll */
28279diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28280index f156da8..24976e2 100644
28281--- a/drivers/ata/pata_it8213.c
28282+++ b/drivers/ata/pata_it8213.c
28283@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28284 };
28285
28286
28287-static struct ata_port_operations it8213_ops = {
28288+static const struct ata_port_operations it8213_ops = {
28289 .inherits = &ata_bmdma_port_ops,
28290 .cable_detect = it8213_cable_detect,
28291 .set_piomode = it8213_set_piomode,
28292diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28293index 188bc2f..ca9e785 100644
28294--- a/drivers/ata/pata_it821x.c
28295+++ b/drivers/ata/pata_it821x.c
28296@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28297 ATA_BMDMA_SHT(DRV_NAME),
28298 };
28299
28300-static struct ata_port_operations it821x_smart_port_ops = {
28301+static const struct ata_port_operations it821x_smart_port_ops = {
28302 .inherits = &ata_bmdma_port_ops,
28303
28304 .check_atapi_dma= it821x_check_atapi_dma,
28305@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28306 .port_start = it821x_port_start,
28307 };
28308
28309-static struct ata_port_operations it821x_passthru_port_ops = {
28310+static const struct ata_port_operations it821x_passthru_port_ops = {
28311 .inherits = &ata_bmdma_port_ops,
28312
28313 .check_atapi_dma= it821x_check_atapi_dma,
28314@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28315 .port_start = it821x_port_start,
28316 };
28317
28318-static struct ata_port_operations it821x_rdc_port_ops = {
28319+static const struct ata_port_operations it821x_rdc_port_ops = {
28320 .inherits = &ata_bmdma_port_ops,
28321
28322 .check_atapi_dma= it821x_check_atapi_dma,
28323diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28324index ba54b08..4b952b7 100644
28325--- a/drivers/ata/pata_ixp4xx_cf.c
28326+++ b/drivers/ata/pata_ixp4xx_cf.c
28327@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28328 ATA_PIO_SHT(DRV_NAME),
28329 };
28330
28331-static struct ata_port_operations ixp4xx_port_ops = {
28332+static const struct ata_port_operations ixp4xx_port_ops = {
28333 .inherits = &ata_sff_port_ops,
28334 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28335 .cable_detect = ata_cable_40wire,
28336diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28337index 3a1474a..434b0ff 100644
28338--- a/drivers/ata/pata_jmicron.c
28339+++ b/drivers/ata/pata_jmicron.c
28340@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28341 ATA_BMDMA_SHT(DRV_NAME),
28342 };
28343
28344-static struct ata_port_operations jmicron_ops = {
28345+static const struct ata_port_operations jmicron_ops = {
28346 .inherits = &ata_bmdma_port_ops,
28347 .prereset = jmicron_pre_reset,
28348 };
28349diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28350index 6932e56..220e71d 100644
28351--- a/drivers/ata/pata_legacy.c
28352+++ b/drivers/ata/pata_legacy.c
28353@@ -106,7 +106,7 @@ struct legacy_probe {
28354
28355 struct legacy_controller {
28356 const char *name;
28357- struct ata_port_operations *ops;
28358+ const struct ata_port_operations *ops;
28359 unsigned int pio_mask;
28360 unsigned int flags;
28361 unsigned int pflags;
28362@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28363 * pio_mask as well.
28364 */
28365
28366-static struct ata_port_operations simple_port_ops = {
28367+static const struct ata_port_operations simple_port_ops = {
28368 .inherits = &legacy_base_port_ops,
28369 .sff_data_xfer = ata_sff_data_xfer_noirq,
28370 };
28371
28372-static struct ata_port_operations legacy_port_ops = {
28373+static const struct ata_port_operations legacy_port_ops = {
28374 .inherits = &legacy_base_port_ops,
28375 .sff_data_xfer = ata_sff_data_xfer_noirq,
28376 .set_mode = legacy_set_mode,
28377@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28378 return buflen;
28379 }
28380
28381-static struct ata_port_operations pdc20230_port_ops = {
28382+static const struct ata_port_operations pdc20230_port_ops = {
28383 .inherits = &legacy_base_port_ops,
28384 .set_piomode = pdc20230_set_piomode,
28385 .sff_data_xfer = pdc_data_xfer_vlb,
28386@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28387 ioread8(ap->ioaddr.status_addr);
28388 }
28389
28390-static struct ata_port_operations ht6560a_port_ops = {
28391+static const struct ata_port_operations ht6560a_port_ops = {
28392 .inherits = &legacy_base_port_ops,
28393 .set_piomode = ht6560a_set_piomode,
28394 };
28395@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28396 ioread8(ap->ioaddr.status_addr);
28397 }
28398
28399-static struct ata_port_operations ht6560b_port_ops = {
28400+static const struct ata_port_operations ht6560b_port_ops = {
28401 .inherits = &legacy_base_port_ops,
28402 .set_piomode = ht6560b_set_piomode,
28403 };
28404@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28405 }
28406
28407
28408-static struct ata_port_operations opti82c611a_port_ops = {
28409+static const struct ata_port_operations opti82c611a_port_ops = {
28410 .inherits = &legacy_base_port_ops,
28411 .set_piomode = opti82c611a_set_piomode,
28412 };
28413@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28414 return ata_sff_qc_issue(qc);
28415 }
28416
28417-static struct ata_port_operations opti82c46x_port_ops = {
28418+static const struct ata_port_operations opti82c46x_port_ops = {
28419 .inherits = &legacy_base_port_ops,
28420 .set_piomode = opti82c46x_set_piomode,
28421 .qc_issue = opti82c46x_qc_issue,
28422@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28423 return 0;
28424 }
28425
28426-static struct ata_port_operations qdi6500_port_ops = {
28427+static const struct ata_port_operations qdi6500_port_ops = {
28428 .inherits = &legacy_base_port_ops,
28429 .set_piomode = qdi6500_set_piomode,
28430 .qc_issue = qdi_qc_issue,
28431 .sff_data_xfer = vlb32_data_xfer,
28432 };
28433
28434-static struct ata_port_operations qdi6580_port_ops = {
28435+static const struct ata_port_operations qdi6580_port_ops = {
28436 .inherits = &legacy_base_port_ops,
28437 .set_piomode = qdi6580_set_piomode,
28438 .sff_data_xfer = vlb32_data_xfer,
28439 };
28440
28441-static struct ata_port_operations qdi6580dp_port_ops = {
28442+static const struct ata_port_operations qdi6580dp_port_ops = {
28443 .inherits = &legacy_base_port_ops,
28444 .set_piomode = qdi6580dp_set_piomode,
28445 .sff_data_xfer = vlb32_data_xfer,
28446@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28447 return 0;
28448 }
28449
28450-static struct ata_port_operations winbond_port_ops = {
28451+static const struct ata_port_operations winbond_port_ops = {
28452 .inherits = &legacy_base_port_ops,
28453 .set_piomode = winbond_set_piomode,
28454 .sff_data_xfer = vlb32_data_xfer,
28455@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28456 int pio_modes = controller->pio_mask;
28457 unsigned long io = probe->port;
28458 u32 mask = (1 << probe->slot);
28459- struct ata_port_operations *ops = controller->ops;
28460+ const struct ata_port_operations *ops = controller->ops;
28461 struct legacy_data *ld = &legacy_data[probe->slot];
28462 struct ata_host *host = NULL;
28463 struct ata_port *ap;
28464diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28465index 2096fb7..4d090fc 100644
28466--- a/drivers/ata/pata_marvell.c
28467+++ b/drivers/ata/pata_marvell.c
28468@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28469 ATA_BMDMA_SHT(DRV_NAME),
28470 };
28471
28472-static struct ata_port_operations marvell_ops = {
28473+static const struct ata_port_operations marvell_ops = {
28474 .inherits = &ata_bmdma_port_ops,
28475 .cable_detect = marvell_cable_detect,
28476 .prereset = marvell_pre_reset,
28477diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28478index 99d41be..7d56aa8 100644
28479--- a/drivers/ata/pata_mpc52xx.c
28480+++ b/drivers/ata/pata_mpc52xx.c
28481@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28482 ATA_PIO_SHT(DRV_NAME),
28483 };
28484
28485-static struct ata_port_operations mpc52xx_ata_port_ops = {
28486+static const struct ata_port_operations mpc52xx_ata_port_ops = {
28487 .inherits = &ata_bmdma_port_ops,
28488 .sff_dev_select = mpc52xx_ata_dev_select,
28489 .set_piomode = mpc52xx_ata_set_piomode,
28490diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28491index b21f002..0a27e7f 100644
28492--- a/drivers/ata/pata_mpiix.c
28493+++ b/drivers/ata/pata_mpiix.c
28494@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28495 ATA_PIO_SHT(DRV_NAME),
28496 };
28497
28498-static struct ata_port_operations mpiix_port_ops = {
28499+static const struct ata_port_operations mpiix_port_ops = {
28500 .inherits = &ata_sff_port_ops,
28501 .qc_issue = mpiix_qc_issue,
28502 .cable_detect = ata_cable_40wire,
28503diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28504index f0d52f7..89c3be3 100644
28505--- a/drivers/ata/pata_netcell.c
28506+++ b/drivers/ata/pata_netcell.c
28507@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28508 ATA_BMDMA_SHT(DRV_NAME),
28509 };
28510
28511-static struct ata_port_operations netcell_ops = {
28512+static const struct ata_port_operations netcell_ops = {
28513 .inherits = &ata_bmdma_port_ops,
28514 .cable_detect = ata_cable_80wire,
28515 .read_id = netcell_read_id,
28516diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28517index dd53a66..a3f4317 100644
28518--- a/drivers/ata/pata_ninja32.c
28519+++ b/drivers/ata/pata_ninja32.c
28520@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28521 ATA_BMDMA_SHT(DRV_NAME),
28522 };
28523
28524-static struct ata_port_operations ninja32_port_ops = {
28525+static const struct ata_port_operations ninja32_port_ops = {
28526 .inherits = &ata_bmdma_port_ops,
28527 .sff_dev_select = ninja32_dev_select,
28528 .cable_detect = ata_cable_40wire,
28529diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28530index ca53fac..9aa93ef 100644
28531--- a/drivers/ata/pata_ns87410.c
28532+++ b/drivers/ata/pata_ns87410.c
28533@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28534 ATA_PIO_SHT(DRV_NAME),
28535 };
28536
28537-static struct ata_port_operations ns87410_port_ops = {
28538+static const struct ata_port_operations ns87410_port_ops = {
28539 .inherits = &ata_sff_port_ops,
28540 .qc_issue = ns87410_qc_issue,
28541 .cable_detect = ata_cable_40wire,
28542diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28543index 773b159..55f454e 100644
28544--- a/drivers/ata/pata_ns87415.c
28545+++ b/drivers/ata/pata_ns87415.c
28546@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28547 }
28548 #endif /* 87560 SuperIO Support */
28549
28550-static struct ata_port_operations ns87415_pata_ops = {
28551+static const struct ata_port_operations ns87415_pata_ops = {
28552 .inherits = &ata_bmdma_port_ops,
28553
28554 .check_atapi_dma = ns87415_check_atapi_dma,
28555@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28556 };
28557
28558 #if defined(CONFIG_SUPERIO)
28559-static struct ata_port_operations ns87560_pata_ops = {
28560+static const struct ata_port_operations ns87560_pata_ops = {
28561 .inherits = &ns87415_pata_ops,
28562 .sff_tf_read = ns87560_tf_read,
28563 .sff_check_status = ns87560_check_status,
28564diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28565index d6f6956..639295b 100644
28566--- a/drivers/ata/pata_octeon_cf.c
28567+++ b/drivers/ata/pata_octeon_cf.c
28568@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28569 return 0;
28570 }
28571
28572+/* cannot be const */
28573 static struct ata_port_operations octeon_cf_ops = {
28574 .inherits = &ata_sff_port_ops,
28575 .check_atapi_dma = octeon_cf_check_atapi_dma,
28576diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28577index 84ac503..adee1cd 100644
28578--- a/drivers/ata/pata_oldpiix.c
28579+++ b/drivers/ata/pata_oldpiix.c
28580@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28581 ATA_BMDMA_SHT(DRV_NAME),
28582 };
28583
28584-static struct ata_port_operations oldpiix_pata_ops = {
28585+static const struct ata_port_operations oldpiix_pata_ops = {
28586 .inherits = &ata_bmdma_port_ops,
28587 .qc_issue = oldpiix_qc_issue,
28588 .cable_detect = ata_cable_40wire,
28589diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28590index 99eddda..3a4c0aa 100644
28591--- a/drivers/ata/pata_opti.c
28592+++ b/drivers/ata/pata_opti.c
28593@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28594 ATA_PIO_SHT(DRV_NAME),
28595 };
28596
28597-static struct ata_port_operations opti_port_ops = {
28598+static const struct ata_port_operations opti_port_ops = {
28599 .inherits = &ata_sff_port_ops,
28600 .cable_detect = ata_cable_40wire,
28601 .set_piomode = opti_set_piomode,
28602diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28603index 86885a4..8e9968d 100644
28604--- a/drivers/ata/pata_optidma.c
28605+++ b/drivers/ata/pata_optidma.c
28606@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28607 ATA_BMDMA_SHT(DRV_NAME),
28608 };
28609
28610-static struct ata_port_operations optidma_port_ops = {
28611+static const struct ata_port_operations optidma_port_ops = {
28612 .inherits = &ata_bmdma_port_ops,
28613 .cable_detect = ata_cable_40wire,
28614 .set_piomode = optidma_set_pio_mode,
28615@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28616 .prereset = optidma_pre_reset,
28617 };
28618
28619-static struct ata_port_operations optiplus_port_ops = {
28620+static const struct ata_port_operations optiplus_port_ops = {
28621 .inherits = &optidma_port_ops,
28622 .set_piomode = optiplus_set_pio_mode,
28623 .set_dmamode = optiplus_set_dma_mode,
28624diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28625index 11fb4cc..1a14022 100644
28626--- a/drivers/ata/pata_palmld.c
28627+++ b/drivers/ata/pata_palmld.c
28628@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28629 ATA_PIO_SHT(DRV_NAME),
28630 };
28631
28632-static struct ata_port_operations palmld_port_ops = {
28633+static const struct ata_port_operations palmld_port_ops = {
28634 .inherits = &ata_sff_port_ops,
28635 .sff_data_xfer = ata_sff_data_xfer_noirq,
28636 .cable_detect = ata_cable_40wire,
28637diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28638index dc99e26..7f4b1e4 100644
28639--- a/drivers/ata/pata_pcmcia.c
28640+++ b/drivers/ata/pata_pcmcia.c
28641@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28642 ATA_PIO_SHT(DRV_NAME),
28643 };
28644
28645-static struct ata_port_operations pcmcia_port_ops = {
28646+static const struct ata_port_operations pcmcia_port_ops = {
28647 .inherits = &ata_sff_port_ops,
28648 .sff_data_xfer = ata_sff_data_xfer_noirq,
28649 .cable_detect = ata_cable_40wire,
28650 .set_mode = pcmcia_set_mode,
28651 };
28652
28653-static struct ata_port_operations pcmcia_8bit_port_ops = {
28654+static const struct ata_port_operations pcmcia_8bit_port_ops = {
28655 .inherits = &ata_sff_port_ops,
28656 .sff_data_xfer = ata_data_xfer_8bit,
28657 .cable_detect = ata_cable_40wire,
28658@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28659 unsigned long io_base, ctl_base;
28660 void __iomem *io_addr, *ctl_addr;
28661 int n_ports = 1;
28662- struct ata_port_operations *ops = &pcmcia_port_ops;
28663+ const struct ata_port_operations *ops = &pcmcia_port_ops;
28664
28665 info = kzalloc(sizeof(*info), GFP_KERNEL);
28666 if (info == NULL)
28667diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28668index ca5cad0..3a1f125 100644
28669--- a/drivers/ata/pata_pdc2027x.c
28670+++ b/drivers/ata/pata_pdc2027x.c
28671@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28672 ATA_BMDMA_SHT(DRV_NAME),
28673 };
28674
28675-static struct ata_port_operations pdc2027x_pata100_ops = {
28676+static const struct ata_port_operations pdc2027x_pata100_ops = {
28677 .inherits = &ata_bmdma_port_ops,
28678 .check_atapi_dma = pdc2027x_check_atapi_dma,
28679 .cable_detect = pdc2027x_cable_detect,
28680 .prereset = pdc2027x_prereset,
28681 };
28682
28683-static struct ata_port_operations pdc2027x_pata133_ops = {
28684+static const struct ata_port_operations pdc2027x_pata133_ops = {
28685 .inherits = &pdc2027x_pata100_ops,
28686 .mode_filter = pdc2027x_mode_filter,
28687 .set_piomode = pdc2027x_set_piomode,
28688diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28689index 2911120..4bf62aa 100644
28690--- a/drivers/ata/pata_pdc202xx_old.c
28691+++ b/drivers/ata/pata_pdc202xx_old.c
28692@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28693 ATA_BMDMA_SHT(DRV_NAME),
28694 };
28695
28696-static struct ata_port_operations pdc2024x_port_ops = {
28697+static const struct ata_port_operations pdc2024x_port_ops = {
28698 .inherits = &ata_bmdma_port_ops,
28699
28700 .cable_detect = ata_cable_40wire,
28701@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28702 .sff_exec_command = pdc202xx_exec_command,
28703 };
28704
28705-static struct ata_port_operations pdc2026x_port_ops = {
28706+static const struct ata_port_operations pdc2026x_port_ops = {
28707 .inherits = &pdc2024x_port_ops,
28708
28709 .check_atapi_dma = pdc2026x_check_atapi_dma,
28710diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28711index 3f6ebc6..a18c358 100644
28712--- a/drivers/ata/pata_platform.c
28713+++ b/drivers/ata/pata_platform.c
28714@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28715 ATA_PIO_SHT(DRV_NAME),
28716 };
28717
28718-static struct ata_port_operations pata_platform_port_ops = {
28719+static const struct ata_port_operations pata_platform_port_ops = {
28720 .inherits = &ata_sff_port_ops,
28721 .sff_data_xfer = ata_sff_data_xfer_noirq,
28722 .cable_detect = ata_cable_unknown,
28723diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28724index 45879dc..165a9f9 100644
28725--- a/drivers/ata/pata_qdi.c
28726+++ b/drivers/ata/pata_qdi.c
28727@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28728 ATA_PIO_SHT(DRV_NAME),
28729 };
28730
28731-static struct ata_port_operations qdi6500_port_ops = {
28732+static const struct ata_port_operations qdi6500_port_ops = {
28733 .inherits = &ata_sff_port_ops,
28734 .qc_issue = qdi_qc_issue,
28735 .sff_data_xfer = qdi_data_xfer,
28736@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28737 .set_piomode = qdi6500_set_piomode,
28738 };
28739
28740-static struct ata_port_operations qdi6580_port_ops = {
28741+static const struct ata_port_operations qdi6580_port_ops = {
28742 .inherits = &qdi6500_port_ops,
28743 .set_piomode = qdi6580_set_piomode,
28744 };
28745diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28746index 4401b33..716c5cc 100644
28747--- a/drivers/ata/pata_radisys.c
28748+++ b/drivers/ata/pata_radisys.c
28749@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28750 ATA_BMDMA_SHT(DRV_NAME),
28751 };
28752
28753-static struct ata_port_operations radisys_pata_ops = {
28754+static const struct ata_port_operations radisys_pata_ops = {
28755 .inherits = &ata_bmdma_port_ops,
28756 .qc_issue = radisys_qc_issue,
28757 .cable_detect = ata_cable_unknown,
28758diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28759index 45f1e10..fab6bca 100644
28760--- a/drivers/ata/pata_rb532_cf.c
28761+++ b/drivers/ata/pata_rb532_cf.c
28762@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28763 return IRQ_HANDLED;
28764 }
28765
28766-static struct ata_port_operations rb532_pata_port_ops = {
28767+static const struct ata_port_operations rb532_pata_port_ops = {
28768 .inherits = &ata_sff_port_ops,
28769 .sff_data_xfer = ata_sff_data_xfer32,
28770 };
28771diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28772index c843a1e..b5853c3 100644
28773--- a/drivers/ata/pata_rdc.c
28774+++ b/drivers/ata/pata_rdc.c
28775@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28776 pci_write_config_byte(dev, 0x48, udma_enable);
28777 }
28778
28779-static struct ata_port_operations rdc_pata_ops = {
28780+static const struct ata_port_operations rdc_pata_ops = {
28781 .inherits = &ata_bmdma32_port_ops,
28782 .cable_detect = rdc_pata_cable_detect,
28783 .set_piomode = rdc_set_piomode,
28784diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
28785index a5e4dfe..080c8c9 100644
28786--- a/drivers/ata/pata_rz1000.c
28787+++ b/drivers/ata/pata_rz1000.c
28788@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
28789 ATA_PIO_SHT(DRV_NAME),
28790 };
28791
28792-static struct ata_port_operations rz1000_port_ops = {
28793+static const struct ata_port_operations rz1000_port_ops = {
28794 .inherits = &ata_sff_port_ops,
28795 .cable_detect = ata_cable_40wire,
28796 .set_mode = rz1000_set_mode,
28797diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
28798index 3bbed83..e309daf 100644
28799--- a/drivers/ata/pata_sc1200.c
28800+++ b/drivers/ata/pata_sc1200.c
28801@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
28802 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28803 };
28804
28805-static struct ata_port_operations sc1200_port_ops = {
28806+static const struct ata_port_operations sc1200_port_ops = {
28807 .inherits = &ata_bmdma_port_ops,
28808 .qc_prep = ata_sff_dumb_qc_prep,
28809 .qc_issue = sc1200_qc_issue,
28810diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
28811index 4257d6b..4c1d9d5 100644
28812--- a/drivers/ata/pata_scc.c
28813+++ b/drivers/ata/pata_scc.c
28814@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
28815 ATA_BMDMA_SHT(DRV_NAME),
28816 };
28817
28818-static struct ata_port_operations scc_pata_ops = {
28819+static const struct ata_port_operations scc_pata_ops = {
28820 .inherits = &ata_bmdma_port_ops,
28821
28822 .set_piomode = scc_set_piomode,
28823diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
28824index 99cceb4..e2e0a87 100644
28825--- a/drivers/ata/pata_sch.c
28826+++ b/drivers/ata/pata_sch.c
28827@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
28828 ATA_BMDMA_SHT(DRV_NAME),
28829 };
28830
28831-static struct ata_port_operations sch_pata_ops = {
28832+static const struct ata_port_operations sch_pata_ops = {
28833 .inherits = &ata_bmdma_port_ops,
28834 .cable_detect = ata_cable_unknown,
28835 .set_piomode = sch_set_piomode,
28836diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
28837index beaed12..39969f1 100644
28838--- a/drivers/ata/pata_serverworks.c
28839+++ b/drivers/ata/pata_serverworks.c
28840@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
28841 ATA_BMDMA_SHT(DRV_NAME),
28842 };
28843
28844-static struct ata_port_operations serverworks_osb4_port_ops = {
28845+static const struct ata_port_operations serverworks_osb4_port_ops = {
28846 .inherits = &ata_bmdma_port_ops,
28847 .cable_detect = serverworks_cable_detect,
28848 .mode_filter = serverworks_osb4_filter,
28849@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
28850 .set_dmamode = serverworks_set_dmamode,
28851 };
28852
28853-static struct ata_port_operations serverworks_csb_port_ops = {
28854+static const struct ata_port_operations serverworks_csb_port_ops = {
28855 .inherits = &serverworks_osb4_port_ops,
28856 .mode_filter = serverworks_csb_filter,
28857 };
28858diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
28859index a2ace48..0463b44 100644
28860--- a/drivers/ata/pata_sil680.c
28861+++ b/drivers/ata/pata_sil680.c
28862@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
28863 ATA_BMDMA_SHT(DRV_NAME),
28864 };
28865
28866-static struct ata_port_operations sil680_port_ops = {
28867+static const struct ata_port_operations sil680_port_ops = {
28868 .inherits = &ata_bmdma32_port_ops,
28869 .cable_detect = sil680_cable_detect,
28870 .set_piomode = sil680_set_piomode,
28871diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
28872index 488e77b..b3724d5 100644
28873--- a/drivers/ata/pata_sis.c
28874+++ b/drivers/ata/pata_sis.c
28875@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
28876 ATA_BMDMA_SHT(DRV_NAME),
28877 };
28878
28879-static struct ata_port_operations sis_133_for_sata_ops = {
28880+static const struct ata_port_operations sis_133_for_sata_ops = {
28881 .inherits = &ata_bmdma_port_ops,
28882 .set_piomode = sis_133_set_piomode,
28883 .set_dmamode = sis_133_set_dmamode,
28884 .cable_detect = sis_133_cable_detect,
28885 };
28886
28887-static struct ata_port_operations sis_base_ops = {
28888+static const struct ata_port_operations sis_base_ops = {
28889 .inherits = &ata_bmdma_port_ops,
28890 .prereset = sis_pre_reset,
28891 };
28892
28893-static struct ata_port_operations sis_133_ops = {
28894+static const struct ata_port_operations sis_133_ops = {
28895 .inherits = &sis_base_ops,
28896 .set_piomode = sis_133_set_piomode,
28897 .set_dmamode = sis_133_set_dmamode,
28898 .cable_detect = sis_133_cable_detect,
28899 };
28900
28901-static struct ata_port_operations sis_133_early_ops = {
28902+static const struct ata_port_operations sis_133_early_ops = {
28903 .inherits = &sis_base_ops,
28904 .set_piomode = sis_100_set_piomode,
28905 .set_dmamode = sis_133_early_set_dmamode,
28906 .cable_detect = sis_66_cable_detect,
28907 };
28908
28909-static struct ata_port_operations sis_100_ops = {
28910+static const struct ata_port_operations sis_100_ops = {
28911 .inherits = &sis_base_ops,
28912 .set_piomode = sis_100_set_piomode,
28913 .set_dmamode = sis_100_set_dmamode,
28914 .cable_detect = sis_66_cable_detect,
28915 };
28916
28917-static struct ata_port_operations sis_66_ops = {
28918+static const struct ata_port_operations sis_66_ops = {
28919 .inherits = &sis_base_ops,
28920 .set_piomode = sis_old_set_piomode,
28921 .set_dmamode = sis_66_set_dmamode,
28922 .cable_detect = sis_66_cable_detect,
28923 };
28924
28925-static struct ata_port_operations sis_old_ops = {
28926+static const struct ata_port_operations sis_old_ops = {
28927 .inherits = &sis_base_ops,
28928 .set_piomode = sis_old_set_piomode,
28929 .set_dmamode = sis_old_set_dmamode,
28930diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
28931index 29f733c..43e9ca0 100644
28932--- a/drivers/ata/pata_sl82c105.c
28933+++ b/drivers/ata/pata_sl82c105.c
28934@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
28935 ATA_BMDMA_SHT(DRV_NAME),
28936 };
28937
28938-static struct ata_port_operations sl82c105_port_ops = {
28939+static const struct ata_port_operations sl82c105_port_ops = {
28940 .inherits = &ata_bmdma_port_ops,
28941 .qc_defer = sl82c105_qc_defer,
28942 .bmdma_start = sl82c105_bmdma_start,
28943diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
28944index f1f13ff..df39e99 100644
28945--- a/drivers/ata/pata_triflex.c
28946+++ b/drivers/ata/pata_triflex.c
28947@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
28948 ATA_BMDMA_SHT(DRV_NAME),
28949 };
28950
28951-static struct ata_port_operations triflex_port_ops = {
28952+static const struct ata_port_operations triflex_port_ops = {
28953 .inherits = &ata_bmdma_port_ops,
28954 .bmdma_start = triflex_bmdma_start,
28955 .bmdma_stop = triflex_bmdma_stop,
28956diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
28957index 1d73b8d..98a4b29 100644
28958--- a/drivers/ata/pata_via.c
28959+++ b/drivers/ata/pata_via.c
28960@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
28961 ATA_BMDMA_SHT(DRV_NAME),
28962 };
28963
28964-static struct ata_port_operations via_port_ops = {
28965+static const struct ata_port_operations via_port_ops = {
28966 .inherits = &ata_bmdma_port_ops,
28967 .cable_detect = via_cable_detect,
28968 .set_piomode = via_set_piomode,
28969@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
28970 .port_start = via_port_start,
28971 };
28972
28973-static struct ata_port_operations via_port_ops_noirq = {
28974+static const struct ata_port_operations via_port_ops_noirq = {
28975 .inherits = &via_port_ops,
28976 .sff_data_xfer = ata_sff_data_xfer_noirq,
28977 };
28978diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
28979index 6d8619b..ad511c4 100644
28980--- a/drivers/ata/pata_winbond.c
28981+++ b/drivers/ata/pata_winbond.c
28982@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
28983 ATA_PIO_SHT(DRV_NAME),
28984 };
28985
28986-static struct ata_port_operations winbond_port_ops = {
28987+static const struct ata_port_operations winbond_port_ops = {
28988 .inherits = &ata_sff_port_ops,
28989 .sff_data_xfer = winbond_data_xfer,
28990 .cable_detect = ata_cable_40wire,
28991diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
28992index 6c65b07..f996ec7 100644
28993--- a/drivers/ata/pdc_adma.c
28994+++ b/drivers/ata/pdc_adma.c
28995@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
28996 .dma_boundary = ADMA_DMA_BOUNDARY,
28997 };
28998
28999-static struct ata_port_operations adma_ata_ops = {
29000+static const struct ata_port_operations adma_ata_ops = {
29001 .inherits = &ata_sff_port_ops,
29002
29003 .lost_interrupt = ATA_OP_NULL,
29004diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29005index 172b57e..c49bc1e 100644
29006--- a/drivers/ata/sata_fsl.c
29007+++ b/drivers/ata/sata_fsl.c
29008@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29009 .dma_boundary = ATA_DMA_BOUNDARY,
29010 };
29011
29012-static struct ata_port_operations sata_fsl_ops = {
29013+static const struct ata_port_operations sata_fsl_ops = {
29014 .inherits = &sata_pmp_port_ops,
29015
29016 .qc_defer = ata_std_qc_defer,
29017diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29018index 4406902..60603ef 100644
29019--- a/drivers/ata/sata_inic162x.c
29020+++ b/drivers/ata/sata_inic162x.c
29021@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29022 return 0;
29023 }
29024
29025-static struct ata_port_operations inic_port_ops = {
29026+static const struct ata_port_operations inic_port_ops = {
29027 .inherits = &sata_port_ops,
29028
29029 .check_atapi_dma = inic_check_atapi_dma,
29030diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29031index cf41126..8107be6 100644
29032--- a/drivers/ata/sata_mv.c
29033+++ b/drivers/ata/sata_mv.c
29034@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29035 .dma_boundary = MV_DMA_BOUNDARY,
29036 };
29037
29038-static struct ata_port_operations mv5_ops = {
29039+static const struct ata_port_operations mv5_ops = {
29040 .inherits = &ata_sff_port_ops,
29041
29042 .lost_interrupt = ATA_OP_NULL,
29043@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29044 .port_stop = mv_port_stop,
29045 };
29046
29047-static struct ata_port_operations mv6_ops = {
29048+static const struct ata_port_operations mv6_ops = {
29049 .inherits = &mv5_ops,
29050 .dev_config = mv6_dev_config,
29051 .scr_read = mv_scr_read,
29052@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29053 .bmdma_status = mv_bmdma_status,
29054 };
29055
29056-static struct ata_port_operations mv_iie_ops = {
29057+static const struct ata_port_operations mv_iie_ops = {
29058 .inherits = &mv6_ops,
29059 .dev_config = ATA_OP_NULL,
29060 .qc_prep = mv_qc_prep_iie,
29061diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29062index ae2297c..d5c9c33 100644
29063--- a/drivers/ata/sata_nv.c
29064+++ b/drivers/ata/sata_nv.c
29065@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29066 * cases. Define nv_hardreset() which only kicks in for post-boot
29067 * probing and use it for all variants.
29068 */
29069-static struct ata_port_operations nv_generic_ops = {
29070+static const struct ata_port_operations nv_generic_ops = {
29071 .inherits = &ata_bmdma_port_ops,
29072 .lost_interrupt = ATA_OP_NULL,
29073 .scr_read = nv_scr_read,
29074@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29075 .hardreset = nv_hardreset,
29076 };
29077
29078-static struct ata_port_operations nv_nf2_ops = {
29079+static const struct ata_port_operations nv_nf2_ops = {
29080 .inherits = &nv_generic_ops,
29081 .freeze = nv_nf2_freeze,
29082 .thaw = nv_nf2_thaw,
29083 };
29084
29085-static struct ata_port_operations nv_ck804_ops = {
29086+static const struct ata_port_operations nv_ck804_ops = {
29087 .inherits = &nv_generic_ops,
29088 .freeze = nv_ck804_freeze,
29089 .thaw = nv_ck804_thaw,
29090 .host_stop = nv_ck804_host_stop,
29091 };
29092
29093-static struct ata_port_operations nv_adma_ops = {
29094+static const struct ata_port_operations nv_adma_ops = {
29095 .inherits = &nv_ck804_ops,
29096
29097 .check_atapi_dma = nv_adma_check_atapi_dma,
29098@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29099 .host_stop = nv_adma_host_stop,
29100 };
29101
29102-static struct ata_port_operations nv_swncq_ops = {
29103+static const struct ata_port_operations nv_swncq_ops = {
29104 .inherits = &nv_generic_ops,
29105
29106 .qc_defer = ata_std_qc_defer,
29107diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29108index 07d8d00..6cc70bb 100644
29109--- a/drivers/ata/sata_promise.c
29110+++ b/drivers/ata/sata_promise.c
29111@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29112 .error_handler = pdc_error_handler,
29113 };
29114
29115-static struct ata_port_operations pdc_sata_ops = {
29116+static const struct ata_port_operations pdc_sata_ops = {
29117 .inherits = &pdc_common_ops,
29118 .cable_detect = pdc_sata_cable_detect,
29119 .freeze = pdc_sata_freeze,
29120@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29121
29122 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29123 and ->freeze/thaw that ignore the hotplug controls. */
29124-static struct ata_port_operations pdc_old_sata_ops = {
29125+static const struct ata_port_operations pdc_old_sata_ops = {
29126 .inherits = &pdc_sata_ops,
29127 .freeze = pdc_freeze,
29128 .thaw = pdc_thaw,
29129 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29130 };
29131
29132-static struct ata_port_operations pdc_pata_ops = {
29133+static const struct ata_port_operations pdc_pata_ops = {
29134 .inherits = &pdc_common_ops,
29135 .cable_detect = pdc_pata_cable_detect,
29136 .freeze = pdc_freeze,
29137diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29138index 326c0cf..36ecebe 100644
29139--- a/drivers/ata/sata_qstor.c
29140+++ b/drivers/ata/sata_qstor.c
29141@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29142 .dma_boundary = QS_DMA_BOUNDARY,
29143 };
29144
29145-static struct ata_port_operations qs_ata_ops = {
29146+static const struct ata_port_operations qs_ata_ops = {
29147 .inherits = &ata_sff_port_ops,
29148
29149 .check_atapi_dma = qs_check_atapi_dma,
29150diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29151index 3cb69d5..0871d3c 100644
29152--- a/drivers/ata/sata_sil.c
29153+++ b/drivers/ata/sata_sil.c
29154@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29155 .sg_tablesize = ATA_MAX_PRD
29156 };
29157
29158-static struct ata_port_operations sil_ops = {
29159+static const struct ata_port_operations sil_ops = {
29160 .inherits = &ata_bmdma32_port_ops,
29161 .dev_config = sil_dev_config,
29162 .set_mode = sil_set_mode,
29163diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29164index e6946fc..eddb794 100644
29165--- a/drivers/ata/sata_sil24.c
29166+++ b/drivers/ata/sata_sil24.c
29167@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29168 .dma_boundary = ATA_DMA_BOUNDARY,
29169 };
29170
29171-static struct ata_port_operations sil24_ops = {
29172+static const struct ata_port_operations sil24_ops = {
29173 .inherits = &sata_pmp_port_ops,
29174
29175 .qc_defer = sil24_qc_defer,
29176diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29177index f8a91bf..9cb06b6 100644
29178--- a/drivers/ata/sata_sis.c
29179+++ b/drivers/ata/sata_sis.c
29180@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29181 ATA_BMDMA_SHT(DRV_NAME),
29182 };
29183
29184-static struct ata_port_operations sis_ops = {
29185+static const struct ata_port_operations sis_ops = {
29186 .inherits = &ata_bmdma_port_ops,
29187 .scr_read = sis_scr_read,
29188 .scr_write = sis_scr_write,
29189diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29190index 7257f2d..d04c6f5 100644
29191--- a/drivers/ata/sata_svw.c
29192+++ b/drivers/ata/sata_svw.c
29193@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29194 };
29195
29196
29197-static struct ata_port_operations k2_sata_ops = {
29198+static const struct ata_port_operations k2_sata_ops = {
29199 .inherits = &ata_bmdma_port_ops,
29200 .sff_tf_load = k2_sata_tf_load,
29201 .sff_tf_read = k2_sata_tf_read,
29202diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29203index bbcf970..cd0df0d 100644
29204--- a/drivers/ata/sata_sx4.c
29205+++ b/drivers/ata/sata_sx4.c
29206@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29207 };
29208
29209 /* TODO: inherit from base port_ops after converting to new EH */
29210-static struct ata_port_operations pdc_20621_ops = {
29211+static const struct ata_port_operations pdc_20621_ops = {
29212 .inherits = &ata_sff_port_ops,
29213
29214 .check_atapi_dma = pdc_check_atapi_dma,
29215diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29216index e5bff47..089d859 100644
29217--- a/drivers/ata/sata_uli.c
29218+++ b/drivers/ata/sata_uli.c
29219@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29220 ATA_BMDMA_SHT(DRV_NAME),
29221 };
29222
29223-static struct ata_port_operations uli_ops = {
29224+static const struct ata_port_operations uli_ops = {
29225 .inherits = &ata_bmdma_port_ops,
29226 .scr_read = uli_scr_read,
29227 .scr_write = uli_scr_write,
29228diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29229index f5dcca7..77b94eb 100644
29230--- a/drivers/ata/sata_via.c
29231+++ b/drivers/ata/sata_via.c
29232@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29233 ATA_BMDMA_SHT(DRV_NAME),
29234 };
29235
29236-static struct ata_port_operations svia_base_ops = {
29237+static const struct ata_port_operations svia_base_ops = {
29238 .inherits = &ata_bmdma_port_ops,
29239 .sff_tf_load = svia_tf_load,
29240 };
29241
29242-static struct ata_port_operations vt6420_sata_ops = {
29243+static const struct ata_port_operations vt6420_sata_ops = {
29244 .inherits = &svia_base_ops,
29245 .freeze = svia_noop_freeze,
29246 .prereset = vt6420_prereset,
29247 .bmdma_start = vt6420_bmdma_start,
29248 };
29249
29250-static struct ata_port_operations vt6421_pata_ops = {
29251+static const struct ata_port_operations vt6421_pata_ops = {
29252 .inherits = &svia_base_ops,
29253 .cable_detect = vt6421_pata_cable_detect,
29254 .set_piomode = vt6421_set_pio_mode,
29255 .set_dmamode = vt6421_set_dma_mode,
29256 };
29257
29258-static struct ata_port_operations vt6421_sata_ops = {
29259+static const struct ata_port_operations vt6421_sata_ops = {
29260 .inherits = &svia_base_ops,
29261 .scr_read = svia_scr_read,
29262 .scr_write = svia_scr_write,
29263 };
29264
29265-static struct ata_port_operations vt8251_ops = {
29266+static const struct ata_port_operations vt8251_ops = {
29267 .inherits = &svia_base_ops,
29268 .hardreset = sata_std_hardreset,
29269 .scr_read = vt8251_scr_read,
29270diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29271index 8b2a278..51e65d3 100644
29272--- a/drivers/ata/sata_vsc.c
29273+++ b/drivers/ata/sata_vsc.c
29274@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29275 };
29276
29277
29278-static struct ata_port_operations vsc_sata_ops = {
29279+static const struct ata_port_operations vsc_sata_ops = {
29280 .inherits = &ata_bmdma_port_ops,
29281 /* The IRQ handling is not quite standard SFF behaviour so we
29282 cannot use the default lost interrupt handler */
29283diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29284index 5effec6..7e4019a 100644
29285--- a/drivers/atm/adummy.c
29286+++ b/drivers/atm/adummy.c
29287@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29288 vcc->pop(vcc, skb);
29289 else
29290 dev_kfree_skb_any(skb);
29291- atomic_inc(&vcc->stats->tx);
29292+ atomic_inc_unchecked(&vcc->stats->tx);
29293
29294 return 0;
29295 }
29296diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29297index 66e1813..26a27c6 100644
29298--- a/drivers/atm/ambassador.c
29299+++ b/drivers/atm/ambassador.c
29300@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29301 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29302
29303 // VC layer stats
29304- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29305+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29306
29307 // free the descriptor
29308 kfree (tx_descr);
29309@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29310 dump_skb ("<<<", vc, skb);
29311
29312 // VC layer stats
29313- atomic_inc(&atm_vcc->stats->rx);
29314+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29315 __net_timestamp(skb);
29316 // end of our responsability
29317 atm_vcc->push (atm_vcc, skb);
29318@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29319 } else {
29320 PRINTK (KERN_INFO, "dropped over-size frame");
29321 // should we count this?
29322- atomic_inc(&atm_vcc->stats->rx_drop);
29323+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29324 }
29325
29326 } else {
29327@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29328 }
29329
29330 if (check_area (skb->data, skb->len)) {
29331- atomic_inc(&atm_vcc->stats->tx_err);
29332+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29333 return -ENOMEM; // ?
29334 }
29335
29336diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29337index 02ad83d..6daffeb 100644
29338--- a/drivers/atm/atmtcp.c
29339+++ b/drivers/atm/atmtcp.c
29340@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29341 if (vcc->pop) vcc->pop(vcc,skb);
29342 else dev_kfree_skb(skb);
29343 if (dev_data) return 0;
29344- atomic_inc(&vcc->stats->tx_err);
29345+ atomic_inc_unchecked(&vcc->stats->tx_err);
29346 return -ENOLINK;
29347 }
29348 size = skb->len+sizeof(struct atmtcp_hdr);
29349@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29350 if (!new_skb) {
29351 if (vcc->pop) vcc->pop(vcc,skb);
29352 else dev_kfree_skb(skb);
29353- atomic_inc(&vcc->stats->tx_err);
29354+ atomic_inc_unchecked(&vcc->stats->tx_err);
29355 return -ENOBUFS;
29356 }
29357 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29358@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29359 if (vcc->pop) vcc->pop(vcc,skb);
29360 else dev_kfree_skb(skb);
29361 out_vcc->push(out_vcc,new_skb);
29362- atomic_inc(&vcc->stats->tx);
29363- atomic_inc(&out_vcc->stats->rx);
29364+ atomic_inc_unchecked(&vcc->stats->tx);
29365+ atomic_inc_unchecked(&out_vcc->stats->rx);
29366 return 0;
29367 }
29368
29369@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29370 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29371 read_unlock(&vcc_sklist_lock);
29372 if (!out_vcc) {
29373- atomic_inc(&vcc->stats->tx_err);
29374+ atomic_inc_unchecked(&vcc->stats->tx_err);
29375 goto done;
29376 }
29377 skb_pull(skb,sizeof(struct atmtcp_hdr));
29378@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29379 __net_timestamp(new_skb);
29380 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29381 out_vcc->push(out_vcc,new_skb);
29382- atomic_inc(&vcc->stats->tx);
29383- atomic_inc(&out_vcc->stats->rx);
29384+ atomic_inc_unchecked(&vcc->stats->tx);
29385+ atomic_inc_unchecked(&out_vcc->stats->rx);
29386 done:
29387 if (vcc->pop) vcc->pop(vcc,skb);
29388 else dev_kfree_skb(skb);
29389diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29390index 0c30261..3da356e 100644
29391--- a/drivers/atm/eni.c
29392+++ b/drivers/atm/eni.c
29393@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29394 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29395 vcc->dev->number);
29396 length = 0;
29397- atomic_inc(&vcc->stats->rx_err);
29398+ atomic_inc_unchecked(&vcc->stats->rx_err);
29399 }
29400 else {
29401 length = ATM_CELL_SIZE-1; /* no HEC */
29402@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29403 size);
29404 }
29405 eff = length = 0;
29406- atomic_inc(&vcc->stats->rx_err);
29407+ atomic_inc_unchecked(&vcc->stats->rx_err);
29408 }
29409 else {
29410 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29411@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29412 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29413 vcc->dev->number,vcc->vci,length,size << 2,descr);
29414 length = eff = 0;
29415- atomic_inc(&vcc->stats->rx_err);
29416+ atomic_inc_unchecked(&vcc->stats->rx_err);
29417 }
29418 }
29419 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29420@@ -770,7 +770,7 @@ rx_dequeued++;
29421 vcc->push(vcc,skb);
29422 pushed++;
29423 }
29424- atomic_inc(&vcc->stats->rx);
29425+ atomic_inc_unchecked(&vcc->stats->rx);
29426 }
29427 wake_up(&eni_dev->rx_wait);
29428 }
29429@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29430 PCI_DMA_TODEVICE);
29431 if (vcc->pop) vcc->pop(vcc,skb);
29432 else dev_kfree_skb_irq(skb);
29433- atomic_inc(&vcc->stats->tx);
29434+ atomic_inc_unchecked(&vcc->stats->tx);
29435 wake_up(&eni_dev->tx_wait);
29436 dma_complete++;
29437 }
29438@@ -1570,7 +1570,7 @@ tx_complete++;
29439 /*--------------------------------- entries ---------------------------------*/
29440
29441
29442-static const char *media_name[] __devinitdata = {
29443+static const char *media_name[] __devinitconst = {
29444 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29445 "UTP", "05?", "06?", "07?", /* 4- 7 */
29446 "TAXI","09?", "10?", "11?", /* 8-11 */
29447diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29448index cd5049a..a51209f 100644
29449--- a/drivers/atm/firestream.c
29450+++ b/drivers/atm/firestream.c
29451@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29452 }
29453 }
29454
29455- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29456+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29457
29458 fs_dprintk (FS_DEBUG_TXMEM, "i");
29459 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29460@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29461 #endif
29462 skb_put (skb, qe->p1 & 0xffff);
29463 ATM_SKB(skb)->vcc = atm_vcc;
29464- atomic_inc(&atm_vcc->stats->rx);
29465+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29466 __net_timestamp(skb);
29467 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29468 atm_vcc->push (atm_vcc, skb);
29469@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29470 kfree (pe);
29471 }
29472 if (atm_vcc)
29473- atomic_inc(&atm_vcc->stats->rx_drop);
29474+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29475 break;
29476 case 0x1f: /* Reassembly abort: no buffers. */
29477 /* Silently increment error counter. */
29478 if (atm_vcc)
29479- atomic_inc(&atm_vcc->stats->rx_drop);
29480+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29481 break;
29482 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29483 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29484diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29485index f766cc4..a34002e 100644
29486--- a/drivers/atm/fore200e.c
29487+++ b/drivers/atm/fore200e.c
29488@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29489 #endif
29490 /* check error condition */
29491 if (*entry->status & STATUS_ERROR)
29492- atomic_inc(&vcc->stats->tx_err);
29493+ atomic_inc_unchecked(&vcc->stats->tx_err);
29494 else
29495- atomic_inc(&vcc->stats->tx);
29496+ atomic_inc_unchecked(&vcc->stats->tx);
29497 }
29498 }
29499
29500@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29501 if (skb == NULL) {
29502 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29503
29504- atomic_inc(&vcc->stats->rx_drop);
29505+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29506 return -ENOMEM;
29507 }
29508
29509@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29510
29511 dev_kfree_skb_any(skb);
29512
29513- atomic_inc(&vcc->stats->rx_drop);
29514+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29515 return -ENOMEM;
29516 }
29517
29518 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29519
29520 vcc->push(vcc, skb);
29521- atomic_inc(&vcc->stats->rx);
29522+ atomic_inc_unchecked(&vcc->stats->rx);
29523
29524 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29525
29526@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29527 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29528 fore200e->atm_dev->number,
29529 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29530- atomic_inc(&vcc->stats->rx_err);
29531+ atomic_inc_unchecked(&vcc->stats->rx_err);
29532 }
29533 }
29534
29535@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29536 goto retry_here;
29537 }
29538
29539- atomic_inc(&vcc->stats->tx_err);
29540+ atomic_inc_unchecked(&vcc->stats->tx_err);
29541
29542 fore200e->tx_sat++;
29543 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29544diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29545index 7066703..2b130de 100644
29546--- a/drivers/atm/he.c
29547+++ b/drivers/atm/he.c
29548@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29549
29550 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29551 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29552- atomic_inc(&vcc->stats->rx_drop);
29553+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29554 goto return_host_buffers;
29555 }
29556
29557@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29558 RBRQ_LEN_ERR(he_dev->rbrq_head)
29559 ? "LEN_ERR" : "",
29560 vcc->vpi, vcc->vci);
29561- atomic_inc(&vcc->stats->rx_err);
29562+ atomic_inc_unchecked(&vcc->stats->rx_err);
29563 goto return_host_buffers;
29564 }
29565
29566@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29567 vcc->push(vcc, skb);
29568 spin_lock(&he_dev->global_lock);
29569
29570- atomic_inc(&vcc->stats->rx);
29571+ atomic_inc_unchecked(&vcc->stats->rx);
29572
29573 return_host_buffers:
29574 ++pdus_assembled;
29575@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29576 tpd->vcc->pop(tpd->vcc, tpd->skb);
29577 else
29578 dev_kfree_skb_any(tpd->skb);
29579- atomic_inc(&tpd->vcc->stats->tx_err);
29580+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29581 }
29582 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29583 return;
29584@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29585 vcc->pop(vcc, skb);
29586 else
29587 dev_kfree_skb_any(skb);
29588- atomic_inc(&vcc->stats->tx_err);
29589+ atomic_inc_unchecked(&vcc->stats->tx_err);
29590 return -EINVAL;
29591 }
29592
29593@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29594 vcc->pop(vcc, skb);
29595 else
29596 dev_kfree_skb_any(skb);
29597- atomic_inc(&vcc->stats->tx_err);
29598+ atomic_inc_unchecked(&vcc->stats->tx_err);
29599 return -EINVAL;
29600 }
29601 #endif
29602@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29603 vcc->pop(vcc, skb);
29604 else
29605 dev_kfree_skb_any(skb);
29606- atomic_inc(&vcc->stats->tx_err);
29607+ atomic_inc_unchecked(&vcc->stats->tx_err);
29608 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29609 return -ENOMEM;
29610 }
29611@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29612 vcc->pop(vcc, skb);
29613 else
29614 dev_kfree_skb_any(skb);
29615- atomic_inc(&vcc->stats->tx_err);
29616+ atomic_inc_unchecked(&vcc->stats->tx_err);
29617 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29618 return -ENOMEM;
29619 }
29620@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29621 __enqueue_tpd(he_dev, tpd, cid);
29622 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29623
29624- atomic_inc(&vcc->stats->tx);
29625+ atomic_inc_unchecked(&vcc->stats->tx);
29626
29627 return 0;
29628 }
29629diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29630index 4e49021..01b1512 100644
29631--- a/drivers/atm/horizon.c
29632+++ b/drivers/atm/horizon.c
29633@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29634 {
29635 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29636 // VC layer stats
29637- atomic_inc(&vcc->stats->rx);
29638+ atomic_inc_unchecked(&vcc->stats->rx);
29639 __net_timestamp(skb);
29640 // end of our responsability
29641 vcc->push (vcc, skb);
29642@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29643 dev->tx_iovec = NULL;
29644
29645 // VC layer stats
29646- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29647+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29648
29649 // free the skb
29650 hrz_kfree_skb (skb);
29651diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29652index e33ae00..9deb4ab 100644
29653--- a/drivers/atm/idt77252.c
29654+++ b/drivers/atm/idt77252.c
29655@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29656 else
29657 dev_kfree_skb(skb);
29658
29659- atomic_inc(&vcc->stats->tx);
29660+ atomic_inc_unchecked(&vcc->stats->tx);
29661 }
29662
29663 atomic_dec(&scq->used);
29664@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29665 if ((sb = dev_alloc_skb(64)) == NULL) {
29666 printk("%s: Can't allocate buffers for aal0.\n",
29667 card->name);
29668- atomic_add(i, &vcc->stats->rx_drop);
29669+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
29670 break;
29671 }
29672 if (!atm_charge(vcc, sb->truesize)) {
29673 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29674 card->name);
29675- atomic_add(i - 1, &vcc->stats->rx_drop);
29676+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29677 dev_kfree_skb(sb);
29678 break;
29679 }
29680@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29681 ATM_SKB(sb)->vcc = vcc;
29682 __net_timestamp(sb);
29683 vcc->push(vcc, sb);
29684- atomic_inc(&vcc->stats->rx);
29685+ atomic_inc_unchecked(&vcc->stats->rx);
29686
29687 cell += ATM_CELL_PAYLOAD;
29688 }
29689@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29690 "(CDC: %08x)\n",
29691 card->name, len, rpp->len, readl(SAR_REG_CDC));
29692 recycle_rx_pool_skb(card, rpp);
29693- atomic_inc(&vcc->stats->rx_err);
29694+ atomic_inc_unchecked(&vcc->stats->rx_err);
29695 return;
29696 }
29697 if (stat & SAR_RSQE_CRC) {
29698 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29699 recycle_rx_pool_skb(card, rpp);
29700- atomic_inc(&vcc->stats->rx_err);
29701+ atomic_inc_unchecked(&vcc->stats->rx_err);
29702 return;
29703 }
29704 if (skb_queue_len(&rpp->queue) > 1) {
29705@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29706 RXPRINTK("%s: Can't alloc RX skb.\n",
29707 card->name);
29708 recycle_rx_pool_skb(card, rpp);
29709- atomic_inc(&vcc->stats->rx_err);
29710+ atomic_inc_unchecked(&vcc->stats->rx_err);
29711 return;
29712 }
29713 if (!atm_charge(vcc, skb->truesize)) {
29714@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29715 __net_timestamp(skb);
29716
29717 vcc->push(vcc, skb);
29718- atomic_inc(&vcc->stats->rx);
29719+ atomic_inc_unchecked(&vcc->stats->rx);
29720
29721 return;
29722 }
29723@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29724 __net_timestamp(skb);
29725
29726 vcc->push(vcc, skb);
29727- atomic_inc(&vcc->stats->rx);
29728+ atomic_inc_unchecked(&vcc->stats->rx);
29729
29730 if (skb->truesize > SAR_FB_SIZE_3)
29731 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29732@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29733 if (vcc->qos.aal != ATM_AAL0) {
29734 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29735 card->name, vpi, vci);
29736- atomic_inc(&vcc->stats->rx_drop);
29737+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29738 goto drop;
29739 }
29740
29741 if ((sb = dev_alloc_skb(64)) == NULL) {
29742 printk("%s: Can't allocate buffers for AAL0.\n",
29743 card->name);
29744- atomic_inc(&vcc->stats->rx_err);
29745+ atomic_inc_unchecked(&vcc->stats->rx_err);
29746 goto drop;
29747 }
29748
29749@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29750 ATM_SKB(sb)->vcc = vcc;
29751 __net_timestamp(sb);
29752 vcc->push(vcc, sb);
29753- atomic_inc(&vcc->stats->rx);
29754+ atomic_inc_unchecked(&vcc->stats->rx);
29755
29756 drop:
29757 skb_pull(queue, 64);
29758@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29759
29760 if (vc == NULL) {
29761 printk("%s: NULL connection in send().\n", card->name);
29762- atomic_inc(&vcc->stats->tx_err);
29763+ atomic_inc_unchecked(&vcc->stats->tx_err);
29764 dev_kfree_skb(skb);
29765 return -EINVAL;
29766 }
29767 if (!test_bit(VCF_TX, &vc->flags)) {
29768 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29769- atomic_inc(&vcc->stats->tx_err);
29770+ atomic_inc_unchecked(&vcc->stats->tx_err);
29771 dev_kfree_skb(skb);
29772 return -EINVAL;
29773 }
29774@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29775 break;
29776 default:
29777 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29778- atomic_inc(&vcc->stats->tx_err);
29779+ atomic_inc_unchecked(&vcc->stats->tx_err);
29780 dev_kfree_skb(skb);
29781 return -EINVAL;
29782 }
29783
29784 if (skb_shinfo(skb)->nr_frags != 0) {
29785 printk("%s: No scatter-gather yet.\n", card->name);
29786- atomic_inc(&vcc->stats->tx_err);
29787+ atomic_inc_unchecked(&vcc->stats->tx_err);
29788 dev_kfree_skb(skb);
29789 return -EINVAL;
29790 }
29791@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29792
29793 err = queue_skb(card, vc, skb, oam);
29794 if (err) {
29795- atomic_inc(&vcc->stats->tx_err);
29796+ atomic_inc_unchecked(&vcc->stats->tx_err);
29797 dev_kfree_skb(skb);
29798 return err;
29799 }
29800@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29801 skb = dev_alloc_skb(64);
29802 if (!skb) {
29803 printk("%s: Out of memory in send_oam().\n", card->name);
29804- atomic_inc(&vcc->stats->tx_err);
29805+ atomic_inc_unchecked(&vcc->stats->tx_err);
29806 return -ENOMEM;
29807 }
29808 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29809diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29810index b2c1b37..faa672b 100644
29811--- a/drivers/atm/iphase.c
29812+++ b/drivers/atm/iphase.c
29813@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
29814 status = (u_short) (buf_desc_ptr->desc_mode);
29815 if (status & (RX_CER | RX_PTE | RX_OFL))
29816 {
29817- atomic_inc(&vcc->stats->rx_err);
29818+ atomic_inc_unchecked(&vcc->stats->rx_err);
29819 IF_ERR(printk("IA: bad packet, dropping it");)
29820 if (status & RX_CER) {
29821 IF_ERR(printk(" cause: packet CRC error\n");)
29822@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
29823 len = dma_addr - buf_addr;
29824 if (len > iadev->rx_buf_sz) {
29825 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29826- atomic_inc(&vcc->stats->rx_err);
29827+ atomic_inc_unchecked(&vcc->stats->rx_err);
29828 goto out_free_desc;
29829 }
29830
29831@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29832 ia_vcc = INPH_IA_VCC(vcc);
29833 if (ia_vcc == NULL)
29834 {
29835- atomic_inc(&vcc->stats->rx_err);
29836+ atomic_inc_unchecked(&vcc->stats->rx_err);
29837 dev_kfree_skb_any(skb);
29838 atm_return(vcc, atm_guess_pdu2truesize(len));
29839 goto INCR_DLE;
29840@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29841 if ((length > iadev->rx_buf_sz) || (length >
29842 (skb->len - sizeof(struct cpcs_trailer))))
29843 {
29844- atomic_inc(&vcc->stats->rx_err);
29845+ atomic_inc_unchecked(&vcc->stats->rx_err);
29846 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29847 length, skb->len);)
29848 dev_kfree_skb_any(skb);
29849@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29850
29851 IF_RX(printk("rx_dle_intr: skb push");)
29852 vcc->push(vcc,skb);
29853- atomic_inc(&vcc->stats->rx);
29854+ atomic_inc_unchecked(&vcc->stats->rx);
29855 iadev->rx_pkt_cnt++;
29856 }
29857 INCR_DLE:
29858@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29859 {
29860 struct k_sonet_stats *stats;
29861 stats = &PRIV(_ia_dev[board])->sonet_stats;
29862- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
29863- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
29864- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
29865- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
29866- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
29867- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
29868- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
29869- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
29870- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
29871+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
29872+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
29873+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
29874+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
29875+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
29876+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
29877+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
29878+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
29879+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
29880 }
29881 ia_cmds.status = 0;
29882 break;
29883@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29884 if ((desc == 0) || (desc > iadev->num_tx_desc))
29885 {
29886 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
29887- atomic_inc(&vcc->stats->tx);
29888+ atomic_inc_unchecked(&vcc->stats->tx);
29889 if (vcc->pop)
29890 vcc->pop(vcc, skb);
29891 else
29892@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29893 ATM_DESC(skb) = vcc->vci;
29894 skb_queue_tail(&iadev->tx_dma_q, skb);
29895
29896- atomic_inc(&vcc->stats->tx);
29897+ atomic_inc_unchecked(&vcc->stats->tx);
29898 iadev->tx_pkt_cnt++;
29899 /* Increment transaction counter */
29900 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
29901
29902 #if 0
29903 /* add flow control logic */
29904- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
29905+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
29906 if (iavcc->vc_desc_cnt > 10) {
29907 vcc->tx_quota = vcc->tx_quota * 3 / 4;
29908 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
29909diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
29910index cf97c34..8d30655 100644
29911--- a/drivers/atm/lanai.c
29912+++ b/drivers/atm/lanai.c
29913@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
29914 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
29915 lanai_endtx(lanai, lvcc);
29916 lanai_free_skb(lvcc->tx.atmvcc, skb);
29917- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
29918+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
29919 }
29920
29921 /* Try to fill the buffer - don't call unless there is backlog */
29922@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
29923 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
29924 __net_timestamp(skb);
29925 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
29926- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
29927+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
29928 out:
29929 lvcc->rx.buf.ptr = end;
29930 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
29931@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29932 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
29933 "vcc %d\n", lanai->number, (unsigned int) s, vci);
29934 lanai->stats.service_rxnotaal5++;
29935- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29936+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29937 return 0;
29938 }
29939 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
29940@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29941 int bytes;
29942 read_unlock(&vcc_sklist_lock);
29943 DPRINTK("got trashed rx pdu on vci %d\n", vci);
29944- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29945+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29946 lvcc->stats.x.aal5.service_trash++;
29947 bytes = (SERVICE_GET_END(s) * 16) -
29948 (((unsigned long) lvcc->rx.buf.ptr) -
29949@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29950 }
29951 if (s & SERVICE_STREAM) {
29952 read_unlock(&vcc_sklist_lock);
29953- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29954+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29955 lvcc->stats.x.aal5.service_stream++;
29956 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
29957 "PDU on VCI %d!\n", lanai->number, vci);
29958@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29959 return 0;
29960 }
29961 DPRINTK("got rx crc error on vci %d\n", vci);
29962- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29963+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29964 lvcc->stats.x.aal5.service_rxcrc++;
29965 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
29966 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
29967diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
29968index 3da804b..d3b0eed 100644
29969--- a/drivers/atm/nicstar.c
29970+++ b/drivers/atm/nicstar.c
29971@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29972 if ((vc = (vc_map *) vcc->dev_data) == NULL)
29973 {
29974 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
29975- atomic_inc(&vcc->stats->tx_err);
29976+ atomic_inc_unchecked(&vcc->stats->tx_err);
29977 dev_kfree_skb_any(skb);
29978 return -EINVAL;
29979 }
29980@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29981 if (!vc->tx)
29982 {
29983 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
29984- atomic_inc(&vcc->stats->tx_err);
29985+ atomic_inc_unchecked(&vcc->stats->tx_err);
29986 dev_kfree_skb_any(skb);
29987 return -EINVAL;
29988 }
29989@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29990 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
29991 {
29992 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
29993- atomic_inc(&vcc->stats->tx_err);
29994+ atomic_inc_unchecked(&vcc->stats->tx_err);
29995 dev_kfree_skb_any(skb);
29996 return -EINVAL;
29997 }
29998@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29999 if (skb_shinfo(skb)->nr_frags != 0)
30000 {
30001 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30002- atomic_inc(&vcc->stats->tx_err);
30003+ atomic_inc_unchecked(&vcc->stats->tx_err);
30004 dev_kfree_skb_any(skb);
30005 return -EINVAL;
30006 }
30007@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30008
30009 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30010 {
30011- atomic_inc(&vcc->stats->tx_err);
30012+ atomic_inc_unchecked(&vcc->stats->tx_err);
30013 dev_kfree_skb_any(skb);
30014 return -EIO;
30015 }
30016- atomic_inc(&vcc->stats->tx);
30017+ atomic_inc_unchecked(&vcc->stats->tx);
30018
30019 return 0;
30020 }
30021@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30022 {
30023 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30024 card->index);
30025- atomic_add(i,&vcc->stats->rx_drop);
30026+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
30027 break;
30028 }
30029 if (!atm_charge(vcc, sb->truesize))
30030 {
30031 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30032 card->index);
30033- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30034+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30035 dev_kfree_skb_any(sb);
30036 break;
30037 }
30038@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30039 ATM_SKB(sb)->vcc = vcc;
30040 __net_timestamp(sb);
30041 vcc->push(vcc, sb);
30042- atomic_inc(&vcc->stats->rx);
30043+ atomic_inc_unchecked(&vcc->stats->rx);
30044 cell += ATM_CELL_PAYLOAD;
30045 }
30046
30047@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30048 if (iovb == NULL)
30049 {
30050 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30051- atomic_inc(&vcc->stats->rx_drop);
30052+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30053 recycle_rx_buf(card, skb);
30054 return;
30055 }
30056@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30057 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30058 {
30059 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30060- atomic_inc(&vcc->stats->rx_err);
30061+ atomic_inc_unchecked(&vcc->stats->rx_err);
30062 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30063 NS_SKB(iovb)->iovcnt = 0;
30064 iovb->len = 0;
30065@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30066 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30067 card->index);
30068 which_list(card, skb);
30069- atomic_inc(&vcc->stats->rx_err);
30070+ atomic_inc_unchecked(&vcc->stats->rx_err);
30071 recycle_rx_buf(card, skb);
30072 vc->rx_iov = NULL;
30073 recycle_iov_buf(card, iovb);
30074@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30075 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30076 card->index);
30077 which_list(card, skb);
30078- atomic_inc(&vcc->stats->rx_err);
30079+ atomic_inc_unchecked(&vcc->stats->rx_err);
30080 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30081 NS_SKB(iovb)->iovcnt);
30082 vc->rx_iov = NULL;
30083@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30084 printk(" - PDU size mismatch.\n");
30085 else
30086 printk(".\n");
30087- atomic_inc(&vcc->stats->rx_err);
30088+ atomic_inc_unchecked(&vcc->stats->rx_err);
30089 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30090 NS_SKB(iovb)->iovcnt);
30091 vc->rx_iov = NULL;
30092@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30093 if (!atm_charge(vcc, skb->truesize))
30094 {
30095 push_rxbufs(card, skb);
30096- atomic_inc(&vcc->stats->rx_drop);
30097+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30098 }
30099 else
30100 {
30101@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30102 ATM_SKB(skb)->vcc = vcc;
30103 __net_timestamp(skb);
30104 vcc->push(vcc, skb);
30105- atomic_inc(&vcc->stats->rx);
30106+ atomic_inc_unchecked(&vcc->stats->rx);
30107 }
30108 }
30109 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30110@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30111 if (!atm_charge(vcc, sb->truesize))
30112 {
30113 push_rxbufs(card, sb);
30114- atomic_inc(&vcc->stats->rx_drop);
30115+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30116 }
30117 else
30118 {
30119@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30120 ATM_SKB(sb)->vcc = vcc;
30121 __net_timestamp(sb);
30122 vcc->push(vcc, sb);
30123- atomic_inc(&vcc->stats->rx);
30124+ atomic_inc_unchecked(&vcc->stats->rx);
30125 }
30126
30127 push_rxbufs(card, skb);
30128@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30129 if (!atm_charge(vcc, skb->truesize))
30130 {
30131 push_rxbufs(card, skb);
30132- atomic_inc(&vcc->stats->rx_drop);
30133+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30134 }
30135 else
30136 {
30137@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30138 ATM_SKB(skb)->vcc = vcc;
30139 __net_timestamp(skb);
30140 vcc->push(vcc, skb);
30141- atomic_inc(&vcc->stats->rx);
30142+ atomic_inc_unchecked(&vcc->stats->rx);
30143 }
30144
30145 push_rxbufs(card, sb);
30146@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30147 if (hb == NULL)
30148 {
30149 printk("nicstar%d: Out of huge buffers.\n", card->index);
30150- atomic_inc(&vcc->stats->rx_drop);
30151+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30152 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30153 NS_SKB(iovb)->iovcnt);
30154 vc->rx_iov = NULL;
30155@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30156 }
30157 else
30158 dev_kfree_skb_any(hb);
30159- atomic_inc(&vcc->stats->rx_drop);
30160+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30161 }
30162 else
30163 {
30164@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30165 #endif /* NS_USE_DESTRUCTORS */
30166 __net_timestamp(hb);
30167 vcc->push(vcc, hb);
30168- atomic_inc(&vcc->stats->rx);
30169+ atomic_inc_unchecked(&vcc->stats->rx);
30170 }
30171 }
30172
30173diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30174index 84c93ff..e6ed269 100644
30175--- a/drivers/atm/solos-pci.c
30176+++ b/drivers/atm/solos-pci.c
30177@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30178 }
30179 atm_charge(vcc, skb->truesize);
30180 vcc->push(vcc, skb);
30181- atomic_inc(&vcc->stats->rx);
30182+ atomic_inc_unchecked(&vcc->stats->rx);
30183 break;
30184
30185 case PKT_STATUS:
30186@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30187 char msg[500];
30188 char item[10];
30189
30190+ pax_track_stack();
30191+
30192 len = buf->len;
30193 for (i = 0; i < len; i++){
30194 if(i % 8 == 0)
30195@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30196 vcc = SKB_CB(oldskb)->vcc;
30197
30198 if (vcc) {
30199- atomic_inc(&vcc->stats->tx);
30200+ atomic_inc_unchecked(&vcc->stats->tx);
30201 solos_pop(vcc, oldskb);
30202 } else
30203 dev_kfree_skb_irq(oldskb);
30204diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30205index 6dd3f59..ee377f3 100644
30206--- a/drivers/atm/suni.c
30207+++ b/drivers/atm/suni.c
30208@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30209
30210
30211 #define ADD_LIMITED(s,v) \
30212- atomic_add((v),&stats->s); \
30213- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30214+ atomic_add_unchecked((v),&stats->s); \
30215+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30216
30217
30218 static void suni_hz(unsigned long from_timer)
30219diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30220index fc8cb07..4a80e53 100644
30221--- a/drivers/atm/uPD98402.c
30222+++ b/drivers/atm/uPD98402.c
30223@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30224 struct sonet_stats tmp;
30225 int error = 0;
30226
30227- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30228+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30229 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30230 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30231 if (zero && !error) {
30232@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30233
30234
30235 #define ADD_LIMITED(s,v) \
30236- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30237- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30238- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30239+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30240+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30241+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30242
30243
30244 static void stat_event(struct atm_dev *dev)
30245@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30246 if (reason & uPD98402_INT_PFM) stat_event(dev);
30247 if (reason & uPD98402_INT_PCO) {
30248 (void) GET(PCOCR); /* clear interrupt cause */
30249- atomic_add(GET(HECCT),
30250+ atomic_add_unchecked(GET(HECCT),
30251 &PRIV(dev)->sonet_stats.uncorr_hcs);
30252 }
30253 if ((reason & uPD98402_INT_RFO) &&
30254@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30255 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30256 uPD98402_INT_LOS),PIMR); /* enable them */
30257 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30258- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30259- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30260- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30261+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30262+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30263+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30264 return 0;
30265 }
30266
30267diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30268index 2e9635b..32927b4 100644
30269--- a/drivers/atm/zatm.c
30270+++ b/drivers/atm/zatm.c
30271@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30272 }
30273 if (!size) {
30274 dev_kfree_skb_irq(skb);
30275- if (vcc) atomic_inc(&vcc->stats->rx_err);
30276+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30277 continue;
30278 }
30279 if (!atm_charge(vcc,skb->truesize)) {
30280@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30281 skb->len = size;
30282 ATM_SKB(skb)->vcc = vcc;
30283 vcc->push(vcc,skb);
30284- atomic_inc(&vcc->stats->rx);
30285+ atomic_inc_unchecked(&vcc->stats->rx);
30286 }
30287 zout(pos & 0xffff,MTA(mbx));
30288 #if 0 /* probably a stupid idea */
30289@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30290 skb_queue_head(&zatm_vcc->backlog,skb);
30291 break;
30292 }
30293- atomic_inc(&vcc->stats->tx);
30294+ atomic_inc_unchecked(&vcc->stats->tx);
30295 wake_up(&zatm_vcc->tx_wait);
30296 }
30297
30298diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30299index 63c143e..fece183 100644
30300--- a/drivers/base/bus.c
30301+++ b/drivers/base/bus.c
30302@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30303 return ret;
30304 }
30305
30306-static struct sysfs_ops driver_sysfs_ops = {
30307+static const struct sysfs_ops driver_sysfs_ops = {
30308 .show = drv_attr_show,
30309 .store = drv_attr_store,
30310 };
30311@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30312 return ret;
30313 }
30314
30315-static struct sysfs_ops bus_sysfs_ops = {
30316+static const struct sysfs_ops bus_sysfs_ops = {
30317 .show = bus_attr_show,
30318 .store = bus_attr_store,
30319 };
30320@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30321 return 0;
30322 }
30323
30324-static struct kset_uevent_ops bus_uevent_ops = {
30325+static const struct kset_uevent_ops bus_uevent_ops = {
30326 .filter = bus_uevent_filter,
30327 };
30328
30329diff --git a/drivers/base/class.c b/drivers/base/class.c
30330index 6e2c3b0..cb61871 100644
30331--- a/drivers/base/class.c
30332+++ b/drivers/base/class.c
30333@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30334 kfree(cp);
30335 }
30336
30337-static struct sysfs_ops class_sysfs_ops = {
30338+static const struct sysfs_ops class_sysfs_ops = {
30339 .show = class_attr_show,
30340 .store = class_attr_store,
30341 };
30342diff --git a/drivers/base/core.c b/drivers/base/core.c
30343index f33d768..a9358d0 100644
30344--- a/drivers/base/core.c
30345+++ b/drivers/base/core.c
30346@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30347 return ret;
30348 }
30349
30350-static struct sysfs_ops dev_sysfs_ops = {
30351+static const struct sysfs_ops dev_sysfs_ops = {
30352 .show = dev_attr_show,
30353 .store = dev_attr_store,
30354 };
30355@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30356 return retval;
30357 }
30358
30359-static struct kset_uevent_ops device_uevent_ops = {
30360+static const struct kset_uevent_ops device_uevent_ops = {
30361 .filter = dev_uevent_filter,
30362 .name = dev_uevent_name,
30363 .uevent = dev_uevent,
30364diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30365index 989429c..2272b00 100644
30366--- a/drivers/base/memory.c
30367+++ b/drivers/base/memory.c
30368@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30369 return retval;
30370 }
30371
30372-static struct kset_uevent_ops memory_uevent_ops = {
30373+static const struct kset_uevent_ops memory_uevent_ops = {
30374 .name = memory_uevent_name,
30375 .uevent = memory_uevent,
30376 };
30377diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30378index 3f202f7..61c4a6f 100644
30379--- a/drivers/base/sys.c
30380+++ b/drivers/base/sys.c
30381@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30382 return -EIO;
30383 }
30384
30385-static struct sysfs_ops sysfs_ops = {
30386+static const struct sysfs_ops sysfs_ops = {
30387 .show = sysdev_show,
30388 .store = sysdev_store,
30389 };
30390@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30391 return -EIO;
30392 }
30393
30394-static struct sysfs_ops sysfs_class_ops = {
30395+static const struct sysfs_ops sysfs_class_ops = {
30396 .show = sysdev_class_show,
30397 .store = sysdev_class_store,
30398 };
30399diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30400index eb4fa19..1954777 100644
30401--- a/drivers/block/DAC960.c
30402+++ b/drivers/block/DAC960.c
30403@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30404 unsigned long flags;
30405 int Channel, TargetID;
30406
30407+ pax_track_stack();
30408+
30409 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30410 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30411 sizeof(DAC960_SCSI_Inquiry_T) +
30412diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30413index 68b90d9..7e2e3f3 100644
30414--- a/drivers/block/cciss.c
30415+++ b/drivers/block/cciss.c
30416@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30417 int err;
30418 u32 cp;
30419
30420+ memset(&arg64, 0, sizeof(arg64));
30421+
30422 err = 0;
30423 err |=
30424 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30425@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30426 /* Wait (up to 20 seconds) for a command to complete */
30427
30428 for (i = 20 * HZ; i > 0; i--) {
30429- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30430+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30431 if (done == FIFO_EMPTY)
30432 schedule_timeout_uninterruptible(1);
30433 else
30434@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30435 resend_cmd1:
30436
30437 /* Disable interrupt on the board. */
30438- h->access.set_intr_mask(h, CCISS_INTR_OFF);
30439+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
30440
30441 /* Make sure there is room in the command FIFO */
30442 /* Actually it should be completely empty at this time */
30443@@ -2884,13 +2886,13 @@ resend_cmd1:
30444 /* tape side of the driver. */
30445 for (i = 200000; i > 0; i--) {
30446 /* if fifo isn't full go */
30447- if (!(h->access.fifo_full(h)))
30448+ if (!(h->access->fifo_full(h)))
30449 break;
30450 udelay(10);
30451 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30452 " waiting!\n", h->ctlr);
30453 }
30454- h->access.submit_command(h, c); /* Send the cmd */
30455+ h->access->submit_command(h, c); /* Send the cmd */
30456 do {
30457 complete = pollcomplete(h->ctlr);
30458
30459@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30460 while (!hlist_empty(&h->reqQ)) {
30461 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30462 /* can't do anything if fifo is full */
30463- if ((h->access.fifo_full(h))) {
30464+ if ((h->access->fifo_full(h))) {
30465 printk(KERN_WARNING "cciss: fifo full\n");
30466 break;
30467 }
30468@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30469 h->Qdepth--;
30470
30471 /* Tell the controller execute command */
30472- h->access.submit_command(h, c);
30473+ h->access->submit_command(h, c);
30474
30475 /* Put job onto the completed Q */
30476 addQ(&h->cmpQ, c);
30477@@ -3393,17 +3395,17 @@ startio:
30478
30479 static inline unsigned long get_next_completion(ctlr_info_t *h)
30480 {
30481- return h->access.command_completed(h);
30482+ return h->access->command_completed(h);
30483 }
30484
30485 static inline int interrupt_pending(ctlr_info_t *h)
30486 {
30487- return h->access.intr_pending(h);
30488+ return h->access->intr_pending(h);
30489 }
30490
30491 static inline long interrupt_not_for_us(ctlr_info_t *h)
30492 {
30493- return (((h->access.intr_pending(h) == 0) ||
30494+ return (((h->access->intr_pending(h) == 0) ||
30495 (h->interrupts_enabled == 0)));
30496 }
30497
30498@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30499 */
30500 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30501 c->product_name = products[prod_index].product_name;
30502- c->access = *(products[prod_index].access);
30503+ c->access = products[prod_index].access;
30504 c->nr_cmds = c->max_commands - 4;
30505 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30506 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30507@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30508 }
30509
30510 /* make sure the board interrupts are off */
30511- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30512+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30513 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30514 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30515 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30516@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30517 cciss_scsi_setup(i);
30518
30519 /* Turn the interrupts on so we can service requests */
30520- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30521+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30522
30523 /* Get the firmware version */
30524 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30525diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30526index 04d6bf8..36e712d 100644
30527--- a/drivers/block/cciss.h
30528+++ b/drivers/block/cciss.h
30529@@ -90,7 +90,7 @@ struct ctlr_info
30530 // information about each logical volume
30531 drive_info_struct *drv[CISS_MAX_LUN];
30532
30533- struct access_method access;
30534+ struct access_method *access;
30535
30536 /* queue and queue Info */
30537 struct hlist_head reqQ;
30538diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30539index 6422651..bb1bdef 100644
30540--- a/drivers/block/cpqarray.c
30541+++ b/drivers/block/cpqarray.c
30542@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30543 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30544 goto Enomem4;
30545 }
30546- hba[i]->access.set_intr_mask(hba[i], 0);
30547+ hba[i]->access->set_intr_mask(hba[i], 0);
30548 if (request_irq(hba[i]->intr, do_ida_intr,
30549 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30550 {
30551@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30552 add_timer(&hba[i]->timer);
30553
30554 /* Enable IRQ now that spinlock and rate limit timer are set up */
30555- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30556+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30557
30558 for(j=0; j<NWD; j++) {
30559 struct gendisk *disk = ida_gendisk[i][j];
30560@@ -695,7 +695,7 @@ DBGINFO(
30561 for(i=0; i<NR_PRODUCTS; i++) {
30562 if (board_id == products[i].board_id) {
30563 c->product_name = products[i].product_name;
30564- c->access = *(products[i].access);
30565+ c->access = products[i].access;
30566 break;
30567 }
30568 }
30569@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30570 hba[ctlr]->intr = intr;
30571 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30572 hba[ctlr]->product_name = products[j].product_name;
30573- hba[ctlr]->access = *(products[j].access);
30574+ hba[ctlr]->access = products[j].access;
30575 hba[ctlr]->ctlr = ctlr;
30576 hba[ctlr]->board_id = board_id;
30577 hba[ctlr]->pci_dev = NULL; /* not PCI */
30578@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30579 struct scatterlist tmp_sg[SG_MAX];
30580 int i, dir, seg;
30581
30582+ pax_track_stack();
30583+
30584 if (blk_queue_plugged(q))
30585 goto startio;
30586
30587@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30588
30589 while((c = h->reqQ) != NULL) {
30590 /* Can't do anything if we're busy */
30591- if (h->access.fifo_full(h) == 0)
30592+ if (h->access->fifo_full(h) == 0)
30593 return;
30594
30595 /* Get the first entry from the request Q */
30596@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30597 h->Qdepth--;
30598
30599 /* Tell the controller to do our bidding */
30600- h->access.submit_command(h, c);
30601+ h->access->submit_command(h, c);
30602
30603 /* Get onto the completion Q */
30604 addQ(&h->cmpQ, c);
30605@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30606 unsigned long flags;
30607 __u32 a,a1;
30608
30609- istat = h->access.intr_pending(h);
30610+ istat = h->access->intr_pending(h);
30611 /* Is this interrupt for us? */
30612 if (istat == 0)
30613 return IRQ_NONE;
30614@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30615 */
30616 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30617 if (istat & FIFO_NOT_EMPTY) {
30618- while((a = h->access.command_completed(h))) {
30619+ while((a = h->access->command_completed(h))) {
30620 a1 = a; a &= ~3;
30621 if ((c = h->cmpQ) == NULL)
30622 {
30623@@ -1434,11 +1436,11 @@ static int sendcmd(
30624 /*
30625 * Disable interrupt
30626 */
30627- info_p->access.set_intr_mask(info_p, 0);
30628+ info_p->access->set_intr_mask(info_p, 0);
30629 /* Make sure there is room in the command FIFO */
30630 /* Actually it should be completely empty at this time. */
30631 for (i = 200000; i > 0; i--) {
30632- temp = info_p->access.fifo_full(info_p);
30633+ temp = info_p->access->fifo_full(info_p);
30634 if (temp != 0) {
30635 break;
30636 }
30637@@ -1451,7 +1453,7 @@ DBG(
30638 /*
30639 * Send the cmd
30640 */
30641- info_p->access.submit_command(info_p, c);
30642+ info_p->access->submit_command(info_p, c);
30643 complete = pollcomplete(ctlr);
30644
30645 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30646@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30647 * we check the new geometry. Then turn interrupts back on when
30648 * we're done.
30649 */
30650- host->access.set_intr_mask(host, 0);
30651+ host->access->set_intr_mask(host, 0);
30652 getgeometry(ctlr);
30653- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30654+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30655
30656 for(i=0; i<NWD; i++) {
30657 struct gendisk *disk = ida_gendisk[ctlr][i];
30658@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30659 /* Wait (up to 2 seconds) for a command to complete */
30660
30661 for (i = 200000; i > 0; i--) {
30662- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30663+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30664 if (done == 0) {
30665 udelay(10); /* a short fixed delay */
30666 } else
30667diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30668index be73e9d..7fbf140 100644
30669--- a/drivers/block/cpqarray.h
30670+++ b/drivers/block/cpqarray.h
30671@@ -99,7 +99,7 @@ struct ctlr_info {
30672 drv_info_t drv[NWD];
30673 struct proc_dir_entry *proc;
30674
30675- struct access_method access;
30676+ struct access_method *access;
30677
30678 cmdlist_t *reqQ;
30679 cmdlist_t *cmpQ;
30680diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30681index 8ec2d70..2804b30 100644
30682--- a/drivers/block/loop.c
30683+++ b/drivers/block/loop.c
30684@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30685 mm_segment_t old_fs = get_fs();
30686
30687 set_fs(get_ds());
30688- bw = file->f_op->write(file, buf, len, &pos);
30689+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30690 set_fs(old_fs);
30691 if (likely(bw == len))
30692 return 0;
30693diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30694index 26ada47..083c480 100644
30695--- a/drivers/block/nbd.c
30696+++ b/drivers/block/nbd.c
30697@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30698 struct kvec iov;
30699 sigset_t blocked, oldset;
30700
30701+ pax_track_stack();
30702+
30703 if (unlikely(!sock)) {
30704 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30705 lo->disk->disk_name, (send ? "send" : "recv"));
30706@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30707 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30708 unsigned int cmd, unsigned long arg)
30709 {
30710+ pax_track_stack();
30711+
30712 switch (cmd) {
30713 case NBD_DISCONNECT: {
30714 struct request sreq;
30715diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30716index a5d585d..d087be3 100644
30717--- a/drivers/block/pktcdvd.c
30718+++ b/drivers/block/pktcdvd.c
30719@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30720 return len;
30721 }
30722
30723-static struct sysfs_ops kobj_pkt_ops = {
30724+static const struct sysfs_ops kobj_pkt_ops = {
30725 .show = kobj_pkt_show,
30726 .store = kobj_pkt_store
30727 };
30728diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30729index 6aad99e..89cd142 100644
30730--- a/drivers/char/Kconfig
30731+++ b/drivers/char/Kconfig
30732@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30733
30734 config DEVKMEM
30735 bool "/dev/kmem virtual device support"
30736- default y
30737+ default n
30738+ depends on !GRKERNSEC_KMEM
30739 help
30740 Say Y here if you want to support the /dev/kmem device. The
30741 /dev/kmem device is rarely used, but can be used for certain
30742@@ -1114,6 +1115,7 @@ config DEVPORT
30743 bool
30744 depends on !M68K
30745 depends on ISA || PCI
30746+ depends on !GRKERNSEC_KMEM
30747 default y
30748
30749 source "drivers/s390/char/Kconfig"
30750diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30751index a96f319..a778a5b 100644
30752--- a/drivers/char/agp/frontend.c
30753+++ b/drivers/char/agp/frontend.c
30754@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30755 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30756 return -EFAULT;
30757
30758- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30759+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30760 return -EFAULT;
30761
30762 client = agp_find_client_by_pid(reserve.pid);
30763diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30764index d8cff90..9628e70 100644
30765--- a/drivers/char/briq_panel.c
30766+++ b/drivers/char/briq_panel.c
30767@@ -10,6 +10,7 @@
30768 #include <linux/types.h>
30769 #include <linux/errno.h>
30770 #include <linux/tty.h>
30771+#include <linux/mutex.h>
30772 #include <linux/timer.h>
30773 #include <linux/kernel.h>
30774 #include <linux/wait.h>
30775@@ -36,6 +37,7 @@ static int vfd_is_open;
30776 static unsigned char vfd[40];
30777 static int vfd_cursor;
30778 static unsigned char ledpb, led;
30779+static DEFINE_MUTEX(vfd_mutex);
30780
30781 static void update_vfd(void)
30782 {
30783@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30784 if (!vfd_is_open)
30785 return -EBUSY;
30786
30787+ mutex_lock(&vfd_mutex);
30788 for (;;) {
30789 char c;
30790 if (!indx)
30791 break;
30792- if (get_user(c, buf))
30793+ if (get_user(c, buf)) {
30794+ mutex_unlock(&vfd_mutex);
30795 return -EFAULT;
30796+ }
30797 if (esc) {
30798 set_led(c);
30799 esc = 0;
30800@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30801 buf++;
30802 }
30803 update_vfd();
30804+ mutex_unlock(&vfd_mutex);
30805
30806 return len;
30807 }
30808diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30809index 31e7c91..161afc0 100644
30810--- a/drivers/char/genrtc.c
30811+++ b/drivers/char/genrtc.c
30812@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
30813 switch (cmd) {
30814
30815 case RTC_PLL_GET:
30816+ memset(&pll, 0, sizeof(pll));
30817 if (get_rtc_pll(&pll))
30818 return -EINVAL;
30819 else
30820diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30821index 006466d..a2bb21c 100644
30822--- a/drivers/char/hpet.c
30823+++ b/drivers/char/hpet.c
30824@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
30825 return 0;
30826 }
30827
30828-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
30829+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
30830
30831 static int
30832 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
30833@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30834 }
30835
30836 static int
30837-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30838+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
30839 {
30840 struct hpet_timer __iomem *timer;
30841 struct hpet __iomem *hpet;
30842@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30843 {
30844 struct hpet_info info;
30845
30846+ memset(&info, 0, sizeof(info));
30847+
30848 if (devp->hd_ireqfreq)
30849 info.hi_ireqfreq =
30850 hpet_time_div(hpetp, devp->hd_ireqfreq);
30851- else
30852- info.hi_ireqfreq = 0;
30853 info.hi_flags =
30854 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
30855 info.hi_hpet = hpetp->hp_which;
30856diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
30857index 0afc8b8..6913fc3 100644
30858--- a/drivers/char/hvc_beat.c
30859+++ b/drivers/char/hvc_beat.c
30860@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
30861 return cnt;
30862 }
30863
30864-static struct hv_ops hvc_beat_get_put_ops = {
30865+static const struct hv_ops hvc_beat_get_put_ops = {
30866 .get_chars = hvc_beat_get_chars,
30867 .put_chars = hvc_beat_put_chars,
30868 };
30869diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
30870index 98097f2..407dddc 100644
30871--- a/drivers/char/hvc_console.c
30872+++ b/drivers/char/hvc_console.c
30873@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
30874 * console interfaces but can still be used as a tty device. This has to be
30875 * static because kmalloc will not work during early console init.
30876 */
30877-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30878+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30879 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
30880 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
30881
30882@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
30883 * vty adapters do NOT get an hvc_instantiate() callback since they
30884 * appear after early console init.
30885 */
30886-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
30887+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
30888 {
30889 struct hvc_struct *hp;
30890
30891@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
30892 };
30893
30894 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
30895- struct hv_ops *ops, int outbuf_size)
30896+ const struct hv_ops *ops, int outbuf_size)
30897 {
30898 struct hvc_struct *hp;
30899 int i;
30900diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
30901index 10950ca..ed176c3 100644
30902--- a/drivers/char/hvc_console.h
30903+++ b/drivers/char/hvc_console.h
30904@@ -55,7 +55,7 @@ struct hvc_struct {
30905 int outbuf_size;
30906 int n_outbuf;
30907 uint32_t vtermno;
30908- struct hv_ops *ops;
30909+ const struct hv_ops *ops;
30910 int irq_requested;
30911 int data;
30912 struct winsize ws;
30913@@ -76,11 +76,11 @@ struct hv_ops {
30914 };
30915
30916 /* Register a vterm and a slot index for use as a console (console_init) */
30917-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
30918+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
30919
30920 /* register a vterm for hvc tty operation (module_init or hotplug add) */
30921 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
30922- struct hv_ops *ops, int outbuf_size);
30923+ const struct hv_ops *ops, int outbuf_size);
30924 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
30925 extern int hvc_remove(struct hvc_struct *hp);
30926
30927diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
30928index 936d05b..fd02426 100644
30929--- a/drivers/char/hvc_iseries.c
30930+++ b/drivers/char/hvc_iseries.c
30931@@ -197,7 +197,7 @@ done:
30932 return sent;
30933 }
30934
30935-static struct hv_ops hvc_get_put_ops = {
30936+static const struct hv_ops hvc_get_put_ops = {
30937 .get_chars = get_chars,
30938 .put_chars = put_chars,
30939 .notifier_add = notifier_add_irq,
30940diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
30941index b0e168f..69cda2a 100644
30942--- a/drivers/char/hvc_iucv.c
30943+++ b/drivers/char/hvc_iucv.c
30944@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
30945
30946
30947 /* HVC operations */
30948-static struct hv_ops hvc_iucv_ops = {
30949+static const struct hv_ops hvc_iucv_ops = {
30950 .get_chars = hvc_iucv_get_chars,
30951 .put_chars = hvc_iucv_put_chars,
30952 .notifier_add = hvc_iucv_notifier_add,
30953diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
30954index 88590d0..61c4a61 100644
30955--- a/drivers/char/hvc_rtas.c
30956+++ b/drivers/char/hvc_rtas.c
30957@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
30958 return i;
30959 }
30960
30961-static struct hv_ops hvc_rtas_get_put_ops = {
30962+static const struct hv_ops hvc_rtas_get_put_ops = {
30963 .get_chars = hvc_rtas_read_console,
30964 .put_chars = hvc_rtas_write_console,
30965 };
30966diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
30967index bd63ba8..b0957e6 100644
30968--- a/drivers/char/hvc_udbg.c
30969+++ b/drivers/char/hvc_udbg.c
30970@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
30971 return i;
30972 }
30973
30974-static struct hv_ops hvc_udbg_ops = {
30975+static const struct hv_ops hvc_udbg_ops = {
30976 .get_chars = hvc_udbg_get,
30977 .put_chars = hvc_udbg_put,
30978 };
30979diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
30980index 10be343..27370e9 100644
30981--- a/drivers/char/hvc_vio.c
30982+++ b/drivers/char/hvc_vio.c
30983@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
30984 return got;
30985 }
30986
30987-static struct hv_ops hvc_get_put_ops = {
30988+static const struct hv_ops hvc_get_put_ops = {
30989 .get_chars = filtered_get_chars,
30990 .put_chars = hvc_put_chars,
30991 .notifier_add = notifier_add_irq,
30992diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
30993index a6ee32b..94f8c26 100644
30994--- a/drivers/char/hvc_xen.c
30995+++ b/drivers/char/hvc_xen.c
30996@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
30997 return recv;
30998 }
30999
31000-static struct hv_ops hvc_ops = {
31001+static const struct hv_ops hvc_ops = {
31002 .get_chars = read_console,
31003 .put_chars = write_console,
31004 .notifier_add = notifier_add_irq,
31005diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31006index 266b858..f3ee0bb 100644
31007--- a/drivers/char/hvcs.c
31008+++ b/drivers/char/hvcs.c
31009@@ -82,6 +82,7 @@
31010 #include <asm/hvcserver.h>
31011 #include <asm/uaccess.h>
31012 #include <asm/vio.h>
31013+#include <asm/local.h>
31014
31015 /*
31016 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31017@@ -269,7 +270,7 @@ struct hvcs_struct {
31018 unsigned int index;
31019
31020 struct tty_struct *tty;
31021- int open_count;
31022+ local_t open_count;
31023
31024 /*
31025 * Used to tell the driver kernel_thread what operations need to take
31026@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31027
31028 spin_lock_irqsave(&hvcsd->lock, flags);
31029
31030- if (hvcsd->open_count > 0) {
31031+ if (local_read(&hvcsd->open_count) > 0) {
31032 spin_unlock_irqrestore(&hvcsd->lock, flags);
31033 printk(KERN_INFO "HVCS: vterm state unchanged. "
31034 "The hvcs device node is still in use.\n");
31035@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31036 if ((retval = hvcs_partner_connect(hvcsd)))
31037 goto error_release;
31038
31039- hvcsd->open_count = 1;
31040+ local_set(&hvcsd->open_count, 1);
31041 hvcsd->tty = tty;
31042 tty->driver_data = hvcsd;
31043
31044@@ -1169,7 +1170,7 @@ fast_open:
31045
31046 spin_lock_irqsave(&hvcsd->lock, flags);
31047 kref_get(&hvcsd->kref);
31048- hvcsd->open_count++;
31049+ local_inc(&hvcsd->open_count);
31050 hvcsd->todo_mask |= HVCS_SCHED_READ;
31051 spin_unlock_irqrestore(&hvcsd->lock, flags);
31052
31053@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31054 hvcsd = tty->driver_data;
31055
31056 spin_lock_irqsave(&hvcsd->lock, flags);
31057- if (--hvcsd->open_count == 0) {
31058+ if (local_dec_and_test(&hvcsd->open_count)) {
31059
31060 vio_disable_interrupts(hvcsd->vdev);
31061
31062@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31063 free_irq(irq, hvcsd);
31064 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31065 return;
31066- } else if (hvcsd->open_count < 0) {
31067+ } else if (local_read(&hvcsd->open_count) < 0) {
31068 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31069 " is missmanaged.\n",
31070- hvcsd->vdev->unit_address, hvcsd->open_count);
31071+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31072 }
31073
31074 spin_unlock_irqrestore(&hvcsd->lock, flags);
31075@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31076
31077 spin_lock_irqsave(&hvcsd->lock, flags);
31078 /* Preserve this so that we know how many kref refs to put */
31079- temp_open_count = hvcsd->open_count;
31080+ temp_open_count = local_read(&hvcsd->open_count);
31081
31082 /*
31083 * Don't kref put inside the spinlock because the destruction
31084@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31085 hvcsd->tty->driver_data = NULL;
31086 hvcsd->tty = NULL;
31087
31088- hvcsd->open_count = 0;
31089+ local_set(&hvcsd->open_count, 0);
31090
31091 /* This will drop any buffered data on the floor which is OK in a hangup
31092 * scenario. */
31093@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31094 * the middle of a write operation? This is a crummy place to do this
31095 * but we want to keep it all in the spinlock.
31096 */
31097- if (hvcsd->open_count <= 0) {
31098+ if (local_read(&hvcsd->open_count) <= 0) {
31099 spin_unlock_irqrestore(&hvcsd->lock, flags);
31100 return -ENODEV;
31101 }
31102@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31103 {
31104 struct hvcs_struct *hvcsd = tty->driver_data;
31105
31106- if (!hvcsd || hvcsd->open_count <= 0)
31107+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31108 return 0;
31109
31110 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31111diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31112index ec5e3f8..02455ba 100644
31113--- a/drivers/char/ipmi/ipmi_msghandler.c
31114+++ b/drivers/char/ipmi/ipmi_msghandler.c
31115@@ -414,7 +414,7 @@ struct ipmi_smi {
31116 struct proc_dir_entry *proc_dir;
31117 char proc_dir_name[10];
31118
31119- atomic_t stats[IPMI_NUM_STATS];
31120+ atomic_unchecked_t stats[IPMI_NUM_STATS];
31121
31122 /*
31123 * run_to_completion duplicate of smb_info, smi_info
31124@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31125
31126
31127 #define ipmi_inc_stat(intf, stat) \
31128- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31129+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31130 #define ipmi_get_stat(intf, stat) \
31131- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31132+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31133
31134 static int is_lan_addr(struct ipmi_addr *addr)
31135 {
31136@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31137 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31138 init_waitqueue_head(&intf->waitq);
31139 for (i = 0; i < IPMI_NUM_STATS; i++)
31140- atomic_set(&intf->stats[i], 0);
31141+ atomic_set_unchecked(&intf->stats[i], 0);
31142
31143 intf->proc_dir = NULL;
31144
31145@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31146 struct ipmi_smi_msg smi_msg;
31147 struct ipmi_recv_msg recv_msg;
31148
31149+ pax_track_stack();
31150+
31151 si = (struct ipmi_system_interface_addr *) &addr;
31152 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31153 si->channel = IPMI_BMC_CHANNEL;
31154diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31155index abae8c9..8021979 100644
31156--- a/drivers/char/ipmi/ipmi_si_intf.c
31157+++ b/drivers/char/ipmi/ipmi_si_intf.c
31158@@ -277,7 +277,7 @@ struct smi_info {
31159 unsigned char slave_addr;
31160
31161 /* Counters and things for the proc filesystem. */
31162- atomic_t stats[SI_NUM_STATS];
31163+ atomic_unchecked_t stats[SI_NUM_STATS];
31164
31165 struct task_struct *thread;
31166
31167@@ -285,9 +285,9 @@ struct smi_info {
31168 };
31169
31170 #define smi_inc_stat(smi, stat) \
31171- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31172+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31173 #define smi_get_stat(smi, stat) \
31174- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31175+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31176
31177 #define SI_MAX_PARMS 4
31178
31179@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31180 atomic_set(&new_smi->req_events, 0);
31181 new_smi->run_to_completion = 0;
31182 for (i = 0; i < SI_NUM_STATS; i++)
31183- atomic_set(&new_smi->stats[i], 0);
31184+ atomic_set_unchecked(&new_smi->stats[i], 0);
31185
31186 new_smi->interrupt_disabled = 0;
31187 atomic_set(&new_smi->stop_operation, 0);
31188diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31189index 402838f..55e2200 100644
31190--- a/drivers/char/istallion.c
31191+++ b/drivers/char/istallion.c
31192@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31193 * re-used for each stats call.
31194 */
31195 static comstats_t stli_comstats;
31196-static combrd_t stli_brdstats;
31197 static struct asystats stli_cdkstats;
31198
31199 /*****************************************************************************/
31200@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31201 {
31202 struct stlibrd *brdp;
31203 unsigned int i;
31204+ combrd_t stli_brdstats;
31205
31206 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31207 return -EFAULT;
31208@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31209 struct stliport stli_dummyport;
31210 struct stliport *portp;
31211
31212+ pax_track_stack();
31213+
31214 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31215 return -EFAULT;
31216 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31217@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31218 struct stlibrd stli_dummybrd;
31219 struct stlibrd *brdp;
31220
31221+ pax_track_stack();
31222+
31223 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31224 return -EFAULT;
31225 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31226diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31227index 950837c..e55a288 100644
31228--- a/drivers/char/keyboard.c
31229+++ b/drivers/char/keyboard.c
31230@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31231 kbd->kbdmode == VC_MEDIUMRAW) &&
31232 value != KVAL(K_SAK))
31233 return; /* SAK is allowed even in raw mode */
31234+
31235+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31236+ {
31237+ void *func = fn_handler[value];
31238+ if (func == fn_show_state || func == fn_show_ptregs ||
31239+ func == fn_show_mem)
31240+ return;
31241+ }
31242+#endif
31243+
31244 fn_handler[value](vc);
31245 }
31246
31247@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31248 .evbit = { BIT_MASK(EV_SND) },
31249 },
31250
31251- { }, /* Terminating entry */
31252+ { 0 }, /* Terminating entry */
31253 };
31254
31255 MODULE_DEVICE_TABLE(input, kbd_ids);
31256diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31257index 87c67b4..230527a 100644
31258--- a/drivers/char/mbcs.c
31259+++ b/drivers/char/mbcs.c
31260@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31261 return 0;
31262 }
31263
31264-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31265+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31266 {
31267 .part_num = MBCS_PART_NUM,
31268 .mfg_num = MBCS_MFG_NUM,
31269diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31270index 1270f64..8495f49 100644
31271--- a/drivers/char/mem.c
31272+++ b/drivers/char/mem.c
31273@@ -18,6 +18,7 @@
31274 #include <linux/raw.h>
31275 #include <linux/tty.h>
31276 #include <linux/capability.h>
31277+#include <linux/security.h>
31278 #include <linux/ptrace.h>
31279 #include <linux/device.h>
31280 #include <linux/highmem.h>
31281@@ -35,6 +36,10 @@
31282 # include <linux/efi.h>
31283 #endif
31284
31285+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31286+extern struct file_operations grsec_fops;
31287+#endif
31288+
31289 static inline unsigned long size_inside_page(unsigned long start,
31290 unsigned long size)
31291 {
31292@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31293
31294 while (cursor < to) {
31295 if (!devmem_is_allowed(pfn)) {
31296+#ifdef CONFIG_GRKERNSEC_KMEM
31297+ gr_handle_mem_readwrite(from, to);
31298+#else
31299 printk(KERN_INFO
31300 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31301 current->comm, from, to);
31302+#endif
31303 return 0;
31304 }
31305 cursor += PAGE_SIZE;
31306@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31307 }
31308 return 1;
31309 }
31310+#elif defined(CONFIG_GRKERNSEC_KMEM)
31311+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31312+{
31313+ return 0;
31314+}
31315 #else
31316 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31317 {
31318@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31319 #endif
31320
31321 while (count > 0) {
31322+ char *temp;
31323+
31324 /*
31325 * Handle first page in case it's not aligned
31326 */
31327@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31328 if (!ptr)
31329 return -EFAULT;
31330
31331- if (copy_to_user(buf, ptr, sz)) {
31332+#ifdef CONFIG_PAX_USERCOPY
31333+ temp = kmalloc(sz, GFP_KERNEL);
31334+ if (!temp) {
31335+ unxlate_dev_mem_ptr(p, ptr);
31336+ return -ENOMEM;
31337+ }
31338+ memcpy(temp, ptr, sz);
31339+#else
31340+ temp = ptr;
31341+#endif
31342+
31343+ if (copy_to_user(buf, temp, sz)) {
31344+
31345+#ifdef CONFIG_PAX_USERCOPY
31346+ kfree(temp);
31347+#endif
31348+
31349 unxlate_dev_mem_ptr(p, ptr);
31350 return -EFAULT;
31351 }
31352
31353+#ifdef CONFIG_PAX_USERCOPY
31354+ kfree(temp);
31355+#endif
31356+
31357 unxlate_dev_mem_ptr(p, ptr);
31358
31359 buf += sz;
31360@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31361 size_t count, loff_t *ppos)
31362 {
31363 unsigned long p = *ppos;
31364- ssize_t low_count, read, sz;
31365+ ssize_t low_count, read, sz, err = 0;
31366 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31367- int err = 0;
31368
31369 read = 0;
31370 if (p < (unsigned long) high_memory) {
31371@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31372 }
31373 #endif
31374 while (low_count > 0) {
31375+ char *temp;
31376+
31377 sz = size_inside_page(p, low_count);
31378
31379 /*
31380@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31381 */
31382 kbuf = xlate_dev_kmem_ptr((char *)p);
31383
31384- if (copy_to_user(buf, kbuf, sz))
31385+#ifdef CONFIG_PAX_USERCOPY
31386+ temp = kmalloc(sz, GFP_KERNEL);
31387+ if (!temp)
31388+ return -ENOMEM;
31389+ memcpy(temp, kbuf, sz);
31390+#else
31391+ temp = kbuf;
31392+#endif
31393+
31394+ err = copy_to_user(buf, temp, sz);
31395+
31396+#ifdef CONFIG_PAX_USERCOPY
31397+ kfree(temp);
31398+#endif
31399+
31400+ if (err)
31401 return -EFAULT;
31402 buf += sz;
31403 p += sz;
31404@@ -889,6 +941,9 @@ static const struct memdev {
31405 #ifdef CONFIG_CRASH_DUMP
31406 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31407 #endif
31408+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31409+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31410+#endif
31411 };
31412
31413 static int memory_open(struct inode *inode, struct file *filp)
31414diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31415index 674b3ab..a8d1970 100644
31416--- a/drivers/char/pcmcia/ipwireless/tty.c
31417+++ b/drivers/char/pcmcia/ipwireless/tty.c
31418@@ -29,6 +29,7 @@
31419 #include <linux/tty_driver.h>
31420 #include <linux/tty_flip.h>
31421 #include <linux/uaccess.h>
31422+#include <asm/local.h>
31423
31424 #include "tty.h"
31425 #include "network.h"
31426@@ -51,7 +52,7 @@ struct ipw_tty {
31427 int tty_type;
31428 struct ipw_network *network;
31429 struct tty_struct *linux_tty;
31430- int open_count;
31431+ local_t open_count;
31432 unsigned int control_lines;
31433 struct mutex ipw_tty_mutex;
31434 int tx_bytes_queued;
31435@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31436 mutex_unlock(&tty->ipw_tty_mutex);
31437 return -ENODEV;
31438 }
31439- if (tty->open_count == 0)
31440+ if (local_read(&tty->open_count) == 0)
31441 tty->tx_bytes_queued = 0;
31442
31443- tty->open_count++;
31444+ local_inc(&tty->open_count);
31445
31446 tty->linux_tty = linux_tty;
31447 linux_tty->driver_data = tty;
31448@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31449
31450 static void do_ipw_close(struct ipw_tty *tty)
31451 {
31452- tty->open_count--;
31453-
31454- if (tty->open_count == 0) {
31455+ if (local_dec_return(&tty->open_count) == 0) {
31456 struct tty_struct *linux_tty = tty->linux_tty;
31457
31458 if (linux_tty != NULL) {
31459@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31460 return;
31461
31462 mutex_lock(&tty->ipw_tty_mutex);
31463- if (tty->open_count == 0) {
31464+ if (local_read(&tty->open_count) == 0) {
31465 mutex_unlock(&tty->ipw_tty_mutex);
31466 return;
31467 }
31468@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31469 return;
31470 }
31471
31472- if (!tty->open_count) {
31473+ if (!local_read(&tty->open_count)) {
31474 mutex_unlock(&tty->ipw_tty_mutex);
31475 return;
31476 }
31477@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31478 return -ENODEV;
31479
31480 mutex_lock(&tty->ipw_tty_mutex);
31481- if (!tty->open_count) {
31482+ if (!local_read(&tty->open_count)) {
31483 mutex_unlock(&tty->ipw_tty_mutex);
31484 return -EINVAL;
31485 }
31486@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31487 if (!tty)
31488 return -ENODEV;
31489
31490- if (!tty->open_count)
31491+ if (!local_read(&tty->open_count))
31492 return -EINVAL;
31493
31494 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31495@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31496 if (!tty)
31497 return 0;
31498
31499- if (!tty->open_count)
31500+ if (!local_read(&tty->open_count))
31501 return 0;
31502
31503 return tty->tx_bytes_queued;
31504@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31505 if (!tty)
31506 return -ENODEV;
31507
31508- if (!tty->open_count)
31509+ if (!local_read(&tty->open_count))
31510 return -EINVAL;
31511
31512 return get_control_lines(tty);
31513@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31514 if (!tty)
31515 return -ENODEV;
31516
31517- if (!tty->open_count)
31518+ if (!local_read(&tty->open_count))
31519 return -EINVAL;
31520
31521 return set_control_lines(tty, set, clear);
31522@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31523 if (!tty)
31524 return -ENODEV;
31525
31526- if (!tty->open_count)
31527+ if (!local_read(&tty->open_count))
31528 return -EINVAL;
31529
31530 /* FIXME: Exactly how is the tty object locked here .. */
31531@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31532 against a parallel ioctl etc */
31533 mutex_lock(&ttyj->ipw_tty_mutex);
31534 }
31535- while (ttyj->open_count)
31536+ while (local_read(&ttyj->open_count))
31537 do_ipw_close(ttyj);
31538 ipwireless_disassociate_network_ttys(network,
31539 ttyj->channel_idx);
31540diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31541index 62f282e..e45c45c 100644
31542--- a/drivers/char/pty.c
31543+++ b/drivers/char/pty.c
31544@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31545 register_sysctl_table(pty_root_table);
31546
31547 /* Now create the /dev/ptmx special device */
31548+ pax_open_kernel();
31549 tty_default_fops(&ptmx_fops);
31550- ptmx_fops.open = ptmx_open;
31551+ *(void **)&ptmx_fops.open = ptmx_open;
31552+ pax_close_kernel();
31553
31554 cdev_init(&ptmx_cdev, &ptmx_fops);
31555 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31556diff --git a/drivers/char/random.c b/drivers/char/random.c
31557index 3a19e2d..6ed09d3 100644
31558--- a/drivers/char/random.c
31559+++ b/drivers/char/random.c
31560@@ -254,8 +254,13 @@
31561 /*
31562 * Configuration information
31563 */
31564+#ifdef CONFIG_GRKERNSEC_RANDNET
31565+#define INPUT_POOL_WORDS 512
31566+#define OUTPUT_POOL_WORDS 128
31567+#else
31568 #define INPUT_POOL_WORDS 128
31569 #define OUTPUT_POOL_WORDS 32
31570+#endif
31571 #define SEC_XFER_SIZE 512
31572
31573 /*
31574@@ -292,10 +297,17 @@ static struct poolinfo {
31575 int poolwords;
31576 int tap1, tap2, tap3, tap4, tap5;
31577 } poolinfo_table[] = {
31578+#ifdef CONFIG_GRKERNSEC_RANDNET
31579+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31580+ { 512, 411, 308, 208, 104, 1 },
31581+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31582+ { 128, 103, 76, 51, 25, 1 },
31583+#else
31584 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31585 { 128, 103, 76, 51, 25, 1 },
31586 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31587 { 32, 26, 20, 14, 7, 1 },
31588+#endif
31589 #if 0
31590 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31591 { 2048, 1638, 1231, 819, 411, 1 },
31592@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31593 #include <linux/sysctl.h>
31594
31595 static int min_read_thresh = 8, min_write_thresh;
31596-static int max_read_thresh = INPUT_POOL_WORDS * 32;
31597+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31598 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31599 static char sysctl_bootid[16];
31600
31601diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31602index 0e29a23..0efc2c2 100644
31603--- a/drivers/char/rocket.c
31604+++ b/drivers/char/rocket.c
31605@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31606 struct rocket_ports tmp;
31607 int board;
31608
31609+ pax_track_stack();
31610+
31611 if (!retports)
31612 return -EFAULT;
31613 memset(&tmp, 0, sizeof (tmp));
31614diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31615index 8c262aa..4d3b058 100644
31616--- a/drivers/char/sonypi.c
31617+++ b/drivers/char/sonypi.c
31618@@ -55,6 +55,7 @@
31619 #include <asm/uaccess.h>
31620 #include <asm/io.h>
31621 #include <asm/system.h>
31622+#include <asm/local.h>
31623
31624 #include <linux/sonypi.h>
31625
31626@@ -491,7 +492,7 @@ static struct sonypi_device {
31627 spinlock_t fifo_lock;
31628 wait_queue_head_t fifo_proc_list;
31629 struct fasync_struct *fifo_async;
31630- int open_count;
31631+ local_t open_count;
31632 int model;
31633 struct input_dev *input_jog_dev;
31634 struct input_dev *input_key_dev;
31635@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31636 static int sonypi_misc_release(struct inode *inode, struct file *file)
31637 {
31638 mutex_lock(&sonypi_device.lock);
31639- sonypi_device.open_count--;
31640+ local_dec(&sonypi_device.open_count);
31641 mutex_unlock(&sonypi_device.lock);
31642 return 0;
31643 }
31644@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31645 lock_kernel();
31646 mutex_lock(&sonypi_device.lock);
31647 /* Flush input queue on first open */
31648- if (!sonypi_device.open_count)
31649+ if (!local_read(&sonypi_device.open_count))
31650 kfifo_reset(sonypi_device.fifo);
31651- sonypi_device.open_count++;
31652+ local_inc(&sonypi_device.open_count);
31653 mutex_unlock(&sonypi_device.lock);
31654 unlock_kernel();
31655 return 0;
31656diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31657index db6dcfa..13834cb 100644
31658--- a/drivers/char/stallion.c
31659+++ b/drivers/char/stallion.c
31660@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31661 struct stlport stl_dummyport;
31662 struct stlport *portp;
31663
31664+ pax_track_stack();
31665+
31666 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31667 return -EFAULT;
31668 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31669diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31670index a0789f6..cea3902 100644
31671--- a/drivers/char/tpm/tpm.c
31672+++ b/drivers/char/tpm/tpm.c
31673@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31674 chip->vendor.req_complete_val)
31675 goto out_recv;
31676
31677- if ((status == chip->vendor.req_canceled)) {
31678+ if (status == chip->vendor.req_canceled) {
31679 dev_err(chip->dev, "Operation Canceled\n");
31680 rc = -ECANCELED;
31681 goto out;
31682@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31683
31684 struct tpm_chip *chip = dev_get_drvdata(dev);
31685
31686+ pax_track_stack();
31687+
31688 tpm_cmd.header.in = tpm_readpubek_header;
31689 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31690 "attempting to read the PUBEK");
31691diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31692index bf2170f..ce8cab9 100644
31693--- a/drivers/char/tpm/tpm_bios.c
31694+++ b/drivers/char/tpm/tpm_bios.c
31695@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31696 event = addr;
31697
31698 if ((event->event_type == 0 && event->event_size == 0) ||
31699- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31700+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31701 return NULL;
31702
31703 return addr;
31704@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31705 return NULL;
31706
31707 if ((event->event_type == 0 && event->event_size == 0) ||
31708- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31709+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31710 return NULL;
31711
31712 (*pos)++;
31713@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31714 int i;
31715
31716 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31717- seq_putc(m, data[i]);
31718+ if (!seq_putc(m, data[i]))
31719+ return -EFAULT;
31720
31721 return 0;
31722 }
31723@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31724 log->bios_event_log_end = log->bios_event_log + len;
31725
31726 virt = acpi_os_map_memory(start, len);
31727+ if (!virt) {
31728+ kfree(log->bios_event_log);
31729+ log->bios_event_log = NULL;
31730+ return -EFAULT;
31731+ }
31732
31733- memcpy(log->bios_event_log, virt, len);
31734+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31735
31736 acpi_os_unmap_memory(virt, len);
31737 return 0;
31738diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31739index 123cedf..6664cb4 100644
31740--- a/drivers/char/tty_io.c
31741+++ b/drivers/char/tty_io.c
31742@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
31743 static int tty_release(struct inode *, struct file *);
31744 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
31745 #ifdef CONFIG_COMPAT
31746-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31747+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31748 unsigned long arg);
31749 #else
31750 #define tty_compat_ioctl NULL
31751@@ -1774,6 +1774,7 @@ got_driver:
31752
31753 if (IS_ERR(tty)) {
31754 mutex_unlock(&tty_mutex);
31755+ tty_driver_kref_put(driver);
31756 return PTR_ERR(tty);
31757 }
31758 }
31759@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31760 return retval;
31761 }
31762
31763+EXPORT_SYMBOL(tty_ioctl);
31764+
31765 #ifdef CONFIG_COMPAT
31766-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31767+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31768 unsigned long arg)
31769 {
31770 struct inode *inode = file->f_dentry->d_inode;
31771@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31772
31773 return retval;
31774 }
31775+
31776+EXPORT_SYMBOL(tty_compat_ioctl);
31777 #endif
31778
31779 /*
31780@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
31781
31782 void tty_default_fops(struct file_operations *fops)
31783 {
31784- *fops = tty_fops;
31785+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
31786 }
31787
31788 /*
31789diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
31790index d814a3d..b55b9c9 100644
31791--- a/drivers/char/tty_ldisc.c
31792+++ b/drivers/char/tty_ldisc.c
31793@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
31794 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
31795 struct tty_ldisc_ops *ldo = ld->ops;
31796
31797- ldo->refcount--;
31798+ atomic_dec(&ldo->refcount);
31799 module_put(ldo->owner);
31800 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31801
31802@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
31803 spin_lock_irqsave(&tty_ldisc_lock, flags);
31804 tty_ldiscs[disc] = new_ldisc;
31805 new_ldisc->num = disc;
31806- new_ldisc->refcount = 0;
31807+ atomic_set(&new_ldisc->refcount, 0);
31808 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31809
31810 return ret;
31811@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
31812 return -EINVAL;
31813
31814 spin_lock_irqsave(&tty_ldisc_lock, flags);
31815- if (tty_ldiscs[disc]->refcount)
31816+ if (atomic_read(&tty_ldiscs[disc]->refcount))
31817 ret = -EBUSY;
31818 else
31819 tty_ldiscs[disc] = NULL;
31820@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
31821 if (ldops) {
31822 ret = ERR_PTR(-EAGAIN);
31823 if (try_module_get(ldops->owner)) {
31824- ldops->refcount++;
31825+ atomic_inc(&ldops->refcount);
31826 ret = ldops;
31827 }
31828 }
31829@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
31830 unsigned long flags;
31831
31832 spin_lock_irqsave(&tty_ldisc_lock, flags);
31833- ldops->refcount--;
31834+ atomic_dec(&ldops->refcount);
31835 module_put(ldops->owner);
31836 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31837 }
31838diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31839index a035ae3..c27fe2c 100644
31840--- a/drivers/char/virtio_console.c
31841+++ b/drivers/char/virtio_console.c
31842@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
31843 * virtqueue, so we let the drivers do some boutique early-output thing. */
31844 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
31845 {
31846- virtio_cons.put_chars = put_chars;
31847+ pax_open_kernel();
31848+ *(void **)&virtio_cons.put_chars = put_chars;
31849+ pax_close_kernel();
31850 return hvc_instantiate(0, 0, &virtio_cons);
31851 }
31852
31853@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
31854 out_vq = vqs[1];
31855
31856 /* Start using the new console output. */
31857- virtio_cons.get_chars = get_chars;
31858- virtio_cons.put_chars = put_chars;
31859- virtio_cons.notifier_add = notifier_add_vio;
31860- virtio_cons.notifier_del = notifier_del_vio;
31861- virtio_cons.notifier_hangup = notifier_del_vio;
31862+ pax_open_kernel();
31863+ *(void **)&virtio_cons.get_chars = get_chars;
31864+ *(void **)&virtio_cons.put_chars = put_chars;
31865+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
31866+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
31867+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
31868+ pax_close_kernel();
31869
31870 /* The first argument of hvc_alloc() is the virtual console number, so
31871 * we use zero. The second argument is the parameter for the
31872diff --git a/drivers/char/vt.c b/drivers/char/vt.c
31873index 0c80c68..53d59c1 100644
31874--- a/drivers/char/vt.c
31875+++ b/drivers/char/vt.c
31876@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
31877
31878 static void notify_write(struct vc_data *vc, unsigned int unicode)
31879 {
31880- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
31881+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
31882 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
31883 }
31884
31885diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
31886index 6351a26..999af95 100644
31887--- a/drivers/char/vt_ioctl.c
31888+++ b/drivers/char/vt_ioctl.c
31889@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31890 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
31891 return -EFAULT;
31892
31893- if (!capable(CAP_SYS_TTY_CONFIG))
31894- perm = 0;
31895-
31896 switch (cmd) {
31897 case KDGKBENT:
31898 key_map = key_maps[s];
31899@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31900 val = (i ? K_HOLE : K_NOSUCHMAP);
31901 return put_user(val, &user_kbe->kb_value);
31902 case KDSKBENT:
31903+ if (!capable(CAP_SYS_TTY_CONFIG))
31904+ perm = 0;
31905+
31906 if (!perm)
31907 return -EPERM;
31908+
31909 if (!i && v == K_NOSUCHMAP) {
31910 /* deallocate map */
31911 key_map = key_maps[s];
31912@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31913 int i, j, k;
31914 int ret;
31915
31916- if (!capable(CAP_SYS_TTY_CONFIG))
31917- perm = 0;
31918-
31919 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
31920 if (!kbs) {
31921 ret = -ENOMEM;
31922@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31923 kfree(kbs);
31924 return ((p && *p) ? -EOVERFLOW : 0);
31925 case KDSKBSENT:
31926+ if (!capable(CAP_SYS_TTY_CONFIG))
31927+ perm = 0;
31928+
31929 if (!perm) {
31930 ret = -EPERM;
31931 goto reterr;
31932diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
31933index c7ae026..1769c1d 100644
31934--- a/drivers/cpufreq/cpufreq.c
31935+++ b/drivers/cpufreq/cpufreq.c
31936@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
31937 complete(&policy->kobj_unregister);
31938 }
31939
31940-static struct sysfs_ops sysfs_ops = {
31941+static const struct sysfs_ops sysfs_ops = {
31942 .show = show,
31943 .store = store,
31944 };
31945diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
31946index 97b0038..2056670 100644
31947--- a/drivers/cpuidle/sysfs.c
31948+++ b/drivers/cpuidle/sysfs.c
31949@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
31950 return ret;
31951 }
31952
31953-static struct sysfs_ops cpuidle_sysfs_ops = {
31954+static const struct sysfs_ops cpuidle_sysfs_ops = {
31955 .show = cpuidle_show,
31956 .store = cpuidle_store,
31957 };
31958@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
31959 return ret;
31960 }
31961
31962-static struct sysfs_ops cpuidle_state_sysfs_ops = {
31963+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
31964 .show = cpuidle_state_show,
31965 };
31966
31967@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
31968 .release = cpuidle_state_sysfs_release,
31969 };
31970
31971-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31972+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31973 {
31974 kobject_put(&device->kobjs[i]->kobj);
31975 wait_for_completion(&device->kobjs[i]->kobj_unregister);
31976diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
31977index 5f753fc..0377ae9 100644
31978--- a/drivers/crypto/hifn_795x.c
31979+++ b/drivers/crypto/hifn_795x.c
31980@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
31981 0xCA, 0x34, 0x2B, 0x2E};
31982 struct scatterlist sg;
31983
31984+ pax_track_stack();
31985+
31986 memset(src, 0, sizeof(src));
31987 memset(ctx.key, 0, sizeof(ctx.key));
31988
31989diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
31990index 71e6482..de8d96c 100644
31991--- a/drivers/crypto/padlock-aes.c
31992+++ b/drivers/crypto/padlock-aes.c
31993@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
31994 struct crypto_aes_ctx gen_aes;
31995 int cpu;
31996
31997+ pax_track_stack();
31998+
31999 if (key_len % 8) {
32000 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32001 return -EINVAL;
32002diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32003index dcc4ab7..cc834bb 100644
32004--- a/drivers/dma/ioat/dma.c
32005+++ b/drivers/dma/ioat/dma.c
32006@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32007 return entry->show(&chan->common, page);
32008 }
32009
32010-struct sysfs_ops ioat_sysfs_ops = {
32011+const struct sysfs_ops ioat_sysfs_ops = {
32012 .show = ioat_attr_show,
32013 };
32014
32015diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32016index bbc3e78..f2db62c 100644
32017--- a/drivers/dma/ioat/dma.h
32018+++ b/drivers/dma/ioat/dma.h
32019@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32020 unsigned long *phys_complete);
32021 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32022 void ioat_kobject_del(struct ioatdma_device *device);
32023-extern struct sysfs_ops ioat_sysfs_ops;
32024+extern const struct sysfs_ops ioat_sysfs_ops;
32025 extern struct ioat_sysfs_entry ioat_version_attr;
32026 extern struct ioat_sysfs_entry ioat_cap_attr;
32027 #endif /* IOATDMA_H */
32028diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32029index 9908c9e..3ceb0e5 100644
32030--- a/drivers/dma/ioat/dma_v3.c
32031+++ b/drivers/dma/ioat/dma_v3.c
32032@@ -71,10 +71,10 @@
32033 /* provide a lookup table for setting the source address in the base or
32034 * extended descriptor of an xor or pq descriptor
32035 */
32036-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32037-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32038-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32039-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32040+static const u8 xor_idx_to_desc = 0xd0;
32041+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32042+static const u8 pq_idx_to_desc = 0xf8;
32043+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32044
32045 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32046 {
32047diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32048index 85c464a..afd1e73 100644
32049--- a/drivers/edac/amd64_edac.c
32050+++ b/drivers/edac/amd64_edac.c
32051@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32052 * PCI core identifies what devices are on a system during boot, and then
32053 * inquiry this table to see if this driver is for a given device found.
32054 */
32055-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32056+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32057 {
32058 .vendor = PCI_VENDOR_ID_AMD,
32059 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32060diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32061index 2b95f1a..4f52793 100644
32062--- a/drivers/edac/amd76x_edac.c
32063+++ b/drivers/edac/amd76x_edac.c
32064@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32065 edac_mc_free(mci);
32066 }
32067
32068-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32069+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32070 {
32071 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32072 AMD762},
32073diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32074index d205d49..74c9672 100644
32075--- a/drivers/edac/e752x_edac.c
32076+++ b/drivers/edac/e752x_edac.c
32077@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32078 edac_mc_free(mci);
32079 }
32080
32081-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32082+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32083 {
32084 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32085 E7520},
32086diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32087index c7d11cc..c59c1ca 100644
32088--- a/drivers/edac/e7xxx_edac.c
32089+++ b/drivers/edac/e7xxx_edac.c
32090@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32091 edac_mc_free(mci);
32092 }
32093
32094-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32095+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32096 {
32097 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32098 E7205},
32099diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32100index 5376457..5fdedbc 100644
32101--- a/drivers/edac/edac_device_sysfs.c
32102+++ b/drivers/edac/edac_device_sysfs.c
32103@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32104 }
32105
32106 /* edac_dev file operations for an 'ctl_info' */
32107-static struct sysfs_ops device_ctl_info_ops = {
32108+static const struct sysfs_ops device_ctl_info_ops = {
32109 .show = edac_dev_ctl_info_show,
32110 .store = edac_dev_ctl_info_store
32111 };
32112@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32113 }
32114
32115 /* edac_dev file operations for an 'instance' */
32116-static struct sysfs_ops device_instance_ops = {
32117+static const struct sysfs_ops device_instance_ops = {
32118 .show = edac_dev_instance_show,
32119 .store = edac_dev_instance_store
32120 };
32121@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32122 }
32123
32124 /* edac_dev file operations for a 'block' */
32125-static struct sysfs_ops device_block_ops = {
32126+static const struct sysfs_ops device_block_ops = {
32127 .show = edac_dev_block_show,
32128 .store = edac_dev_block_store
32129 };
32130diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32131index e1d4ce0..88840e9 100644
32132--- a/drivers/edac/edac_mc_sysfs.c
32133+++ b/drivers/edac/edac_mc_sysfs.c
32134@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32135 return -EIO;
32136 }
32137
32138-static struct sysfs_ops csrowfs_ops = {
32139+static const struct sysfs_ops csrowfs_ops = {
32140 .show = csrowdev_show,
32141 .store = csrowdev_store
32142 };
32143@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32144 }
32145
32146 /* Intermediate show/store table */
32147-static struct sysfs_ops mci_ops = {
32148+static const struct sysfs_ops mci_ops = {
32149 .show = mcidev_show,
32150 .store = mcidev_store
32151 };
32152diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32153index 422728c..d8d9c88 100644
32154--- a/drivers/edac/edac_pci_sysfs.c
32155+++ b/drivers/edac/edac_pci_sysfs.c
32156@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32157 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32158 static int edac_pci_poll_msec = 1000; /* one second workq period */
32159
32160-static atomic_t pci_parity_count = ATOMIC_INIT(0);
32161-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32162+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32163+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32164
32165 static struct kobject *edac_pci_top_main_kobj;
32166 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32167@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32168 }
32169
32170 /* fs_ops table */
32171-static struct sysfs_ops pci_instance_ops = {
32172+static const struct sysfs_ops pci_instance_ops = {
32173 .show = edac_pci_instance_show,
32174 .store = edac_pci_instance_store
32175 };
32176@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32177 return -EIO;
32178 }
32179
32180-static struct sysfs_ops edac_pci_sysfs_ops = {
32181+static const struct sysfs_ops edac_pci_sysfs_ops = {
32182 .show = edac_pci_dev_show,
32183 .store = edac_pci_dev_store
32184 };
32185@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32186 edac_printk(KERN_CRIT, EDAC_PCI,
32187 "Signaled System Error on %s\n",
32188 pci_name(dev));
32189- atomic_inc(&pci_nonparity_count);
32190+ atomic_inc_unchecked(&pci_nonparity_count);
32191 }
32192
32193 if (status & (PCI_STATUS_PARITY)) {
32194@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32195 "Master Data Parity Error on %s\n",
32196 pci_name(dev));
32197
32198- atomic_inc(&pci_parity_count);
32199+ atomic_inc_unchecked(&pci_parity_count);
32200 }
32201
32202 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32203@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32204 "Detected Parity Error on %s\n",
32205 pci_name(dev));
32206
32207- atomic_inc(&pci_parity_count);
32208+ atomic_inc_unchecked(&pci_parity_count);
32209 }
32210 }
32211
32212@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32213 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32214 "Signaled System Error on %s\n",
32215 pci_name(dev));
32216- atomic_inc(&pci_nonparity_count);
32217+ atomic_inc_unchecked(&pci_nonparity_count);
32218 }
32219
32220 if (status & (PCI_STATUS_PARITY)) {
32221@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32222 "Master Data Parity Error on "
32223 "%s\n", pci_name(dev));
32224
32225- atomic_inc(&pci_parity_count);
32226+ atomic_inc_unchecked(&pci_parity_count);
32227 }
32228
32229 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32230@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32231 "Detected Parity Error on %s\n",
32232 pci_name(dev));
32233
32234- atomic_inc(&pci_parity_count);
32235+ atomic_inc_unchecked(&pci_parity_count);
32236 }
32237 }
32238 }
32239@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32240 if (!check_pci_errors)
32241 return;
32242
32243- before_count = atomic_read(&pci_parity_count);
32244+ before_count = atomic_read_unchecked(&pci_parity_count);
32245
32246 /* scan all PCI devices looking for a Parity Error on devices and
32247 * bridges.
32248@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32249 /* Only if operator has selected panic on PCI Error */
32250 if (edac_pci_get_panic_on_pe()) {
32251 /* If the count is different 'after' from 'before' */
32252- if (before_count != atomic_read(&pci_parity_count))
32253+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32254 panic("EDAC: PCI Parity Error");
32255 }
32256 }
32257diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32258index 6c9a0f2..9c1cf7e 100644
32259--- a/drivers/edac/i3000_edac.c
32260+++ b/drivers/edac/i3000_edac.c
32261@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32262 edac_mc_free(mci);
32263 }
32264
32265-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32266+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32267 {
32268 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32269 I3000},
32270diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32271index fde4db9..fe108f9 100644
32272--- a/drivers/edac/i3200_edac.c
32273+++ b/drivers/edac/i3200_edac.c
32274@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32275 edac_mc_free(mci);
32276 }
32277
32278-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32279+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32280 {
32281 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32282 I3200},
32283diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32284index adc10a2..57d4ccf 100644
32285--- a/drivers/edac/i5000_edac.c
32286+++ b/drivers/edac/i5000_edac.c
32287@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32288 *
32289 * The "E500P" device is the first device supported.
32290 */
32291-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32292+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32293 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32294 .driver_data = I5000P},
32295
32296diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32297index 22db05a..b2b5503 100644
32298--- a/drivers/edac/i5100_edac.c
32299+++ b/drivers/edac/i5100_edac.c
32300@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32301 edac_mc_free(mci);
32302 }
32303
32304-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32305+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32306 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32307 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32308 { 0, }
32309diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32310index f99d106..f050710 100644
32311--- a/drivers/edac/i5400_edac.c
32312+++ b/drivers/edac/i5400_edac.c
32313@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32314 *
32315 * The "E500P" device is the first device supported.
32316 */
32317-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32318+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32319 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32320 {0,} /* 0 terminated list. */
32321 };
32322diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32323index 577760a..9ce16ce 100644
32324--- a/drivers/edac/i82443bxgx_edac.c
32325+++ b/drivers/edac/i82443bxgx_edac.c
32326@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32327
32328 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32329
32330-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32331+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32332 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32333 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32334 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32335diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32336index c0088ba..64a7b98 100644
32337--- a/drivers/edac/i82860_edac.c
32338+++ b/drivers/edac/i82860_edac.c
32339@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32340 edac_mc_free(mci);
32341 }
32342
32343-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32344+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32345 {
32346 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32347 I82860},
32348diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32349index b2d83b9..a34357b 100644
32350--- a/drivers/edac/i82875p_edac.c
32351+++ b/drivers/edac/i82875p_edac.c
32352@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32353 edac_mc_free(mci);
32354 }
32355
32356-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32357+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32358 {
32359 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32360 I82875P},
32361diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32362index 2eed3ea..87bbbd1 100644
32363--- a/drivers/edac/i82975x_edac.c
32364+++ b/drivers/edac/i82975x_edac.c
32365@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32366 edac_mc_free(mci);
32367 }
32368
32369-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32370+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32371 {
32372 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32373 I82975X
32374diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32375index 9900675..78ac2b6 100644
32376--- a/drivers/edac/r82600_edac.c
32377+++ b/drivers/edac/r82600_edac.c
32378@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32379 edac_mc_free(mci);
32380 }
32381
32382-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32383+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32384 {
32385 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32386 },
32387diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32388index d4ec605..4cfec4e 100644
32389--- a/drivers/edac/x38_edac.c
32390+++ b/drivers/edac/x38_edac.c
32391@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32392 edac_mc_free(mci);
32393 }
32394
32395-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32396+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32397 {
32398 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32399 X38},
32400diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32401index 3fc2ceb..daf098f 100644
32402--- a/drivers/firewire/core-card.c
32403+++ b/drivers/firewire/core-card.c
32404@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32405
32406 void fw_core_remove_card(struct fw_card *card)
32407 {
32408- struct fw_card_driver dummy_driver = dummy_driver_template;
32409+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32410
32411 card->driver->update_phy_reg(card, 4,
32412 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32413diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32414index 4560d8f..36db24a 100644
32415--- a/drivers/firewire/core-cdev.c
32416+++ b/drivers/firewire/core-cdev.c
32417@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32418 int ret;
32419
32420 if ((request->channels == 0 && request->bandwidth == 0) ||
32421- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32422- request->bandwidth < 0)
32423+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32424 return -EINVAL;
32425
32426 r = kmalloc(sizeof(*r), GFP_KERNEL);
32427diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32428index da628c7..cf54a2c 100644
32429--- a/drivers/firewire/core-transaction.c
32430+++ b/drivers/firewire/core-transaction.c
32431@@ -36,6 +36,7 @@
32432 #include <linux/string.h>
32433 #include <linux/timer.h>
32434 #include <linux/types.h>
32435+#include <linux/sched.h>
32436
32437 #include <asm/byteorder.h>
32438
32439@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32440 struct transaction_callback_data d;
32441 struct fw_transaction t;
32442
32443+ pax_track_stack();
32444+
32445 init_completion(&d.done);
32446 d.payload = payload;
32447 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32448diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32449index 7ff6e75..a2965d9 100644
32450--- a/drivers/firewire/core.h
32451+++ b/drivers/firewire/core.h
32452@@ -86,6 +86,7 @@ struct fw_card_driver {
32453
32454 int (*stop_iso)(struct fw_iso_context *ctx);
32455 };
32456+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32457
32458 void fw_card_initialize(struct fw_card *card,
32459 const struct fw_card_driver *driver, struct device *device);
32460diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32461index 3a2ccb0..82fd7c4 100644
32462--- a/drivers/firmware/dmi_scan.c
32463+++ b/drivers/firmware/dmi_scan.c
32464@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32465 }
32466 }
32467 else {
32468- /*
32469- * no iounmap() for that ioremap(); it would be a no-op, but
32470- * it's so early in setup that sucker gets confused into doing
32471- * what it shouldn't if we actually call it.
32472- */
32473 p = dmi_ioremap(0xF0000, 0x10000);
32474 if (p == NULL)
32475 goto error;
32476@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32477 if (buf == NULL)
32478 return -1;
32479
32480- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32481+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32482
32483 iounmap(buf);
32484 return 0;
32485diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32486index 9e4f59d..110e24e 100644
32487--- a/drivers/firmware/edd.c
32488+++ b/drivers/firmware/edd.c
32489@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32490 return ret;
32491 }
32492
32493-static struct sysfs_ops edd_attr_ops = {
32494+static const struct sysfs_ops edd_attr_ops = {
32495 .show = edd_attr_show,
32496 };
32497
32498diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32499index f4f709d..082f06e 100644
32500--- a/drivers/firmware/efivars.c
32501+++ b/drivers/firmware/efivars.c
32502@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32503 return ret;
32504 }
32505
32506-static struct sysfs_ops efivar_attr_ops = {
32507+static const struct sysfs_ops efivar_attr_ops = {
32508 .show = efivar_attr_show,
32509 .store = efivar_attr_store,
32510 };
32511diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32512index 051d1eb..0a5d4e7 100644
32513--- a/drivers/firmware/iscsi_ibft.c
32514+++ b/drivers/firmware/iscsi_ibft.c
32515@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32516 return ret;
32517 }
32518
32519-static struct sysfs_ops ibft_attr_ops = {
32520+static const struct sysfs_ops ibft_attr_ops = {
32521 .show = ibft_show_attribute,
32522 };
32523
32524diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32525index 56f9234..8c58c7b 100644
32526--- a/drivers/firmware/memmap.c
32527+++ b/drivers/firmware/memmap.c
32528@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32529 NULL
32530 };
32531
32532-static struct sysfs_ops memmap_attr_ops = {
32533+static const struct sysfs_ops memmap_attr_ops = {
32534 .show = memmap_attr_show,
32535 };
32536
32537diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32538index b16c9a8..2af7d3f 100644
32539--- a/drivers/gpio/vr41xx_giu.c
32540+++ b/drivers/gpio/vr41xx_giu.c
32541@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32542 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32543 maskl, pendl, maskh, pendh);
32544
32545- atomic_inc(&irq_err_count);
32546+ atomic_inc_unchecked(&irq_err_count);
32547
32548 return -EINVAL;
32549 }
32550diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32551index bea6efc..3dc0f42 100644
32552--- a/drivers/gpu/drm/drm_crtc.c
32553+++ b/drivers/gpu/drm/drm_crtc.c
32554@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32555 */
32556 if ((out_resp->count_modes >= mode_count) && mode_count) {
32557 copied = 0;
32558- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32559+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32560 list_for_each_entry(mode, &connector->modes, head) {
32561 drm_crtc_convert_to_umode(&u_mode, mode);
32562 if (copy_to_user(mode_ptr + copied,
32563@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32564
32565 if ((out_resp->count_props >= props_count) && props_count) {
32566 copied = 0;
32567- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32568- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32569+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32570+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32571 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32572 if (connector->property_ids[i] != 0) {
32573 if (put_user(connector->property_ids[i],
32574@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32575
32576 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32577 copied = 0;
32578- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32579+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32580 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32581 if (connector->encoder_ids[i] != 0) {
32582 if (put_user(connector->encoder_ids[i],
32583@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32584 }
32585
32586 for (i = 0; i < crtc_req->count_connectors; i++) {
32587- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32588+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32589 if (get_user(out_id, &set_connectors_ptr[i])) {
32590 ret = -EFAULT;
32591 goto out;
32592@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32593 out_resp->flags = property->flags;
32594
32595 if ((out_resp->count_values >= value_count) && value_count) {
32596- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32597+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32598 for (i = 0; i < value_count; i++) {
32599 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32600 ret = -EFAULT;
32601@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32602 if (property->flags & DRM_MODE_PROP_ENUM) {
32603 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32604 copied = 0;
32605- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32606+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32607 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32608
32609 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32610@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32611 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32612 copied = 0;
32613 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32614- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32615+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32616
32617 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32618 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32619@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32620 blob = obj_to_blob(obj);
32621
32622 if (out_resp->length == blob->length) {
32623- blob_ptr = (void *)(unsigned long)out_resp->data;
32624+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
32625 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32626 ret = -EFAULT;
32627 goto done;
32628diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32629index 1b8745d..92fdbf6 100644
32630--- a/drivers/gpu/drm/drm_crtc_helper.c
32631+++ b/drivers/gpu/drm/drm_crtc_helper.c
32632@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32633 struct drm_crtc *tmp;
32634 int crtc_mask = 1;
32635
32636- WARN(!crtc, "checking null crtc?");
32637+ BUG_ON(!crtc);
32638
32639 dev = crtc->dev;
32640
32641@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32642
32643 adjusted_mode = drm_mode_duplicate(dev, mode);
32644
32645+ pax_track_stack();
32646+
32647 crtc->enabled = drm_helper_crtc_in_use(crtc);
32648
32649 if (!crtc->enabled)
32650diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32651index 0e27d98..dec8768 100644
32652--- a/drivers/gpu/drm/drm_drv.c
32653+++ b/drivers/gpu/drm/drm_drv.c
32654@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32655 char *kdata = NULL;
32656
32657 atomic_inc(&dev->ioctl_count);
32658- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32659+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32660 ++file_priv->ioctl_count;
32661
32662 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32663diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32664index 519161e..98c840c 100644
32665--- a/drivers/gpu/drm/drm_fops.c
32666+++ b/drivers/gpu/drm/drm_fops.c
32667@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32668 }
32669
32670 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32671- atomic_set(&dev->counts[i], 0);
32672+ atomic_set_unchecked(&dev->counts[i], 0);
32673
32674 dev->sigdata.lock = NULL;
32675
32676@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32677
32678 retcode = drm_open_helper(inode, filp, dev);
32679 if (!retcode) {
32680- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32681+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32682 spin_lock(&dev->count_lock);
32683- if (!dev->open_count++) {
32684+ if (local_inc_return(&dev->open_count) == 1) {
32685 spin_unlock(&dev->count_lock);
32686 retcode = drm_setup(dev);
32687 goto out;
32688@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32689
32690 lock_kernel();
32691
32692- DRM_DEBUG("open_count = %d\n", dev->open_count);
32693+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32694
32695 if (dev->driver->preclose)
32696 dev->driver->preclose(dev, file_priv);
32697@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32698 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32699 task_pid_nr(current),
32700 (long)old_encode_dev(file_priv->minor->device),
32701- dev->open_count);
32702+ local_read(&dev->open_count));
32703
32704 /* Release any auth tokens that might point to this file_priv,
32705 (do that under the drm_global_mutex) */
32706@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
32707 * End inline drm_release
32708 */
32709
32710- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32711+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32712 spin_lock(&dev->count_lock);
32713- if (!--dev->open_count) {
32714+ if (local_dec_and_test(&dev->open_count)) {
32715 if (atomic_read(&dev->ioctl_count)) {
32716 DRM_ERROR("Device busy: %d\n",
32717 atomic_read(&dev->ioctl_count));
32718diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32719index 8bf3770..79422805 100644
32720--- a/drivers/gpu/drm/drm_gem.c
32721+++ b/drivers/gpu/drm/drm_gem.c
32722@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32723 spin_lock_init(&dev->object_name_lock);
32724 idr_init(&dev->object_name_idr);
32725 atomic_set(&dev->object_count, 0);
32726- atomic_set(&dev->object_memory, 0);
32727+ atomic_set_unchecked(&dev->object_memory, 0);
32728 atomic_set(&dev->pin_count, 0);
32729- atomic_set(&dev->pin_memory, 0);
32730+ atomic_set_unchecked(&dev->pin_memory, 0);
32731 atomic_set(&dev->gtt_count, 0);
32732- atomic_set(&dev->gtt_memory, 0);
32733+ atomic_set_unchecked(&dev->gtt_memory, 0);
32734
32735 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32736 if (!mm) {
32737@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32738 goto fput;
32739 }
32740 atomic_inc(&dev->object_count);
32741- atomic_add(obj->size, &dev->object_memory);
32742+ atomic_add_unchecked(obj->size, &dev->object_memory);
32743 return obj;
32744 fput:
32745 fput(obj->filp);
32746@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32747
32748 fput(obj->filp);
32749 atomic_dec(&dev->object_count);
32750- atomic_sub(obj->size, &dev->object_memory);
32751+ atomic_sub_unchecked(obj->size, &dev->object_memory);
32752 kfree(obj);
32753 }
32754 EXPORT_SYMBOL(drm_gem_object_free);
32755diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32756index f0f6c6b..34af322 100644
32757--- a/drivers/gpu/drm/drm_info.c
32758+++ b/drivers/gpu/drm/drm_info.c
32759@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32760 struct drm_local_map *map;
32761 struct drm_map_list *r_list;
32762
32763- /* Hardcoded from _DRM_FRAME_BUFFER,
32764- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32765- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32766- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32767+ static const char * const types[] = {
32768+ [_DRM_FRAME_BUFFER] = "FB",
32769+ [_DRM_REGISTERS] = "REG",
32770+ [_DRM_SHM] = "SHM",
32771+ [_DRM_AGP] = "AGP",
32772+ [_DRM_SCATTER_GATHER] = "SG",
32773+ [_DRM_CONSISTENT] = "PCI",
32774+ [_DRM_GEM] = "GEM" };
32775 const char *type;
32776 int i;
32777
32778@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32779 map = r_list->map;
32780 if (!map)
32781 continue;
32782- if (map->type < 0 || map->type > 5)
32783+ if (map->type >= ARRAY_SIZE(types))
32784 type = "??";
32785 else
32786 type = types[map->type];
32787@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
32788 struct drm_device *dev = node->minor->dev;
32789
32790 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
32791- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
32792+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
32793 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
32794- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
32795- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
32796+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
32797+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
32798 seq_printf(m, "%d gtt total\n", dev->gtt_total);
32799 return 0;
32800 }
32801@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32802 mutex_lock(&dev->struct_mutex);
32803 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
32804 atomic_read(&dev->vma_count),
32805+#ifdef CONFIG_GRKERNSEC_HIDESYM
32806+ NULL, 0);
32807+#else
32808 high_memory, (u64)virt_to_phys(high_memory));
32809+#endif
32810
32811 list_for_each_entry(pt, &dev->vmalist, head) {
32812 vma = pt->vma;
32813@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
32814 continue;
32815 seq_printf(m,
32816 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
32817- pt->pid, vma->vm_start, vma->vm_end,
32818+ pt->pid,
32819+#ifdef CONFIG_GRKERNSEC_HIDESYM
32820+ 0, 0,
32821+#else
32822+ vma->vm_start, vma->vm_end,
32823+#endif
32824 vma->vm_flags & VM_READ ? 'r' : '-',
32825 vma->vm_flags & VM_WRITE ? 'w' : '-',
32826 vma->vm_flags & VM_EXEC ? 'x' : '-',
32827 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32828 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32829 vma->vm_flags & VM_IO ? 'i' : '-',
32830+#ifdef CONFIG_GRKERNSEC_HIDESYM
32831+ 0);
32832+#else
32833 vma->vm_pgoff);
32834+#endif
32835
32836 #if defined(__i386__)
32837 pgprot = pgprot_val(vma->vm_page_prot);
32838diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32839index 282d9fd..71e5f11 100644
32840--- a/drivers/gpu/drm/drm_ioc32.c
32841+++ b/drivers/gpu/drm/drm_ioc32.c
32842@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32843 request = compat_alloc_user_space(nbytes);
32844 if (!access_ok(VERIFY_WRITE, request, nbytes))
32845 return -EFAULT;
32846- list = (struct drm_buf_desc *) (request + 1);
32847+ list = (struct drm_buf_desc __user *) (request + 1);
32848
32849 if (__put_user(count, &request->count)
32850 || __put_user(list, &request->list))
32851@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32852 request = compat_alloc_user_space(nbytes);
32853 if (!access_ok(VERIFY_WRITE, request, nbytes))
32854 return -EFAULT;
32855- list = (struct drm_buf_pub *) (request + 1);
32856+ list = (struct drm_buf_pub __user *) (request + 1);
32857
32858 if (__put_user(count, &request->count)
32859 || __put_user(list, &request->list))
32860diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32861index 9b9ff46..4ea724c 100644
32862--- a/drivers/gpu/drm/drm_ioctl.c
32863+++ b/drivers/gpu/drm/drm_ioctl.c
32864@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32865 stats->data[i].value =
32866 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32867 else
32868- stats->data[i].value = atomic_read(&dev->counts[i]);
32869+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32870 stats->data[i].type = dev->types[i];
32871 }
32872
32873diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32874index e2f70a5..c703e86 100644
32875--- a/drivers/gpu/drm/drm_lock.c
32876+++ b/drivers/gpu/drm/drm_lock.c
32877@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32878 if (drm_lock_take(&master->lock, lock->context)) {
32879 master->lock.file_priv = file_priv;
32880 master->lock.lock_time = jiffies;
32881- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32882+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32883 break; /* Got lock */
32884 }
32885
32886@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32887 return -EINVAL;
32888 }
32889
32890- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32891+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32892
32893 /* kernel_context_switch isn't used by any of the x86 drm
32894 * modules but is required by the Sparc driver.
32895diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32896index 7d1d88c..b9131b2 100644
32897--- a/drivers/gpu/drm/i810/i810_dma.c
32898+++ b/drivers/gpu/drm/i810/i810_dma.c
32899@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32900 dma->buflist[vertex->idx],
32901 vertex->discard, vertex->used);
32902
32903- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32904- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32905+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32906+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32907 sarea_priv->last_enqueue = dev_priv->counter - 1;
32908 sarea_priv->last_dispatch = (int)hw_status[5];
32909
32910@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32911 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32912 mc->last_render);
32913
32914- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32915- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32916+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32917+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32918 sarea_priv->last_enqueue = dev_priv->counter - 1;
32919 sarea_priv->last_dispatch = (int)hw_status[5];
32920
32921diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32922index 21e2691..7321edd 100644
32923--- a/drivers/gpu/drm/i810/i810_drv.h
32924+++ b/drivers/gpu/drm/i810/i810_drv.h
32925@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32926 int page_flipping;
32927
32928 wait_queue_head_t irq_queue;
32929- atomic_t irq_received;
32930- atomic_t irq_emitted;
32931+ atomic_unchecked_t irq_received;
32932+ atomic_unchecked_t irq_emitted;
32933
32934 int front_offset;
32935 } drm_i810_private_t;
32936diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
32937index da82afe..48a45de 100644
32938--- a/drivers/gpu/drm/i830/i830_drv.h
32939+++ b/drivers/gpu/drm/i830/i830_drv.h
32940@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
32941 int page_flipping;
32942
32943 wait_queue_head_t irq_queue;
32944- atomic_t irq_received;
32945- atomic_t irq_emitted;
32946+ atomic_unchecked_t irq_received;
32947+ atomic_unchecked_t irq_emitted;
32948
32949 int use_mi_batchbuffer_start;
32950
32951diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
32952index 91ec2bb..6f21fab 100644
32953--- a/drivers/gpu/drm/i830/i830_irq.c
32954+++ b/drivers/gpu/drm/i830/i830_irq.c
32955@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
32956
32957 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
32958
32959- atomic_inc(&dev_priv->irq_received);
32960+ atomic_inc_unchecked(&dev_priv->irq_received);
32961 wake_up_interruptible(&dev_priv->irq_queue);
32962
32963 return IRQ_HANDLED;
32964@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
32965
32966 DRM_DEBUG("%s\n", __func__);
32967
32968- atomic_inc(&dev_priv->irq_emitted);
32969+ atomic_inc_unchecked(&dev_priv->irq_emitted);
32970
32971 BEGIN_LP_RING(2);
32972 OUT_RING(0);
32973 OUT_RING(GFX_OP_USER_INTERRUPT);
32974 ADVANCE_LP_RING();
32975
32976- return atomic_read(&dev_priv->irq_emitted);
32977+ return atomic_read_unchecked(&dev_priv->irq_emitted);
32978 }
32979
32980 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32981@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32982
32983 DRM_DEBUG("%s\n", __func__);
32984
32985- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32986+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32987 return 0;
32988
32989 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
32990@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32991
32992 for (;;) {
32993 __set_current_state(TASK_INTERRUPTIBLE);
32994- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32995+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32996 break;
32997 if ((signed)(end - jiffies) <= 0) {
32998 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
32999@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33000 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33001 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33002 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33003- atomic_set(&dev_priv->irq_received, 0);
33004- atomic_set(&dev_priv->irq_emitted, 0);
33005+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33006+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33007 init_waitqueue_head(&dev_priv->irq_queue);
33008 }
33009
33010diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33011index 288fc50..c6092055 100644
33012--- a/drivers/gpu/drm/i915/dvo.h
33013+++ b/drivers/gpu/drm/i915/dvo.h
33014@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33015 *
33016 * \return singly-linked list of modes or NULL if no modes found.
33017 */
33018- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33019+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33020
33021 /**
33022 * Clean up driver-specific bits of the output
33023 */
33024- void (*destroy) (struct intel_dvo_device *dvo);
33025+ void (* const destroy) (struct intel_dvo_device *dvo);
33026
33027 /**
33028 * Debugging hook to dump device registers to log file
33029 */
33030- void (*dump_regs)(struct intel_dvo_device *dvo);
33031+ void (* const dump_regs)(struct intel_dvo_device *dvo);
33032 };
33033
33034-extern struct intel_dvo_dev_ops sil164_ops;
33035-extern struct intel_dvo_dev_ops ch7xxx_ops;
33036-extern struct intel_dvo_dev_ops ivch_ops;
33037-extern struct intel_dvo_dev_ops tfp410_ops;
33038-extern struct intel_dvo_dev_ops ch7017_ops;
33039+extern const struct intel_dvo_dev_ops sil164_ops;
33040+extern const struct intel_dvo_dev_ops ch7xxx_ops;
33041+extern const struct intel_dvo_dev_ops ivch_ops;
33042+extern const struct intel_dvo_dev_ops tfp410_ops;
33043+extern const struct intel_dvo_dev_ops ch7017_ops;
33044
33045 #endif /* _INTEL_DVO_H */
33046diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33047index 621815b..499d82e 100644
33048--- a/drivers/gpu/drm/i915/dvo_ch7017.c
33049+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33050@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33051 }
33052 }
33053
33054-struct intel_dvo_dev_ops ch7017_ops = {
33055+const struct intel_dvo_dev_ops ch7017_ops = {
33056 .init = ch7017_init,
33057 .detect = ch7017_detect,
33058 .mode_valid = ch7017_mode_valid,
33059diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33060index a9b8962..ac769ba 100644
33061--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33062+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33063@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33064 }
33065 }
33066
33067-struct intel_dvo_dev_ops ch7xxx_ops = {
33068+const struct intel_dvo_dev_ops ch7xxx_ops = {
33069 .init = ch7xxx_init,
33070 .detect = ch7xxx_detect,
33071 .mode_valid = ch7xxx_mode_valid,
33072diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33073index aa176f9..ed2930c 100644
33074--- a/drivers/gpu/drm/i915/dvo_ivch.c
33075+++ b/drivers/gpu/drm/i915/dvo_ivch.c
33076@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33077 }
33078 }
33079
33080-struct intel_dvo_dev_ops ivch_ops= {
33081+const struct intel_dvo_dev_ops ivch_ops= {
33082 .init = ivch_init,
33083 .dpms = ivch_dpms,
33084 .save = ivch_save,
33085diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33086index e1c1f73..7dbebcf 100644
33087--- a/drivers/gpu/drm/i915/dvo_sil164.c
33088+++ b/drivers/gpu/drm/i915/dvo_sil164.c
33089@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33090 }
33091 }
33092
33093-struct intel_dvo_dev_ops sil164_ops = {
33094+const struct intel_dvo_dev_ops sil164_ops = {
33095 .init = sil164_init,
33096 .detect = sil164_detect,
33097 .mode_valid = sil164_mode_valid,
33098diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33099index 16dce84..7e1b6f8 100644
33100--- a/drivers/gpu/drm/i915/dvo_tfp410.c
33101+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33102@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33103 }
33104 }
33105
33106-struct intel_dvo_dev_ops tfp410_ops = {
33107+const struct intel_dvo_dev_ops tfp410_ops = {
33108 .init = tfp410_init,
33109 .detect = tfp410_detect,
33110 .mode_valid = tfp410_mode_valid,
33111diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33112index 7e859d6..7d1cf2b 100644
33113--- a/drivers/gpu/drm/i915/i915_debugfs.c
33114+++ b/drivers/gpu/drm/i915/i915_debugfs.c
33115@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33116 I915_READ(GTIMR));
33117 }
33118 seq_printf(m, "Interrupts received: %d\n",
33119- atomic_read(&dev_priv->irq_received));
33120+ atomic_read_unchecked(&dev_priv->irq_received));
33121 if (dev_priv->hw_status_page != NULL) {
33122 seq_printf(m, "Current sequence: %d\n",
33123 i915_get_gem_seqno(dev));
33124diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33125index 5449239..7e4f68d 100644
33126--- a/drivers/gpu/drm/i915/i915_drv.c
33127+++ b/drivers/gpu/drm/i915/i915_drv.c
33128@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33129 return i915_resume(dev);
33130 }
33131
33132-static struct vm_operations_struct i915_gem_vm_ops = {
33133+static const struct vm_operations_struct i915_gem_vm_ops = {
33134 .fault = i915_gem_fault,
33135 .open = drm_gem_vm_open,
33136 .close = drm_gem_vm_close,
33137diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33138index 97163f7..c24c7c7 100644
33139--- a/drivers/gpu/drm/i915/i915_drv.h
33140+++ b/drivers/gpu/drm/i915/i915_drv.h
33141@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33142 /* display clock increase/decrease */
33143 /* pll clock increase/decrease */
33144 /* clock gating init */
33145-};
33146+} __no_const;
33147
33148 typedef struct drm_i915_private {
33149 struct drm_device *dev;
33150@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33151 int page_flipping;
33152
33153 wait_queue_head_t irq_queue;
33154- atomic_t irq_received;
33155+ atomic_unchecked_t irq_received;
33156 /** Protects user_irq_refcount and irq_mask_reg */
33157 spinlock_t user_irq_lock;
33158 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33159diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33160index 27a3074..eb3f959 100644
33161--- a/drivers/gpu/drm/i915/i915_gem.c
33162+++ b/drivers/gpu/drm/i915/i915_gem.c
33163@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33164
33165 args->aper_size = dev->gtt_total;
33166 args->aper_available_size = (args->aper_size -
33167- atomic_read(&dev->pin_memory));
33168+ atomic_read_unchecked(&dev->pin_memory));
33169
33170 return 0;
33171 }
33172@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33173
33174 if (obj_priv->gtt_space) {
33175 atomic_dec(&dev->gtt_count);
33176- atomic_sub(obj->size, &dev->gtt_memory);
33177+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33178
33179 drm_mm_put_block(obj_priv->gtt_space);
33180 obj_priv->gtt_space = NULL;
33181@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33182 goto search_free;
33183 }
33184 atomic_inc(&dev->gtt_count);
33185- atomic_add(obj->size, &dev->gtt_memory);
33186+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
33187
33188 /* Assert that the object is not currently in any GPU domain. As it
33189 * wasn't in the GTT, there shouldn't be any way it could have been in
33190@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33191 "%d/%d gtt bytes\n",
33192 atomic_read(&dev->object_count),
33193 atomic_read(&dev->pin_count),
33194- atomic_read(&dev->object_memory),
33195- atomic_read(&dev->pin_memory),
33196- atomic_read(&dev->gtt_memory),
33197+ atomic_read_unchecked(&dev->object_memory),
33198+ atomic_read_unchecked(&dev->pin_memory),
33199+ atomic_read_unchecked(&dev->gtt_memory),
33200 dev->gtt_total);
33201 }
33202 goto err;
33203@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33204 */
33205 if (obj_priv->pin_count == 1) {
33206 atomic_inc(&dev->pin_count);
33207- atomic_add(obj->size, &dev->pin_memory);
33208+ atomic_add_unchecked(obj->size, &dev->pin_memory);
33209 if (!obj_priv->active &&
33210 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33211 !list_empty(&obj_priv->list))
33212@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33213 list_move_tail(&obj_priv->list,
33214 &dev_priv->mm.inactive_list);
33215 atomic_dec(&dev->pin_count);
33216- atomic_sub(obj->size, &dev->pin_memory);
33217+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
33218 }
33219 i915_verify_inactive(dev, __FILE__, __LINE__);
33220 }
33221diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33222index 63f28ad..f5469da 100644
33223--- a/drivers/gpu/drm/i915/i915_irq.c
33224+++ b/drivers/gpu/drm/i915/i915_irq.c
33225@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33226 int irq_received;
33227 int ret = IRQ_NONE;
33228
33229- atomic_inc(&dev_priv->irq_received);
33230+ atomic_inc_unchecked(&dev_priv->irq_received);
33231
33232 if (IS_IGDNG(dev))
33233 return igdng_irq_handler(dev);
33234@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33235 {
33236 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33237
33238- atomic_set(&dev_priv->irq_received, 0);
33239+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33240
33241 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33242 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33243diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33244index 5d9c6a7..d1b0e29 100644
33245--- a/drivers/gpu/drm/i915/intel_sdvo.c
33246+++ b/drivers/gpu/drm/i915/intel_sdvo.c
33247@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33248 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33249
33250 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33251- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33252+ pax_open_kernel();
33253+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33254+ pax_close_kernel();
33255
33256 /* Read the regs to test if we can talk to the device */
33257 for (i = 0; i < 0x40; i++) {
33258diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33259index be6c6b9..8615d9c 100644
33260--- a/drivers/gpu/drm/mga/mga_drv.h
33261+++ b/drivers/gpu/drm/mga/mga_drv.h
33262@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33263 u32 clear_cmd;
33264 u32 maccess;
33265
33266- atomic_t vbl_received; /**< Number of vblanks received. */
33267+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33268 wait_queue_head_t fence_queue;
33269- atomic_t last_fence_retired;
33270+ atomic_unchecked_t last_fence_retired;
33271 u32 next_fence_to_post;
33272
33273 unsigned int fb_cpp;
33274diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33275index daa6041..a28a5da 100644
33276--- a/drivers/gpu/drm/mga/mga_irq.c
33277+++ b/drivers/gpu/drm/mga/mga_irq.c
33278@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33279 if (crtc != 0)
33280 return 0;
33281
33282- return atomic_read(&dev_priv->vbl_received);
33283+ return atomic_read_unchecked(&dev_priv->vbl_received);
33284 }
33285
33286
33287@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33288 /* VBLANK interrupt */
33289 if (status & MGA_VLINEPEN) {
33290 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33291- atomic_inc(&dev_priv->vbl_received);
33292+ atomic_inc_unchecked(&dev_priv->vbl_received);
33293 drm_handle_vblank(dev, 0);
33294 handled = 1;
33295 }
33296@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33297 MGA_WRITE(MGA_PRIMEND, prim_end);
33298 }
33299
33300- atomic_inc(&dev_priv->last_fence_retired);
33301+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33302 DRM_WAKEUP(&dev_priv->fence_queue);
33303 handled = 1;
33304 }
33305@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33306 * using fences.
33307 */
33308 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33309- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33310+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33311 - *sequence) <= (1 << 23)));
33312
33313 *sequence = cur_fence;
33314diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33315index 4c39a40..b22a9ea 100644
33316--- a/drivers/gpu/drm/r128/r128_cce.c
33317+++ b/drivers/gpu/drm/r128/r128_cce.c
33318@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33319
33320 /* GH: Simple idle check.
33321 */
33322- atomic_set(&dev_priv->idle_count, 0);
33323+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33324
33325 /* We don't support anything other than bus-mastering ring mode,
33326 * but the ring can be in either AGP or PCI space for the ring
33327diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33328index 3c60829..4faf484 100644
33329--- a/drivers/gpu/drm/r128/r128_drv.h
33330+++ b/drivers/gpu/drm/r128/r128_drv.h
33331@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33332 int is_pci;
33333 unsigned long cce_buffers_offset;
33334
33335- atomic_t idle_count;
33336+ atomic_unchecked_t idle_count;
33337
33338 int page_flipping;
33339 int current_page;
33340 u32 crtc_offset;
33341 u32 crtc_offset_cntl;
33342
33343- atomic_t vbl_received;
33344+ atomic_unchecked_t vbl_received;
33345
33346 u32 color_fmt;
33347 unsigned int front_offset;
33348diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33349index 69810fb..97bf17a 100644
33350--- a/drivers/gpu/drm/r128/r128_irq.c
33351+++ b/drivers/gpu/drm/r128/r128_irq.c
33352@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33353 if (crtc != 0)
33354 return 0;
33355
33356- return atomic_read(&dev_priv->vbl_received);
33357+ return atomic_read_unchecked(&dev_priv->vbl_received);
33358 }
33359
33360 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33361@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33362 /* VBLANK interrupt */
33363 if (status & R128_CRTC_VBLANK_INT) {
33364 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33365- atomic_inc(&dev_priv->vbl_received);
33366+ atomic_inc_unchecked(&dev_priv->vbl_received);
33367 drm_handle_vblank(dev, 0);
33368 return IRQ_HANDLED;
33369 }
33370diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33371index af2665c..51922d2 100644
33372--- a/drivers/gpu/drm/r128/r128_state.c
33373+++ b/drivers/gpu/drm/r128/r128_state.c
33374@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33375
33376 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33377 {
33378- if (atomic_read(&dev_priv->idle_count) == 0) {
33379+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33380 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33381 } else {
33382- atomic_set(&dev_priv->idle_count, 0);
33383+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33384 }
33385 }
33386
33387diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33388index dd72b91..8644b3c 100644
33389--- a/drivers/gpu/drm/radeon/atom.c
33390+++ b/drivers/gpu/drm/radeon/atom.c
33391@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33392 char name[512];
33393 int i;
33394
33395+ pax_track_stack();
33396+
33397 ctx->card = card;
33398 ctx->bios = bios;
33399
33400diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33401index 0d79577..efaa7a5 100644
33402--- a/drivers/gpu/drm/radeon/mkregtable.c
33403+++ b/drivers/gpu/drm/radeon/mkregtable.c
33404@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33405 regex_t mask_rex;
33406 regmatch_t match[4];
33407 char buf[1024];
33408- size_t end;
33409+ long end;
33410 int len;
33411 int done = 0;
33412 int r;
33413 unsigned o;
33414 struct offset *offset;
33415 char last_reg_s[10];
33416- int last_reg;
33417+ unsigned long last_reg;
33418
33419 if (regcomp
33420 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33421diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33422index 6735213..38c2c67 100644
33423--- a/drivers/gpu/drm/radeon/radeon.h
33424+++ b/drivers/gpu/drm/radeon/radeon.h
33425@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33426 */
33427 struct radeon_fence_driver {
33428 uint32_t scratch_reg;
33429- atomic_t seq;
33430+ atomic_unchecked_t seq;
33431 uint32_t last_seq;
33432 unsigned long count_timeout;
33433 wait_queue_head_t queue;
33434@@ -640,7 +640,7 @@ struct radeon_asic {
33435 uint32_t offset, uint32_t obj_size);
33436 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33437 void (*bandwidth_update)(struct radeon_device *rdev);
33438-};
33439+} __no_const;
33440
33441 /*
33442 * Asic structures
33443diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33444index 4e928b9..d8b6008 100644
33445--- a/drivers/gpu/drm/radeon/radeon_atombios.c
33446+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33447@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33448 bool linkb;
33449 struct radeon_i2c_bus_rec ddc_bus;
33450
33451+ pax_track_stack();
33452+
33453 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33454
33455 if (data_offset == 0)
33456@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33457 }
33458 }
33459
33460-struct bios_connector {
33461+static struct bios_connector {
33462 bool valid;
33463 uint16_t line_mux;
33464 uint16_t devices;
33465 int connector_type;
33466 struct radeon_i2c_bus_rec ddc_bus;
33467-};
33468+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33469
33470 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33471 drm_device
33472@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33473 uint8_t dac;
33474 union atom_supported_devices *supported_devices;
33475 int i, j;
33476- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33477
33478 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33479
33480diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33481index 083a181..ccccae0 100644
33482--- a/drivers/gpu/drm/radeon/radeon_display.c
33483+++ b/drivers/gpu/drm/radeon/radeon_display.c
33484@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33485
33486 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33487 error = freq - current_freq;
33488- error = error < 0 ? 0xffffffff : error;
33489+ error = (int32_t)error < 0 ? 0xffffffff : error;
33490 } else
33491 error = abs(current_freq - freq);
33492 vco_diff = abs(vco - best_vco);
33493diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33494index 76e4070..193fa7f 100644
33495--- a/drivers/gpu/drm/radeon/radeon_drv.h
33496+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33497@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33498
33499 /* SW interrupt */
33500 wait_queue_head_t swi_queue;
33501- atomic_t swi_emitted;
33502+ atomic_unchecked_t swi_emitted;
33503 int vblank_crtc;
33504 uint32_t irq_enable_reg;
33505 uint32_t r500_disp_irq_reg;
33506diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33507index 3beb26d..6ce9c4a 100644
33508--- a/drivers/gpu/drm/radeon/radeon_fence.c
33509+++ b/drivers/gpu/drm/radeon/radeon_fence.c
33510@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33511 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33512 return 0;
33513 }
33514- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33515+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33516 if (!rdev->cp.ready) {
33517 /* FIXME: cp is not running assume everythings is done right
33518 * away
33519@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33520 return r;
33521 }
33522 WREG32(rdev->fence_drv.scratch_reg, 0);
33523- atomic_set(&rdev->fence_drv.seq, 0);
33524+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33525 INIT_LIST_HEAD(&rdev->fence_drv.created);
33526 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33527 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33528diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33529index a1bf11d..4a123c0 100644
33530--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33531+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33532@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33533 request = compat_alloc_user_space(sizeof(*request));
33534 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33535 || __put_user(req32.param, &request->param)
33536- || __put_user((void __user *)(unsigned long)req32.value,
33537+ || __put_user((unsigned long)req32.value,
33538 &request->value))
33539 return -EFAULT;
33540
33541diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33542index b79ecc4..8dab92d 100644
33543--- a/drivers/gpu/drm/radeon/radeon_irq.c
33544+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33545@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33546 unsigned int ret;
33547 RING_LOCALS;
33548
33549- atomic_inc(&dev_priv->swi_emitted);
33550- ret = atomic_read(&dev_priv->swi_emitted);
33551+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33552+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33553
33554 BEGIN_RING(4);
33555 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33556@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33557 drm_radeon_private_t *dev_priv =
33558 (drm_radeon_private_t *) dev->dev_private;
33559
33560- atomic_set(&dev_priv->swi_emitted, 0);
33561+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33562 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33563
33564 dev->max_vblank_count = 0x001fffff;
33565diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33566index 4747910..48ca4b3 100644
33567--- a/drivers/gpu/drm/radeon/radeon_state.c
33568+++ b/drivers/gpu/drm/radeon/radeon_state.c
33569@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33570 {
33571 drm_radeon_private_t *dev_priv = dev->dev_private;
33572 drm_radeon_getparam_t *param = data;
33573- int value;
33574+ int value = 0;
33575
33576 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33577
33578diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33579index 1381e06..0e53b17 100644
33580--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33581+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33582@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33583 DRM_INFO("radeon: ttm finalized\n");
33584 }
33585
33586-static struct vm_operations_struct radeon_ttm_vm_ops;
33587-static const struct vm_operations_struct *ttm_vm_ops = NULL;
33588-
33589-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33590-{
33591- struct ttm_buffer_object *bo;
33592- int r;
33593-
33594- bo = (struct ttm_buffer_object *)vma->vm_private_data;
33595- if (bo == NULL) {
33596- return VM_FAULT_NOPAGE;
33597- }
33598- r = ttm_vm_ops->fault(vma, vmf);
33599- return r;
33600-}
33601-
33602 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33603 {
33604 struct drm_file *file_priv;
33605 struct radeon_device *rdev;
33606- int r;
33607
33608 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33609 return drm_mmap(filp, vma);
33610@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33611
33612 file_priv = (struct drm_file *)filp->private_data;
33613 rdev = file_priv->minor->dev->dev_private;
33614- if (rdev == NULL) {
33615+ if (!rdev)
33616 return -EINVAL;
33617- }
33618- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33619- if (unlikely(r != 0)) {
33620- return r;
33621- }
33622- if (unlikely(ttm_vm_ops == NULL)) {
33623- ttm_vm_ops = vma->vm_ops;
33624- radeon_ttm_vm_ops = *ttm_vm_ops;
33625- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33626- }
33627- vma->vm_ops = &radeon_ttm_vm_ops;
33628- return 0;
33629+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33630 }
33631
33632
33633diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33634index b12ff76..0bd0c6e 100644
33635--- a/drivers/gpu/drm/radeon/rs690.c
33636+++ b/drivers/gpu/drm/radeon/rs690.c
33637@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33638 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33639 rdev->pm.sideport_bandwidth.full)
33640 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33641- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33642+ read_delay_latency.full = rfixed_const(800 * 1000);
33643 read_delay_latency.full = rfixed_div(read_delay_latency,
33644 rdev->pm.igp_sideport_mclk);
33645+ a.full = rfixed_const(370);
33646+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33647 } else {
33648 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33649 rdev->pm.k8_bandwidth.full)
33650diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33651index 0ed436e..e6e7ce3 100644
33652--- a/drivers/gpu/drm/ttm/ttm_bo.c
33653+++ b/drivers/gpu/drm/ttm/ttm_bo.c
33654@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33655 NULL
33656 };
33657
33658-static struct sysfs_ops ttm_bo_global_ops = {
33659+static const struct sysfs_ops ttm_bo_global_ops = {
33660 .show = &ttm_bo_global_show
33661 };
33662
33663diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33664index 1c040d0..f9e4af8 100644
33665--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33666+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33667@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33668 {
33669 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33670 vma->vm_private_data;
33671- struct ttm_bo_device *bdev = bo->bdev;
33672+ struct ttm_bo_device *bdev;
33673 unsigned long bus_base;
33674 unsigned long bus_offset;
33675 unsigned long bus_size;
33676@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33677 unsigned long address = (unsigned long)vmf->virtual_address;
33678 int retval = VM_FAULT_NOPAGE;
33679
33680+ if (!bo)
33681+ return VM_FAULT_NOPAGE;
33682+ bdev = bo->bdev;
33683+
33684 /*
33685 * Work around locking order reversal in fault / nopfn
33686 * between mmap_sem and bo_reserve: Perform a trylock operation
33687diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33688index b170071..28ae90e 100644
33689--- a/drivers/gpu/drm/ttm/ttm_global.c
33690+++ b/drivers/gpu/drm/ttm/ttm_global.c
33691@@ -36,7 +36,7 @@
33692 struct ttm_global_item {
33693 struct mutex mutex;
33694 void *object;
33695- int refcount;
33696+ atomic_t refcount;
33697 };
33698
33699 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33700@@ -49,7 +49,7 @@ void ttm_global_init(void)
33701 struct ttm_global_item *item = &glob[i];
33702 mutex_init(&item->mutex);
33703 item->object = NULL;
33704- item->refcount = 0;
33705+ atomic_set(&item->refcount, 0);
33706 }
33707 }
33708
33709@@ -59,7 +59,7 @@ void ttm_global_release(void)
33710 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33711 struct ttm_global_item *item = &glob[i];
33712 BUG_ON(item->object != NULL);
33713- BUG_ON(item->refcount != 0);
33714+ BUG_ON(atomic_read(&item->refcount) != 0);
33715 }
33716 }
33717
33718@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33719 void *object;
33720
33721 mutex_lock(&item->mutex);
33722- if (item->refcount == 0) {
33723+ if (atomic_read(&item->refcount) == 0) {
33724 item->object = kzalloc(ref->size, GFP_KERNEL);
33725 if (unlikely(item->object == NULL)) {
33726 ret = -ENOMEM;
33727@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33728 goto out_err;
33729
33730 }
33731- ++item->refcount;
33732+ atomic_inc(&item->refcount);
33733 ref->object = item->object;
33734 object = item->object;
33735 mutex_unlock(&item->mutex);
33736@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33737 struct ttm_global_item *item = &glob[ref->global_type];
33738
33739 mutex_lock(&item->mutex);
33740- BUG_ON(item->refcount == 0);
33741+ BUG_ON(atomic_read(&item->refcount) == 0);
33742 BUG_ON(ref->object != item->object);
33743- if (--item->refcount == 0) {
33744+ if (atomic_dec_and_test(&item->refcount)) {
33745 ref->release(ref);
33746 item->object = NULL;
33747 }
33748diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33749index 072c281..d8ef483 100644
33750--- a/drivers/gpu/drm/ttm/ttm_memory.c
33751+++ b/drivers/gpu/drm/ttm/ttm_memory.c
33752@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33753 NULL
33754 };
33755
33756-static struct sysfs_ops ttm_mem_zone_ops = {
33757+static const struct sysfs_ops ttm_mem_zone_ops = {
33758 .show = &ttm_mem_zone_show,
33759 .store = &ttm_mem_zone_store
33760 };
33761diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33762index cafcb84..b8e66cc 100644
33763--- a/drivers/gpu/drm/via/via_drv.h
33764+++ b/drivers/gpu/drm/via/via_drv.h
33765@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33766 typedef uint32_t maskarray_t[5];
33767
33768 typedef struct drm_via_irq {
33769- atomic_t irq_received;
33770+ atomic_unchecked_t irq_received;
33771 uint32_t pending_mask;
33772 uint32_t enable_mask;
33773 wait_queue_head_t irq_queue;
33774@@ -75,7 +75,7 @@ typedef struct drm_via_private {
33775 struct timeval last_vblank;
33776 int last_vblank_valid;
33777 unsigned usec_per_vblank;
33778- atomic_t vbl_received;
33779+ atomic_unchecked_t vbl_received;
33780 drm_via_state_t hc_state;
33781 char pci_buf[VIA_PCI_BUF_SIZE];
33782 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33783diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33784index 5935b88..127a8a6 100644
33785--- a/drivers/gpu/drm/via/via_irq.c
33786+++ b/drivers/gpu/drm/via/via_irq.c
33787@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33788 if (crtc != 0)
33789 return 0;
33790
33791- return atomic_read(&dev_priv->vbl_received);
33792+ return atomic_read_unchecked(&dev_priv->vbl_received);
33793 }
33794
33795 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33796@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33797
33798 status = VIA_READ(VIA_REG_INTERRUPT);
33799 if (status & VIA_IRQ_VBLANK_PENDING) {
33800- atomic_inc(&dev_priv->vbl_received);
33801- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33802+ atomic_inc_unchecked(&dev_priv->vbl_received);
33803+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33804 do_gettimeofday(&cur_vblank);
33805 if (dev_priv->last_vblank_valid) {
33806 dev_priv->usec_per_vblank =
33807@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33808 dev_priv->last_vblank = cur_vblank;
33809 dev_priv->last_vblank_valid = 1;
33810 }
33811- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33812+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33813 DRM_DEBUG("US per vblank is: %u\n",
33814 dev_priv->usec_per_vblank);
33815 }
33816@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33817
33818 for (i = 0; i < dev_priv->num_irqs; ++i) {
33819 if (status & cur_irq->pending_mask) {
33820- atomic_inc(&cur_irq->irq_received);
33821+ atomic_inc_unchecked(&cur_irq->irq_received);
33822 DRM_WAKEUP(&cur_irq->irq_queue);
33823 handled = 1;
33824 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
33825@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
33826 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33827 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33828 masks[irq][4]));
33829- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33830+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33831 } else {
33832 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33833 (((cur_irq_sequence =
33834- atomic_read(&cur_irq->irq_received)) -
33835+ atomic_read_unchecked(&cur_irq->irq_received)) -
33836 *sequence) <= (1 << 23)));
33837 }
33838 *sequence = cur_irq_sequence;
33839@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
33840 }
33841
33842 for (i = 0; i < dev_priv->num_irqs; ++i) {
33843- atomic_set(&cur_irq->irq_received, 0);
33844+ atomic_set_unchecked(&cur_irq->irq_received, 0);
33845 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33846 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33847 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33848@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33849 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33850 case VIA_IRQ_RELATIVE:
33851 irqwait->request.sequence +=
33852- atomic_read(&cur_irq->irq_received);
33853+ atomic_read_unchecked(&cur_irq->irq_received);
33854 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33855 case VIA_IRQ_ABSOLUTE:
33856 break;
33857diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
33858index aa8688d..6a0140c 100644
33859--- a/drivers/gpu/vga/vgaarb.c
33860+++ b/drivers/gpu/vga/vgaarb.c
33861@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
33862 uc = &priv->cards[i];
33863 }
33864
33865- if (!uc)
33866- return -EINVAL;
33867+ if (!uc) {
33868+ ret_val = -EINVAL;
33869+ goto done;
33870+ }
33871
33872- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
33873- return -EINVAL;
33874+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
33875+ ret_val = -EINVAL;
33876+ goto done;
33877+ }
33878
33879- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
33880- return -EINVAL;
33881+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
33882+ ret_val = -EINVAL;
33883+ goto done;
33884+ }
33885
33886 vga_put(pdev, io_state);
33887
33888diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33889index 11f8069..4783396 100644
33890--- a/drivers/hid/hid-core.c
33891+++ b/drivers/hid/hid-core.c
33892@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
33893
33894 int hid_add_device(struct hid_device *hdev)
33895 {
33896- static atomic_t id = ATOMIC_INIT(0);
33897+ static atomic_unchecked_t id = ATOMIC_INIT(0);
33898 int ret;
33899
33900 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33901@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
33902 /* XXX hack, any other cleaner solution after the driver core
33903 * is converted to allow more than 20 bytes as the device name? */
33904 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33905- hdev->vendor, hdev->product, atomic_inc_return(&id));
33906+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33907
33908 ret = device_add(&hdev->dev);
33909 if (!ret)
33910diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33911index 8b6ee24..70f657d 100644
33912--- a/drivers/hid/usbhid/hiddev.c
33913+++ b/drivers/hid/usbhid/hiddev.c
33914@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33915 return put_user(HID_VERSION, (int __user *)arg);
33916
33917 case HIDIOCAPPLICATION:
33918- if (arg < 0 || arg >= hid->maxapplication)
33919+ if (arg >= hid->maxapplication)
33920 return -EINVAL;
33921
33922 for (i = 0; i < hid->maxcollection; i++)
33923diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
33924index 5d5ed69..f40533e 100644
33925--- a/drivers/hwmon/lis3lv02d.c
33926+++ b/drivers/hwmon/lis3lv02d.c
33927@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
33928 * the lid is closed. This leads to interrupts as soon as a little move
33929 * is done.
33930 */
33931- atomic_inc(&lis3_dev.count);
33932+ atomic_inc_unchecked(&lis3_dev.count);
33933
33934 wake_up_interruptible(&lis3_dev.misc_wait);
33935 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
33936@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33937 if (test_and_set_bit(0, &lis3_dev.misc_opened))
33938 return -EBUSY; /* already open */
33939
33940- atomic_set(&lis3_dev.count, 0);
33941+ atomic_set_unchecked(&lis3_dev.count, 0);
33942
33943 /*
33944 * The sensor can generate interrupts for free-fall and direction
33945@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33946 add_wait_queue(&lis3_dev.misc_wait, &wait);
33947 while (true) {
33948 set_current_state(TASK_INTERRUPTIBLE);
33949- data = atomic_xchg(&lis3_dev.count, 0);
33950+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
33951 if (data)
33952 break;
33953
33954@@ -244,7 +244,7 @@ out:
33955 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33956 {
33957 poll_wait(file, &lis3_dev.misc_wait, wait);
33958- if (atomic_read(&lis3_dev.count))
33959+ if (atomic_read_unchecked(&lis3_dev.count))
33960 return POLLIN | POLLRDNORM;
33961 return 0;
33962 }
33963diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
33964index 7cdd76f..fe0efdf 100644
33965--- a/drivers/hwmon/lis3lv02d.h
33966+++ b/drivers/hwmon/lis3lv02d.h
33967@@ -201,7 +201,7 @@ struct lis3lv02d {
33968
33969 struct input_polled_dev *idev; /* input device */
33970 struct platform_device *pdev; /* platform device */
33971- atomic_t count; /* interrupt count after last read */
33972+ atomic_unchecked_t count; /* interrupt count after last read */
33973 int xcalib; /* calibrated null value for x */
33974 int ycalib; /* calibrated null value for y */
33975 int zcalib; /* calibrated null value for z */
33976diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33977index 740785e..5a5c6c6 100644
33978--- a/drivers/hwmon/sht15.c
33979+++ b/drivers/hwmon/sht15.c
33980@@ -112,7 +112,7 @@ struct sht15_data {
33981 int supply_uV;
33982 int supply_uV_valid;
33983 struct work_struct update_supply_work;
33984- atomic_t interrupt_handled;
33985+ atomic_unchecked_t interrupt_handled;
33986 };
33987
33988 /**
33989@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
33990 return ret;
33991
33992 gpio_direction_input(data->pdata->gpio_data);
33993- atomic_set(&data->interrupt_handled, 0);
33994+ atomic_set_unchecked(&data->interrupt_handled, 0);
33995
33996 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33997 if (gpio_get_value(data->pdata->gpio_data) == 0) {
33998 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33999 /* Only relevant if the interrupt hasn't occured. */
34000- if (!atomic_read(&data->interrupt_handled))
34001+ if (!atomic_read_unchecked(&data->interrupt_handled))
34002 schedule_work(&data->read_work);
34003 }
34004 ret = wait_event_timeout(data->wait_queue,
34005@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34006 struct sht15_data *data = d;
34007 /* First disable the interrupt */
34008 disable_irq_nosync(irq);
34009- atomic_inc(&data->interrupt_handled);
34010+ atomic_inc_unchecked(&data->interrupt_handled);
34011 /* Then schedule a reading work struct */
34012 if (data->flag != SHT15_READING_NOTHING)
34013 schedule_work(&data->read_work);
34014@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34015 here as could have gone low in meantime so verify
34016 it hasn't!
34017 */
34018- atomic_set(&data->interrupt_handled, 0);
34019+ atomic_set_unchecked(&data->interrupt_handled, 0);
34020 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34021 /* If still not occured or another handler has been scheduled */
34022 if (gpio_get_value(data->pdata->gpio_data)
34023- || atomic_read(&data->interrupt_handled))
34024+ || atomic_read_unchecked(&data->interrupt_handled))
34025 return;
34026 }
34027 /* Read the data back from the device */
34028diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34029index 97851c5..cb40626 100644
34030--- a/drivers/hwmon/w83791d.c
34031+++ b/drivers/hwmon/w83791d.c
34032@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34033 struct i2c_board_info *info);
34034 static int w83791d_remove(struct i2c_client *client);
34035
34036-static int w83791d_read(struct i2c_client *client, u8 register);
34037-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34038+static int w83791d_read(struct i2c_client *client, u8 reg);
34039+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34040 static struct w83791d_data *w83791d_update_device(struct device *dev);
34041
34042 #ifdef DEBUG
34043diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34044index 378fcb5..5e91fa8 100644
34045--- a/drivers/i2c/busses/i2c-amd756-s4882.c
34046+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34047@@ -43,7 +43,7 @@
34048 extern struct i2c_adapter amd756_smbus;
34049
34050 static struct i2c_adapter *s4882_adapter;
34051-static struct i2c_algorithm *s4882_algo;
34052+static i2c_algorithm_no_const *s4882_algo;
34053
34054 /* Wrapper access functions for multiplexed SMBus */
34055 static DEFINE_MUTEX(amd756_lock);
34056diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34057index 29015eb..af2d8e9 100644
34058--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34059+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34060@@ -41,7 +41,7 @@
34061 extern struct i2c_adapter *nforce2_smbus;
34062
34063 static struct i2c_adapter *s4985_adapter;
34064-static struct i2c_algorithm *s4985_algo;
34065+static i2c_algorithm_no_const *s4985_algo;
34066
34067 /* Wrapper access functions for multiplexed SMBus */
34068 static DEFINE_MUTEX(nforce2_lock);
34069diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34070index 878f8ec..12376fc 100644
34071--- a/drivers/ide/aec62xx.c
34072+++ b/drivers/ide/aec62xx.c
34073@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34074 .cable_detect = atp86x_cable_detect,
34075 };
34076
34077-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34078+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34079 { /* 0: AEC6210 */
34080 .name = DRV_NAME,
34081 .init_chipset = init_chipset_aec62xx,
34082diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34083index e59b6de..4b4fc65 100644
34084--- a/drivers/ide/alim15x3.c
34085+++ b/drivers/ide/alim15x3.c
34086@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34087 .dma_sff_read_status = ide_dma_sff_read_status,
34088 };
34089
34090-static const struct ide_port_info ali15x3_chipset __devinitdata = {
34091+static const struct ide_port_info ali15x3_chipset __devinitconst = {
34092 .name = DRV_NAME,
34093 .init_chipset = init_chipset_ali15x3,
34094 .init_hwif = init_hwif_ali15x3,
34095diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34096index 628cd2e..087a414 100644
34097--- a/drivers/ide/amd74xx.c
34098+++ b/drivers/ide/amd74xx.c
34099@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34100 .udma_mask = udma, \
34101 }
34102
34103-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34104+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34105 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34106 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34107 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34108diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34109index 837322b..837fd71 100644
34110--- a/drivers/ide/atiixp.c
34111+++ b/drivers/ide/atiixp.c
34112@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34113 .cable_detect = atiixp_cable_detect,
34114 };
34115
34116-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34117+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34118 { /* 0: IXP200/300/400/700 */
34119 .name = DRV_NAME,
34120 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34121diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34122index ca0c46f..d55318a 100644
34123--- a/drivers/ide/cmd64x.c
34124+++ b/drivers/ide/cmd64x.c
34125@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34126 .dma_sff_read_status = ide_dma_sff_read_status,
34127 };
34128
34129-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34130+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34131 { /* 0: CMD643 */
34132 .name = DRV_NAME,
34133 .init_chipset = init_chipset_cmd64x,
34134diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34135index 09f98ed..cebc5bc 100644
34136--- a/drivers/ide/cs5520.c
34137+++ b/drivers/ide/cs5520.c
34138@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34139 .set_dma_mode = cs5520_set_dma_mode,
34140 };
34141
34142-static const struct ide_port_info cyrix_chipset __devinitdata = {
34143+static const struct ide_port_info cyrix_chipset __devinitconst = {
34144 .name = DRV_NAME,
34145 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34146 .port_ops = &cs5520_port_ops,
34147diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34148index 40bf05e..7d58ca0 100644
34149--- a/drivers/ide/cs5530.c
34150+++ b/drivers/ide/cs5530.c
34151@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34152 .udma_filter = cs5530_udma_filter,
34153 };
34154
34155-static const struct ide_port_info cs5530_chipset __devinitdata = {
34156+static const struct ide_port_info cs5530_chipset __devinitconst = {
34157 .name = DRV_NAME,
34158 .init_chipset = init_chipset_cs5530,
34159 .init_hwif = init_hwif_cs5530,
34160diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34161index 983d957..53e6172 100644
34162--- a/drivers/ide/cs5535.c
34163+++ b/drivers/ide/cs5535.c
34164@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34165 .cable_detect = cs5535_cable_detect,
34166 };
34167
34168-static const struct ide_port_info cs5535_chipset __devinitdata = {
34169+static const struct ide_port_info cs5535_chipset __devinitconst = {
34170 .name = DRV_NAME,
34171 .port_ops = &cs5535_port_ops,
34172 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34173diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34174index 74fc540..8e933d8 100644
34175--- a/drivers/ide/cy82c693.c
34176+++ b/drivers/ide/cy82c693.c
34177@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34178 .set_dma_mode = cy82c693_set_dma_mode,
34179 };
34180
34181-static const struct ide_port_info cy82c693_chipset __devinitdata = {
34182+static const struct ide_port_info cy82c693_chipset __devinitconst = {
34183 .name = DRV_NAME,
34184 .init_iops = init_iops_cy82c693,
34185 .port_ops = &cy82c693_port_ops,
34186diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34187index 7ce68ef..e78197d 100644
34188--- a/drivers/ide/hpt366.c
34189+++ b/drivers/ide/hpt366.c
34190@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34191 }
34192 };
34193
34194-static const struct hpt_info hpt36x __devinitdata = {
34195+static const struct hpt_info hpt36x __devinitconst = {
34196 .chip_name = "HPT36x",
34197 .chip_type = HPT36x,
34198 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34199@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34200 .timings = &hpt36x_timings
34201 };
34202
34203-static const struct hpt_info hpt370 __devinitdata = {
34204+static const struct hpt_info hpt370 __devinitconst = {
34205 .chip_name = "HPT370",
34206 .chip_type = HPT370,
34207 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34208@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34209 .timings = &hpt37x_timings
34210 };
34211
34212-static const struct hpt_info hpt370a __devinitdata = {
34213+static const struct hpt_info hpt370a __devinitconst = {
34214 .chip_name = "HPT370A",
34215 .chip_type = HPT370A,
34216 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34217@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34218 .timings = &hpt37x_timings
34219 };
34220
34221-static const struct hpt_info hpt374 __devinitdata = {
34222+static const struct hpt_info hpt374 __devinitconst = {
34223 .chip_name = "HPT374",
34224 .chip_type = HPT374,
34225 .udma_mask = ATA_UDMA5,
34226@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34227 .timings = &hpt37x_timings
34228 };
34229
34230-static const struct hpt_info hpt372 __devinitdata = {
34231+static const struct hpt_info hpt372 __devinitconst = {
34232 .chip_name = "HPT372",
34233 .chip_type = HPT372,
34234 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34235@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34236 .timings = &hpt37x_timings
34237 };
34238
34239-static const struct hpt_info hpt372a __devinitdata = {
34240+static const struct hpt_info hpt372a __devinitconst = {
34241 .chip_name = "HPT372A",
34242 .chip_type = HPT372A,
34243 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34244@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34245 .timings = &hpt37x_timings
34246 };
34247
34248-static const struct hpt_info hpt302 __devinitdata = {
34249+static const struct hpt_info hpt302 __devinitconst = {
34250 .chip_name = "HPT302",
34251 .chip_type = HPT302,
34252 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34253@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34254 .timings = &hpt37x_timings
34255 };
34256
34257-static const struct hpt_info hpt371 __devinitdata = {
34258+static const struct hpt_info hpt371 __devinitconst = {
34259 .chip_name = "HPT371",
34260 .chip_type = HPT371,
34261 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34262@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34263 .timings = &hpt37x_timings
34264 };
34265
34266-static const struct hpt_info hpt372n __devinitdata = {
34267+static const struct hpt_info hpt372n __devinitconst = {
34268 .chip_name = "HPT372N",
34269 .chip_type = HPT372N,
34270 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34271@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34272 .timings = &hpt37x_timings
34273 };
34274
34275-static const struct hpt_info hpt302n __devinitdata = {
34276+static const struct hpt_info hpt302n __devinitconst = {
34277 .chip_name = "HPT302N",
34278 .chip_type = HPT302N,
34279 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34280@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34281 .timings = &hpt37x_timings
34282 };
34283
34284-static const struct hpt_info hpt371n __devinitdata = {
34285+static const struct hpt_info hpt371n __devinitconst = {
34286 .chip_name = "HPT371N",
34287 .chip_type = HPT371N,
34288 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34289@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34290 .dma_sff_read_status = ide_dma_sff_read_status,
34291 };
34292
34293-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34294+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34295 { /* 0: HPT36x */
34296 .name = DRV_NAME,
34297 .init_chipset = init_chipset_hpt366,
34298diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34299index 2de76cc..74186a1 100644
34300--- a/drivers/ide/ide-cd.c
34301+++ b/drivers/ide/ide-cd.c
34302@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34303 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34304 if ((unsigned long)buf & alignment
34305 || blk_rq_bytes(rq) & q->dma_pad_mask
34306- || object_is_on_stack(buf))
34307+ || object_starts_on_stack(buf))
34308 drive->dma = 0;
34309 }
34310 }
34311diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34312index fefbdfc..62ff465 100644
34313--- a/drivers/ide/ide-floppy.c
34314+++ b/drivers/ide/ide-floppy.c
34315@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34316 u8 pc_buf[256], header_len, desc_cnt;
34317 int i, rc = 1, blocks, length;
34318
34319+ pax_track_stack();
34320+
34321 ide_debug_log(IDE_DBG_FUNC, "enter");
34322
34323 drive->bios_cyl = 0;
34324diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34325index 39d4e01..11538ce 100644
34326--- a/drivers/ide/ide-pci-generic.c
34327+++ b/drivers/ide/ide-pci-generic.c
34328@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34329 .udma_mask = ATA_UDMA6, \
34330 }
34331
34332-static const struct ide_port_info generic_chipsets[] __devinitdata = {
34333+static const struct ide_port_info generic_chipsets[] __devinitconst = {
34334 /* 0: Unknown */
34335 DECLARE_GENERIC_PCI_DEV(0),
34336
34337diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34338index 0d266a5..aaca790 100644
34339--- a/drivers/ide/it8172.c
34340+++ b/drivers/ide/it8172.c
34341@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34342 .set_dma_mode = it8172_set_dma_mode,
34343 };
34344
34345-static const struct ide_port_info it8172_port_info __devinitdata = {
34346+static const struct ide_port_info it8172_port_info __devinitconst = {
34347 .name = DRV_NAME,
34348 .port_ops = &it8172_port_ops,
34349 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34350diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34351index 4797616..4be488a 100644
34352--- a/drivers/ide/it8213.c
34353+++ b/drivers/ide/it8213.c
34354@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34355 .cable_detect = it8213_cable_detect,
34356 };
34357
34358-static const struct ide_port_info it8213_chipset __devinitdata = {
34359+static const struct ide_port_info it8213_chipset __devinitconst = {
34360 .name = DRV_NAME,
34361 .enablebits = { {0x41, 0x80, 0x80} },
34362 .port_ops = &it8213_port_ops,
34363diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34364index 51aa745..146ee60 100644
34365--- a/drivers/ide/it821x.c
34366+++ b/drivers/ide/it821x.c
34367@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34368 .cable_detect = it821x_cable_detect,
34369 };
34370
34371-static const struct ide_port_info it821x_chipset __devinitdata = {
34372+static const struct ide_port_info it821x_chipset __devinitconst = {
34373 .name = DRV_NAME,
34374 .init_chipset = init_chipset_it821x,
34375 .init_hwif = init_hwif_it821x,
34376diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34377index bf2be64..9270098 100644
34378--- a/drivers/ide/jmicron.c
34379+++ b/drivers/ide/jmicron.c
34380@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34381 .cable_detect = jmicron_cable_detect,
34382 };
34383
34384-static const struct ide_port_info jmicron_chipset __devinitdata = {
34385+static const struct ide_port_info jmicron_chipset __devinitconst = {
34386 .name = DRV_NAME,
34387 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34388 .port_ops = &jmicron_port_ops,
34389diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34390index 95327a2..73f78d8 100644
34391--- a/drivers/ide/ns87415.c
34392+++ b/drivers/ide/ns87415.c
34393@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34394 .dma_sff_read_status = superio_dma_sff_read_status,
34395 };
34396
34397-static const struct ide_port_info ns87415_chipset __devinitdata = {
34398+static const struct ide_port_info ns87415_chipset __devinitconst = {
34399 .name = DRV_NAME,
34400 .init_hwif = init_hwif_ns87415,
34401 .tp_ops = &ns87415_tp_ops,
34402diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34403index f1d70d6..e1de05b 100644
34404--- a/drivers/ide/opti621.c
34405+++ b/drivers/ide/opti621.c
34406@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34407 .set_pio_mode = opti621_set_pio_mode,
34408 };
34409
34410-static const struct ide_port_info opti621_chipset __devinitdata = {
34411+static const struct ide_port_info opti621_chipset __devinitconst = {
34412 .name = DRV_NAME,
34413 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34414 .port_ops = &opti621_port_ops,
34415diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34416index 65ba823..7311f4d 100644
34417--- a/drivers/ide/pdc202xx_new.c
34418+++ b/drivers/ide/pdc202xx_new.c
34419@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34420 .udma_mask = udma, \
34421 }
34422
34423-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34424+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34425 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34426 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34427 };
34428diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34429index cb812f3..af816ef 100644
34430--- a/drivers/ide/pdc202xx_old.c
34431+++ b/drivers/ide/pdc202xx_old.c
34432@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34433 .max_sectors = sectors, \
34434 }
34435
34436-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34437+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34438 { /* 0: PDC20246 */
34439 .name = DRV_NAME,
34440 .init_chipset = init_chipset_pdc202xx,
34441diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34442index bf14f39..15c4b98 100644
34443--- a/drivers/ide/piix.c
34444+++ b/drivers/ide/piix.c
34445@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34446 .udma_mask = udma, \
34447 }
34448
34449-static const struct ide_port_info piix_pci_info[] __devinitdata = {
34450+static const struct ide_port_info piix_pci_info[] __devinitconst = {
34451 /* 0: MPIIX */
34452 { /*
34453 * MPIIX actually has only a single IDE channel mapped to
34454diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34455index a6414a8..c04173e 100644
34456--- a/drivers/ide/rz1000.c
34457+++ b/drivers/ide/rz1000.c
34458@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34459 }
34460 }
34461
34462-static const struct ide_port_info rz1000_chipset __devinitdata = {
34463+static const struct ide_port_info rz1000_chipset __devinitconst = {
34464 .name = DRV_NAME,
34465 .host_flags = IDE_HFLAG_NO_DMA,
34466 };
34467diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34468index d467478..9203942 100644
34469--- a/drivers/ide/sc1200.c
34470+++ b/drivers/ide/sc1200.c
34471@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34472 .dma_sff_read_status = ide_dma_sff_read_status,
34473 };
34474
34475-static const struct ide_port_info sc1200_chipset __devinitdata = {
34476+static const struct ide_port_info sc1200_chipset __devinitconst = {
34477 .name = DRV_NAME,
34478 .port_ops = &sc1200_port_ops,
34479 .dma_ops = &sc1200_dma_ops,
34480diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34481index 1104bb3..59c5194 100644
34482--- a/drivers/ide/scc_pata.c
34483+++ b/drivers/ide/scc_pata.c
34484@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34485 .dma_sff_read_status = scc_dma_sff_read_status,
34486 };
34487
34488-static const struct ide_port_info scc_chipset __devinitdata = {
34489+static const struct ide_port_info scc_chipset __devinitconst = {
34490 .name = "sccIDE",
34491 .init_iops = init_iops_scc,
34492 .init_dma = scc_init_dma,
34493diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34494index b6554ef..6cc2cc3 100644
34495--- a/drivers/ide/serverworks.c
34496+++ b/drivers/ide/serverworks.c
34497@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34498 .cable_detect = svwks_cable_detect,
34499 };
34500
34501-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34502+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34503 { /* 0: OSB4 */
34504 .name = DRV_NAME,
34505 .init_chipset = init_chipset_svwks,
34506diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34507index ab3db61..afed580 100644
34508--- a/drivers/ide/setup-pci.c
34509+++ b/drivers/ide/setup-pci.c
34510@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34511 int ret, i, n_ports = dev2 ? 4 : 2;
34512 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34513
34514+ pax_track_stack();
34515+
34516 for (i = 0; i < n_ports / 2; i++) {
34517 ret = ide_setup_pci_controller(pdev[i], d, !i);
34518 if (ret < 0)
34519diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34520index d95df52..0b03a39 100644
34521--- a/drivers/ide/siimage.c
34522+++ b/drivers/ide/siimage.c
34523@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34524 .udma_mask = ATA_UDMA6, \
34525 }
34526
34527-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34528+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34529 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34530 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34531 };
34532diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34533index 3b88eba..ca8699d 100644
34534--- a/drivers/ide/sis5513.c
34535+++ b/drivers/ide/sis5513.c
34536@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34537 .cable_detect = sis_cable_detect,
34538 };
34539
34540-static const struct ide_port_info sis5513_chipset __devinitdata = {
34541+static const struct ide_port_info sis5513_chipset __devinitconst = {
34542 .name = DRV_NAME,
34543 .init_chipset = init_chipset_sis5513,
34544 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34545diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34546index d698da4..fca42a4 100644
34547--- a/drivers/ide/sl82c105.c
34548+++ b/drivers/ide/sl82c105.c
34549@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34550 .dma_sff_read_status = ide_dma_sff_read_status,
34551 };
34552
34553-static const struct ide_port_info sl82c105_chipset __devinitdata = {
34554+static const struct ide_port_info sl82c105_chipset __devinitconst = {
34555 .name = DRV_NAME,
34556 .init_chipset = init_chipset_sl82c105,
34557 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34558diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34559index 1ccfb40..83d5779 100644
34560--- a/drivers/ide/slc90e66.c
34561+++ b/drivers/ide/slc90e66.c
34562@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34563 .cable_detect = slc90e66_cable_detect,
34564 };
34565
34566-static const struct ide_port_info slc90e66_chipset __devinitdata = {
34567+static const struct ide_port_info slc90e66_chipset __devinitconst = {
34568 .name = DRV_NAME,
34569 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34570 .port_ops = &slc90e66_port_ops,
34571diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34572index 05a93d6..5f9e325 100644
34573--- a/drivers/ide/tc86c001.c
34574+++ b/drivers/ide/tc86c001.c
34575@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34576 .dma_sff_read_status = ide_dma_sff_read_status,
34577 };
34578
34579-static const struct ide_port_info tc86c001_chipset __devinitdata = {
34580+static const struct ide_port_info tc86c001_chipset __devinitconst = {
34581 .name = DRV_NAME,
34582 .init_hwif = init_hwif_tc86c001,
34583 .port_ops = &tc86c001_port_ops,
34584diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34585index 8773c3b..7907d6c 100644
34586--- a/drivers/ide/triflex.c
34587+++ b/drivers/ide/triflex.c
34588@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34589 .set_dma_mode = triflex_set_mode,
34590 };
34591
34592-static const struct ide_port_info triflex_device __devinitdata = {
34593+static const struct ide_port_info triflex_device __devinitconst = {
34594 .name = DRV_NAME,
34595 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34596 .port_ops = &triflex_port_ops,
34597diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34598index 4b42ca0..e494a98 100644
34599--- a/drivers/ide/trm290.c
34600+++ b/drivers/ide/trm290.c
34601@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34602 .dma_check = trm290_dma_check,
34603 };
34604
34605-static const struct ide_port_info trm290_chipset __devinitdata = {
34606+static const struct ide_port_info trm290_chipset __devinitconst = {
34607 .name = DRV_NAME,
34608 .init_hwif = init_hwif_trm290,
34609 .tp_ops = &trm290_tp_ops,
34610diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34611index 028de26..520d5d5 100644
34612--- a/drivers/ide/via82cxxx.c
34613+++ b/drivers/ide/via82cxxx.c
34614@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34615 .cable_detect = via82cxxx_cable_detect,
34616 };
34617
34618-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34619+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34620 .name = DRV_NAME,
34621 .init_chipset = init_chipset_via82cxxx,
34622 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34623diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34624index 2cd00b5..14de699 100644
34625--- a/drivers/ieee1394/dv1394.c
34626+++ b/drivers/ieee1394/dv1394.c
34627@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34628 based upon DIF section and sequence
34629 */
34630
34631-static void inline
34632+static inline void
34633 frame_put_packet (struct frame *f, struct packet *p)
34634 {
34635 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34636diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34637index e947d8f..6a966b9 100644
34638--- a/drivers/ieee1394/hosts.c
34639+++ b/drivers/ieee1394/hosts.c
34640@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34641 }
34642
34643 static struct hpsb_host_driver dummy_driver = {
34644+ .name = "dummy",
34645 .transmit_packet = dummy_transmit_packet,
34646 .devctl = dummy_devctl,
34647 .isoctl = dummy_isoctl
34648diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34649index ddaab6e..8d37435 100644
34650--- a/drivers/ieee1394/init_ohci1394_dma.c
34651+++ b/drivers/ieee1394/init_ohci1394_dma.c
34652@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34653 for (func = 0; func < 8; func++) {
34654 u32 class = read_pci_config(num,slot,func,
34655 PCI_CLASS_REVISION);
34656- if ((class == 0xffffffff))
34657+ if (class == 0xffffffff)
34658 continue; /* No device at this func */
34659
34660 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34661diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34662index 65c1429..5d8c11f 100644
34663--- a/drivers/ieee1394/ohci1394.c
34664+++ b/drivers/ieee1394/ohci1394.c
34665@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34666 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34667
34668 /* Module Parameters */
34669-static int phys_dma = 1;
34670+static int phys_dma;
34671 module_param(phys_dma, int, 0444);
34672-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34673+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34674
34675 static void dma_trm_tasklet(unsigned long data);
34676 static void dma_trm_reset(struct dma_trm_ctx *d);
34677diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34678index f199896..78c9fc8 100644
34679--- a/drivers/ieee1394/sbp2.c
34680+++ b/drivers/ieee1394/sbp2.c
34681@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34682 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34683 MODULE_LICENSE("GPL");
34684
34685-static int sbp2_module_init(void)
34686+static int __init sbp2_module_init(void)
34687 {
34688 int ret;
34689
34690diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34691index a5dea6b..0cefe8f 100644
34692--- a/drivers/infiniband/core/cm.c
34693+++ b/drivers/infiniband/core/cm.c
34694@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34695
34696 struct cm_counter_group {
34697 struct kobject obj;
34698- atomic_long_t counter[CM_ATTR_COUNT];
34699+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34700 };
34701
34702 struct cm_counter_attribute {
34703@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34704 struct ib_mad_send_buf *msg = NULL;
34705 int ret;
34706
34707- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34708+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34709 counter[CM_REQ_COUNTER]);
34710
34711 /* Quick state check to discard duplicate REQs. */
34712@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34713 if (!cm_id_priv)
34714 return;
34715
34716- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34717+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34718 counter[CM_REP_COUNTER]);
34719 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34720 if (ret)
34721@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34722 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34723 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34724 spin_unlock_irq(&cm_id_priv->lock);
34725- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34726+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34727 counter[CM_RTU_COUNTER]);
34728 goto out;
34729 }
34730@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34731 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34732 dreq_msg->local_comm_id);
34733 if (!cm_id_priv) {
34734- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34735+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34736 counter[CM_DREQ_COUNTER]);
34737 cm_issue_drep(work->port, work->mad_recv_wc);
34738 return -EINVAL;
34739@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34740 case IB_CM_MRA_REP_RCVD:
34741 break;
34742 case IB_CM_TIMEWAIT:
34743- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34744+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34745 counter[CM_DREQ_COUNTER]);
34746 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34747 goto unlock;
34748@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34749 cm_free_msg(msg);
34750 goto deref;
34751 case IB_CM_DREQ_RCVD:
34752- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34753+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34754 counter[CM_DREQ_COUNTER]);
34755 goto unlock;
34756 default:
34757@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34758 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34759 cm_id_priv->msg, timeout)) {
34760 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34761- atomic_long_inc(&work->port->
34762+ atomic_long_inc_unchecked(&work->port->
34763 counter_group[CM_RECV_DUPLICATES].
34764 counter[CM_MRA_COUNTER]);
34765 goto out;
34766@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34767 break;
34768 case IB_CM_MRA_REQ_RCVD:
34769 case IB_CM_MRA_REP_RCVD:
34770- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34771+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34772 counter[CM_MRA_COUNTER]);
34773 /* fall through */
34774 default:
34775@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
34776 case IB_CM_LAP_IDLE:
34777 break;
34778 case IB_CM_MRA_LAP_SENT:
34779- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34780+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34781 counter[CM_LAP_COUNTER]);
34782 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34783 goto unlock;
34784@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
34785 cm_free_msg(msg);
34786 goto deref;
34787 case IB_CM_LAP_RCVD:
34788- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34789+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34790 counter[CM_LAP_COUNTER]);
34791 goto unlock;
34792 default:
34793@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34794 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34795 if (cur_cm_id_priv) {
34796 spin_unlock_irq(&cm.lock);
34797- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34798+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34799 counter[CM_SIDR_REQ_COUNTER]);
34800 goto out; /* Duplicate message. */
34801 }
34802@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34803 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34804 msg->retries = 1;
34805
34806- atomic_long_add(1 + msg->retries,
34807+ atomic_long_add_unchecked(1 + msg->retries,
34808 &port->counter_group[CM_XMIT].counter[attr_index]);
34809 if (msg->retries)
34810- atomic_long_add(msg->retries,
34811+ atomic_long_add_unchecked(msg->retries,
34812 &port->counter_group[CM_XMIT_RETRIES].
34813 counter[attr_index]);
34814
34815@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34816 }
34817
34818 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34819- atomic_long_inc(&port->counter_group[CM_RECV].
34820+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34821 counter[attr_id - CM_ATTR_ID_OFFSET]);
34822
34823 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34824@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34825 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34826
34827 return sprintf(buf, "%ld\n",
34828- atomic_long_read(&group->counter[cm_attr->index]));
34829+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34830 }
34831
34832-static struct sysfs_ops cm_counter_ops = {
34833+static const struct sysfs_ops cm_counter_ops = {
34834 .show = cm_show_counter
34835 };
34836
34837diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
34838index 8fd3a6f..61d8075 100644
34839--- a/drivers/infiniband/core/cma.c
34840+++ b/drivers/infiniband/core/cma.c
34841@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
34842
34843 req.private_data_len = sizeof(struct cma_hdr) +
34844 conn_param->private_data_len;
34845+ if (req.private_data_len < conn_param->private_data_len)
34846+ return -EINVAL;
34847+
34848 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34849 if (!req.private_data)
34850 return -ENOMEM;
34851@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
34852 memset(&req, 0, sizeof req);
34853 offset = cma_user_data_offset(id_priv->id.ps);
34854 req.private_data_len = offset + conn_param->private_data_len;
34855+ if (req.private_data_len < conn_param->private_data_len)
34856+ return -EINVAL;
34857+
34858 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34859 if (!private_data)
34860 return -ENOMEM;
34861diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34862index 4507043..14ad522 100644
34863--- a/drivers/infiniband/core/fmr_pool.c
34864+++ b/drivers/infiniband/core/fmr_pool.c
34865@@ -97,8 +97,8 @@ struct ib_fmr_pool {
34866
34867 struct task_struct *thread;
34868
34869- atomic_t req_ser;
34870- atomic_t flush_ser;
34871+ atomic_unchecked_t req_ser;
34872+ atomic_unchecked_t flush_ser;
34873
34874 wait_queue_head_t force_wait;
34875 };
34876@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34877 struct ib_fmr_pool *pool = pool_ptr;
34878
34879 do {
34880- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
34881+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
34882 ib_fmr_batch_release(pool);
34883
34884- atomic_inc(&pool->flush_ser);
34885+ atomic_inc_unchecked(&pool->flush_ser);
34886 wake_up_interruptible(&pool->force_wait);
34887
34888 if (pool->flush_function)
34889@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34890 }
34891
34892 set_current_state(TASK_INTERRUPTIBLE);
34893- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
34894+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
34895 !kthread_should_stop())
34896 schedule();
34897 __set_current_state(TASK_RUNNING);
34898@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
34899 pool->dirty_watermark = params->dirty_watermark;
34900 pool->dirty_len = 0;
34901 spin_lock_init(&pool->pool_lock);
34902- atomic_set(&pool->req_ser, 0);
34903- atomic_set(&pool->flush_ser, 0);
34904+ atomic_set_unchecked(&pool->req_ser, 0);
34905+ atomic_set_unchecked(&pool->flush_ser, 0);
34906 init_waitqueue_head(&pool->force_wait);
34907
34908 pool->thread = kthread_run(ib_fmr_cleanup_thread,
34909@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
34910 }
34911 spin_unlock_irq(&pool->pool_lock);
34912
34913- serial = atomic_inc_return(&pool->req_ser);
34914+ serial = atomic_inc_return_unchecked(&pool->req_ser);
34915 wake_up_process(pool->thread);
34916
34917 if (wait_event_interruptible(pool->force_wait,
34918- atomic_read(&pool->flush_ser) - serial >= 0))
34919+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
34920 return -EINTR;
34921
34922 return 0;
34923@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
34924 } else {
34925 list_add_tail(&fmr->list, &pool->dirty_list);
34926 if (++pool->dirty_len >= pool->dirty_watermark) {
34927- atomic_inc(&pool->req_ser);
34928+ atomic_inc_unchecked(&pool->req_ser);
34929 wake_up_process(pool->thread);
34930 }
34931 }
34932diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
34933index 158a214..1558bb7 100644
34934--- a/drivers/infiniband/core/sysfs.c
34935+++ b/drivers/infiniband/core/sysfs.c
34936@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
34937 return port_attr->show(p, port_attr, buf);
34938 }
34939
34940-static struct sysfs_ops port_sysfs_ops = {
34941+static const struct sysfs_ops port_sysfs_ops = {
34942 .show = port_attr_show
34943 };
34944
34945diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
34946index 5440da0..1194ecb 100644
34947--- a/drivers/infiniband/core/uverbs_marshall.c
34948+++ b/drivers/infiniband/core/uverbs_marshall.c
34949@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
34950 dst->grh.sgid_index = src->grh.sgid_index;
34951 dst->grh.hop_limit = src->grh.hop_limit;
34952 dst->grh.traffic_class = src->grh.traffic_class;
34953+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
34954 dst->dlid = src->dlid;
34955 dst->sl = src->sl;
34956 dst->src_path_bits = src->src_path_bits;
34957 dst->static_rate = src->static_rate;
34958 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
34959 dst->port_num = src->port_num;
34960+ dst->reserved = 0;
34961 }
34962 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
34963
34964 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34965 struct ib_qp_attr *src)
34966 {
34967+ dst->qp_state = src->qp_state;
34968 dst->cur_qp_state = src->cur_qp_state;
34969 dst->path_mtu = src->path_mtu;
34970 dst->path_mig_state = src->path_mig_state;
34971@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34972 dst->rnr_retry = src->rnr_retry;
34973 dst->alt_port_num = src->alt_port_num;
34974 dst->alt_timeout = src->alt_timeout;
34975+ memset(dst->reserved, 0, sizeof(dst->reserved));
34976 }
34977 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
34978
34979diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
34980index 100da85..62e6b88 100644
34981--- a/drivers/infiniband/hw/ipath/ipath_fs.c
34982+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
34983@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
34984 struct infinipath_counters counters;
34985 struct ipath_devdata *dd;
34986
34987+ pax_track_stack();
34988+
34989 dd = file->f_path.dentry->d_inode->i_private;
34990 dd->ipath_f_read_counters(dd, &counters);
34991
34992diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
34993index cbde0cf..afaf55c 100644
34994--- a/drivers/infiniband/hw/nes/nes.c
34995+++ b/drivers/infiniband/hw/nes/nes.c
34996@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
34997 LIST_HEAD(nes_adapter_list);
34998 static LIST_HEAD(nes_dev_list);
34999
35000-atomic_t qps_destroyed;
35001+atomic_unchecked_t qps_destroyed;
35002
35003 static unsigned int ee_flsh_adapter;
35004 static unsigned int sysfs_nonidx_addr;
35005@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35006 struct nes_adapter *nesadapter = nesdev->nesadapter;
35007 u32 qp_id;
35008
35009- atomic_inc(&qps_destroyed);
35010+ atomic_inc_unchecked(&qps_destroyed);
35011
35012 /* Free the control structures */
35013
35014diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35015index bcc6abc..9c76b2f 100644
35016--- a/drivers/infiniband/hw/nes/nes.h
35017+++ b/drivers/infiniband/hw/nes/nes.h
35018@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35019 extern unsigned int wqm_quanta;
35020 extern struct list_head nes_adapter_list;
35021
35022-extern atomic_t cm_connects;
35023-extern atomic_t cm_accepts;
35024-extern atomic_t cm_disconnects;
35025-extern atomic_t cm_closes;
35026-extern atomic_t cm_connecteds;
35027-extern atomic_t cm_connect_reqs;
35028-extern atomic_t cm_rejects;
35029-extern atomic_t mod_qp_timouts;
35030-extern atomic_t qps_created;
35031-extern atomic_t qps_destroyed;
35032-extern atomic_t sw_qps_destroyed;
35033+extern atomic_unchecked_t cm_connects;
35034+extern atomic_unchecked_t cm_accepts;
35035+extern atomic_unchecked_t cm_disconnects;
35036+extern atomic_unchecked_t cm_closes;
35037+extern atomic_unchecked_t cm_connecteds;
35038+extern atomic_unchecked_t cm_connect_reqs;
35039+extern atomic_unchecked_t cm_rejects;
35040+extern atomic_unchecked_t mod_qp_timouts;
35041+extern atomic_unchecked_t qps_created;
35042+extern atomic_unchecked_t qps_destroyed;
35043+extern atomic_unchecked_t sw_qps_destroyed;
35044 extern u32 mh_detected;
35045 extern u32 mh_pauses_sent;
35046 extern u32 cm_packets_sent;
35047@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35048 extern u32 cm_listens_created;
35049 extern u32 cm_listens_destroyed;
35050 extern u32 cm_backlog_drops;
35051-extern atomic_t cm_loopbacks;
35052-extern atomic_t cm_nodes_created;
35053-extern atomic_t cm_nodes_destroyed;
35054-extern atomic_t cm_accel_dropped_pkts;
35055-extern atomic_t cm_resets_recvd;
35056+extern atomic_unchecked_t cm_loopbacks;
35057+extern atomic_unchecked_t cm_nodes_created;
35058+extern atomic_unchecked_t cm_nodes_destroyed;
35059+extern atomic_unchecked_t cm_accel_dropped_pkts;
35060+extern atomic_unchecked_t cm_resets_recvd;
35061
35062 extern u32 int_mod_timer_init;
35063 extern u32 int_mod_cq_depth_256;
35064diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35065index 73473db..5ed06e8 100644
35066--- a/drivers/infiniband/hw/nes/nes_cm.c
35067+++ b/drivers/infiniband/hw/nes/nes_cm.c
35068@@ -69,11 +69,11 @@ u32 cm_packets_received;
35069 u32 cm_listens_created;
35070 u32 cm_listens_destroyed;
35071 u32 cm_backlog_drops;
35072-atomic_t cm_loopbacks;
35073-atomic_t cm_nodes_created;
35074-atomic_t cm_nodes_destroyed;
35075-atomic_t cm_accel_dropped_pkts;
35076-atomic_t cm_resets_recvd;
35077+atomic_unchecked_t cm_loopbacks;
35078+atomic_unchecked_t cm_nodes_created;
35079+atomic_unchecked_t cm_nodes_destroyed;
35080+atomic_unchecked_t cm_accel_dropped_pkts;
35081+atomic_unchecked_t cm_resets_recvd;
35082
35083 static inline int mini_cm_accelerated(struct nes_cm_core *,
35084 struct nes_cm_node *);
35085@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35086
35087 static struct nes_cm_core *g_cm_core;
35088
35089-atomic_t cm_connects;
35090-atomic_t cm_accepts;
35091-atomic_t cm_disconnects;
35092-atomic_t cm_closes;
35093-atomic_t cm_connecteds;
35094-atomic_t cm_connect_reqs;
35095-atomic_t cm_rejects;
35096+atomic_unchecked_t cm_connects;
35097+atomic_unchecked_t cm_accepts;
35098+atomic_unchecked_t cm_disconnects;
35099+atomic_unchecked_t cm_closes;
35100+atomic_unchecked_t cm_connecteds;
35101+atomic_unchecked_t cm_connect_reqs;
35102+atomic_unchecked_t cm_rejects;
35103
35104
35105 /**
35106@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35107 cm_node->rem_mac);
35108
35109 add_hte_node(cm_core, cm_node);
35110- atomic_inc(&cm_nodes_created);
35111+ atomic_inc_unchecked(&cm_nodes_created);
35112
35113 return cm_node;
35114 }
35115@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35116 }
35117
35118 atomic_dec(&cm_core->node_cnt);
35119- atomic_inc(&cm_nodes_destroyed);
35120+ atomic_inc_unchecked(&cm_nodes_destroyed);
35121 nesqp = cm_node->nesqp;
35122 if (nesqp) {
35123 nesqp->cm_node = NULL;
35124@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35125
35126 static void drop_packet(struct sk_buff *skb)
35127 {
35128- atomic_inc(&cm_accel_dropped_pkts);
35129+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35130 dev_kfree_skb_any(skb);
35131 }
35132
35133@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35134
35135 int reset = 0; /* whether to send reset in case of err.. */
35136 int passive_state;
35137- atomic_inc(&cm_resets_recvd);
35138+ atomic_inc_unchecked(&cm_resets_recvd);
35139 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35140 " refcnt=%d\n", cm_node, cm_node->state,
35141 atomic_read(&cm_node->ref_count));
35142@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35143 rem_ref_cm_node(cm_node->cm_core, cm_node);
35144 return NULL;
35145 }
35146- atomic_inc(&cm_loopbacks);
35147+ atomic_inc_unchecked(&cm_loopbacks);
35148 loopbackremotenode->loopbackpartner = cm_node;
35149 loopbackremotenode->tcp_cntxt.rcv_wscale =
35150 NES_CM_DEFAULT_RCV_WND_SCALE;
35151@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35152 add_ref_cm_node(cm_node);
35153 } else if (cm_node->state == NES_CM_STATE_TSA) {
35154 rem_ref_cm_node(cm_core, cm_node);
35155- atomic_inc(&cm_accel_dropped_pkts);
35156+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35157 dev_kfree_skb_any(skb);
35158 break;
35159 }
35160@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35161
35162 if ((cm_id) && (cm_id->event_handler)) {
35163 if (issue_disconn) {
35164- atomic_inc(&cm_disconnects);
35165+ atomic_inc_unchecked(&cm_disconnects);
35166 cm_event.event = IW_CM_EVENT_DISCONNECT;
35167 cm_event.status = disconn_status;
35168 cm_event.local_addr = cm_id->local_addr;
35169@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35170 }
35171
35172 if (issue_close) {
35173- atomic_inc(&cm_closes);
35174+ atomic_inc_unchecked(&cm_closes);
35175 nes_disconnect(nesqp, 1);
35176
35177 cm_id->provider_data = nesqp;
35178@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35179
35180 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35181 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35182- atomic_inc(&cm_accepts);
35183+ atomic_inc_unchecked(&cm_accepts);
35184
35185 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35186 atomic_read(&nesvnic->netdev->refcnt));
35187@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35188
35189 struct nes_cm_core *cm_core;
35190
35191- atomic_inc(&cm_rejects);
35192+ atomic_inc_unchecked(&cm_rejects);
35193 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35194 loopback = cm_node->loopbackpartner;
35195 cm_core = cm_node->cm_core;
35196@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35197 ntohl(cm_id->local_addr.sin_addr.s_addr),
35198 ntohs(cm_id->local_addr.sin_port));
35199
35200- atomic_inc(&cm_connects);
35201+ atomic_inc_unchecked(&cm_connects);
35202 nesqp->active_conn = 1;
35203
35204 /* cache the cm_id in the qp */
35205@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35206 if (nesqp->destroyed) {
35207 return;
35208 }
35209- atomic_inc(&cm_connecteds);
35210+ atomic_inc_unchecked(&cm_connecteds);
35211 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35212 " local port 0x%04X. jiffies = %lu.\n",
35213 nesqp->hwqp.qp_id,
35214@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35215
35216 ret = cm_id->event_handler(cm_id, &cm_event);
35217 cm_id->add_ref(cm_id);
35218- atomic_inc(&cm_closes);
35219+ atomic_inc_unchecked(&cm_closes);
35220 cm_event.event = IW_CM_EVENT_CLOSE;
35221 cm_event.status = IW_CM_EVENT_STATUS_OK;
35222 cm_event.provider_data = cm_id->provider_data;
35223@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35224 return;
35225 cm_id = cm_node->cm_id;
35226
35227- atomic_inc(&cm_connect_reqs);
35228+ atomic_inc_unchecked(&cm_connect_reqs);
35229 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35230 cm_node, cm_id, jiffies);
35231
35232@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35233 return;
35234 cm_id = cm_node->cm_id;
35235
35236- atomic_inc(&cm_connect_reqs);
35237+ atomic_inc_unchecked(&cm_connect_reqs);
35238 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35239 cm_node, cm_id, jiffies);
35240
35241diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35242index e593af3..870694a 100644
35243--- a/drivers/infiniband/hw/nes/nes_nic.c
35244+++ b/drivers/infiniband/hw/nes/nes_nic.c
35245@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35246 target_stat_values[++index] = mh_detected;
35247 target_stat_values[++index] = mh_pauses_sent;
35248 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35249- target_stat_values[++index] = atomic_read(&cm_connects);
35250- target_stat_values[++index] = atomic_read(&cm_accepts);
35251- target_stat_values[++index] = atomic_read(&cm_disconnects);
35252- target_stat_values[++index] = atomic_read(&cm_connecteds);
35253- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35254- target_stat_values[++index] = atomic_read(&cm_rejects);
35255- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35256- target_stat_values[++index] = atomic_read(&qps_created);
35257- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35258- target_stat_values[++index] = atomic_read(&qps_destroyed);
35259- target_stat_values[++index] = atomic_read(&cm_closes);
35260+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35261+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35262+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35263+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35264+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35265+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35266+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35267+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35268+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35269+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35270+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35271 target_stat_values[++index] = cm_packets_sent;
35272 target_stat_values[++index] = cm_packets_bounced;
35273 target_stat_values[++index] = cm_packets_created;
35274@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35275 target_stat_values[++index] = cm_listens_created;
35276 target_stat_values[++index] = cm_listens_destroyed;
35277 target_stat_values[++index] = cm_backlog_drops;
35278- target_stat_values[++index] = atomic_read(&cm_loopbacks);
35279- target_stat_values[++index] = atomic_read(&cm_nodes_created);
35280- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35281- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35282- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35283+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35284+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35285+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35286+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35287+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35288 target_stat_values[++index] = int_mod_timer_init;
35289 target_stat_values[++index] = int_mod_cq_depth_1;
35290 target_stat_values[++index] = int_mod_cq_depth_4;
35291diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35292index a680c42..f914deb 100644
35293--- a/drivers/infiniband/hw/nes/nes_verbs.c
35294+++ b/drivers/infiniband/hw/nes/nes_verbs.c
35295@@ -45,9 +45,9 @@
35296
35297 #include <rdma/ib_umem.h>
35298
35299-atomic_t mod_qp_timouts;
35300-atomic_t qps_created;
35301-atomic_t sw_qps_destroyed;
35302+atomic_unchecked_t mod_qp_timouts;
35303+atomic_unchecked_t qps_created;
35304+atomic_unchecked_t sw_qps_destroyed;
35305
35306 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35307
35308@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35309 if (init_attr->create_flags)
35310 return ERR_PTR(-EINVAL);
35311
35312- atomic_inc(&qps_created);
35313+ atomic_inc_unchecked(&qps_created);
35314 switch (init_attr->qp_type) {
35315 case IB_QPT_RC:
35316 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35317@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35318 struct iw_cm_event cm_event;
35319 int ret;
35320
35321- atomic_inc(&sw_qps_destroyed);
35322+ atomic_inc_unchecked(&sw_qps_destroyed);
35323 nesqp->destroyed = 1;
35324
35325 /* Blow away the connection if it exists. */
35326diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35327index ac11be0..3883c04 100644
35328--- a/drivers/input/gameport/gameport.c
35329+++ b/drivers/input/gameport/gameport.c
35330@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35331 */
35332 static void gameport_init_port(struct gameport *gameport)
35333 {
35334- static atomic_t gameport_no = ATOMIC_INIT(0);
35335+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35336
35337 __module_get(THIS_MODULE);
35338
35339 mutex_init(&gameport->drv_mutex);
35340 device_initialize(&gameport->dev);
35341- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35342+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35343 gameport->dev.bus = &gameport_bus;
35344 gameport->dev.release = gameport_release_port;
35345 if (gameport->parent)
35346diff --git a/drivers/input/input.c b/drivers/input/input.c
35347index c82ae82..8cfb9cb 100644
35348--- a/drivers/input/input.c
35349+++ b/drivers/input/input.c
35350@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35351 */
35352 int input_register_device(struct input_dev *dev)
35353 {
35354- static atomic_t input_no = ATOMIC_INIT(0);
35355+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35356 struct input_handler *handler;
35357 const char *path;
35358 int error;
35359@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35360 dev->setkeycode = input_default_setkeycode;
35361
35362 dev_set_name(&dev->dev, "input%ld",
35363- (unsigned long) atomic_inc_return(&input_no) - 1);
35364+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35365
35366 error = device_add(&dev->dev);
35367 if (error)
35368diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35369index ca13a6b..b032b0c 100644
35370--- a/drivers/input/joystick/sidewinder.c
35371+++ b/drivers/input/joystick/sidewinder.c
35372@@ -30,6 +30,7 @@
35373 #include <linux/kernel.h>
35374 #include <linux/module.h>
35375 #include <linux/slab.h>
35376+#include <linux/sched.h>
35377 #include <linux/init.h>
35378 #include <linux/input.h>
35379 #include <linux/gameport.h>
35380@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35381 unsigned char buf[SW_LENGTH];
35382 int i;
35383
35384+ pax_track_stack();
35385+
35386 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35387
35388 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35389diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35390index 79e3edc..01412b9 100644
35391--- a/drivers/input/joystick/xpad.c
35392+++ b/drivers/input/joystick/xpad.c
35393@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35394
35395 static int xpad_led_probe(struct usb_xpad *xpad)
35396 {
35397- static atomic_t led_seq = ATOMIC_INIT(0);
35398+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35399 long led_no;
35400 struct xpad_led *led;
35401 struct led_classdev *led_cdev;
35402@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35403 if (!led)
35404 return -ENOMEM;
35405
35406- led_no = (long)atomic_inc_return(&led_seq) - 1;
35407+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35408
35409 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35410 led->xpad = xpad;
35411diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35412index 0236f0d..c7327f1 100644
35413--- a/drivers/input/serio/serio.c
35414+++ b/drivers/input/serio/serio.c
35415@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35416 */
35417 static void serio_init_port(struct serio *serio)
35418 {
35419- static atomic_t serio_no = ATOMIC_INIT(0);
35420+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35421
35422 __module_get(THIS_MODULE);
35423
35424@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35425 mutex_init(&serio->drv_mutex);
35426 device_initialize(&serio->dev);
35427 dev_set_name(&serio->dev, "serio%ld",
35428- (long)atomic_inc_return(&serio_no) - 1);
35429+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
35430 serio->dev.bus = &serio_bus;
35431 serio->dev.release = serio_release_port;
35432 if (serio->parent) {
35433diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35434index 33dcd8d..2783d25 100644
35435--- a/drivers/isdn/gigaset/common.c
35436+++ b/drivers/isdn/gigaset/common.c
35437@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35438 cs->commands_pending = 0;
35439 cs->cur_at_seq = 0;
35440 cs->gotfwver = -1;
35441- cs->open_count = 0;
35442+ local_set(&cs->open_count, 0);
35443 cs->dev = NULL;
35444 cs->tty = NULL;
35445 cs->tty_dev = NULL;
35446diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35447index a2f6125..6a70677 100644
35448--- a/drivers/isdn/gigaset/gigaset.h
35449+++ b/drivers/isdn/gigaset/gigaset.h
35450@@ -34,6 +34,7 @@
35451 #include <linux/tty_driver.h>
35452 #include <linux/list.h>
35453 #include <asm/atomic.h>
35454+#include <asm/local.h>
35455
35456 #define GIG_VERSION {0,5,0,0}
35457 #define GIG_COMPAT {0,4,0,0}
35458@@ -446,7 +447,7 @@ struct cardstate {
35459 spinlock_t cmdlock;
35460 unsigned curlen, cmdbytes;
35461
35462- unsigned open_count;
35463+ local_t open_count;
35464 struct tty_struct *tty;
35465 struct tasklet_struct if_wake_tasklet;
35466 unsigned control_state;
35467diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35468index b3065b8..c7e8cc9 100644
35469--- a/drivers/isdn/gigaset/interface.c
35470+++ b/drivers/isdn/gigaset/interface.c
35471@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35472 return -ERESTARTSYS; // FIXME -EINTR?
35473 tty->driver_data = cs;
35474
35475- ++cs->open_count;
35476-
35477- if (cs->open_count == 1) {
35478+ if (local_inc_return(&cs->open_count) == 1) {
35479 spin_lock_irqsave(&cs->lock, flags);
35480 cs->tty = tty;
35481 spin_unlock_irqrestore(&cs->lock, flags);
35482@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35483
35484 if (!cs->connected)
35485 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35486- else if (!cs->open_count)
35487+ else if (!local_read(&cs->open_count))
35488 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35489 else {
35490- if (!--cs->open_count) {
35491+ if (!local_dec_return(&cs->open_count)) {
35492 spin_lock_irqsave(&cs->lock, flags);
35493 cs->tty = NULL;
35494 spin_unlock_irqrestore(&cs->lock, flags);
35495@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35496 if (!cs->connected) {
35497 gig_dbg(DEBUG_IF, "not connected");
35498 retval = -ENODEV;
35499- } else if (!cs->open_count)
35500+ } else if (!local_read(&cs->open_count))
35501 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35502 else {
35503 retval = 0;
35504@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35505 if (!cs->connected) {
35506 gig_dbg(DEBUG_IF, "not connected");
35507 retval = -ENODEV;
35508- } else if (!cs->open_count)
35509+ } else if (!local_read(&cs->open_count))
35510 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35511 else if (cs->mstate != MS_LOCKED) {
35512 dev_warn(cs->dev, "can't write to unlocked device\n");
35513@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35514 if (!cs->connected) {
35515 gig_dbg(DEBUG_IF, "not connected");
35516 retval = -ENODEV;
35517- } else if (!cs->open_count)
35518+ } else if (!local_read(&cs->open_count))
35519 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35520 else if (cs->mstate != MS_LOCKED) {
35521 dev_warn(cs->dev, "can't write to unlocked device\n");
35522@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35523
35524 if (!cs->connected)
35525 gig_dbg(DEBUG_IF, "not connected");
35526- else if (!cs->open_count)
35527+ else if (!local_read(&cs->open_count))
35528 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35529 else if (cs->mstate != MS_LOCKED)
35530 dev_warn(cs->dev, "can't write to unlocked device\n");
35531@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35532
35533 if (!cs->connected)
35534 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35535- else if (!cs->open_count)
35536+ else if (!local_read(&cs->open_count))
35537 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35538 else {
35539 //FIXME
35540@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35541
35542 if (!cs->connected)
35543 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35544- else if (!cs->open_count)
35545+ else if (!local_read(&cs->open_count))
35546 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35547 else {
35548 //FIXME
35549@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35550 goto out;
35551 }
35552
35553- if (!cs->open_count) {
35554+ if (!local_read(&cs->open_count)) {
35555 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35556 goto out;
35557 }
35558diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35559index a7c0083..62a7cb6 100644
35560--- a/drivers/isdn/hardware/avm/b1.c
35561+++ b/drivers/isdn/hardware/avm/b1.c
35562@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35563 }
35564 if (left) {
35565 if (t4file->user) {
35566- if (copy_from_user(buf, dp, left))
35567+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35568 return -EFAULT;
35569 } else {
35570 memcpy(buf, dp, left);
35571@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35572 }
35573 if (left) {
35574 if (config->user) {
35575- if (copy_from_user(buf, dp, left))
35576+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35577 return -EFAULT;
35578 } else {
35579 memcpy(buf, dp, left);
35580diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35581index f130724..c373c68 100644
35582--- a/drivers/isdn/hardware/eicon/capidtmf.c
35583+++ b/drivers/isdn/hardware/eicon/capidtmf.c
35584@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35585 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35586 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35587
35588+ pax_track_stack();
35589
35590 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35591 {
35592diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35593index 4d425c6..a9be6c4 100644
35594--- a/drivers/isdn/hardware/eicon/capifunc.c
35595+++ b/drivers/isdn/hardware/eicon/capifunc.c
35596@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35597 IDI_SYNC_REQ req;
35598 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35599
35600+ pax_track_stack();
35601+
35602 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35603
35604 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35605diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35606index 3029234..ef0d9e2 100644
35607--- a/drivers/isdn/hardware/eicon/diddfunc.c
35608+++ b/drivers/isdn/hardware/eicon/diddfunc.c
35609@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35610 IDI_SYNC_REQ req;
35611 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35612
35613+ pax_track_stack();
35614+
35615 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35616
35617 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35618diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35619index d36a4c0..11e7d1a 100644
35620--- a/drivers/isdn/hardware/eicon/divasfunc.c
35621+++ b/drivers/isdn/hardware/eicon/divasfunc.c
35622@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35623 IDI_SYNC_REQ req;
35624 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35625
35626+ pax_track_stack();
35627+
35628 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35629
35630 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35631diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35632index 85784a7..a19ca98 100644
35633--- a/drivers/isdn/hardware/eicon/divasync.h
35634+++ b/drivers/isdn/hardware/eicon/divasync.h
35635@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35636 } diva_didd_add_adapter_t;
35637 typedef struct _diva_didd_remove_adapter {
35638 IDI_CALL p_request;
35639-} diva_didd_remove_adapter_t;
35640+} __no_const diva_didd_remove_adapter_t;
35641 typedef struct _diva_didd_read_adapter_array {
35642 void * buffer;
35643 dword length;
35644diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35645index db87d51..7d09acf 100644
35646--- a/drivers/isdn/hardware/eicon/idifunc.c
35647+++ b/drivers/isdn/hardware/eicon/idifunc.c
35648@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35649 IDI_SYNC_REQ req;
35650 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35651
35652+ pax_track_stack();
35653+
35654 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35655
35656 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35657diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35658index ae89fb8..0fab299 100644
35659--- a/drivers/isdn/hardware/eicon/message.c
35660+++ b/drivers/isdn/hardware/eicon/message.c
35661@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35662 dword d;
35663 word w;
35664
35665+ pax_track_stack();
35666+
35667 a = plci->adapter;
35668 Id = ((word)plci->Id<<8)|a->Id;
35669 PUT_WORD(&SS_Ind[4],0x0000);
35670@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35671 word j, n, w;
35672 dword d;
35673
35674+ pax_track_stack();
35675+
35676
35677 for(i=0;i<8;i++) bp_parms[i].length = 0;
35678 for(i=0;i<2;i++) global_config[i].length = 0;
35679@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35680 const byte llc3[] = {4,3,2,2,6,6,0};
35681 const byte header[] = {0,2,3,3,0,0,0};
35682
35683+ pax_track_stack();
35684+
35685 for(i=0;i<8;i++) bp_parms[i].length = 0;
35686 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35687 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35688@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35689 word appl_number_group_type[MAX_APPL];
35690 PLCI *auxplci;
35691
35692+ pax_track_stack();
35693+
35694 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35695
35696 if(!a->group_optimization_enabled)
35697diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35698index a564b75..f3cf8b5 100644
35699--- a/drivers/isdn/hardware/eicon/mntfunc.c
35700+++ b/drivers/isdn/hardware/eicon/mntfunc.c
35701@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35702 IDI_SYNC_REQ req;
35703 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35704
35705+ pax_track_stack();
35706+
35707 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35708
35709 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35710diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35711index a3bd163..8956575 100644
35712--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35713+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35714@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35715 typedef struct _diva_os_idi_adapter_interface {
35716 diva_init_card_proc_t cleanup_adapter_proc;
35717 diva_cmd_card_proc_t cmd_proc;
35718-} diva_os_idi_adapter_interface_t;
35719+} __no_const diva_os_idi_adapter_interface_t;
35720
35721 typedef struct _diva_os_xdi_adapter {
35722 struct list_head link;
35723diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35724index adb1e8c..21b590b 100644
35725--- a/drivers/isdn/i4l/isdn_common.c
35726+++ b/drivers/isdn/i4l/isdn_common.c
35727@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35728 } iocpar;
35729 void __user *argp = (void __user *)arg;
35730
35731+ pax_track_stack();
35732+
35733 #define name iocpar.name
35734 #define bname iocpar.bname
35735 #define iocts iocpar.iocts
35736diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35737index bf7997a..cf091db 100644
35738--- a/drivers/isdn/icn/icn.c
35739+++ b/drivers/isdn/icn/icn.c
35740@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35741 if (count > len)
35742 count = len;
35743 if (user) {
35744- if (copy_from_user(msg, buf, count))
35745+ if (count > sizeof msg || copy_from_user(msg, buf, count))
35746 return -EFAULT;
35747 } else
35748 memcpy(msg, buf, count);
35749diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35750index feb0fa4..f76f830 100644
35751--- a/drivers/isdn/mISDN/socket.c
35752+++ b/drivers/isdn/mISDN/socket.c
35753@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35754 if (dev) {
35755 struct mISDN_devinfo di;
35756
35757+ memset(&di, 0, sizeof(di));
35758 di.id = dev->id;
35759 di.Dprotocols = dev->Dprotocols;
35760 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35761@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35762 if (dev) {
35763 struct mISDN_devinfo di;
35764
35765+ memset(&di, 0, sizeof(di));
35766 di.id = dev->id;
35767 di.Dprotocols = dev->Dprotocols;
35768 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35769diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
35770index 485be8b..f0225bc 100644
35771--- a/drivers/isdn/sc/interrupt.c
35772+++ b/drivers/isdn/sc/interrupt.c
35773@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35774 }
35775 else if(callid>=0x0000 && callid<=0x7FFF)
35776 {
35777+ int len;
35778+
35779 pr_debug("%s: Got Incoming Call\n",
35780 sc_adapter[card]->devicename);
35781- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
35782- strcpy(setup.eazmsn,
35783- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
35784+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
35785+ sizeof(setup.phone));
35786+ if (len >= sizeof(setup.phone))
35787+ continue;
35788+ len = strlcpy(setup.eazmsn,
35789+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35790+ sizeof(setup.eazmsn));
35791+ if (len >= sizeof(setup.eazmsn))
35792+ continue;
35793 setup.si1 = 7;
35794 setup.si2 = 0;
35795 setup.plan = 0;
35796@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35797 * Handle a GetMyNumber Rsp
35798 */
35799 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
35800- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
35801+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35802+ rcvmsg.msg_data.byte_array,
35803+ sizeof(rcvmsg.msg_data.byte_array));
35804 continue;
35805 }
35806
35807diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35808index 8744d24..d1f9a9a 100644
35809--- a/drivers/lguest/core.c
35810+++ b/drivers/lguest/core.c
35811@@ -91,9 +91,17 @@ static __init int map_switcher(void)
35812 * it's worked so far. The end address needs +1 because __get_vm_area
35813 * allocates an extra guard page, so we need space for that.
35814 */
35815+
35816+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35817+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35818+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35819+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35820+#else
35821 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35822 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35823 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35824+#endif
35825+
35826 if (!switcher_vma) {
35827 err = -ENOMEM;
35828 printk("lguest: could not map switcher pages high\n");
35829@@ -118,7 +126,7 @@ static __init int map_switcher(void)
35830 * Now the Switcher is mapped at the right address, we can't fail!
35831 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
35832 */
35833- memcpy(switcher_vma->addr, start_switcher_text,
35834+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35835 end_switcher_text - start_switcher_text);
35836
35837 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35838diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35839index 6ae3888..8b38145 100644
35840--- a/drivers/lguest/x86/core.c
35841+++ b/drivers/lguest/x86/core.c
35842@@ -59,7 +59,7 @@ static struct {
35843 /* Offset from where switcher.S was compiled to where we've copied it */
35844 static unsigned long switcher_offset(void)
35845 {
35846- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35847+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35848 }
35849
35850 /* This cpu's struct lguest_pages. */
35851@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35852 * These copies are pretty cheap, so we do them unconditionally: */
35853 /* Save the current Host top-level page directory.
35854 */
35855+
35856+#ifdef CONFIG_PAX_PER_CPU_PGD
35857+ pages->state.host_cr3 = read_cr3();
35858+#else
35859 pages->state.host_cr3 = __pa(current->mm->pgd);
35860+#endif
35861+
35862 /*
35863 * Set up the Guest's page tables to see this CPU's pages (and no
35864 * other CPU's pages).
35865@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
35866 * compiled-in switcher code and the high-mapped copy we just made.
35867 */
35868 for (i = 0; i < IDT_ENTRIES; i++)
35869- default_idt_entries[i] += switcher_offset();
35870+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
35871
35872 /*
35873 * Set up the Switcher's per-cpu areas.
35874@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
35875 * it will be undisturbed when we switch. To change %cs and jump we
35876 * need this structure to feed to Intel's "lcall" instruction.
35877 */
35878- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
35879+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
35880 lguest_entry.segment = LGUEST_CS;
35881
35882 /*
35883diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
35884index 40634b0..4f5855e 100644
35885--- a/drivers/lguest/x86/switcher_32.S
35886+++ b/drivers/lguest/x86/switcher_32.S
35887@@ -87,6 +87,7 @@
35888 #include <asm/page.h>
35889 #include <asm/segment.h>
35890 #include <asm/lguest.h>
35891+#include <asm/processor-flags.h>
35892
35893 // We mark the start of the code to copy
35894 // It's placed in .text tho it's never run here
35895@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
35896 // Changes type when we load it: damn Intel!
35897 // For after we switch over our page tables
35898 // That entry will be read-only: we'd crash.
35899+
35900+#ifdef CONFIG_PAX_KERNEXEC
35901+ mov %cr0, %edx
35902+ xor $X86_CR0_WP, %edx
35903+ mov %edx, %cr0
35904+#endif
35905+
35906 movl $(GDT_ENTRY_TSS*8), %edx
35907 ltr %dx
35908
35909@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
35910 // Let's clear it again for our return.
35911 // The GDT descriptor of the Host
35912 // Points to the table after two "size" bytes
35913- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
35914+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
35915 // Clear "used" from type field (byte 5, bit 2)
35916- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
35917+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
35918+
35919+#ifdef CONFIG_PAX_KERNEXEC
35920+ mov %cr0, %eax
35921+ xor $X86_CR0_WP, %eax
35922+ mov %eax, %cr0
35923+#endif
35924
35925 // Once our page table's switched, the Guest is live!
35926 // The Host fades as we run this final step.
35927@@ -295,13 +309,12 @@ deliver_to_host:
35928 // I consulted gcc, and it gave
35929 // These instructions, which I gladly credit:
35930 leal (%edx,%ebx,8), %eax
35931- movzwl (%eax),%edx
35932- movl 4(%eax), %eax
35933- xorw %ax, %ax
35934- orl %eax, %edx
35935+ movl 4(%eax), %edx
35936+ movw (%eax), %dx
35937 // Now the address of the handler's in %edx
35938 // We call it now: its "iret" drops us home.
35939- jmp *%edx
35940+ ljmp $__KERNEL_CS, $1f
35941+1: jmp *%edx
35942
35943 // Every interrupt can come to us here
35944 // But we must truly tell each apart.
35945diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
35946index 588a5b0..b71db89 100644
35947--- a/drivers/macintosh/macio_asic.c
35948+++ b/drivers/macintosh/macio_asic.c
35949@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
35950 * MacIO is matched against any Apple ID, it's probe() function
35951 * will then decide wether it applies or not
35952 */
35953-static const struct pci_device_id __devinitdata pci_ids [] = { {
35954+static const struct pci_device_id __devinitconst pci_ids [] = { {
35955 .vendor = PCI_VENDOR_ID_APPLE,
35956 .device = PCI_ANY_ID,
35957 .subvendor = PCI_ANY_ID,
35958diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
35959index a348bb0..ecd9b3f 100644
35960--- a/drivers/macintosh/via-pmu-backlight.c
35961+++ b/drivers/macintosh/via-pmu-backlight.c
35962@@ -15,7 +15,7 @@
35963
35964 #define MAX_PMU_LEVEL 0xFF
35965
35966-static struct backlight_ops pmu_backlight_data;
35967+static const struct backlight_ops pmu_backlight_data;
35968 static DEFINE_SPINLOCK(pmu_backlight_lock);
35969 static int sleeping, uses_pmu_bl;
35970 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
35971@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
35972 return bd->props.brightness;
35973 }
35974
35975-static struct backlight_ops pmu_backlight_data = {
35976+static const struct backlight_ops pmu_backlight_data = {
35977 .get_brightness = pmu_backlight_get_brightness,
35978 .update_status = pmu_backlight_update_status,
35979
35980diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
35981index 6f308a4..b5f7ff7 100644
35982--- a/drivers/macintosh/via-pmu.c
35983+++ b/drivers/macintosh/via-pmu.c
35984@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
35985 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
35986 }
35987
35988-static struct platform_suspend_ops pmu_pm_ops = {
35989+static const struct platform_suspend_ops pmu_pm_ops = {
35990 .enter = powerbook_sleep,
35991 .valid = pmu_sleep_valid,
35992 };
35993diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
35994index 818b617..4656e38 100644
35995--- a/drivers/md/dm-ioctl.c
35996+++ b/drivers/md/dm-ioctl.c
35997@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
35998 cmd == DM_LIST_VERSIONS_CMD)
35999 return 0;
36000
36001- if ((cmd == DM_DEV_CREATE_CMD)) {
36002+ if (cmd == DM_DEV_CREATE_CMD) {
36003 if (!*param->name) {
36004 DMWARN("name not supplied when creating device");
36005 return -EINVAL;
36006diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36007index 6021d0a..a878643 100644
36008--- a/drivers/md/dm-raid1.c
36009+++ b/drivers/md/dm-raid1.c
36010@@ -41,7 +41,7 @@ enum dm_raid1_error {
36011
36012 struct mirror {
36013 struct mirror_set *ms;
36014- atomic_t error_count;
36015+ atomic_unchecked_t error_count;
36016 unsigned long error_type;
36017 struct dm_dev *dev;
36018 sector_t offset;
36019@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36020 * simple way to tell if a device has encountered
36021 * errors.
36022 */
36023- atomic_inc(&m->error_count);
36024+ atomic_inc_unchecked(&m->error_count);
36025
36026 if (test_and_set_bit(error_type, &m->error_type))
36027 return;
36028@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36029 }
36030
36031 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36032- if (!atomic_read(&new->error_count)) {
36033+ if (!atomic_read_unchecked(&new->error_count)) {
36034 set_default_mirror(new);
36035 break;
36036 }
36037@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36038 struct mirror *m = get_default_mirror(ms);
36039
36040 do {
36041- if (likely(!atomic_read(&m->error_count)))
36042+ if (likely(!atomic_read_unchecked(&m->error_count)))
36043 return m;
36044
36045 if (m-- == ms->mirror)
36046@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36047 {
36048 struct mirror *default_mirror = get_default_mirror(m->ms);
36049
36050- return !atomic_read(&default_mirror->error_count);
36051+ return !atomic_read_unchecked(&default_mirror->error_count);
36052 }
36053
36054 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36055@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36056 */
36057 if (likely(region_in_sync(ms, region, 1)))
36058 m = choose_mirror(ms, bio->bi_sector);
36059- else if (m && atomic_read(&m->error_count))
36060+ else if (m && atomic_read_unchecked(&m->error_count))
36061 m = NULL;
36062
36063 if (likely(m))
36064@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36065 }
36066
36067 ms->mirror[mirror].ms = ms;
36068- atomic_set(&(ms->mirror[mirror].error_count), 0);
36069+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36070 ms->mirror[mirror].error_type = 0;
36071 ms->mirror[mirror].offset = offset;
36072
36073@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36074 */
36075 static char device_status_char(struct mirror *m)
36076 {
36077- if (!atomic_read(&(m->error_count)))
36078+ if (!atomic_read_unchecked(&(m->error_count)))
36079 return 'A';
36080
36081 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36082diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36083index bd58703..9f26571 100644
36084--- a/drivers/md/dm-stripe.c
36085+++ b/drivers/md/dm-stripe.c
36086@@ -20,7 +20,7 @@ struct stripe {
36087 struct dm_dev *dev;
36088 sector_t physical_start;
36089
36090- atomic_t error_count;
36091+ atomic_unchecked_t error_count;
36092 };
36093
36094 struct stripe_c {
36095@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36096 kfree(sc);
36097 return r;
36098 }
36099- atomic_set(&(sc->stripe[i].error_count), 0);
36100+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36101 }
36102
36103 ti->private = sc;
36104@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36105 DMEMIT("%d ", sc->stripes);
36106 for (i = 0; i < sc->stripes; i++) {
36107 DMEMIT("%s ", sc->stripe[i].dev->name);
36108- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36109+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36110 'D' : 'A';
36111 }
36112 buffer[i] = '\0';
36113@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36114 */
36115 for (i = 0; i < sc->stripes; i++)
36116 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36117- atomic_inc(&(sc->stripe[i].error_count));
36118- if (atomic_read(&(sc->stripe[i].error_count)) <
36119+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
36120+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36121 DM_IO_ERROR_THRESHOLD)
36122 queue_work(kstriped, &sc->kstriped_ws);
36123 }
36124diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36125index 4b04590..13a77b2 100644
36126--- a/drivers/md/dm-sysfs.c
36127+++ b/drivers/md/dm-sysfs.c
36128@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36129 NULL,
36130 };
36131
36132-static struct sysfs_ops dm_sysfs_ops = {
36133+static const struct sysfs_ops dm_sysfs_ops = {
36134 .show = dm_attr_show,
36135 };
36136
36137diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36138index 03345bb..332250d 100644
36139--- a/drivers/md/dm-table.c
36140+++ b/drivers/md/dm-table.c
36141@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36142 if (!dev_size)
36143 return 0;
36144
36145- if ((start >= dev_size) || (start + len > dev_size)) {
36146+ if ((start >= dev_size) || (len > dev_size - start)) {
36147 DMWARN("%s: %s too small for target: "
36148 "start=%llu, len=%llu, dev_size=%llu",
36149 dm_device_name(ti->table->md), bdevname(bdev, b),
36150diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36151index c988ac2..c418141 100644
36152--- a/drivers/md/dm.c
36153+++ b/drivers/md/dm.c
36154@@ -165,9 +165,9 @@ struct mapped_device {
36155 /*
36156 * Event handling.
36157 */
36158- atomic_t event_nr;
36159+ atomic_unchecked_t event_nr;
36160 wait_queue_head_t eventq;
36161- atomic_t uevent_seq;
36162+ atomic_unchecked_t uevent_seq;
36163 struct list_head uevent_list;
36164 spinlock_t uevent_lock; /* Protect access to uevent_list */
36165
36166@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36167 rwlock_init(&md->map_lock);
36168 atomic_set(&md->holders, 1);
36169 atomic_set(&md->open_count, 0);
36170- atomic_set(&md->event_nr, 0);
36171- atomic_set(&md->uevent_seq, 0);
36172+ atomic_set_unchecked(&md->event_nr, 0);
36173+ atomic_set_unchecked(&md->uevent_seq, 0);
36174 INIT_LIST_HEAD(&md->uevent_list);
36175 spin_lock_init(&md->uevent_lock);
36176
36177@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36178
36179 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36180
36181- atomic_inc(&md->event_nr);
36182+ atomic_inc_unchecked(&md->event_nr);
36183 wake_up(&md->eventq);
36184 }
36185
36186@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36187
36188 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36189 {
36190- return atomic_add_return(1, &md->uevent_seq);
36191+ return atomic_add_return_unchecked(1, &md->uevent_seq);
36192 }
36193
36194 uint32_t dm_get_event_nr(struct mapped_device *md)
36195 {
36196- return atomic_read(&md->event_nr);
36197+ return atomic_read_unchecked(&md->event_nr);
36198 }
36199
36200 int dm_wait_event(struct mapped_device *md, int event_nr)
36201 {
36202 return wait_event_interruptible(md->eventq,
36203- (event_nr != atomic_read(&md->event_nr)));
36204+ (event_nr != atomic_read_unchecked(&md->event_nr)));
36205 }
36206
36207 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36208diff --git a/drivers/md/md.c b/drivers/md/md.c
36209index 4ce6e2f..7a9530a 100644
36210--- a/drivers/md/md.c
36211+++ b/drivers/md/md.c
36212@@ -153,10 +153,10 @@ static int start_readonly;
36213 * start build, activate spare
36214 */
36215 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36216-static atomic_t md_event_count;
36217+static atomic_unchecked_t md_event_count;
36218 void md_new_event(mddev_t *mddev)
36219 {
36220- atomic_inc(&md_event_count);
36221+ atomic_inc_unchecked(&md_event_count);
36222 wake_up(&md_event_waiters);
36223 }
36224 EXPORT_SYMBOL_GPL(md_new_event);
36225@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36226 */
36227 static void md_new_event_inintr(mddev_t *mddev)
36228 {
36229- atomic_inc(&md_event_count);
36230+ atomic_inc_unchecked(&md_event_count);
36231 wake_up(&md_event_waiters);
36232 }
36233
36234@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36235
36236 rdev->preferred_minor = 0xffff;
36237 rdev->data_offset = le64_to_cpu(sb->data_offset);
36238- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36239+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36240
36241 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36242 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36243@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36244 else
36245 sb->resync_offset = cpu_to_le64(0);
36246
36247- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36248+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36249
36250 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36251 sb->size = cpu_to_le64(mddev->dev_sectors);
36252@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36253 static ssize_t
36254 errors_show(mdk_rdev_t *rdev, char *page)
36255 {
36256- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36257+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36258 }
36259
36260 static ssize_t
36261@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36262 char *e;
36263 unsigned long n = simple_strtoul(buf, &e, 10);
36264 if (*buf && (*e == 0 || *e == '\n')) {
36265- atomic_set(&rdev->corrected_errors, n);
36266+ atomic_set_unchecked(&rdev->corrected_errors, n);
36267 return len;
36268 }
36269 return -EINVAL;
36270@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36271 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36272 kfree(rdev);
36273 }
36274-static struct sysfs_ops rdev_sysfs_ops = {
36275+static const struct sysfs_ops rdev_sysfs_ops = {
36276 .show = rdev_attr_show,
36277 .store = rdev_attr_store,
36278 };
36279@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36280 rdev->data_offset = 0;
36281 rdev->sb_events = 0;
36282 atomic_set(&rdev->nr_pending, 0);
36283- atomic_set(&rdev->read_errors, 0);
36284- atomic_set(&rdev->corrected_errors, 0);
36285+ atomic_set_unchecked(&rdev->read_errors, 0);
36286+ atomic_set_unchecked(&rdev->corrected_errors, 0);
36287
36288 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36289 if (!size) {
36290@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36291 kfree(mddev);
36292 }
36293
36294-static struct sysfs_ops md_sysfs_ops = {
36295+static const struct sysfs_ops md_sysfs_ops = {
36296 .show = md_attr_show,
36297 .store = md_attr_store,
36298 };
36299@@ -4482,7 +4482,8 @@ out:
36300 err = 0;
36301 blk_integrity_unregister(disk);
36302 md_new_event(mddev);
36303- sysfs_notify_dirent(mddev->sysfs_state);
36304+ if (mddev->sysfs_state)
36305+ sysfs_notify_dirent(mddev->sysfs_state);
36306 return err;
36307 }
36308
36309@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36310
36311 spin_unlock(&pers_lock);
36312 seq_printf(seq, "\n");
36313- mi->event = atomic_read(&md_event_count);
36314+ mi->event = atomic_read_unchecked(&md_event_count);
36315 return 0;
36316 }
36317 if (v == (void*)2) {
36318@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36319 chunk_kb ? "KB" : "B");
36320 if (bitmap->file) {
36321 seq_printf(seq, ", file: ");
36322- seq_path(seq, &bitmap->file->f_path, " \t\n");
36323+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36324 }
36325
36326 seq_printf(seq, "\n");
36327@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36328 else {
36329 struct seq_file *p = file->private_data;
36330 p->private = mi;
36331- mi->event = atomic_read(&md_event_count);
36332+ mi->event = atomic_read_unchecked(&md_event_count);
36333 }
36334 return error;
36335 }
36336@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36337 /* always allow read */
36338 mask = POLLIN | POLLRDNORM;
36339
36340- if (mi->event != atomic_read(&md_event_count))
36341+ if (mi->event != atomic_read_unchecked(&md_event_count))
36342 mask |= POLLERR | POLLPRI;
36343 return mask;
36344 }
36345@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36346 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36347 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36348 (int)part_stat_read(&disk->part0, sectors[1]) -
36349- atomic_read(&disk->sync_io);
36350+ atomic_read_unchecked(&disk->sync_io);
36351 /* sync IO will cause sync_io to increase before the disk_stats
36352 * as sync_io is counted when a request starts, and
36353 * disk_stats is counted when it completes.
36354diff --git a/drivers/md/md.h b/drivers/md/md.h
36355index 87430fe..0024a4c 100644
36356--- a/drivers/md/md.h
36357+++ b/drivers/md/md.h
36358@@ -94,10 +94,10 @@ struct mdk_rdev_s
36359 * only maintained for arrays that
36360 * support hot removal
36361 */
36362- atomic_t read_errors; /* number of consecutive read errors that
36363+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
36364 * we have tried to ignore.
36365 */
36366- atomic_t corrected_errors; /* number of corrected read errors,
36367+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36368 * for reporting to userspace and storing
36369 * in superblock.
36370 */
36371@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36372
36373 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36374 {
36375- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36376+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36377 }
36378
36379 struct mdk_personality
36380diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36381index 968cb14..f0ad2e4 100644
36382--- a/drivers/md/raid1.c
36383+++ b/drivers/md/raid1.c
36384@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36385 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36386 continue;
36387 rdev = conf->mirrors[d].rdev;
36388- atomic_add(s, &rdev->corrected_errors);
36389+ atomic_add_unchecked(s, &rdev->corrected_errors);
36390 if (sync_page_io(rdev->bdev,
36391 sect + rdev->data_offset,
36392 s<<9,
36393@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36394 /* Well, this device is dead */
36395 md_error(mddev, rdev);
36396 else {
36397- atomic_add(s, &rdev->corrected_errors);
36398+ atomic_add_unchecked(s, &rdev->corrected_errors);
36399 printk(KERN_INFO
36400 "raid1:%s: read error corrected "
36401 "(%d sectors at %llu on %s)\n",
36402diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36403index 1b4e232..cf0f534 100644
36404--- a/drivers/md/raid10.c
36405+++ b/drivers/md/raid10.c
36406@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36407 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36408 set_bit(R10BIO_Uptodate, &r10_bio->state);
36409 else {
36410- atomic_add(r10_bio->sectors,
36411+ atomic_add_unchecked(r10_bio->sectors,
36412 &conf->mirrors[d].rdev->corrected_errors);
36413 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36414 md_error(r10_bio->mddev,
36415@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36416 test_bit(In_sync, &rdev->flags)) {
36417 atomic_inc(&rdev->nr_pending);
36418 rcu_read_unlock();
36419- atomic_add(s, &rdev->corrected_errors);
36420+ atomic_add_unchecked(s, &rdev->corrected_errors);
36421 if (sync_page_io(rdev->bdev,
36422 r10_bio->devs[sl].addr +
36423 sect + rdev->data_offset,
36424diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36425index 883215d..675bf47 100644
36426--- a/drivers/md/raid5.c
36427+++ b/drivers/md/raid5.c
36428@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36429 bi->bi_next = NULL;
36430 if ((rw & WRITE) &&
36431 test_bit(R5_ReWrite, &sh->dev[i].flags))
36432- atomic_add(STRIPE_SECTORS,
36433+ atomic_add_unchecked(STRIPE_SECTORS,
36434 &rdev->corrected_errors);
36435 generic_make_request(bi);
36436 } else {
36437@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36438 clear_bit(R5_ReadError, &sh->dev[i].flags);
36439 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36440 }
36441- if (atomic_read(&conf->disks[i].rdev->read_errors))
36442- atomic_set(&conf->disks[i].rdev->read_errors, 0);
36443+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36444+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36445 } else {
36446 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36447 int retry = 0;
36448 rdev = conf->disks[i].rdev;
36449
36450 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36451- atomic_inc(&rdev->read_errors);
36452+ atomic_inc_unchecked(&rdev->read_errors);
36453 if (conf->mddev->degraded >= conf->max_degraded)
36454 printk_rl(KERN_WARNING
36455 "raid5:%s: read error not correctable "
36456@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36457 (unsigned long long)(sh->sector
36458 + rdev->data_offset),
36459 bdn);
36460- else if (atomic_read(&rdev->read_errors)
36461+ else if (atomic_read_unchecked(&rdev->read_errors)
36462 > conf->max_nr_stripes)
36463 printk(KERN_WARNING
36464 "raid5:%s: Too many read errors, failing device %s.\n",
36465@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36466 sector_t r_sector;
36467 struct stripe_head sh2;
36468
36469+ pax_track_stack();
36470
36471 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36472 stripe = new_sector;
36473diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36474index 05bde9c..2f31d40 100644
36475--- a/drivers/media/common/saa7146_hlp.c
36476+++ b/drivers/media/common/saa7146_hlp.c
36477@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36478
36479 int x[32], y[32], w[32], h[32];
36480
36481+ pax_track_stack();
36482+
36483 /* clear out memory */
36484 memset(&line_list[0], 0x00, sizeof(u32)*32);
36485 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36486diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36487index cb22da5..82b686e 100644
36488--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36489+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36490@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36491 u8 buf[HOST_LINK_BUF_SIZE];
36492 int i;
36493
36494+ pax_track_stack();
36495+
36496 dprintk("%s\n", __func__);
36497
36498 /* check if we have space for a link buf in the rx_buffer */
36499@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36500 unsigned long timeout;
36501 int written;
36502
36503+ pax_track_stack();
36504+
36505 dprintk("%s\n", __func__);
36506
36507 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36508diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36509index 2fe05d0..a3289c4 100644
36510--- a/drivers/media/dvb/dvb-core/dvb_demux.h
36511+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36512@@ -71,7 +71,7 @@ struct dvb_demux_feed {
36513 union {
36514 dmx_ts_cb ts;
36515 dmx_section_cb sec;
36516- } cb;
36517+ } __no_const cb;
36518
36519 struct dvb_demux *demux;
36520 void *priv;
36521diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36522index 94159b9..376bd8e 100644
36523--- a/drivers/media/dvb/dvb-core/dvbdev.c
36524+++ b/drivers/media/dvb/dvb-core/dvbdev.c
36525@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36526 const struct dvb_device *template, void *priv, int type)
36527 {
36528 struct dvb_device *dvbdev;
36529- struct file_operations *dvbdevfops;
36530+ file_operations_no_const *dvbdevfops;
36531 struct device *clsdev;
36532 int minor;
36533 int id;
36534diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36535index 2a53dd0..db8c07a 100644
36536--- a/drivers/media/dvb/dvb-usb/cxusb.c
36537+++ b/drivers/media/dvb/dvb-usb/cxusb.c
36538@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36539 struct dib0700_adapter_state {
36540 int (*set_param_save) (struct dvb_frontend *,
36541 struct dvb_frontend_parameters *);
36542-};
36543+} __no_const;
36544
36545 static int dib7070_set_param_override(struct dvb_frontend *fe,
36546 struct dvb_frontend_parameters *fep)
36547diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36548index db7f7f7..f55e96f 100644
36549--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36550+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36551@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36552
36553 u8 buf[260];
36554
36555+ pax_track_stack();
36556+
36557 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36558 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36559
36560diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36561index 524acf5..5ffc403 100644
36562--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36563+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36564@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36565
36566 struct dib0700_adapter_state {
36567 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36568-};
36569+} __no_const;
36570
36571 /* Hauppauge Nova-T 500 (aka Bristol)
36572 * has a LNA on GPIO0 which is enabled by setting 1 */
36573diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36574index ba91735..4261d84 100644
36575--- a/drivers/media/dvb/frontends/dib3000.h
36576+++ b/drivers/media/dvb/frontends/dib3000.h
36577@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36578 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36579 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36580 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36581-};
36582+} __no_const;
36583
36584 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36585 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36586diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36587index c709ce6..b3fe620 100644
36588--- a/drivers/media/dvb/frontends/or51211.c
36589+++ b/drivers/media/dvb/frontends/or51211.c
36590@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36591 u8 tudata[585];
36592 int i;
36593
36594+ pax_track_stack();
36595+
36596 dprintk("Firmware is %zd bytes\n",fw->size);
36597
36598 /* Get eprom data */
36599diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36600index 482d0f3..ee1e202 100644
36601--- a/drivers/media/radio/radio-cadet.c
36602+++ b/drivers/media/radio/radio-cadet.c
36603@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36604 while (i < count && dev->rdsin != dev->rdsout)
36605 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36606
36607- if (copy_to_user(data, readbuf, i))
36608+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36609 return -EFAULT;
36610 return i;
36611 }
36612diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36613index 6dd51e2..0359b92 100644
36614--- a/drivers/media/video/cx18/cx18-driver.c
36615+++ b/drivers/media/video/cx18/cx18-driver.c
36616@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36617
36618 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36619
36620-static atomic_t cx18_instance = ATOMIC_INIT(0);
36621+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36622
36623 /* Parameter declarations */
36624 static int cardtype[CX18_MAX_CARDS];
36625@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36626 struct i2c_client c;
36627 u8 eedata[256];
36628
36629+ pax_track_stack();
36630+
36631 memset(&c, 0, sizeof(c));
36632 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36633 c.adapter = &cx->i2c_adap[0];
36634@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36635 struct cx18 *cx;
36636
36637 /* FIXME - module parameter arrays constrain max instances */
36638- i = atomic_inc_return(&cx18_instance) - 1;
36639+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36640 if (i >= CX18_MAX_CARDS) {
36641 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36642 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36643diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36644index 463ec34..2f4625a 100644
36645--- a/drivers/media/video/ivtv/ivtv-driver.c
36646+++ b/drivers/media/video/ivtv/ivtv-driver.c
36647@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36648 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36649
36650 /* ivtv instance counter */
36651-static atomic_t ivtv_instance = ATOMIC_INIT(0);
36652+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36653
36654 /* Parameter declarations */
36655 static int cardtype[IVTV_MAX_CARDS];
36656diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36657index 5fc4ac0..652a54a 100644
36658--- a/drivers/media/video/omap24xxcam.c
36659+++ b/drivers/media/video/omap24xxcam.c
36660@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36661 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36662
36663 do_gettimeofday(&vb->ts);
36664- vb->field_count = atomic_add_return(2, &fh->field_count);
36665+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36666 if (csr & csr_error) {
36667 vb->state = VIDEOBUF_ERROR;
36668 if (!atomic_read(&fh->cam->in_reset)) {
36669diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36670index 2ce67f5..cf26a5b 100644
36671--- a/drivers/media/video/omap24xxcam.h
36672+++ b/drivers/media/video/omap24xxcam.h
36673@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36674 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36675 struct videobuf_queue vbq;
36676 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36677- atomic_t field_count; /* field counter for videobuf_buffer */
36678+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36679 /* accessing cam here doesn't need serialisation: it's constant */
36680 struct omap24xxcam_device *cam;
36681 };
36682diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36683index 299afa4..eb47459 100644
36684--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36685+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36686@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36687 u8 *eeprom;
36688 struct tveeprom tvdata;
36689
36690+ pax_track_stack();
36691+
36692 memset(&tvdata,0,sizeof(tvdata));
36693
36694 eeprom = pvr2_eeprom_fetch(hdw);
36695diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36696index 5b152ff..3320638 100644
36697--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36698+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36699@@ -195,7 +195,7 @@ struct pvr2_hdw {
36700
36701 /* I2C stuff */
36702 struct i2c_adapter i2c_adap;
36703- struct i2c_algorithm i2c_algo;
36704+ i2c_algorithm_no_const i2c_algo;
36705 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36706 int i2c_cx25840_hack_state;
36707 int i2c_linked;
36708diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36709index 1eabff6..8e2313a 100644
36710--- a/drivers/media/video/saa7134/saa6752hs.c
36711+++ b/drivers/media/video/saa7134/saa6752hs.c
36712@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36713 unsigned char localPAT[256];
36714 unsigned char localPMT[256];
36715
36716+ pax_track_stack();
36717+
36718 /* Set video format - must be done first as it resets other settings */
36719 set_reg8(client, 0x41, h->video_format);
36720
36721diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36722index 9c1d3ac..b1b49e9 100644
36723--- a/drivers/media/video/saa7164/saa7164-cmd.c
36724+++ b/drivers/media/video/saa7164/saa7164-cmd.c
36725@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36726 wait_queue_head_t *q = 0;
36727 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36728
36729+ pax_track_stack();
36730+
36731 /* While any outstand message on the bus exists... */
36732 do {
36733
36734@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36735 u8 tmp[512];
36736 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36737
36738+ pax_track_stack();
36739+
36740 while (loop) {
36741
36742 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36743diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36744index b085496..cde0270 100644
36745--- a/drivers/media/video/usbvideo/ibmcam.c
36746+++ b/drivers/media/video/usbvideo/ibmcam.c
36747@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36748 static int __init ibmcam_init(void)
36749 {
36750 struct usbvideo_cb cbTbl;
36751- memset(&cbTbl, 0, sizeof(cbTbl));
36752- cbTbl.probe = ibmcam_probe;
36753- cbTbl.setupOnOpen = ibmcam_setup_on_open;
36754- cbTbl.videoStart = ibmcam_video_start;
36755- cbTbl.videoStop = ibmcam_video_stop;
36756- cbTbl.processData = ibmcam_ProcessIsocData;
36757- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36758- cbTbl.adjustPicture = ibmcam_adjust_picture;
36759- cbTbl.getFPS = ibmcam_calculate_fps;
36760+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
36761+ *(void **)&cbTbl.probe = ibmcam_probe;
36762+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
36763+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
36764+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
36765+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
36766+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36767+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
36768+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
36769 return usbvideo_register(
36770 &cams,
36771 MAX_IBMCAM,
36772diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
36773index 31d57f2..600b735 100644
36774--- a/drivers/media/video/usbvideo/konicawc.c
36775+++ b/drivers/media/video/usbvideo/konicawc.c
36776@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
36777 int error;
36778
36779 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36780- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36781+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36782
36783 cam->input = input_dev = input_allocate_device();
36784 if (!input_dev) {
36785@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
36786 struct usbvideo_cb cbTbl;
36787 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
36788 DRIVER_DESC "\n");
36789- memset(&cbTbl, 0, sizeof(cbTbl));
36790- cbTbl.probe = konicawc_probe;
36791- cbTbl.setupOnOpen = konicawc_setup_on_open;
36792- cbTbl.processData = konicawc_process_isoc;
36793- cbTbl.getFPS = konicawc_calculate_fps;
36794- cbTbl.setVideoMode = konicawc_set_video_mode;
36795- cbTbl.startDataPump = konicawc_start_data;
36796- cbTbl.stopDataPump = konicawc_stop_data;
36797- cbTbl.adjustPicture = konicawc_adjust_picture;
36798- cbTbl.userFree = konicawc_free_uvd;
36799+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
36800+ *(void **)&cbTbl.probe = konicawc_probe;
36801+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
36802+ *(void **)&cbTbl.processData = konicawc_process_isoc;
36803+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
36804+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
36805+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
36806+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
36807+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
36808+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
36809 return usbvideo_register(
36810 &cams,
36811 MAX_CAMERAS,
36812diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
36813index 803d3e4..c4d1b96 100644
36814--- a/drivers/media/video/usbvideo/quickcam_messenger.c
36815+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
36816@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
36817 int error;
36818
36819 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36820- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36821+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36822
36823 cam->input = input_dev = input_allocate_device();
36824 if (!input_dev) {
36825diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
36826index fbd1b63..292f9f0 100644
36827--- a/drivers/media/video/usbvideo/ultracam.c
36828+++ b/drivers/media/video/usbvideo/ultracam.c
36829@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
36830 {
36831 struct usbvideo_cb cbTbl;
36832 memset(&cbTbl, 0, sizeof(cbTbl));
36833- cbTbl.probe = ultracam_probe;
36834- cbTbl.setupOnOpen = ultracam_setup_on_open;
36835- cbTbl.videoStart = ultracam_video_start;
36836- cbTbl.videoStop = ultracam_video_stop;
36837- cbTbl.processData = ultracam_ProcessIsocData;
36838- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36839- cbTbl.adjustPicture = ultracam_adjust_picture;
36840- cbTbl.getFPS = ultracam_calculate_fps;
36841+ *(void **)&cbTbl.probe = ultracam_probe;
36842+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
36843+ *(void **)&cbTbl.videoStart = ultracam_video_start;
36844+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
36845+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
36846+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36847+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
36848+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
36849 return usbvideo_register(
36850 &cams,
36851 MAX_CAMERAS,
36852diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
36853index dea8b32..34f6878 100644
36854--- a/drivers/media/video/usbvideo/usbvideo.c
36855+++ b/drivers/media/video/usbvideo/usbvideo.c
36856@@ -697,15 +697,15 @@ int usbvideo_register(
36857 __func__, cams, base_size, num_cams);
36858
36859 /* Copy callbacks, apply defaults for those that are not set */
36860- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
36861+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
36862 if (cams->cb.getFrame == NULL)
36863- cams->cb.getFrame = usbvideo_GetFrame;
36864+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
36865 if (cams->cb.disconnect == NULL)
36866- cams->cb.disconnect = usbvideo_Disconnect;
36867+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
36868 if (cams->cb.startDataPump == NULL)
36869- cams->cb.startDataPump = usbvideo_StartDataPump;
36870+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
36871 if (cams->cb.stopDataPump == NULL)
36872- cams->cb.stopDataPump = usbvideo_StopDataPump;
36873+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
36874
36875 cams->num_cameras = num_cams;
36876 cams->cam = (struct uvd *) &cams[1];
36877diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
36878index c66985b..7fa143a 100644
36879--- a/drivers/media/video/usbvideo/usbvideo.h
36880+++ b/drivers/media/video/usbvideo/usbvideo.h
36881@@ -268,7 +268,7 @@ struct usbvideo_cb {
36882 int (*startDataPump)(struct uvd *uvd);
36883 void (*stopDataPump)(struct uvd *uvd);
36884 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
36885-};
36886+} __no_const;
36887
36888 struct usbvideo {
36889 int num_cameras; /* As allocated */
36890diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
36891index e0f91e4..37554ea 100644
36892--- a/drivers/media/video/usbvision/usbvision-core.c
36893+++ b/drivers/media/video/usbvision/usbvision-core.c
36894@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
36895 unsigned char rv, gv, bv;
36896 static unsigned char *Y, *U, *V;
36897
36898+ pax_track_stack();
36899+
36900 frame = usbvision->curFrame;
36901 imageSize = frame->frmwidth * frame->frmheight;
36902 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
36903diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
36904index 0d06e7c..3d17d24 100644
36905--- a/drivers/media/video/v4l2-device.c
36906+++ b/drivers/media/video/v4l2-device.c
36907@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
36908 EXPORT_SYMBOL_GPL(v4l2_device_register);
36909
36910 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
36911- atomic_t *instance)
36912+ atomic_unchecked_t *instance)
36913 {
36914- int num = atomic_inc_return(instance) - 1;
36915+ int num = atomic_inc_return_unchecked(instance) - 1;
36916 int len = strlen(basename);
36917
36918 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
36919diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
36920index 032ebae..6a3532c 100644
36921--- a/drivers/media/video/videobuf-dma-sg.c
36922+++ b/drivers/media/video/videobuf-dma-sg.c
36923@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
36924 {
36925 struct videobuf_queue q;
36926
36927+ pax_track_stack();
36928+
36929 /* Required to make generic handler to call __videobuf_alloc */
36930 q.int_ops = &sg_ops;
36931
36932diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
36933index b6992b7..9fa7547 100644
36934--- a/drivers/message/fusion/mptbase.c
36935+++ b/drivers/message/fusion/mptbase.c
36936@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
36937 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
36938 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
36939
36940+#ifdef CONFIG_GRKERNSEC_HIDESYM
36941+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36942+ NULL, NULL);
36943+#else
36944 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36945 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
36946+#endif
36947+
36948 /*
36949 * Rounding UP to nearest 4-kB boundary here...
36950 */
36951diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
36952index 83873e3..e360e9a 100644
36953--- a/drivers/message/fusion/mptsas.c
36954+++ b/drivers/message/fusion/mptsas.c
36955@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
36956 return 0;
36957 }
36958
36959+static inline void
36960+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36961+{
36962+ if (phy_info->port_details) {
36963+ phy_info->port_details->rphy = rphy;
36964+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36965+ ioc->name, rphy));
36966+ }
36967+
36968+ if (rphy) {
36969+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36970+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36971+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36972+ ioc->name, rphy, rphy->dev.release));
36973+ }
36974+}
36975+
36976 /* no mutex */
36977 static void
36978 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
36979@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
36980 return NULL;
36981 }
36982
36983-static inline void
36984-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36985-{
36986- if (phy_info->port_details) {
36987- phy_info->port_details->rphy = rphy;
36988- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36989- ioc->name, rphy));
36990- }
36991-
36992- if (rphy) {
36993- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36994- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36995- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36996- ioc->name, rphy, rphy->dev.release));
36997- }
36998-}
36999-
37000 static inline struct sas_port *
37001 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37002 {
37003diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37004index bd096ca..332cf76 100644
37005--- a/drivers/message/fusion/mptscsih.c
37006+++ b/drivers/message/fusion/mptscsih.c
37007@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37008
37009 h = shost_priv(SChost);
37010
37011- if (h) {
37012- if (h->info_kbuf == NULL)
37013- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37014- return h->info_kbuf;
37015- h->info_kbuf[0] = '\0';
37016+ if (!h)
37017+ return NULL;
37018
37019- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37020- h->info_kbuf[size-1] = '\0';
37021- }
37022+ if (h->info_kbuf == NULL)
37023+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37024+ return h->info_kbuf;
37025+ h->info_kbuf[0] = '\0';
37026+
37027+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37028+ h->info_kbuf[size-1] = '\0';
37029
37030 return h->info_kbuf;
37031 }
37032diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37033index efba702..59b2c0f 100644
37034--- a/drivers/message/i2o/i2o_config.c
37035+++ b/drivers/message/i2o/i2o_config.c
37036@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37037 struct i2o_message *msg;
37038 unsigned int iop;
37039
37040+ pax_track_stack();
37041+
37042 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37043 return -EFAULT;
37044
37045diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37046index 7045c45..c07b170 100644
37047--- a/drivers/message/i2o/i2o_proc.c
37048+++ b/drivers/message/i2o/i2o_proc.c
37049@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37050 "Array Controller Device"
37051 };
37052
37053-static char *chtostr(u8 * chars, int n)
37054-{
37055- char tmp[256];
37056- tmp[0] = 0;
37057- return strncat(tmp, (char *)chars, n);
37058-}
37059-
37060 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37061 char *group)
37062 {
37063@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37064
37065 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37066 seq_printf(seq, "%-#8x", ddm_table.module_id);
37067- seq_printf(seq, "%-29s",
37068- chtostr(ddm_table.module_name_version, 28));
37069+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37070 seq_printf(seq, "%9d ", ddm_table.data_size);
37071 seq_printf(seq, "%8d", ddm_table.code_size);
37072
37073@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37074
37075 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37076 seq_printf(seq, "%-#8x", dst->module_id);
37077- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37078- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37079+ seq_printf(seq, "%-.28s", dst->module_name_version);
37080+ seq_printf(seq, "%-.8s", dst->date);
37081 seq_printf(seq, "%8d ", dst->module_size);
37082 seq_printf(seq, "%8d ", dst->mpb_size);
37083 seq_printf(seq, "0x%04x", dst->module_flags);
37084@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37085 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37086 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37087 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37088- seq_printf(seq, "Vendor info : %s\n",
37089- chtostr((u8 *) (work32 + 2), 16));
37090- seq_printf(seq, "Product info : %s\n",
37091- chtostr((u8 *) (work32 + 6), 16));
37092- seq_printf(seq, "Description : %s\n",
37093- chtostr((u8 *) (work32 + 10), 16));
37094- seq_printf(seq, "Product rev. : %s\n",
37095- chtostr((u8 *) (work32 + 14), 8));
37096+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37097+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37098+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37099+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37100
37101 seq_printf(seq, "Serial number : ");
37102 print_serial_number(seq, (u8 *) (work32 + 16),
37103@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37104 }
37105
37106 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37107- seq_printf(seq, "Module name : %s\n",
37108- chtostr(result.module_name, 24));
37109- seq_printf(seq, "Module revision : %s\n",
37110- chtostr(result.module_rev, 8));
37111+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
37112+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37113
37114 seq_printf(seq, "Serial number : ");
37115 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37116@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37117 return 0;
37118 }
37119
37120- seq_printf(seq, "Device name : %s\n",
37121- chtostr(result.device_name, 64));
37122- seq_printf(seq, "Service name : %s\n",
37123- chtostr(result.service_name, 64));
37124- seq_printf(seq, "Physical name : %s\n",
37125- chtostr(result.physical_location, 64));
37126- seq_printf(seq, "Instance number : %s\n",
37127- chtostr(result.instance_number, 4));
37128+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
37129+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
37130+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37131+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37132
37133 return 0;
37134 }
37135diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37136index 27cf4af..b1205b8 100644
37137--- a/drivers/message/i2o/iop.c
37138+++ b/drivers/message/i2o/iop.c
37139@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37140
37141 spin_lock_irqsave(&c->context_list_lock, flags);
37142
37143- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37144- atomic_inc(&c->context_list_counter);
37145+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37146+ atomic_inc_unchecked(&c->context_list_counter);
37147
37148- entry->context = atomic_read(&c->context_list_counter);
37149+ entry->context = atomic_read_unchecked(&c->context_list_counter);
37150
37151 list_add(&entry->list, &c->context_list);
37152
37153@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37154
37155 #if BITS_PER_LONG == 64
37156 spin_lock_init(&c->context_list_lock);
37157- atomic_set(&c->context_list_counter, 0);
37158+ atomic_set_unchecked(&c->context_list_counter, 0);
37159 INIT_LIST_HEAD(&c->context_list);
37160 #endif
37161
37162diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37163index 78e3e85..66c9a0d 100644
37164--- a/drivers/mfd/ab3100-core.c
37165+++ b/drivers/mfd/ab3100-core.c
37166@@ -777,7 +777,7 @@ struct ab_family_id {
37167 char *name;
37168 };
37169
37170-static const struct ab_family_id ids[] __initdata = {
37171+static const struct ab_family_id ids[] __initconst = {
37172 /* AB3100 */
37173 {
37174 .id = 0xc0,
37175diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37176index 8d8c932..8104515 100644
37177--- a/drivers/mfd/wm8350-i2c.c
37178+++ b/drivers/mfd/wm8350-i2c.c
37179@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37180 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37181 int ret;
37182
37183+ pax_track_stack();
37184+
37185 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37186 return -EINVAL;
37187
37188diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37189index e4ff50b..4cc3f04 100644
37190--- a/drivers/misc/kgdbts.c
37191+++ b/drivers/misc/kgdbts.c
37192@@ -118,7 +118,7 @@
37193 } while (0)
37194 #define MAX_CONFIG_LEN 40
37195
37196-static struct kgdb_io kgdbts_io_ops;
37197+static const struct kgdb_io kgdbts_io_ops;
37198 static char get_buf[BUFMAX];
37199 static int get_buf_cnt;
37200 static char put_buf[BUFMAX];
37201@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37202 module_put(THIS_MODULE);
37203 }
37204
37205-static struct kgdb_io kgdbts_io_ops = {
37206+static const struct kgdb_io kgdbts_io_ops = {
37207 .name = "kgdbts",
37208 .read_char = kgdbts_get_char,
37209 .write_char = kgdbts_put_char,
37210diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37211index 37e7cfc..67cfb76 100644
37212--- a/drivers/misc/sgi-gru/gruhandles.c
37213+++ b/drivers/misc/sgi-gru/gruhandles.c
37214@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37215
37216 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37217 {
37218- atomic_long_inc(&mcs_op_statistics[op].count);
37219- atomic_long_add(clks, &mcs_op_statistics[op].total);
37220+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37221+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37222 if (mcs_op_statistics[op].max < clks)
37223 mcs_op_statistics[op].max = clks;
37224 }
37225diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37226index 3f2375c..467c6e6 100644
37227--- a/drivers/misc/sgi-gru/gruprocfs.c
37228+++ b/drivers/misc/sgi-gru/gruprocfs.c
37229@@ -32,9 +32,9 @@
37230
37231 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37232
37233-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37234+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37235 {
37236- unsigned long val = atomic_long_read(v);
37237+ unsigned long val = atomic_long_read_unchecked(v);
37238
37239 if (val)
37240 seq_printf(s, "%16lu %s\n", val, id);
37241@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37242 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37243
37244 for (op = 0; op < mcsop_last; op++) {
37245- count = atomic_long_read(&mcs_op_statistics[op].count);
37246- total = atomic_long_read(&mcs_op_statistics[op].total);
37247+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37248+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37249 max = mcs_op_statistics[op].max;
37250 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37251 count ? total / count : 0, max);
37252diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37253index 46990bc..4a251b5 100644
37254--- a/drivers/misc/sgi-gru/grutables.h
37255+++ b/drivers/misc/sgi-gru/grutables.h
37256@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37257 * GRU statistics.
37258 */
37259 struct gru_stats_s {
37260- atomic_long_t vdata_alloc;
37261- atomic_long_t vdata_free;
37262- atomic_long_t gts_alloc;
37263- atomic_long_t gts_free;
37264- atomic_long_t vdata_double_alloc;
37265- atomic_long_t gts_double_allocate;
37266- atomic_long_t assign_context;
37267- atomic_long_t assign_context_failed;
37268- atomic_long_t free_context;
37269- atomic_long_t load_user_context;
37270- atomic_long_t load_kernel_context;
37271- atomic_long_t lock_kernel_context;
37272- atomic_long_t unlock_kernel_context;
37273- atomic_long_t steal_user_context;
37274- atomic_long_t steal_kernel_context;
37275- atomic_long_t steal_context_failed;
37276- atomic_long_t nopfn;
37277- atomic_long_t break_cow;
37278- atomic_long_t asid_new;
37279- atomic_long_t asid_next;
37280- atomic_long_t asid_wrap;
37281- atomic_long_t asid_reuse;
37282- atomic_long_t intr;
37283- atomic_long_t intr_mm_lock_failed;
37284- atomic_long_t call_os;
37285- atomic_long_t call_os_offnode_reference;
37286- atomic_long_t call_os_check_for_bug;
37287- atomic_long_t call_os_wait_queue;
37288- atomic_long_t user_flush_tlb;
37289- atomic_long_t user_unload_context;
37290- atomic_long_t user_exception;
37291- atomic_long_t set_context_option;
37292- atomic_long_t migrate_check;
37293- atomic_long_t migrated_retarget;
37294- atomic_long_t migrated_unload;
37295- atomic_long_t migrated_unload_delay;
37296- atomic_long_t migrated_nopfn_retarget;
37297- atomic_long_t migrated_nopfn_unload;
37298- atomic_long_t tlb_dropin;
37299- atomic_long_t tlb_dropin_fail_no_asid;
37300- atomic_long_t tlb_dropin_fail_upm;
37301- atomic_long_t tlb_dropin_fail_invalid;
37302- atomic_long_t tlb_dropin_fail_range_active;
37303- atomic_long_t tlb_dropin_fail_idle;
37304- atomic_long_t tlb_dropin_fail_fmm;
37305- atomic_long_t tlb_dropin_fail_no_exception;
37306- atomic_long_t tlb_dropin_fail_no_exception_war;
37307- atomic_long_t tfh_stale_on_fault;
37308- atomic_long_t mmu_invalidate_range;
37309- atomic_long_t mmu_invalidate_page;
37310- atomic_long_t mmu_clear_flush_young;
37311- atomic_long_t flush_tlb;
37312- atomic_long_t flush_tlb_gru;
37313- atomic_long_t flush_tlb_gru_tgh;
37314- atomic_long_t flush_tlb_gru_zero_asid;
37315+ atomic_long_unchecked_t vdata_alloc;
37316+ atomic_long_unchecked_t vdata_free;
37317+ atomic_long_unchecked_t gts_alloc;
37318+ atomic_long_unchecked_t gts_free;
37319+ atomic_long_unchecked_t vdata_double_alloc;
37320+ atomic_long_unchecked_t gts_double_allocate;
37321+ atomic_long_unchecked_t assign_context;
37322+ atomic_long_unchecked_t assign_context_failed;
37323+ atomic_long_unchecked_t free_context;
37324+ atomic_long_unchecked_t load_user_context;
37325+ atomic_long_unchecked_t load_kernel_context;
37326+ atomic_long_unchecked_t lock_kernel_context;
37327+ atomic_long_unchecked_t unlock_kernel_context;
37328+ atomic_long_unchecked_t steal_user_context;
37329+ atomic_long_unchecked_t steal_kernel_context;
37330+ atomic_long_unchecked_t steal_context_failed;
37331+ atomic_long_unchecked_t nopfn;
37332+ atomic_long_unchecked_t break_cow;
37333+ atomic_long_unchecked_t asid_new;
37334+ atomic_long_unchecked_t asid_next;
37335+ atomic_long_unchecked_t asid_wrap;
37336+ atomic_long_unchecked_t asid_reuse;
37337+ atomic_long_unchecked_t intr;
37338+ atomic_long_unchecked_t intr_mm_lock_failed;
37339+ atomic_long_unchecked_t call_os;
37340+ atomic_long_unchecked_t call_os_offnode_reference;
37341+ atomic_long_unchecked_t call_os_check_for_bug;
37342+ atomic_long_unchecked_t call_os_wait_queue;
37343+ atomic_long_unchecked_t user_flush_tlb;
37344+ atomic_long_unchecked_t user_unload_context;
37345+ atomic_long_unchecked_t user_exception;
37346+ atomic_long_unchecked_t set_context_option;
37347+ atomic_long_unchecked_t migrate_check;
37348+ atomic_long_unchecked_t migrated_retarget;
37349+ atomic_long_unchecked_t migrated_unload;
37350+ atomic_long_unchecked_t migrated_unload_delay;
37351+ atomic_long_unchecked_t migrated_nopfn_retarget;
37352+ atomic_long_unchecked_t migrated_nopfn_unload;
37353+ atomic_long_unchecked_t tlb_dropin;
37354+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37355+ atomic_long_unchecked_t tlb_dropin_fail_upm;
37356+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
37357+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
37358+ atomic_long_unchecked_t tlb_dropin_fail_idle;
37359+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
37360+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37361+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37362+ atomic_long_unchecked_t tfh_stale_on_fault;
37363+ atomic_long_unchecked_t mmu_invalidate_range;
37364+ atomic_long_unchecked_t mmu_invalidate_page;
37365+ atomic_long_unchecked_t mmu_clear_flush_young;
37366+ atomic_long_unchecked_t flush_tlb;
37367+ atomic_long_unchecked_t flush_tlb_gru;
37368+ atomic_long_unchecked_t flush_tlb_gru_tgh;
37369+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37370
37371- atomic_long_t copy_gpa;
37372+ atomic_long_unchecked_t copy_gpa;
37373
37374- atomic_long_t mesq_receive;
37375- atomic_long_t mesq_receive_none;
37376- atomic_long_t mesq_send;
37377- atomic_long_t mesq_send_failed;
37378- atomic_long_t mesq_noop;
37379- atomic_long_t mesq_send_unexpected_error;
37380- atomic_long_t mesq_send_lb_overflow;
37381- atomic_long_t mesq_send_qlimit_reached;
37382- atomic_long_t mesq_send_amo_nacked;
37383- atomic_long_t mesq_send_put_nacked;
37384- atomic_long_t mesq_qf_not_full;
37385- atomic_long_t mesq_qf_locked;
37386- atomic_long_t mesq_qf_noop_not_full;
37387- atomic_long_t mesq_qf_switch_head_failed;
37388- atomic_long_t mesq_qf_unexpected_error;
37389- atomic_long_t mesq_noop_unexpected_error;
37390- atomic_long_t mesq_noop_lb_overflow;
37391- atomic_long_t mesq_noop_qlimit_reached;
37392- atomic_long_t mesq_noop_amo_nacked;
37393- atomic_long_t mesq_noop_put_nacked;
37394+ atomic_long_unchecked_t mesq_receive;
37395+ atomic_long_unchecked_t mesq_receive_none;
37396+ atomic_long_unchecked_t mesq_send;
37397+ atomic_long_unchecked_t mesq_send_failed;
37398+ atomic_long_unchecked_t mesq_noop;
37399+ atomic_long_unchecked_t mesq_send_unexpected_error;
37400+ atomic_long_unchecked_t mesq_send_lb_overflow;
37401+ atomic_long_unchecked_t mesq_send_qlimit_reached;
37402+ atomic_long_unchecked_t mesq_send_amo_nacked;
37403+ atomic_long_unchecked_t mesq_send_put_nacked;
37404+ atomic_long_unchecked_t mesq_qf_not_full;
37405+ atomic_long_unchecked_t mesq_qf_locked;
37406+ atomic_long_unchecked_t mesq_qf_noop_not_full;
37407+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
37408+ atomic_long_unchecked_t mesq_qf_unexpected_error;
37409+ atomic_long_unchecked_t mesq_noop_unexpected_error;
37410+ atomic_long_unchecked_t mesq_noop_lb_overflow;
37411+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
37412+ atomic_long_unchecked_t mesq_noop_amo_nacked;
37413+ atomic_long_unchecked_t mesq_noop_put_nacked;
37414
37415 };
37416
37417@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37418 cchop_deallocate, tghop_invalidate, mcsop_last};
37419
37420 struct mcs_op_statistic {
37421- atomic_long_t count;
37422- atomic_long_t total;
37423+ atomic_long_unchecked_t count;
37424+ atomic_long_unchecked_t total;
37425 unsigned long max;
37426 };
37427
37428@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37429
37430 #define STAT(id) do { \
37431 if (gru_options & OPT_STATS) \
37432- atomic_long_inc(&gru_stats.id); \
37433+ atomic_long_inc_unchecked(&gru_stats.id); \
37434 } while (0)
37435
37436 #ifdef CONFIG_SGI_GRU_DEBUG
37437diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37438index 2275126..12a9dbfb 100644
37439--- a/drivers/misc/sgi-xp/xp.h
37440+++ b/drivers/misc/sgi-xp/xp.h
37441@@ -289,7 +289,7 @@ struct xpc_interface {
37442 xpc_notify_func, void *);
37443 void (*received) (short, int, void *);
37444 enum xp_retval (*partid_to_nasids) (short, void *);
37445-};
37446+} __no_const;
37447
37448 extern struct xpc_interface xpc_interface;
37449
37450diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37451index b94d5f7..7f494c5 100644
37452--- a/drivers/misc/sgi-xp/xpc.h
37453+++ b/drivers/misc/sgi-xp/xpc.h
37454@@ -835,6 +835,7 @@ struct xpc_arch_operations {
37455 void (*received_payload) (struct xpc_channel *, void *);
37456 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37457 };
37458+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37459
37460 /* struct xpc_partition act_state values (for XPC HB) */
37461
37462@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37463 /* found in xpc_main.c */
37464 extern struct device *xpc_part;
37465 extern struct device *xpc_chan;
37466-extern struct xpc_arch_operations xpc_arch_ops;
37467+extern xpc_arch_operations_no_const xpc_arch_ops;
37468 extern int xpc_disengage_timelimit;
37469 extern int xpc_disengage_timedout;
37470 extern int xpc_activate_IRQ_rcvd;
37471diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37472index fd3688a..7e211a4 100644
37473--- a/drivers/misc/sgi-xp/xpc_main.c
37474+++ b/drivers/misc/sgi-xp/xpc_main.c
37475@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37476 .notifier_call = xpc_system_die,
37477 };
37478
37479-struct xpc_arch_operations xpc_arch_ops;
37480+xpc_arch_operations_no_const xpc_arch_ops;
37481
37482 /*
37483 * Timer function to enforce the timelimit on the partition disengage.
37484diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37485index 8b70e03..700bda6 100644
37486--- a/drivers/misc/sgi-xp/xpc_sn2.c
37487+++ b/drivers/misc/sgi-xp/xpc_sn2.c
37488@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37489 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37490 }
37491
37492-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37493+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37494 .setup_partitions = xpc_setup_partitions_sn2,
37495 .teardown_partitions = xpc_teardown_partitions_sn2,
37496 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37497@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37498 int ret;
37499 size_t buf_size;
37500
37501- xpc_arch_ops = xpc_arch_ops_sn2;
37502+ pax_open_kernel();
37503+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37504+ pax_close_kernel();
37505
37506 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37507 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37508diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37509index 8e08d71..7cb8c9b 100644
37510--- a/drivers/misc/sgi-xp/xpc_uv.c
37511+++ b/drivers/misc/sgi-xp/xpc_uv.c
37512@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37513 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37514 }
37515
37516-static struct xpc_arch_operations xpc_arch_ops_uv = {
37517+static const struct xpc_arch_operations xpc_arch_ops_uv = {
37518 .setup_partitions = xpc_setup_partitions_uv,
37519 .teardown_partitions = xpc_teardown_partitions_uv,
37520 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37521@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37522 int
37523 xpc_init_uv(void)
37524 {
37525- xpc_arch_ops = xpc_arch_ops_uv;
37526+ pax_open_kernel();
37527+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37528+ pax_close_kernel();
37529
37530 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37531 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37532diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37533index 6fd20b42..650efe3 100644
37534--- a/drivers/mmc/host/sdhci-pci.c
37535+++ b/drivers/mmc/host/sdhci-pci.c
37536@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37537 .probe = via_probe,
37538 };
37539
37540-static const struct pci_device_id pci_ids[] __devinitdata = {
37541+static const struct pci_device_id pci_ids[] __devinitconst = {
37542 {
37543 .vendor = PCI_VENDOR_ID_RICOH,
37544 .device = PCI_DEVICE_ID_RICOH_R5C822,
37545diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37546index e7563a9..5f90ce5 100644
37547--- a/drivers/mtd/chips/cfi_cmdset_0001.c
37548+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37549@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37550 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37551 unsigned long timeo = jiffies + HZ;
37552
37553+ pax_track_stack();
37554+
37555 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37556 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37557 goto sleep;
37558@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37559 unsigned long initial_adr;
37560 int initial_len = len;
37561
37562+ pax_track_stack();
37563+
37564 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37565 adr += chip->start;
37566 initial_adr = adr;
37567@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37568 int retries = 3;
37569 int ret;
37570
37571+ pax_track_stack();
37572+
37573 adr += chip->start;
37574
37575 retry:
37576diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37577index 0667a67..3ab97ed 100644
37578--- a/drivers/mtd/chips/cfi_cmdset_0020.c
37579+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37580@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37581 unsigned long cmd_addr;
37582 struct cfi_private *cfi = map->fldrv_priv;
37583
37584+ pax_track_stack();
37585+
37586 adr += chip->start;
37587
37588 /* Ensure cmd read/writes are aligned. */
37589@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37590 DECLARE_WAITQUEUE(wait, current);
37591 int wbufsize, z;
37592
37593+ pax_track_stack();
37594+
37595 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37596 if (adr & (map_bankwidth(map)-1))
37597 return -EINVAL;
37598@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37599 DECLARE_WAITQUEUE(wait, current);
37600 int ret = 0;
37601
37602+ pax_track_stack();
37603+
37604 adr += chip->start;
37605
37606 /* Let's determine this according to the interleave only once */
37607@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37608 unsigned long timeo = jiffies + HZ;
37609 DECLARE_WAITQUEUE(wait, current);
37610
37611+ pax_track_stack();
37612+
37613 adr += chip->start;
37614
37615 /* Let's determine this according to the interleave only once */
37616@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37617 unsigned long timeo = jiffies + HZ;
37618 DECLARE_WAITQUEUE(wait, current);
37619
37620+ pax_track_stack();
37621+
37622 adr += chip->start;
37623
37624 /* Let's determine this according to the interleave only once */
37625diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37626index 5bf5f46..c5de373 100644
37627--- a/drivers/mtd/devices/doc2000.c
37628+++ b/drivers/mtd/devices/doc2000.c
37629@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37630
37631 /* The ECC will not be calculated correctly if less than 512 is written */
37632 /* DBB-
37633- if (len != 0x200 && eccbuf)
37634+ if (len != 0x200)
37635 printk(KERN_WARNING
37636 "ECC needs a full sector write (adr: %lx size %lx)\n",
37637 (long) to, (long) len);
37638diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37639index 0990f78..bb4e8a4 100644
37640--- a/drivers/mtd/devices/doc2001.c
37641+++ b/drivers/mtd/devices/doc2001.c
37642@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37643 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37644
37645 /* Don't allow read past end of device */
37646- if (from >= this->totlen)
37647+ if (from >= this->totlen || !len)
37648 return -EINVAL;
37649
37650 /* Don't allow a single read to cross a 512-byte block boundary */
37651diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37652index e56d6b4..f07e6cf 100644
37653--- a/drivers/mtd/ftl.c
37654+++ b/drivers/mtd/ftl.c
37655@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37656 loff_t offset;
37657 uint16_t srcunitswap = cpu_to_le16(srcunit);
37658
37659+ pax_track_stack();
37660+
37661 eun = &part->EUNInfo[srcunit];
37662 xfer = &part->XferInfo[xferunit];
37663 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37664diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37665index 8aca552..146446e 100755
37666--- a/drivers/mtd/inftlcore.c
37667+++ b/drivers/mtd/inftlcore.c
37668@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37669 struct inftl_oob oob;
37670 size_t retlen;
37671
37672+ pax_track_stack();
37673+
37674 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37675 "pending=%d)\n", inftl, thisVUC, pendingblock);
37676
37677diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37678index 32e82ae..ed50953 100644
37679--- a/drivers/mtd/inftlmount.c
37680+++ b/drivers/mtd/inftlmount.c
37681@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37682 struct INFTLPartition *ip;
37683 size_t retlen;
37684
37685+ pax_track_stack();
37686+
37687 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37688
37689 /*
37690diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37691index 79bf40f..fe5f8fd 100644
37692--- a/drivers/mtd/lpddr/qinfo_probe.c
37693+++ b/drivers/mtd/lpddr/qinfo_probe.c
37694@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37695 {
37696 map_word pfow_val[4];
37697
37698+ pax_track_stack();
37699+
37700 /* Check identification string */
37701 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37702 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37703diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37704index 726a1b8..f46b460 100644
37705--- a/drivers/mtd/mtdchar.c
37706+++ b/drivers/mtd/mtdchar.c
37707@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37708 u_long size;
37709 struct mtd_info_user info;
37710
37711+ pax_track_stack();
37712+
37713 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37714
37715 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37716diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37717index 1002e18..26d82d5 100644
37718--- a/drivers/mtd/nftlcore.c
37719+++ b/drivers/mtd/nftlcore.c
37720@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37721 int inplace = 1;
37722 size_t retlen;
37723
37724+ pax_track_stack();
37725+
37726 memset(BlockMap, 0xff, sizeof(BlockMap));
37727 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37728
37729diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37730index 8b22b18..6fada85 100644
37731--- a/drivers/mtd/nftlmount.c
37732+++ b/drivers/mtd/nftlmount.c
37733@@ -23,6 +23,7 @@
37734 #include <asm/errno.h>
37735 #include <linux/delay.h>
37736 #include <linux/slab.h>
37737+#include <linux/sched.h>
37738 #include <linux/mtd/mtd.h>
37739 #include <linux/mtd/nand.h>
37740 #include <linux/mtd/nftl.h>
37741@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37742 struct mtd_info *mtd = nftl->mbd.mtd;
37743 unsigned int i;
37744
37745+ pax_track_stack();
37746+
37747 /* Assume logical EraseSize == physical erasesize for starting the scan.
37748 We'll sort it out later if we find a MediaHeader which says otherwise */
37749 /* Actually, we won't. The new DiskOnChip driver has already scanned
37750diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37751index 14cec04..d775b87 100644
37752--- a/drivers/mtd/ubi/build.c
37753+++ b/drivers/mtd/ubi/build.c
37754@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
37755 static int __init bytes_str_to_int(const char *str)
37756 {
37757 char *endp;
37758- unsigned long result;
37759+ unsigned long result, scale = 1;
37760
37761 result = simple_strtoul(str, &endp, 0);
37762 if (str == endp || result >= INT_MAX) {
37763@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
37764
37765 switch (*endp) {
37766 case 'G':
37767- result *= 1024;
37768+ scale *= 1024;
37769 case 'M':
37770- result *= 1024;
37771+ scale *= 1024;
37772 case 'K':
37773- result *= 1024;
37774+ scale *= 1024;
37775 if (endp[1] == 'i' && endp[2] == 'B')
37776 endp += 2;
37777 case '\0':
37778@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
37779 return -EINVAL;
37780 }
37781
37782- return result;
37783+ if ((intoverflow_t)result*scale >= INT_MAX) {
37784+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
37785+ str);
37786+ return -EINVAL;
37787+ }
37788+
37789+ return result*scale;
37790 }
37791
37792 /**
37793diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
37794index ab68886..ca405e8 100644
37795--- a/drivers/net/atlx/atl2.c
37796+++ b/drivers/net/atlx/atl2.c
37797@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
37798 */
37799
37800 #define ATL2_PARAM(X, desc) \
37801- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37802+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37803 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
37804 MODULE_PARM_DESC(X, desc);
37805 #else
37806diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
37807index 4874b2b..67f8526 100644
37808--- a/drivers/net/bnx2.c
37809+++ b/drivers/net/bnx2.c
37810@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
37811 int rc = 0;
37812 u32 magic, csum;
37813
37814+ pax_track_stack();
37815+
37816 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
37817 goto test_nvram_done;
37818
37819diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
37820index fd3eb07..8a6978d 100644
37821--- a/drivers/net/cxgb3/l2t.h
37822+++ b/drivers/net/cxgb3/l2t.h
37823@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37824 */
37825 struct l2t_skb_cb {
37826 arp_failure_handler_func arp_failure_handler;
37827-};
37828+} __no_const;
37829
37830 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37831
37832diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
37833index 032cfe0..411af379 100644
37834--- a/drivers/net/cxgb3/t3_hw.c
37835+++ b/drivers/net/cxgb3/t3_hw.c
37836@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
37837 int i, addr, ret;
37838 struct t3_vpd vpd;
37839
37840+ pax_track_stack();
37841+
37842 /*
37843 * Card information is normally at VPD_BASE but some early cards had
37844 * it at 0.
37845diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
37846index d1e0563..b9e129c 100644
37847--- a/drivers/net/e1000e/82571.c
37848+++ b/drivers/net/e1000e/82571.c
37849@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
37850 {
37851 struct e1000_hw *hw = &adapter->hw;
37852 struct e1000_mac_info *mac = &hw->mac;
37853- struct e1000_mac_operations *func = &mac->ops;
37854+ e1000_mac_operations_no_const *func = &mac->ops;
37855 u32 swsm = 0;
37856 u32 swsm2 = 0;
37857 bool force_clear_smbi = false;
37858@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
37859 temp = er32(ICRXDMTC);
37860 }
37861
37862-static struct e1000_mac_operations e82571_mac_ops = {
37863+static const struct e1000_mac_operations e82571_mac_ops = {
37864 /* .check_mng_mode: mac type dependent */
37865 /* .check_for_link: media type dependent */
37866 .id_led_init = e1000e_id_led_init,
37867@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
37868 .setup_led = e1000e_setup_led_generic,
37869 };
37870
37871-static struct e1000_phy_operations e82_phy_ops_igp = {
37872+static const struct e1000_phy_operations e82_phy_ops_igp = {
37873 .acquire_phy = e1000_get_hw_semaphore_82571,
37874 .check_reset_block = e1000e_check_reset_block_generic,
37875 .commit_phy = NULL,
37876@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
37877 .cfg_on_link_up = NULL,
37878 };
37879
37880-static struct e1000_phy_operations e82_phy_ops_m88 = {
37881+static const struct e1000_phy_operations e82_phy_ops_m88 = {
37882 .acquire_phy = e1000_get_hw_semaphore_82571,
37883 .check_reset_block = e1000e_check_reset_block_generic,
37884 .commit_phy = e1000e_phy_sw_reset,
37885@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
37886 .cfg_on_link_up = NULL,
37887 };
37888
37889-static struct e1000_phy_operations e82_phy_ops_bm = {
37890+static const struct e1000_phy_operations e82_phy_ops_bm = {
37891 .acquire_phy = e1000_get_hw_semaphore_82571,
37892 .check_reset_block = e1000e_check_reset_block_generic,
37893 .commit_phy = e1000e_phy_sw_reset,
37894@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
37895 .cfg_on_link_up = NULL,
37896 };
37897
37898-static struct e1000_nvm_operations e82571_nvm_ops = {
37899+static const struct e1000_nvm_operations e82571_nvm_ops = {
37900 .acquire_nvm = e1000_acquire_nvm_82571,
37901 .read_nvm = e1000e_read_nvm_eerd,
37902 .release_nvm = e1000_release_nvm_82571,
37903diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
37904index 47db9bd..fa58ccd 100644
37905--- a/drivers/net/e1000e/e1000.h
37906+++ b/drivers/net/e1000e/e1000.h
37907@@ -375,9 +375,9 @@ struct e1000_info {
37908 u32 pba;
37909 u32 max_hw_frame_size;
37910 s32 (*get_variants)(struct e1000_adapter *);
37911- struct e1000_mac_operations *mac_ops;
37912- struct e1000_phy_operations *phy_ops;
37913- struct e1000_nvm_operations *nvm_ops;
37914+ const struct e1000_mac_operations *mac_ops;
37915+ const struct e1000_phy_operations *phy_ops;
37916+ const struct e1000_nvm_operations *nvm_ops;
37917 };
37918
37919 /* hardware capability, feature, and workaround flags */
37920diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
37921index ae5d736..e9a93a1 100644
37922--- a/drivers/net/e1000e/es2lan.c
37923+++ b/drivers/net/e1000e/es2lan.c
37924@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
37925 {
37926 struct e1000_hw *hw = &adapter->hw;
37927 struct e1000_mac_info *mac = &hw->mac;
37928- struct e1000_mac_operations *func = &mac->ops;
37929+ e1000_mac_operations_no_const *func = &mac->ops;
37930
37931 /* Set media type */
37932 switch (adapter->pdev->device) {
37933@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
37934 temp = er32(ICRXDMTC);
37935 }
37936
37937-static struct e1000_mac_operations es2_mac_ops = {
37938+static const struct e1000_mac_operations es2_mac_ops = {
37939 .id_led_init = e1000e_id_led_init,
37940 .check_mng_mode = e1000e_check_mng_mode_generic,
37941 /* check_for_link dependent on media type */
37942@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
37943 .setup_led = e1000e_setup_led_generic,
37944 };
37945
37946-static struct e1000_phy_operations es2_phy_ops = {
37947+static const struct e1000_phy_operations es2_phy_ops = {
37948 .acquire_phy = e1000_acquire_phy_80003es2lan,
37949 .check_reset_block = e1000e_check_reset_block_generic,
37950 .commit_phy = e1000e_phy_sw_reset,
37951@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
37952 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
37953 };
37954
37955-static struct e1000_nvm_operations es2_nvm_ops = {
37956+static const struct e1000_nvm_operations es2_nvm_ops = {
37957 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
37958 .read_nvm = e1000e_read_nvm_eerd,
37959 .release_nvm = e1000_release_nvm_80003es2lan,
37960diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
37961index 11f3b7c..6381887 100644
37962--- a/drivers/net/e1000e/hw.h
37963+++ b/drivers/net/e1000e/hw.h
37964@@ -753,6 +753,7 @@ struct e1000_mac_operations {
37965 s32 (*setup_physical_interface)(struct e1000_hw *);
37966 s32 (*setup_led)(struct e1000_hw *);
37967 };
37968+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37969
37970 /* Function pointers for the PHY. */
37971 struct e1000_phy_operations {
37972@@ -774,6 +775,7 @@ struct e1000_phy_operations {
37973 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
37974 s32 (*cfg_on_link_up)(struct e1000_hw *);
37975 };
37976+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37977
37978 /* Function pointers for the NVM. */
37979 struct e1000_nvm_operations {
37980@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
37981 s32 (*validate_nvm)(struct e1000_hw *);
37982 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
37983 };
37984+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
37985
37986 struct e1000_mac_info {
37987- struct e1000_mac_operations ops;
37988+ e1000_mac_operations_no_const ops;
37989
37990 u8 addr[6];
37991 u8 perm_addr[6];
37992@@ -823,7 +826,7 @@ struct e1000_mac_info {
37993 };
37994
37995 struct e1000_phy_info {
37996- struct e1000_phy_operations ops;
37997+ e1000_phy_operations_no_const ops;
37998
37999 enum e1000_phy_type type;
38000
38001@@ -857,7 +860,7 @@ struct e1000_phy_info {
38002 };
38003
38004 struct e1000_nvm_info {
38005- struct e1000_nvm_operations ops;
38006+ e1000_nvm_operations_no_const ops;
38007
38008 enum e1000_nvm_type type;
38009 enum e1000_nvm_override override;
38010diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38011index de39f9a..e28d3e0 100644
38012--- a/drivers/net/e1000e/ich8lan.c
38013+++ b/drivers/net/e1000e/ich8lan.c
38014@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38015 }
38016 }
38017
38018-static struct e1000_mac_operations ich8_mac_ops = {
38019+static const struct e1000_mac_operations ich8_mac_ops = {
38020 .id_led_init = e1000e_id_led_init,
38021 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38022 .check_for_link = e1000_check_for_copper_link_ich8lan,
38023@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38024 /* id_led_init dependent on mac type */
38025 };
38026
38027-static struct e1000_phy_operations ich8_phy_ops = {
38028+static const struct e1000_phy_operations ich8_phy_ops = {
38029 .acquire_phy = e1000_acquire_swflag_ich8lan,
38030 .check_reset_block = e1000_check_reset_block_ich8lan,
38031 .commit_phy = NULL,
38032@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38033 .write_phy_reg = e1000e_write_phy_reg_igp,
38034 };
38035
38036-static struct e1000_nvm_operations ich8_nvm_ops = {
38037+static const struct e1000_nvm_operations ich8_nvm_ops = {
38038 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38039 .read_nvm = e1000_read_nvm_ich8lan,
38040 .release_nvm = e1000_release_nvm_ich8lan,
38041diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38042index 18d5fbb..542d96d 100644
38043--- a/drivers/net/fealnx.c
38044+++ b/drivers/net/fealnx.c
38045@@ -151,7 +151,7 @@ struct chip_info {
38046 int flags;
38047 };
38048
38049-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38050+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38051 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38052 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38053 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38054diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38055index 0e5b54b..b503f82 100644
38056--- a/drivers/net/hamradio/6pack.c
38057+++ b/drivers/net/hamradio/6pack.c
38058@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38059 unsigned char buf[512];
38060 int count1;
38061
38062+ pax_track_stack();
38063+
38064 if (!count)
38065 return;
38066
38067diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38068index 5862282..7cce8cb 100644
38069--- a/drivers/net/ibmveth.c
38070+++ b/drivers/net/ibmveth.c
38071@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38072 NULL,
38073 };
38074
38075-static struct sysfs_ops veth_pool_ops = {
38076+static const struct sysfs_ops veth_pool_ops = {
38077 .show = veth_pool_show,
38078 .store = veth_pool_store,
38079 };
38080diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38081index d617f2d..57b5309 100644
38082--- a/drivers/net/igb/e1000_82575.c
38083+++ b/drivers/net/igb/e1000_82575.c
38084@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38085 wr32(E1000_VT_CTL, vt_ctl);
38086 }
38087
38088-static struct e1000_mac_operations e1000_mac_ops_82575 = {
38089+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38090 .reset_hw = igb_reset_hw_82575,
38091 .init_hw = igb_init_hw_82575,
38092 .check_for_link = igb_check_for_link_82575,
38093@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38094 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38095 };
38096
38097-static struct e1000_phy_operations e1000_phy_ops_82575 = {
38098+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38099 .acquire = igb_acquire_phy_82575,
38100 .get_cfg_done = igb_get_cfg_done_82575,
38101 .release = igb_release_phy_82575,
38102 };
38103
38104-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38105+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38106 .acquire = igb_acquire_nvm_82575,
38107 .read = igb_read_nvm_eerd,
38108 .release = igb_release_nvm_82575,
38109diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38110index 72081df..d855cf5 100644
38111--- a/drivers/net/igb/e1000_hw.h
38112+++ b/drivers/net/igb/e1000_hw.h
38113@@ -288,6 +288,7 @@ struct e1000_mac_operations {
38114 s32 (*read_mac_addr)(struct e1000_hw *);
38115 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38116 };
38117+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38118
38119 struct e1000_phy_operations {
38120 s32 (*acquire)(struct e1000_hw *);
38121@@ -303,6 +304,7 @@ struct e1000_phy_operations {
38122 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38123 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38124 };
38125+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38126
38127 struct e1000_nvm_operations {
38128 s32 (*acquire)(struct e1000_hw *);
38129@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38130 void (*release)(struct e1000_hw *);
38131 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38132 };
38133+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38134
38135 struct e1000_info {
38136 s32 (*get_invariants)(struct e1000_hw *);
38137@@ -321,7 +324,7 @@ struct e1000_info {
38138 extern const struct e1000_info e1000_82575_info;
38139
38140 struct e1000_mac_info {
38141- struct e1000_mac_operations ops;
38142+ e1000_mac_operations_no_const ops;
38143
38144 u8 addr[6];
38145 u8 perm_addr[6];
38146@@ -365,7 +368,7 @@ struct e1000_mac_info {
38147 };
38148
38149 struct e1000_phy_info {
38150- struct e1000_phy_operations ops;
38151+ e1000_phy_operations_no_const ops;
38152
38153 enum e1000_phy_type type;
38154
38155@@ -400,7 +403,7 @@ struct e1000_phy_info {
38156 };
38157
38158 struct e1000_nvm_info {
38159- struct e1000_nvm_operations ops;
38160+ e1000_nvm_operations_no_const ops;
38161
38162 enum e1000_nvm_type type;
38163 enum e1000_nvm_override override;
38164@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38165 s32 (*check_for_ack)(struct e1000_hw *, u16);
38166 s32 (*check_for_rst)(struct e1000_hw *, u16);
38167 };
38168+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38169
38170 struct e1000_mbx_stats {
38171 u32 msgs_tx;
38172@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38173 };
38174
38175 struct e1000_mbx_info {
38176- struct e1000_mbx_operations ops;
38177+ e1000_mbx_operations_no_const ops;
38178 struct e1000_mbx_stats stats;
38179 u32 timeout;
38180 u32 usec_delay;
38181diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38182index 1e8ce37..549c453 100644
38183--- a/drivers/net/igbvf/vf.h
38184+++ b/drivers/net/igbvf/vf.h
38185@@ -187,9 +187,10 @@ struct e1000_mac_operations {
38186 s32 (*read_mac_addr)(struct e1000_hw *);
38187 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38188 };
38189+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38190
38191 struct e1000_mac_info {
38192- struct e1000_mac_operations ops;
38193+ e1000_mac_operations_no_const ops;
38194 u8 addr[6];
38195 u8 perm_addr[6];
38196
38197@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38198 s32 (*check_for_ack)(struct e1000_hw *);
38199 s32 (*check_for_rst)(struct e1000_hw *);
38200 };
38201+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38202
38203 struct e1000_mbx_stats {
38204 u32 msgs_tx;
38205@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38206 };
38207
38208 struct e1000_mbx_info {
38209- struct e1000_mbx_operations ops;
38210+ e1000_mbx_operations_no_const ops;
38211 struct e1000_mbx_stats stats;
38212 u32 timeout;
38213 u32 usec_delay;
38214diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38215index aa7286b..a61394f 100644
38216--- a/drivers/net/iseries_veth.c
38217+++ b/drivers/net/iseries_veth.c
38218@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38219 NULL
38220 };
38221
38222-static struct sysfs_ops veth_cnx_sysfs_ops = {
38223+static const struct sysfs_ops veth_cnx_sysfs_ops = {
38224 .show = veth_cnx_attribute_show
38225 };
38226
38227@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38228 NULL
38229 };
38230
38231-static struct sysfs_ops veth_port_sysfs_ops = {
38232+static const struct sysfs_ops veth_port_sysfs_ops = {
38233 .show = veth_port_attribute_show
38234 };
38235
38236diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38237index 8aa44dc..fa1e797 100644
38238--- a/drivers/net/ixgb/ixgb_main.c
38239+++ b/drivers/net/ixgb/ixgb_main.c
38240@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38241 u32 rctl;
38242 int i;
38243
38244+ pax_track_stack();
38245+
38246 /* Check for Promiscuous and All Multicast modes */
38247
38248 rctl = IXGB_READ_REG(hw, RCTL);
38249diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38250index af35e1d..8781785 100644
38251--- a/drivers/net/ixgb/ixgb_param.c
38252+++ b/drivers/net/ixgb/ixgb_param.c
38253@@ -260,6 +260,9 @@ void __devinit
38254 ixgb_check_options(struct ixgb_adapter *adapter)
38255 {
38256 int bd = adapter->bd_number;
38257+
38258+ pax_track_stack();
38259+
38260 if (bd >= IXGB_MAX_NIC) {
38261 printk(KERN_NOTICE
38262 "Warning: no configuration for board #%i\n", bd);
38263diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38264index b17aa73..ed74540 100644
38265--- a/drivers/net/ixgbe/ixgbe_type.h
38266+++ b/drivers/net/ixgbe/ixgbe_type.h
38267@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38268 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38269 s32 (*update_checksum)(struct ixgbe_hw *);
38270 };
38271+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38272
38273 struct ixgbe_mac_operations {
38274 s32 (*init_hw)(struct ixgbe_hw *);
38275@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38276 /* Flow Control */
38277 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38278 };
38279+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38280
38281 struct ixgbe_phy_operations {
38282 s32 (*identify)(struct ixgbe_hw *);
38283@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38284 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38285 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38286 };
38287+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38288
38289 struct ixgbe_eeprom_info {
38290- struct ixgbe_eeprom_operations ops;
38291+ ixgbe_eeprom_operations_no_const ops;
38292 enum ixgbe_eeprom_type type;
38293 u32 semaphore_delay;
38294 u16 word_size;
38295@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38296 };
38297
38298 struct ixgbe_mac_info {
38299- struct ixgbe_mac_operations ops;
38300+ ixgbe_mac_operations_no_const ops;
38301 enum ixgbe_mac_type type;
38302 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38303 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38304@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38305 };
38306
38307 struct ixgbe_phy_info {
38308- struct ixgbe_phy_operations ops;
38309+ ixgbe_phy_operations_no_const ops;
38310 struct mdio_if_info mdio;
38311 enum ixgbe_phy_type type;
38312 u32 id;
38313diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38314index 291a505..2543756 100644
38315--- a/drivers/net/mlx4/main.c
38316+++ b/drivers/net/mlx4/main.c
38317@@ -38,6 +38,7 @@
38318 #include <linux/errno.h>
38319 #include <linux/pci.h>
38320 #include <linux/dma-mapping.h>
38321+#include <linux/sched.h>
38322
38323 #include <linux/mlx4/device.h>
38324 #include <linux/mlx4/doorbell.h>
38325@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38326 u64 icm_size;
38327 int err;
38328
38329+ pax_track_stack();
38330+
38331 err = mlx4_QUERY_FW(dev);
38332 if (err) {
38333 if (err == -EACCES)
38334diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38335index 2dce134..fa5ce75 100644
38336--- a/drivers/net/niu.c
38337+++ b/drivers/net/niu.c
38338@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38339 int i, num_irqs, err;
38340 u8 first_ldg;
38341
38342+ pax_track_stack();
38343+
38344 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38345 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38346 ldg_num_map[i] = first_ldg + i;
38347diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38348index c1b3f09..97cd8c4 100644
38349--- a/drivers/net/pcnet32.c
38350+++ b/drivers/net/pcnet32.c
38351@@ -79,7 +79,7 @@ static int cards_found;
38352 /*
38353 * VLB I/O addresses
38354 */
38355-static unsigned int pcnet32_portlist[] __initdata =
38356+static unsigned int pcnet32_portlist[] __devinitdata =
38357 { 0x300, 0x320, 0x340, 0x360, 0 };
38358
38359 static int pcnet32_debug = 0;
38360@@ -267,7 +267,7 @@ struct pcnet32_private {
38361 struct sk_buff **rx_skbuff;
38362 dma_addr_t *tx_dma_addr;
38363 dma_addr_t *rx_dma_addr;
38364- struct pcnet32_access a;
38365+ struct pcnet32_access *a;
38366 spinlock_t lock; /* Guard lock */
38367 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38368 unsigned int rx_ring_size; /* current rx ring size */
38369@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38370 u16 val;
38371
38372 netif_wake_queue(dev);
38373- val = lp->a.read_csr(ioaddr, CSR3);
38374+ val = lp->a->read_csr(ioaddr, CSR3);
38375 val &= 0x00ff;
38376- lp->a.write_csr(ioaddr, CSR3, val);
38377+ lp->a->write_csr(ioaddr, CSR3, val);
38378 napi_enable(&lp->napi);
38379 }
38380
38381@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38382 r = mii_link_ok(&lp->mii_if);
38383 } else if (lp->chip_version >= PCNET32_79C970A) {
38384 ulong ioaddr = dev->base_addr; /* card base I/O address */
38385- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38386+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38387 } else { /* can not detect link on really old chips */
38388 r = 1;
38389 }
38390@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38391 pcnet32_netif_stop(dev);
38392
38393 spin_lock_irqsave(&lp->lock, flags);
38394- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38395+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38396
38397 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38398
38399@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38400 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38401 {
38402 struct pcnet32_private *lp = netdev_priv(dev);
38403- struct pcnet32_access *a = &lp->a; /* access to registers */
38404+ struct pcnet32_access *a = lp->a; /* access to registers */
38405 ulong ioaddr = dev->base_addr; /* card base I/O address */
38406 struct sk_buff *skb; /* sk buff */
38407 int x, i; /* counters */
38408@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38409 pcnet32_netif_stop(dev);
38410
38411 spin_lock_irqsave(&lp->lock, flags);
38412- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38413+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38414
38415 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38416
38417 /* Reset the PCNET32 */
38418- lp->a.reset(ioaddr);
38419- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38420+ lp->a->reset(ioaddr);
38421+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38422
38423 /* switch pcnet32 to 32bit mode */
38424- lp->a.write_bcr(ioaddr, 20, 2);
38425+ lp->a->write_bcr(ioaddr, 20, 2);
38426
38427 /* purge & init rings but don't actually restart */
38428 pcnet32_restart(dev, 0x0000);
38429
38430- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38431+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38432
38433 /* Initialize Transmit buffers. */
38434 size = data_len + 15;
38435@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38436
38437 /* set int loopback in CSR15 */
38438 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38439- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38440+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38441
38442 teststatus = cpu_to_le16(0x8000);
38443- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38444+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38445
38446 /* Check status of descriptors */
38447 for (x = 0; x < numbuffs; x++) {
38448@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38449 }
38450 }
38451
38452- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38453+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38454 wmb();
38455 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38456 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38457@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38458 pcnet32_restart(dev, CSR0_NORMAL);
38459 } else {
38460 pcnet32_purge_rx_ring(dev);
38461- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38462+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38463 }
38464 spin_unlock_irqrestore(&lp->lock, flags);
38465
38466@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38467 static void pcnet32_led_blink_callback(struct net_device *dev)
38468 {
38469 struct pcnet32_private *lp = netdev_priv(dev);
38470- struct pcnet32_access *a = &lp->a;
38471+ struct pcnet32_access *a = lp->a;
38472 ulong ioaddr = dev->base_addr;
38473 unsigned long flags;
38474 int i;
38475@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38476 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38477 {
38478 struct pcnet32_private *lp = netdev_priv(dev);
38479- struct pcnet32_access *a = &lp->a;
38480+ struct pcnet32_access *a = lp->a;
38481 ulong ioaddr = dev->base_addr;
38482 unsigned long flags;
38483 int i, regs[4];
38484@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38485 {
38486 int csr5;
38487 struct pcnet32_private *lp = netdev_priv(dev);
38488- struct pcnet32_access *a = &lp->a;
38489+ struct pcnet32_access *a = lp->a;
38490 ulong ioaddr = dev->base_addr;
38491 int ticks;
38492
38493@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38494 spin_lock_irqsave(&lp->lock, flags);
38495 if (pcnet32_tx(dev)) {
38496 /* reset the chip to clear the error condition, then restart */
38497- lp->a.reset(ioaddr);
38498- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38499+ lp->a->reset(ioaddr);
38500+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38501 pcnet32_restart(dev, CSR0_START);
38502 netif_wake_queue(dev);
38503 }
38504@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38505 __napi_complete(napi);
38506
38507 /* clear interrupt masks */
38508- val = lp->a.read_csr(ioaddr, CSR3);
38509+ val = lp->a->read_csr(ioaddr, CSR3);
38510 val &= 0x00ff;
38511- lp->a.write_csr(ioaddr, CSR3, val);
38512+ lp->a->write_csr(ioaddr, CSR3, val);
38513
38514 /* Set interrupt enable. */
38515- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38516+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38517
38518 spin_unlock_irqrestore(&lp->lock, flags);
38519 }
38520@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38521 int i, csr0;
38522 u16 *buff = ptr;
38523 struct pcnet32_private *lp = netdev_priv(dev);
38524- struct pcnet32_access *a = &lp->a;
38525+ struct pcnet32_access *a = lp->a;
38526 ulong ioaddr = dev->base_addr;
38527 unsigned long flags;
38528
38529@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38530 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38531 if (lp->phymask & (1 << j)) {
38532 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38533- lp->a.write_bcr(ioaddr, 33,
38534+ lp->a->write_bcr(ioaddr, 33,
38535 (j << 5) | i);
38536- *buff++ = lp->a.read_bcr(ioaddr, 34);
38537+ *buff++ = lp->a->read_bcr(ioaddr, 34);
38538 }
38539 }
38540 }
38541@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38542 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38543 lp->options |= PCNET32_PORT_FD;
38544
38545- lp->a = *a;
38546+ lp->a = a;
38547
38548 /* prior to register_netdev, dev->name is not yet correct */
38549 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38550@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38551 if (lp->mii) {
38552 /* lp->phycount and lp->phymask are set to 0 by memset above */
38553
38554- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38555+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38556 /* scan for PHYs */
38557 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38558 unsigned short id1, id2;
38559@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38560 "Found PHY %04x:%04x at address %d.\n",
38561 id1, id2, i);
38562 }
38563- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38564+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38565 if (lp->phycount > 1) {
38566 lp->options |= PCNET32_PORT_MII;
38567 }
38568@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38569 }
38570
38571 /* Reset the PCNET32 */
38572- lp->a.reset(ioaddr);
38573+ lp->a->reset(ioaddr);
38574
38575 /* switch pcnet32 to 32bit mode */
38576- lp->a.write_bcr(ioaddr, 20, 2);
38577+ lp->a->write_bcr(ioaddr, 20, 2);
38578
38579 if (netif_msg_ifup(lp))
38580 printk(KERN_DEBUG
38581@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38582 (u32) (lp->init_dma_addr));
38583
38584 /* set/reset autoselect bit */
38585- val = lp->a.read_bcr(ioaddr, 2) & ~2;
38586+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
38587 if (lp->options & PCNET32_PORT_ASEL)
38588 val |= 2;
38589- lp->a.write_bcr(ioaddr, 2, val);
38590+ lp->a->write_bcr(ioaddr, 2, val);
38591
38592 /* handle full duplex setting */
38593 if (lp->mii_if.full_duplex) {
38594- val = lp->a.read_bcr(ioaddr, 9) & ~3;
38595+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
38596 if (lp->options & PCNET32_PORT_FD) {
38597 val |= 1;
38598 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38599@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38600 if (lp->chip_version == 0x2627)
38601 val |= 3;
38602 }
38603- lp->a.write_bcr(ioaddr, 9, val);
38604+ lp->a->write_bcr(ioaddr, 9, val);
38605 }
38606
38607 /* set/reset GPSI bit in test register */
38608- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38609+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38610 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38611 val |= 0x10;
38612- lp->a.write_csr(ioaddr, 124, val);
38613+ lp->a->write_csr(ioaddr, 124, val);
38614
38615 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38616 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38617@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38618 * duplex, and/or enable auto negotiation, and clear DANAS
38619 */
38620 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38621- lp->a.write_bcr(ioaddr, 32,
38622- lp->a.read_bcr(ioaddr, 32) | 0x0080);
38623+ lp->a->write_bcr(ioaddr, 32,
38624+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
38625 /* disable Auto Negotiation, set 10Mpbs, HD */
38626- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38627+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38628 if (lp->options & PCNET32_PORT_FD)
38629 val |= 0x10;
38630 if (lp->options & PCNET32_PORT_100)
38631 val |= 0x08;
38632- lp->a.write_bcr(ioaddr, 32, val);
38633+ lp->a->write_bcr(ioaddr, 32, val);
38634 } else {
38635 if (lp->options & PCNET32_PORT_ASEL) {
38636- lp->a.write_bcr(ioaddr, 32,
38637- lp->a.read_bcr(ioaddr,
38638+ lp->a->write_bcr(ioaddr, 32,
38639+ lp->a->read_bcr(ioaddr,
38640 32) | 0x0080);
38641 /* enable auto negotiate, setup, disable fd */
38642- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38643+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38644 val |= 0x20;
38645- lp->a.write_bcr(ioaddr, 32, val);
38646+ lp->a->write_bcr(ioaddr, 32, val);
38647 }
38648 }
38649 } else {
38650@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38651 * There is really no good other way to handle multiple PHYs
38652 * other than turning off all automatics
38653 */
38654- val = lp->a.read_bcr(ioaddr, 2);
38655- lp->a.write_bcr(ioaddr, 2, val & ~2);
38656- val = lp->a.read_bcr(ioaddr, 32);
38657- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38658+ val = lp->a->read_bcr(ioaddr, 2);
38659+ lp->a->write_bcr(ioaddr, 2, val & ~2);
38660+ val = lp->a->read_bcr(ioaddr, 32);
38661+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38662
38663 if (!(lp->options & PCNET32_PORT_ASEL)) {
38664 /* setup ecmd */
38665@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38666 ecmd.speed =
38667 lp->
38668 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38669- bcr9 = lp->a.read_bcr(ioaddr, 9);
38670+ bcr9 = lp->a->read_bcr(ioaddr, 9);
38671
38672 if (lp->options & PCNET32_PORT_FD) {
38673 ecmd.duplex = DUPLEX_FULL;
38674@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38675 ecmd.duplex = DUPLEX_HALF;
38676 bcr9 |= ~(1 << 0);
38677 }
38678- lp->a.write_bcr(ioaddr, 9, bcr9);
38679+ lp->a->write_bcr(ioaddr, 9, bcr9);
38680 }
38681
38682 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38683@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38684
38685 #ifdef DO_DXSUFLO
38686 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38687- val = lp->a.read_csr(ioaddr, CSR3);
38688+ val = lp->a->read_csr(ioaddr, CSR3);
38689 val |= 0x40;
38690- lp->a.write_csr(ioaddr, CSR3, val);
38691+ lp->a->write_csr(ioaddr, CSR3, val);
38692 }
38693 #endif
38694
38695@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38696 napi_enable(&lp->napi);
38697
38698 /* Re-initialize the PCNET32, and start it when done. */
38699- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38700- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38701+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38702+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38703
38704- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38705- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38706+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38707+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38708
38709 netif_start_queue(dev);
38710
38711@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38712
38713 i = 0;
38714 while (i++ < 100)
38715- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38716+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38717 break;
38718 /*
38719 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38720 * reports that doing so triggers a bug in the '974.
38721 */
38722- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38723+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38724
38725 if (netif_msg_ifup(lp))
38726 printk(KERN_DEBUG
38727 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38728 dev->name, i,
38729 (u32) (lp->init_dma_addr),
38730- lp->a.read_csr(ioaddr, CSR0));
38731+ lp->a->read_csr(ioaddr, CSR0));
38732
38733 spin_unlock_irqrestore(&lp->lock, flags);
38734
38735@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38736 * Switch back to 16bit mode to avoid problems with dumb
38737 * DOS packet driver after a warm reboot
38738 */
38739- lp->a.write_bcr(ioaddr, 20, 4);
38740+ lp->a->write_bcr(ioaddr, 20, 4);
38741
38742 err_free_irq:
38743 spin_unlock_irqrestore(&lp->lock, flags);
38744@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38745
38746 /* wait for stop */
38747 for (i = 0; i < 100; i++)
38748- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38749+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38750 break;
38751
38752 if (i >= 100 && netif_msg_drv(lp))
38753@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38754 return;
38755
38756 /* ReInit Ring */
38757- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38758+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38759 i = 0;
38760 while (i++ < 1000)
38761- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38762+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38763 break;
38764
38765- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
38766+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
38767 }
38768
38769 static void pcnet32_tx_timeout(struct net_device *dev)
38770@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
38771 if (pcnet32_debug & NETIF_MSG_DRV)
38772 printk(KERN_ERR
38773 "%s: transmit timed out, status %4.4x, resetting.\n",
38774- dev->name, lp->a.read_csr(ioaddr, CSR0));
38775- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38776+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38777+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38778 dev->stats.tx_errors++;
38779 if (netif_msg_tx_err(lp)) {
38780 int i;
38781@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38782 if (netif_msg_tx_queued(lp)) {
38783 printk(KERN_DEBUG
38784 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
38785- dev->name, lp->a.read_csr(ioaddr, CSR0));
38786+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38787 }
38788
38789 /* Default status -- will not enable Successful-TxDone
38790@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38791 dev->stats.tx_bytes += skb->len;
38792
38793 /* Trigger an immediate send poll. */
38794- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38795+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38796
38797 dev->trans_start = jiffies;
38798
38799@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
38800
38801 spin_lock(&lp->lock);
38802
38803- csr0 = lp->a.read_csr(ioaddr, CSR0);
38804+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38805 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
38806 if (csr0 == 0xffff) {
38807 break; /* PCMCIA remove happened */
38808 }
38809 /* Acknowledge all of the current interrupt sources ASAP. */
38810- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38811+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38812
38813 if (netif_msg_intr(lp))
38814 printk(KERN_DEBUG
38815 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
38816- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
38817+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
38818
38819 /* Log misc errors. */
38820 if (csr0 & 0x4000)
38821@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
38822 if (napi_schedule_prep(&lp->napi)) {
38823 u16 val;
38824 /* set interrupt masks */
38825- val = lp->a.read_csr(ioaddr, CSR3);
38826+ val = lp->a->read_csr(ioaddr, CSR3);
38827 val |= 0x5f00;
38828- lp->a.write_csr(ioaddr, CSR3, val);
38829+ lp->a->write_csr(ioaddr, CSR3, val);
38830
38831 __napi_schedule(&lp->napi);
38832 break;
38833 }
38834- csr0 = lp->a.read_csr(ioaddr, CSR0);
38835+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38836 }
38837
38838 if (netif_msg_intr(lp))
38839 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
38840- dev->name, lp->a.read_csr(ioaddr, CSR0));
38841+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38842
38843 spin_unlock(&lp->lock);
38844
38845@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
38846
38847 spin_lock_irqsave(&lp->lock, flags);
38848
38849- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38850+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38851
38852 if (netif_msg_ifdown(lp))
38853 printk(KERN_DEBUG
38854 "%s: Shutting down ethercard, status was %2.2x.\n",
38855- dev->name, lp->a.read_csr(ioaddr, CSR0));
38856+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38857
38858 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
38859- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38860+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38861
38862 /*
38863 * Switch back to 16bit mode to avoid problems with dumb
38864 * DOS packet driver after a warm reboot
38865 */
38866- lp->a.write_bcr(ioaddr, 20, 4);
38867+ lp->a->write_bcr(ioaddr, 20, 4);
38868
38869 spin_unlock_irqrestore(&lp->lock, flags);
38870
38871@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
38872 unsigned long flags;
38873
38874 spin_lock_irqsave(&lp->lock, flags);
38875- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38876+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38877 spin_unlock_irqrestore(&lp->lock, flags);
38878
38879 return &dev->stats;
38880@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
38881 if (dev->flags & IFF_ALLMULTI) {
38882 ib->filter[0] = cpu_to_le32(~0U);
38883 ib->filter[1] = cpu_to_le32(~0U);
38884- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38885- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38886- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38887- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38888+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38889+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38890+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38891+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38892 return;
38893 }
38894 /* clear the multicast filter */
38895@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
38896 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
38897 }
38898 for (i = 0; i < 4; i++)
38899- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
38900+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
38901 le16_to_cpu(mcast_table[i]));
38902 return;
38903 }
38904@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38905
38906 spin_lock_irqsave(&lp->lock, flags);
38907 suspended = pcnet32_suspend(dev, &flags, 0);
38908- csr15 = lp->a.read_csr(ioaddr, CSR15);
38909+ csr15 = lp->a->read_csr(ioaddr, CSR15);
38910 if (dev->flags & IFF_PROMISC) {
38911 /* Log any net taps. */
38912 if (netif_msg_hw(lp))
38913@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38914 lp->init_block->mode =
38915 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
38916 7);
38917- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
38918+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
38919 } else {
38920 lp->init_block->mode =
38921 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
38922- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38923+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38924 pcnet32_load_multicast(dev);
38925 }
38926
38927 if (suspended) {
38928 int csr5;
38929 /* clear SUSPEND (SPND) - CSR5 bit 0 */
38930- csr5 = lp->a.read_csr(ioaddr, CSR5);
38931- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38932+ csr5 = lp->a->read_csr(ioaddr, CSR5);
38933+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38934 } else {
38935- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38936+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38937 pcnet32_restart(dev, CSR0_NORMAL);
38938 netif_wake_queue(dev);
38939 }
38940@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
38941 if (!lp->mii)
38942 return 0;
38943
38944- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38945- val_out = lp->a.read_bcr(ioaddr, 34);
38946+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38947+ val_out = lp->a->read_bcr(ioaddr, 34);
38948
38949 return val_out;
38950 }
38951@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
38952 if (!lp->mii)
38953 return;
38954
38955- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38956- lp->a.write_bcr(ioaddr, 34, val);
38957+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38958+ lp->a->write_bcr(ioaddr, 34, val);
38959 }
38960
38961 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38962@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38963 curr_link = mii_link_ok(&lp->mii_if);
38964 } else {
38965 ulong ioaddr = dev->base_addr; /* card base I/O address */
38966- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38967+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38968 }
38969 if (!curr_link) {
38970 if (prev_link || verbose) {
38971@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38972 (ecmd.duplex ==
38973 DUPLEX_FULL) ? "full" : "half");
38974 }
38975- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
38976+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
38977 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
38978 if (lp->mii_if.full_duplex)
38979 bcr9 |= (1 << 0);
38980 else
38981 bcr9 &= ~(1 << 0);
38982- lp->a.write_bcr(dev->base_addr, 9, bcr9);
38983+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
38984 }
38985 } else {
38986 if (netif_msg_link(lp))
38987diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
38988index 7cc9898..6eb50d3 100644
38989--- a/drivers/net/sis190.c
38990+++ b/drivers/net/sis190.c
38991@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
38992 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
38993 struct net_device *dev)
38994 {
38995- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
38996+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
38997 struct sis190_private *tp = netdev_priv(dev);
38998 struct pci_dev *isa_bridge;
38999 u8 reg, tmp8;
39000diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39001index e13685a..60c948c 100644
39002--- a/drivers/net/sundance.c
39003+++ b/drivers/net/sundance.c
39004@@ -225,7 +225,7 @@ enum {
39005 struct pci_id_info {
39006 const char *name;
39007 };
39008-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39009+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39010 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39011 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39012 {"D-Link DFE-580TX 4 port Server Adapter"},
39013diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39014index 529f55a..cccaa18 100644
39015--- a/drivers/net/tg3.h
39016+++ b/drivers/net/tg3.h
39017@@ -95,6 +95,7 @@
39018 #define CHIPREV_ID_5750_A0 0x4000
39019 #define CHIPREV_ID_5750_A1 0x4001
39020 #define CHIPREV_ID_5750_A3 0x4003
39021+#define CHIPREV_ID_5750_C1 0x4201
39022 #define CHIPREV_ID_5750_C2 0x4202
39023 #define CHIPREV_ID_5752_A0_HW 0x5000
39024 #define CHIPREV_ID_5752_A0 0x6000
39025diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39026index b9db1b5..720f9ce 100644
39027--- a/drivers/net/tokenring/abyss.c
39028+++ b/drivers/net/tokenring/abyss.c
39029@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39030
39031 static int __init abyss_init (void)
39032 {
39033- abyss_netdev_ops = tms380tr_netdev_ops;
39034+ pax_open_kernel();
39035+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39036
39037- abyss_netdev_ops.ndo_open = abyss_open;
39038- abyss_netdev_ops.ndo_stop = abyss_close;
39039+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39040+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39041+ pax_close_kernel();
39042
39043 return pci_register_driver(&abyss_driver);
39044 }
39045diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39046index 456f8bf..373e56d 100644
39047--- a/drivers/net/tokenring/madgemc.c
39048+++ b/drivers/net/tokenring/madgemc.c
39049@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39050
39051 static int __init madgemc_init (void)
39052 {
39053- madgemc_netdev_ops = tms380tr_netdev_ops;
39054- madgemc_netdev_ops.ndo_open = madgemc_open;
39055- madgemc_netdev_ops.ndo_stop = madgemc_close;
39056+ pax_open_kernel();
39057+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39058+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39059+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39060+ pax_close_kernel();
39061
39062 return mca_register_driver (&madgemc_driver);
39063 }
39064diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39065index 16e8783..925bd49 100644
39066--- a/drivers/net/tokenring/proteon.c
39067+++ b/drivers/net/tokenring/proteon.c
39068@@ -353,9 +353,11 @@ static int __init proteon_init(void)
39069 struct platform_device *pdev;
39070 int i, num = 0, err = 0;
39071
39072- proteon_netdev_ops = tms380tr_netdev_ops;
39073- proteon_netdev_ops.ndo_open = proteon_open;
39074- proteon_netdev_ops.ndo_stop = tms380tr_close;
39075+ pax_open_kernel();
39076+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39077+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39078+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39079+ pax_close_kernel();
39080
39081 err = platform_driver_register(&proteon_driver);
39082 if (err)
39083diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39084index 46db5c5..37c1536 100644
39085--- a/drivers/net/tokenring/skisa.c
39086+++ b/drivers/net/tokenring/skisa.c
39087@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39088 struct platform_device *pdev;
39089 int i, num = 0, err = 0;
39090
39091- sk_isa_netdev_ops = tms380tr_netdev_ops;
39092- sk_isa_netdev_ops.ndo_open = sk_isa_open;
39093- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39094+ pax_open_kernel();
39095+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39096+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39097+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39098+ pax_close_kernel();
39099
39100 err = platform_driver_register(&sk_isa_driver);
39101 if (err)
39102diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39103index 74e5ba4..5cf6bc9 100644
39104--- a/drivers/net/tulip/de2104x.c
39105+++ b/drivers/net/tulip/de2104x.c
39106@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39107 struct de_srom_info_leaf *il;
39108 void *bufp;
39109
39110+ pax_track_stack();
39111+
39112 /* download entire eeprom */
39113 for (i = 0; i < DE_EEPROM_WORDS; i++)
39114 ((__le16 *)ee_data)[i] =
39115diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39116index a8349b7..90f9dfe 100644
39117--- a/drivers/net/tulip/de4x5.c
39118+++ b/drivers/net/tulip/de4x5.c
39119@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39120 for (i=0; i<ETH_ALEN; i++) {
39121 tmp.addr[i] = dev->dev_addr[i];
39122 }
39123- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39124+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39125 break;
39126
39127 case DE4X5_SET_HWADDR: /* Set the hardware address */
39128@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39129 spin_lock_irqsave(&lp->lock, flags);
39130 memcpy(&statbuf, &lp->pktStats, ioc->len);
39131 spin_unlock_irqrestore(&lp->lock, flags);
39132- if (copy_to_user(ioc->data, &statbuf, ioc->len))
39133+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39134 return -EFAULT;
39135 break;
39136 }
39137diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39138index 391acd3..56d11cd 100644
39139--- a/drivers/net/tulip/eeprom.c
39140+++ b/drivers/net/tulip/eeprom.c
39141@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39142 {NULL}};
39143
39144
39145-static const char *block_name[] __devinitdata = {
39146+static const char *block_name[] __devinitconst = {
39147 "21140 non-MII",
39148 "21140 MII PHY",
39149 "21142 Serial PHY",
39150diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39151index b38d3b7..b1cff23 100644
39152--- a/drivers/net/tulip/winbond-840.c
39153+++ b/drivers/net/tulip/winbond-840.c
39154@@ -235,7 +235,7 @@ struct pci_id_info {
39155 int drv_flags; /* Driver use, intended as capability flags. */
39156 };
39157
39158-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39159+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39160 { /* Sometime a Level-One switch card. */
39161 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39162 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39163diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39164index f450bc9..2b747c8 100644
39165--- a/drivers/net/usb/hso.c
39166+++ b/drivers/net/usb/hso.c
39167@@ -71,7 +71,7 @@
39168 #include <asm/byteorder.h>
39169 #include <linux/serial_core.h>
39170 #include <linux/serial.h>
39171-
39172+#include <asm/local.h>
39173
39174 #define DRIVER_VERSION "1.2"
39175 #define MOD_AUTHOR "Option Wireless"
39176@@ -258,7 +258,7 @@ struct hso_serial {
39177
39178 /* from usb_serial_port */
39179 struct tty_struct *tty;
39180- int open_count;
39181+ local_t open_count;
39182 spinlock_t serial_lock;
39183
39184 int (*write_data) (struct hso_serial *serial);
39185@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39186 struct urb *urb;
39187
39188 urb = serial->rx_urb[0];
39189- if (serial->open_count > 0) {
39190+ if (local_read(&serial->open_count) > 0) {
39191 count = put_rxbuf_data(urb, serial);
39192 if (count == -1)
39193 return;
39194@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39195 DUMP1(urb->transfer_buffer, urb->actual_length);
39196
39197 /* Anyone listening? */
39198- if (serial->open_count == 0)
39199+ if (local_read(&serial->open_count) == 0)
39200 return;
39201
39202 if (status == 0) {
39203@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39204 spin_unlock_irq(&serial->serial_lock);
39205
39206 /* check for port already opened, if not set the termios */
39207- serial->open_count++;
39208- if (serial->open_count == 1) {
39209+ if (local_inc_return(&serial->open_count) == 1) {
39210 tty->low_latency = 1;
39211 serial->rx_state = RX_IDLE;
39212 /* Force default termio settings */
39213@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39214 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39215 if (result) {
39216 hso_stop_serial_device(serial->parent);
39217- serial->open_count--;
39218+ local_dec(&serial->open_count);
39219 kref_put(&serial->parent->ref, hso_serial_ref_free);
39220 }
39221 } else {
39222@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39223
39224 /* reset the rts and dtr */
39225 /* do the actual close */
39226- serial->open_count--;
39227+ local_dec(&serial->open_count);
39228
39229- if (serial->open_count <= 0) {
39230- serial->open_count = 0;
39231+ if (local_read(&serial->open_count) <= 0) {
39232+ local_set(&serial->open_count, 0);
39233 spin_lock_irq(&serial->serial_lock);
39234 if (serial->tty == tty) {
39235 serial->tty->driver_data = NULL;
39236@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39237
39238 /* the actual setup */
39239 spin_lock_irqsave(&serial->serial_lock, flags);
39240- if (serial->open_count)
39241+ if (local_read(&serial->open_count))
39242 _hso_serial_set_termios(tty, old);
39243 else
39244 tty->termios = old;
39245@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39246 /* Start all serial ports */
39247 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39248 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39249- if (dev2ser(serial_table[i])->open_count) {
39250+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
39251 result =
39252 hso_start_serial_device(serial_table[i], GFP_NOIO);
39253 hso_kick_transmit(dev2ser(serial_table[i]));
39254diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39255index 3e94f0c..ffdd926 100644
39256--- a/drivers/net/vxge/vxge-config.h
39257+++ b/drivers/net/vxge/vxge-config.h
39258@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39259 void (*link_down)(struct __vxge_hw_device *devh);
39260 void (*crit_err)(struct __vxge_hw_device *devh,
39261 enum vxge_hw_event type, u64 ext_data);
39262-};
39263+} __no_const;
39264
39265 /*
39266 * struct __vxge_hw_blockpool_entry - Block private data structure
39267diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39268index 068d7a9..35293de 100644
39269--- a/drivers/net/vxge/vxge-main.c
39270+++ b/drivers/net/vxge/vxge-main.c
39271@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39272 struct sk_buff *completed[NR_SKB_COMPLETED];
39273 int more;
39274
39275+ pax_track_stack();
39276+
39277 do {
39278 more = 0;
39279 skb_ptr = completed;
39280@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39281 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39282 int index;
39283
39284+ pax_track_stack();
39285+
39286 /*
39287 * Filling
39288 * - itable with bucket numbers
39289diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39290index 461742b..81be42e 100644
39291--- a/drivers/net/vxge/vxge-traffic.h
39292+++ b/drivers/net/vxge/vxge-traffic.h
39293@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39294 struct vxge_hw_mempool_dma *dma_object,
39295 u32 index,
39296 u32 is_last);
39297-};
39298+} __no_const;
39299
39300 void
39301 __vxge_hw_mempool_destroy(
39302diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39303index cd8cb95..4153b79 100644
39304--- a/drivers/net/wan/cycx_x25.c
39305+++ b/drivers/net/wan/cycx_x25.c
39306@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39307 unsigned char hex[1024],
39308 * phex = hex;
39309
39310+ pax_track_stack();
39311+
39312 if (len >= (sizeof(hex) / 2))
39313 len = (sizeof(hex) / 2) - 1;
39314
39315diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39316index aa9248f..a4e3c3b 100644
39317--- a/drivers/net/wan/hdlc_x25.c
39318+++ b/drivers/net/wan/hdlc_x25.c
39319@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39320
39321 static int x25_open(struct net_device *dev)
39322 {
39323- struct lapb_register_struct cb;
39324+ static struct lapb_register_struct cb = {
39325+ .connect_confirmation = x25_connected,
39326+ .connect_indication = x25_connected,
39327+ .disconnect_confirmation = x25_disconnected,
39328+ .disconnect_indication = x25_disconnected,
39329+ .data_indication = x25_data_indication,
39330+ .data_transmit = x25_data_transmit
39331+ };
39332 int result;
39333
39334- cb.connect_confirmation = x25_connected;
39335- cb.connect_indication = x25_connected;
39336- cb.disconnect_confirmation = x25_disconnected;
39337- cb.disconnect_indication = x25_disconnected;
39338- cb.data_indication = x25_data_indication;
39339- cb.data_transmit = x25_data_transmit;
39340-
39341 result = lapb_register(dev, &cb);
39342 if (result != LAPB_OK)
39343 return result;
39344diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39345index 5ad287c..783b020 100644
39346--- a/drivers/net/wimax/i2400m/usb-fw.c
39347+++ b/drivers/net/wimax/i2400m/usb-fw.c
39348@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39349 int do_autopm = 1;
39350 DECLARE_COMPLETION_ONSTACK(notif_completion);
39351
39352+ pax_track_stack();
39353+
39354 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39355 i2400m, ack, ack_size);
39356 BUG_ON(_ack == i2400m->bm_ack_buf);
39357diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39358index 6c26840..62c97c3 100644
39359--- a/drivers/net/wireless/airo.c
39360+++ b/drivers/net/wireless/airo.c
39361@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39362 BSSListElement * loop_net;
39363 BSSListElement * tmp_net;
39364
39365+ pax_track_stack();
39366+
39367 /* Blow away current list of scan results */
39368 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39369 list_move_tail (&loop_net->list, &ai->network_free_list);
39370@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39371 WepKeyRid wkr;
39372 int rc;
39373
39374+ pax_track_stack();
39375+
39376 memset( &mySsid, 0, sizeof( mySsid ) );
39377 kfree (ai->flash);
39378 ai->flash = NULL;
39379@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39380 __le32 *vals = stats.vals;
39381 int len;
39382
39383+ pax_track_stack();
39384+
39385 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39386 return -ENOMEM;
39387 data = (struct proc_data *)file->private_data;
39388@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39389 /* If doLoseSync is not 1, we won't do a Lose Sync */
39390 int doLoseSync = -1;
39391
39392+ pax_track_stack();
39393+
39394 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39395 return -ENOMEM;
39396 data = (struct proc_data *)file->private_data;
39397@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39398 int i;
39399 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39400
39401+ pax_track_stack();
39402+
39403 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39404 if (!qual)
39405 return -ENOMEM;
39406@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39407 CapabilityRid cap_rid;
39408 __le32 *vals = stats_rid.vals;
39409
39410+ pax_track_stack();
39411+
39412 /* Get stats out of the card */
39413 clear_bit(JOB_WSTATS, &local->jobs);
39414 if (local->power.event) {
39415diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39416index 747508c..82e965d 100644
39417--- a/drivers/net/wireless/ath/ath5k/debug.c
39418+++ b/drivers/net/wireless/ath/ath5k/debug.c
39419@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39420 unsigned int v;
39421 u64 tsf;
39422
39423+ pax_track_stack();
39424+
39425 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39426 len += snprintf(buf+len, sizeof(buf)-len,
39427 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39428@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39429 unsigned int len = 0;
39430 unsigned int i;
39431
39432+ pax_track_stack();
39433+
39434 len += snprintf(buf+len, sizeof(buf)-len,
39435 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39436
39437diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39438index 2be4c22..593b1eb 100644
39439--- a/drivers/net/wireless/ath/ath9k/debug.c
39440+++ b/drivers/net/wireless/ath/ath9k/debug.c
39441@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39442 char buf[512];
39443 unsigned int len = 0;
39444
39445+ pax_track_stack();
39446+
39447 len += snprintf(buf + len, sizeof(buf) - len,
39448 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39449 len += snprintf(buf + len, sizeof(buf) - len,
39450@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39451 int i;
39452 u8 addr[ETH_ALEN];
39453
39454+ pax_track_stack();
39455+
39456 len += snprintf(buf + len, sizeof(buf) - len,
39457 "primary: %s (%s chan=%d ht=%d)\n",
39458 wiphy_name(sc->pri_wiphy->hw->wiphy),
39459diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39460index 80b19a4..dab3a45 100644
39461--- a/drivers/net/wireless/b43/debugfs.c
39462+++ b/drivers/net/wireless/b43/debugfs.c
39463@@ -43,7 +43,7 @@ static struct dentry *rootdir;
39464 struct b43_debugfs_fops {
39465 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39466 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39467- struct file_operations fops;
39468+ const struct file_operations fops;
39469 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39470 size_t file_struct_offset;
39471 };
39472diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39473index 1f85ac5..c99b4b4 100644
39474--- a/drivers/net/wireless/b43legacy/debugfs.c
39475+++ b/drivers/net/wireless/b43legacy/debugfs.c
39476@@ -44,7 +44,7 @@ static struct dentry *rootdir;
39477 struct b43legacy_debugfs_fops {
39478 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39479 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39480- struct file_operations fops;
39481+ const struct file_operations fops;
39482 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39483 size_t file_struct_offset;
39484 /* Take wl->irq_lock before calling read/write? */
39485diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39486index 43102bf..3b569c3 100644
39487--- a/drivers/net/wireless/ipw2x00/ipw2100.c
39488+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39489@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39490 int err;
39491 DECLARE_SSID_BUF(ssid);
39492
39493+ pax_track_stack();
39494+
39495 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39496
39497 if (ssid_len)
39498@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39499 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39500 int err;
39501
39502+ pax_track_stack();
39503+
39504 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39505 idx, keylen, len);
39506
39507diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39508index 282b1f7..169f0cf 100644
39509--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39510+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39511@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39512 unsigned long flags;
39513 DECLARE_SSID_BUF(ssid);
39514
39515+ pax_track_stack();
39516+
39517 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39518 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39519 print_ssid(ssid, info_element->data, info_element->len),
39520diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39521index 950267a..80d5fd2 100644
39522--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39523+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39524@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39525 },
39526 };
39527
39528-static struct iwl_ops iwl1000_ops = {
39529+static const struct iwl_ops iwl1000_ops = {
39530 .ucode = &iwl5000_ucode,
39531 .lib = &iwl1000_lib,
39532 .hcmd = &iwl5000_hcmd,
39533diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39534index 56bfcc3..b348020 100644
39535--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39536+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39537@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39538 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39539 };
39540
39541-static struct iwl_ops iwl3945_ops = {
39542+static const struct iwl_ops iwl3945_ops = {
39543 .ucode = &iwl3945_ucode,
39544 .lib = &iwl3945_lib,
39545 .hcmd = &iwl3945_hcmd,
39546diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39547index 585b8d4..e142963 100644
39548--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39549+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39550@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39551 },
39552 };
39553
39554-static struct iwl_ops iwl4965_ops = {
39555+static const struct iwl_ops iwl4965_ops = {
39556 .ucode = &iwl4965_ucode,
39557 .lib = &iwl4965_lib,
39558 .hcmd = &iwl4965_hcmd,
39559diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39560index 1f423f2..e37c192 100644
39561--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39562+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39563@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39564 },
39565 };
39566
39567-struct iwl_ops iwl5000_ops = {
39568+const struct iwl_ops iwl5000_ops = {
39569 .ucode = &iwl5000_ucode,
39570 .lib = &iwl5000_lib,
39571 .hcmd = &iwl5000_hcmd,
39572 .utils = &iwl5000_hcmd_utils,
39573 };
39574
39575-static struct iwl_ops iwl5150_ops = {
39576+static const struct iwl_ops iwl5150_ops = {
39577 .ucode = &iwl5000_ucode,
39578 .lib = &iwl5150_lib,
39579 .hcmd = &iwl5000_hcmd,
39580diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39581index 1473452..f07d5e1 100644
39582--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39583+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39584@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39585 .calc_rssi = iwl5000_calc_rssi,
39586 };
39587
39588-static struct iwl_ops iwl6000_ops = {
39589+static const struct iwl_ops iwl6000_ops = {
39590 .ucode = &iwl5000_ucode,
39591 .lib = &iwl6000_lib,
39592 .hcmd = &iwl5000_hcmd,
39593diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39594index 1a3dfa2..b3e0a61 100644
39595--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39596+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39597@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39598 u8 active_index = 0;
39599 s32 tpt = 0;
39600
39601+ pax_track_stack();
39602+
39603 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39604
39605 if (!ieee80211_is_data(hdr->frame_control) ||
39606@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39607 u8 valid_tx_ant = 0;
39608 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39609
39610+ pax_track_stack();
39611+
39612 /* Override starting rate (index 0) if needed for debug purposes */
39613 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39614
39615diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39616index 0e56d78..6a3c107 100644
39617--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39618+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39619@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39620 if (iwl_debug_level & IWL_DL_INFO)
39621 dev_printk(KERN_DEBUG, &(pdev->dev),
39622 "Disabling hw_scan\n");
39623- iwl_hw_ops.hw_scan = NULL;
39624+ pax_open_kernel();
39625+ *(void **)&iwl_hw_ops.hw_scan = NULL;
39626+ pax_close_kernel();
39627 }
39628
39629 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39630diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39631index cbc6290..eb323d7 100644
39632--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39633+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39634@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39635 #endif
39636
39637 #else
39638-#define IWL_DEBUG(__priv, level, fmt, args...)
39639-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39640+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39641+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39642 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39643 void *p, u32 len)
39644 {}
39645diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39646index a198bcf..8e68233 100644
39647--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39648+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39649@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39650 int pos = 0;
39651 const size_t bufsz = sizeof(buf);
39652
39653+ pax_track_stack();
39654+
39655 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39656 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39657 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39658@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39659 const size_t bufsz = sizeof(buf);
39660 ssize_t ret;
39661
39662+ pax_track_stack();
39663+
39664 for (i = 0; i < AC_NUM; i++) {
39665 pos += scnprintf(buf + pos, bufsz - pos,
39666 "\tcw_min\tcw_max\taifsn\ttxop\n");
39667diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39668index 3539ea4..b174bfa 100644
39669--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39670+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39671@@ -68,7 +68,7 @@ struct iwl_tx_queue;
39672
39673 /* shared structures from iwl-5000.c */
39674 extern struct iwl_mod_params iwl50_mod_params;
39675-extern struct iwl_ops iwl5000_ops;
39676+extern const struct iwl_ops iwl5000_ops;
39677 extern struct iwl_ucode_ops iwl5000_ucode;
39678 extern struct iwl_lib_ops iwl5000_lib;
39679 extern struct iwl_hcmd_ops iwl5000_hcmd;
39680diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39681index 619590d..69235ee 100644
39682--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39683+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39684@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39685 */
39686 if (iwl3945_mod_params.disable_hw_scan) {
39687 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39688- iwl3945_hw_ops.hw_scan = NULL;
39689+ pax_open_kernel();
39690+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39691+ pax_close_kernel();
39692 }
39693
39694
39695diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39696index 1465379..fe4d78b 100644
39697--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39698+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39699@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39700 int buf_len = 512;
39701 size_t len = 0;
39702
39703+ pax_track_stack();
39704+
39705 if (*ppos != 0)
39706 return 0;
39707 if (count < sizeof(buf))
39708diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39709index 893a55c..7f66a50 100644
39710--- a/drivers/net/wireless/libertas/debugfs.c
39711+++ b/drivers/net/wireless/libertas/debugfs.c
39712@@ -708,7 +708,7 @@ out_unlock:
39713 struct lbs_debugfs_files {
39714 const char *name;
39715 int perm;
39716- struct file_operations fops;
39717+ const struct file_operations fops;
39718 };
39719
39720 static const struct lbs_debugfs_files debugfs_files[] = {
39721diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39722index 2ecbedb..42704f0 100644
39723--- a/drivers/net/wireless/rndis_wlan.c
39724+++ b/drivers/net/wireless/rndis_wlan.c
39725@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39726
39727 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39728
39729- if (rts_threshold < 0 || rts_threshold > 2347)
39730+ if (rts_threshold > 2347)
39731 rts_threshold = 2347;
39732
39733 tmp = cpu_to_le32(rts_threshold);
39734diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39735index 334ccd6..47f8944 100644
39736--- a/drivers/oprofile/buffer_sync.c
39737+++ b/drivers/oprofile/buffer_sync.c
39738@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39739 if (cookie == NO_COOKIE)
39740 offset = pc;
39741 if (cookie == INVALID_COOKIE) {
39742- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39743+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39744 offset = pc;
39745 }
39746 if (cookie != last_cookie) {
39747@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39748 /* add userspace sample */
39749
39750 if (!mm) {
39751- atomic_inc(&oprofile_stats.sample_lost_no_mm);
39752+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39753 return 0;
39754 }
39755
39756 cookie = lookup_dcookie(mm, s->eip, &offset);
39757
39758 if (cookie == INVALID_COOKIE) {
39759- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39760+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39761 return 0;
39762 }
39763
39764@@ -562,7 +562,7 @@ void sync_buffer(int cpu)
39765 /* ignore backtraces if failed to add a sample */
39766 if (state == sb_bt_start) {
39767 state = sb_bt_ignore;
39768- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39769+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39770 }
39771 }
39772 release_mm(mm);
39773diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39774index 5df60a6..72f5c1c 100644
39775--- a/drivers/oprofile/event_buffer.c
39776+++ b/drivers/oprofile/event_buffer.c
39777@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39778 }
39779
39780 if (buffer_pos == buffer_size) {
39781- atomic_inc(&oprofile_stats.event_lost_overflow);
39782+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39783 return;
39784 }
39785
39786diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39787index dc8a042..fe5f315 100644
39788--- a/drivers/oprofile/oprof.c
39789+++ b/drivers/oprofile/oprof.c
39790@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39791 if (oprofile_ops.switch_events())
39792 return;
39793
39794- atomic_inc(&oprofile_stats.multiplex_counter);
39795+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39796 start_switch_worker();
39797 }
39798
39799diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39800index 61689e8..387f7f8 100644
39801--- a/drivers/oprofile/oprofile_stats.c
39802+++ b/drivers/oprofile/oprofile_stats.c
39803@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39804 cpu_buf->sample_invalid_eip = 0;
39805 }
39806
39807- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39808- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39809- atomic_set(&oprofile_stats.event_lost_overflow, 0);
39810- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39811- atomic_set(&oprofile_stats.multiplex_counter, 0);
39812+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39813+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39814+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39815+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39816+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39817 }
39818
39819
39820diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39821index 0b54e46..a37c527 100644
39822--- a/drivers/oprofile/oprofile_stats.h
39823+++ b/drivers/oprofile/oprofile_stats.h
39824@@ -13,11 +13,11 @@
39825 #include <asm/atomic.h>
39826
39827 struct oprofile_stat_struct {
39828- atomic_t sample_lost_no_mm;
39829- atomic_t sample_lost_no_mapping;
39830- atomic_t bt_lost_no_mapping;
39831- atomic_t event_lost_overflow;
39832- atomic_t multiplex_counter;
39833+ atomic_unchecked_t sample_lost_no_mm;
39834+ atomic_unchecked_t sample_lost_no_mapping;
39835+ atomic_unchecked_t bt_lost_no_mapping;
39836+ atomic_unchecked_t event_lost_overflow;
39837+ atomic_unchecked_t multiplex_counter;
39838 };
39839
39840 extern struct oprofile_stat_struct oprofile_stats;
39841diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39842index 2766a6d..80c77e2 100644
39843--- a/drivers/oprofile/oprofilefs.c
39844+++ b/drivers/oprofile/oprofilefs.c
39845@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
39846
39847
39848 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39849- char const *name, atomic_t *val)
39850+ char const *name, atomic_unchecked_t *val)
39851 {
39852 struct dentry *d = __oprofilefs_create_file(sb, root, name,
39853 &atomic_ro_fops, 0444);
39854diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
39855index 13a64bc..ad62835 100644
39856--- a/drivers/parisc/pdc_stable.c
39857+++ b/drivers/parisc/pdc_stable.c
39858@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
39859 return ret;
39860 }
39861
39862-static struct sysfs_ops pdcspath_attr_ops = {
39863+static const struct sysfs_ops pdcspath_attr_ops = {
39864 .show = pdcspath_attr_show,
39865 .store = pdcspath_attr_store,
39866 };
39867diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
39868index 8eefe56..40751a7 100644
39869--- a/drivers/parport/procfs.c
39870+++ b/drivers/parport/procfs.c
39871@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
39872
39873 *ppos += len;
39874
39875- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
39876+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
39877 }
39878
39879 #ifdef CONFIG_PARPORT_1284
39880@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
39881
39882 *ppos += len;
39883
39884- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
39885+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
39886 }
39887 #endif /* IEEE1284.3 support. */
39888
39889diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
39890index 73e7d8e..c80f3d2 100644
39891--- a/drivers/pci/hotplug/acpiphp_glue.c
39892+++ b/drivers/pci/hotplug/acpiphp_glue.c
39893@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
39894 }
39895
39896
39897-static struct acpi_dock_ops acpiphp_dock_ops = {
39898+static const struct acpi_dock_ops acpiphp_dock_ops = {
39899 .handler = handle_hotplug_event_func,
39900 };
39901
39902diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
39903index 9fff878..ad0ad53 100644
39904--- a/drivers/pci/hotplug/cpci_hotplug.h
39905+++ b/drivers/pci/hotplug/cpci_hotplug.h
39906@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
39907 int (*hardware_test) (struct slot* slot, u32 value);
39908 u8 (*get_power) (struct slot* slot);
39909 int (*set_power) (struct slot* slot, int value);
39910-};
39911+} __no_const;
39912
39913 struct cpci_hp_controller {
39914 unsigned int irq;
39915diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
39916index 76ba8a1..20ca857 100644
39917--- a/drivers/pci/hotplug/cpqphp_nvram.c
39918+++ b/drivers/pci/hotplug/cpqphp_nvram.c
39919@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
39920
39921 void compaq_nvram_init (void __iomem *rom_start)
39922 {
39923+
39924+#ifndef CONFIG_PAX_KERNEXEC
39925 if (rom_start) {
39926 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
39927 }
39928+#endif
39929+
39930 dbg("int15 entry = %p\n", compaq_int15_entry_point);
39931
39932 /* initialize our int15 lock */
39933diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
39934index 6151389..0a894ef 100644
39935--- a/drivers/pci/hotplug/fakephp.c
39936+++ b/drivers/pci/hotplug/fakephp.c
39937@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
39938 }
39939
39940 static struct kobj_type legacy_ktype = {
39941- .sysfs_ops = &(struct sysfs_ops){
39942+ .sysfs_ops = &(const struct sysfs_ops){
39943 .store = legacy_store, .show = legacy_show
39944 },
39945 .release = &legacy_release,
39946diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
39947index 5b680df..fe05b7e 100644
39948--- a/drivers/pci/intel-iommu.c
39949+++ b/drivers/pci/intel-iommu.c
39950@@ -2643,7 +2643,7 @@ error:
39951 return 0;
39952 }
39953
39954-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
39955+dma_addr_t intel_map_page(struct device *dev, struct page *page,
39956 unsigned long offset, size_t size,
39957 enum dma_data_direction dir,
39958 struct dma_attrs *attrs)
39959@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
39960 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
39961 }
39962
39963-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39964+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39965 size_t size, enum dma_data_direction dir,
39966 struct dma_attrs *attrs)
39967 {
39968@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39969 }
39970 }
39971
39972-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39973+void *intel_alloc_coherent(struct device *hwdev, size_t size,
39974 dma_addr_t *dma_handle, gfp_t flags)
39975 {
39976 void *vaddr;
39977@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39978 return NULL;
39979 }
39980
39981-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39982+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39983 dma_addr_t dma_handle)
39984 {
39985 int order;
39986@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39987 free_pages((unsigned long)vaddr, order);
39988 }
39989
39990-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39991+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39992 int nelems, enum dma_data_direction dir,
39993 struct dma_attrs *attrs)
39994 {
39995@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
39996 return nelems;
39997 }
39998
39999-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40000+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40001 enum dma_data_direction dir, struct dma_attrs *attrs)
40002 {
40003 int i;
40004@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40005 return nelems;
40006 }
40007
40008-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40009+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40010 {
40011 return !dma_addr;
40012 }
40013
40014-struct dma_map_ops intel_dma_ops = {
40015+const struct dma_map_ops intel_dma_ops = {
40016 .alloc_coherent = intel_alloc_coherent,
40017 .free_coherent = intel_free_coherent,
40018 .map_sg = intel_map_sg,
40019diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40020index 5b7056c..607bc94 100644
40021--- a/drivers/pci/pcie/aspm.c
40022+++ b/drivers/pci/pcie/aspm.c
40023@@ -27,9 +27,9 @@
40024 #define MODULE_PARAM_PREFIX "pcie_aspm."
40025
40026 /* Note: those are not register definitions */
40027-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40028-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40029-#define ASPM_STATE_L1 (4) /* L1 state */
40030+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40031+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40032+#define ASPM_STATE_L1 (4U) /* L1 state */
40033 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40034 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40035
40036diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40037index 8105e32..ca10419 100644
40038--- a/drivers/pci/probe.c
40039+++ b/drivers/pci/probe.c
40040@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40041 return ret;
40042 }
40043
40044-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40045+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40046 struct device_attribute *attr,
40047 char *buf)
40048 {
40049 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40050 }
40051
40052-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40053+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40054 struct device_attribute *attr,
40055 char *buf)
40056 {
40057diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40058index a03ad8c..024b0da 100644
40059--- a/drivers/pci/proc.c
40060+++ b/drivers/pci/proc.c
40061@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40062 static int __init pci_proc_init(void)
40063 {
40064 struct pci_dev *dev = NULL;
40065+
40066+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40067+#ifdef CONFIG_GRKERNSEC_PROC_USER
40068+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40069+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40070+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40071+#endif
40072+#else
40073 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40074+#endif
40075 proc_create("devices", 0, proc_bus_pci_dir,
40076 &proc_bus_pci_dev_operations);
40077 proc_initialized = 1;
40078diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40079index 8c02b6c..5584d8e 100644
40080--- a/drivers/pci/slot.c
40081+++ b/drivers/pci/slot.c
40082@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40083 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40084 }
40085
40086-static struct sysfs_ops pci_slot_sysfs_ops = {
40087+static const struct sysfs_ops pci_slot_sysfs_ops = {
40088 .show = pci_slot_attr_show,
40089 .store = pci_slot_attr_store,
40090 };
40091diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40092index 30cf71d2..50938f1 100644
40093--- a/drivers/pcmcia/pcmcia_ioctl.c
40094+++ b/drivers/pcmcia/pcmcia_ioctl.c
40095@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40096 return -EFAULT;
40097 }
40098 }
40099- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40100+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40101 if (!buf)
40102 return -ENOMEM;
40103
40104diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40105index 52183c4..b224c69 100644
40106--- a/drivers/platform/x86/acer-wmi.c
40107+++ b/drivers/platform/x86/acer-wmi.c
40108@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40109 return 0;
40110 }
40111
40112-static struct backlight_ops acer_bl_ops = {
40113+static const struct backlight_ops acer_bl_ops = {
40114 .get_brightness = read_brightness,
40115 .update_status = update_bl_status,
40116 };
40117diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40118index 767cb61..a87380b 100644
40119--- a/drivers/platform/x86/asus-laptop.c
40120+++ b/drivers/platform/x86/asus-laptop.c
40121@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40122 */
40123 static int read_brightness(struct backlight_device *bd);
40124 static int update_bl_status(struct backlight_device *bd);
40125-static struct backlight_ops asusbl_ops = {
40126+static const struct backlight_ops asusbl_ops = {
40127 .get_brightness = read_brightness,
40128 .update_status = update_bl_status,
40129 };
40130diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40131index d66c07a..a4abaac 100644
40132--- a/drivers/platform/x86/asus_acpi.c
40133+++ b/drivers/platform/x86/asus_acpi.c
40134@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40135 return 0;
40136 }
40137
40138-static struct backlight_ops asus_backlight_data = {
40139+static const struct backlight_ops asus_backlight_data = {
40140 .get_brightness = read_brightness,
40141 .update_status = set_brightness_status,
40142 };
40143diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40144index 11003bb..550ff1b 100644
40145--- a/drivers/platform/x86/compal-laptop.c
40146+++ b/drivers/platform/x86/compal-laptop.c
40147@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40148 return set_lcd_level(b->props.brightness);
40149 }
40150
40151-static struct backlight_ops compalbl_ops = {
40152+static const struct backlight_ops compalbl_ops = {
40153 .get_brightness = bl_get_brightness,
40154 .update_status = bl_update_status,
40155 };
40156diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40157index 07a74da..9dc99fa 100644
40158--- a/drivers/platform/x86/dell-laptop.c
40159+++ b/drivers/platform/x86/dell-laptop.c
40160@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40161 return buffer.output[1];
40162 }
40163
40164-static struct backlight_ops dell_ops = {
40165+static const struct backlight_ops dell_ops = {
40166 .get_brightness = dell_get_intensity,
40167 .update_status = dell_send_intensity,
40168 };
40169diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40170index c533b1c..5c81f22 100644
40171--- a/drivers/platform/x86/eeepc-laptop.c
40172+++ b/drivers/platform/x86/eeepc-laptop.c
40173@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40174 */
40175 static int read_brightness(struct backlight_device *bd);
40176 static int update_bl_status(struct backlight_device *bd);
40177-static struct backlight_ops eeepcbl_ops = {
40178+static const struct backlight_ops eeepcbl_ops = {
40179 .get_brightness = read_brightness,
40180 .update_status = update_bl_status,
40181 };
40182diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40183index bcd4ba8..a249b35 100644
40184--- a/drivers/platform/x86/fujitsu-laptop.c
40185+++ b/drivers/platform/x86/fujitsu-laptop.c
40186@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40187 return ret;
40188 }
40189
40190-static struct backlight_ops fujitsubl_ops = {
40191+static const struct backlight_ops fujitsubl_ops = {
40192 .get_brightness = bl_get_brightness,
40193 .update_status = bl_update_status,
40194 };
40195diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40196index 759763d..1093ba2 100644
40197--- a/drivers/platform/x86/msi-laptop.c
40198+++ b/drivers/platform/x86/msi-laptop.c
40199@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40200 return set_lcd_level(b->props.brightness);
40201 }
40202
40203-static struct backlight_ops msibl_ops = {
40204+static const struct backlight_ops msibl_ops = {
40205 .get_brightness = bl_get_brightness,
40206 .update_status = bl_update_status,
40207 };
40208diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40209index fe7cf01..9012d8d 100644
40210--- a/drivers/platform/x86/panasonic-laptop.c
40211+++ b/drivers/platform/x86/panasonic-laptop.c
40212@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40213 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40214 }
40215
40216-static struct backlight_ops pcc_backlight_ops = {
40217+static const struct backlight_ops pcc_backlight_ops = {
40218 .get_brightness = bl_get,
40219 .update_status = bl_set_status,
40220 };
40221diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40222index a2a742c..b37e25e 100644
40223--- a/drivers/platform/x86/sony-laptop.c
40224+++ b/drivers/platform/x86/sony-laptop.c
40225@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40226 }
40227
40228 static struct backlight_device *sony_backlight_device;
40229-static struct backlight_ops sony_backlight_ops = {
40230+static const struct backlight_ops sony_backlight_ops = {
40231 .update_status = sony_backlight_update_status,
40232 .get_brightness = sony_backlight_get_brightness,
40233 };
40234diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40235index 68271ae..5e8fb10 100644
40236--- a/drivers/platform/x86/thinkpad_acpi.c
40237+++ b/drivers/platform/x86/thinkpad_acpi.c
40238@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40239 return 0;
40240 }
40241
40242-void static hotkey_mask_warn_incomplete_mask(void)
40243+static void hotkey_mask_warn_incomplete_mask(void)
40244 {
40245 /* log only what the user can fix... */
40246 const u32 wantedmask = hotkey_driver_mask &
40247@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40248 BACKLIGHT_UPDATE_HOTKEY);
40249 }
40250
40251-static struct backlight_ops ibm_backlight_data = {
40252+static const struct backlight_ops ibm_backlight_data = {
40253 .get_brightness = brightness_get,
40254 .update_status = brightness_update_status,
40255 };
40256diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40257index 51c0a8b..0786629 100644
40258--- a/drivers/platform/x86/toshiba_acpi.c
40259+++ b/drivers/platform/x86/toshiba_acpi.c
40260@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40261 return AE_OK;
40262 }
40263
40264-static struct backlight_ops toshiba_backlight_data = {
40265+static const struct backlight_ops toshiba_backlight_data = {
40266 .get_brightness = get_lcd,
40267 .update_status = set_lcd_status,
40268 };
40269diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40270index fc83783c..cf370d7 100644
40271--- a/drivers/pnp/pnpbios/bioscalls.c
40272+++ b/drivers/pnp/pnpbios/bioscalls.c
40273@@ -60,7 +60,7 @@ do { \
40274 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40275 } while(0)
40276
40277-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40278+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40279 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40280
40281 /*
40282@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40283
40284 cpu = get_cpu();
40285 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40286+
40287+ pax_open_kernel();
40288 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40289+ pax_close_kernel();
40290
40291 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40292 spin_lock_irqsave(&pnp_bios_lock, flags);
40293@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40294 :"memory");
40295 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40296
40297+ pax_open_kernel();
40298 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40299+ pax_close_kernel();
40300+
40301 put_cpu();
40302
40303 /* If we get here and this is set then the PnP BIOS faulted on us. */
40304@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40305 return status;
40306 }
40307
40308-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40309+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40310 {
40311 int i;
40312
40313@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40314 pnp_bios_callpoint.offset = header->fields.pm16offset;
40315 pnp_bios_callpoint.segment = PNP_CS16;
40316
40317+ pax_open_kernel();
40318+
40319 for_each_possible_cpu(i) {
40320 struct desc_struct *gdt = get_cpu_gdt_table(i);
40321 if (!gdt)
40322@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40323 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40324 (unsigned long)__va(header->fields.pm16dseg));
40325 }
40326+
40327+ pax_close_kernel();
40328 }
40329diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40330index ba97654..66b99d4 100644
40331--- a/drivers/pnp/resource.c
40332+++ b/drivers/pnp/resource.c
40333@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40334 return 1;
40335
40336 /* check if the resource is valid */
40337- if (*irq < 0 || *irq > 15)
40338+ if (*irq > 15)
40339 return 0;
40340
40341 /* check if the resource is reserved */
40342@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40343 return 1;
40344
40345 /* check if the resource is valid */
40346- if (*dma < 0 || *dma == 4 || *dma > 7)
40347+ if (*dma == 4 || *dma > 7)
40348 return 0;
40349
40350 /* check if the resource is reserved */
40351diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40352index 62bb981..24a2dc9 100644
40353--- a/drivers/power/bq27x00_battery.c
40354+++ b/drivers/power/bq27x00_battery.c
40355@@ -44,7 +44,7 @@ struct bq27x00_device_info;
40356 struct bq27x00_access_methods {
40357 int (*read)(u8 reg, int *rt_value, int b_single,
40358 struct bq27x00_device_info *di);
40359-};
40360+} __no_const;
40361
40362 struct bq27x00_device_info {
40363 struct device *dev;
40364diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40365index 62227cd..b5b538b 100644
40366--- a/drivers/rtc/rtc-dev.c
40367+++ b/drivers/rtc/rtc-dev.c
40368@@ -14,6 +14,7 @@
40369 #include <linux/module.h>
40370 #include <linux/rtc.h>
40371 #include <linux/sched.h>
40372+#include <linux/grsecurity.h>
40373 #include "rtc-core.h"
40374
40375 static dev_t rtc_devt;
40376@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40377 if (copy_from_user(&tm, uarg, sizeof(tm)))
40378 return -EFAULT;
40379
40380+ gr_log_timechange();
40381+
40382 return rtc_set_time(rtc, &tm);
40383
40384 case RTC_PIE_ON:
40385diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40386index 968e3c7..fbc637a 100644
40387--- a/drivers/s390/cio/qdio_perf.c
40388+++ b/drivers/s390/cio/qdio_perf.c
40389@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40390 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40391 {
40392 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40393- (long)atomic_long_read(&perf_stats.qdio_int));
40394+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40395 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40396- (long)atomic_long_read(&perf_stats.pci_int));
40397+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40398 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40399- (long)atomic_long_read(&perf_stats.thin_int));
40400+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40401 seq_printf(m, "\n");
40402 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40403- (long)atomic_long_read(&perf_stats.tasklet_inbound));
40404+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40405 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40406- (long)atomic_long_read(&perf_stats.tasklet_outbound));
40407+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40408 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40409- (long)atomic_long_read(&perf_stats.tasklet_thinint),
40410- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40411+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40412+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40413 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40414- (long)atomic_long_read(&perf_stats.thinint_inbound),
40415- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40416+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40417+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40418 seq_printf(m, "\n");
40419 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40420- (long)atomic_long_read(&perf_stats.siga_in));
40421+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40422 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40423- (long)atomic_long_read(&perf_stats.siga_out));
40424+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40425 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40426- (long)atomic_long_read(&perf_stats.siga_sync));
40427+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40428 seq_printf(m, "\n");
40429 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40430- (long)atomic_long_read(&perf_stats.inbound_handler));
40431+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40432 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40433- (long)atomic_long_read(&perf_stats.outbound_handler));
40434+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40435 seq_printf(m, "\n");
40436 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40437- (long)atomic_long_read(&perf_stats.fast_requeue));
40438+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40439 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40440- (long)atomic_long_read(&perf_stats.outbound_target_full));
40441+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40442 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40443- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40444+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40445 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40446- (long)atomic_long_read(&perf_stats.debug_stop_polling));
40447+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40448 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40449- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40450+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40451 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40452- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40453- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40454+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40455+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40456 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40457- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40458- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40459+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40460+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40461 seq_printf(m, "\n");
40462 return 0;
40463 }
40464diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40465index ff4504c..b3604c3 100644
40466--- a/drivers/s390/cio/qdio_perf.h
40467+++ b/drivers/s390/cio/qdio_perf.h
40468@@ -13,46 +13,46 @@
40469
40470 struct qdio_perf_stats {
40471 /* interrupt handler calls */
40472- atomic_long_t qdio_int;
40473- atomic_long_t pci_int;
40474- atomic_long_t thin_int;
40475+ atomic_long_unchecked_t qdio_int;
40476+ atomic_long_unchecked_t pci_int;
40477+ atomic_long_unchecked_t thin_int;
40478
40479 /* tasklet runs */
40480- atomic_long_t tasklet_inbound;
40481- atomic_long_t tasklet_outbound;
40482- atomic_long_t tasklet_thinint;
40483- atomic_long_t tasklet_thinint_loop;
40484- atomic_long_t thinint_inbound;
40485- atomic_long_t thinint_inbound_loop;
40486- atomic_long_t thinint_inbound_loop2;
40487+ atomic_long_unchecked_t tasklet_inbound;
40488+ atomic_long_unchecked_t tasklet_outbound;
40489+ atomic_long_unchecked_t tasklet_thinint;
40490+ atomic_long_unchecked_t tasklet_thinint_loop;
40491+ atomic_long_unchecked_t thinint_inbound;
40492+ atomic_long_unchecked_t thinint_inbound_loop;
40493+ atomic_long_unchecked_t thinint_inbound_loop2;
40494
40495 /* signal adapter calls */
40496- atomic_long_t siga_out;
40497- atomic_long_t siga_in;
40498- atomic_long_t siga_sync;
40499+ atomic_long_unchecked_t siga_out;
40500+ atomic_long_unchecked_t siga_in;
40501+ atomic_long_unchecked_t siga_sync;
40502
40503 /* misc */
40504- atomic_long_t inbound_handler;
40505- atomic_long_t outbound_handler;
40506- atomic_long_t fast_requeue;
40507- atomic_long_t outbound_target_full;
40508+ atomic_long_unchecked_t inbound_handler;
40509+ atomic_long_unchecked_t outbound_handler;
40510+ atomic_long_unchecked_t fast_requeue;
40511+ atomic_long_unchecked_t outbound_target_full;
40512
40513 /* for debugging */
40514- atomic_long_t debug_tl_out_timer;
40515- atomic_long_t debug_stop_polling;
40516- atomic_long_t debug_eqbs_all;
40517- atomic_long_t debug_eqbs_incomplete;
40518- atomic_long_t debug_sqbs_all;
40519- atomic_long_t debug_sqbs_incomplete;
40520+ atomic_long_unchecked_t debug_tl_out_timer;
40521+ atomic_long_unchecked_t debug_stop_polling;
40522+ atomic_long_unchecked_t debug_eqbs_all;
40523+ atomic_long_unchecked_t debug_eqbs_incomplete;
40524+ atomic_long_unchecked_t debug_sqbs_all;
40525+ atomic_long_unchecked_t debug_sqbs_incomplete;
40526 };
40527
40528 extern struct qdio_perf_stats perf_stats;
40529 extern int qdio_performance_stats;
40530
40531-static inline void qdio_perf_stat_inc(atomic_long_t *count)
40532+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40533 {
40534 if (qdio_performance_stats)
40535- atomic_long_inc(count);
40536+ atomic_long_inc_unchecked(count);
40537 }
40538
40539 int qdio_setup_perf_stats(void);
40540diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40541index 1ddcf40..a85f062 100644
40542--- a/drivers/scsi/BusLogic.c
40543+++ b/drivers/scsi/BusLogic.c
40544@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40545 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40546 *PrototypeHostAdapter)
40547 {
40548+ pax_track_stack();
40549+
40550 /*
40551 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40552 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40553diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40554index cdbdec9..b7d560b 100644
40555--- a/drivers/scsi/aacraid/aacraid.h
40556+++ b/drivers/scsi/aacraid/aacraid.h
40557@@ -471,7 +471,7 @@ struct adapter_ops
40558 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40559 /* Administrative operations */
40560 int (*adapter_comm)(struct aac_dev * dev, int comm);
40561-};
40562+} __no_const;
40563
40564 /*
40565 * Define which interrupt handler needs to be installed
40566diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40567index a5b8e7b..a6a0e43 100644
40568--- a/drivers/scsi/aacraid/commctrl.c
40569+++ b/drivers/scsi/aacraid/commctrl.c
40570@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40571 u32 actual_fibsize64, actual_fibsize = 0;
40572 int i;
40573
40574+ pax_track_stack();
40575
40576 if (dev->in_reset) {
40577 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40578diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40579index 9b97c3e..f099725 100644
40580--- a/drivers/scsi/aacraid/linit.c
40581+++ b/drivers/scsi/aacraid/linit.c
40582@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40583 #elif defined(__devinitconst)
40584 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40585 #else
40586-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40587+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40588 #endif
40589 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40590 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40591diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40592index 996f722..9127845 100644
40593--- a/drivers/scsi/aic94xx/aic94xx_init.c
40594+++ b/drivers/scsi/aic94xx/aic94xx_init.c
40595@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40596 flash_error_table[i].reason);
40597 }
40598
40599-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40600+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40601 asd_show_update_bios, asd_store_update_bios);
40602
40603 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40604@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40605 .lldd_control_phy = asd_control_phy,
40606 };
40607
40608-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40609+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40610 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40611 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40612 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40613diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40614index 58efd4b..cb48dc7 100644
40615--- a/drivers/scsi/bfa/bfa_ioc.h
40616+++ b/drivers/scsi/bfa/bfa_ioc.h
40617@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40618 bfa_ioc_disable_cbfn_t disable_cbfn;
40619 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40620 bfa_ioc_reset_cbfn_t reset_cbfn;
40621-};
40622+} __no_const;
40623
40624 /**
40625 * Heartbeat failure notification queue element.
40626diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40627index 7ad177e..5503586 100644
40628--- a/drivers/scsi/bfa/bfa_iocfc.h
40629+++ b/drivers/scsi/bfa/bfa_iocfc.h
40630@@ -61,7 +61,7 @@ struct bfa_hwif_s {
40631 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40632 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40633 u32 *nvecs, u32 *maxvec);
40634-};
40635+} __no_const;
40636 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40637
40638 struct bfa_iocfc_s {
40639diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40640index 4967643..cbec06b 100644
40641--- a/drivers/scsi/dpt_i2o.c
40642+++ b/drivers/scsi/dpt_i2o.c
40643@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40644 dma_addr_t addr;
40645 ulong flags = 0;
40646
40647+ pax_track_stack();
40648+
40649 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40650 // get user msg size in u32s
40651 if(get_user(size, &user_msg[0])){
40652@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40653 s32 rcode;
40654 dma_addr_t addr;
40655
40656+ pax_track_stack();
40657+
40658 memset(msg, 0 , sizeof(msg));
40659 len = scsi_bufflen(cmd);
40660 direction = 0x00000000;
40661diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40662index c7076ce..e20c67c 100644
40663--- a/drivers/scsi/eata.c
40664+++ b/drivers/scsi/eata.c
40665@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40666 struct hostdata *ha;
40667 char name[16];
40668
40669+ pax_track_stack();
40670+
40671 sprintf(name, "%s%d", driver_name, j);
40672
40673 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40674diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40675index 11ae5c9..891daec 100644
40676--- a/drivers/scsi/fcoe/libfcoe.c
40677+++ b/drivers/scsi/fcoe/libfcoe.c
40678@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40679 size_t rlen;
40680 size_t dlen;
40681
40682+ pax_track_stack();
40683+
40684 fiph = (struct fip_header *)skb->data;
40685 sub = fiph->fip_subcode;
40686 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40687diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40688index 71c7bbe..e93088a 100644
40689--- a/drivers/scsi/fnic/fnic_main.c
40690+++ b/drivers/scsi/fnic/fnic_main.c
40691@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40692 /* Start local port initiatialization */
40693
40694 lp->link_up = 0;
40695- lp->tt = fnic_transport_template;
40696+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40697
40698 lp->max_retry_count = fnic->config.flogi_retries;
40699 lp->max_rport_retry_count = fnic->config.plogi_retries;
40700diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40701index bb96d74..9ec3ce4 100644
40702--- a/drivers/scsi/gdth.c
40703+++ b/drivers/scsi/gdth.c
40704@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40705 ulong flags;
40706 gdth_ha_str *ha;
40707
40708+ pax_track_stack();
40709+
40710 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40711 return -EFAULT;
40712 ha = gdth_find_ha(ldrv.ionode);
40713@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40714 gdth_ha_str *ha;
40715 int rval;
40716
40717+ pax_track_stack();
40718+
40719 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40720 res.number >= MAX_HDRIVES)
40721 return -EFAULT;
40722@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40723 gdth_ha_str *ha;
40724 int rval;
40725
40726+ pax_track_stack();
40727+
40728 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40729 return -EFAULT;
40730 ha = gdth_find_ha(gen.ionode);
40731@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40732 int i;
40733 gdth_cmd_str gdtcmd;
40734 char cmnd[MAX_COMMAND_SIZE];
40735+
40736+ pax_track_stack();
40737+
40738 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40739
40740 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40741diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40742index 1258da3..20d8ae6 100644
40743--- a/drivers/scsi/gdth_proc.c
40744+++ b/drivers/scsi/gdth_proc.c
40745@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40746 ulong64 paddr;
40747
40748 char cmnd[MAX_COMMAND_SIZE];
40749+
40750+ pax_track_stack();
40751+
40752 memset(cmnd, 0xff, 12);
40753 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40754
40755@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
40756 gdth_hget_str *phg;
40757 char cmnd[MAX_COMMAND_SIZE];
40758
40759+ pax_track_stack();
40760+
40761 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
40762 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
40763 if (!gdtcmd || !estr)
40764diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40765index d03a926..f324286 100644
40766--- a/drivers/scsi/hosts.c
40767+++ b/drivers/scsi/hosts.c
40768@@ -40,7 +40,7 @@
40769 #include "scsi_logging.h"
40770
40771
40772-static atomic_t scsi_host_next_hn; /* host_no for next new host */
40773+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
40774
40775
40776 static void scsi_host_cls_release(struct device *dev)
40777@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40778 * subtract one because we increment first then return, but we need to
40779 * know what the next host number was before increment
40780 */
40781- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40782+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40783 shost->dma_channel = 0xff;
40784
40785 /* These three are default values which can be overridden */
40786diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
40787index a601159..55e19d2 100644
40788--- a/drivers/scsi/ipr.c
40789+++ b/drivers/scsi/ipr.c
40790@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
40791 return true;
40792 }
40793
40794-static struct ata_port_operations ipr_sata_ops = {
40795+static const struct ata_port_operations ipr_sata_ops = {
40796 .phy_reset = ipr_ata_phy_reset,
40797 .hardreset = ipr_sata_reset,
40798 .post_internal_cmd = ipr_ata_post_internal,
40799diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
40800index 4e49fbc..97907ff 100644
40801--- a/drivers/scsi/ips.h
40802+++ b/drivers/scsi/ips.h
40803@@ -1027,7 +1027,7 @@ typedef struct {
40804 int (*intr)(struct ips_ha *);
40805 void (*enableint)(struct ips_ha *);
40806 uint32_t (*statupd)(struct ips_ha *);
40807-} ips_hw_func_t;
40808+} __no_const ips_hw_func_t;
40809
40810 typedef struct ips_ha {
40811 uint8_t ha_id[IPS_MAX_CHANNELS+1];
40812diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40813index c1c1574..a9c9348 100644
40814--- a/drivers/scsi/libfc/fc_exch.c
40815+++ b/drivers/scsi/libfc/fc_exch.c
40816@@ -86,12 +86,12 @@ struct fc_exch_mgr {
40817 * all together if not used XXX
40818 */
40819 struct {
40820- atomic_t no_free_exch;
40821- atomic_t no_free_exch_xid;
40822- atomic_t xid_not_found;
40823- atomic_t xid_busy;
40824- atomic_t seq_not_found;
40825- atomic_t non_bls_resp;
40826+ atomic_unchecked_t no_free_exch;
40827+ atomic_unchecked_t no_free_exch_xid;
40828+ atomic_unchecked_t xid_not_found;
40829+ atomic_unchecked_t xid_busy;
40830+ atomic_unchecked_t seq_not_found;
40831+ atomic_unchecked_t non_bls_resp;
40832 } stats;
40833 };
40834 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
40835@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40836 /* allocate memory for exchange */
40837 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40838 if (!ep) {
40839- atomic_inc(&mp->stats.no_free_exch);
40840+ atomic_inc_unchecked(&mp->stats.no_free_exch);
40841 goto out;
40842 }
40843 memset(ep, 0, sizeof(*ep));
40844@@ -557,7 +557,7 @@ out:
40845 return ep;
40846 err:
40847 spin_unlock_bh(&pool->lock);
40848- atomic_inc(&mp->stats.no_free_exch_xid);
40849+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40850 mempool_free(ep, mp->ep_pool);
40851 return NULL;
40852 }
40853@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40854 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40855 ep = fc_exch_find(mp, xid);
40856 if (!ep) {
40857- atomic_inc(&mp->stats.xid_not_found);
40858+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40859 reject = FC_RJT_OX_ID;
40860 goto out;
40861 }
40862@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40863 ep = fc_exch_find(mp, xid);
40864 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40865 if (ep) {
40866- atomic_inc(&mp->stats.xid_busy);
40867+ atomic_inc_unchecked(&mp->stats.xid_busy);
40868 reject = FC_RJT_RX_ID;
40869 goto rel;
40870 }
40871@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40872 }
40873 xid = ep->xid; /* get our XID */
40874 } else if (!ep) {
40875- atomic_inc(&mp->stats.xid_not_found);
40876+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40877 reject = FC_RJT_RX_ID; /* XID not found */
40878 goto out;
40879 }
40880@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40881 } else {
40882 sp = &ep->seq;
40883 if (sp->id != fh->fh_seq_id) {
40884- atomic_inc(&mp->stats.seq_not_found);
40885+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40886 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
40887 goto rel;
40888 }
40889@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40890
40891 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
40892 if (!ep) {
40893- atomic_inc(&mp->stats.xid_not_found);
40894+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40895 goto out;
40896 }
40897 if (ep->esb_stat & ESB_ST_COMPLETE) {
40898- atomic_inc(&mp->stats.xid_not_found);
40899+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40900 goto out;
40901 }
40902 if (ep->rxid == FC_XID_UNKNOWN)
40903 ep->rxid = ntohs(fh->fh_rx_id);
40904 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
40905- atomic_inc(&mp->stats.xid_not_found);
40906+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40907 goto rel;
40908 }
40909 if (ep->did != ntoh24(fh->fh_s_id) &&
40910 ep->did != FC_FID_FLOGI) {
40911- atomic_inc(&mp->stats.xid_not_found);
40912+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40913 goto rel;
40914 }
40915 sof = fr_sof(fp);
40916@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40917 } else {
40918 sp = &ep->seq;
40919 if (sp->id != fh->fh_seq_id) {
40920- atomic_inc(&mp->stats.seq_not_found);
40921+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40922 goto rel;
40923 }
40924 }
40925@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40926 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
40927
40928 if (!sp)
40929- atomic_inc(&mp->stats.xid_not_found);
40930+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40931 else
40932- atomic_inc(&mp->stats.non_bls_resp);
40933+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
40934
40935 fc_frame_free(fp);
40936 }
40937diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
40938index 0ee989f..a582241 100644
40939--- a/drivers/scsi/libsas/sas_ata.c
40940+++ b/drivers/scsi/libsas/sas_ata.c
40941@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
40942 }
40943 }
40944
40945-static struct ata_port_operations sas_sata_ops = {
40946+static const struct ata_port_operations sas_sata_ops = {
40947 .phy_reset = sas_ata_phy_reset,
40948 .post_internal_cmd = sas_ata_post_internal,
40949 .qc_defer = ata_std_qc_defer,
40950diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
40951index aa10f79..5cc79e4 100644
40952--- a/drivers/scsi/lpfc/lpfc.h
40953+++ b/drivers/scsi/lpfc/lpfc.h
40954@@ -400,7 +400,7 @@ struct lpfc_vport {
40955 struct dentry *debug_nodelist;
40956 struct dentry *vport_debugfs_root;
40957 struct lpfc_debugfs_trc *disc_trc;
40958- atomic_t disc_trc_cnt;
40959+ atomic_unchecked_t disc_trc_cnt;
40960 #endif
40961 uint8_t stat_data_enabled;
40962 uint8_t stat_data_blocked;
40963@@ -725,8 +725,8 @@ struct lpfc_hba {
40964 struct timer_list fabric_block_timer;
40965 unsigned long bit_flags;
40966 #define FABRIC_COMANDS_BLOCKED 0
40967- atomic_t num_rsrc_err;
40968- atomic_t num_cmd_success;
40969+ atomic_unchecked_t num_rsrc_err;
40970+ atomic_unchecked_t num_cmd_success;
40971 unsigned long last_rsrc_error_time;
40972 unsigned long last_ramp_down_time;
40973 unsigned long last_ramp_up_time;
40974@@ -740,7 +740,7 @@ struct lpfc_hba {
40975 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
40976 struct dentry *debug_slow_ring_trc;
40977 struct lpfc_debugfs_trc *slow_ring_trc;
40978- atomic_t slow_ring_trc_cnt;
40979+ atomic_unchecked_t slow_ring_trc_cnt;
40980 #endif
40981
40982 /* Used for deferred freeing of ELS data buffers */
40983diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
40984index 8d0f0de..7c77a62 100644
40985--- a/drivers/scsi/lpfc/lpfc_debugfs.c
40986+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
40987@@ -124,7 +124,7 @@ struct lpfc_debug {
40988 int len;
40989 };
40990
40991-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40992+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40993 static unsigned long lpfc_debugfs_start_time = 0L;
40994
40995 /**
40996@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
40997 lpfc_debugfs_enable = 0;
40998
40999 len = 0;
41000- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41001+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41002 (lpfc_debugfs_max_disc_trc - 1);
41003 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41004 dtp = vport->disc_trc + i;
41005@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41006 lpfc_debugfs_enable = 0;
41007
41008 len = 0;
41009- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41010+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41011 (lpfc_debugfs_max_slow_ring_trc - 1);
41012 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41013 dtp = phba->slow_ring_trc + i;
41014@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41015 uint32_t *ptr;
41016 char buffer[1024];
41017
41018+ pax_track_stack();
41019+
41020 off = 0;
41021 spin_lock_irq(&phba->hbalock);
41022
41023@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41024 !vport || !vport->disc_trc)
41025 return;
41026
41027- index = atomic_inc_return(&vport->disc_trc_cnt) &
41028+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41029 (lpfc_debugfs_max_disc_trc - 1);
41030 dtp = vport->disc_trc + index;
41031 dtp->fmt = fmt;
41032 dtp->data1 = data1;
41033 dtp->data2 = data2;
41034 dtp->data3 = data3;
41035- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41036+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41037 dtp->jif = jiffies;
41038 #endif
41039 return;
41040@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41041 !phba || !phba->slow_ring_trc)
41042 return;
41043
41044- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41045+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41046 (lpfc_debugfs_max_slow_ring_trc - 1);
41047 dtp = phba->slow_ring_trc + index;
41048 dtp->fmt = fmt;
41049 dtp->data1 = data1;
41050 dtp->data2 = data2;
41051 dtp->data3 = data3;
41052- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41053+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41054 dtp->jif = jiffies;
41055 #endif
41056 return;
41057@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41058 "slow_ring buffer\n");
41059 goto debug_failed;
41060 }
41061- atomic_set(&phba->slow_ring_trc_cnt, 0);
41062+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41063 memset(phba->slow_ring_trc, 0,
41064 (sizeof(struct lpfc_debugfs_trc) *
41065 lpfc_debugfs_max_slow_ring_trc));
41066@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41067 "buffer\n");
41068 goto debug_failed;
41069 }
41070- atomic_set(&vport->disc_trc_cnt, 0);
41071+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41072
41073 snprintf(name, sizeof(name), "discovery_trace");
41074 vport->debug_disc_trc =
41075diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41076index 549bc7d..8189dbb 100644
41077--- a/drivers/scsi/lpfc/lpfc_init.c
41078+++ b/drivers/scsi/lpfc/lpfc_init.c
41079@@ -8021,8 +8021,10 @@ lpfc_init(void)
41080 printk(LPFC_COPYRIGHT "\n");
41081
41082 if (lpfc_enable_npiv) {
41083- lpfc_transport_functions.vport_create = lpfc_vport_create;
41084- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41085+ pax_open_kernel();
41086+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41087+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41088+ pax_close_kernel();
41089 }
41090 lpfc_transport_template =
41091 fc_attach_transport(&lpfc_transport_functions);
41092diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41093index c88f59f..ff2a42f 100644
41094--- a/drivers/scsi/lpfc/lpfc_scsi.c
41095+++ b/drivers/scsi/lpfc/lpfc_scsi.c
41096@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41097 uint32_t evt_posted;
41098
41099 spin_lock_irqsave(&phba->hbalock, flags);
41100- atomic_inc(&phba->num_rsrc_err);
41101+ atomic_inc_unchecked(&phba->num_rsrc_err);
41102 phba->last_rsrc_error_time = jiffies;
41103
41104 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41105@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41106 unsigned long flags;
41107 struct lpfc_hba *phba = vport->phba;
41108 uint32_t evt_posted;
41109- atomic_inc(&phba->num_cmd_success);
41110+ atomic_inc_unchecked(&phba->num_cmd_success);
41111
41112 if (vport->cfg_lun_queue_depth <= queue_depth)
41113 return;
41114@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41115 int i;
41116 struct lpfc_rport_data *rdata;
41117
41118- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41119- num_cmd_success = atomic_read(&phba->num_cmd_success);
41120+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41121+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41122
41123 vports = lpfc_create_vport_work_array(phba);
41124 if (vports != NULL)
41125@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41126 }
41127 }
41128 lpfc_destroy_vport_work_array(phba, vports);
41129- atomic_set(&phba->num_rsrc_err, 0);
41130- atomic_set(&phba->num_cmd_success, 0);
41131+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41132+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41133 }
41134
41135 /**
41136@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41137 }
41138 }
41139 lpfc_destroy_vport_work_array(phba, vports);
41140- atomic_set(&phba->num_rsrc_err, 0);
41141- atomic_set(&phba->num_cmd_success, 0);
41142+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41143+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41144 }
41145
41146 /**
41147diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41148index 234f0b7..3020aea 100644
41149--- a/drivers/scsi/megaraid/megaraid_mbox.c
41150+++ b/drivers/scsi/megaraid/megaraid_mbox.c
41151@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41152 int rval;
41153 int i;
41154
41155+ pax_track_stack();
41156+
41157 // Allocate memory for the base list of scb for management module.
41158 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41159
41160diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41161index 7a117c1..ee01e9e 100644
41162--- a/drivers/scsi/osd/osd_initiator.c
41163+++ b/drivers/scsi/osd/osd_initiator.c
41164@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41165 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41166 int ret;
41167
41168+ pax_track_stack();
41169+
41170 or = osd_start_request(od, GFP_KERNEL);
41171 if (!or)
41172 return -ENOMEM;
41173diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41174index 9ab8c86..9425ad3 100644
41175--- a/drivers/scsi/pmcraid.c
41176+++ b/drivers/scsi/pmcraid.c
41177@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41178 res->scsi_dev = scsi_dev;
41179 scsi_dev->hostdata = res;
41180 res->change_detected = 0;
41181- atomic_set(&res->read_failures, 0);
41182- atomic_set(&res->write_failures, 0);
41183+ atomic_set_unchecked(&res->read_failures, 0);
41184+ atomic_set_unchecked(&res->write_failures, 0);
41185 rc = 0;
41186 }
41187 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41188@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41189
41190 /* If this was a SCSI read/write command keep count of errors */
41191 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41192- atomic_inc(&res->read_failures);
41193+ atomic_inc_unchecked(&res->read_failures);
41194 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41195- atomic_inc(&res->write_failures);
41196+ atomic_inc_unchecked(&res->write_failures);
41197
41198 if (!RES_IS_GSCSI(res->cfg_entry) &&
41199 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41200@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41201
41202 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41203 /* add resources only after host is added into system */
41204- if (!atomic_read(&pinstance->expose_resources))
41205+ if (!atomic_read_unchecked(&pinstance->expose_resources))
41206 return;
41207
41208 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41209@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41210 init_waitqueue_head(&pinstance->reset_wait_q);
41211
41212 atomic_set(&pinstance->outstanding_cmds, 0);
41213- atomic_set(&pinstance->expose_resources, 0);
41214+ atomic_set_unchecked(&pinstance->expose_resources, 0);
41215
41216 INIT_LIST_HEAD(&pinstance->free_res_q);
41217 INIT_LIST_HEAD(&pinstance->used_res_q);
41218@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41219 /* Schedule worker thread to handle CCN and take care of adding and
41220 * removing devices to OS
41221 */
41222- atomic_set(&pinstance->expose_resources, 1);
41223+ atomic_set_unchecked(&pinstance->expose_resources, 1);
41224 schedule_work(&pinstance->worker_q);
41225 return rc;
41226
41227diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41228index 3441b3f..6cbe8f7 100644
41229--- a/drivers/scsi/pmcraid.h
41230+++ b/drivers/scsi/pmcraid.h
41231@@ -690,7 +690,7 @@ struct pmcraid_instance {
41232 atomic_t outstanding_cmds;
41233
41234 /* should add/delete resources to mid-layer now ?*/
41235- atomic_t expose_resources;
41236+ atomic_unchecked_t expose_resources;
41237
41238 /* Tasklet to handle deferred processing */
41239 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41240@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41241 struct list_head queue; /* link to "to be exposed" resources */
41242 struct pmcraid_config_table_entry cfg_entry;
41243 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41244- atomic_t read_failures; /* count of failed READ commands */
41245- atomic_t write_failures; /* count of failed WRITE commands */
41246+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41247+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41248
41249 /* To indicate add/delete/modify during CCN */
41250 u8 change_detected;
41251diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41252index 2150618..7034215 100644
41253--- a/drivers/scsi/qla2xxx/qla_def.h
41254+++ b/drivers/scsi/qla2xxx/qla_def.h
41255@@ -2089,7 +2089,7 @@ struct isp_operations {
41256
41257 int (*get_flash_version) (struct scsi_qla_host *, void *);
41258 int (*start_scsi) (srb_t *);
41259-};
41260+} __no_const;
41261
41262 /* MSI-X Support *************************************************************/
41263
41264diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41265index 81b5f29..2ae1fad 100644
41266--- a/drivers/scsi/qla4xxx/ql4_def.h
41267+++ b/drivers/scsi/qla4xxx/ql4_def.h
41268@@ -240,7 +240,7 @@ struct ddb_entry {
41269 atomic_t retry_relogin_timer; /* Min Time between relogins
41270 * (4000 only) */
41271 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41272- atomic_t relogin_retry_count; /* Num of times relogin has been
41273+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41274 * retried */
41275
41276 uint16_t port;
41277diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41278index af8c323..515dd51 100644
41279--- a/drivers/scsi/qla4xxx/ql4_init.c
41280+++ b/drivers/scsi/qla4xxx/ql4_init.c
41281@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41282 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41283 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41284 atomic_set(&ddb_entry->relogin_timer, 0);
41285- atomic_set(&ddb_entry->relogin_retry_count, 0);
41286+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41287 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41288 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41289 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41290@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41291 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41292 atomic_set(&ddb_entry->port_down_timer,
41293 ha->port_down_retry_count);
41294- atomic_set(&ddb_entry->relogin_retry_count, 0);
41295+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41296 atomic_set(&ddb_entry->relogin_timer, 0);
41297 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41298 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41299diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41300index 83c8b5e..a82b348 100644
41301--- a/drivers/scsi/qla4xxx/ql4_os.c
41302+++ b/drivers/scsi/qla4xxx/ql4_os.c
41303@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41304 ddb_entry->fw_ddb_device_state ==
41305 DDB_DS_SESSION_FAILED) {
41306 /* Reset retry relogin timer */
41307- atomic_inc(&ddb_entry->relogin_retry_count);
41308+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41309 DEBUG2(printk("scsi%ld: index[%d] relogin"
41310 " timed out-retrying"
41311 " relogin (%d)\n",
41312 ha->host_no,
41313 ddb_entry->fw_ddb_index,
41314- atomic_read(&ddb_entry->
41315+ atomic_read_unchecked(&ddb_entry->
41316 relogin_retry_count))
41317 );
41318 start_dpc++;
41319diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41320index dd098ca..686ce01 100644
41321--- a/drivers/scsi/scsi.c
41322+++ b/drivers/scsi/scsi.c
41323@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41324 unsigned long timeout;
41325 int rtn = 0;
41326
41327- atomic_inc(&cmd->device->iorequest_cnt);
41328+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41329
41330 /* check if the device is still usable */
41331 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41332diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41333index bc3e363..e1a8e50 100644
41334--- a/drivers/scsi/scsi_debug.c
41335+++ b/drivers/scsi/scsi_debug.c
41336@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41337 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41338 unsigned char *cmd = (unsigned char *)scp->cmnd;
41339
41340+ pax_track_stack();
41341+
41342 if ((errsts = check_readiness(scp, 1, devip)))
41343 return errsts;
41344 memset(arr, 0, sizeof(arr));
41345@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41346 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41347 unsigned char *cmd = (unsigned char *)scp->cmnd;
41348
41349+ pax_track_stack();
41350+
41351 if ((errsts = check_readiness(scp, 1, devip)))
41352 return errsts;
41353 memset(arr, 0, sizeof(arr));
41354diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41355index 8df12522..c4c1472 100644
41356--- a/drivers/scsi/scsi_lib.c
41357+++ b/drivers/scsi/scsi_lib.c
41358@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41359 shost = sdev->host;
41360 scsi_init_cmd_errh(cmd);
41361 cmd->result = DID_NO_CONNECT << 16;
41362- atomic_inc(&cmd->device->iorequest_cnt);
41363+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41364
41365 /*
41366 * SCSI request completion path will do scsi_device_unbusy(),
41367@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41368 */
41369 cmd->serial_number = 0;
41370
41371- atomic_inc(&cmd->device->iodone_cnt);
41372+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41373 if (cmd->result)
41374- atomic_inc(&cmd->device->ioerr_cnt);
41375+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41376
41377 disposition = scsi_decide_disposition(cmd);
41378 if (disposition != SUCCESS &&
41379diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41380index 91a93e0..eae0fe3 100644
41381--- a/drivers/scsi/scsi_sysfs.c
41382+++ b/drivers/scsi/scsi_sysfs.c
41383@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41384 char *buf) \
41385 { \
41386 struct scsi_device *sdev = to_scsi_device(dev); \
41387- unsigned long long count = atomic_read(&sdev->field); \
41388+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41389 return snprintf(buf, 20, "0x%llx\n", count); \
41390 } \
41391 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41392diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41393index 1030327..f91fd30 100644
41394--- a/drivers/scsi/scsi_tgt_lib.c
41395+++ b/drivers/scsi/scsi_tgt_lib.c
41396@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41397 int err;
41398
41399 dprintk("%lx %u\n", uaddr, len);
41400- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41401+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41402 if (err) {
41403 /*
41404 * TODO: need to fixup sg_tablesize, max_segment_size,
41405diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41406index db02e31..1b42ea9 100644
41407--- a/drivers/scsi/scsi_transport_fc.c
41408+++ b/drivers/scsi/scsi_transport_fc.c
41409@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41410 * Netlink Infrastructure
41411 */
41412
41413-static atomic_t fc_event_seq;
41414+static atomic_unchecked_t fc_event_seq;
41415
41416 /**
41417 * fc_get_event_number - Obtain the next sequential FC event number
41418@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41419 u32
41420 fc_get_event_number(void)
41421 {
41422- return atomic_add_return(1, &fc_event_seq);
41423+ return atomic_add_return_unchecked(1, &fc_event_seq);
41424 }
41425 EXPORT_SYMBOL(fc_get_event_number);
41426
41427@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41428 {
41429 int error;
41430
41431- atomic_set(&fc_event_seq, 0);
41432+ atomic_set_unchecked(&fc_event_seq, 0);
41433
41434 error = transport_class_register(&fc_host_class);
41435 if (error)
41436diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41437index de2f8c4..63c5278 100644
41438--- a/drivers/scsi/scsi_transport_iscsi.c
41439+++ b/drivers/scsi/scsi_transport_iscsi.c
41440@@ -81,7 +81,7 @@ struct iscsi_internal {
41441 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41442 };
41443
41444-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41445+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41446 static struct workqueue_struct *iscsi_eh_timer_workq;
41447
41448 /*
41449@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41450 int err;
41451
41452 ihost = shost->shost_data;
41453- session->sid = atomic_add_return(1, &iscsi_session_nr);
41454+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41455
41456 if (id == ISCSI_MAX_TARGET) {
41457 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41458@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41459 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41460 ISCSI_TRANSPORT_VERSION);
41461
41462- atomic_set(&iscsi_session_nr, 0);
41463+ atomic_set_unchecked(&iscsi_session_nr, 0);
41464
41465 err = class_register(&iscsi_transport_class);
41466 if (err)
41467diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41468index 21a045e..ec89e03 100644
41469--- a/drivers/scsi/scsi_transport_srp.c
41470+++ b/drivers/scsi/scsi_transport_srp.c
41471@@ -33,7 +33,7 @@
41472 #include "scsi_transport_srp_internal.h"
41473
41474 struct srp_host_attrs {
41475- atomic_t next_port_id;
41476+ atomic_unchecked_t next_port_id;
41477 };
41478 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41479
41480@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41481 struct Scsi_Host *shost = dev_to_shost(dev);
41482 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41483
41484- atomic_set(&srp_host->next_port_id, 0);
41485+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41486 return 0;
41487 }
41488
41489@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41490 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41491 rport->roles = ids->roles;
41492
41493- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41494+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41495 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41496
41497 transport_setup_device(&rport->dev);
41498diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41499index 040f751..98a5ed2 100644
41500--- a/drivers/scsi/sg.c
41501+++ b/drivers/scsi/sg.c
41502@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41503 sdp->disk->disk_name,
41504 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41505 NULL,
41506- (char *)arg);
41507+ (char __user *)arg);
41508 case BLKTRACESTART:
41509 return blk_trace_startstop(sdp->device->request_queue, 1);
41510 case BLKTRACESTOP:
41511@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41512 const struct file_operations * fops;
41513 };
41514
41515-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41516+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41517 {"allow_dio", &adio_fops},
41518 {"debug", &debug_fops},
41519 {"def_reserved_size", &dressz_fops},
41520@@ -2307,7 +2307,7 @@ sg_proc_init(void)
41521 {
41522 int k, mask;
41523 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41524- struct sg_proc_leaf * leaf;
41525+ const struct sg_proc_leaf * leaf;
41526
41527 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41528 if (!sg_proc_sgp)
41529diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41530index c19ca5e..3eb5959 100644
41531--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41532+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41533@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41534 int do_iounmap = 0;
41535 int do_disable_device = 1;
41536
41537+ pax_track_stack();
41538+
41539 memset(&sym_dev, 0, sizeof(sym_dev));
41540 memset(&nvram, 0, sizeof(nvram));
41541 sym_dev.pdev = pdev;
41542diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41543index eadc1ab..2d81457 100644
41544--- a/drivers/serial/kgdboc.c
41545+++ b/drivers/serial/kgdboc.c
41546@@ -18,7 +18,7 @@
41547
41548 #define MAX_CONFIG_LEN 40
41549
41550-static struct kgdb_io kgdboc_io_ops;
41551+static const struct kgdb_io kgdboc_io_ops;
41552
41553 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41554 static int configured = -1;
41555@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41556 module_put(THIS_MODULE);
41557 }
41558
41559-static struct kgdb_io kgdboc_io_ops = {
41560+static const struct kgdb_io kgdboc_io_ops = {
41561 .name = "kgdboc",
41562 .read_char = kgdboc_get_char,
41563 .write_char = kgdboc_put_char,
41564diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41565index b76f246..7f41af7 100644
41566--- a/drivers/spi/spi.c
41567+++ b/drivers/spi/spi.c
41568@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41569 EXPORT_SYMBOL_GPL(spi_sync);
41570
41571 /* portable code must never pass more than 32 bytes */
41572-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41573+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41574
41575 static u8 *buf;
41576
41577diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41578index 99010d4..6bad87b 100644
41579--- a/drivers/staging/android/binder.c
41580+++ b/drivers/staging/android/binder.c
41581@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41582 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41583 }
41584
41585-static struct vm_operations_struct binder_vm_ops = {
41586+static const struct vm_operations_struct binder_vm_ops = {
41587 .open = binder_vma_open,
41588 .close = binder_vma_close,
41589 };
41590diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41591index cda26bb..39fed3f 100644
41592--- a/drivers/staging/b3dfg/b3dfg.c
41593+++ b/drivers/staging/b3dfg/b3dfg.c
41594@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41595 return VM_FAULT_NOPAGE;
41596 }
41597
41598-static struct vm_operations_struct b3dfg_vm_ops = {
41599+static const struct vm_operations_struct b3dfg_vm_ops = {
41600 .fault = b3dfg_vma_fault,
41601 };
41602
41603@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41604 return r;
41605 }
41606
41607-static struct file_operations b3dfg_fops = {
41608+static const struct file_operations b3dfg_fops = {
41609 .owner = THIS_MODULE,
41610 .open = b3dfg_open,
41611 .release = b3dfg_release,
41612diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41613index 908f25a..c9a579b 100644
41614--- a/drivers/staging/comedi/comedi_fops.c
41615+++ b/drivers/staging/comedi/comedi_fops.c
41616@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41617 mutex_unlock(&dev->mutex);
41618 }
41619
41620-static struct vm_operations_struct comedi_vm_ops = {
41621+static const struct vm_operations_struct comedi_vm_ops = {
41622 .close = comedi_unmap,
41623 };
41624
41625diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41626index e55a0db..577b776 100644
41627--- a/drivers/staging/dream/qdsp5/adsp_driver.c
41628+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41629@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41630 static dev_t adsp_devno;
41631 static struct class *adsp_class;
41632
41633-static struct file_operations adsp_fops = {
41634+static const struct file_operations adsp_fops = {
41635 .owner = THIS_MODULE,
41636 .open = adsp_open,
41637 .unlocked_ioctl = adsp_ioctl,
41638diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41639index ad2390f..4116ee8 100644
41640--- a/drivers/staging/dream/qdsp5/audio_aac.c
41641+++ b/drivers/staging/dream/qdsp5/audio_aac.c
41642@@ -1022,7 +1022,7 @@ done:
41643 return rc;
41644 }
41645
41646-static struct file_operations audio_aac_fops = {
41647+static const struct file_operations audio_aac_fops = {
41648 .owner = THIS_MODULE,
41649 .open = audio_open,
41650 .release = audio_release,
41651diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41652index cd818a5..870b37b 100644
41653--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41654+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41655@@ -833,7 +833,7 @@ done:
41656 return rc;
41657 }
41658
41659-static struct file_operations audio_amrnb_fops = {
41660+static const struct file_operations audio_amrnb_fops = {
41661 .owner = THIS_MODULE,
41662 .open = audamrnb_open,
41663 .release = audamrnb_release,
41664diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41665index 4b43e18..cedafda 100644
41666--- a/drivers/staging/dream/qdsp5/audio_evrc.c
41667+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41668@@ -805,7 +805,7 @@ dma_fail:
41669 return rc;
41670 }
41671
41672-static struct file_operations audio_evrc_fops = {
41673+static const struct file_operations audio_evrc_fops = {
41674 .owner = THIS_MODULE,
41675 .open = audevrc_open,
41676 .release = audevrc_release,
41677diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41678index 3d950a2..9431118 100644
41679--- a/drivers/staging/dream/qdsp5/audio_in.c
41680+++ b/drivers/staging/dream/qdsp5/audio_in.c
41681@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41682 return 0;
41683 }
41684
41685-static struct file_operations audio_fops = {
41686+static const struct file_operations audio_fops = {
41687 .owner = THIS_MODULE,
41688 .open = audio_in_open,
41689 .release = audio_in_release,
41690@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41691 .unlocked_ioctl = audio_in_ioctl,
41692 };
41693
41694-static struct file_operations audpre_fops = {
41695+static const struct file_operations audpre_fops = {
41696 .owner = THIS_MODULE,
41697 .open = audpre_open,
41698 .unlocked_ioctl = audpre_ioctl,
41699diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41700index b95574f..286c2f4 100644
41701--- a/drivers/staging/dream/qdsp5/audio_mp3.c
41702+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41703@@ -941,7 +941,7 @@ done:
41704 return rc;
41705 }
41706
41707-static struct file_operations audio_mp3_fops = {
41708+static const struct file_operations audio_mp3_fops = {
41709 .owner = THIS_MODULE,
41710 .open = audio_open,
41711 .release = audio_release,
41712diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41713index d1adcf6..f8f9833 100644
41714--- a/drivers/staging/dream/qdsp5/audio_out.c
41715+++ b/drivers/staging/dream/qdsp5/audio_out.c
41716@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41717 return 0;
41718 }
41719
41720-static struct file_operations audio_fops = {
41721+static const struct file_operations audio_fops = {
41722 .owner = THIS_MODULE,
41723 .open = audio_open,
41724 .release = audio_release,
41725@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41726 .unlocked_ioctl = audio_ioctl,
41727 };
41728
41729-static struct file_operations audpp_fops = {
41730+static const struct file_operations audpp_fops = {
41731 .owner = THIS_MODULE,
41732 .open = audpp_open,
41733 .unlocked_ioctl = audpp_ioctl,
41734diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41735index f0f50e3..f6b9dbc 100644
41736--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41737+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41738@@ -816,7 +816,7 @@ err:
41739 return rc;
41740 }
41741
41742-static struct file_operations audio_qcelp_fops = {
41743+static const struct file_operations audio_qcelp_fops = {
41744 .owner = THIS_MODULE,
41745 .open = audqcelp_open,
41746 .release = audqcelp_release,
41747diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41748index 037d7ff..5469ec3 100644
41749--- a/drivers/staging/dream/qdsp5/snd.c
41750+++ b/drivers/staging/dream/qdsp5/snd.c
41751@@ -242,7 +242,7 @@ err:
41752 return rc;
41753 }
41754
41755-static struct file_operations snd_fops = {
41756+static const struct file_operations snd_fops = {
41757 .owner = THIS_MODULE,
41758 .open = snd_open,
41759 .release = snd_release,
41760diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
41761index d4e7d88..0ea632a 100644
41762--- a/drivers/staging/dream/smd/smd_qmi.c
41763+++ b/drivers/staging/dream/smd/smd_qmi.c
41764@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
41765 return 0;
41766 }
41767
41768-static struct file_operations qmi_fops = {
41769+static const struct file_operations qmi_fops = {
41770 .owner = THIS_MODULE,
41771 .read = qmi_read,
41772 .write = qmi_write,
41773diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41774index cd3910b..ff053d3 100644
41775--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
41776+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41777@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
41778 return rc;
41779 }
41780
41781-static struct file_operations rpcrouter_server_fops = {
41782+static const struct file_operations rpcrouter_server_fops = {
41783 .owner = THIS_MODULE,
41784 .open = rpcrouter_open,
41785 .release = rpcrouter_release,
41786@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
41787 .unlocked_ioctl = rpcrouter_ioctl,
41788 };
41789
41790-static struct file_operations rpcrouter_router_fops = {
41791+static const struct file_operations rpcrouter_router_fops = {
41792 .owner = THIS_MODULE,
41793 .open = rpcrouter_open,
41794 .release = rpcrouter_release,
41795diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
41796index c24e4e0..07665be 100644
41797--- a/drivers/staging/dst/dcore.c
41798+++ b/drivers/staging/dst/dcore.c
41799@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
41800 return 0;
41801 }
41802
41803-static struct block_device_operations dst_blk_ops = {
41804+static const struct block_device_operations dst_blk_ops = {
41805 .open = dst_bdev_open,
41806 .release = dst_bdev_release,
41807 .owner = THIS_MODULE,
41808@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
41809 n->size = ctl->size;
41810
41811 atomic_set(&n->refcnt, 1);
41812- atomic_long_set(&n->gen, 0);
41813+ atomic_long_set_unchecked(&n->gen, 0);
41814 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
41815
41816 err = dst_node_sysfs_init(n);
41817diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
41818index 557d372..8d84422 100644
41819--- a/drivers/staging/dst/trans.c
41820+++ b/drivers/staging/dst/trans.c
41821@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
41822 t->error = 0;
41823 t->retries = 0;
41824 atomic_set(&t->refcnt, 1);
41825- t->gen = atomic_long_inc_return(&n->gen);
41826+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
41827
41828 t->enc = bio_data_dir(bio);
41829 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
41830diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
41831index 94f7752..d051514 100644
41832--- a/drivers/staging/et131x/et1310_tx.c
41833+++ b/drivers/staging/et131x/et1310_tx.c
41834@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
41835 struct net_device_stats *stats = &etdev->net_stats;
41836
41837 if (pMpTcb->Flags & fMP_DEST_BROAD)
41838- atomic_inc(&etdev->Stats.brdcstxmt);
41839+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
41840 else if (pMpTcb->Flags & fMP_DEST_MULTI)
41841- atomic_inc(&etdev->Stats.multixmt);
41842+ atomic_inc_unchecked(&etdev->Stats.multixmt);
41843 else
41844- atomic_inc(&etdev->Stats.unixmt);
41845+ atomic_inc_unchecked(&etdev->Stats.unixmt);
41846
41847 if (pMpTcb->Packet) {
41848 stats->tx_bytes += pMpTcb->Packet->len;
41849diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
41850index 1dfe06f..f469b4d 100644
41851--- a/drivers/staging/et131x/et131x_adapter.h
41852+++ b/drivers/staging/et131x/et131x_adapter.h
41853@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
41854 * operations
41855 */
41856 u32 unircv; /* # multicast packets received */
41857- atomic_t unixmt; /* # multicast packets for Tx */
41858+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
41859 u32 multircv; /* # multicast packets received */
41860- atomic_t multixmt; /* # multicast packets for Tx */
41861+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
41862 u32 brdcstrcv; /* # broadcast packets received */
41863- atomic_t brdcstxmt; /* # broadcast packets for Tx */
41864+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
41865 u32 norcvbuf; /* # Rx packets discarded */
41866 u32 noxmtbuf; /* # Tx packets discarded */
41867
41868diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
41869index 4bd353a..e28f455 100644
41870--- a/drivers/staging/go7007/go7007-v4l2.c
41871+++ b/drivers/staging/go7007/go7007-v4l2.c
41872@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41873 return 0;
41874 }
41875
41876-static struct vm_operations_struct go7007_vm_ops = {
41877+static const struct vm_operations_struct go7007_vm_ops = {
41878 .open = go7007_vm_open,
41879 .close = go7007_vm_close,
41880 .fault = go7007_vm_fault,
41881diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
41882index 366dc95..b974d87 100644
41883--- a/drivers/staging/hv/Channel.c
41884+++ b/drivers/staging/hv/Channel.c
41885@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
41886
41887 DPRINT_ENTER(VMBUS);
41888
41889- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
41890- atomic_inc(&gVmbusConnection.NextGpadlHandle);
41891+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
41892+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
41893
41894 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
41895 ASSERT(msgInfo != NULL);
41896diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
41897index b12237f..01ae28a 100644
41898--- a/drivers/staging/hv/Hv.c
41899+++ b/drivers/staging/hv/Hv.c
41900@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
41901 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
41902 u32 outputAddressHi = outputAddress >> 32;
41903 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
41904- volatile void *hypercallPage = gHvContext.HypercallPage;
41905+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
41906
41907 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
41908 Control, Input, Output);
41909diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
41910index d089bb1..2ebc158 100644
41911--- a/drivers/staging/hv/VmbusApi.h
41912+++ b/drivers/staging/hv/VmbusApi.h
41913@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
41914 u32 *GpadlHandle);
41915 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
41916 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
41917-};
41918+} __no_const;
41919
41920 /* Base driver object */
41921 struct hv_driver {
41922diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
41923index 5a37cce..6ecc88c 100644
41924--- a/drivers/staging/hv/VmbusPrivate.h
41925+++ b/drivers/staging/hv/VmbusPrivate.h
41926@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
41927 struct VMBUS_CONNECTION {
41928 enum VMBUS_CONNECT_STATE ConnectState;
41929
41930- atomic_t NextGpadlHandle;
41931+ atomic_unchecked_t NextGpadlHandle;
41932
41933 /*
41934 * Represents channel interrupts. Each bit position represents a
41935diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
41936index 871a202..ca50ddf 100644
41937--- a/drivers/staging/hv/blkvsc_drv.c
41938+++ b/drivers/staging/hv/blkvsc_drv.c
41939@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
41940 /* The one and only one */
41941 static struct blkvsc_driver_context g_blkvsc_drv;
41942
41943-static struct block_device_operations block_ops = {
41944+static const struct block_device_operations block_ops = {
41945 .owner = THIS_MODULE,
41946 .open = blkvsc_open,
41947 .release = blkvsc_release,
41948diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
41949index 6acc49a..fbc8d46 100644
41950--- a/drivers/staging/hv/vmbus_drv.c
41951+++ b/drivers/staging/hv/vmbus_drv.c
41952@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41953 to_device_context(root_device_obj);
41954 struct device_context *child_device_ctx =
41955 to_device_context(child_device_obj);
41956- static atomic_t device_num = ATOMIC_INIT(0);
41957+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41958
41959 DPRINT_ENTER(VMBUS_DRV);
41960
41961@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41962
41963 /* Set the device name. Otherwise, device_register() will fail. */
41964 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
41965- atomic_inc_return(&device_num));
41966+ atomic_inc_return_unchecked(&device_num));
41967
41968 /* The new device belongs to this bus */
41969 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
41970diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
41971index d926189..17b19fd 100644
41972--- a/drivers/staging/iio/ring_generic.h
41973+++ b/drivers/staging/iio/ring_generic.h
41974@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
41975
41976 int (*is_enabled)(struct iio_ring_buffer *ring);
41977 int (*enable)(struct iio_ring_buffer *ring);
41978-};
41979+} __no_const;
41980
41981 /**
41982 * struct iio_ring_buffer - general ring buffer structure
41983diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
41984index 1b237b7..88c624e 100644
41985--- a/drivers/staging/octeon/ethernet-rx.c
41986+++ b/drivers/staging/octeon/ethernet-rx.c
41987@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41988 /* Increment RX stats for virtual ports */
41989 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
41990 #ifdef CONFIG_64BIT
41991- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
41992- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
41993+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
41994+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
41995 #else
41996- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
41997- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
41998+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
41999+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42000 #endif
42001 }
42002 netif_receive_skb(skb);
42003@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42004 dev->name);
42005 */
42006 #ifdef CONFIG_64BIT
42007- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42008+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42009 #else
42010- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42011+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42012 #endif
42013 dev_kfree_skb_irq(skb);
42014 }
42015diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42016index 492c502..d9909f1 100644
42017--- a/drivers/staging/octeon/ethernet.c
42018+++ b/drivers/staging/octeon/ethernet.c
42019@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42020 * since the RX tasklet also increments it.
42021 */
42022 #ifdef CONFIG_64BIT
42023- atomic64_add(rx_status.dropped_packets,
42024- (atomic64_t *)&priv->stats.rx_dropped);
42025+ atomic64_add_unchecked(rx_status.dropped_packets,
42026+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42027 #else
42028- atomic_add(rx_status.dropped_packets,
42029- (atomic_t *)&priv->stats.rx_dropped);
42030+ atomic_add_unchecked(rx_status.dropped_packets,
42031+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
42032 #endif
42033 }
42034
42035diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42036index a35bd5d..28fff45 100644
42037--- a/drivers/staging/otus/80211core/pub_zfi.h
42038+++ b/drivers/staging/otus/80211core/pub_zfi.h
42039@@ -531,7 +531,7 @@ struct zsCbFuncTbl
42040 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42041
42042 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42043-};
42044+} __no_const;
42045
42046 extern void zfZeroMemory(u8_t* va, u16_t length);
42047 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42048diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42049index c39a25f..696f5aa 100644
42050--- a/drivers/staging/panel/panel.c
42051+++ b/drivers/staging/panel/panel.c
42052@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42053 return 0;
42054 }
42055
42056-static struct file_operations lcd_fops = {
42057+static const struct file_operations lcd_fops = {
42058 .write = lcd_write,
42059 .open = lcd_open,
42060 .release = lcd_release,
42061@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42062 return 0;
42063 }
42064
42065-static struct file_operations keypad_fops = {
42066+static const struct file_operations keypad_fops = {
42067 .read = keypad_read, /* read */
42068 .open = keypad_open, /* open */
42069 .release = keypad_release, /* close */
42070diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42071index 270ebcb..37e46af 100644
42072--- a/drivers/staging/phison/phison.c
42073+++ b/drivers/staging/phison/phison.c
42074@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42075 ATA_BMDMA_SHT(DRV_NAME),
42076 };
42077
42078-static struct ata_port_operations phison_ops = {
42079+static const struct ata_port_operations phison_ops = {
42080 .inherits = &ata_bmdma_port_ops,
42081 .prereset = phison_pre_reset,
42082 };
42083diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42084index 2eb8e3d..57616a7 100644
42085--- a/drivers/staging/poch/poch.c
42086+++ b/drivers/staging/poch/poch.c
42087@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42088 return 0;
42089 }
42090
42091-static struct file_operations poch_fops = {
42092+static const struct file_operations poch_fops = {
42093 .owner = THIS_MODULE,
42094 .open = poch_open,
42095 .release = poch_release,
42096diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42097index c94de31..19402bc 100644
42098--- a/drivers/staging/pohmelfs/inode.c
42099+++ b/drivers/staging/pohmelfs/inode.c
42100@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42101 mutex_init(&psb->mcache_lock);
42102 psb->mcache_root = RB_ROOT;
42103 psb->mcache_timeout = msecs_to_jiffies(5000);
42104- atomic_long_set(&psb->mcache_gen, 0);
42105+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
42106
42107 psb->trans_max_pages = 100;
42108
42109@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42110 INIT_LIST_HEAD(&psb->crypto_ready_list);
42111 INIT_LIST_HEAD(&psb->crypto_active_list);
42112
42113- atomic_set(&psb->trans_gen, 1);
42114+ atomic_set_unchecked(&psb->trans_gen, 1);
42115 atomic_long_set(&psb->total_inodes, 0);
42116
42117 mutex_init(&psb->state_lock);
42118diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42119index e22665c..a2a9390 100644
42120--- a/drivers/staging/pohmelfs/mcache.c
42121+++ b/drivers/staging/pohmelfs/mcache.c
42122@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42123 m->data = data;
42124 m->start = start;
42125 m->size = size;
42126- m->gen = atomic_long_inc_return(&psb->mcache_gen);
42127+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42128
42129 mutex_lock(&psb->mcache_lock);
42130 err = pohmelfs_mcache_insert(psb, m);
42131diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42132index 623a07d..4035c19 100644
42133--- a/drivers/staging/pohmelfs/netfs.h
42134+++ b/drivers/staging/pohmelfs/netfs.h
42135@@ -570,14 +570,14 @@ struct pohmelfs_config;
42136 struct pohmelfs_sb {
42137 struct rb_root mcache_root;
42138 struct mutex mcache_lock;
42139- atomic_long_t mcache_gen;
42140+ atomic_long_unchecked_t mcache_gen;
42141 unsigned long mcache_timeout;
42142
42143 unsigned int idx;
42144
42145 unsigned int trans_retries;
42146
42147- atomic_t trans_gen;
42148+ atomic_unchecked_t trans_gen;
42149
42150 unsigned int crypto_attached_size;
42151 unsigned int crypto_align_size;
42152diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42153index 36a2535..0591bf4 100644
42154--- a/drivers/staging/pohmelfs/trans.c
42155+++ b/drivers/staging/pohmelfs/trans.c
42156@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42157 int err;
42158 struct netfs_cmd *cmd = t->iovec.iov_base;
42159
42160- t->gen = atomic_inc_return(&psb->trans_gen);
42161+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42162
42163 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42164 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42165diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42166index f890a16..509ece8 100644
42167--- a/drivers/staging/sep/sep_driver.c
42168+++ b/drivers/staging/sep/sep_driver.c
42169@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42170 static dev_t sep_devno;
42171
42172 /* the files operations structure of the driver */
42173-static struct file_operations sep_file_operations = {
42174+static const struct file_operations sep_file_operations = {
42175 .owner = THIS_MODULE,
42176 .ioctl = sep_ioctl,
42177 .poll = sep_poll,
42178diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42179index 5e16bc3..7655b10 100644
42180--- a/drivers/staging/usbip/usbip_common.h
42181+++ b/drivers/staging/usbip/usbip_common.h
42182@@ -374,7 +374,7 @@ struct usbip_device {
42183 void (*shutdown)(struct usbip_device *);
42184 void (*reset)(struct usbip_device *);
42185 void (*unusable)(struct usbip_device *);
42186- } eh_ops;
42187+ } __no_const eh_ops;
42188 };
42189
42190
42191diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42192index 57f7946..d9df23d 100644
42193--- a/drivers/staging/usbip/vhci.h
42194+++ b/drivers/staging/usbip/vhci.h
42195@@ -92,7 +92,7 @@ struct vhci_hcd {
42196 unsigned resuming:1;
42197 unsigned long re_timeout;
42198
42199- atomic_t seqnum;
42200+ atomic_unchecked_t seqnum;
42201
42202 /*
42203 * NOTE:
42204diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42205index 20cd7db..c2693ff 100644
42206--- a/drivers/staging/usbip/vhci_hcd.c
42207+++ b/drivers/staging/usbip/vhci_hcd.c
42208@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42209 return;
42210 }
42211
42212- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42213+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42214 if (priv->seqnum == 0xffff)
42215 usbip_uinfo("seqnum max\n");
42216
42217@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42218 return -ENOMEM;
42219 }
42220
42221- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42222+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42223 if (unlink->seqnum == 0xffff)
42224 usbip_uinfo("seqnum max\n");
42225
42226@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42227 vdev->rhport = rhport;
42228 }
42229
42230- atomic_set(&vhci->seqnum, 0);
42231+ atomic_set_unchecked(&vhci->seqnum, 0);
42232 spin_lock_init(&vhci->lock);
42233
42234
42235diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42236index 7fd76fe..673695a 100644
42237--- a/drivers/staging/usbip/vhci_rx.c
42238+++ b/drivers/staging/usbip/vhci_rx.c
42239@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42240 usbip_uerr("cannot find a urb of seqnum %u\n",
42241 pdu->base.seqnum);
42242 usbip_uinfo("max seqnum %d\n",
42243- atomic_read(&the_controller->seqnum));
42244+ atomic_read_unchecked(&the_controller->seqnum));
42245 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42246 return;
42247 }
42248diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42249index 7891288..8e31300 100644
42250--- a/drivers/staging/vme/devices/vme_user.c
42251+++ b/drivers/staging/vme/devices/vme_user.c
42252@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42253 static int __init vme_user_probe(struct device *, int, int);
42254 static int __exit vme_user_remove(struct device *, int, int);
42255
42256-static struct file_operations vme_user_fops = {
42257+static const struct file_operations vme_user_fops = {
42258 .open = vme_user_open,
42259 .release = vme_user_release,
42260 .read = vme_user_read,
42261diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42262index 58abf44..00c1fc8 100644
42263--- a/drivers/staging/vt6655/hostap.c
42264+++ b/drivers/staging/vt6655/hostap.c
42265@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42266 PSDevice apdev_priv;
42267 struct net_device *dev = pDevice->dev;
42268 int ret;
42269- const struct net_device_ops apdev_netdev_ops = {
42270+ net_device_ops_no_const apdev_netdev_ops = {
42271 .ndo_start_xmit = pDevice->tx_80211,
42272 };
42273
42274diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42275index 0c8267a..db1f363 100644
42276--- a/drivers/staging/vt6656/hostap.c
42277+++ b/drivers/staging/vt6656/hostap.c
42278@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42279 PSDevice apdev_priv;
42280 struct net_device *dev = pDevice->dev;
42281 int ret;
42282- const struct net_device_ops apdev_netdev_ops = {
42283+ net_device_ops_no_const apdev_netdev_ops = {
42284 .ndo_start_xmit = pDevice->tx_80211,
42285 };
42286
42287diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42288index 925678b..da7f5ed 100644
42289--- a/drivers/staging/wlan-ng/hfa384x_usb.c
42290+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42291@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42292
42293 struct usbctlx_completor {
42294 int (*complete) (struct usbctlx_completor *);
42295-};
42296+} __no_const;
42297 typedef struct usbctlx_completor usbctlx_completor_t;
42298
42299 static int
42300diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42301index 40de151..924f268 100644
42302--- a/drivers/telephony/ixj.c
42303+++ b/drivers/telephony/ixj.c
42304@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42305 bool mContinue;
42306 char *pIn, *pOut;
42307
42308+ pax_track_stack();
42309+
42310 if (!SCI_Prepare(j))
42311 return 0;
42312
42313diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42314index e941367..b631f5a 100644
42315--- a/drivers/uio/uio.c
42316+++ b/drivers/uio/uio.c
42317@@ -23,6 +23,7 @@
42318 #include <linux/string.h>
42319 #include <linux/kobject.h>
42320 #include <linux/uio_driver.h>
42321+#include <asm/local.h>
42322
42323 #define UIO_MAX_DEVICES 255
42324
42325@@ -30,10 +31,10 @@ struct uio_device {
42326 struct module *owner;
42327 struct device *dev;
42328 int minor;
42329- atomic_t event;
42330+ atomic_unchecked_t event;
42331 struct fasync_struct *async_queue;
42332 wait_queue_head_t wait;
42333- int vma_count;
42334+ local_t vma_count;
42335 struct uio_info *info;
42336 struct kobject *map_dir;
42337 struct kobject *portio_dir;
42338@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42339 return entry->show(mem, buf);
42340 }
42341
42342-static struct sysfs_ops map_sysfs_ops = {
42343+static const struct sysfs_ops map_sysfs_ops = {
42344 .show = map_type_show,
42345 };
42346
42347@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42348 return entry->show(port, buf);
42349 }
42350
42351-static struct sysfs_ops portio_sysfs_ops = {
42352+static const struct sysfs_ops portio_sysfs_ops = {
42353 .show = portio_type_show,
42354 };
42355
42356@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42357 struct uio_device *idev = dev_get_drvdata(dev);
42358 if (idev)
42359 return sprintf(buf, "%u\n",
42360- (unsigned int)atomic_read(&idev->event));
42361+ (unsigned int)atomic_read_unchecked(&idev->event));
42362 else
42363 return -ENODEV;
42364 }
42365@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42366 {
42367 struct uio_device *idev = info->uio_dev;
42368
42369- atomic_inc(&idev->event);
42370+ atomic_inc_unchecked(&idev->event);
42371 wake_up_interruptible(&idev->wait);
42372 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42373 }
42374@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42375 }
42376
42377 listener->dev = idev;
42378- listener->event_count = atomic_read(&idev->event);
42379+ listener->event_count = atomic_read_unchecked(&idev->event);
42380 filep->private_data = listener;
42381
42382 if (idev->info->open) {
42383@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42384 return -EIO;
42385
42386 poll_wait(filep, &idev->wait, wait);
42387- if (listener->event_count != atomic_read(&idev->event))
42388+ if (listener->event_count != atomic_read_unchecked(&idev->event))
42389 return POLLIN | POLLRDNORM;
42390 return 0;
42391 }
42392@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42393 do {
42394 set_current_state(TASK_INTERRUPTIBLE);
42395
42396- event_count = atomic_read(&idev->event);
42397+ event_count = atomic_read_unchecked(&idev->event);
42398 if (event_count != listener->event_count) {
42399 if (copy_to_user(buf, &event_count, count))
42400 retval = -EFAULT;
42401@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42402 static void uio_vma_open(struct vm_area_struct *vma)
42403 {
42404 struct uio_device *idev = vma->vm_private_data;
42405- idev->vma_count++;
42406+ local_inc(&idev->vma_count);
42407 }
42408
42409 static void uio_vma_close(struct vm_area_struct *vma)
42410 {
42411 struct uio_device *idev = vma->vm_private_data;
42412- idev->vma_count--;
42413+ local_dec(&idev->vma_count);
42414 }
42415
42416 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42417@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42418 idev->owner = owner;
42419 idev->info = info;
42420 init_waitqueue_head(&idev->wait);
42421- atomic_set(&idev->event, 0);
42422+ atomic_set_unchecked(&idev->event, 0);
42423
42424 ret = uio_get_minor(idev);
42425 if (ret)
42426diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42427index fbea856..06efea6 100644
42428--- a/drivers/usb/atm/usbatm.c
42429+++ b/drivers/usb/atm/usbatm.c
42430@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42431 if (printk_ratelimit())
42432 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42433 __func__, vpi, vci);
42434- atomic_inc(&vcc->stats->rx_err);
42435+ atomic_inc_unchecked(&vcc->stats->rx_err);
42436 return;
42437 }
42438
42439@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42440 if (length > ATM_MAX_AAL5_PDU) {
42441 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42442 __func__, length, vcc);
42443- atomic_inc(&vcc->stats->rx_err);
42444+ atomic_inc_unchecked(&vcc->stats->rx_err);
42445 goto out;
42446 }
42447
42448@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42449 if (sarb->len < pdu_length) {
42450 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42451 __func__, pdu_length, sarb->len, vcc);
42452- atomic_inc(&vcc->stats->rx_err);
42453+ atomic_inc_unchecked(&vcc->stats->rx_err);
42454 goto out;
42455 }
42456
42457 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42458 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42459 __func__, vcc);
42460- atomic_inc(&vcc->stats->rx_err);
42461+ atomic_inc_unchecked(&vcc->stats->rx_err);
42462 goto out;
42463 }
42464
42465@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42466 if (printk_ratelimit())
42467 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42468 __func__, length);
42469- atomic_inc(&vcc->stats->rx_drop);
42470+ atomic_inc_unchecked(&vcc->stats->rx_drop);
42471 goto out;
42472 }
42473
42474@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42475
42476 vcc->push(vcc, skb);
42477
42478- atomic_inc(&vcc->stats->rx);
42479+ atomic_inc_unchecked(&vcc->stats->rx);
42480 out:
42481 skb_trim(sarb, 0);
42482 }
42483@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42484 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42485
42486 usbatm_pop(vcc, skb);
42487- atomic_inc(&vcc->stats->tx);
42488+ atomic_inc_unchecked(&vcc->stats->tx);
42489
42490 skb = skb_dequeue(&instance->sndqueue);
42491 }
42492@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42493 if (!left--)
42494 return sprintf(page,
42495 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42496- atomic_read(&atm_dev->stats.aal5.tx),
42497- atomic_read(&atm_dev->stats.aal5.tx_err),
42498- atomic_read(&atm_dev->stats.aal5.rx),
42499- atomic_read(&atm_dev->stats.aal5.rx_err),
42500- atomic_read(&atm_dev->stats.aal5.rx_drop));
42501+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42502+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42503+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42504+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42505+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42506
42507 if (!left--) {
42508 if (instance->disconnected)
42509diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42510index 24e6205..fe5a5d4 100644
42511--- a/drivers/usb/core/hcd.c
42512+++ b/drivers/usb/core/hcd.c
42513@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42514
42515 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42516
42517-struct usb_mon_operations *mon_ops;
42518+const struct usb_mon_operations *mon_ops;
42519
42520 /*
42521 * The registration is unlocked.
42522@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42523 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42524 */
42525
42526-int usb_mon_register (struct usb_mon_operations *ops)
42527+int usb_mon_register (const struct usb_mon_operations *ops)
42528 {
42529
42530 if (mon_ops)
42531diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42532index bcbe104..9cfd1c6 100644
42533--- a/drivers/usb/core/hcd.h
42534+++ b/drivers/usb/core/hcd.h
42535@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42536 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42537
42538 struct usb_mon_operations {
42539- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42540- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42541- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42542+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42543+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42544+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42545 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42546 };
42547
42548-extern struct usb_mon_operations *mon_ops;
42549+extern const struct usb_mon_operations *mon_ops;
42550
42551 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42552 {
42553@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42554 (*mon_ops->urb_complete)(bus, urb, status);
42555 }
42556
42557-int usb_mon_register(struct usb_mon_operations *ops);
42558+int usb_mon_register(const struct usb_mon_operations *ops);
42559 void usb_mon_deregister(void);
42560
42561 #else
42562diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42563index 409cc94..a673bad 100644
42564--- a/drivers/usb/core/message.c
42565+++ b/drivers/usb/core/message.c
42566@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42567 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42568 if (buf) {
42569 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42570- if (len > 0) {
42571- smallbuf = kmalloc(++len, GFP_NOIO);
42572+ if (len++ > 0) {
42573+ smallbuf = kmalloc(len, GFP_NOIO);
42574 if (!smallbuf)
42575 return buf;
42576 memcpy(smallbuf, buf, len);
42577diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42578index 62ff5e7..530b74e 100644
42579--- a/drivers/usb/misc/appledisplay.c
42580+++ b/drivers/usb/misc/appledisplay.c
42581@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42582 return pdata->msgdata[1];
42583 }
42584
42585-static struct backlight_ops appledisplay_bl_data = {
42586+static const struct backlight_ops appledisplay_bl_data = {
42587 .get_brightness = appledisplay_bl_get_brightness,
42588 .update_status = appledisplay_bl_update_status,
42589 };
42590diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42591index e0c2db3..bd8cb66 100644
42592--- a/drivers/usb/mon/mon_main.c
42593+++ b/drivers/usb/mon/mon_main.c
42594@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42595 /*
42596 * Ops
42597 */
42598-static struct usb_mon_operations mon_ops_0 = {
42599+static const struct usb_mon_operations mon_ops_0 = {
42600 .urb_submit = mon_submit,
42601 .urb_submit_error = mon_submit_error,
42602 .urb_complete = mon_complete,
42603diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42604index d6bea3e..60b250e 100644
42605--- a/drivers/usb/wusbcore/wa-hc.h
42606+++ b/drivers/usb/wusbcore/wa-hc.h
42607@@ -192,7 +192,7 @@ struct wahc {
42608 struct list_head xfer_delayed_list;
42609 spinlock_t xfer_list_lock;
42610 struct work_struct xfer_work;
42611- atomic_t xfer_id_count;
42612+ atomic_unchecked_t xfer_id_count;
42613 };
42614
42615
42616@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42617 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42618 spin_lock_init(&wa->xfer_list_lock);
42619 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42620- atomic_set(&wa->xfer_id_count, 1);
42621+ atomic_set_unchecked(&wa->xfer_id_count, 1);
42622 }
42623
42624 /**
42625diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42626index 613a5fc..3174865 100644
42627--- a/drivers/usb/wusbcore/wa-xfer.c
42628+++ b/drivers/usb/wusbcore/wa-xfer.c
42629@@ -293,7 +293,7 @@ out:
42630 */
42631 static void wa_xfer_id_init(struct wa_xfer *xfer)
42632 {
42633- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42634+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42635 }
42636
42637 /*
42638diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42639index aa42fce..f8a828c 100644
42640--- a/drivers/uwb/wlp/messages.c
42641+++ b/drivers/uwb/wlp/messages.c
42642@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42643 size_t len = skb->len;
42644 size_t used;
42645 ssize_t result;
42646- struct wlp_nonce enonce, rnonce;
42647+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42648 enum wlp_assc_error assc_err;
42649 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42650 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42651diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42652index 0370399..6627c94 100644
42653--- a/drivers/uwb/wlp/sysfs.c
42654+++ b/drivers/uwb/wlp/sysfs.c
42655@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42656 return ret;
42657 }
42658
42659-static
42660-struct sysfs_ops wss_sysfs_ops = {
42661+static const struct sysfs_ops wss_sysfs_ops = {
42662 .show = wlp_wss_attr_show,
42663 .store = wlp_wss_attr_store,
42664 };
42665diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42666index d5e8010..5687b56 100644
42667--- a/drivers/video/atmel_lcdfb.c
42668+++ b/drivers/video/atmel_lcdfb.c
42669@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42670 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42671 }
42672
42673-static struct backlight_ops atmel_lcdc_bl_ops = {
42674+static const struct backlight_ops atmel_lcdc_bl_ops = {
42675 .update_status = atmel_bl_update_status,
42676 .get_brightness = atmel_bl_get_brightness,
42677 };
42678diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42679index e4e4d43..66bcbcc 100644
42680--- a/drivers/video/aty/aty128fb.c
42681+++ b/drivers/video/aty/aty128fb.c
42682@@ -149,7 +149,7 @@ enum {
42683 };
42684
42685 /* Must match above enum */
42686-static const char *r128_family[] __devinitdata = {
42687+static const char *r128_family[] __devinitconst = {
42688 "AGP",
42689 "PCI",
42690 "PRO AGP",
42691@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42692 return bd->props.brightness;
42693 }
42694
42695-static struct backlight_ops aty128_bl_data = {
42696+static const struct backlight_ops aty128_bl_data = {
42697 .get_brightness = aty128_bl_get_brightness,
42698 .update_status = aty128_bl_update_status,
42699 };
42700diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42701index 913b4a4..9295a38 100644
42702--- a/drivers/video/aty/atyfb_base.c
42703+++ b/drivers/video/aty/atyfb_base.c
42704@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42705 return bd->props.brightness;
42706 }
42707
42708-static struct backlight_ops aty_bl_data = {
42709+static const struct backlight_ops aty_bl_data = {
42710 .get_brightness = aty_bl_get_brightness,
42711 .update_status = aty_bl_update_status,
42712 };
42713diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42714index 1a056ad..221bd6a 100644
42715--- a/drivers/video/aty/radeon_backlight.c
42716+++ b/drivers/video/aty/radeon_backlight.c
42717@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42718 return bd->props.brightness;
42719 }
42720
42721-static struct backlight_ops radeon_bl_data = {
42722+static const struct backlight_ops radeon_bl_data = {
42723 .get_brightness = radeon_bl_get_brightness,
42724 .update_status = radeon_bl_update_status,
42725 };
42726diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42727index ad05da5..3cb2cb9 100644
42728--- a/drivers/video/backlight/adp5520_bl.c
42729+++ b/drivers/video/backlight/adp5520_bl.c
42730@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42731 return error ? data->current_brightness : reg_val;
42732 }
42733
42734-static struct backlight_ops adp5520_bl_ops = {
42735+static const struct backlight_ops adp5520_bl_ops = {
42736 .update_status = adp5520_bl_update_status,
42737 .get_brightness = adp5520_bl_get_brightness,
42738 };
42739diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42740index 2c3bdfc..d769b0b 100644
42741--- a/drivers/video/backlight/adx_bl.c
42742+++ b/drivers/video/backlight/adx_bl.c
42743@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42744 return 1;
42745 }
42746
42747-static struct backlight_ops adx_backlight_ops = {
42748+static const struct backlight_ops adx_backlight_ops = {
42749 .options = 0,
42750 .update_status = adx_backlight_update_status,
42751 .get_brightness = adx_backlight_get_brightness,
42752diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42753index 505c082..6b6b3cc 100644
42754--- a/drivers/video/backlight/atmel-pwm-bl.c
42755+++ b/drivers/video/backlight/atmel-pwm-bl.c
42756@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
42757 return pwm_channel_enable(&pwmbl->pwmc);
42758 }
42759
42760-static struct backlight_ops atmel_pwm_bl_ops = {
42761+static const struct backlight_ops atmel_pwm_bl_ops = {
42762 .get_brightness = atmel_pwm_bl_get_intensity,
42763 .update_status = atmel_pwm_bl_set_intensity,
42764 };
42765diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
42766index 5e20e6e..89025e6 100644
42767--- a/drivers/video/backlight/backlight.c
42768+++ b/drivers/video/backlight/backlight.c
42769@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
42770 * ERR_PTR() or a pointer to the newly allocated device.
42771 */
42772 struct backlight_device *backlight_device_register(const char *name,
42773- struct device *parent, void *devdata, struct backlight_ops *ops)
42774+ struct device *parent, void *devdata, const struct backlight_ops *ops)
42775 {
42776 struct backlight_device *new_bd;
42777 int rc;
42778diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
42779index 9677494..b4bcf80 100644
42780--- a/drivers/video/backlight/corgi_lcd.c
42781+++ b/drivers/video/backlight/corgi_lcd.c
42782@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
42783 }
42784 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
42785
42786-static struct backlight_ops corgi_bl_ops = {
42787+static const struct backlight_ops corgi_bl_ops = {
42788 .get_brightness = corgi_bl_get_intensity,
42789 .update_status = corgi_bl_update_status,
42790 };
42791diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
42792index b9fe62b..2914bf1 100644
42793--- a/drivers/video/backlight/cr_bllcd.c
42794+++ b/drivers/video/backlight/cr_bllcd.c
42795@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
42796 return intensity;
42797 }
42798
42799-static struct backlight_ops cr_backlight_ops = {
42800+static const struct backlight_ops cr_backlight_ops = {
42801 .get_brightness = cr_backlight_get_intensity,
42802 .update_status = cr_backlight_set_intensity,
42803 };
42804diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
42805index 701a108..feacfd5 100644
42806--- a/drivers/video/backlight/da903x_bl.c
42807+++ b/drivers/video/backlight/da903x_bl.c
42808@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
42809 return data->current_brightness;
42810 }
42811
42812-static struct backlight_ops da903x_backlight_ops = {
42813+static const struct backlight_ops da903x_backlight_ops = {
42814 .update_status = da903x_backlight_update_status,
42815 .get_brightness = da903x_backlight_get_brightness,
42816 };
42817diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
42818index 6d27f62..e6d348e 100644
42819--- a/drivers/video/backlight/generic_bl.c
42820+++ b/drivers/video/backlight/generic_bl.c
42821@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
42822 }
42823 EXPORT_SYMBOL(corgibl_limit_intensity);
42824
42825-static struct backlight_ops genericbl_ops = {
42826+static const struct backlight_ops genericbl_ops = {
42827 .options = BL_CORE_SUSPENDRESUME,
42828 .get_brightness = genericbl_get_intensity,
42829 .update_status = genericbl_send_intensity,
42830diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
42831index 7fb4eef..f7cc528 100644
42832--- a/drivers/video/backlight/hp680_bl.c
42833+++ b/drivers/video/backlight/hp680_bl.c
42834@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
42835 return current_intensity;
42836 }
42837
42838-static struct backlight_ops hp680bl_ops = {
42839+static const struct backlight_ops hp680bl_ops = {
42840 .get_brightness = hp680bl_get_intensity,
42841 .update_status = hp680bl_set_intensity,
42842 };
42843diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
42844index 7aed256..db9071f 100644
42845--- a/drivers/video/backlight/jornada720_bl.c
42846+++ b/drivers/video/backlight/jornada720_bl.c
42847@@ -93,7 +93,7 @@ out:
42848 return ret;
42849 }
42850
42851-static struct backlight_ops jornada_bl_ops = {
42852+static const struct backlight_ops jornada_bl_ops = {
42853 .get_brightness = jornada_bl_get_brightness,
42854 .update_status = jornada_bl_update_status,
42855 .options = BL_CORE_SUSPENDRESUME,
42856diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
42857index a38fda1..939e7b8 100644
42858--- a/drivers/video/backlight/kb3886_bl.c
42859+++ b/drivers/video/backlight/kb3886_bl.c
42860@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
42861 return kb3886bl_intensity;
42862 }
42863
42864-static struct backlight_ops kb3886bl_ops = {
42865+static const struct backlight_ops kb3886bl_ops = {
42866 .get_brightness = kb3886bl_get_intensity,
42867 .update_status = kb3886bl_send_intensity,
42868 };
42869diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
42870index 6b488b8..00a9591 100644
42871--- a/drivers/video/backlight/locomolcd.c
42872+++ b/drivers/video/backlight/locomolcd.c
42873@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
42874 return current_intensity;
42875 }
42876
42877-static struct backlight_ops locomobl_data = {
42878+static const struct backlight_ops locomobl_data = {
42879 .get_brightness = locomolcd_get_intensity,
42880 .update_status = locomolcd_set_intensity,
42881 };
42882diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
42883index 99bdfa8..3dac448 100644
42884--- a/drivers/video/backlight/mbp_nvidia_bl.c
42885+++ b/drivers/video/backlight/mbp_nvidia_bl.c
42886@@ -33,7 +33,7 @@ struct dmi_match_data {
42887 unsigned long iostart;
42888 unsigned long iolen;
42889 /* Backlight operations structure. */
42890- struct backlight_ops backlight_ops;
42891+ const struct backlight_ops backlight_ops;
42892 };
42893
42894 /* Module parameters. */
42895diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
42896index cbad67e..3cf900e 100644
42897--- a/drivers/video/backlight/omap1_bl.c
42898+++ b/drivers/video/backlight/omap1_bl.c
42899@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
42900 return bl->current_intensity;
42901 }
42902
42903-static struct backlight_ops omapbl_ops = {
42904+static const struct backlight_ops omapbl_ops = {
42905 .get_brightness = omapbl_get_intensity,
42906 .update_status = omapbl_update_status,
42907 };
42908diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
42909index 9edaf24..075786e 100644
42910--- a/drivers/video/backlight/progear_bl.c
42911+++ b/drivers/video/backlight/progear_bl.c
42912@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
42913 return intensity - HW_LEVEL_MIN;
42914 }
42915
42916-static struct backlight_ops progearbl_ops = {
42917+static const struct backlight_ops progearbl_ops = {
42918 .get_brightness = progearbl_get_intensity,
42919 .update_status = progearbl_set_intensity,
42920 };
42921diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
42922index 8871662..df9e0b3 100644
42923--- a/drivers/video/backlight/pwm_bl.c
42924+++ b/drivers/video/backlight/pwm_bl.c
42925@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
42926 return bl->props.brightness;
42927 }
42928
42929-static struct backlight_ops pwm_backlight_ops = {
42930+static const struct backlight_ops pwm_backlight_ops = {
42931 .update_status = pwm_backlight_update_status,
42932 .get_brightness = pwm_backlight_get_brightness,
42933 };
42934diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
42935index 43edbad..e14ce4d 100644
42936--- a/drivers/video/backlight/tosa_bl.c
42937+++ b/drivers/video/backlight/tosa_bl.c
42938@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
42939 return props->brightness;
42940 }
42941
42942-static struct backlight_ops bl_ops = {
42943+static const struct backlight_ops bl_ops = {
42944 .get_brightness = tosa_bl_get_brightness,
42945 .update_status = tosa_bl_update_status,
42946 };
42947diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
42948index 467bdb7..e32add3 100644
42949--- a/drivers/video/backlight/wm831x_bl.c
42950+++ b/drivers/video/backlight/wm831x_bl.c
42951@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
42952 return data->current_brightness;
42953 }
42954
42955-static struct backlight_ops wm831x_backlight_ops = {
42956+static const struct backlight_ops wm831x_backlight_ops = {
42957 .options = BL_CORE_SUSPENDRESUME,
42958 .update_status = wm831x_backlight_update_status,
42959 .get_brightness = wm831x_backlight_get_brightness,
42960diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
42961index e49ae5e..db4e6f7 100644
42962--- a/drivers/video/bf54x-lq043fb.c
42963+++ b/drivers/video/bf54x-lq043fb.c
42964@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42965 return 0;
42966 }
42967
42968-static struct backlight_ops bfin_lq043fb_bl_ops = {
42969+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42970 .get_brightness = bl_get_brightness,
42971 };
42972
42973diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
42974index 2c72a7c..d523e52 100644
42975--- a/drivers/video/bfin-t350mcqb-fb.c
42976+++ b/drivers/video/bfin-t350mcqb-fb.c
42977@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42978 return 0;
42979 }
42980
42981-static struct backlight_ops bfin_lq043fb_bl_ops = {
42982+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42983 .get_brightness = bl_get_brightness,
42984 };
42985
42986diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
42987index f53b9f1..958bf4e 100644
42988--- a/drivers/video/fbcmap.c
42989+++ b/drivers/video/fbcmap.c
42990@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
42991 rc = -ENODEV;
42992 goto out;
42993 }
42994- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
42995- !info->fbops->fb_setcmap)) {
42996+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
42997 rc = -EINVAL;
42998 goto out1;
42999 }
43000diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43001index 99bbd28..ad3829e 100644
43002--- a/drivers/video/fbmem.c
43003+++ b/drivers/video/fbmem.c
43004@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43005 image->dx += image->width + 8;
43006 }
43007 } else if (rotate == FB_ROTATE_UD) {
43008- for (x = 0; x < num && image->dx >= 0; x++) {
43009+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43010 info->fbops->fb_imageblit(info, image);
43011 image->dx -= image->width + 8;
43012 }
43013@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43014 image->dy += image->height + 8;
43015 }
43016 } else if (rotate == FB_ROTATE_CCW) {
43017- for (x = 0; x < num && image->dy >= 0; x++) {
43018+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43019 info->fbops->fb_imageblit(info, image);
43020 image->dy -= image->height + 8;
43021 }
43022@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43023 int flags = info->flags;
43024 int ret = 0;
43025
43026+ pax_track_stack();
43027+
43028 if (var->activate & FB_ACTIVATE_INV_MODE) {
43029 struct fb_videomode mode1, mode2;
43030
43031@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43032 void __user *argp = (void __user *)arg;
43033 long ret = 0;
43034
43035+ pax_track_stack();
43036+
43037 switch (cmd) {
43038 case FBIOGET_VSCREENINFO:
43039 if (!lock_fb_info(info))
43040@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43041 return -EFAULT;
43042 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43043 return -EINVAL;
43044- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43045+ if (con2fb.framebuffer >= FB_MAX)
43046 return -EINVAL;
43047 if (!registered_fb[con2fb.framebuffer])
43048 request_module("fb%d", con2fb.framebuffer);
43049diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43050index f20eff8..3e4f622 100644
43051--- a/drivers/video/geode/gx1fb_core.c
43052+++ b/drivers/video/geode/gx1fb_core.c
43053@@ -30,7 +30,7 @@ static int crt_option = 1;
43054 static char panel_option[32] = "";
43055
43056 /* Modes relevant to the GX1 (taken from modedb.c) */
43057-static const struct fb_videomode __initdata gx1_modedb[] = {
43058+static const struct fb_videomode __initconst gx1_modedb[] = {
43059 /* 640x480-60 VESA */
43060 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43061 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43062diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43063index 896e53d..4d87d0b 100644
43064--- a/drivers/video/gxt4500.c
43065+++ b/drivers/video/gxt4500.c
43066@@ -156,7 +156,7 @@ struct gxt4500_par {
43067 static char *mode_option;
43068
43069 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43070-static const struct fb_videomode defaultmode __devinitdata = {
43071+static const struct fb_videomode defaultmode __devinitconst = {
43072 .refresh = 60,
43073 .xres = 1280,
43074 .yres = 1024,
43075@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43076 return 0;
43077 }
43078
43079-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43080+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43081 .id = "IBM GXT4500P",
43082 .type = FB_TYPE_PACKED_PIXELS,
43083 .visual = FB_VISUAL_PSEUDOCOLOR,
43084diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43085index f5bedee..28c6028 100644
43086--- a/drivers/video/i810/i810_accel.c
43087+++ b/drivers/video/i810/i810_accel.c
43088@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43089 }
43090 }
43091 printk("ringbuffer lockup!!!\n");
43092+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43093 i810_report_error(mmio);
43094 par->dev_flags |= LOCKUP;
43095 info->pixmap.scan_align = 1;
43096diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43097index 5743ea2..457f82c 100644
43098--- a/drivers/video/i810/i810_main.c
43099+++ b/drivers/video/i810/i810_main.c
43100@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43101 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43102
43103 /* PCI */
43104-static const char *i810_pci_list[] __devinitdata = {
43105+static const char *i810_pci_list[] __devinitconst = {
43106 "Intel(R) 810 Framebuffer Device" ,
43107 "Intel(R) 810-DC100 Framebuffer Device" ,
43108 "Intel(R) 810E Framebuffer Device" ,
43109diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43110index 3c14e43..eafa544 100644
43111--- a/drivers/video/logo/logo_linux_clut224.ppm
43112+++ b/drivers/video/logo/logo_linux_clut224.ppm
43113@@ -1,1604 +1,1123 @@
43114 P3
43115-# Standard 224-color Linux logo
43116 80 80
43117 255
43118- 0 0 0 0 0 0 0 0 0 0 0 0
43119- 0 0 0 0 0 0 0 0 0 0 0 0
43120- 0 0 0 0 0 0 0 0 0 0 0 0
43121- 0 0 0 0 0 0 0 0 0 0 0 0
43122- 0 0 0 0 0 0 0 0 0 0 0 0
43123- 0 0 0 0 0 0 0 0 0 0 0 0
43124- 0 0 0 0 0 0 0 0 0 0 0 0
43125- 0 0 0 0 0 0 0 0 0 0 0 0
43126- 0 0 0 0 0 0 0 0 0 0 0 0
43127- 6 6 6 6 6 6 10 10 10 10 10 10
43128- 10 10 10 6 6 6 6 6 6 6 6 6
43129- 0 0 0 0 0 0 0 0 0 0 0 0
43130- 0 0 0 0 0 0 0 0 0 0 0 0
43131- 0 0 0 0 0 0 0 0 0 0 0 0
43132- 0 0 0 0 0 0 0 0 0 0 0 0
43133- 0 0 0 0 0 0 0 0 0 0 0 0
43134- 0 0 0 0 0 0 0 0 0 0 0 0
43135- 0 0 0 0 0 0 0 0 0 0 0 0
43136- 0 0 0 0 0 0 0 0 0 0 0 0
43137- 0 0 0 0 0 0 0 0 0 0 0 0
43138- 0 0 0 0 0 0 0 0 0 0 0 0
43139- 0 0 0 0 0 0 0 0 0 0 0 0
43140- 0 0 0 0 0 0 0 0 0 0 0 0
43141- 0 0 0 0 0 0 0 0 0 0 0 0
43142- 0 0 0 0 0 0 0 0 0 0 0 0
43143- 0 0 0 0 0 0 0 0 0 0 0 0
43144- 0 0 0 0 0 0 0 0 0 0 0 0
43145- 0 0 0 0 0 0 0 0 0 0 0 0
43146- 0 0 0 6 6 6 10 10 10 14 14 14
43147- 22 22 22 26 26 26 30 30 30 34 34 34
43148- 30 30 30 30 30 30 26 26 26 18 18 18
43149- 14 14 14 10 10 10 6 6 6 0 0 0
43150- 0 0 0 0 0 0 0 0 0 0 0 0
43151- 0 0 0 0 0 0 0 0 0 0 0 0
43152- 0 0 0 0 0 0 0 0 0 0 0 0
43153- 0 0 0 0 0 0 0 0 0 0 0 0
43154- 0 0 0 0 0 0 0 0 0 0 0 0
43155- 0 0 0 0 0 0 0 0 0 0 0 0
43156- 0 0 0 0 0 0 0 0 0 0 0 0
43157- 0 0 0 0 0 0 0 0 0 0 0 0
43158- 0 0 0 0 0 0 0 0 0 0 0 0
43159- 0 0 0 0 0 1 0 0 1 0 0 0
43160- 0 0 0 0 0 0 0 0 0 0 0 0
43161- 0 0 0 0 0 0 0 0 0 0 0 0
43162- 0 0 0 0 0 0 0 0 0 0 0 0
43163- 0 0 0 0 0 0 0 0 0 0 0 0
43164- 0 0 0 0 0 0 0 0 0 0 0 0
43165- 0 0 0 0 0 0 0 0 0 0 0 0
43166- 6 6 6 14 14 14 26 26 26 42 42 42
43167- 54 54 54 66 66 66 78 78 78 78 78 78
43168- 78 78 78 74 74 74 66 66 66 54 54 54
43169- 42 42 42 26 26 26 18 18 18 10 10 10
43170- 6 6 6 0 0 0 0 0 0 0 0 0
43171- 0 0 0 0 0 0 0 0 0 0 0 0
43172- 0 0 0 0 0 0 0 0 0 0 0 0
43173- 0 0 0 0 0 0 0 0 0 0 0 0
43174- 0 0 0 0 0 0 0 0 0 0 0 0
43175- 0 0 0 0 0 0 0 0 0 0 0 0
43176- 0 0 0 0 0 0 0 0 0 0 0 0
43177- 0 0 0 0 0 0 0 0 0 0 0 0
43178- 0 0 0 0 0 0 0 0 0 0 0 0
43179- 0 0 1 0 0 0 0 0 0 0 0 0
43180- 0 0 0 0 0 0 0 0 0 0 0 0
43181- 0 0 0 0 0 0 0 0 0 0 0 0
43182- 0 0 0 0 0 0 0 0 0 0 0 0
43183- 0 0 0 0 0 0 0 0 0 0 0 0
43184- 0 0 0 0 0 0 0 0 0 0 0 0
43185- 0 0 0 0 0 0 0 0 0 10 10 10
43186- 22 22 22 42 42 42 66 66 66 86 86 86
43187- 66 66 66 38 38 38 38 38 38 22 22 22
43188- 26 26 26 34 34 34 54 54 54 66 66 66
43189- 86 86 86 70 70 70 46 46 46 26 26 26
43190- 14 14 14 6 6 6 0 0 0 0 0 0
43191- 0 0 0 0 0 0 0 0 0 0 0 0
43192- 0 0 0 0 0 0 0 0 0 0 0 0
43193- 0 0 0 0 0 0 0 0 0 0 0 0
43194- 0 0 0 0 0 0 0 0 0 0 0 0
43195- 0 0 0 0 0 0 0 0 0 0 0 0
43196- 0 0 0 0 0 0 0 0 0 0 0 0
43197- 0 0 0 0 0 0 0 0 0 0 0 0
43198- 0 0 0 0 0 0 0 0 0 0 0 0
43199- 0 0 1 0 0 1 0 0 1 0 0 0
43200- 0 0 0 0 0 0 0 0 0 0 0 0
43201- 0 0 0 0 0 0 0 0 0 0 0 0
43202- 0 0 0 0 0 0 0 0 0 0 0 0
43203- 0 0 0 0 0 0 0 0 0 0 0 0
43204- 0 0 0 0 0 0 0 0 0 0 0 0
43205- 0 0 0 0 0 0 10 10 10 26 26 26
43206- 50 50 50 82 82 82 58 58 58 6 6 6
43207- 2 2 6 2 2 6 2 2 6 2 2 6
43208- 2 2 6 2 2 6 2 2 6 2 2 6
43209- 6 6 6 54 54 54 86 86 86 66 66 66
43210- 38 38 38 18 18 18 6 6 6 0 0 0
43211- 0 0 0 0 0 0 0 0 0 0 0 0
43212- 0 0 0 0 0 0 0 0 0 0 0 0
43213- 0 0 0 0 0 0 0 0 0 0 0 0
43214- 0 0 0 0 0 0 0 0 0 0 0 0
43215- 0 0 0 0 0 0 0 0 0 0 0 0
43216- 0 0 0 0 0 0 0 0 0 0 0 0
43217- 0 0 0 0 0 0 0 0 0 0 0 0
43218- 0 0 0 0 0 0 0 0 0 0 0 0
43219- 0 0 0 0 0 0 0 0 0 0 0 0
43220- 0 0 0 0 0 0 0 0 0 0 0 0
43221- 0 0 0 0 0 0 0 0 0 0 0 0
43222- 0 0 0 0 0 0 0 0 0 0 0 0
43223- 0 0 0 0 0 0 0 0 0 0 0 0
43224- 0 0 0 0 0 0 0 0 0 0 0 0
43225- 0 0 0 6 6 6 22 22 22 50 50 50
43226- 78 78 78 34 34 34 2 2 6 2 2 6
43227- 2 2 6 2 2 6 2 2 6 2 2 6
43228- 2 2 6 2 2 6 2 2 6 2 2 6
43229- 2 2 6 2 2 6 6 6 6 70 70 70
43230- 78 78 78 46 46 46 22 22 22 6 6 6
43231- 0 0 0 0 0 0 0 0 0 0 0 0
43232- 0 0 0 0 0 0 0 0 0 0 0 0
43233- 0 0 0 0 0 0 0 0 0 0 0 0
43234- 0 0 0 0 0 0 0 0 0 0 0 0
43235- 0 0 0 0 0 0 0 0 0 0 0 0
43236- 0 0 0 0 0 0 0 0 0 0 0 0
43237- 0 0 0 0 0 0 0 0 0 0 0 0
43238- 0 0 0 0 0 0 0 0 0 0 0 0
43239- 0 0 1 0 0 1 0 0 1 0 0 0
43240- 0 0 0 0 0 0 0 0 0 0 0 0
43241- 0 0 0 0 0 0 0 0 0 0 0 0
43242- 0 0 0 0 0 0 0 0 0 0 0 0
43243- 0 0 0 0 0 0 0 0 0 0 0 0
43244- 0 0 0 0 0 0 0 0 0 0 0 0
43245- 6 6 6 18 18 18 42 42 42 82 82 82
43246- 26 26 26 2 2 6 2 2 6 2 2 6
43247- 2 2 6 2 2 6 2 2 6 2 2 6
43248- 2 2 6 2 2 6 2 2 6 14 14 14
43249- 46 46 46 34 34 34 6 6 6 2 2 6
43250- 42 42 42 78 78 78 42 42 42 18 18 18
43251- 6 6 6 0 0 0 0 0 0 0 0 0
43252- 0 0 0 0 0 0 0 0 0 0 0 0
43253- 0 0 0 0 0 0 0 0 0 0 0 0
43254- 0 0 0 0 0 0 0 0 0 0 0 0
43255- 0 0 0 0 0 0 0 0 0 0 0 0
43256- 0 0 0 0 0 0 0 0 0 0 0 0
43257- 0 0 0 0 0 0 0 0 0 0 0 0
43258- 0 0 0 0 0 0 0 0 0 0 0 0
43259- 0 0 1 0 0 0 0 0 1 0 0 0
43260- 0 0 0 0 0 0 0 0 0 0 0 0
43261- 0 0 0 0 0 0 0 0 0 0 0 0
43262- 0 0 0 0 0 0 0 0 0 0 0 0
43263- 0 0 0 0 0 0 0 0 0 0 0 0
43264- 0 0 0 0 0 0 0 0 0 0 0 0
43265- 10 10 10 30 30 30 66 66 66 58 58 58
43266- 2 2 6 2 2 6 2 2 6 2 2 6
43267- 2 2 6 2 2 6 2 2 6 2 2 6
43268- 2 2 6 2 2 6 2 2 6 26 26 26
43269- 86 86 86 101 101 101 46 46 46 10 10 10
43270- 2 2 6 58 58 58 70 70 70 34 34 34
43271- 10 10 10 0 0 0 0 0 0 0 0 0
43272- 0 0 0 0 0 0 0 0 0 0 0 0
43273- 0 0 0 0 0 0 0 0 0 0 0 0
43274- 0 0 0 0 0 0 0 0 0 0 0 0
43275- 0 0 0 0 0 0 0 0 0 0 0 0
43276- 0 0 0 0 0 0 0 0 0 0 0 0
43277- 0 0 0 0 0 0 0 0 0 0 0 0
43278- 0 0 0 0 0 0 0 0 0 0 0 0
43279- 0 0 1 0 0 1 0 0 1 0 0 0
43280- 0 0 0 0 0 0 0 0 0 0 0 0
43281- 0 0 0 0 0 0 0 0 0 0 0 0
43282- 0 0 0 0 0 0 0 0 0 0 0 0
43283- 0 0 0 0 0 0 0 0 0 0 0 0
43284- 0 0 0 0 0 0 0 0 0 0 0 0
43285- 14 14 14 42 42 42 86 86 86 10 10 10
43286- 2 2 6 2 2 6 2 2 6 2 2 6
43287- 2 2 6 2 2 6 2 2 6 2 2 6
43288- 2 2 6 2 2 6 2 2 6 30 30 30
43289- 94 94 94 94 94 94 58 58 58 26 26 26
43290- 2 2 6 6 6 6 78 78 78 54 54 54
43291- 22 22 22 6 6 6 0 0 0 0 0 0
43292- 0 0 0 0 0 0 0 0 0 0 0 0
43293- 0 0 0 0 0 0 0 0 0 0 0 0
43294- 0 0 0 0 0 0 0 0 0 0 0 0
43295- 0 0 0 0 0 0 0 0 0 0 0 0
43296- 0 0 0 0 0 0 0 0 0 0 0 0
43297- 0 0 0 0 0 0 0 0 0 0 0 0
43298- 0 0 0 0 0 0 0 0 0 0 0 0
43299- 0 0 0 0 0 0 0 0 0 0 0 0
43300- 0 0 0 0 0 0 0 0 0 0 0 0
43301- 0 0 0 0 0 0 0 0 0 0 0 0
43302- 0 0 0 0 0 0 0 0 0 0 0 0
43303- 0 0 0 0 0 0 0 0 0 0 0 0
43304- 0 0 0 0 0 0 0 0 0 6 6 6
43305- 22 22 22 62 62 62 62 62 62 2 2 6
43306- 2 2 6 2 2 6 2 2 6 2 2 6
43307- 2 2 6 2 2 6 2 2 6 2 2 6
43308- 2 2 6 2 2 6 2 2 6 26 26 26
43309- 54 54 54 38 38 38 18 18 18 10 10 10
43310- 2 2 6 2 2 6 34 34 34 82 82 82
43311- 38 38 38 14 14 14 0 0 0 0 0 0
43312- 0 0 0 0 0 0 0 0 0 0 0 0
43313- 0 0 0 0 0 0 0 0 0 0 0 0
43314- 0 0 0 0 0 0 0 0 0 0 0 0
43315- 0 0 0 0 0 0 0 0 0 0 0 0
43316- 0 0 0 0 0 0 0 0 0 0 0 0
43317- 0 0 0 0 0 0 0 0 0 0 0 0
43318- 0 0 0 0 0 0 0 0 0 0 0 0
43319- 0 0 0 0 0 1 0 0 1 0 0 0
43320- 0 0 0 0 0 0 0 0 0 0 0 0
43321- 0 0 0 0 0 0 0 0 0 0 0 0
43322- 0 0 0 0 0 0 0 0 0 0 0 0
43323- 0 0 0 0 0 0 0 0 0 0 0 0
43324- 0 0 0 0 0 0 0 0 0 6 6 6
43325- 30 30 30 78 78 78 30 30 30 2 2 6
43326- 2 2 6 2 2 6 2 2 6 2 2 6
43327- 2 2 6 2 2 6 2 2 6 2 2 6
43328- 2 2 6 2 2 6 2 2 6 10 10 10
43329- 10 10 10 2 2 6 2 2 6 2 2 6
43330- 2 2 6 2 2 6 2 2 6 78 78 78
43331- 50 50 50 18 18 18 6 6 6 0 0 0
43332- 0 0 0 0 0 0 0 0 0 0 0 0
43333- 0 0 0 0 0 0 0 0 0 0 0 0
43334- 0 0 0 0 0 0 0 0 0 0 0 0
43335- 0 0 0 0 0 0 0 0 0 0 0 0
43336- 0 0 0 0 0 0 0 0 0 0 0 0
43337- 0 0 0 0 0 0 0 0 0 0 0 0
43338- 0 0 0 0 0 0 0 0 0 0 0 0
43339- 0 0 1 0 0 0 0 0 0 0 0 0
43340- 0 0 0 0 0 0 0 0 0 0 0 0
43341- 0 0 0 0 0 0 0 0 0 0 0 0
43342- 0 0 0 0 0 0 0 0 0 0 0 0
43343- 0 0 0 0 0 0 0 0 0 0 0 0
43344- 0 0 0 0 0 0 0 0 0 10 10 10
43345- 38 38 38 86 86 86 14 14 14 2 2 6
43346- 2 2 6 2 2 6 2 2 6 2 2 6
43347- 2 2 6 2 2 6 2 2 6 2 2 6
43348- 2 2 6 2 2 6 2 2 6 2 2 6
43349- 2 2 6 2 2 6 2 2 6 2 2 6
43350- 2 2 6 2 2 6 2 2 6 54 54 54
43351- 66 66 66 26 26 26 6 6 6 0 0 0
43352- 0 0 0 0 0 0 0 0 0 0 0 0
43353- 0 0 0 0 0 0 0 0 0 0 0 0
43354- 0 0 0 0 0 0 0 0 0 0 0 0
43355- 0 0 0 0 0 0 0 0 0 0 0 0
43356- 0 0 0 0 0 0 0 0 0 0 0 0
43357- 0 0 0 0 0 0 0 0 0 0 0 0
43358- 0 0 0 0 0 0 0 0 0 0 0 0
43359- 0 0 0 0 0 1 0 0 1 0 0 0
43360- 0 0 0 0 0 0 0 0 0 0 0 0
43361- 0 0 0 0 0 0 0 0 0 0 0 0
43362- 0 0 0 0 0 0 0 0 0 0 0 0
43363- 0 0 0 0 0 0 0 0 0 0 0 0
43364- 0 0 0 0 0 0 0 0 0 14 14 14
43365- 42 42 42 82 82 82 2 2 6 2 2 6
43366- 2 2 6 6 6 6 10 10 10 2 2 6
43367- 2 2 6 2 2 6 2 2 6 2 2 6
43368- 2 2 6 2 2 6 2 2 6 6 6 6
43369- 14 14 14 10 10 10 2 2 6 2 2 6
43370- 2 2 6 2 2 6 2 2 6 18 18 18
43371- 82 82 82 34 34 34 10 10 10 0 0 0
43372- 0 0 0 0 0 0 0 0 0 0 0 0
43373- 0 0 0 0 0 0 0 0 0 0 0 0
43374- 0 0 0 0 0 0 0 0 0 0 0 0
43375- 0 0 0 0 0 0 0 0 0 0 0 0
43376- 0 0 0 0 0 0 0 0 0 0 0 0
43377- 0 0 0 0 0 0 0 0 0 0 0 0
43378- 0 0 0 0 0 0 0 0 0 0 0 0
43379- 0 0 1 0 0 0 0 0 0 0 0 0
43380- 0 0 0 0 0 0 0 0 0 0 0 0
43381- 0 0 0 0 0 0 0 0 0 0 0 0
43382- 0 0 0 0 0 0 0 0 0 0 0 0
43383- 0 0 0 0 0 0 0 0 0 0 0 0
43384- 0 0 0 0 0 0 0 0 0 14 14 14
43385- 46 46 46 86 86 86 2 2 6 2 2 6
43386- 6 6 6 6 6 6 22 22 22 34 34 34
43387- 6 6 6 2 2 6 2 2 6 2 2 6
43388- 2 2 6 2 2 6 18 18 18 34 34 34
43389- 10 10 10 50 50 50 22 22 22 2 2 6
43390- 2 2 6 2 2 6 2 2 6 10 10 10
43391- 86 86 86 42 42 42 14 14 14 0 0 0
43392- 0 0 0 0 0 0 0 0 0 0 0 0
43393- 0 0 0 0 0 0 0 0 0 0 0 0
43394- 0 0 0 0 0 0 0 0 0 0 0 0
43395- 0 0 0 0 0 0 0 0 0 0 0 0
43396- 0 0 0 0 0 0 0 0 0 0 0 0
43397- 0 0 0 0 0 0 0 0 0 0 0 0
43398- 0 0 0 0 0 0 0 0 0 0 0 0
43399- 0 0 1 0 0 1 0 0 1 0 0 0
43400- 0 0 0 0 0 0 0 0 0 0 0 0
43401- 0 0 0 0 0 0 0 0 0 0 0 0
43402- 0 0 0 0 0 0 0 0 0 0 0 0
43403- 0 0 0 0 0 0 0 0 0 0 0 0
43404- 0 0 0 0 0 0 0 0 0 14 14 14
43405- 46 46 46 86 86 86 2 2 6 2 2 6
43406- 38 38 38 116 116 116 94 94 94 22 22 22
43407- 22 22 22 2 2 6 2 2 6 2 2 6
43408- 14 14 14 86 86 86 138 138 138 162 162 162
43409-154 154 154 38 38 38 26 26 26 6 6 6
43410- 2 2 6 2 2 6 2 2 6 2 2 6
43411- 86 86 86 46 46 46 14 14 14 0 0 0
43412- 0 0 0 0 0 0 0 0 0 0 0 0
43413- 0 0 0 0 0 0 0 0 0 0 0 0
43414- 0 0 0 0 0 0 0 0 0 0 0 0
43415- 0 0 0 0 0 0 0 0 0 0 0 0
43416- 0 0 0 0 0 0 0 0 0 0 0 0
43417- 0 0 0 0 0 0 0 0 0 0 0 0
43418- 0 0 0 0 0 0 0 0 0 0 0 0
43419- 0 0 0 0 0 0 0 0 0 0 0 0
43420- 0 0 0 0 0 0 0 0 0 0 0 0
43421- 0 0 0 0 0 0 0 0 0 0 0 0
43422- 0 0 0 0 0 0 0 0 0 0 0 0
43423- 0 0 0 0 0 0 0 0 0 0 0 0
43424- 0 0 0 0 0 0 0 0 0 14 14 14
43425- 46 46 46 86 86 86 2 2 6 14 14 14
43426-134 134 134 198 198 198 195 195 195 116 116 116
43427- 10 10 10 2 2 6 2 2 6 6 6 6
43428-101 98 89 187 187 187 210 210 210 218 218 218
43429-214 214 214 134 134 134 14 14 14 6 6 6
43430- 2 2 6 2 2 6 2 2 6 2 2 6
43431- 86 86 86 50 50 50 18 18 18 6 6 6
43432- 0 0 0 0 0 0 0 0 0 0 0 0
43433- 0 0 0 0 0 0 0 0 0 0 0 0
43434- 0 0 0 0 0 0 0 0 0 0 0 0
43435- 0 0 0 0 0 0 0 0 0 0 0 0
43436- 0 0 0 0 0 0 0 0 0 0 0 0
43437- 0 0 0 0 0 0 0 0 0 0 0 0
43438- 0 0 0 0 0 0 0 0 1 0 0 0
43439- 0 0 1 0 0 1 0 0 1 0 0 0
43440- 0 0 0 0 0 0 0 0 0 0 0 0
43441- 0 0 0 0 0 0 0 0 0 0 0 0
43442- 0 0 0 0 0 0 0 0 0 0 0 0
43443- 0 0 0 0 0 0 0 0 0 0 0 0
43444- 0 0 0 0 0 0 0 0 0 14 14 14
43445- 46 46 46 86 86 86 2 2 6 54 54 54
43446-218 218 218 195 195 195 226 226 226 246 246 246
43447- 58 58 58 2 2 6 2 2 6 30 30 30
43448-210 210 210 253 253 253 174 174 174 123 123 123
43449-221 221 221 234 234 234 74 74 74 2 2 6
43450- 2 2 6 2 2 6 2 2 6 2 2 6
43451- 70 70 70 58 58 58 22 22 22 6 6 6
43452- 0 0 0 0 0 0 0 0 0 0 0 0
43453- 0 0 0 0 0 0 0 0 0 0 0 0
43454- 0 0 0 0 0 0 0 0 0 0 0 0
43455- 0 0 0 0 0 0 0 0 0 0 0 0
43456- 0 0 0 0 0 0 0 0 0 0 0 0
43457- 0 0 0 0 0 0 0 0 0 0 0 0
43458- 0 0 0 0 0 0 0 0 0 0 0 0
43459- 0 0 0 0 0 0 0 0 0 0 0 0
43460- 0 0 0 0 0 0 0 0 0 0 0 0
43461- 0 0 0 0 0 0 0 0 0 0 0 0
43462- 0 0 0 0 0 0 0 0 0 0 0 0
43463- 0 0 0 0 0 0 0 0 0 0 0 0
43464- 0 0 0 0 0 0 0 0 0 14 14 14
43465- 46 46 46 82 82 82 2 2 6 106 106 106
43466-170 170 170 26 26 26 86 86 86 226 226 226
43467-123 123 123 10 10 10 14 14 14 46 46 46
43468-231 231 231 190 190 190 6 6 6 70 70 70
43469- 90 90 90 238 238 238 158 158 158 2 2 6
43470- 2 2 6 2 2 6 2 2 6 2 2 6
43471- 70 70 70 58 58 58 22 22 22 6 6 6
43472- 0 0 0 0 0 0 0 0 0 0 0 0
43473- 0 0 0 0 0 0 0 0 0 0 0 0
43474- 0 0 0 0 0 0 0 0 0 0 0 0
43475- 0 0 0 0 0 0 0 0 0 0 0 0
43476- 0 0 0 0 0 0 0 0 0 0 0 0
43477- 0 0 0 0 0 0 0 0 0 0 0 0
43478- 0 0 0 0 0 0 0 0 1 0 0 0
43479- 0 0 1 0 0 1 0 0 1 0 0 0
43480- 0 0 0 0 0 0 0 0 0 0 0 0
43481- 0 0 0 0 0 0 0 0 0 0 0 0
43482- 0 0 0 0 0 0 0 0 0 0 0 0
43483- 0 0 0 0 0 0 0 0 0 0 0 0
43484- 0 0 0 0 0 0 0 0 0 14 14 14
43485- 42 42 42 86 86 86 6 6 6 116 116 116
43486-106 106 106 6 6 6 70 70 70 149 149 149
43487-128 128 128 18 18 18 38 38 38 54 54 54
43488-221 221 221 106 106 106 2 2 6 14 14 14
43489- 46 46 46 190 190 190 198 198 198 2 2 6
43490- 2 2 6 2 2 6 2 2 6 2 2 6
43491- 74 74 74 62 62 62 22 22 22 6 6 6
43492- 0 0 0 0 0 0 0 0 0 0 0 0
43493- 0 0 0 0 0 0 0 0 0 0 0 0
43494- 0 0 0 0 0 0 0 0 0 0 0 0
43495- 0 0 0 0 0 0 0 0 0 0 0 0
43496- 0 0 0 0 0 0 0 0 0 0 0 0
43497- 0 0 0 0 0 0 0 0 0 0 0 0
43498- 0 0 0 0 0 0 0 0 1 0 0 0
43499- 0 0 1 0 0 0 0 0 1 0 0 0
43500- 0 0 0 0 0 0 0 0 0 0 0 0
43501- 0 0 0 0 0 0 0 0 0 0 0 0
43502- 0 0 0 0 0 0 0 0 0 0 0 0
43503- 0 0 0 0 0 0 0 0 0 0 0 0
43504- 0 0 0 0 0 0 0 0 0 14 14 14
43505- 42 42 42 94 94 94 14 14 14 101 101 101
43506-128 128 128 2 2 6 18 18 18 116 116 116
43507-118 98 46 121 92 8 121 92 8 98 78 10
43508-162 162 162 106 106 106 2 2 6 2 2 6
43509- 2 2 6 195 195 195 195 195 195 6 6 6
43510- 2 2 6 2 2 6 2 2 6 2 2 6
43511- 74 74 74 62 62 62 22 22 22 6 6 6
43512- 0 0 0 0 0 0 0 0 0 0 0 0
43513- 0 0 0 0 0 0 0 0 0 0 0 0
43514- 0 0 0 0 0 0 0 0 0 0 0 0
43515- 0 0 0 0 0 0 0 0 0 0 0 0
43516- 0 0 0 0 0 0 0 0 0 0 0 0
43517- 0 0 0 0 0 0 0 0 0 0 0 0
43518- 0 0 0 0 0 0 0 0 1 0 0 1
43519- 0 0 1 0 0 0 0 0 1 0 0 0
43520- 0 0 0 0 0 0 0 0 0 0 0 0
43521- 0 0 0 0 0 0 0 0 0 0 0 0
43522- 0 0 0 0 0 0 0 0 0 0 0 0
43523- 0 0 0 0 0 0 0 0 0 0 0 0
43524- 0 0 0 0 0 0 0 0 0 10 10 10
43525- 38 38 38 90 90 90 14 14 14 58 58 58
43526-210 210 210 26 26 26 54 38 6 154 114 10
43527-226 170 11 236 186 11 225 175 15 184 144 12
43528-215 174 15 175 146 61 37 26 9 2 2 6
43529- 70 70 70 246 246 246 138 138 138 2 2 6
43530- 2 2 6 2 2 6 2 2 6 2 2 6
43531- 70 70 70 66 66 66 26 26 26 6 6 6
43532- 0 0 0 0 0 0 0 0 0 0 0 0
43533- 0 0 0 0 0 0 0 0 0 0 0 0
43534- 0 0 0 0 0 0 0 0 0 0 0 0
43535- 0 0 0 0 0 0 0 0 0 0 0 0
43536- 0 0 0 0 0 0 0 0 0 0 0 0
43537- 0 0 0 0 0 0 0 0 0 0 0 0
43538- 0 0 0 0 0 0 0 0 0 0 0 0
43539- 0 0 0 0 0 0 0 0 0 0 0 0
43540- 0 0 0 0 0 0 0 0 0 0 0 0
43541- 0 0 0 0 0 0 0 0 0 0 0 0
43542- 0 0 0 0 0 0 0 0 0 0 0 0
43543- 0 0 0 0 0 0 0 0 0 0 0 0
43544- 0 0 0 0 0 0 0 0 0 10 10 10
43545- 38 38 38 86 86 86 14 14 14 10 10 10
43546-195 195 195 188 164 115 192 133 9 225 175 15
43547-239 182 13 234 190 10 232 195 16 232 200 30
43548-245 207 45 241 208 19 232 195 16 184 144 12
43549-218 194 134 211 206 186 42 42 42 2 2 6
43550- 2 2 6 2 2 6 2 2 6 2 2 6
43551- 50 50 50 74 74 74 30 30 30 6 6 6
43552- 0 0 0 0 0 0 0 0 0 0 0 0
43553- 0 0 0 0 0 0 0 0 0 0 0 0
43554- 0 0 0 0 0 0 0 0 0 0 0 0
43555- 0 0 0 0 0 0 0 0 0 0 0 0
43556- 0 0 0 0 0 0 0 0 0 0 0 0
43557- 0 0 0 0 0 0 0 0 0 0 0 0
43558- 0 0 0 0 0 0 0 0 0 0 0 0
43559- 0 0 0 0 0 0 0 0 0 0 0 0
43560- 0 0 0 0 0 0 0 0 0 0 0 0
43561- 0 0 0 0 0 0 0 0 0 0 0 0
43562- 0 0 0 0 0 0 0 0 0 0 0 0
43563- 0 0 0 0 0 0 0 0 0 0 0 0
43564- 0 0 0 0 0 0 0 0 0 10 10 10
43565- 34 34 34 86 86 86 14 14 14 2 2 6
43566-121 87 25 192 133 9 219 162 10 239 182 13
43567-236 186 11 232 195 16 241 208 19 244 214 54
43568-246 218 60 246 218 38 246 215 20 241 208 19
43569-241 208 19 226 184 13 121 87 25 2 2 6
43570- 2 2 6 2 2 6 2 2 6 2 2 6
43571- 50 50 50 82 82 82 34 34 34 10 10 10
43572- 0 0 0 0 0 0 0 0 0 0 0 0
43573- 0 0 0 0 0 0 0 0 0 0 0 0
43574- 0 0 0 0 0 0 0 0 0 0 0 0
43575- 0 0 0 0 0 0 0 0 0 0 0 0
43576- 0 0 0 0 0 0 0 0 0 0 0 0
43577- 0 0 0 0 0 0 0 0 0 0 0 0
43578- 0 0 0 0 0 0 0 0 0 0 0 0
43579- 0 0 0 0 0 0 0 0 0 0 0 0
43580- 0 0 0 0 0 0 0 0 0 0 0 0
43581- 0 0 0 0 0 0 0 0 0 0 0 0
43582- 0 0 0 0 0 0 0 0 0 0 0 0
43583- 0 0 0 0 0 0 0 0 0 0 0 0
43584- 0 0 0 0 0 0 0 0 0 10 10 10
43585- 34 34 34 82 82 82 30 30 30 61 42 6
43586-180 123 7 206 145 10 230 174 11 239 182 13
43587-234 190 10 238 202 15 241 208 19 246 218 74
43588-246 218 38 246 215 20 246 215 20 246 215 20
43589-226 184 13 215 174 15 184 144 12 6 6 6
43590- 2 2 6 2 2 6 2 2 6 2 2 6
43591- 26 26 26 94 94 94 42 42 42 14 14 14
43592- 0 0 0 0 0 0 0 0 0 0 0 0
43593- 0 0 0 0 0 0 0 0 0 0 0 0
43594- 0 0 0 0 0 0 0 0 0 0 0 0
43595- 0 0 0 0 0 0 0 0 0 0 0 0
43596- 0 0 0 0 0 0 0 0 0 0 0 0
43597- 0 0 0 0 0 0 0 0 0 0 0 0
43598- 0 0 0 0 0 0 0 0 0 0 0 0
43599- 0 0 0 0 0 0 0 0 0 0 0 0
43600- 0 0 0 0 0 0 0 0 0 0 0 0
43601- 0 0 0 0 0 0 0 0 0 0 0 0
43602- 0 0 0 0 0 0 0 0 0 0 0 0
43603- 0 0 0 0 0 0 0 0 0 0 0 0
43604- 0 0 0 0 0 0 0 0 0 10 10 10
43605- 30 30 30 78 78 78 50 50 50 104 69 6
43606-192 133 9 216 158 10 236 178 12 236 186 11
43607-232 195 16 241 208 19 244 214 54 245 215 43
43608-246 215 20 246 215 20 241 208 19 198 155 10
43609-200 144 11 216 158 10 156 118 10 2 2 6
43610- 2 2 6 2 2 6 2 2 6 2 2 6
43611- 6 6 6 90 90 90 54 54 54 18 18 18
43612- 6 6 6 0 0 0 0 0 0 0 0 0
43613- 0 0 0 0 0 0 0 0 0 0 0 0
43614- 0 0 0 0 0 0 0 0 0 0 0 0
43615- 0 0 0 0 0 0 0 0 0 0 0 0
43616- 0 0 0 0 0 0 0 0 0 0 0 0
43617- 0 0 0 0 0 0 0 0 0 0 0 0
43618- 0 0 0 0 0 0 0 0 0 0 0 0
43619- 0 0 0 0 0 0 0 0 0 0 0 0
43620- 0 0 0 0 0 0 0 0 0 0 0 0
43621- 0 0 0 0 0 0 0 0 0 0 0 0
43622- 0 0 0 0 0 0 0 0 0 0 0 0
43623- 0 0 0 0 0 0 0 0 0 0 0 0
43624- 0 0 0 0 0 0 0 0 0 10 10 10
43625- 30 30 30 78 78 78 46 46 46 22 22 22
43626-137 92 6 210 162 10 239 182 13 238 190 10
43627-238 202 15 241 208 19 246 215 20 246 215 20
43628-241 208 19 203 166 17 185 133 11 210 150 10
43629-216 158 10 210 150 10 102 78 10 2 2 6
43630- 6 6 6 54 54 54 14 14 14 2 2 6
43631- 2 2 6 62 62 62 74 74 74 30 30 30
43632- 10 10 10 0 0 0 0 0 0 0 0 0
43633- 0 0 0 0 0 0 0 0 0 0 0 0
43634- 0 0 0 0 0 0 0 0 0 0 0 0
43635- 0 0 0 0 0 0 0 0 0 0 0 0
43636- 0 0 0 0 0 0 0 0 0 0 0 0
43637- 0 0 0 0 0 0 0 0 0 0 0 0
43638- 0 0 0 0 0 0 0 0 0 0 0 0
43639- 0 0 0 0 0 0 0 0 0 0 0 0
43640- 0 0 0 0 0 0 0 0 0 0 0 0
43641- 0 0 0 0 0 0 0 0 0 0 0 0
43642- 0 0 0 0 0 0 0 0 0 0 0 0
43643- 0 0 0 0 0 0 0 0 0 0 0 0
43644- 0 0 0 0 0 0 0 0 0 10 10 10
43645- 34 34 34 78 78 78 50 50 50 6 6 6
43646- 94 70 30 139 102 15 190 146 13 226 184 13
43647-232 200 30 232 195 16 215 174 15 190 146 13
43648-168 122 10 192 133 9 210 150 10 213 154 11
43649-202 150 34 182 157 106 101 98 89 2 2 6
43650- 2 2 6 78 78 78 116 116 116 58 58 58
43651- 2 2 6 22 22 22 90 90 90 46 46 46
43652- 18 18 18 6 6 6 0 0 0 0 0 0
43653- 0 0 0 0 0 0 0 0 0 0 0 0
43654- 0 0 0 0 0 0 0 0 0 0 0 0
43655- 0 0 0 0 0 0 0 0 0 0 0 0
43656- 0 0 0 0 0 0 0 0 0 0 0 0
43657- 0 0 0 0 0 0 0 0 0 0 0 0
43658- 0 0 0 0 0 0 0 0 0 0 0 0
43659- 0 0 0 0 0 0 0 0 0 0 0 0
43660- 0 0 0 0 0 0 0 0 0 0 0 0
43661- 0 0 0 0 0 0 0 0 0 0 0 0
43662- 0 0 0 0 0 0 0 0 0 0 0 0
43663- 0 0 0 0 0 0 0 0 0 0 0 0
43664- 0 0 0 0 0 0 0 0 0 10 10 10
43665- 38 38 38 86 86 86 50 50 50 6 6 6
43666-128 128 128 174 154 114 156 107 11 168 122 10
43667-198 155 10 184 144 12 197 138 11 200 144 11
43668-206 145 10 206 145 10 197 138 11 188 164 115
43669-195 195 195 198 198 198 174 174 174 14 14 14
43670- 2 2 6 22 22 22 116 116 116 116 116 116
43671- 22 22 22 2 2 6 74 74 74 70 70 70
43672- 30 30 30 10 10 10 0 0 0 0 0 0
43673- 0 0 0 0 0 0 0 0 0 0 0 0
43674- 0 0 0 0 0 0 0 0 0 0 0 0
43675- 0 0 0 0 0 0 0 0 0 0 0 0
43676- 0 0 0 0 0 0 0 0 0 0 0 0
43677- 0 0 0 0 0 0 0 0 0 0 0 0
43678- 0 0 0 0 0 0 0 0 0 0 0 0
43679- 0 0 0 0 0 0 0 0 0 0 0 0
43680- 0 0 0 0 0 0 0 0 0 0 0 0
43681- 0 0 0 0 0 0 0 0 0 0 0 0
43682- 0 0 0 0 0 0 0 0 0 0 0 0
43683- 0 0 0 0 0 0 0 0 0 0 0 0
43684- 0 0 0 0 0 0 6 6 6 18 18 18
43685- 50 50 50 101 101 101 26 26 26 10 10 10
43686-138 138 138 190 190 190 174 154 114 156 107 11
43687-197 138 11 200 144 11 197 138 11 192 133 9
43688-180 123 7 190 142 34 190 178 144 187 187 187
43689-202 202 202 221 221 221 214 214 214 66 66 66
43690- 2 2 6 2 2 6 50 50 50 62 62 62
43691- 6 6 6 2 2 6 10 10 10 90 90 90
43692- 50 50 50 18 18 18 6 6 6 0 0 0
43693- 0 0 0 0 0 0 0 0 0 0 0 0
43694- 0 0 0 0 0 0 0 0 0 0 0 0
43695- 0 0 0 0 0 0 0 0 0 0 0 0
43696- 0 0 0 0 0 0 0 0 0 0 0 0
43697- 0 0 0 0 0 0 0 0 0 0 0 0
43698- 0 0 0 0 0 0 0 0 0 0 0 0
43699- 0 0 0 0 0 0 0 0 0 0 0 0
43700- 0 0 0 0 0 0 0 0 0 0 0 0
43701- 0 0 0 0 0 0 0 0 0 0 0 0
43702- 0 0 0 0 0 0 0 0 0 0 0 0
43703- 0 0 0 0 0 0 0 0 0 0 0 0
43704- 0 0 0 0 0 0 10 10 10 34 34 34
43705- 74 74 74 74 74 74 2 2 6 6 6 6
43706-144 144 144 198 198 198 190 190 190 178 166 146
43707-154 121 60 156 107 11 156 107 11 168 124 44
43708-174 154 114 187 187 187 190 190 190 210 210 210
43709-246 246 246 253 253 253 253 253 253 182 182 182
43710- 6 6 6 2 2 6 2 2 6 2 2 6
43711- 2 2 6 2 2 6 2 2 6 62 62 62
43712- 74 74 74 34 34 34 14 14 14 0 0 0
43713- 0 0 0 0 0 0 0 0 0 0 0 0
43714- 0 0 0 0 0 0 0 0 0 0 0 0
43715- 0 0 0 0 0 0 0 0 0 0 0 0
43716- 0 0 0 0 0 0 0 0 0 0 0 0
43717- 0 0 0 0 0 0 0 0 0 0 0 0
43718- 0 0 0 0 0 0 0 0 0 0 0 0
43719- 0 0 0 0 0 0 0 0 0 0 0 0
43720- 0 0 0 0 0 0 0 0 0 0 0 0
43721- 0 0 0 0 0 0 0 0 0 0 0 0
43722- 0 0 0 0 0 0 0 0 0 0 0 0
43723- 0 0 0 0 0 0 0 0 0 0 0 0
43724- 0 0 0 10 10 10 22 22 22 54 54 54
43725- 94 94 94 18 18 18 2 2 6 46 46 46
43726-234 234 234 221 221 221 190 190 190 190 190 190
43727-190 190 190 187 187 187 187 187 187 190 190 190
43728-190 190 190 195 195 195 214 214 214 242 242 242
43729-253 253 253 253 253 253 253 253 253 253 253 253
43730- 82 82 82 2 2 6 2 2 6 2 2 6
43731- 2 2 6 2 2 6 2 2 6 14 14 14
43732- 86 86 86 54 54 54 22 22 22 6 6 6
43733- 0 0 0 0 0 0 0 0 0 0 0 0
43734- 0 0 0 0 0 0 0 0 0 0 0 0
43735- 0 0 0 0 0 0 0 0 0 0 0 0
43736- 0 0 0 0 0 0 0 0 0 0 0 0
43737- 0 0 0 0 0 0 0 0 0 0 0 0
43738- 0 0 0 0 0 0 0 0 0 0 0 0
43739- 0 0 0 0 0 0 0 0 0 0 0 0
43740- 0 0 0 0 0 0 0 0 0 0 0 0
43741- 0 0 0 0 0 0 0 0 0 0 0 0
43742- 0 0 0 0 0 0 0 0 0 0 0 0
43743- 0 0 0 0 0 0 0 0 0 0 0 0
43744- 6 6 6 18 18 18 46 46 46 90 90 90
43745- 46 46 46 18 18 18 6 6 6 182 182 182
43746-253 253 253 246 246 246 206 206 206 190 190 190
43747-190 190 190 190 190 190 190 190 190 190 190 190
43748-206 206 206 231 231 231 250 250 250 253 253 253
43749-253 253 253 253 253 253 253 253 253 253 253 253
43750-202 202 202 14 14 14 2 2 6 2 2 6
43751- 2 2 6 2 2 6 2 2 6 2 2 6
43752- 42 42 42 86 86 86 42 42 42 18 18 18
43753- 6 6 6 0 0 0 0 0 0 0 0 0
43754- 0 0 0 0 0 0 0 0 0 0 0 0
43755- 0 0 0 0 0 0 0 0 0 0 0 0
43756- 0 0 0 0 0 0 0 0 0 0 0 0
43757- 0 0 0 0 0 0 0 0 0 0 0 0
43758- 0 0 0 0 0 0 0 0 0 0 0 0
43759- 0 0 0 0 0 0 0 0 0 0 0 0
43760- 0 0 0 0 0 0 0 0 0 0 0 0
43761- 0 0 0 0 0 0 0 0 0 0 0 0
43762- 0 0 0 0 0 0 0 0 0 0 0 0
43763- 0 0 0 0 0 0 0 0 0 6 6 6
43764- 14 14 14 38 38 38 74 74 74 66 66 66
43765- 2 2 6 6 6 6 90 90 90 250 250 250
43766-253 253 253 253 253 253 238 238 238 198 198 198
43767-190 190 190 190 190 190 195 195 195 221 221 221
43768-246 246 246 253 253 253 253 253 253 253 253 253
43769-253 253 253 253 253 253 253 253 253 253 253 253
43770-253 253 253 82 82 82 2 2 6 2 2 6
43771- 2 2 6 2 2 6 2 2 6 2 2 6
43772- 2 2 6 78 78 78 70 70 70 34 34 34
43773- 14 14 14 6 6 6 0 0 0 0 0 0
43774- 0 0 0 0 0 0 0 0 0 0 0 0
43775- 0 0 0 0 0 0 0 0 0 0 0 0
43776- 0 0 0 0 0 0 0 0 0 0 0 0
43777- 0 0 0 0 0 0 0 0 0 0 0 0
43778- 0 0 0 0 0 0 0 0 0 0 0 0
43779- 0 0 0 0 0 0 0 0 0 0 0 0
43780- 0 0 0 0 0 0 0 0 0 0 0 0
43781- 0 0 0 0 0 0 0 0 0 0 0 0
43782- 0 0 0 0 0 0 0 0 0 0 0 0
43783- 0 0 0 0 0 0 0 0 0 14 14 14
43784- 34 34 34 66 66 66 78 78 78 6 6 6
43785- 2 2 6 18 18 18 218 218 218 253 253 253
43786-253 253 253 253 253 253 253 253 253 246 246 246
43787-226 226 226 231 231 231 246 246 246 253 253 253
43788-253 253 253 253 253 253 253 253 253 253 253 253
43789-253 253 253 253 253 253 253 253 253 253 253 253
43790-253 253 253 178 178 178 2 2 6 2 2 6
43791- 2 2 6 2 2 6 2 2 6 2 2 6
43792- 2 2 6 18 18 18 90 90 90 62 62 62
43793- 30 30 30 10 10 10 0 0 0 0 0 0
43794- 0 0 0 0 0 0 0 0 0 0 0 0
43795- 0 0 0 0 0 0 0 0 0 0 0 0
43796- 0 0 0 0 0 0 0 0 0 0 0 0
43797- 0 0 0 0 0 0 0 0 0 0 0 0
43798- 0 0 0 0 0 0 0 0 0 0 0 0
43799- 0 0 0 0 0 0 0 0 0 0 0 0
43800- 0 0 0 0 0 0 0 0 0 0 0 0
43801- 0 0 0 0 0 0 0 0 0 0 0 0
43802- 0 0 0 0 0 0 0 0 0 0 0 0
43803- 0 0 0 0 0 0 10 10 10 26 26 26
43804- 58 58 58 90 90 90 18 18 18 2 2 6
43805- 2 2 6 110 110 110 253 253 253 253 253 253
43806-253 253 253 253 253 253 253 253 253 253 253 253
43807-250 250 250 253 253 253 253 253 253 253 253 253
43808-253 253 253 253 253 253 253 253 253 253 253 253
43809-253 253 253 253 253 253 253 253 253 253 253 253
43810-253 253 253 231 231 231 18 18 18 2 2 6
43811- 2 2 6 2 2 6 2 2 6 2 2 6
43812- 2 2 6 2 2 6 18 18 18 94 94 94
43813- 54 54 54 26 26 26 10 10 10 0 0 0
43814- 0 0 0 0 0 0 0 0 0 0 0 0
43815- 0 0 0 0 0 0 0 0 0 0 0 0
43816- 0 0 0 0 0 0 0 0 0 0 0 0
43817- 0 0 0 0 0 0 0 0 0 0 0 0
43818- 0 0 0 0 0 0 0 0 0 0 0 0
43819- 0 0 0 0 0 0 0 0 0 0 0 0
43820- 0 0 0 0 0 0 0 0 0 0 0 0
43821- 0 0 0 0 0 0 0 0 0 0 0 0
43822- 0 0 0 0 0 0 0 0 0 0 0 0
43823- 0 0 0 6 6 6 22 22 22 50 50 50
43824- 90 90 90 26 26 26 2 2 6 2 2 6
43825- 14 14 14 195 195 195 250 250 250 253 253 253
43826-253 253 253 253 253 253 253 253 253 253 253 253
43827-253 253 253 253 253 253 253 253 253 253 253 253
43828-253 253 253 253 253 253 253 253 253 253 253 253
43829-253 253 253 253 253 253 253 253 253 253 253 253
43830-250 250 250 242 242 242 54 54 54 2 2 6
43831- 2 2 6 2 2 6 2 2 6 2 2 6
43832- 2 2 6 2 2 6 2 2 6 38 38 38
43833- 86 86 86 50 50 50 22 22 22 6 6 6
43834- 0 0 0 0 0 0 0 0 0 0 0 0
43835- 0 0 0 0 0 0 0 0 0 0 0 0
43836- 0 0 0 0 0 0 0 0 0 0 0 0
43837- 0 0 0 0 0 0 0 0 0 0 0 0
43838- 0 0 0 0 0 0 0 0 0 0 0 0
43839- 0 0 0 0 0 0 0 0 0 0 0 0
43840- 0 0 0 0 0 0 0 0 0 0 0 0
43841- 0 0 0 0 0 0 0 0 0 0 0 0
43842- 0 0 0 0 0 0 0 0 0 0 0 0
43843- 6 6 6 14 14 14 38 38 38 82 82 82
43844- 34 34 34 2 2 6 2 2 6 2 2 6
43845- 42 42 42 195 195 195 246 246 246 253 253 253
43846-253 253 253 253 253 253 253 253 253 250 250 250
43847-242 242 242 242 242 242 250 250 250 253 253 253
43848-253 253 253 253 253 253 253 253 253 253 253 253
43849-253 253 253 250 250 250 246 246 246 238 238 238
43850-226 226 226 231 231 231 101 101 101 6 6 6
43851- 2 2 6 2 2 6 2 2 6 2 2 6
43852- 2 2 6 2 2 6 2 2 6 2 2 6
43853- 38 38 38 82 82 82 42 42 42 14 14 14
43854- 6 6 6 0 0 0 0 0 0 0 0 0
43855- 0 0 0 0 0 0 0 0 0 0 0 0
43856- 0 0 0 0 0 0 0 0 0 0 0 0
43857- 0 0 0 0 0 0 0 0 0 0 0 0
43858- 0 0 0 0 0 0 0 0 0 0 0 0
43859- 0 0 0 0 0 0 0 0 0 0 0 0
43860- 0 0 0 0 0 0 0 0 0 0 0 0
43861- 0 0 0 0 0 0 0 0 0 0 0 0
43862- 0 0 0 0 0 0 0 0 0 0 0 0
43863- 10 10 10 26 26 26 62 62 62 66 66 66
43864- 2 2 6 2 2 6 2 2 6 6 6 6
43865- 70 70 70 170 170 170 206 206 206 234 234 234
43866-246 246 246 250 250 250 250 250 250 238 238 238
43867-226 226 226 231 231 231 238 238 238 250 250 250
43868-250 250 250 250 250 250 246 246 246 231 231 231
43869-214 214 214 206 206 206 202 202 202 202 202 202
43870-198 198 198 202 202 202 182 182 182 18 18 18
43871- 2 2 6 2 2 6 2 2 6 2 2 6
43872- 2 2 6 2 2 6 2 2 6 2 2 6
43873- 2 2 6 62 62 62 66 66 66 30 30 30
43874- 10 10 10 0 0 0 0 0 0 0 0 0
43875- 0 0 0 0 0 0 0 0 0 0 0 0
43876- 0 0 0 0 0 0 0 0 0 0 0 0
43877- 0 0 0 0 0 0 0 0 0 0 0 0
43878- 0 0 0 0 0 0 0 0 0 0 0 0
43879- 0 0 0 0 0 0 0 0 0 0 0 0
43880- 0 0 0 0 0 0 0 0 0 0 0 0
43881- 0 0 0 0 0 0 0 0 0 0 0 0
43882- 0 0 0 0 0 0 0 0 0 0 0 0
43883- 14 14 14 42 42 42 82 82 82 18 18 18
43884- 2 2 6 2 2 6 2 2 6 10 10 10
43885- 94 94 94 182 182 182 218 218 218 242 242 242
43886-250 250 250 253 253 253 253 253 253 250 250 250
43887-234 234 234 253 253 253 253 253 253 253 253 253
43888-253 253 253 253 253 253 253 253 253 246 246 246
43889-238 238 238 226 226 226 210 210 210 202 202 202
43890-195 195 195 195 195 195 210 210 210 158 158 158
43891- 6 6 6 14 14 14 50 50 50 14 14 14
43892- 2 2 6 2 2 6 2 2 6 2 2 6
43893- 2 2 6 6 6 6 86 86 86 46 46 46
43894- 18 18 18 6 6 6 0 0 0 0 0 0
43895- 0 0 0 0 0 0 0 0 0 0 0 0
43896- 0 0 0 0 0 0 0 0 0 0 0 0
43897- 0 0 0 0 0 0 0 0 0 0 0 0
43898- 0 0 0 0 0 0 0 0 0 0 0 0
43899- 0 0 0 0 0 0 0 0 0 0 0 0
43900- 0 0 0 0 0 0 0 0 0 0 0 0
43901- 0 0 0 0 0 0 0 0 0 0 0 0
43902- 0 0 0 0 0 0 0 0 0 6 6 6
43903- 22 22 22 54 54 54 70 70 70 2 2 6
43904- 2 2 6 10 10 10 2 2 6 22 22 22
43905-166 166 166 231 231 231 250 250 250 253 253 253
43906-253 253 253 253 253 253 253 253 253 250 250 250
43907-242 242 242 253 253 253 253 253 253 253 253 253
43908-253 253 253 253 253 253 253 253 253 253 253 253
43909-253 253 253 253 253 253 253 253 253 246 246 246
43910-231 231 231 206 206 206 198 198 198 226 226 226
43911- 94 94 94 2 2 6 6 6 6 38 38 38
43912- 30 30 30 2 2 6 2 2 6 2 2 6
43913- 2 2 6 2 2 6 62 62 62 66 66 66
43914- 26 26 26 10 10 10 0 0 0 0 0 0
43915- 0 0 0 0 0 0 0 0 0 0 0 0
43916- 0 0 0 0 0 0 0 0 0 0 0 0
43917- 0 0 0 0 0 0 0 0 0 0 0 0
43918- 0 0 0 0 0 0 0 0 0 0 0 0
43919- 0 0 0 0 0 0 0 0 0 0 0 0
43920- 0 0 0 0 0 0 0 0 0 0 0 0
43921- 0 0 0 0 0 0 0 0 0 0 0 0
43922- 0 0 0 0 0 0 0 0 0 10 10 10
43923- 30 30 30 74 74 74 50 50 50 2 2 6
43924- 26 26 26 26 26 26 2 2 6 106 106 106
43925-238 238 238 253 253 253 253 253 253 253 253 253
43926-253 253 253 253 253 253 253 253 253 253 253 253
43927-253 253 253 253 253 253 253 253 253 253 253 253
43928-253 253 253 253 253 253 253 253 253 253 253 253
43929-253 253 253 253 253 253 253 253 253 253 253 253
43930-253 253 253 246 246 246 218 218 218 202 202 202
43931-210 210 210 14 14 14 2 2 6 2 2 6
43932- 30 30 30 22 22 22 2 2 6 2 2 6
43933- 2 2 6 2 2 6 18 18 18 86 86 86
43934- 42 42 42 14 14 14 0 0 0 0 0 0
43935- 0 0 0 0 0 0 0 0 0 0 0 0
43936- 0 0 0 0 0 0 0 0 0 0 0 0
43937- 0 0 0 0 0 0 0 0 0 0 0 0
43938- 0 0 0 0 0 0 0 0 0 0 0 0
43939- 0 0 0 0 0 0 0 0 0 0 0 0
43940- 0 0 0 0 0 0 0 0 0 0 0 0
43941- 0 0 0 0 0 0 0 0 0 0 0 0
43942- 0 0 0 0 0 0 0 0 0 14 14 14
43943- 42 42 42 90 90 90 22 22 22 2 2 6
43944- 42 42 42 2 2 6 18 18 18 218 218 218
43945-253 253 253 253 253 253 253 253 253 253 253 253
43946-253 253 253 253 253 253 253 253 253 253 253 253
43947-253 253 253 253 253 253 253 253 253 253 253 253
43948-253 253 253 253 253 253 253 253 253 253 253 253
43949-253 253 253 253 253 253 253 253 253 253 253 253
43950-253 253 253 253 253 253 250 250 250 221 221 221
43951-218 218 218 101 101 101 2 2 6 14 14 14
43952- 18 18 18 38 38 38 10 10 10 2 2 6
43953- 2 2 6 2 2 6 2 2 6 78 78 78
43954- 58 58 58 22 22 22 6 6 6 0 0 0
43955- 0 0 0 0 0 0 0 0 0 0 0 0
43956- 0 0 0 0 0 0 0 0 0 0 0 0
43957- 0 0 0 0 0 0 0 0 0 0 0 0
43958- 0 0 0 0 0 0 0 0 0 0 0 0
43959- 0 0 0 0 0 0 0 0 0 0 0 0
43960- 0 0 0 0 0 0 0 0 0 0 0 0
43961- 0 0 0 0 0 0 0 0 0 0 0 0
43962- 0 0 0 0 0 0 6 6 6 18 18 18
43963- 54 54 54 82 82 82 2 2 6 26 26 26
43964- 22 22 22 2 2 6 123 123 123 253 253 253
43965-253 253 253 253 253 253 253 253 253 253 253 253
43966-253 253 253 253 253 253 253 253 253 253 253 253
43967-253 253 253 253 253 253 253 253 253 253 253 253
43968-253 253 253 253 253 253 253 253 253 253 253 253
43969-253 253 253 253 253 253 253 253 253 253 253 253
43970-253 253 253 253 253 253 253 253 253 250 250 250
43971-238 238 238 198 198 198 6 6 6 38 38 38
43972- 58 58 58 26 26 26 38 38 38 2 2 6
43973- 2 2 6 2 2 6 2 2 6 46 46 46
43974- 78 78 78 30 30 30 10 10 10 0 0 0
43975- 0 0 0 0 0 0 0 0 0 0 0 0
43976- 0 0 0 0 0 0 0 0 0 0 0 0
43977- 0 0 0 0 0 0 0 0 0 0 0 0
43978- 0 0 0 0 0 0 0 0 0 0 0 0
43979- 0 0 0 0 0 0 0 0 0 0 0 0
43980- 0 0 0 0 0 0 0 0 0 0 0 0
43981- 0 0 0 0 0 0 0 0 0 0 0 0
43982- 0 0 0 0 0 0 10 10 10 30 30 30
43983- 74 74 74 58 58 58 2 2 6 42 42 42
43984- 2 2 6 22 22 22 231 231 231 253 253 253
43985-253 253 253 253 253 253 253 253 253 253 253 253
43986-253 253 253 253 253 253 253 253 253 250 250 250
43987-253 253 253 253 253 253 253 253 253 253 253 253
43988-253 253 253 253 253 253 253 253 253 253 253 253
43989-253 253 253 253 253 253 253 253 253 253 253 253
43990-253 253 253 253 253 253 253 253 253 253 253 253
43991-253 253 253 246 246 246 46 46 46 38 38 38
43992- 42 42 42 14 14 14 38 38 38 14 14 14
43993- 2 2 6 2 2 6 2 2 6 6 6 6
43994- 86 86 86 46 46 46 14 14 14 0 0 0
43995- 0 0 0 0 0 0 0 0 0 0 0 0
43996- 0 0 0 0 0 0 0 0 0 0 0 0
43997- 0 0 0 0 0 0 0 0 0 0 0 0
43998- 0 0 0 0 0 0 0 0 0 0 0 0
43999- 0 0 0 0 0 0 0 0 0 0 0 0
44000- 0 0 0 0 0 0 0 0 0 0 0 0
44001- 0 0 0 0 0 0 0 0 0 0 0 0
44002- 0 0 0 6 6 6 14 14 14 42 42 42
44003- 90 90 90 18 18 18 18 18 18 26 26 26
44004- 2 2 6 116 116 116 253 253 253 253 253 253
44005-253 253 253 253 253 253 253 253 253 253 253 253
44006-253 253 253 253 253 253 250 250 250 238 238 238
44007-253 253 253 253 253 253 253 253 253 253 253 253
44008-253 253 253 253 253 253 253 253 253 253 253 253
44009-253 253 253 253 253 253 253 253 253 253 253 253
44010-253 253 253 253 253 253 253 253 253 253 253 253
44011-253 253 253 253 253 253 94 94 94 6 6 6
44012- 2 2 6 2 2 6 10 10 10 34 34 34
44013- 2 2 6 2 2 6 2 2 6 2 2 6
44014- 74 74 74 58 58 58 22 22 22 6 6 6
44015- 0 0 0 0 0 0 0 0 0 0 0 0
44016- 0 0 0 0 0 0 0 0 0 0 0 0
44017- 0 0 0 0 0 0 0 0 0 0 0 0
44018- 0 0 0 0 0 0 0 0 0 0 0 0
44019- 0 0 0 0 0 0 0 0 0 0 0 0
44020- 0 0 0 0 0 0 0 0 0 0 0 0
44021- 0 0 0 0 0 0 0 0 0 0 0 0
44022- 0 0 0 10 10 10 26 26 26 66 66 66
44023- 82 82 82 2 2 6 38 38 38 6 6 6
44024- 14 14 14 210 210 210 253 253 253 253 253 253
44025-253 253 253 253 253 253 253 253 253 253 253 253
44026-253 253 253 253 253 253 246 246 246 242 242 242
44027-253 253 253 253 253 253 253 253 253 253 253 253
44028-253 253 253 253 253 253 253 253 253 253 253 253
44029-253 253 253 253 253 253 253 253 253 253 253 253
44030-253 253 253 253 253 253 253 253 253 253 253 253
44031-253 253 253 253 253 253 144 144 144 2 2 6
44032- 2 2 6 2 2 6 2 2 6 46 46 46
44033- 2 2 6 2 2 6 2 2 6 2 2 6
44034- 42 42 42 74 74 74 30 30 30 10 10 10
44035- 0 0 0 0 0 0 0 0 0 0 0 0
44036- 0 0 0 0 0 0 0 0 0 0 0 0
44037- 0 0 0 0 0 0 0 0 0 0 0 0
44038- 0 0 0 0 0 0 0 0 0 0 0 0
44039- 0 0 0 0 0 0 0 0 0 0 0 0
44040- 0 0 0 0 0 0 0 0 0 0 0 0
44041- 0 0 0 0 0 0 0 0 0 0 0 0
44042- 6 6 6 14 14 14 42 42 42 90 90 90
44043- 26 26 26 6 6 6 42 42 42 2 2 6
44044- 74 74 74 250 250 250 253 253 253 253 253 253
44045-253 253 253 253 253 253 253 253 253 253 253 253
44046-253 253 253 253 253 253 242 242 242 242 242 242
44047-253 253 253 253 253 253 253 253 253 253 253 253
44048-253 253 253 253 253 253 253 253 253 253 253 253
44049-253 253 253 253 253 253 253 253 253 253 253 253
44050-253 253 253 253 253 253 253 253 253 253 253 253
44051-253 253 253 253 253 253 182 182 182 2 2 6
44052- 2 2 6 2 2 6 2 2 6 46 46 46
44053- 2 2 6 2 2 6 2 2 6 2 2 6
44054- 10 10 10 86 86 86 38 38 38 10 10 10
44055- 0 0 0 0 0 0 0 0 0 0 0 0
44056- 0 0 0 0 0 0 0 0 0 0 0 0
44057- 0 0 0 0 0 0 0 0 0 0 0 0
44058- 0 0 0 0 0 0 0 0 0 0 0 0
44059- 0 0 0 0 0 0 0 0 0 0 0 0
44060- 0 0 0 0 0 0 0 0 0 0 0 0
44061- 0 0 0 0 0 0 0 0 0 0 0 0
44062- 10 10 10 26 26 26 66 66 66 82 82 82
44063- 2 2 6 22 22 22 18 18 18 2 2 6
44064-149 149 149 253 253 253 253 253 253 253 253 253
44065-253 253 253 253 253 253 253 253 253 253 253 253
44066-253 253 253 253 253 253 234 234 234 242 242 242
44067-253 253 253 253 253 253 253 253 253 253 253 253
44068-253 253 253 253 253 253 253 253 253 253 253 253
44069-253 253 253 253 253 253 253 253 253 253 253 253
44070-253 253 253 253 253 253 253 253 253 253 253 253
44071-253 253 253 253 253 253 206 206 206 2 2 6
44072- 2 2 6 2 2 6 2 2 6 38 38 38
44073- 2 2 6 2 2 6 2 2 6 2 2 6
44074- 6 6 6 86 86 86 46 46 46 14 14 14
44075- 0 0 0 0 0 0 0 0 0 0 0 0
44076- 0 0 0 0 0 0 0 0 0 0 0 0
44077- 0 0 0 0 0 0 0 0 0 0 0 0
44078- 0 0 0 0 0 0 0 0 0 0 0 0
44079- 0 0 0 0 0 0 0 0 0 0 0 0
44080- 0 0 0 0 0 0 0 0 0 0 0 0
44081- 0 0 0 0 0 0 0 0 0 6 6 6
44082- 18 18 18 46 46 46 86 86 86 18 18 18
44083- 2 2 6 34 34 34 10 10 10 6 6 6
44084-210 210 210 253 253 253 253 253 253 253 253 253
44085-253 253 253 253 253 253 253 253 253 253 253 253
44086-253 253 253 253 253 253 234 234 234 242 242 242
44087-253 253 253 253 253 253 253 253 253 253 253 253
44088-253 253 253 253 253 253 253 253 253 253 253 253
44089-253 253 253 253 253 253 253 253 253 253 253 253
44090-253 253 253 253 253 253 253 253 253 253 253 253
44091-253 253 253 253 253 253 221 221 221 6 6 6
44092- 2 2 6 2 2 6 6 6 6 30 30 30
44093- 2 2 6 2 2 6 2 2 6 2 2 6
44094- 2 2 6 82 82 82 54 54 54 18 18 18
44095- 6 6 6 0 0 0 0 0 0 0 0 0
44096- 0 0 0 0 0 0 0 0 0 0 0 0
44097- 0 0 0 0 0 0 0 0 0 0 0 0
44098- 0 0 0 0 0 0 0 0 0 0 0 0
44099- 0 0 0 0 0 0 0 0 0 0 0 0
44100- 0 0 0 0 0 0 0 0 0 0 0 0
44101- 0 0 0 0 0 0 0 0 0 10 10 10
44102- 26 26 26 66 66 66 62 62 62 2 2 6
44103- 2 2 6 38 38 38 10 10 10 26 26 26
44104-238 238 238 253 253 253 253 253 253 253 253 253
44105-253 253 253 253 253 253 253 253 253 253 253 253
44106-253 253 253 253 253 253 231 231 231 238 238 238
44107-253 253 253 253 253 253 253 253 253 253 253 253
44108-253 253 253 253 253 253 253 253 253 253 253 253
44109-253 253 253 253 253 253 253 253 253 253 253 253
44110-253 253 253 253 253 253 253 253 253 253 253 253
44111-253 253 253 253 253 253 231 231 231 6 6 6
44112- 2 2 6 2 2 6 10 10 10 30 30 30
44113- 2 2 6 2 2 6 2 2 6 2 2 6
44114- 2 2 6 66 66 66 58 58 58 22 22 22
44115- 6 6 6 0 0 0 0 0 0 0 0 0
44116- 0 0 0 0 0 0 0 0 0 0 0 0
44117- 0 0 0 0 0 0 0 0 0 0 0 0
44118- 0 0 0 0 0 0 0 0 0 0 0 0
44119- 0 0 0 0 0 0 0 0 0 0 0 0
44120- 0 0 0 0 0 0 0 0 0 0 0 0
44121- 0 0 0 0 0 0 0 0 0 10 10 10
44122- 38 38 38 78 78 78 6 6 6 2 2 6
44123- 2 2 6 46 46 46 14 14 14 42 42 42
44124-246 246 246 253 253 253 253 253 253 253 253 253
44125-253 253 253 253 253 253 253 253 253 253 253 253
44126-253 253 253 253 253 253 231 231 231 242 242 242
44127-253 253 253 253 253 253 253 253 253 253 253 253
44128-253 253 253 253 253 253 253 253 253 253 253 253
44129-253 253 253 253 253 253 253 253 253 253 253 253
44130-253 253 253 253 253 253 253 253 253 253 253 253
44131-253 253 253 253 253 253 234 234 234 10 10 10
44132- 2 2 6 2 2 6 22 22 22 14 14 14
44133- 2 2 6 2 2 6 2 2 6 2 2 6
44134- 2 2 6 66 66 66 62 62 62 22 22 22
44135- 6 6 6 0 0 0 0 0 0 0 0 0
44136- 0 0 0 0 0 0 0 0 0 0 0 0
44137- 0 0 0 0 0 0 0 0 0 0 0 0
44138- 0 0 0 0 0 0 0 0 0 0 0 0
44139- 0 0 0 0 0 0 0 0 0 0 0 0
44140- 0 0 0 0 0 0 0 0 0 0 0 0
44141- 0 0 0 0 0 0 6 6 6 18 18 18
44142- 50 50 50 74 74 74 2 2 6 2 2 6
44143- 14 14 14 70 70 70 34 34 34 62 62 62
44144-250 250 250 253 253 253 253 253 253 253 253 253
44145-253 253 253 253 253 253 253 253 253 253 253 253
44146-253 253 253 253 253 253 231 231 231 246 246 246
44147-253 253 253 253 253 253 253 253 253 253 253 253
44148-253 253 253 253 253 253 253 253 253 253 253 253
44149-253 253 253 253 253 253 253 253 253 253 253 253
44150-253 253 253 253 253 253 253 253 253 253 253 253
44151-253 253 253 253 253 253 234 234 234 14 14 14
44152- 2 2 6 2 2 6 30 30 30 2 2 6
44153- 2 2 6 2 2 6 2 2 6 2 2 6
44154- 2 2 6 66 66 66 62 62 62 22 22 22
44155- 6 6 6 0 0 0 0 0 0 0 0 0
44156- 0 0 0 0 0 0 0 0 0 0 0 0
44157- 0 0 0 0 0 0 0 0 0 0 0 0
44158- 0 0 0 0 0 0 0 0 0 0 0 0
44159- 0 0 0 0 0 0 0 0 0 0 0 0
44160- 0 0 0 0 0 0 0 0 0 0 0 0
44161- 0 0 0 0 0 0 6 6 6 18 18 18
44162- 54 54 54 62 62 62 2 2 6 2 2 6
44163- 2 2 6 30 30 30 46 46 46 70 70 70
44164-250 250 250 253 253 253 253 253 253 253 253 253
44165-253 253 253 253 253 253 253 253 253 253 253 253
44166-253 253 253 253 253 253 231 231 231 246 246 246
44167-253 253 253 253 253 253 253 253 253 253 253 253
44168-253 253 253 253 253 253 253 253 253 253 253 253
44169-253 253 253 253 253 253 253 253 253 253 253 253
44170-253 253 253 253 253 253 253 253 253 253 253 253
44171-253 253 253 253 253 253 226 226 226 10 10 10
44172- 2 2 6 6 6 6 30 30 30 2 2 6
44173- 2 2 6 2 2 6 2 2 6 2 2 6
44174- 2 2 6 66 66 66 58 58 58 22 22 22
44175- 6 6 6 0 0 0 0 0 0 0 0 0
44176- 0 0 0 0 0 0 0 0 0 0 0 0
44177- 0 0 0 0 0 0 0 0 0 0 0 0
44178- 0 0 0 0 0 0 0 0 0 0 0 0
44179- 0 0 0 0 0 0 0 0 0 0 0 0
44180- 0 0 0 0 0 0 0 0 0 0 0 0
44181- 0 0 0 0 0 0 6 6 6 22 22 22
44182- 58 58 58 62 62 62 2 2 6 2 2 6
44183- 2 2 6 2 2 6 30 30 30 78 78 78
44184-250 250 250 253 253 253 253 253 253 253 253 253
44185-253 253 253 253 253 253 253 253 253 253 253 253
44186-253 253 253 253 253 253 231 231 231 246 246 246
44187-253 253 253 253 253 253 253 253 253 253 253 253
44188-253 253 253 253 253 253 253 253 253 253 253 253
44189-253 253 253 253 253 253 253 253 253 253 253 253
44190-253 253 253 253 253 253 253 253 253 253 253 253
44191-253 253 253 253 253 253 206 206 206 2 2 6
44192- 22 22 22 34 34 34 18 14 6 22 22 22
44193- 26 26 26 18 18 18 6 6 6 2 2 6
44194- 2 2 6 82 82 82 54 54 54 18 18 18
44195- 6 6 6 0 0 0 0 0 0 0 0 0
44196- 0 0 0 0 0 0 0 0 0 0 0 0
44197- 0 0 0 0 0 0 0 0 0 0 0 0
44198- 0 0 0 0 0 0 0 0 0 0 0 0
44199- 0 0 0 0 0 0 0 0 0 0 0 0
44200- 0 0 0 0 0 0 0 0 0 0 0 0
44201- 0 0 0 0 0 0 6 6 6 26 26 26
44202- 62 62 62 106 106 106 74 54 14 185 133 11
44203-210 162 10 121 92 8 6 6 6 62 62 62
44204-238 238 238 253 253 253 253 253 253 253 253 253
44205-253 253 253 253 253 253 253 253 253 253 253 253
44206-253 253 253 253 253 253 231 231 231 246 246 246
44207-253 253 253 253 253 253 253 253 253 253 253 253
44208-253 253 253 253 253 253 253 253 253 253 253 253
44209-253 253 253 253 253 253 253 253 253 253 253 253
44210-253 253 253 253 253 253 253 253 253 253 253 253
44211-253 253 253 253 253 253 158 158 158 18 18 18
44212- 14 14 14 2 2 6 2 2 6 2 2 6
44213- 6 6 6 18 18 18 66 66 66 38 38 38
44214- 6 6 6 94 94 94 50 50 50 18 18 18
44215- 6 6 6 0 0 0 0 0 0 0 0 0
44216- 0 0 0 0 0 0 0 0 0 0 0 0
44217- 0 0 0 0 0 0 0 0 0 0 0 0
44218- 0 0 0 0 0 0 0 0 0 0 0 0
44219- 0 0 0 0 0 0 0 0 0 0 0 0
44220- 0 0 0 0 0 0 0 0 0 6 6 6
44221- 10 10 10 10 10 10 18 18 18 38 38 38
44222- 78 78 78 142 134 106 216 158 10 242 186 14
44223-246 190 14 246 190 14 156 118 10 10 10 10
44224- 90 90 90 238 238 238 253 253 253 253 253 253
44225-253 253 253 253 253 253 253 253 253 253 253 253
44226-253 253 253 253 253 253 231 231 231 250 250 250
44227-253 253 253 253 253 253 253 253 253 253 253 253
44228-253 253 253 253 253 253 253 253 253 253 253 253
44229-253 253 253 253 253 253 253 253 253 253 253 253
44230-253 253 253 253 253 253 253 253 253 246 230 190
44231-238 204 91 238 204 91 181 142 44 37 26 9
44232- 2 2 6 2 2 6 2 2 6 2 2 6
44233- 2 2 6 2 2 6 38 38 38 46 46 46
44234- 26 26 26 106 106 106 54 54 54 18 18 18
44235- 6 6 6 0 0 0 0 0 0 0 0 0
44236- 0 0 0 0 0 0 0 0 0 0 0 0
44237- 0 0 0 0 0 0 0 0 0 0 0 0
44238- 0 0 0 0 0 0 0 0 0 0 0 0
44239- 0 0 0 0 0 0 0 0 0 0 0 0
44240- 0 0 0 6 6 6 14 14 14 22 22 22
44241- 30 30 30 38 38 38 50 50 50 70 70 70
44242-106 106 106 190 142 34 226 170 11 242 186 14
44243-246 190 14 246 190 14 246 190 14 154 114 10
44244- 6 6 6 74 74 74 226 226 226 253 253 253
44245-253 253 253 253 253 253 253 253 253 253 253 253
44246-253 253 253 253 253 253 231 231 231 250 250 250
44247-253 253 253 253 253 253 253 253 253 253 253 253
44248-253 253 253 253 253 253 253 253 253 253 253 253
44249-253 253 253 253 253 253 253 253 253 253 253 253
44250-253 253 253 253 253 253 253 253 253 228 184 62
44251-241 196 14 241 208 19 232 195 16 38 30 10
44252- 2 2 6 2 2 6 2 2 6 2 2 6
44253- 2 2 6 6 6 6 30 30 30 26 26 26
44254-203 166 17 154 142 90 66 66 66 26 26 26
44255- 6 6 6 0 0 0 0 0 0 0 0 0
44256- 0 0 0 0 0 0 0 0 0 0 0 0
44257- 0 0 0 0 0 0 0 0 0 0 0 0
44258- 0 0 0 0 0 0 0 0 0 0 0 0
44259- 0 0 0 0 0 0 0 0 0 0 0 0
44260- 6 6 6 18 18 18 38 38 38 58 58 58
44261- 78 78 78 86 86 86 101 101 101 123 123 123
44262-175 146 61 210 150 10 234 174 13 246 186 14
44263-246 190 14 246 190 14 246 190 14 238 190 10
44264-102 78 10 2 2 6 46 46 46 198 198 198
44265-253 253 253 253 253 253 253 253 253 253 253 253
44266-253 253 253 253 253 253 234 234 234 242 242 242
44267-253 253 253 253 253 253 253 253 253 253 253 253
44268-253 253 253 253 253 253 253 253 253 253 253 253
44269-253 253 253 253 253 253 253 253 253 253 253 253
44270-253 253 253 253 253 253 253 253 253 224 178 62
44271-242 186 14 241 196 14 210 166 10 22 18 6
44272- 2 2 6 2 2 6 2 2 6 2 2 6
44273- 2 2 6 2 2 6 6 6 6 121 92 8
44274-238 202 15 232 195 16 82 82 82 34 34 34
44275- 10 10 10 0 0 0 0 0 0 0 0 0
44276- 0 0 0 0 0 0 0 0 0 0 0 0
44277- 0 0 0 0 0 0 0 0 0 0 0 0
44278- 0 0 0 0 0 0 0 0 0 0 0 0
44279- 0 0 0 0 0 0 0 0 0 0 0 0
44280- 14 14 14 38 38 38 70 70 70 154 122 46
44281-190 142 34 200 144 11 197 138 11 197 138 11
44282-213 154 11 226 170 11 242 186 14 246 190 14
44283-246 190 14 246 190 14 246 190 14 246 190 14
44284-225 175 15 46 32 6 2 2 6 22 22 22
44285-158 158 158 250 250 250 253 253 253 253 253 253
44286-253 253 253 253 253 253 253 253 253 253 253 253
44287-253 253 253 253 253 253 253 253 253 253 253 253
44288-253 253 253 253 253 253 253 253 253 253 253 253
44289-253 253 253 253 253 253 253 253 253 253 253 253
44290-253 253 253 250 250 250 242 242 242 224 178 62
44291-239 182 13 236 186 11 213 154 11 46 32 6
44292- 2 2 6 2 2 6 2 2 6 2 2 6
44293- 2 2 6 2 2 6 61 42 6 225 175 15
44294-238 190 10 236 186 11 112 100 78 42 42 42
44295- 14 14 14 0 0 0 0 0 0 0 0 0
44296- 0 0 0 0 0 0 0 0 0 0 0 0
44297- 0 0 0 0 0 0 0 0 0 0 0 0
44298- 0 0 0 0 0 0 0 0 0 0 0 0
44299- 0 0 0 0 0 0 0 0 0 6 6 6
44300- 22 22 22 54 54 54 154 122 46 213 154 11
44301-226 170 11 230 174 11 226 170 11 226 170 11
44302-236 178 12 242 186 14 246 190 14 246 190 14
44303-246 190 14 246 190 14 246 190 14 246 190 14
44304-241 196 14 184 144 12 10 10 10 2 2 6
44305- 6 6 6 116 116 116 242 242 242 253 253 253
44306-253 253 253 253 253 253 253 253 253 253 253 253
44307-253 253 253 253 253 253 253 253 253 253 253 253
44308-253 253 253 253 253 253 253 253 253 253 253 253
44309-253 253 253 253 253 253 253 253 253 253 253 253
44310-253 253 253 231 231 231 198 198 198 214 170 54
44311-236 178 12 236 178 12 210 150 10 137 92 6
44312- 18 14 6 2 2 6 2 2 6 2 2 6
44313- 6 6 6 70 47 6 200 144 11 236 178 12
44314-239 182 13 239 182 13 124 112 88 58 58 58
44315- 22 22 22 6 6 6 0 0 0 0 0 0
44316- 0 0 0 0 0 0 0 0 0 0 0 0
44317- 0 0 0 0 0 0 0 0 0 0 0 0
44318- 0 0 0 0 0 0 0 0 0 0 0 0
44319- 0 0 0 0 0 0 0 0 0 10 10 10
44320- 30 30 30 70 70 70 180 133 36 226 170 11
44321-239 182 13 242 186 14 242 186 14 246 186 14
44322-246 190 14 246 190 14 246 190 14 246 190 14
44323-246 190 14 246 190 14 246 190 14 246 190 14
44324-246 190 14 232 195 16 98 70 6 2 2 6
44325- 2 2 6 2 2 6 66 66 66 221 221 221
44326-253 253 253 253 253 253 253 253 253 253 253 253
44327-253 253 253 253 253 253 253 253 253 253 253 253
44328-253 253 253 253 253 253 253 253 253 253 253 253
44329-253 253 253 253 253 253 253 253 253 253 253 253
44330-253 253 253 206 206 206 198 198 198 214 166 58
44331-230 174 11 230 174 11 216 158 10 192 133 9
44332-163 110 8 116 81 8 102 78 10 116 81 8
44333-167 114 7 197 138 11 226 170 11 239 182 13
44334-242 186 14 242 186 14 162 146 94 78 78 78
44335- 34 34 34 14 14 14 6 6 6 0 0 0
44336- 0 0 0 0 0 0 0 0 0 0 0 0
44337- 0 0 0 0 0 0 0 0 0 0 0 0
44338- 0 0 0 0 0 0 0 0 0 0 0 0
44339- 0 0 0 0 0 0 0 0 0 6 6 6
44340- 30 30 30 78 78 78 190 142 34 226 170 11
44341-239 182 13 246 190 14 246 190 14 246 190 14
44342-246 190 14 246 190 14 246 190 14 246 190 14
44343-246 190 14 246 190 14 246 190 14 246 190 14
44344-246 190 14 241 196 14 203 166 17 22 18 6
44345- 2 2 6 2 2 6 2 2 6 38 38 38
44346-218 218 218 253 253 253 253 253 253 253 253 253
44347-253 253 253 253 253 253 253 253 253 253 253 253
44348-253 253 253 253 253 253 253 253 253 253 253 253
44349-253 253 253 253 253 253 253 253 253 253 253 253
44350-250 250 250 206 206 206 198 198 198 202 162 69
44351-226 170 11 236 178 12 224 166 10 210 150 10
44352-200 144 11 197 138 11 192 133 9 197 138 11
44353-210 150 10 226 170 11 242 186 14 246 190 14
44354-246 190 14 246 186 14 225 175 15 124 112 88
44355- 62 62 62 30 30 30 14 14 14 6 6 6
44356- 0 0 0 0 0 0 0 0 0 0 0 0
44357- 0 0 0 0 0 0 0 0 0 0 0 0
44358- 0 0 0 0 0 0 0 0 0 0 0 0
44359- 0 0 0 0 0 0 0 0 0 10 10 10
44360- 30 30 30 78 78 78 174 135 50 224 166 10
44361-239 182 13 246 190 14 246 190 14 246 190 14
44362-246 190 14 246 190 14 246 190 14 246 190 14
44363-246 190 14 246 190 14 246 190 14 246 190 14
44364-246 190 14 246 190 14 241 196 14 139 102 15
44365- 2 2 6 2 2 6 2 2 6 2 2 6
44366- 78 78 78 250 250 250 253 253 253 253 253 253
44367-253 253 253 253 253 253 253 253 253 253 253 253
44368-253 253 253 253 253 253 253 253 253 253 253 253
44369-253 253 253 253 253 253 253 253 253 253 253 253
44370-250 250 250 214 214 214 198 198 198 190 150 46
44371-219 162 10 236 178 12 234 174 13 224 166 10
44372-216 158 10 213 154 11 213 154 11 216 158 10
44373-226 170 11 239 182 13 246 190 14 246 190 14
44374-246 190 14 246 190 14 242 186 14 206 162 42
44375-101 101 101 58 58 58 30 30 30 14 14 14
44376- 6 6 6 0 0 0 0 0 0 0 0 0
44377- 0 0 0 0 0 0 0 0 0 0 0 0
44378- 0 0 0 0 0 0 0 0 0 0 0 0
44379- 0 0 0 0 0 0 0 0 0 10 10 10
44380- 30 30 30 74 74 74 174 135 50 216 158 10
44381-236 178 12 246 190 14 246 190 14 246 190 14
44382-246 190 14 246 190 14 246 190 14 246 190 14
44383-246 190 14 246 190 14 246 190 14 246 190 14
44384-246 190 14 246 190 14 241 196 14 226 184 13
44385- 61 42 6 2 2 6 2 2 6 2 2 6
44386- 22 22 22 238 238 238 253 253 253 253 253 253
44387-253 253 253 253 253 253 253 253 253 253 253 253
44388-253 253 253 253 253 253 253 253 253 253 253 253
44389-253 253 253 253 253 253 253 253 253 253 253 253
44390-253 253 253 226 226 226 187 187 187 180 133 36
44391-216 158 10 236 178 12 239 182 13 236 178 12
44392-230 174 11 226 170 11 226 170 11 230 174 11
44393-236 178 12 242 186 14 246 190 14 246 190 14
44394-246 190 14 246 190 14 246 186 14 239 182 13
44395-206 162 42 106 106 106 66 66 66 34 34 34
44396- 14 14 14 6 6 6 0 0 0 0 0 0
44397- 0 0 0 0 0 0 0 0 0 0 0 0
44398- 0 0 0 0 0 0 0 0 0 0 0 0
44399- 0 0 0 0 0 0 0 0 0 6 6 6
44400- 26 26 26 70 70 70 163 133 67 213 154 11
44401-236 178 12 246 190 14 246 190 14 246 190 14
44402-246 190 14 246 190 14 246 190 14 246 190 14
44403-246 190 14 246 190 14 246 190 14 246 190 14
44404-246 190 14 246 190 14 246 190 14 241 196 14
44405-190 146 13 18 14 6 2 2 6 2 2 6
44406- 46 46 46 246 246 246 253 253 253 253 253 253
44407-253 253 253 253 253 253 253 253 253 253 253 253
44408-253 253 253 253 253 253 253 253 253 253 253 253
44409-253 253 253 253 253 253 253 253 253 253 253 253
44410-253 253 253 221 221 221 86 86 86 156 107 11
44411-216 158 10 236 178 12 242 186 14 246 186 14
44412-242 186 14 239 182 13 239 182 13 242 186 14
44413-242 186 14 246 186 14 246 190 14 246 190 14
44414-246 190 14 246 190 14 246 190 14 246 190 14
44415-242 186 14 225 175 15 142 122 72 66 66 66
44416- 30 30 30 10 10 10 0 0 0 0 0 0
44417- 0 0 0 0 0 0 0 0 0 0 0 0
44418- 0 0 0 0 0 0 0 0 0 0 0 0
44419- 0 0 0 0 0 0 0 0 0 6 6 6
44420- 26 26 26 70 70 70 163 133 67 210 150 10
44421-236 178 12 246 190 14 246 190 14 246 190 14
44422-246 190 14 246 190 14 246 190 14 246 190 14
44423-246 190 14 246 190 14 246 190 14 246 190 14
44424-246 190 14 246 190 14 246 190 14 246 190 14
44425-232 195 16 121 92 8 34 34 34 106 106 106
44426-221 221 221 253 253 253 253 253 253 253 253 253
44427-253 253 253 253 253 253 253 253 253 253 253 253
44428-253 253 253 253 253 253 253 253 253 253 253 253
44429-253 253 253 253 253 253 253 253 253 253 253 253
44430-242 242 242 82 82 82 18 14 6 163 110 8
44431-216 158 10 236 178 12 242 186 14 246 190 14
44432-246 190 14 246 190 14 246 190 14 246 190 14
44433-246 190 14 246 190 14 246 190 14 246 190 14
44434-246 190 14 246 190 14 246 190 14 246 190 14
44435-246 190 14 246 190 14 242 186 14 163 133 67
44436- 46 46 46 18 18 18 6 6 6 0 0 0
44437- 0 0 0 0 0 0 0 0 0 0 0 0
44438- 0 0 0 0 0 0 0 0 0 0 0 0
44439- 0 0 0 0 0 0 0 0 0 10 10 10
44440- 30 30 30 78 78 78 163 133 67 210 150 10
44441-236 178 12 246 186 14 246 190 14 246 190 14
44442-246 190 14 246 190 14 246 190 14 246 190 14
44443-246 190 14 246 190 14 246 190 14 246 190 14
44444-246 190 14 246 190 14 246 190 14 246 190 14
44445-241 196 14 215 174 15 190 178 144 253 253 253
44446-253 253 253 253 253 253 253 253 253 253 253 253
44447-253 253 253 253 253 253 253 253 253 253 253 253
44448-253 253 253 253 253 253 253 253 253 253 253 253
44449-253 253 253 253 253 253 253 253 253 218 218 218
44450- 58 58 58 2 2 6 22 18 6 167 114 7
44451-216 158 10 236 178 12 246 186 14 246 190 14
44452-246 190 14 246 190 14 246 190 14 246 190 14
44453-246 190 14 246 190 14 246 190 14 246 190 14
44454-246 190 14 246 190 14 246 190 14 246 190 14
44455-246 190 14 246 186 14 242 186 14 190 150 46
44456- 54 54 54 22 22 22 6 6 6 0 0 0
44457- 0 0 0 0 0 0 0 0 0 0 0 0
44458- 0 0 0 0 0 0 0 0 0 0 0 0
44459- 0 0 0 0 0 0 0 0 0 14 14 14
44460- 38 38 38 86 86 86 180 133 36 213 154 11
44461-236 178 12 246 186 14 246 190 14 246 190 14
44462-246 190 14 246 190 14 246 190 14 246 190 14
44463-246 190 14 246 190 14 246 190 14 246 190 14
44464-246 190 14 246 190 14 246 190 14 246 190 14
44465-246 190 14 232 195 16 190 146 13 214 214 214
44466-253 253 253 253 253 253 253 253 253 253 253 253
44467-253 253 253 253 253 253 253 253 253 253 253 253
44468-253 253 253 253 253 253 253 253 253 253 253 253
44469-253 253 253 250 250 250 170 170 170 26 26 26
44470- 2 2 6 2 2 6 37 26 9 163 110 8
44471-219 162 10 239 182 13 246 186 14 246 190 14
44472-246 190 14 246 190 14 246 190 14 246 190 14
44473-246 190 14 246 190 14 246 190 14 246 190 14
44474-246 190 14 246 190 14 246 190 14 246 190 14
44475-246 186 14 236 178 12 224 166 10 142 122 72
44476- 46 46 46 18 18 18 6 6 6 0 0 0
44477- 0 0 0 0 0 0 0 0 0 0 0 0
44478- 0 0 0 0 0 0 0 0 0 0 0 0
44479- 0 0 0 0 0 0 6 6 6 18 18 18
44480- 50 50 50 109 106 95 192 133 9 224 166 10
44481-242 186 14 246 190 14 246 190 14 246 190 14
44482-246 190 14 246 190 14 246 190 14 246 190 14
44483-246 190 14 246 190 14 246 190 14 246 190 14
44484-246 190 14 246 190 14 246 190 14 246 190 14
44485-242 186 14 226 184 13 210 162 10 142 110 46
44486-226 226 226 253 253 253 253 253 253 253 253 253
44487-253 253 253 253 253 253 253 253 253 253 253 253
44488-253 253 253 253 253 253 253 253 253 253 253 253
44489-198 198 198 66 66 66 2 2 6 2 2 6
44490- 2 2 6 2 2 6 50 34 6 156 107 11
44491-219 162 10 239 182 13 246 186 14 246 190 14
44492-246 190 14 246 190 14 246 190 14 246 190 14
44493-246 190 14 246 190 14 246 190 14 246 190 14
44494-246 190 14 246 190 14 246 190 14 242 186 14
44495-234 174 13 213 154 11 154 122 46 66 66 66
44496- 30 30 30 10 10 10 0 0 0 0 0 0
44497- 0 0 0 0 0 0 0 0 0 0 0 0
44498- 0 0 0 0 0 0 0 0 0 0 0 0
44499- 0 0 0 0 0 0 6 6 6 22 22 22
44500- 58 58 58 154 121 60 206 145 10 234 174 13
44501-242 186 14 246 186 14 246 190 14 246 190 14
44502-246 190 14 246 190 14 246 190 14 246 190 14
44503-246 190 14 246 190 14 246 190 14 246 190 14
44504-246 190 14 246 190 14 246 190 14 246 190 14
44505-246 186 14 236 178 12 210 162 10 163 110 8
44506- 61 42 6 138 138 138 218 218 218 250 250 250
44507-253 253 253 253 253 253 253 253 253 250 250 250
44508-242 242 242 210 210 210 144 144 144 66 66 66
44509- 6 6 6 2 2 6 2 2 6 2 2 6
44510- 2 2 6 2 2 6 61 42 6 163 110 8
44511-216 158 10 236 178 12 246 190 14 246 190 14
44512-246 190 14 246 190 14 246 190 14 246 190 14
44513-246 190 14 246 190 14 246 190 14 246 190 14
44514-246 190 14 239 182 13 230 174 11 216 158 10
44515-190 142 34 124 112 88 70 70 70 38 38 38
44516- 18 18 18 6 6 6 0 0 0 0 0 0
44517- 0 0 0 0 0 0 0 0 0 0 0 0
44518- 0 0 0 0 0 0 0 0 0 0 0 0
44519- 0 0 0 0 0 0 6 6 6 22 22 22
44520- 62 62 62 168 124 44 206 145 10 224 166 10
44521-236 178 12 239 182 13 242 186 14 242 186 14
44522-246 186 14 246 190 14 246 190 14 246 190 14
44523-246 190 14 246 190 14 246 190 14 246 190 14
44524-246 190 14 246 190 14 246 190 14 246 190 14
44525-246 190 14 236 178 12 216 158 10 175 118 6
44526- 80 54 7 2 2 6 6 6 6 30 30 30
44527- 54 54 54 62 62 62 50 50 50 38 38 38
44528- 14 14 14 2 2 6 2 2 6 2 2 6
44529- 2 2 6 2 2 6 2 2 6 2 2 6
44530- 2 2 6 6 6 6 80 54 7 167 114 7
44531-213 154 11 236 178 12 246 190 14 246 190 14
44532-246 190 14 246 190 14 246 190 14 246 190 14
44533-246 190 14 242 186 14 239 182 13 239 182 13
44534-230 174 11 210 150 10 174 135 50 124 112 88
44535- 82 82 82 54 54 54 34 34 34 18 18 18
44536- 6 6 6 0 0 0 0 0 0 0 0 0
44537- 0 0 0 0 0 0 0 0 0 0 0 0
44538- 0 0 0 0 0 0 0 0 0 0 0 0
44539- 0 0 0 0 0 0 6 6 6 18 18 18
44540- 50 50 50 158 118 36 192 133 9 200 144 11
44541-216 158 10 219 162 10 224 166 10 226 170 11
44542-230 174 11 236 178 12 239 182 13 239 182 13
44543-242 186 14 246 186 14 246 190 14 246 190 14
44544-246 190 14 246 190 14 246 190 14 246 190 14
44545-246 186 14 230 174 11 210 150 10 163 110 8
44546-104 69 6 10 10 10 2 2 6 2 2 6
44547- 2 2 6 2 2 6 2 2 6 2 2 6
44548- 2 2 6 2 2 6 2 2 6 2 2 6
44549- 2 2 6 2 2 6 2 2 6 2 2 6
44550- 2 2 6 6 6 6 91 60 6 167 114 7
44551-206 145 10 230 174 11 242 186 14 246 190 14
44552-246 190 14 246 190 14 246 186 14 242 186 14
44553-239 182 13 230 174 11 224 166 10 213 154 11
44554-180 133 36 124 112 88 86 86 86 58 58 58
44555- 38 38 38 22 22 22 10 10 10 6 6 6
44556- 0 0 0 0 0 0 0 0 0 0 0 0
44557- 0 0 0 0 0 0 0 0 0 0 0 0
44558- 0 0 0 0 0 0 0 0 0 0 0 0
44559- 0 0 0 0 0 0 0 0 0 14 14 14
44560- 34 34 34 70 70 70 138 110 50 158 118 36
44561-167 114 7 180 123 7 192 133 9 197 138 11
44562-200 144 11 206 145 10 213 154 11 219 162 10
44563-224 166 10 230 174 11 239 182 13 242 186 14
44564-246 186 14 246 186 14 246 186 14 246 186 14
44565-239 182 13 216 158 10 185 133 11 152 99 6
44566-104 69 6 18 14 6 2 2 6 2 2 6
44567- 2 2 6 2 2 6 2 2 6 2 2 6
44568- 2 2 6 2 2 6 2 2 6 2 2 6
44569- 2 2 6 2 2 6 2 2 6 2 2 6
44570- 2 2 6 6 6 6 80 54 7 152 99 6
44571-192 133 9 219 162 10 236 178 12 239 182 13
44572-246 186 14 242 186 14 239 182 13 236 178 12
44573-224 166 10 206 145 10 192 133 9 154 121 60
44574- 94 94 94 62 62 62 42 42 42 22 22 22
44575- 14 14 14 6 6 6 0 0 0 0 0 0
44576- 0 0 0 0 0 0 0 0 0 0 0 0
44577- 0 0 0 0 0 0 0 0 0 0 0 0
44578- 0 0 0 0 0 0 0 0 0 0 0 0
44579- 0 0 0 0 0 0 0 0 0 6 6 6
44580- 18 18 18 34 34 34 58 58 58 78 78 78
44581-101 98 89 124 112 88 142 110 46 156 107 11
44582-163 110 8 167 114 7 175 118 6 180 123 7
44583-185 133 11 197 138 11 210 150 10 219 162 10
44584-226 170 11 236 178 12 236 178 12 234 174 13
44585-219 162 10 197 138 11 163 110 8 130 83 6
44586- 91 60 6 10 10 10 2 2 6 2 2 6
44587- 18 18 18 38 38 38 38 38 38 38 38 38
44588- 38 38 38 38 38 38 38 38 38 38 38 38
44589- 38 38 38 38 38 38 26 26 26 2 2 6
44590- 2 2 6 6 6 6 70 47 6 137 92 6
44591-175 118 6 200 144 11 219 162 10 230 174 11
44592-234 174 13 230 174 11 219 162 10 210 150 10
44593-192 133 9 163 110 8 124 112 88 82 82 82
44594- 50 50 50 30 30 30 14 14 14 6 6 6
44595- 0 0 0 0 0 0 0 0 0 0 0 0
44596- 0 0 0 0 0 0 0 0 0 0 0 0
44597- 0 0 0 0 0 0 0 0 0 0 0 0
44598- 0 0 0 0 0 0 0 0 0 0 0 0
44599- 0 0 0 0 0 0 0 0 0 0 0 0
44600- 6 6 6 14 14 14 22 22 22 34 34 34
44601- 42 42 42 58 58 58 74 74 74 86 86 86
44602-101 98 89 122 102 70 130 98 46 121 87 25
44603-137 92 6 152 99 6 163 110 8 180 123 7
44604-185 133 11 197 138 11 206 145 10 200 144 11
44605-180 123 7 156 107 11 130 83 6 104 69 6
44606- 50 34 6 54 54 54 110 110 110 101 98 89
44607- 86 86 86 82 82 82 78 78 78 78 78 78
44608- 78 78 78 78 78 78 78 78 78 78 78 78
44609- 78 78 78 82 82 82 86 86 86 94 94 94
44610-106 106 106 101 101 101 86 66 34 124 80 6
44611-156 107 11 180 123 7 192 133 9 200 144 11
44612-206 145 10 200 144 11 192 133 9 175 118 6
44613-139 102 15 109 106 95 70 70 70 42 42 42
44614- 22 22 22 10 10 10 0 0 0 0 0 0
44615- 0 0 0 0 0 0 0 0 0 0 0 0
44616- 0 0 0 0 0 0 0 0 0 0 0 0
44617- 0 0 0 0 0 0 0 0 0 0 0 0
44618- 0 0 0 0 0 0 0 0 0 0 0 0
44619- 0 0 0 0 0 0 0 0 0 0 0 0
44620- 0 0 0 0 0 0 6 6 6 10 10 10
44621- 14 14 14 22 22 22 30 30 30 38 38 38
44622- 50 50 50 62 62 62 74 74 74 90 90 90
44623-101 98 89 112 100 78 121 87 25 124 80 6
44624-137 92 6 152 99 6 152 99 6 152 99 6
44625-138 86 6 124 80 6 98 70 6 86 66 30
44626-101 98 89 82 82 82 58 58 58 46 46 46
44627- 38 38 38 34 34 34 34 34 34 34 34 34
44628- 34 34 34 34 34 34 34 34 34 34 34 34
44629- 34 34 34 34 34 34 38 38 38 42 42 42
44630- 54 54 54 82 82 82 94 86 76 91 60 6
44631-134 86 6 156 107 11 167 114 7 175 118 6
44632-175 118 6 167 114 7 152 99 6 121 87 25
44633-101 98 89 62 62 62 34 34 34 18 18 18
44634- 6 6 6 0 0 0 0 0 0 0 0 0
44635- 0 0 0 0 0 0 0 0 0 0 0 0
44636- 0 0 0 0 0 0 0 0 0 0 0 0
44637- 0 0 0 0 0 0 0 0 0 0 0 0
44638- 0 0 0 0 0 0 0 0 0 0 0 0
44639- 0 0 0 0 0 0 0 0 0 0 0 0
44640- 0 0 0 0 0 0 0 0 0 0 0 0
44641- 0 0 0 6 6 6 6 6 6 10 10 10
44642- 18 18 18 22 22 22 30 30 30 42 42 42
44643- 50 50 50 66 66 66 86 86 86 101 98 89
44644-106 86 58 98 70 6 104 69 6 104 69 6
44645-104 69 6 91 60 6 82 62 34 90 90 90
44646- 62 62 62 38 38 38 22 22 22 14 14 14
44647- 10 10 10 10 10 10 10 10 10 10 10 10
44648- 10 10 10 10 10 10 6 6 6 10 10 10
44649- 10 10 10 10 10 10 10 10 10 14 14 14
44650- 22 22 22 42 42 42 70 70 70 89 81 66
44651- 80 54 7 104 69 6 124 80 6 137 92 6
44652-134 86 6 116 81 8 100 82 52 86 86 86
44653- 58 58 58 30 30 30 14 14 14 6 6 6
44654- 0 0 0 0 0 0 0 0 0 0 0 0
44655- 0 0 0 0 0 0 0 0 0 0 0 0
44656- 0 0 0 0 0 0 0 0 0 0 0 0
44657- 0 0 0 0 0 0 0 0 0 0 0 0
44658- 0 0 0 0 0 0 0 0 0 0 0 0
44659- 0 0 0 0 0 0 0 0 0 0 0 0
44660- 0 0 0 0 0 0 0 0 0 0 0 0
44661- 0 0 0 0 0 0 0 0 0 0 0 0
44662- 0 0 0 6 6 6 10 10 10 14 14 14
44663- 18 18 18 26 26 26 38 38 38 54 54 54
44664- 70 70 70 86 86 86 94 86 76 89 81 66
44665- 89 81 66 86 86 86 74 74 74 50 50 50
44666- 30 30 30 14 14 14 6 6 6 0 0 0
44667- 0 0 0 0 0 0 0 0 0 0 0 0
44668- 0 0 0 0 0 0 0 0 0 0 0 0
44669- 0 0 0 0 0 0 0 0 0 0 0 0
44670- 6 6 6 18 18 18 34 34 34 58 58 58
44671- 82 82 82 89 81 66 89 81 66 89 81 66
44672- 94 86 66 94 86 76 74 74 74 50 50 50
44673- 26 26 26 14 14 14 6 6 6 0 0 0
44674- 0 0 0 0 0 0 0 0 0 0 0 0
44675- 0 0 0 0 0 0 0 0 0 0 0 0
44676- 0 0 0 0 0 0 0 0 0 0 0 0
44677- 0 0 0 0 0 0 0 0 0 0 0 0
44678- 0 0 0 0 0 0 0 0 0 0 0 0
44679- 0 0 0 0 0 0 0 0 0 0 0 0
44680- 0 0 0 0 0 0 0 0 0 0 0 0
44681- 0 0 0 0 0 0 0 0 0 0 0 0
44682- 0 0 0 0 0 0 0 0 0 0 0 0
44683- 6 6 6 6 6 6 14 14 14 18 18 18
44684- 30 30 30 38 38 38 46 46 46 54 54 54
44685- 50 50 50 42 42 42 30 30 30 18 18 18
44686- 10 10 10 0 0 0 0 0 0 0 0 0
44687- 0 0 0 0 0 0 0 0 0 0 0 0
44688- 0 0 0 0 0 0 0 0 0 0 0 0
44689- 0 0 0 0 0 0 0 0 0 0 0 0
44690- 0 0 0 6 6 6 14 14 14 26 26 26
44691- 38 38 38 50 50 50 58 58 58 58 58 58
44692- 54 54 54 42 42 42 30 30 30 18 18 18
44693- 10 10 10 0 0 0 0 0 0 0 0 0
44694- 0 0 0 0 0 0 0 0 0 0 0 0
44695- 0 0 0 0 0 0 0 0 0 0 0 0
44696- 0 0 0 0 0 0 0 0 0 0 0 0
44697- 0 0 0 0 0 0 0 0 0 0 0 0
44698- 0 0 0 0 0 0 0 0 0 0 0 0
44699- 0 0 0 0 0 0 0 0 0 0 0 0
44700- 0 0 0 0 0 0 0 0 0 0 0 0
44701- 0 0 0 0 0 0 0 0 0 0 0 0
44702- 0 0 0 0 0 0 0 0 0 0 0 0
44703- 0 0 0 0 0 0 0 0 0 6 6 6
44704- 6 6 6 10 10 10 14 14 14 18 18 18
44705- 18 18 18 14 14 14 10 10 10 6 6 6
44706- 0 0 0 0 0 0 0 0 0 0 0 0
44707- 0 0 0 0 0 0 0 0 0 0 0 0
44708- 0 0 0 0 0 0 0 0 0 0 0 0
44709- 0 0 0 0 0 0 0 0 0 0 0 0
44710- 0 0 0 0 0 0 0 0 0 6 6 6
44711- 14 14 14 18 18 18 22 22 22 22 22 22
44712- 18 18 18 14 14 14 10 10 10 6 6 6
44713- 0 0 0 0 0 0 0 0 0 0 0 0
44714- 0 0 0 0 0 0 0 0 0 0 0 0
44715- 0 0 0 0 0 0 0 0 0 0 0 0
44716- 0 0 0 0 0 0 0 0 0 0 0 0
44717- 0 0 0 0 0 0 0 0 0 0 0 0
44718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44731+4 4 4 4 4 4
44732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44745+4 4 4 4 4 4
44746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44759+4 4 4 4 4 4
44760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44773+4 4 4 4 4 4
44774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44787+4 4 4 4 4 4
44788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44801+4 4 4 4 4 4
44802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44806+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
44807+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
44808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44811+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
44812+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44813+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
44814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44815+4 4 4 4 4 4
44816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44820+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
44821+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
44822+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44825+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
44826+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
44827+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
44828+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44829+4 4 4 4 4 4
44830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44834+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
44835+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
44836+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44839+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
44840+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
44841+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
44842+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
44843+4 4 4 4 4 4
44844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44847+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
44848+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
44849+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
44850+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
44851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44852+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44853+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
44854+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
44855+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
44856+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
44857+4 4 4 4 4 4
44858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44861+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
44862+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
44863+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
44864+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
44865+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44866+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
44867+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
44868+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
44869+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
44870+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
44871+4 4 4 4 4 4
44872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44875+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
44876+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
44877+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
44878+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
44879+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
44880+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
44881+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
44882+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
44883+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
44884+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
44885+4 4 4 4 4 4
44886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44888+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
44889+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
44890+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
44891+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
44892+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
44893+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
44894+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
44895+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
44896+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
44897+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
44898+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
44899+4 4 4 4 4 4
44900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44902+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
44903+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
44904+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
44905+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
44906+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
44907+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
44908+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
44909+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
44910+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
44911+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
44912+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
44913+4 4 4 4 4 4
44914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44916+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
44917+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
44918+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
44919+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
44920+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
44921+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
44922+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
44923+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
44924+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
44925+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
44926+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44927+4 4 4 4 4 4
44928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44930+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
44931+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
44932+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
44933+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
44934+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
44935+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
44936+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
44937+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
44938+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
44939+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
44940+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
44941+4 4 4 4 4 4
44942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44943+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
44944+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
44945+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
44946+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
44947+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
44948+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
44949+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
44950+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
44951+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
44952+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
44953+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
44954+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
44955+4 4 4 4 4 4
44956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44957+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
44958+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
44959+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
44960+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
44961+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
44962+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
44963+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
44964+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
44965+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
44966+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
44967+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
44968+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
44969+0 0 0 4 4 4
44970+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44971+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
44972+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
44973+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
44974+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
44975+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
44976+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
44977+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
44978+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
44979+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
44980+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
44981+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
44982+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
44983+2 0 0 0 0 0
44984+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
44985+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
44986+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
44987+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
44988+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
44989+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
44990+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
44991+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
44992+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
44993+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
44994+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
44995+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
44996+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
44997+37 38 37 0 0 0
44998+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44999+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45000+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45001+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45002+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45003+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45004+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45005+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45006+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45007+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45008+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45009+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45010+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45011+85 115 134 4 0 0
45012+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45013+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45014+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45015+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45016+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45017+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45018+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45019+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45020+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45021+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45022+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45023+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45024+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45025+60 73 81 4 0 0
45026+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45027+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45028+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45029+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45030+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45031+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45032+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45033+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45034+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45035+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45036+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45037+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45038+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45039+16 19 21 4 0 0
45040+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45041+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45042+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45043+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45044+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45045+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45046+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45047+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45048+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45049+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45050+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45051+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45052+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45053+4 0 0 4 3 3
45054+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45055+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45056+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45058+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45059+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45060+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45061+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45062+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45063+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45064+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45065+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45066+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45067+3 2 2 4 4 4
45068+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45069+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45070+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45071+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45072+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45073+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45074+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45075+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45076+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45077+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45078+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45079+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45080+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45081+4 4 4 4 4 4
45082+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45083+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45084+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45085+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45086+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45087+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45088+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45089+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45090+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45091+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45092+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45093+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45094+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45095+4 4 4 4 4 4
45096+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45097+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45098+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45099+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45100+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45101+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45102+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45103+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45104+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45105+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45106+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45107+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45108+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45109+5 5 5 5 5 5
45110+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45111+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45112+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45113+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45114+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45115+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45116+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45117+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45118+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45119+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45120+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45121+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45122+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45123+5 5 5 4 4 4
45124+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45125+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45126+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45127+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45128+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45129+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45130+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45131+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45132+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45133+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45134+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45135+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45137+4 4 4 4 4 4
45138+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45139+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45140+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45141+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45142+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45143+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45144+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45145+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45146+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45147+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45148+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45149+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45151+4 4 4 4 4 4
45152+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45153+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45154+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45155+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45156+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45157+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45158+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45159+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45160+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45161+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45162+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45165+4 4 4 4 4 4
45166+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45167+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45168+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45169+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45170+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45171+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45172+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45173+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45174+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45175+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45176+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45179+4 4 4 4 4 4
45180+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45181+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45182+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45183+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45184+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45185+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45186+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45187+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45188+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45189+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45190+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45193+4 4 4 4 4 4
45194+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45195+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45196+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45197+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45198+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45199+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45200+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45201+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45202+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45203+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45204+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45207+4 4 4 4 4 4
45208+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45209+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45210+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45211+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45212+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45213+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45214+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45215+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45216+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45217+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45218+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45221+4 4 4 4 4 4
45222+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45223+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45224+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45225+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45226+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45227+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45228+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45229+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45230+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45231+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45232+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45235+4 4 4 4 4 4
45236+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45237+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45238+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45239+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45240+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45241+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45242+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45243+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45244+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45245+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45246+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45249+4 4 4 4 4 4
45250+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45251+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45252+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45253+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45254+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45255+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45256+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45257+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45258+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45259+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45260+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45263+4 4 4 4 4 4
45264+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45265+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45266+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45267+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45268+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45269+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45270+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45271+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45272+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45273+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45274+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45277+4 4 4 4 4 4
45278+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45279+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45280+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45281+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45282+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45283+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45284+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45285+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45286+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45287+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45288+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45291+4 4 4 4 4 4
45292+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45293+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45294+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45295+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45296+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45297+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45298+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45299+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45300+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45301+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45302+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45305+4 4 4 4 4 4
45306+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45307+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45308+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45309+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45310+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45311+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45312+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45313+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45314+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45315+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45316+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45319+4 4 4 4 4 4
45320+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45321+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45322+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45323+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45324+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45325+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45326+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45327+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45328+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45329+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45330+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45333+4 4 4 4 4 4
45334+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45335+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45336+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45337+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45338+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45339+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45340+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45341+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45342+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45343+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45344+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45347+4 4 4 4 4 4
45348+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45349+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45350+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45351+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45352+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45353+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45354+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45355+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45356+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45357+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45358+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45361+4 4 4 4 4 4
45362+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45363+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45364+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45365+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45366+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45367+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45368+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45369+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45370+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45371+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45372+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45375+4 4 4 4 4 4
45376+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45377+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45378+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45379+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45380+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45381+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45382+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45383+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45384+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45385+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45386+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45389+4 4 4 4 4 4
45390+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45391+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45392+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45393+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45394+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45395+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45396+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45397+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45398+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45399+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45400+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45403+4 4 4 4 4 4
45404+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45405+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45406+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45407+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45408+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45409+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45410+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45411+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45412+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45413+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45414+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45417+4 4 4 4 4 4
45418+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45419+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45420+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45421+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45422+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45423+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45424+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45425+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45426+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45427+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45428+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45431+4 4 4 4 4 4
45432+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45433+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45434+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45435+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45436+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45437+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45438+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45439+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45440+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45441+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45442+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45445+4 4 4 4 4 4
45446+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45447+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45448+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45449+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45450+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45451+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45452+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45453+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45454+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45455+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45456+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45459+4 4 4 4 4 4
45460+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45461+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45462+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45463+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45464+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45465+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45466+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45467+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45468+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45469+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45470+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45473+4 4 4 4 4 4
45474+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45475+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45476+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45477+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45478+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45479+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45480+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45481+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45482+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45483+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45484+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45487+4 4 4 4 4 4
45488+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45489+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45490+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45491+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45492+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45493+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45494+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45495+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45496+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45497+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45498+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45501+4 4 4 4 4 4
45502+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45503+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45504+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45505+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45506+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45507+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45508+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45509+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45510+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45511+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45512+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45515+4 4 4 4 4 4
45516+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45517+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45518+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45519+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45520+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45521+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45522+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45523+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45524+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45525+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45526+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45529+4 4 4 4 4 4
45530+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45531+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45532+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45533+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45534+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45535+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45536+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45537+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45538+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45539+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45543+4 4 4 4 4 4
45544+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45545+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45546+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45547+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45548+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45549+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45550+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45551+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45552+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45553+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45557+4 4 4 4 4 4
45558+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45559+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45560+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45561+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45562+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45563+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45564+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45565+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45566+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45567+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45571+4 4 4 4 4 4
45572+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45573+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45574+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45575+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45576+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45577+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45578+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45579+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45580+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45581+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45585+4 4 4 4 4 4
45586+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45587+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45588+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45589+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45590+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45591+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45592+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45593+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45594+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45599+4 4 4 4 4 4
45600+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45601+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45602+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45603+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45604+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45605+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45606+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45607+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45608+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45613+4 4 4 4 4 4
45614+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45615+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45616+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45617+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45618+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45619+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45620+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45621+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45622+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45627+4 4 4 4 4 4
45628+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45629+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45630+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45631+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45632+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45633+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45634+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45635+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45641+4 4 4 4 4 4
45642+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45643+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45644+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45645+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45646+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45647+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45648+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45649+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45655+4 4 4 4 4 4
45656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45657+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45658+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45659+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45660+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45661+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45662+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45663+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45669+4 4 4 4 4 4
45670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45671+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45672+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45673+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45674+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45675+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45676+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45677+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45683+4 4 4 4 4 4
45684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45685+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45686+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45687+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45688+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45689+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45690+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45691+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45697+4 4 4 4 4 4
45698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45700+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45701+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45702+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45703+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45704+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45705+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45711+4 4 4 4 4 4
45712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45715+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45716+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45717+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45718+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45725+4 4 4 4 4 4
45726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45729+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45730+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45731+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45732+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45739+4 4 4 4 4 4
45740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45743+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45744+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45745+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45746+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45753+4 4 4 4 4 4
45754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45757+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
45758+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
45759+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
45760+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
45761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45767+4 4 4 4 4 4
45768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45772+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
45773+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45774+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45781+4 4 4 4 4 4
45782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45786+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
45787+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
45788+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45795+4 4 4 4 4 4
45796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45800+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
45801+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
45802+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45809+4 4 4 4 4 4
45810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45814+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
45815+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
45816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45823+4 4 4 4 4 4
45824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45828+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45829+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
45830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45837+4 4 4 4 4 4
45838diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
45839index 443e3c8..c443d6a 100644
45840--- a/drivers/video/nvidia/nv_backlight.c
45841+++ b/drivers/video/nvidia/nv_backlight.c
45842@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
45843 return bd->props.brightness;
45844 }
45845
45846-static struct backlight_ops nvidia_bl_ops = {
45847+static const struct backlight_ops nvidia_bl_ops = {
45848 .get_brightness = nvidia_bl_get_brightness,
45849 .update_status = nvidia_bl_update_status,
45850 };
45851diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
45852index d94c57f..912984c 100644
45853--- a/drivers/video/riva/fbdev.c
45854+++ b/drivers/video/riva/fbdev.c
45855@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
45856 return bd->props.brightness;
45857 }
45858
45859-static struct backlight_ops riva_bl_ops = {
45860+static const struct backlight_ops riva_bl_ops = {
45861 .get_brightness = riva_bl_get_brightness,
45862 .update_status = riva_bl_update_status,
45863 };
45864diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
45865index 54fbb29..2c108fc 100644
45866--- a/drivers/video/uvesafb.c
45867+++ b/drivers/video/uvesafb.c
45868@@ -18,6 +18,7 @@
45869 #include <linux/fb.h>
45870 #include <linux/io.h>
45871 #include <linux/mutex.h>
45872+#include <linux/moduleloader.h>
45873 #include <video/edid.h>
45874 #include <video/uvesafb.h>
45875 #ifdef CONFIG_X86
45876@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
45877 NULL,
45878 };
45879
45880- return call_usermodehelper(v86d_path, argv, envp, 1);
45881+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
45882 }
45883
45884 /*
45885@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
45886 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
45887 par->pmi_setpal = par->ypan = 0;
45888 } else {
45889+
45890+#ifdef CONFIG_PAX_KERNEXEC
45891+#ifdef CONFIG_MODULES
45892+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
45893+#endif
45894+ if (!par->pmi_code) {
45895+ par->pmi_setpal = par->ypan = 0;
45896+ return 0;
45897+ }
45898+#endif
45899+
45900 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
45901 + task->t.regs.edi);
45902+
45903+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45904+ pax_open_kernel();
45905+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
45906+ pax_close_kernel();
45907+
45908+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
45909+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
45910+#else
45911 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
45912 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
45913+#endif
45914+
45915 printk(KERN_INFO "uvesafb: protected mode interface info at "
45916 "%04x:%04x\n",
45917 (u16)task->t.regs.es, (u16)task->t.regs.edi);
45918@@ -1799,6 +1822,11 @@ out:
45919 if (par->vbe_modes)
45920 kfree(par->vbe_modes);
45921
45922+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45923+ if (par->pmi_code)
45924+ module_free_exec(NULL, par->pmi_code);
45925+#endif
45926+
45927 framebuffer_release(info);
45928 return err;
45929 }
45930@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
45931 kfree(par->vbe_state_orig);
45932 if (par->vbe_state_saved)
45933 kfree(par->vbe_state_saved);
45934+
45935+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45936+ if (par->pmi_code)
45937+ module_free_exec(NULL, par->pmi_code);
45938+#endif
45939+
45940 }
45941
45942 framebuffer_release(info);
45943diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
45944index bd37ee1..cb827e8 100644
45945--- a/drivers/video/vesafb.c
45946+++ b/drivers/video/vesafb.c
45947@@ -9,6 +9,7 @@
45948 */
45949
45950 #include <linux/module.h>
45951+#include <linux/moduleloader.h>
45952 #include <linux/kernel.h>
45953 #include <linux/errno.h>
45954 #include <linux/string.h>
45955@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
45956 static int vram_total __initdata; /* Set total amount of memory */
45957 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
45958 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
45959-static void (*pmi_start)(void) __read_mostly;
45960-static void (*pmi_pal) (void) __read_mostly;
45961+static void (*pmi_start)(void) __read_only;
45962+static void (*pmi_pal) (void) __read_only;
45963 static int depth __read_mostly;
45964 static int vga_compat __read_mostly;
45965 /* --------------------------------------------------------------------- */
45966@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
45967 unsigned int size_vmode;
45968 unsigned int size_remap;
45969 unsigned int size_total;
45970+ void *pmi_code = NULL;
45971
45972 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
45973 return -ENODEV;
45974@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
45975 size_remap = size_total;
45976 vesafb_fix.smem_len = size_remap;
45977
45978-#ifndef __i386__
45979- screen_info.vesapm_seg = 0;
45980-#endif
45981-
45982 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
45983 printk(KERN_WARNING
45984 "vesafb: cannot reserve video memory at 0x%lx\n",
45985@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
45986 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
45987 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
45988
45989+#ifdef __i386__
45990+
45991+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45992+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
45993+ if (!pmi_code)
45994+#elif !defined(CONFIG_PAX_KERNEXEC)
45995+ if (0)
45996+#endif
45997+
45998+#endif
45999+ screen_info.vesapm_seg = 0;
46000+
46001 if (screen_info.vesapm_seg) {
46002- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46003- screen_info.vesapm_seg,screen_info.vesapm_off);
46004+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46005+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46006 }
46007
46008 if (screen_info.vesapm_seg < 0xc000)
46009@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46010
46011 if (ypan || pmi_setpal) {
46012 unsigned short *pmi_base;
46013+
46014 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46015- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46016- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46017+
46018+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46019+ pax_open_kernel();
46020+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46021+#else
46022+ pmi_code = pmi_base;
46023+#endif
46024+
46025+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46026+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46027+
46028+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46029+ pmi_start = ktva_ktla(pmi_start);
46030+ pmi_pal = ktva_ktla(pmi_pal);
46031+ pax_close_kernel();
46032+#endif
46033+
46034 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46035 if (pmi_base[3]) {
46036 printk(KERN_INFO "vesafb: pmi: ports = ");
46037@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46038 info->node, info->fix.id);
46039 return 0;
46040 err:
46041+
46042+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46043+ module_free_exec(NULL, pmi_code);
46044+#endif
46045+
46046 if (info->screen_base)
46047 iounmap(info->screen_base);
46048 framebuffer_release(info);
46049diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46050index 88a60e0..6783cc2 100644
46051--- a/drivers/xen/sys-hypervisor.c
46052+++ b/drivers/xen/sys-hypervisor.c
46053@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46054 return 0;
46055 }
46056
46057-static struct sysfs_ops hyp_sysfs_ops = {
46058+static const struct sysfs_ops hyp_sysfs_ops = {
46059 .show = hyp_sysfs_show,
46060 .store = hyp_sysfs_store,
46061 };
46062diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46063index 18f74ec..3227009 100644
46064--- a/fs/9p/vfs_inode.c
46065+++ b/fs/9p/vfs_inode.c
46066@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46067 static void
46068 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46069 {
46070- char *s = nd_get_link(nd);
46071+ const char *s = nd_get_link(nd);
46072
46073 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46074 IS_ERR(s) ? "<error>" : s);
46075diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46076index bb4cc5b..df5eaa0 100644
46077--- a/fs/Kconfig.binfmt
46078+++ b/fs/Kconfig.binfmt
46079@@ -86,7 +86,7 @@ config HAVE_AOUT
46080
46081 config BINFMT_AOUT
46082 tristate "Kernel support for a.out and ECOFF binaries"
46083- depends on HAVE_AOUT
46084+ depends on HAVE_AOUT && BROKEN
46085 ---help---
46086 A.out (Assembler.OUTput) is a set of formats for libraries and
46087 executables used in the earliest versions of UNIX. Linux used
46088diff --git a/fs/aio.c b/fs/aio.c
46089index 22a19ad..d484e5b 100644
46090--- a/fs/aio.c
46091+++ b/fs/aio.c
46092@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46093 size += sizeof(struct io_event) * nr_events;
46094 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46095
46096- if (nr_pages < 0)
46097+ if (nr_pages <= 0)
46098 return -EINVAL;
46099
46100 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46101@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46102 struct aio_timeout to;
46103 int retry = 0;
46104
46105+ pax_track_stack();
46106+
46107 /* needed to zero any padding within an entry (there shouldn't be
46108 * any, but C is fun!
46109 */
46110@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46111 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46112 {
46113 ssize_t ret;
46114+ struct iovec iovstack;
46115
46116 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46117 kiocb->ki_nbytes, 1,
46118- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46119+ &iovstack, &kiocb->ki_iovec);
46120 if (ret < 0)
46121 goto out;
46122
46123+ if (kiocb->ki_iovec == &iovstack) {
46124+ kiocb->ki_inline_vec = iovstack;
46125+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
46126+ }
46127 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46128 kiocb->ki_cur_seg = 0;
46129 /* ki_nbytes/left now reflect bytes instead of segs */
46130diff --git a/fs/attr.c b/fs/attr.c
46131index 96d394b..33cf5b4 100644
46132--- a/fs/attr.c
46133+++ b/fs/attr.c
46134@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46135 unsigned long limit;
46136
46137 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46138+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46139 if (limit != RLIM_INFINITY && offset > limit)
46140 goto out_sig;
46141 if (offset > inode->i_sb->s_maxbytes)
46142diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46143index 4a1401c..05eb5ca 100644
46144--- a/fs/autofs/root.c
46145+++ b/fs/autofs/root.c
46146@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46147 set_bit(n,sbi->symlink_bitmap);
46148 sl = &sbi->symlink[n];
46149 sl->len = strlen(symname);
46150- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46151+ slsize = sl->len+1;
46152+ sl->data = kmalloc(slsize, GFP_KERNEL);
46153 if (!sl->data) {
46154 clear_bit(n,sbi->symlink_bitmap);
46155 unlock_kernel();
46156diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46157index b4ea829..e63ef18 100644
46158--- a/fs/autofs4/symlink.c
46159+++ b/fs/autofs4/symlink.c
46160@@ -15,7 +15,7 @@
46161 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46162 {
46163 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46164- nd_set_link(nd, (char *)ino->u.symlink);
46165+ nd_set_link(nd, ino->u.symlink);
46166 return NULL;
46167 }
46168
46169diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46170index 2341375..df9d1c2 100644
46171--- a/fs/autofs4/waitq.c
46172+++ b/fs/autofs4/waitq.c
46173@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46174 {
46175 unsigned long sigpipe, flags;
46176 mm_segment_t fs;
46177- const char *data = (const char *)addr;
46178+ const char __user *data = (const char __force_user *)addr;
46179 ssize_t wr = 0;
46180
46181 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46182diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46183index 9158c07..3f06659 100644
46184--- a/fs/befs/linuxvfs.c
46185+++ b/fs/befs/linuxvfs.c
46186@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46187 {
46188 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46189 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46190- char *link = nd_get_link(nd);
46191+ const char *link = nd_get_link(nd);
46192 if (!IS_ERR(link))
46193 kfree(link);
46194 }
46195diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46196index 0133b5a..b3baa9f 100644
46197--- a/fs/binfmt_aout.c
46198+++ b/fs/binfmt_aout.c
46199@@ -16,6 +16,7 @@
46200 #include <linux/string.h>
46201 #include <linux/fs.h>
46202 #include <linux/file.h>
46203+#include <linux/security.h>
46204 #include <linux/stat.h>
46205 #include <linux/fcntl.h>
46206 #include <linux/ptrace.h>
46207@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46208 #endif
46209 # define START_STACK(u) (u.start_stack)
46210
46211+ memset(&dump, 0, sizeof(dump));
46212+
46213 fs = get_fs();
46214 set_fs(KERNEL_DS);
46215 has_dumped = 1;
46216@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46217
46218 /* If the size of the dump file exceeds the rlimit, then see what would happen
46219 if we wrote the stack, but not the data area. */
46220+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46221 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46222 dump.u_dsize = 0;
46223
46224 /* Make sure we have enough room to write the stack and data areas. */
46225+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46226 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46227 dump.u_ssize = 0;
46228
46229@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46230 dump_size = dump.u_ssize << PAGE_SHIFT;
46231 DUMP_WRITE(dump_start,dump_size);
46232 }
46233-/* Finally dump the task struct. Not be used by gdb, but could be useful */
46234- set_fs(KERNEL_DS);
46235- DUMP_WRITE(current,sizeof(*current));
46236+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46237 end_coredump:
46238 set_fs(fs);
46239 return has_dumped;
46240@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46241 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46242 if (rlim >= RLIM_INFINITY)
46243 rlim = ~0;
46244+
46245+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46246 if (ex.a_data + ex.a_bss > rlim)
46247 return -ENOMEM;
46248
46249@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46250 install_exec_creds(bprm);
46251 current->flags &= ~PF_FORKNOEXEC;
46252
46253+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46254+ current->mm->pax_flags = 0UL;
46255+#endif
46256+
46257+#ifdef CONFIG_PAX_PAGEEXEC
46258+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46259+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46260+
46261+#ifdef CONFIG_PAX_EMUTRAMP
46262+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46263+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46264+#endif
46265+
46266+#ifdef CONFIG_PAX_MPROTECT
46267+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46268+ current->mm->pax_flags |= MF_PAX_MPROTECT;
46269+#endif
46270+
46271+ }
46272+#endif
46273+
46274 if (N_MAGIC(ex) == OMAGIC) {
46275 unsigned long text_addr, map_size;
46276 loff_t pos;
46277@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46278
46279 down_write(&current->mm->mmap_sem);
46280 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46281- PROT_READ | PROT_WRITE | PROT_EXEC,
46282+ PROT_READ | PROT_WRITE,
46283 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46284 fd_offset + ex.a_text);
46285 up_write(&current->mm->mmap_sem);
46286diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46287index 1ed37ba..b9c035f 100644
46288--- a/fs/binfmt_elf.c
46289+++ b/fs/binfmt_elf.c
46290@@ -31,6 +31,7 @@
46291 #include <linux/random.h>
46292 #include <linux/elf.h>
46293 #include <linux/utsname.h>
46294+#include <linux/xattr.h>
46295 #include <asm/uaccess.h>
46296 #include <asm/param.h>
46297 #include <asm/page.h>
46298@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46299 #define elf_core_dump NULL
46300 #endif
46301
46302+#ifdef CONFIG_PAX_MPROTECT
46303+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46304+#endif
46305+
46306 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46307 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46308 #else
46309@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46310 .load_binary = load_elf_binary,
46311 .load_shlib = load_elf_library,
46312 .core_dump = elf_core_dump,
46313+
46314+#ifdef CONFIG_PAX_MPROTECT
46315+ .handle_mprotect= elf_handle_mprotect,
46316+#endif
46317+
46318 .min_coredump = ELF_EXEC_PAGESIZE,
46319 .hasvdso = 1
46320 };
46321@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46322
46323 static int set_brk(unsigned long start, unsigned long end)
46324 {
46325+ unsigned long e = end;
46326+
46327 start = ELF_PAGEALIGN(start);
46328 end = ELF_PAGEALIGN(end);
46329 if (end > start) {
46330@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46331 if (BAD_ADDR(addr))
46332 return addr;
46333 }
46334- current->mm->start_brk = current->mm->brk = end;
46335+ current->mm->start_brk = current->mm->brk = e;
46336 return 0;
46337 }
46338
46339@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46340 elf_addr_t __user *u_rand_bytes;
46341 const char *k_platform = ELF_PLATFORM;
46342 const char *k_base_platform = ELF_BASE_PLATFORM;
46343- unsigned char k_rand_bytes[16];
46344+ u32 k_rand_bytes[4];
46345 int items;
46346 elf_addr_t *elf_info;
46347 int ei_index = 0;
46348 const struct cred *cred = current_cred();
46349 struct vm_area_struct *vma;
46350+ unsigned long saved_auxv[AT_VECTOR_SIZE];
46351+
46352+ pax_track_stack();
46353
46354 /*
46355 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46356@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46357 * Generate 16 random bytes for userspace PRNG seeding.
46358 */
46359 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46360- u_rand_bytes = (elf_addr_t __user *)
46361- STACK_ALLOC(p, sizeof(k_rand_bytes));
46362+ srandom32(k_rand_bytes[0] ^ random32());
46363+ srandom32(k_rand_bytes[1] ^ random32());
46364+ srandom32(k_rand_bytes[2] ^ random32());
46365+ srandom32(k_rand_bytes[3] ^ random32());
46366+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
46367+ u_rand_bytes = (elf_addr_t __user *) p;
46368 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46369 return -EFAULT;
46370
46371@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46372 return -EFAULT;
46373 current->mm->env_end = p;
46374
46375+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46376+
46377 /* Put the elf_info on the stack in the right place. */
46378 sp = (elf_addr_t __user *)envp + 1;
46379- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46380+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46381 return -EFAULT;
46382 return 0;
46383 }
46384@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46385 {
46386 struct elf_phdr *elf_phdata;
46387 struct elf_phdr *eppnt;
46388- unsigned long load_addr = 0;
46389+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46390 int load_addr_set = 0;
46391 unsigned long last_bss = 0, elf_bss = 0;
46392- unsigned long error = ~0UL;
46393+ unsigned long error = -EINVAL;
46394 unsigned long total_size;
46395 int retval, i, size;
46396
46397@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46398 goto out_close;
46399 }
46400
46401+#ifdef CONFIG_PAX_SEGMEXEC
46402+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46403+ pax_task_size = SEGMEXEC_TASK_SIZE;
46404+#endif
46405+
46406 eppnt = elf_phdata;
46407 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46408 if (eppnt->p_type == PT_LOAD) {
46409@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46410 k = load_addr + eppnt->p_vaddr;
46411 if (BAD_ADDR(k) ||
46412 eppnt->p_filesz > eppnt->p_memsz ||
46413- eppnt->p_memsz > TASK_SIZE ||
46414- TASK_SIZE - eppnt->p_memsz < k) {
46415+ eppnt->p_memsz > pax_task_size ||
46416+ pax_task_size - eppnt->p_memsz < k) {
46417 error = -ENOMEM;
46418 goto out_close;
46419 }
46420@@ -532,6 +558,348 @@ out:
46421 return error;
46422 }
46423
46424+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46425+{
46426+ unsigned long pax_flags = 0UL;
46427+
46428+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46429+
46430+#ifdef CONFIG_PAX_PAGEEXEC
46431+ if (elf_phdata->p_flags & PF_PAGEEXEC)
46432+ pax_flags |= MF_PAX_PAGEEXEC;
46433+#endif
46434+
46435+#ifdef CONFIG_PAX_SEGMEXEC
46436+ if (elf_phdata->p_flags & PF_SEGMEXEC)
46437+ pax_flags |= MF_PAX_SEGMEXEC;
46438+#endif
46439+
46440+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46441+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46442+ if (nx_enabled)
46443+ pax_flags &= ~MF_PAX_SEGMEXEC;
46444+ else
46445+ pax_flags &= ~MF_PAX_PAGEEXEC;
46446+ }
46447+#endif
46448+
46449+#ifdef CONFIG_PAX_EMUTRAMP
46450+ if (elf_phdata->p_flags & PF_EMUTRAMP)
46451+ pax_flags |= MF_PAX_EMUTRAMP;
46452+#endif
46453+
46454+#ifdef CONFIG_PAX_MPROTECT
46455+ if (elf_phdata->p_flags & PF_MPROTECT)
46456+ pax_flags |= MF_PAX_MPROTECT;
46457+#endif
46458+
46459+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46460+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46461+ pax_flags |= MF_PAX_RANDMMAP;
46462+#endif
46463+
46464+#endif
46465+
46466+ return pax_flags;
46467+}
46468+
46469+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46470+{
46471+ unsigned long pax_flags = 0UL;
46472+
46473+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46474+
46475+#ifdef CONFIG_PAX_PAGEEXEC
46476+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46477+ pax_flags |= MF_PAX_PAGEEXEC;
46478+#endif
46479+
46480+#ifdef CONFIG_PAX_SEGMEXEC
46481+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46482+ pax_flags |= MF_PAX_SEGMEXEC;
46483+#endif
46484+
46485+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46486+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46487+ if (nx_enabled)
46488+ pax_flags &= ~MF_PAX_SEGMEXEC;
46489+ else
46490+ pax_flags &= ~MF_PAX_PAGEEXEC;
46491+ }
46492+#endif
46493+
46494+#ifdef CONFIG_PAX_EMUTRAMP
46495+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46496+ pax_flags |= MF_PAX_EMUTRAMP;
46497+#endif
46498+
46499+#ifdef CONFIG_PAX_MPROTECT
46500+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46501+ pax_flags |= MF_PAX_MPROTECT;
46502+#endif
46503+
46504+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46505+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46506+ pax_flags |= MF_PAX_RANDMMAP;
46507+#endif
46508+
46509+#endif
46510+
46511+ return pax_flags;
46512+}
46513+
46514+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46515+{
46516+ unsigned long pax_flags = 0UL;
46517+
46518+#ifdef CONFIG_PAX_EI_PAX
46519+
46520+#ifdef CONFIG_PAX_PAGEEXEC
46521+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46522+ pax_flags |= MF_PAX_PAGEEXEC;
46523+#endif
46524+
46525+#ifdef CONFIG_PAX_SEGMEXEC
46526+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46527+ pax_flags |= MF_PAX_SEGMEXEC;
46528+#endif
46529+
46530+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46531+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46532+ if (nx_enabled)
46533+ pax_flags &= ~MF_PAX_SEGMEXEC;
46534+ else
46535+ pax_flags &= ~MF_PAX_PAGEEXEC;
46536+ }
46537+#endif
46538+
46539+#ifdef CONFIG_PAX_EMUTRAMP
46540+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46541+ pax_flags |= MF_PAX_EMUTRAMP;
46542+#endif
46543+
46544+#ifdef CONFIG_PAX_MPROTECT
46545+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46546+ pax_flags |= MF_PAX_MPROTECT;
46547+#endif
46548+
46549+#ifdef CONFIG_PAX_ASLR
46550+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46551+ pax_flags |= MF_PAX_RANDMMAP;
46552+#endif
46553+
46554+#else
46555+
46556+#ifdef CONFIG_PAX_PAGEEXEC
46557+ pax_flags |= MF_PAX_PAGEEXEC;
46558+#endif
46559+
46560+#ifdef CONFIG_PAX_MPROTECT
46561+ pax_flags |= MF_PAX_MPROTECT;
46562+#endif
46563+
46564+#ifdef CONFIG_PAX_RANDMMAP
46565+ pax_flags |= MF_PAX_RANDMMAP;
46566+#endif
46567+
46568+#ifdef CONFIG_PAX_SEGMEXEC
46569+ if (!(__supported_pte_mask & _PAGE_NX)) {
46570+ pax_flags &= ~MF_PAX_PAGEEXEC;
46571+ pax_flags |= MF_PAX_SEGMEXEC;
46572+ }
46573+#endif
46574+
46575+#endif
46576+
46577+ return pax_flags;
46578+}
46579+
46580+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46581+{
46582+
46583+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46584+ unsigned long i;
46585+
46586+ for (i = 0UL; i < elf_ex->e_phnum; i++)
46587+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46588+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46589+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46590+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46591+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46592+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46593+ return ~0UL;
46594+
46595+#ifdef CONFIG_PAX_SOFTMODE
46596+ if (pax_softmode)
46597+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46598+ else
46599+#endif
46600+
46601+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46602+ break;
46603+ }
46604+#endif
46605+
46606+ return ~0UL;
46607+}
46608+
46609+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46610+{
46611+ unsigned long pax_flags = 0UL;
46612+
46613+#ifdef CONFIG_PAX_PAGEEXEC
46614+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46615+ pax_flags |= MF_PAX_PAGEEXEC;
46616+#endif
46617+
46618+#ifdef CONFIG_PAX_SEGMEXEC
46619+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46620+ pax_flags |= MF_PAX_SEGMEXEC;
46621+#endif
46622+
46623+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46624+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46625+ if ((__supported_pte_mask & _PAGE_NX))
46626+ pax_flags &= ~MF_PAX_SEGMEXEC;
46627+ else
46628+ pax_flags &= ~MF_PAX_PAGEEXEC;
46629+ }
46630+#endif
46631+
46632+#ifdef CONFIG_PAX_EMUTRAMP
46633+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46634+ pax_flags |= MF_PAX_EMUTRAMP;
46635+#endif
46636+
46637+#ifdef CONFIG_PAX_MPROTECT
46638+ if (pax_flags_softmode & MF_PAX_MPROTECT)
46639+ pax_flags |= MF_PAX_MPROTECT;
46640+#endif
46641+
46642+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46643+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46644+ pax_flags |= MF_PAX_RANDMMAP;
46645+#endif
46646+
46647+ return pax_flags;
46648+}
46649+
46650+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46651+{
46652+ unsigned long pax_flags = 0UL;
46653+
46654+#ifdef CONFIG_PAX_PAGEEXEC
46655+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46656+ pax_flags |= MF_PAX_PAGEEXEC;
46657+#endif
46658+
46659+#ifdef CONFIG_PAX_SEGMEXEC
46660+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46661+ pax_flags |= MF_PAX_SEGMEXEC;
46662+#endif
46663+
46664+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46665+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46666+ if ((__supported_pte_mask & _PAGE_NX))
46667+ pax_flags &= ~MF_PAX_SEGMEXEC;
46668+ else
46669+ pax_flags &= ~MF_PAX_PAGEEXEC;
46670+ }
46671+#endif
46672+
46673+#ifdef CONFIG_PAX_EMUTRAMP
46674+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46675+ pax_flags |= MF_PAX_EMUTRAMP;
46676+#endif
46677+
46678+#ifdef CONFIG_PAX_MPROTECT
46679+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46680+ pax_flags |= MF_PAX_MPROTECT;
46681+#endif
46682+
46683+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46684+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46685+ pax_flags |= MF_PAX_RANDMMAP;
46686+#endif
46687+
46688+ return pax_flags;
46689+}
46690+
46691+static unsigned long pax_parse_xattr_pax(struct file * const file)
46692+{
46693+
46694+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46695+ ssize_t xattr_size, i;
46696+ unsigned char xattr_value[5];
46697+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46698+
46699+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46700+ if (xattr_size <= 0)
46701+ return ~0UL;
46702+
46703+ for (i = 0; i < xattr_size; i++)
46704+ switch (xattr_value[i]) {
46705+ default:
46706+ return ~0UL;
46707+
46708+#define parse_flag(option1, option2, flag) \
46709+ case option1: \
46710+ pax_flags_hardmode |= MF_PAX_##flag; \
46711+ break; \
46712+ case option2: \
46713+ pax_flags_softmode |= MF_PAX_##flag; \
46714+ break;
46715+
46716+ parse_flag('p', 'P', PAGEEXEC);
46717+ parse_flag('e', 'E', EMUTRAMP);
46718+ parse_flag('m', 'M', MPROTECT);
46719+ parse_flag('r', 'R', RANDMMAP);
46720+ parse_flag('s', 'S', SEGMEXEC);
46721+
46722+#undef parse_flag
46723+ }
46724+
46725+ if (pax_flags_hardmode & pax_flags_softmode)
46726+ return ~0UL;
46727+
46728+#ifdef CONFIG_PAX_SOFTMODE
46729+ if (pax_softmode)
46730+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
46731+ else
46732+#endif
46733+
46734+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
46735+#else
46736+ return ~0UL;
46737+#endif
46738+}
46739+
46740+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46741+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
46742+{
46743+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
46744+
46745+ pax_flags = pax_parse_ei_pax(elf_ex);
46746+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
46747+ xattr_pax_flags = pax_parse_xattr_pax(file);
46748+
46749+ if (pt_pax_flags == ~0UL)
46750+ pt_pax_flags = xattr_pax_flags;
46751+ else if (xattr_pax_flags == ~0UL)
46752+ xattr_pax_flags = pt_pax_flags;
46753+ if (pt_pax_flags != xattr_pax_flags)
46754+ return -EINVAL;
46755+ if (pt_pax_flags != ~0UL)
46756+ pax_flags = pt_pax_flags;
46757+
46758+ if (0 > pax_check_flags(&pax_flags))
46759+ return -EINVAL;
46760+
46761+ current->mm->pax_flags = pax_flags;
46762+ return 0;
46763+}
46764+#endif
46765+
46766 /*
46767 * These are the functions used to load ELF style executables and shared
46768 * libraries. There is no binary dependent code anywhere else.
46769@@ -548,6 +916,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
46770 {
46771 unsigned int random_variable = 0;
46772
46773+#ifdef CONFIG_PAX_RANDUSTACK
46774+ if (randomize_va_space)
46775+ return stack_top - current->mm->delta_stack;
46776+#endif
46777+
46778 if ((current->flags & PF_RANDOMIZE) &&
46779 !(current->personality & ADDR_NO_RANDOMIZE)) {
46780 random_variable = get_random_int() & STACK_RND_MASK;
46781@@ -566,7 +939,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46782 unsigned long load_addr = 0, load_bias = 0;
46783 int load_addr_set = 0;
46784 char * elf_interpreter = NULL;
46785- unsigned long error;
46786+ unsigned long error = 0;
46787 struct elf_phdr *elf_ppnt, *elf_phdata;
46788 unsigned long elf_bss, elf_brk;
46789 int retval, i;
46790@@ -576,11 +949,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46791 unsigned long start_code, end_code, start_data, end_data;
46792 unsigned long reloc_func_desc = 0;
46793 int executable_stack = EXSTACK_DEFAULT;
46794- unsigned long def_flags = 0;
46795 struct {
46796 struct elfhdr elf_ex;
46797 struct elfhdr interp_elf_ex;
46798 } *loc;
46799+ unsigned long pax_task_size = TASK_SIZE;
46800
46801 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
46802 if (!loc) {
46803@@ -718,11 +1091,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46804
46805 /* OK, This is the point of no return */
46806 current->flags &= ~PF_FORKNOEXEC;
46807- current->mm->def_flags = def_flags;
46808+
46809+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46810+ current->mm->pax_flags = 0UL;
46811+#endif
46812+
46813+#ifdef CONFIG_PAX_DLRESOLVE
46814+ current->mm->call_dl_resolve = 0UL;
46815+#endif
46816+
46817+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
46818+ current->mm->call_syscall = 0UL;
46819+#endif
46820+
46821+#ifdef CONFIG_PAX_ASLR
46822+ current->mm->delta_mmap = 0UL;
46823+ current->mm->delta_stack = 0UL;
46824+#endif
46825+
46826+ current->mm->def_flags = 0;
46827+
46828+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46829+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
46830+ send_sig(SIGKILL, current, 0);
46831+ goto out_free_dentry;
46832+ }
46833+#endif
46834+
46835+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46836+ pax_set_initial_flags(bprm);
46837+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
46838+ if (pax_set_initial_flags_func)
46839+ (pax_set_initial_flags_func)(bprm);
46840+#endif
46841+
46842+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46843+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
46844+ current->mm->context.user_cs_limit = PAGE_SIZE;
46845+ current->mm->def_flags |= VM_PAGEEXEC;
46846+ }
46847+#endif
46848+
46849+#ifdef CONFIG_PAX_SEGMEXEC
46850+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
46851+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
46852+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
46853+ pax_task_size = SEGMEXEC_TASK_SIZE;
46854+ }
46855+#endif
46856+
46857+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
46858+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46859+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
46860+ put_cpu();
46861+ }
46862+#endif
46863
46864 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
46865 may depend on the personality. */
46866 SET_PERSONALITY(loc->elf_ex);
46867+
46868+#ifdef CONFIG_PAX_ASLR
46869+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
46870+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
46871+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
46872+ }
46873+#endif
46874+
46875+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46876+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46877+ executable_stack = EXSTACK_DISABLE_X;
46878+ current->personality &= ~READ_IMPLIES_EXEC;
46879+ } else
46880+#endif
46881+
46882 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
46883 current->personality |= READ_IMPLIES_EXEC;
46884
46885@@ -800,10 +1242,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46886 * might try to exec. This is because the brk will
46887 * follow the loader, and is not movable. */
46888 #ifdef CONFIG_X86
46889- load_bias = 0;
46890+ if (current->flags & PF_RANDOMIZE)
46891+ load_bias = 0;
46892+ else
46893+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46894 #else
46895 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46896 #endif
46897+
46898+#ifdef CONFIG_PAX_RANDMMAP
46899+ /* PaX: randomize base address at the default exe base if requested */
46900+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
46901+#ifdef CONFIG_SPARC64
46902+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
46903+#else
46904+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
46905+#endif
46906+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
46907+ elf_flags |= MAP_FIXED;
46908+ }
46909+#endif
46910+
46911 }
46912
46913 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
46914@@ -836,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46915 * allowed task size. Note that p_filesz must always be
46916 * <= p_memsz so it is only necessary to check p_memsz.
46917 */
46918- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46919- elf_ppnt->p_memsz > TASK_SIZE ||
46920- TASK_SIZE - elf_ppnt->p_memsz < k) {
46921+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46922+ elf_ppnt->p_memsz > pax_task_size ||
46923+ pax_task_size - elf_ppnt->p_memsz < k) {
46924 /* set_brk can never work. Avoid overflows. */
46925 send_sig(SIGKILL, current, 0);
46926 retval = -EINVAL;
46927@@ -866,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46928 start_data += load_bias;
46929 end_data += load_bias;
46930
46931+#ifdef CONFIG_PAX_RANDMMAP
46932+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
46933+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
46934+#endif
46935+
46936 /* Calling set_brk effectively mmaps the pages that we need
46937 * for the bss and break sections. We must do this before
46938 * mapping in the interpreter, to make sure it doesn't wind
46939@@ -877,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46940 goto out_free_dentry;
46941 }
46942 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
46943- send_sig(SIGSEGV, current, 0);
46944- retval = -EFAULT; /* Nobody gets to see this, but.. */
46945- goto out_free_dentry;
46946+ /*
46947+ * This bss-zeroing can fail if the ELF
46948+ * file specifies odd protections. So
46949+ * we don't check the return value
46950+ */
46951 }
46952
46953 if (elf_interpreter) {
46954@@ -1112,8 +1578,10 @@ static int dump_seek(struct file *file, loff_t off)
46955 unsigned long n = off;
46956 if (n > PAGE_SIZE)
46957 n = PAGE_SIZE;
46958- if (!dump_write(file, buf, n))
46959+ if (!dump_write(file, buf, n)) {
46960+ free_page((unsigned long)buf);
46961 return 0;
46962+ }
46963 off -= n;
46964 }
46965 free_page((unsigned long)buf);
46966@@ -1125,7 +1593,7 @@ static int dump_seek(struct file *file, loff_t off)
46967 * Decide what to dump of a segment, part, all or none.
46968 */
46969 static unsigned long vma_dump_size(struct vm_area_struct *vma,
46970- unsigned long mm_flags)
46971+ unsigned long mm_flags, long signr)
46972 {
46973 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
46974
46975@@ -1159,7 +1627,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
46976 if (vma->vm_file == NULL)
46977 return 0;
46978
46979- if (FILTER(MAPPED_PRIVATE))
46980+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
46981 goto whole;
46982
46983 /*
46984@@ -1255,8 +1723,11 @@ static int writenote(struct memelfnote *men, struct file *file,
46985 #undef DUMP_WRITE
46986
46987 #define DUMP_WRITE(addr, nr) \
46988+ do { \
46989+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
46990 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
46991- goto end_coredump;
46992+ goto end_coredump; \
46993+ } while (0);
46994
46995 static void fill_elf_header(struct elfhdr *elf, int segs,
46996 u16 machine, u32 flags, u8 osabi)
46997@@ -1385,9 +1856,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
46998 {
46999 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47000 int i = 0;
47001- do
47002+ do {
47003 i += 2;
47004- while (auxv[i - 2] != AT_NULL);
47005+ } while (auxv[i - 2] != AT_NULL);
47006 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47007 }
47008
47009@@ -1973,7 +2444,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47010 phdr.p_offset = offset;
47011 phdr.p_vaddr = vma->vm_start;
47012 phdr.p_paddr = 0;
47013- phdr.p_filesz = vma_dump_size(vma, mm_flags);
47014+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47015 phdr.p_memsz = vma->vm_end - vma->vm_start;
47016 offset += phdr.p_filesz;
47017 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47018@@ -2006,7 +2477,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47019 unsigned long addr;
47020 unsigned long end;
47021
47022- end = vma->vm_start + vma_dump_size(vma, mm_flags);
47023+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47024
47025 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47026 struct page *page;
47027@@ -2015,6 +2486,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47028 page = get_dump_page(addr);
47029 if (page) {
47030 void *kaddr = kmap(page);
47031+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47032 stop = ((size += PAGE_SIZE) > limit) ||
47033 !dump_write(file, kaddr, PAGE_SIZE);
47034 kunmap(page);
47035@@ -2042,6 +2514,97 @@ out:
47036
47037 #endif /* USE_ELF_CORE_DUMP */
47038
47039+#ifdef CONFIG_PAX_MPROTECT
47040+/* PaX: non-PIC ELF libraries need relocations on their executable segments
47041+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47042+ * we'll remove VM_MAYWRITE for good on RELRO segments.
47043+ *
47044+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47045+ * basis because we want to allow the common case and not the special ones.
47046+ */
47047+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47048+{
47049+ struct elfhdr elf_h;
47050+ struct elf_phdr elf_p;
47051+ unsigned long i;
47052+ unsigned long oldflags;
47053+ bool is_textrel_rw, is_textrel_rx, is_relro;
47054+
47055+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47056+ return;
47057+
47058+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47059+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47060+
47061+#ifdef CONFIG_PAX_ELFRELOCS
47062+ /* possible TEXTREL */
47063+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47064+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47065+#else
47066+ is_textrel_rw = false;
47067+ is_textrel_rx = false;
47068+#endif
47069+
47070+ /* possible RELRO */
47071+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47072+
47073+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47074+ return;
47075+
47076+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47077+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47078+
47079+#ifdef CONFIG_PAX_ETEXECRELOCS
47080+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47081+#else
47082+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47083+#endif
47084+
47085+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47086+ !elf_check_arch(&elf_h) ||
47087+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47088+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47089+ return;
47090+
47091+ for (i = 0UL; i < elf_h.e_phnum; i++) {
47092+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47093+ return;
47094+ switch (elf_p.p_type) {
47095+ case PT_DYNAMIC:
47096+ if (!is_textrel_rw && !is_textrel_rx)
47097+ continue;
47098+ i = 0UL;
47099+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47100+ elf_dyn dyn;
47101+
47102+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47103+ return;
47104+ if (dyn.d_tag == DT_NULL)
47105+ return;
47106+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47107+ gr_log_textrel(vma);
47108+ if (is_textrel_rw)
47109+ vma->vm_flags |= VM_MAYWRITE;
47110+ else
47111+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47112+ vma->vm_flags &= ~VM_MAYWRITE;
47113+ return;
47114+ }
47115+ i++;
47116+ }
47117+ return;
47118+
47119+ case PT_GNU_RELRO:
47120+ if (!is_relro)
47121+ continue;
47122+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47123+ vma->vm_flags &= ~VM_MAYWRITE;
47124+ return;
47125+ }
47126+ }
47127+}
47128+#endif
47129+
47130 static int __init init_elf_binfmt(void)
47131 {
47132 return register_binfmt(&elf_format);
47133diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47134index ca88c46..f155a60 100644
47135--- a/fs/binfmt_flat.c
47136+++ b/fs/binfmt_flat.c
47137@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47138 realdatastart = (unsigned long) -ENOMEM;
47139 printk("Unable to allocate RAM for process data, errno %d\n",
47140 (int)-realdatastart);
47141+ down_write(&current->mm->mmap_sem);
47142 do_munmap(current->mm, textpos, text_len);
47143+ up_write(&current->mm->mmap_sem);
47144 ret = realdatastart;
47145 goto err;
47146 }
47147@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47148 }
47149 if (IS_ERR_VALUE(result)) {
47150 printk("Unable to read data+bss, errno %d\n", (int)-result);
47151+ down_write(&current->mm->mmap_sem);
47152 do_munmap(current->mm, textpos, text_len);
47153 do_munmap(current->mm, realdatastart, data_len + extra);
47154+ up_write(&current->mm->mmap_sem);
47155 ret = result;
47156 goto err;
47157 }
47158@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47159 }
47160 if (IS_ERR_VALUE(result)) {
47161 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47162+ down_write(&current->mm->mmap_sem);
47163 do_munmap(current->mm, textpos, text_len + data_len + extra +
47164 MAX_SHARED_LIBS * sizeof(unsigned long));
47165+ up_write(&current->mm->mmap_sem);
47166 ret = result;
47167 goto err;
47168 }
47169diff --git a/fs/bio.c b/fs/bio.c
47170index e696713..83de133 100644
47171--- a/fs/bio.c
47172+++ b/fs/bio.c
47173@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47174
47175 i = 0;
47176 while (i < bio_slab_nr) {
47177- struct bio_slab *bslab = &bio_slabs[i];
47178+ bslab = &bio_slabs[i];
47179
47180 if (!bslab->slab && entry == -1)
47181 entry = i;
47182@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47183 const int read = bio_data_dir(bio) == READ;
47184 struct bio_map_data *bmd = bio->bi_private;
47185 int i;
47186- char *p = bmd->sgvecs[0].iov_base;
47187+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47188
47189 __bio_for_each_segment(bvec, bio, i, 0) {
47190 char *addr = page_address(bvec->bv_page);
47191diff --git a/fs/block_dev.c b/fs/block_dev.c
47192index e65efa2..04fae57 100644
47193--- a/fs/block_dev.c
47194+++ b/fs/block_dev.c
47195@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47196 else if (bdev->bd_contains == bdev)
47197 res = 0; /* is a whole device which isn't held */
47198
47199- else if (bdev->bd_contains->bd_holder == bd_claim)
47200+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47201 res = 0; /* is a partition of a device that is being partitioned */
47202 else if (bdev->bd_contains->bd_holder != NULL)
47203 res = -EBUSY; /* is a partition of a held device */
47204diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47205index c4bc570..42acd8d 100644
47206--- a/fs/btrfs/ctree.c
47207+++ b/fs/btrfs/ctree.c
47208@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47209 free_extent_buffer(buf);
47210 add_root_to_dirty_list(root);
47211 } else {
47212- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47213- parent_start = parent->start;
47214- else
47215+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47216+ if (parent)
47217+ parent_start = parent->start;
47218+ else
47219+ parent_start = 0;
47220+ } else
47221 parent_start = 0;
47222
47223 WARN_ON(trans->transid != btrfs_header_generation(parent));
47224@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47225
47226 ret = 0;
47227 if (slot == 0) {
47228- struct btrfs_disk_key disk_key;
47229 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47230 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47231 }
47232diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47233index f447188..59c17c5 100644
47234--- a/fs/btrfs/disk-io.c
47235+++ b/fs/btrfs/disk-io.c
47236@@ -39,7 +39,7 @@
47237 #include "tree-log.h"
47238 #include "free-space-cache.h"
47239
47240-static struct extent_io_ops btree_extent_io_ops;
47241+static const struct extent_io_ops btree_extent_io_ops;
47242 static void end_workqueue_fn(struct btrfs_work *work);
47243 static void free_fs_root(struct btrfs_root *root);
47244
47245@@ -2607,7 +2607,7 @@ out:
47246 return 0;
47247 }
47248
47249-static struct extent_io_ops btree_extent_io_ops = {
47250+static const struct extent_io_ops btree_extent_io_ops = {
47251 .write_cache_pages_lock_hook = btree_lock_page_hook,
47252 .readpage_end_io_hook = btree_readpage_end_io_hook,
47253 .submit_bio_hook = btree_submit_bio_hook,
47254diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47255index 559f724..a026171 100644
47256--- a/fs/btrfs/extent-tree.c
47257+++ b/fs/btrfs/extent-tree.c
47258@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47259 u64 group_start = group->key.objectid;
47260 new_extents = kmalloc(sizeof(*new_extents),
47261 GFP_NOFS);
47262+ if (!new_extents) {
47263+ ret = -ENOMEM;
47264+ goto out;
47265+ }
47266 nr_extents = 1;
47267 ret = get_new_locations(reloc_inode,
47268 extent_key,
47269diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47270index 36de250..7ec75c7 100644
47271--- a/fs/btrfs/extent_io.h
47272+++ b/fs/btrfs/extent_io.h
47273@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47274 struct bio *bio, int mirror_num,
47275 unsigned long bio_flags);
47276 struct extent_io_ops {
47277- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47278+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47279 u64 start, u64 end, int *page_started,
47280 unsigned long *nr_written);
47281- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47282- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47283+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47284+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47285 extent_submit_bio_hook_t *submit_bio_hook;
47286- int (*merge_bio_hook)(struct page *page, unsigned long offset,
47287+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47288 size_t size, struct bio *bio,
47289 unsigned long bio_flags);
47290- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47291- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47292+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47293+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47294 u64 start, u64 end,
47295 struct extent_state *state);
47296- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47297+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47298 u64 start, u64 end,
47299 struct extent_state *state);
47300- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47301+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47302 struct extent_state *state);
47303- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47304+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47305 struct extent_state *state, int uptodate);
47306- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47307+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47308 unsigned long old, unsigned long bits);
47309- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47310+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47311 unsigned long bits);
47312- int (*merge_extent_hook)(struct inode *inode,
47313+ int (* const merge_extent_hook)(struct inode *inode,
47314 struct extent_state *new,
47315 struct extent_state *other);
47316- int (*split_extent_hook)(struct inode *inode,
47317+ int (* const split_extent_hook)(struct inode *inode,
47318 struct extent_state *orig, u64 split);
47319- int (*write_cache_pages_lock_hook)(struct page *page);
47320+ int (* const write_cache_pages_lock_hook)(struct page *page);
47321 };
47322
47323 struct extent_io_tree {
47324@@ -88,7 +88,7 @@ struct extent_io_tree {
47325 u64 dirty_bytes;
47326 spinlock_t lock;
47327 spinlock_t buffer_lock;
47328- struct extent_io_ops *ops;
47329+ const struct extent_io_ops *ops;
47330 };
47331
47332 struct extent_state {
47333diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47334index cb2849f..3718fb4 100644
47335--- a/fs/btrfs/free-space-cache.c
47336+++ b/fs/btrfs/free-space-cache.c
47337@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47338
47339 while(1) {
47340 if (entry->bytes < bytes || entry->offset < min_start) {
47341- struct rb_node *node;
47342-
47343 node = rb_next(&entry->offset_index);
47344 if (!node)
47345 break;
47346@@ -1226,7 +1224,7 @@ again:
47347 */
47348 while (entry->bitmap || found_bitmap ||
47349 (!entry->bitmap && entry->bytes < min_bytes)) {
47350- struct rb_node *node = rb_next(&entry->offset_index);
47351+ node = rb_next(&entry->offset_index);
47352
47353 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47354 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47355diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47356index e03a836..323837e 100644
47357--- a/fs/btrfs/inode.c
47358+++ b/fs/btrfs/inode.c
47359@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47360 static const struct address_space_operations btrfs_aops;
47361 static const struct address_space_operations btrfs_symlink_aops;
47362 static const struct file_operations btrfs_dir_file_operations;
47363-static struct extent_io_ops btrfs_extent_io_ops;
47364+static const struct extent_io_ops btrfs_extent_io_ops;
47365
47366 static struct kmem_cache *btrfs_inode_cachep;
47367 struct kmem_cache *btrfs_trans_handle_cachep;
47368@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47369 1, 0, NULL, GFP_NOFS);
47370 while (start < end) {
47371 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47372+ BUG_ON(!async_cow);
47373 async_cow->inode = inode;
47374 async_cow->root = root;
47375 async_cow->locked_page = locked_page;
47376@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47377 inline_size = btrfs_file_extent_inline_item_len(leaf,
47378 btrfs_item_nr(leaf, path->slots[0]));
47379 tmp = kmalloc(inline_size, GFP_NOFS);
47380+ if (!tmp)
47381+ return -ENOMEM;
47382 ptr = btrfs_file_extent_inline_start(item);
47383
47384 read_extent_buffer(leaf, tmp, ptr, inline_size);
47385@@ -5410,7 +5413,7 @@ fail:
47386 return -ENOMEM;
47387 }
47388
47389-static int btrfs_getattr(struct vfsmount *mnt,
47390+int btrfs_getattr(struct vfsmount *mnt,
47391 struct dentry *dentry, struct kstat *stat)
47392 {
47393 struct inode *inode = dentry->d_inode;
47394@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47395 return 0;
47396 }
47397
47398+EXPORT_SYMBOL(btrfs_getattr);
47399+
47400+dev_t get_btrfs_dev_from_inode(struct inode *inode)
47401+{
47402+ return BTRFS_I(inode)->root->anon_super.s_dev;
47403+}
47404+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47405+
47406 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47407 struct inode *new_dir, struct dentry *new_dentry)
47408 {
47409@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47410 .fsync = btrfs_sync_file,
47411 };
47412
47413-static struct extent_io_ops btrfs_extent_io_ops = {
47414+static const struct extent_io_ops btrfs_extent_io_ops = {
47415 .fill_delalloc = run_delalloc_range,
47416 .submit_bio_hook = btrfs_submit_bio_hook,
47417 .merge_bio_hook = btrfs_merge_bio_hook,
47418diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47419index ab7ab53..94e0781 100644
47420--- a/fs/btrfs/relocation.c
47421+++ b/fs/btrfs/relocation.c
47422@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47423 }
47424 spin_unlock(&rc->reloc_root_tree.lock);
47425
47426- BUG_ON((struct btrfs_root *)node->data != root);
47427+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
47428
47429 if (!del) {
47430 spin_lock(&rc->reloc_root_tree.lock);
47431diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47432index a240b6f..4ce16ef 100644
47433--- a/fs/btrfs/sysfs.c
47434+++ b/fs/btrfs/sysfs.c
47435@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47436 complete(&root->kobj_unregister);
47437 }
47438
47439-static struct sysfs_ops btrfs_super_attr_ops = {
47440+static const struct sysfs_ops btrfs_super_attr_ops = {
47441 .show = btrfs_super_attr_show,
47442 .store = btrfs_super_attr_store,
47443 };
47444
47445-static struct sysfs_ops btrfs_root_attr_ops = {
47446+static const struct sysfs_ops btrfs_root_attr_ops = {
47447 .show = btrfs_root_attr_show,
47448 .store = btrfs_root_attr_store,
47449 };
47450diff --git a/fs/buffer.c b/fs/buffer.c
47451index 6fa5302..395d9f6 100644
47452--- a/fs/buffer.c
47453+++ b/fs/buffer.c
47454@@ -25,6 +25,7 @@
47455 #include <linux/percpu.h>
47456 #include <linux/slab.h>
47457 #include <linux/capability.h>
47458+#include <linux/security.h>
47459 #include <linux/blkdev.h>
47460 #include <linux/file.h>
47461 #include <linux/quotaops.h>
47462diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47463index 3797e00..ce776f6 100644
47464--- a/fs/cachefiles/bind.c
47465+++ b/fs/cachefiles/bind.c
47466@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47467 args);
47468
47469 /* start by checking things over */
47470- ASSERT(cache->fstop_percent >= 0 &&
47471- cache->fstop_percent < cache->fcull_percent &&
47472+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
47473 cache->fcull_percent < cache->frun_percent &&
47474 cache->frun_percent < 100);
47475
47476- ASSERT(cache->bstop_percent >= 0 &&
47477- cache->bstop_percent < cache->bcull_percent &&
47478+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
47479 cache->bcull_percent < cache->brun_percent &&
47480 cache->brun_percent < 100);
47481
47482diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47483index 4618516..bb30d01 100644
47484--- a/fs/cachefiles/daemon.c
47485+++ b/fs/cachefiles/daemon.c
47486@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47487 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47488 return -EIO;
47489
47490- if (datalen < 0 || datalen > PAGE_SIZE - 1)
47491+ if (datalen > PAGE_SIZE - 1)
47492 return -EOPNOTSUPP;
47493
47494 /* drag the command string into the kernel so we can parse it */
47495@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47496 if (args[0] != '%' || args[1] != '\0')
47497 return -EINVAL;
47498
47499- if (fstop < 0 || fstop >= cache->fcull_percent)
47500+ if (fstop >= cache->fcull_percent)
47501 return cachefiles_daemon_range_error(cache, args);
47502
47503 cache->fstop_percent = fstop;
47504@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47505 if (args[0] != '%' || args[1] != '\0')
47506 return -EINVAL;
47507
47508- if (bstop < 0 || bstop >= cache->bcull_percent)
47509+ if (bstop >= cache->bcull_percent)
47510 return cachefiles_daemon_range_error(cache, args);
47511
47512 cache->bstop_percent = bstop;
47513diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47514index f7c255f..fcd61de 100644
47515--- a/fs/cachefiles/internal.h
47516+++ b/fs/cachefiles/internal.h
47517@@ -56,7 +56,7 @@ struct cachefiles_cache {
47518 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47519 struct rb_root active_nodes; /* active nodes (can't be culled) */
47520 rwlock_t active_lock; /* lock for active_nodes */
47521- atomic_t gravecounter; /* graveyard uniquifier */
47522+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47523 unsigned frun_percent; /* when to stop culling (% files) */
47524 unsigned fcull_percent; /* when to start culling (% files) */
47525 unsigned fstop_percent; /* when to stop allocating (% files) */
47526@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47527 * proc.c
47528 */
47529 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47530-extern atomic_t cachefiles_lookup_histogram[HZ];
47531-extern atomic_t cachefiles_mkdir_histogram[HZ];
47532-extern atomic_t cachefiles_create_histogram[HZ];
47533+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47534+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47535+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47536
47537 extern int __init cachefiles_proc_init(void);
47538 extern void cachefiles_proc_cleanup(void);
47539 static inline
47540-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47541+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47542 {
47543 unsigned long jif = jiffies - start_jif;
47544 if (jif >= HZ)
47545 jif = HZ - 1;
47546- atomic_inc(&histogram[jif]);
47547+ atomic_inc_unchecked(&histogram[jif]);
47548 }
47549
47550 #else
47551diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47552index 14ac480..a62766c 100644
47553--- a/fs/cachefiles/namei.c
47554+++ b/fs/cachefiles/namei.c
47555@@ -250,7 +250,7 @@ try_again:
47556 /* first step is to make up a grave dentry in the graveyard */
47557 sprintf(nbuffer, "%08x%08x",
47558 (uint32_t) get_seconds(),
47559- (uint32_t) atomic_inc_return(&cache->gravecounter));
47560+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47561
47562 /* do the multiway lock magic */
47563 trap = lock_rename(cache->graveyard, dir);
47564diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47565index eccd339..4c1d995 100644
47566--- a/fs/cachefiles/proc.c
47567+++ b/fs/cachefiles/proc.c
47568@@ -14,9 +14,9 @@
47569 #include <linux/seq_file.h>
47570 #include "internal.h"
47571
47572-atomic_t cachefiles_lookup_histogram[HZ];
47573-atomic_t cachefiles_mkdir_histogram[HZ];
47574-atomic_t cachefiles_create_histogram[HZ];
47575+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47576+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47577+atomic_unchecked_t cachefiles_create_histogram[HZ];
47578
47579 /*
47580 * display the latency histogram
47581@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47582 return 0;
47583 default:
47584 index = (unsigned long) v - 3;
47585- x = atomic_read(&cachefiles_lookup_histogram[index]);
47586- y = atomic_read(&cachefiles_mkdir_histogram[index]);
47587- z = atomic_read(&cachefiles_create_histogram[index]);
47588+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47589+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47590+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47591 if (x == 0 && y == 0 && z == 0)
47592 return 0;
47593
47594diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47595index a6c8c6f..5cf8517 100644
47596--- a/fs/cachefiles/rdwr.c
47597+++ b/fs/cachefiles/rdwr.c
47598@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47599 old_fs = get_fs();
47600 set_fs(KERNEL_DS);
47601 ret = file->f_op->write(
47602- file, (const void __user *) data, len, &pos);
47603+ file, (const void __force_user *) data, len, &pos);
47604 set_fs(old_fs);
47605 kunmap(page);
47606 if (ret != len)
47607diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47608index 42cec2a..2aba466 100644
47609--- a/fs/cifs/cifs_debug.c
47610+++ b/fs/cifs/cifs_debug.c
47611@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47612 tcon = list_entry(tmp3,
47613 struct cifsTconInfo,
47614 tcon_list);
47615- atomic_set(&tcon->num_smbs_sent, 0);
47616- atomic_set(&tcon->num_writes, 0);
47617- atomic_set(&tcon->num_reads, 0);
47618- atomic_set(&tcon->num_oplock_brks, 0);
47619- atomic_set(&tcon->num_opens, 0);
47620- atomic_set(&tcon->num_posixopens, 0);
47621- atomic_set(&tcon->num_posixmkdirs, 0);
47622- atomic_set(&tcon->num_closes, 0);
47623- atomic_set(&tcon->num_deletes, 0);
47624- atomic_set(&tcon->num_mkdirs, 0);
47625- atomic_set(&tcon->num_rmdirs, 0);
47626- atomic_set(&tcon->num_renames, 0);
47627- atomic_set(&tcon->num_t2renames, 0);
47628- atomic_set(&tcon->num_ffirst, 0);
47629- atomic_set(&tcon->num_fnext, 0);
47630- atomic_set(&tcon->num_fclose, 0);
47631- atomic_set(&tcon->num_hardlinks, 0);
47632- atomic_set(&tcon->num_symlinks, 0);
47633- atomic_set(&tcon->num_locks, 0);
47634+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47635+ atomic_set_unchecked(&tcon->num_writes, 0);
47636+ atomic_set_unchecked(&tcon->num_reads, 0);
47637+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47638+ atomic_set_unchecked(&tcon->num_opens, 0);
47639+ atomic_set_unchecked(&tcon->num_posixopens, 0);
47640+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47641+ atomic_set_unchecked(&tcon->num_closes, 0);
47642+ atomic_set_unchecked(&tcon->num_deletes, 0);
47643+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
47644+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
47645+ atomic_set_unchecked(&tcon->num_renames, 0);
47646+ atomic_set_unchecked(&tcon->num_t2renames, 0);
47647+ atomic_set_unchecked(&tcon->num_ffirst, 0);
47648+ atomic_set_unchecked(&tcon->num_fnext, 0);
47649+ atomic_set_unchecked(&tcon->num_fclose, 0);
47650+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
47651+ atomic_set_unchecked(&tcon->num_symlinks, 0);
47652+ atomic_set_unchecked(&tcon->num_locks, 0);
47653 }
47654 }
47655 }
47656@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47657 if (tcon->need_reconnect)
47658 seq_puts(m, "\tDISCONNECTED ");
47659 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47660- atomic_read(&tcon->num_smbs_sent),
47661- atomic_read(&tcon->num_oplock_brks));
47662+ atomic_read_unchecked(&tcon->num_smbs_sent),
47663+ atomic_read_unchecked(&tcon->num_oplock_brks));
47664 seq_printf(m, "\nReads: %d Bytes: %lld",
47665- atomic_read(&tcon->num_reads),
47666+ atomic_read_unchecked(&tcon->num_reads),
47667 (long long)(tcon->bytes_read));
47668 seq_printf(m, "\nWrites: %d Bytes: %lld",
47669- atomic_read(&tcon->num_writes),
47670+ atomic_read_unchecked(&tcon->num_writes),
47671 (long long)(tcon->bytes_written));
47672 seq_printf(m, "\nFlushes: %d",
47673- atomic_read(&tcon->num_flushes));
47674+ atomic_read_unchecked(&tcon->num_flushes));
47675 seq_printf(m, "\nLocks: %d HardLinks: %d "
47676 "Symlinks: %d",
47677- atomic_read(&tcon->num_locks),
47678- atomic_read(&tcon->num_hardlinks),
47679- atomic_read(&tcon->num_symlinks));
47680+ atomic_read_unchecked(&tcon->num_locks),
47681+ atomic_read_unchecked(&tcon->num_hardlinks),
47682+ atomic_read_unchecked(&tcon->num_symlinks));
47683 seq_printf(m, "\nOpens: %d Closes: %d "
47684 "Deletes: %d",
47685- atomic_read(&tcon->num_opens),
47686- atomic_read(&tcon->num_closes),
47687- atomic_read(&tcon->num_deletes));
47688+ atomic_read_unchecked(&tcon->num_opens),
47689+ atomic_read_unchecked(&tcon->num_closes),
47690+ atomic_read_unchecked(&tcon->num_deletes));
47691 seq_printf(m, "\nPosix Opens: %d "
47692 "Posix Mkdirs: %d",
47693- atomic_read(&tcon->num_posixopens),
47694- atomic_read(&tcon->num_posixmkdirs));
47695+ atomic_read_unchecked(&tcon->num_posixopens),
47696+ atomic_read_unchecked(&tcon->num_posixmkdirs));
47697 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47698- atomic_read(&tcon->num_mkdirs),
47699- atomic_read(&tcon->num_rmdirs));
47700+ atomic_read_unchecked(&tcon->num_mkdirs),
47701+ atomic_read_unchecked(&tcon->num_rmdirs));
47702 seq_printf(m, "\nRenames: %d T2 Renames %d",
47703- atomic_read(&tcon->num_renames),
47704- atomic_read(&tcon->num_t2renames));
47705+ atomic_read_unchecked(&tcon->num_renames),
47706+ atomic_read_unchecked(&tcon->num_t2renames));
47707 seq_printf(m, "\nFindFirst: %d FNext %d "
47708 "FClose %d",
47709- atomic_read(&tcon->num_ffirst),
47710- atomic_read(&tcon->num_fnext),
47711- atomic_read(&tcon->num_fclose));
47712+ atomic_read_unchecked(&tcon->num_ffirst),
47713+ atomic_read_unchecked(&tcon->num_fnext),
47714+ atomic_read_unchecked(&tcon->num_fclose));
47715 }
47716 }
47717 }
47718diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47719index 1445407..68cb0dc 100644
47720--- a/fs/cifs/cifsfs.c
47721+++ b/fs/cifs/cifsfs.c
47722@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47723 cifs_req_cachep = kmem_cache_create("cifs_request",
47724 CIFSMaxBufSize +
47725 MAX_CIFS_HDR_SIZE, 0,
47726- SLAB_HWCACHE_ALIGN, NULL);
47727+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47728 if (cifs_req_cachep == NULL)
47729 return -ENOMEM;
47730
47731@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47732 efficient to alloc 1 per page off the slab compared to 17K (5page)
47733 alloc of large cifs buffers even when page debugging is on */
47734 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47735- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47736+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47737 NULL);
47738 if (cifs_sm_req_cachep == NULL) {
47739 mempool_destroy(cifs_req_poolp);
47740@@ -991,8 +991,8 @@ init_cifs(void)
47741 atomic_set(&bufAllocCount, 0);
47742 atomic_set(&smBufAllocCount, 0);
47743 #ifdef CONFIG_CIFS_STATS2
47744- atomic_set(&totBufAllocCount, 0);
47745- atomic_set(&totSmBufAllocCount, 0);
47746+ atomic_set_unchecked(&totBufAllocCount, 0);
47747+ atomic_set_unchecked(&totSmBufAllocCount, 0);
47748 #endif /* CONFIG_CIFS_STATS2 */
47749
47750 atomic_set(&midCount, 0);
47751diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
47752index e29581e..1c22bab 100644
47753--- a/fs/cifs/cifsglob.h
47754+++ b/fs/cifs/cifsglob.h
47755@@ -252,28 +252,28 @@ struct cifsTconInfo {
47756 __u16 Flags; /* optional support bits */
47757 enum statusEnum tidStatus;
47758 #ifdef CONFIG_CIFS_STATS
47759- atomic_t num_smbs_sent;
47760- atomic_t num_writes;
47761- atomic_t num_reads;
47762- atomic_t num_flushes;
47763- atomic_t num_oplock_brks;
47764- atomic_t num_opens;
47765- atomic_t num_closes;
47766- atomic_t num_deletes;
47767- atomic_t num_mkdirs;
47768- atomic_t num_posixopens;
47769- atomic_t num_posixmkdirs;
47770- atomic_t num_rmdirs;
47771- atomic_t num_renames;
47772- atomic_t num_t2renames;
47773- atomic_t num_ffirst;
47774- atomic_t num_fnext;
47775- atomic_t num_fclose;
47776- atomic_t num_hardlinks;
47777- atomic_t num_symlinks;
47778- atomic_t num_locks;
47779- atomic_t num_acl_get;
47780- atomic_t num_acl_set;
47781+ atomic_unchecked_t num_smbs_sent;
47782+ atomic_unchecked_t num_writes;
47783+ atomic_unchecked_t num_reads;
47784+ atomic_unchecked_t num_flushes;
47785+ atomic_unchecked_t num_oplock_brks;
47786+ atomic_unchecked_t num_opens;
47787+ atomic_unchecked_t num_closes;
47788+ atomic_unchecked_t num_deletes;
47789+ atomic_unchecked_t num_mkdirs;
47790+ atomic_unchecked_t num_posixopens;
47791+ atomic_unchecked_t num_posixmkdirs;
47792+ atomic_unchecked_t num_rmdirs;
47793+ atomic_unchecked_t num_renames;
47794+ atomic_unchecked_t num_t2renames;
47795+ atomic_unchecked_t num_ffirst;
47796+ atomic_unchecked_t num_fnext;
47797+ atomic_unchecked_t num_fclose;
47798+ atomic_unchecked_t num_hardlinks;
47799+ atomic_unchecked_t num_symlinks;
47800+ atomic_unchecked_t num_locks;
47801+ atomic_unchecked_t num_acl_get;
47802+ atomic_unchecked_t num_acl_set;
47803 #ifdef CONFIG_CIFS_STATS2
47804 unsigned long long time_writes;
47805 unsigned long long time_reads;
47806@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
47807 }
47808
47809 #ifdef CONFIG_CIFS_STATS
47810-#define cifs_stats_inc atomic_inc
47811+#define cifs_stats_inc atomic_inc_unchecked
47812
47813 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
47814 unsigned int bytes)
47815@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
47816 /* Various Debug counters */
47817 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
47818 #ifdef CONFIG_CIFS_STATS2
47819-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
47820-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
47821+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
47822+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
47823 #endif
47824 GLOBAL_EXTERN atomic_t smBufAllocCount;
47825 GLOBAL_EXTERN atomic_t midCount;
47826diff --git a/fs/cifs/link.c b/fs/cifs/link.c
47827index fc1e048..28b3441 100644
47828--- a/fs/cifs/link.c
47829+++ b/fs/cifs/link.c
47830@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
47831
47832 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
47833 {
47834- char *p = nd_get_link(nd);
47835+ const char *p = nd_get_link(nd);
47836 if (!IS_ERR(p))
47837 kfree(p);
47838 }
47839diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
47840index 95b82e8..12a538d 100644
47841--- a/fs/cifs/misc.c
47842+++ b/fs/cifs/misc.c
47843@@ -155,7 +155,7 @@ cifs_buf_get(void)
47844 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
47845 atomic_inc(&bufAllocCount);
47846 #ifdef CONFIG_CIFS_STATS2
47847- atomic_inc(&totBufAllocCount);
47848+ atomic_inc_unchecked(&totBufAllocCount);
47849 #endif /* CONFIG_CIFS_STATS2 */
47850 }
47851
47852@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
47853 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
47854 atomic_inc(&smBufAllocCount);
47855 #ifdef CONFIG_CIFS_STATS2
47856- atomic_inc(&totSmBufAllocCount);
47857+ atomic_inc_unchecked(&totSmBufAllocCount);
47858 #endif /* CONFIG_CIFS_STATS2 */
47859
47860 }
47861diff --git a/fs/coda/cache.c b/fs/coda/cache.c
47862index a5bf577..6d19845 100644
47863--- a/fs/coda/cache.c
47864+++ b/fs/coda/cache.c
47865@@ -24,14 +24,14 @@
47866 #include <linux/coda_fs_i.h>
47867 #include <linux/coda_cache.h>
47868
47869-static atomic_t permission_epoch = ATOMIC_INIT(0);
47870+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
47871
47872 /* replace or extend an acl cache hit */
47873 void coda_cache_enter(struct inode *inode, int mask)
47874 {
47875 struct coda_inode_info *cii = ITOC(inode);
47876
47877- cii->c_cached_epoch = atomic_read(&permission_epoch);
47878+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
47879 if (cii->c_uid != current_fsuid()) {
47880 cii->c_uid = current_fsuid();
47881 cii->c_cached_perm = mask;
47882@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
47883 void coda_cache_clear_inode(struct inode *inode)
47884 {
47885 struct coda_inode_info *cii = ITOC(inode);
47886- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
47887+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
47888 }
47889
47890 /* remove all acl caches */
47891 void coda_cache_clear_all(struct super_block *sb)
47892 {
47893- atomic_inc(&permission_epoch);
47894+ atomic_inc_unchecked(&permission_epoch);
47895 }
47896
47897
47898@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
47899
47900 hit = (mask & cii->c_cached_perm) == mask &&
47901 cii->c_uid == current_fsuid() &&
47902- cii->c_cached_epoch == atomic_read(&permission_epoch);
47903+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
47904
47905 return hit;
47906 }
47907diff --git a/fs/compat.c b/fs/compat.c
47908index d1e2411..b1eda5d 100644
47909--- a/fs/compat.c
47910+++ b/fs/compat.c
47911@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
47912 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
47913 {
47914 compat_ino_t ino = stat->ino;
47915- typeof(ubuf->st_uid) uid = 0;
47916- typeof(ubuf->st_gid) gid = 0;
47917+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
47918+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
47919 int err;
47920
47921 SET_UID(uid, stat->uid);
47922@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
47923
47924 set_fs(KERNEL_DS);
47925 /* The __user pointer cast is valid because of the set_fs() */
47926- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
47927+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
47928 set_fs(oldfs);
47929 /* truncating is ok because it's a user address */
47930 if (!ret)
47931@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
47932
47933 struct compat_readdir_callback {
47934 struct compat_old_linux_dirent __user *dirent;
47935+ struct file * file;
47936 int result;
47937 };
47938
47939@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
47940 buf->result = -EOVERFLOW;
47941 return -EOVERFLOW;
47942 }
47943+
47944+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47945+ return 0;
47946+
47947 buf->result++;
47948 dirent = buf->dirent;
47949 if (!access_ok(VERIFY_WRITE, dirent,
47950@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
47951
47952 buf.result = 0;
47953 buf.dirent = dirent;
47954+ buf.file = file;
47955
47956 error = vfs_readdir(file, compat_fillonedir, &buf);
47957 if (buf.result)
47958@@ -899,6 +905,7 @@ struct compat_linux_dirent {
47959 struct compat_getdents_callback {
47960 struct compat_linux_dirent __user *current_dir;
47961 struct compat_linux_dirent __user *previous;
47962+ struct file * file;
47963 int count;
47964 int error;
47965 };
47966@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
47967 buf->error = -EOVERFLOW;
47968 return -EOVERFLOW;
47969 }
47970+
47971+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47972+ return 0;
47973+
47974 dirent = buf->previous;
47975 if (dirent) {
47976 if (__put_user(offset, &dirent->d_off))
47977@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
47978 buf.previous = NULL;
47979 buf.count = count;
47980 buf.error = 0;
47981+ buf.file = file;
47982
47983 error = vfs_readdir(file, compat_filldir, &buf);
47984 if (error >= 0)
47985@@ -987,6 +999,7 @@ out:
47986 struct compat_getdents_callback64 {
47987 struct linux_dirent64 __user *current_dir;
47988 struct linux_dirent64 __user *previous;
47989+ struct file * file;
47990 int count;
47991 int error;
47992 };
47993@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
47994 buf->error = -EINVAL; /* only used if we fail.. */
47995 if (reclen > buf->count)
47996 return -EINVAL;
47997+
47998+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47999+ return 0;
48000+
48001 dirent = buf->previous;
48002
48003 if (dirent) {
48004@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48005 buf.previous = NULL;
48006 buf.count = count;
48007 buf.error = 0;
48008+ buf.file = file;
48009
48010 error = vfs_readdir(file, compat_filldir64, &buf);
48011 if (error >= 0)
48012 error = buf.error;
48013 lastdirent = buf.previous;
48014 if (lastdirent) {
48015- typeof(lastdirent->d_off) d_off = file->f_pos;
48016+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48017 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48018 error = -EFAULT;
48019 else
48020@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48021 * verify all the pointers
48022 */
48023 ret = -EINVAL;
48024- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48025+ if (nr_segs > UIO_MAXIOV)
48026 goto out;
48027 if (!file->f_op)
48028 goto out;
48029@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
48030 compat_uptr_t __user *envp,
48031 struct pt_regs * regs)
48032 {
48033+#ifdef CONFIG_GRKERNSEC
48034+ struct file *old_exec_file;
48035+ struct acl_subject_label *old_acl;
48036+ struct rlimit old_rlim[RLIM_NLIMITS];
48037+#endif
48038 struct linux_binprm *bprm;
48039 struct file *file;
48040 struct files_struct *displaced;
48041 bool clear_in_exec;
48042 int retval;
48043+ const struct cred *cred = current_cred();
48044+
48045+ /*
48046+ * We move the actual failure in case of RLIMIT_NPROC excess from
48047+ * set*uid() to execve() because too many poorly written programs
48048+ * don't check setuid() return code. Here we additionally recheck
48049+ * whether NPROC limit is still exceeded.
48050+ */
48051+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48052+
48053+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48054+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48055+ retval = -EAGAIN;
48056+ goto out_ret;
48057+ }
48058+
48059+ /* We're below the limit (still or again), so we don't want to make
48060+ * further execve() calls fail. */
48061+ current->flags &= ~PF_NPROC_EXCEEDED;
48062
48063 retval = unshare_files(&displaced);
48064 if (retval)
48065@@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
48066 if (IS_ERR(file))
48067 goto out_unmark;
48068
48069+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
48070+ retval = -EPERM;
48071+ goto out_file;
48072+ }
48073+
48074 sched_exec();
48075
48076 bprm->file = file;
48077 bprm->filename = filename;
48078 bprm->interp = filename;
48079
48080+ if (gr_process_user_ban()) {
48081+ retval = -EPERM;
48082+ goto out_file;
48083+ }
48084+
48085+ retval = -EACCES;
48086+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48087+ goto out_file;
48088+
48089 retval = bprm_mm_init(bprm);
48090 if (retval)
48091 goto out_file;
48092@@ -1528,9 +1584,40 @@ int compat_do_execve(char * filename,
48093 if (retval < 0)
48094 goto out;
48095
48096+ if (!gr_tpe_allow(file)) {
48097+ retval = -EACCES;
48098+ goto out;
48099+ }
48100+
48101+ if (gr_check_crash_exec(file)) {
48102+ retval = -EACCES;
48103+ goto out;
48104+ }
48105+
48106+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48107+
48108+ gr_handle_exec_args_compat(bprm, argv);
48109+
48110+#ifdef CONFIG_GRKERNSEC
48111+ old_acl = current->acl;
48112+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48113+ old_exec_file = current->exec_file;
48114+ get_file(file);
48115+ current->exec_file = file;
48116+#endif
48117+
48118+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48119+ bprm->unsafe);
48120+ if (retval < 0)
48121+ goto out_fail;
48122+
48123 retval = search_binary_handler(bprm, regs);
48124 if (retval < 0)
48125- goto out;
48126+ goto out_fail;
48127+#ifdef CONFIG_GRKERNSEC
48128+ if (old_exec_file)
48129+ fput(old_exec_file);
48130+#endif
48131
48132 /* execve succeeded */
48133 current->fs->in_exec = 0;
48134@@ -1541,6 +1628,14 @@ int compat_do_execve(char * filename,
48135 put_files_struct(displaced);
48136 return retval;
48137
48138+out_fail:
48139+#ifdef CONFIG_GRKERNSEC
48140+ current->acl = old_acl;
48141+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48142+ fput(current->exec_file);
48143+ current->exec_file = old_exec_file;
48144+#endif
48145+
48146 out:
48147 if (bprm->mm) {
48148 acct_arg_size(bprm, 0);
48149@@ -1711,6 +1806,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48150 struct fdtable *fdt;
48151 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48152
48153+ pax_track_stack();
48154+
48155 if (n < 0)
48156 goto out_nofds;
48157
48158@@ -2151,7 +2248,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48159 oldfs = get_fs();
48160 set_fs(KERNEL_DS);
48161 /* The __user pointer casts are valid because of the set_fs() */
48162- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48163+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48164 set_fs(oldfs);
48165
48166 if (err)
48167diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48168index 0adced2..bbb1b0d 100644
48169--- a/fs/compat_binfmt_elf.c
48170+++ b/fs/compat_binfmt_elf.c
48171@@ -29,10 +29,12 @@
48172 #undef elfhdr
48173 #undef elf_phdr
48174 #undef elf_note
48175+#undef elf_dyn
48176 #undef elf_addr_t
48177 #define elfhdr elf32_hdr
48178 #define elf_phdr elf32_phdr
48179 #define elf_note elf32_note
48180+#define elf_dyn Elf32_Dyn
48181 #define elf_addr_t Elf32_Addr
48182
48183 /*
48184diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48185index d84e705..d8c364c 100644
48186--- a/fs/compat_ioctl.c
48187+++ b/fs/compat_ioctl.c
48188@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48189 up = (struct compat_video_spu_palette __user *) arg;
48190 err = get_user(palp, &up->palette);
48191 err |= get_user(length, &up->length);
48192+ if (err)
48193+ return -EFAULT;
48194
48195 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48196 err = put_user(compat_ptr(palp), &up_native->palette);
48197@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48198 return -EFAULT;
48199 if (__get_user(udata, &ss32->iomem_base))
48200 return -EFAULT;
48201- ss.iomem_base = compat_ptr(udata);
48202+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48203 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48204 __get_user(ss.port_high, &ss32->port_high))
48205 return -EFAULT;
48206@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48207 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48208 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48209 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48210- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48211+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48212 return -EFAULT;
48213
48214 return ioctl_preallocate(file, p);
48215diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48216index 8e48b52..f01ed91 100644
48217--- a/fs/configfs/dir.c
48218+++ b/fs/configfs/dir.c
48219@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48220 }
48221 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48222 struct configfs_dirent *next;
48223- const char * name;
48224+ const unsigned char * name;
48225+ char d_name[sizeof(next->s_dentry->d_iname)];
48226 int len;
48227
48228 next = list_entry(p, struct configfs_dirent,
48229@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48230 continue;
48231
48232 name = configfs_get_name(next);
48233- len = strlen(name);
48234+ if (next->s_dentry && name == next->s_dentry->d_iname) {
48235+ len = next->s_dentry->d_name.len;
48236+ memcpy(d_name, name, len);
48237+ name = d_name;
48238+ } else
48239+ len = strlen(name);
48240 if (next->s_dentry)
48241 ino = next->s_dentry->d_inode->i_ino;
48242 else
48243diff --git a/fs/dcache.c b/fs/dcache.c
48244index 44c0aea..2529092 100644
48245--- a/fs/dcache.c
48246+++ b/fs/dcache.c
48247@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48248
48249 static struct kmem_cache *dentry_cache __read_mostly;
48250
48251-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48252-
48253 /*
48254 * This is the single most critical data structure when it comes
48255 * to the dcache: the hashtable for lookups. Somebody should try
48256@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48257 mempages -= reserve;
48258
48259 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48260- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48261+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48262
48263 dcache_init();
48264 inode_init();
48265diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
48266index 39c6ee8..dcee0f1 100644
48267--- a/fs/debugfs/inode.c
48268+++ b/fs/debugfs/inode.c
48269@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
48270 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
48271 {
48272 return debugfs_create_file(name,
48273+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48274+ S_IFDIR | S_IRWXU,
48275+#else
48276 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
48277+#endif
48278 parent, NULL, NULL);
48279 }
48280 EXPORT_SYMBOL_GPL(debugfs_create_dir);
48281diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48282index c010ecf..a8d8c59 100644
48283--- a/fs/dlm/lockspace.c
48284+++ b/fs/dlm/lockspace.c
48285@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48286 kfree(ls);
48287 }
48288
48289-static struct sysfs_ops dlm_attr_ops = {
48290+static const struct sysfs_ops dlm_attr_ops = {
48291 .show = dlm_attr_show,
48292 .store = dlm_attr_store,
48293 };
48294diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48295index 7a5f1ac..205b034 100644
48296--- a/fs/ecryptfs/crypto.c
48297+++ b/fs/ecryptfs/crypto.c
48298@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48299 rc);
48300 goto out;
48301 }
48302- if (unlikely(ecryptfs_verbosity > 0)) {
48303- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48304- "with iv:\n");
48305- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48306- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48307- "encryption:\n");
48308- ecryptfs_dump_hex((char *)
48309- (page_address(page)
48310- + (extent_offset * crypt_stat->extent_size)),
48311- 8);
48312- }
48313 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48314 page, (extent_offset
48315 * crypt_stat->extent_size),
48316@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48317 goto out;
48318 }
48319 rc = 0;
48320- if (unlikely(ecryptfs_verbosity > 0)) {
48321- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48322- "rc = [%d]\n", (extent_base + extent_offset),
48323- rc);
48324- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48325- "encryption:\n");
48326- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48327- }
48328 out:
48329 return rc;
48330 }
48331@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48332 rc);
48333 goto out;
48334 }
48335- if (unlikely(ecryptfs_verbosity > 0)) {
48336- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48337- "with iv:\n");
48338- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48339- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48340- "decryption:\n");
48341- ecryptfs_dump_hex((char *)
48342- (page_address(enc_extent_page)
48343- + (extent_offset * crypt_stat->extent_size)),
48344- 8);
48345- }
48346 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48347 (extent_offset
48348 * crypt_stat->extent_size),
48349@@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48350 goto out;
48351 }
48352 rc = 0;
48353- if (unlikely(ecryptfs_verbosity > 0)) {
48354- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48355- "rc = [%d]\n", (extent_base + extent_offset),
48356- rc);
48357- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48358- "decryption:\n");
48359- ecryptfs_dump_hex((char *)(page_address(page)
48360- + (extent_offset
48361- * crypt_stat->extent_size)), 8);
48362- }
48363 out:
48364 return rc;
48365 }
48366diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48367index 88ba4d4..073f003 100644
48368--- a/fs/ecryptfs/inode.c
48369+++ b/fs/ecryptfs/inode.c
48370@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48371 old_fs = get_fs();
48372 set_fs(get_ds());
48373 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48374- (char __user *)lower_buf,
48375+ (char __force_user *)lower_buf,
48376 lower_bufsiz);
48377 set_fs(old_fs);
48378 if (rc < 0)
48379@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48380 }
48381 old_fs = get_fs();
48382 set_fs(get_ds());
48383- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48384+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48385 set_fs(old_fs);
48386 if (rc < 0)
48387 goto out_free;
48388diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
48389index 6b78546..0404659 100644
48390--- a/fs/ecryptfs/read_write.c
48391+++ b/fs/ecryptfs/read_write.c
48392@@ -134,7 +134,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
48393 pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
48394 size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
48395 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
48396- size_t total_remaining_bytes = ((offset + size) - pos);
48397+ loff_t total_remaining_bytes = ((offset + size) - pos);
48398
48399 if (fatal_signal_pending(current)) {
48400 rc = -EINTR;
48401@@ -145,7 +145,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
48402 num_bytes = total_remaining_bytes;
48403 if (pos < offset) {
48404 /* remaining zeros to write, up to destination offset */
48405- size_t total_remaining_zeros = (offset - pos);
48406+ loff_t total_remaining_zeros = (offset - pos);
48407
48408 if (num_bytes > total_remaining_zeros)
48409 num_bytes = total_remaining_zeros;
48410diff --git a/fs/exec.c b/fs/exec.c
48411index 86fafc6..2215975 100644
48412--- a/fs/exec.c
48413+++ b/fs/exec.c
48414@@ -56,12 +56,28 @@
48415 #include <linux/fsnotify.h>
48416 #include <linux/fs_struct.h>
48417 #include <linux/pipe_fs_i.h>
48418+#include <linux/random.h>
48419+#include <linux/seq_file.h>
48420+
48421+#ifdef CONFIG_PAX_REFCOUNT
48422+#include <linux/kallsyms.h>
48423+#include <linux/kdebug.h>
48424+#endif
48425
48426 #include <asm/uaccess.h>
48427 #include <asm/mmu_context.h>
48428 #include <asm/tlb.h>
48429 #include "internal.h"
48430
48431+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48432+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48433+#endif
48434+
48435+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48436+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48437+EXPORT_SYMBOL(pax_set_initial_flags_func);
48438+#endif
48439+
48440 int core_uses_pid;
48441 char core_pattern[CORENAME_MAX_SIZE] = "core";
48442 unsigned int core_pipe_limit;
48443@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48444 int write)
48445 {
48446 struct page *page;
48447- int ret;
48448
48449-#ifdef CONFIG_STACK_GROWSUP
48450- if (write) {
48451- ret = expand_stack_downwards(bprm->vma, pos);
48452- if (ret < 0)
48453- return NULL;
48454- }
48455-#endif
48456- ret = get_user_pages(current, bprm->mm, pos,
48457- 1, write, 1, &page, NULL);
48458- if (ret <= 0)
48459+ if (0 > expand_stack_downwards(bprm->vma, pos))
48460+ return NULL;
48461+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48462 return NULL;
48463
48464 if (write) {
48465@@ -263,6 +271,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48466 vma->vm_end = STACK_TOP_MAX;
48467 vma->vm_start = vma->vm_end - PAGE_SIZE;
48468 vma->vm_flags = VM_STACK_FLAGS;
48469+
48470+#ifdef CONFIG_PAX_SEGMEXEC
48471+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48472+#endif
48473+
48474 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48475
48476 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48477@@ -276,6 +289,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48478 mm->stack_vm = mm->total_vm = 1;
48479 up_write(&mm->mmap_sem);
48480 bprm->p = vma->vm_end - sizeof(void *);
48481+
48482+#ifdef CONFIG_PAX_RANDUSTACK
48483+ if (randomize_va_space)
48484+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48485+#endif
48486+
48487 return 0;
48488 err:
48489 up_write(&mm->mmap_sem);
48490@@ -510,7 +529,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48491 int r;
48492 mm_segment_t oldfs = get_fs();
48493 set_fs(KERNEL_DS);
48494- r = copy_strings(argc, (char __user * __user *)argv, bprm);
48495+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48496 set_fs(oldfs);
48497 return r;
48498 }
48499@@ -540,7 +559,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48500 unsigned long new_end = old_end - shift;
48501 struct mmu_gather *tlb;
48502
48503- BUG_ON(new_start > new_end);
48504+ if (new_start >= new_end || new_start < mmap_min_addr)
48505+ return -ENOMEM;
48506
48507 /*
48508 * ensure there are no vmas between where we want to go
48509@@ -549,6 +569,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48510 if (vma != find_vma(mm, new_start))
48511 return -EFAULT;
48512
48513+#ifdef CONFIG_PAX_SEGMEXEC
48514+ BUG_ON(pax_find_mirror_vma(vma));
48515+#endif
48516+
48517 /*
48518 * cover the whole range: [new_start, old_end)
48519 */
48520@@ -630,10 +654,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48521 stack_top = arch_align_stack(stack_top);
48522 stack_top = PAGE_ALIGN(stack_top);
48523
48524- if (unlikely(stack_top < mmap_min_addr) ||
48525- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48526- return -ENOMEM;
48527-
48528 stack_shift = vma->vm_end - stack_top;
48529
48530 bprm->p -= stack_shift;
48531@@ -645,6 +665,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48532 bprm->exec -= stack_shift;
48533
48534 down_write(&mm->mmap_sem);
48535+
48536+ /* Move stack pages down in memory. */
48537+ if (stack_shift) {
48538+ ret = shift_arg_pages(vma, stack_shift);
48539+ if (ret)
48540+ goto out_unlock;
48541+ }
48542+
48543 vm_flags = VM_STACK_FLAGS;
48544
48545 /*
48546@@ -658,19 +686,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48547 vm_flags &= ~VM_EXEC;
48548 vm_flags |= mm->def_flags;
48549
48550+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48551+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48552+ vm_flags &= ~VM_EXEC;
48553+
48554+#ifdef CONFIG_PAX_MPROTECT
48555+ if (mm->pax_flags & MF_PAX_MPROTECT)
48556+ vm_flags &= ~VM_MAYEXEC;
48557+#endif
48558+
48559+ }
48560+#endif
48561+
48562 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48563 vm_flags);
48564 if (ret)
48565 goto out_unlock;
48566 BUG_ON(prev != vma);
48567
48568- /* Move stack pages down in memory. */
48569- if (stack_shift) {
48570- ret = shift_arg_pages(vma, stack_shift);
48571- if (ret)
48572- goto out_unlock;
48573- }
48574-
48575 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48576 stack_size = vma->vm_end - vma->vm_start;
48577 /*
48578@@ -744,7 +777,7 @@ int kernel_read(struct file *file, loff_t offset,
48579 old_fs = get_fs();
48580 set_fs(get_ds());
48581 /* The cast to a user pointer is valid due to the set_fs() */
48582- result = vfs_read(file, (void __user *)addr, count, &pos);
48583+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
48584 set_fs(old_fs);
48585 return result;
48586 }
48587@@ -1152,7 +1185,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48588 }
48589 rcu_read_unlock();
48590
48591- if (p->fs->users > n_fs) {
48592+ if (atomic_read(&p->fs->users) > n_fs) {
48593 bprm->unsafe |= LSM_UNSAFE_SHARE;
48594 } else {
48595 res = -EAGAIN;
48596@@ -1339,6 +1372,10 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
48597
48598 EXPORT_SYMBOL(search_binary_handler);
48599
48600+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48601+atomic64_unchecked_t global_exec_counter = ATOMIC64_INIT(0);
48602+#endif
48603+
48604 /*
48605 * sys_execve() executes a new program.
48606 */
48607@@ -1347,11 +1384,35 @@ int do_execve(char * filename,
48608 char __user *__user *envp,
48609 struct pt_regs * regs)
48610 {
48611+#ifdef CONFIG_GRKERNSEC
48612+ struct file *old_exec_file;
48613+ struct acl_subject_label *old_acl;
48614+ struct rlimit old_rlim[RLIM_NLIMITS];
48615+#endif
48616 struct linux_binprm *bprm;
48617 struct file *file;
48618 struct files_struct *displaced;
48619 bool clear_in_exec;
48620 int retval;
48621+ const struct cred *cred = current_cred();
48622+
48623+ /*
48624+ * We move the actual failure in case of RLIMIT_NPROC excess from
48625+ * set*uid() to execve() because too many poorly written programs
48626+ * don't check setuid() return code. Here we additionally recheck
48627+ * whether NPROC limit is still exceeded.
48628+ */
48629+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48630+
48631+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48632+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48633+ retval = -EAGAIN;
48634+ goto out_ret;
48635+ }
48636+
48637+ /* We're below the limit (still or again), so we don't want to make
48638+ * further execve() calls fail. */
48639+ current->flags &= ~PF_NPROC_EXCEEDED;
48640
48641 retval = unshare_files(&displaced);
48642 if (retval)
48643@@ -1377,12 +1438,27 @@ int do_execve(char * filename,
48644 if (IS_ERR(file))
48645 goto out_unmark;
48646
48647+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
48648+ retval = -EPERM;
48649+ goto out_file;
48650+ }
48651+
48652 sched_exec();
48653
48654 bprm->file = file;
48655 bprm->filename = filename;
48656 bprm->interp = filename;
48657
48658+ if (gr_process_user_ban()) {
48659+ retval = -EPERM;
48660+ goto out_file;
48661+ }
48662+
48663+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48664+ retval = -EACCES;
48665+ goto out_file;
48666+ }
48667+
48668 retval = bprm_mm_init(bprm);
48669 if (retval)
48670 goto out_file;
48671@@ -1412,12 +1488,47 @@ int do_execve(char * filename,
48672 if (retval < 0)
48673 goto out;
48674
48675+ if (!gr_tpe_allow(file)) {
48676+ retval = -EACCES;
48677+ goto out;
48678+ }
48679+
48680+ if (gr_check_crash_exec(file)) {
48681+ retval = -EACCES;
48682+ goto out;
48683+ }
48684+
48685+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48686+
48687+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
48688+
48689+#ifdef CONFIG_GRKERNSEC
48690+ old_acl = current->acl;
48691+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48692+ old_exec_file = current->exec_file;
48693+ get_file(file);
48694+ current->exec_file = file;
48695+#endif
48696+
48697+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48698+ bprm->unsafe);
48699+ if (retval < 0)
48700+ goto out_fail;
48701+
48702 current->flags &= ~PF_KTHREAD;
48703 retval = search_binary_handler(bprm,regs);
48704 if (retval < 0)
48705- goto out;
48706+ goto out_fail;
48707+#ifdef CONFIG_GRKERNSEC
48708+ if (old_exec_file)
48709+ fput(old_exec_file);
48710+#endif
48711
48712 /* execve succeeded */
48713+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
48714+ current->exec_id = atomic64_inc_return_unchecked(&global_exec_counter);
48715+#endif
48716+
48717 current->fs->in_exec = 0;
48718 current->in_execve = 0;
48719 acct_update_integrals(current);
48720@@ -1426,6 +1537,14 @@ int do_execve(char * filename,
48721 put_files_struct(displaced);
48722 return retval;
48723
48724+out_fail:
48725+#ifdef CONFIG_GRKERNSEC
48726+ current->acl = old_acl;
48727+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48728+ fput(current->exec_file);
48729+ current->exec_file = old_exec_file;
48730+#endif
48731+
48732 out:
48733 if (bprm->mm) {
48734 acct_arg_size(bprm, 0);
48735@@ -1591,6 +1710,220 @@ out:
48736 return ispipe;
48737 }
48738
48739+int pax_check_flags(unsigned long *flags)
48740+{
48741+ int retval = 0;
48742+
48743+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
48744+ if (*flags & MF_PAX_SEGMEXEC)
48745+ {
48746+ *flags &= ~MF_PAX_SEGMEXEC;
48747+ retval = -EINVAL;
48748+ }
48749+#endif
48750+
48751+ if ((*flags & MF_PAX_PAGEEXEC)
48752+
48753+#ifdef CONFIG_PAX_PAGEEXEC
48754+ && (*flags & MF_PAX_SEGMEXEC)
48755+#endif
48756+
48757+ )
48758+ {
48759+ *flags &= ~MF_PAX_PAGEEXEC;
48760+ retval = -EINVAL;
48761+ }
48762+
48763+ if ((*flags & MF_PAX_MPROTECT)
48764+
48765+#ifdef CONFIG_PAX_MPROTECT
48766+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48767+#endif
48768+
48769+ )
48770+ {
48771+ *flags &= ~MF_PAX_MPROTECT;
48772+ retval = -EINVAL;
48773+ }
48774+
48775+ if ((*flags & MF_PAX_EMUTRAMP)
48776+
48777+#ifdef CONFIG_PAX_EMUTRAMP
48778+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48779+#endif
48780+
48781+ )
48782+ {
48783+ *flags &= ~MF_PAX_EMUTRAMP;
48784+ retval = -EINVAL;
48785+ }
48786+
48787+ return retval;
48788+}
48789+
48790+EXPORT_SYMBOL(pax_check_flags);
48791+
48792+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48793+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
48794+{
48795+ struct task_struct *tsk = current;
48796+ struct mm_struct *mm = current->mm;
48797+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
48798+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
48799+ char *path_exec = NULL;
48800+ char *path_fault = NULL;
48801+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
48802+
48803+ if (buffer_exec && buffer_fault) {
48804+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
48805+
48806+ down_read(&mm->mmap_sem);
48807+ vma = mm->mmap;
48808+ while (vma && (!vma_exec || !vma_fault)) {
48809+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
48810+ vma_exec = vma;
48811+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
48812+ vma_fault = vma;
48813+ vma = vma->vm_next;
48814+ }
48815+ if (vma_exec) {
48816+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
48817+ if (IS_ERR(path_exec))
48818+ path_exec = "<path too long>";
48819+ else {
48820+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
48821+ if (path_exec) {
48822+ *path_exec = 0;
48823+ path_exec = buffer_exec;
48824+ } else
48825+ path_exec = "<path too long>";
48826+ }
48827+ }
48828+ if (vma_fault) {
48829+ start = vma_fault->vm_start;
48830+ end = vma_fault->vm_end;
48831+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
48832+ if (vma_fault->vm_file) {
48833+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
48834+ if (IS_ERR(path_fault))
48835+ path_fault = "<path too long>";
48836+ else {
48837+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
48838+ if (path_fault) {
48839+ *path_fault = 0;
48840+ path_fault = buffer_fault;
48841+ } else
48842+ path_fault = "<path too long>";
48843+ }
48844+ } else
48845+ path_fault = "<anonymous mapping>";
48846+ }
48847+ up_read(&mm->mmap_sem);
48848+ }
48849+ if (tsk->signal->curr_ip)
48850+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
48851+ else
48852+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
48853+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
48854+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
48855+ task_uid(tsk), task_euid(tsk), pc, sp);
48856+ free_page((unsigned long)buffer_exec);
48857+ free_page((unsigned long)buffer_fault);
48858+ pax_report_insns(regs, pc, sp);
48859+ do_coredump(SIGKILL, SIGKILL, regs);
48860+}
48861+#endif
48862+
48863+#ifdef CONFIG_PAX_REFCOUNT
48864+void pax_report_refcount_overflow(struct pt_regs *regs)
48865+{
48866+ if (current->signal->curr_ip)
48867+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48868+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
48869+ else
48870+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48871+ current->comm, task_pid_nr(current), current_uid(), current_euid());
48872+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
48873+ show_regs(regs);
48874+ force_sig_specific(SIGKILL, current);
48875+}
48876+#endif
48877+
48878+#ifdef CONFIG_PAX_USERCOPY
48879+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
48880+int object_is_on_stack(const void *obj, unsigned long len)
48881+{
48882+ const void * const stack = task_stack_page(current);
48883+ const void * const stackend = stack + THREAD_SIZE;
48884+
48885+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48886+ const void *frame = NULL;
48887+ const void *oldframe;
48888+#endif
48889+
48890+ if (obj + len < obj)
48891+ return -1;
48892+
48893+ if (obj + len <= stack || stackend <= obj)
48894+ return 0;
48895+
48896+ if (obj < stack || stackend < obj + len)
48897+ return -1;
48898+
48899+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48900+ oldframe = __builtin_frame_address(1);
48901+ if (oldframe)
48902+ frame = __builtin_frame_address(2);
48903+ /*
48904+ low ----------------------------------------------> high
48905+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
48906+ ^----------------^
48907+ allow copies only within here
48908+ */
48909+ while (stack <= frame && frame < stackend) {
48910+ /* if obj + len extends past the last frame, this
48911+ check won't pass and the next frame will be 0,
48912+ causing us to bail out and correctly report
48913+ the copy as invalid
48914+ */
48915+ if (obj + len <= frame)
48916+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
48917+ oldframe = frame;
48918+ frame = *(const void * const *)frame;
48919+ }
48920+ return -1;
48921+#else
48922+ return 1;
48923+#endif
48924+}
48925+
48926+
48927+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
48928+{
48929+ if (current->signal->curr_ip)
48930+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48931+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48932+ else
48933+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48934+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48935+
48936+ dump_stack();
48937+ gr_handle_kernel_exploit();
48938+ do_group_exit(SIGKILL);
48939+}
48940+#endif
48941+
48942+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
48943+void pax_track_stack(void)
48944+{
48945+ unsigned long sp = (unsigned long)&sp;
48946+ if (sp < current_thread_info()->lowest_stack &&
48947+ sp > (unsigned long)task_stack_page(current))
48948+ current_thread_info()->lowest_stack = sp;
48949+}
48950+EXPORT_SYMBOL(pax_track_stack);
48951+#endif
48952+
48953 static int zap_process(struct task_struct *start)
48954 {
48955 struct task_struct *t;
48956@@ -1793,17 +2126,17 @@ static void wait_for_dump_helpers(struct file *file)
48957 pipe = file->f_path.dentry->d_inode->i_pipe;
48958
48959 pipe_lock(pipe);
48960- pipe->readers++;
48961- pipe->writers--;
48962+ atomic_inc(&pipe->readers);
48963+ atomic_dec(&pipe->writers);
48964
48965- while ((pipe->readers > 1) && (!signal_pending(current))) {
48966+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
48967 wake_up_interruptible_sync(&pipe->wait);
48968 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48969 pipe_wait(pipe);
48970 }
48971
48972- pipe->readers--;
48973- pipe->writers++;
48974+ atomic_dec(&pipe->readers);
48975+ atomic_inc(&pipe->writers);
48976 pipe_unlock(pipe);
48977
48978 }
48979@@ -1826,10 +2159,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48980 char **helper_argv = NULL;
48981 int helper_argc = 0;
48982 int dump_count = 0;
48983- static atomic_t core_dump_count = ATOMIC_INIT(0);
48984+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
48985
48986 audit_core_dumps(signr);
48987
48988+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
48989+ gr_handle_brute_attach(current, mm->flags);
48990+
48991 binfmt = mm->binfmt;
48992 if (!binfmt || !binfmt->core_dump)
48993 goto fail;
48994@@ -1874,6 +2210,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48995 */
48996 clear_thread_flag(TIF_SIGPENDING);
48997
48998+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
48999+
49000 /*
49001 * lock_kernel() because format_corename() is controlled by sysctl, which
49002 * uses lock_kernel()
49003@@ -1908,7 +2246,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
49004 goto fail_unlock;
49005 }
49006
49007- dump_count = atomic_inc_return(&core_dump_count);
49008+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
49009 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
49010 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
49011 task_tgid_vnr(current), current->comm);
49012@@ -1972,7 +2310,7 @@ close_fail:
49013 filp_close(file, NULL);
49014 fail_dropcount:
49015 if (dump_count)
49016- atomic_dec(&core_dump_count);
49017+ atomic_dec_unchecked(&core_dump_count);
49018 fail_unlock:
49019 if (helper_argv)
49020 argv_free(helper_argv);
49021diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49022index 7f8d2e5..a1abdbb 100644
49023--- a/fs/ext2/balloc.c
49024+++ b/fs/ext2/balloc.c
49025@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49026
49027 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49028 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49029- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49030+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49031 sbi->s_resuid != current_fsuid() &&
49032 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49033 return 0;
49034diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49035index 27967f9..9f2a5fb 100644
49036--- a/fs/ext3/balloc.c
49037+++ b/fs/ext3/balloc.c
49038@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49039
49040 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49041 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49042- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49043+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49044 sbi->s_resuid != current_fsuid() &&
49045 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49046 return 0;
49047diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49048index e85b63c..80398e6 100644
49049--- a/fs/ext4/balloc.c
49050+++ b/fs/ext4/balloc.c
49051@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49052 /* Hm, nope. Are (enough) root reserved blocks available? */
49053 if (sbi->s_resuid == current_fsuid() ||
49054 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49055- capable(CAP_SYS_RESOURCE)) {
49056+ capable_nolog(CAP_SYS_RESOURCE)) {
49057 if (free_blocks >= (nblocks + dirty_blocks))
49058 return 1;
49059 }
49060diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49061index 67c46ed..1f237e5 100644
49062--- a/fs/ext4/ext4.h
49063+++ b/fs/ext4/ext4.h
49064@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49065
49066 /* stats for buddy allocator */
49067 spinlock_t s_mb_pa_lock;
49068- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49069- atomic_t s_bal_success; /* we found long enough chunks */
49070- atomic_t s_bal_allocated; /* in blocks */
49071- atomic_t s_bal_ex_scanned; /* total extents scanned */
49072- atomic_t s_bal_goals; /* goal hits */
49073- atomic_t s_bal_breaks; /* too long searches */
49074- atomic_t s_bal_2orders; /* 2^order hits */
49075+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49076+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49077+ atomic_unchecked_t s_bal_allocated; /* in blocks */
49078+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49079+ atomic_unchecked_t s_bal_goals; /* goal hits */
49080+ atomic_unchecked_t s_bal_breaks; /* too long searches */
49081+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49082 spinlock_t s_bal_lock;
49083 unsigned long s_mb_buddies_generated;
49084 unsigned long long s_mb_generation_time;
49085- atomic_t s_mb_lost_chunks;
49086- atomic_t s_mb_preallocated;
49087- atomic_t s_mb_discarded;
49088+ atomic_unchecked_t s_mb_lost_chunks;
49089+ atomic_unchecked_t s_mb_preallocated;
49090+ atomic_unchecked_t s_mb_discarded;
49091 atomic_t s_lock_busy;
49092
49093 /* locality groups */
49094diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49095index 2a60541..7439d61 100644
49096--- a/fs/ext4/file.c
49097+++ b/fs/ext4/file.c
49098@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49099 cp = d_path(&path, buf, sizeof(buf));
49100 path_put(&path);
49101 if (!IS_ERR(cp)) {
49102- memcpy(sbi->s_es->s_last_mounted, cp,
49103- sizeof(sbi->s_es->s_last_mounted));
49104+ strlcpy(sbi->s_es->s_last_mounted, cp,
49105+ sizeof(sbi->s_es->s_last_mounted));
49106 sb->s_dirt = 1;
49107 }
49108 }
49109diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49110index 42bac1b..0aab9d8 100644
49111--- a/fs/ext4/mballoc.c
49112+++ b/fs/ext4/mballoc.c
49113@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49114 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49115
49116 if (EXT4_SB(sb)->s_mb_stats)
49117- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49118+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49119
49120 break;
49121 }
49122@@ -2131,7 +2131,7 @@ repeat:
49123 ac->ac_status = AC_STATUS_CONTINUE;
49124 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49125 cr = 3;
49126- atomic_inc(&sbi->s_mb_lost_chunks);
49127+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49128 goto repeat;
49129 }
49130 }
49131@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49132 ext4_grpblk_t counters[16];
49133 } sg;
49134
49135+ pax_track_stack();
49136+
49137 group--;
49138 if (group == 0)
49139 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49140@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49141 if (sbi->s_mb_stats) {
49142 printk(KERN_INFO
49143 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49144- atomic_read(&sbi->s_bal_allocated),
49145- atomic_read(&sbi->s_bal_reqs),
49146- atomic_read(&sbi->s_bal_success));
49147+ atomic_read_unchecked(&sbi->s_bal_allocated),
49148+ atomic_read_unchecked(&sbi->s_bal_reqs),
49149+ atomic_read_unchecked(&sbi->s_bal_success));
49150 printk(KERN_INFO
49151 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49152 "%u 2^N hits, %u breaks, %u lost\n",
49153- atomic_read(&sbi->s_bal_ex_scanned),
49154- atomic_read(&sbi->s_bal_goals),
49155- atomic_read(&sbi->s_bal_2orders),
49156- atomic_read(&sbi->s_bal_breaks),
49157- atomic_read(&sbi->s_mb_lost_chunks));
49158+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49159+ atomic_read_unchecked(&sbi->s_bal_goals),
49160+ atomic_read_unchecked(&sbi->s_bal_2orders),
49161+ atomic_read_unchecked(&sbi->s_bal_breaks),
49162+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49163 printk(KERN_INFO
49164 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49165 sbi->s_mb_buddies_generated++,
49166 sbi->s_mb_generation_time);
49167 printk(KERN_INFO
49168 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49169- atomic_read(&sbi->s_mb_preallocated),
49170- atomic_read(&sbi->s_mb_discarded));
49171+ atomic_read_unchecked(&sbi->s_mb_preallocated),
49172+ atomic_read_unchecked(&sbi->s_mb_discarded));
49173 }
49174
49175 free_percpu(sbi->s_locality_groups);
49176@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49177 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49178
49179 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49180- atomic_inc(&sbi->s_bal_reqs);
49181- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49182+ atomic_inc_unchecked(&sbi->s_bal_reqs);
49183+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49184 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49185- atomic_inc(&sbi->s_bal_success);
49186- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49187+ atomic_inc_unchecked(&sbi->s_bal_success);
49188+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49189 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49190 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49191- atomic_inc(&sbi->s_bal_goals);
49192+ atomic_inc_unchecked(&sbi->s_bal_goals);
49193 if (ac->ac_found > sbi->s_mb_max_to_scan)
49194- atomic_inc(&sbi->s_bal_breaks);
49195+ atomic_inc_unchecked(&sbi->s_bal_breaks);
49196 }
49197
49198 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49199@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49200 trace_ext4_mb_new_inode_pa(ac, pa);
49201
49202 ext4_mb_use_inode_pa(ac, pa);
49203- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49204+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49205
49206 ei = EXT4_I(ac->ac_inode);
49207 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49208@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49209 trace_ext4_mb_new_group_pa(ac, pa);
49210
49211 ext4_mb_use_group_pa(ac, pa);
49212- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49213+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49214
49215 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49216 lg = ac->ac_lg;
49217@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49218 * from the bitmap and continue.
49219 */
49220 }
49221- atomic_add(free, &sbi->s_mb_discarded);
49222+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
49223
49224 return err;
49225 }
49226@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49227 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49228 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49229 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49230- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49231+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49232
49233 if (ac) {
49234 ac->ac_sb = sb;
49235diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49236index f1e7077..edd86b2 100644
49237--- a/fs/ext4/super.c
49238+++ b/fs/ext4/super.c
49239@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49240 }
49241
49242
49243-static struct sysfs_ops ext4_attr_ops = {
49244+static const struct sysfs_ops ext4_attr_ops = {
49245 .show = ext4_attr_show,
49246 .store = ext4_attr_store,
49247 };
49248diff --git a/fs/fcntl.c b/fs/fcntl.c
49249index 97e01dc..e9aab2d 100644
49250--- a/fs/fcntl.c
49251+++ b/fs/fcntl.c
49252@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49253 if (err)
49254 return err;
49255
49256+ if (gr_handle_chroot_fowner(pid, type))
49257+ return -ENOENT;
49258+ if (gr_check_protected_task_fowner(pid, type))
49259+ return -EACCES;
49260+
49261 f_modown(filp, pid, type, force);
49262 return 0;
49263 }
49264@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49265
49266 static int f_setown_ex(struct file *filp, unsigned long arg)
49267 {
49268- struct f_owner_ex * __user owner_p = (void * __user)arg;
49269+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49270 struct f_owner_ex owner;
49271 struct pid *pid;
49272 int type;
49273@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49274
49275 static int f_getown_ex(struct file *filp, unsigned long arg)
49276 {
49277- struct f_owner_ex * __user owner_p = (void * __user)arg;
49278+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49279 struct f_owner_ex owner;
49280 int ret = 0;
49281
49282@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49283 switch (cmd) {
49284 case F_DUPFD:
49285 case F_DUPFD_CLOEXEC:
49286+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49287 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49288 break;
49289 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49290diff --git a/fs/fifo.c b/fs/fifo.c
49291index f8f97b8..b1f2259 100644
49292--- a/fs/fifo.c
49293+++ b/fs/fifo.c
49294@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49295 */
49296 filp->f_op = &read_pipefifo_fops;
49297 pipe->r_counter++;
49298- if (pipe->readers++ == 0)
49299+ if (atomic_inc_return(&pipe->readers) == 1)
49300 wake_up_partner(inode);
49301
49302- if (!pipe->writers) {
49303+ if (!atomic_read(&pipe->writers)) {
49304 if ((filp->f_flags & O_NONBLOCK)) {
49305 /* suppress POLLHUP until we have
49306 * seen a writer */
49307@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49308 * errno=ENXIO when there is no process reading the FIFO.
49309 */
49310 ret = -ENXIO;
49311- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49312+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49313 goto err;
49314
49315 filp->f_op = &write_pipefifo_fops;
49316 pipe->w_counter++;
49317- if (!pipe->writers++)
49318+ if (atomic_inc_return(&pipe->writers) == 1)
49319 wake_up_partner(inode);
49320
49321- if (!pipe->readers) {
49322+ if (!atomic_read(&pipe->readers)) {
49323 wait_for_partner(inode, &pipe->r_counter);
49324 if (signal_pending(current))
49325 goto err_wr;
49326@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49327 */
49328 filp->f_op = &rdwr_pipefifo_fops;
49329
49330- pipe->readers++;
49331- pipe->writers++;
49332+ atomic_inc(&pipe->readers);
49333+ atomic_inc(&pipe->writers);
49334 pipe->r_counter++;
49335 pipe->w_counter++;
49336- if (pipe->readers == 1 || pipe->writers == 1)
49337+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49338 wake_up_partner(inode);
49339 break;
49340
49341@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49342 return 0;
49343
49344 err_rd:
49345- if (!--pipe->readers)
49346+ if (atomic_dec_and_test(&pipe->readers))
49347 wake_up_interruptible(&pipe->wait);
49348 ret = -ERESTARTSYS;
49349 goto err;
49350
49351 err_wr:
49352- if (!--pipe->writers)
49353+ if (atomic_dec_and_test(&pipe->writers))
49354 wake_up_interruptible(&pipe->wait);
49355 ret = -ERESTARTSYS;
49356 goto err;
49357
49358 err:
49359- if (!pipe->readers && !pipe->writers)
49360+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49361 free_pipe_info(inode);
49362
49363 err_nocleanup:
49364diff --git a/fs/file.c b/fs/file.c
49365index 87e1290..a930cc4 100644
49366--- a/fs/file.c
49367+++ b/fs/file.c
49368@@ -14,6 +14,7 @@
49369 #include <linux/slab.h>
49370 #include <linux/vmalloc.h>
49371 #include <linux/file.h>
49372+#include <linux/security.h>
49373 #include <linux/fdtable.h>
49374 #include <linux/bitops.h>
49375 #include <linux/interrupt.h>
49376@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49377 * N.B. For clone tasks sharing a files structure, this test
49378 * will limit the total number of files that can be opened.
49379 */
49380+
49381+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49382 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49383 return -EMFILE;
49384
49385diff --git a/fs/filesystems.c b/fs/filesystems.c
49386index a24c58e..53f91ee 100644
49387--- a/fs/filesystems.c
49388+++ b/fs/filesystems.c
49389@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49390 int len = dot ? dot - name : strlen(name);
49391
49392 fs = __get_fs_type(name, len);
49393+
49394+#ifdef CONFIG_GRKERNSEC_MODHARDEN
49395+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49396+#else
49397 if (!fs && (request_module("%.*s", len, name) == 0))
49398+#endif
49399 fs = __get_fs_type(name, len);
49400
49401 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49402diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49403index eee0590..ef5bc0e 100644
49404--- a/fs/fs_struct.c
49405+++ b/fs/fs_struct.c
49406@@ -4,6 +4,7 @@
49407 #include <linux/path.h>
49408 #include <linux/slab.h>
49409 #include <linux/fs_struct.h>
49410+#include <linux/grsecurity.h>
49411
49412 /*
49413 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49414@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49415 old_root = fs->root;
49416 fs->root = *path;
49417 path_get(path);
49418+ gr_set_chroot_entries(current, path);
49419 write_unlock(&fs->lock);
49420 if (old_root.dentry)
49421 path_put(&old_root);
49422@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49423 && fs->root.mnt == old_root->mnt) {
49424 path_get(new_root);
49425 fs->root = *new_root;
49426+ gr_set_chroot_entries(p, new_root);
49427 count++;
49428 }
49429 if (fs->pwd.dentry == old_root->dentry
49430@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49431 task_lock(tsk);
49432 write_lock(&fs->lock);
49433 tsk->fs = NULL;
49434- kill = !--fs->users;
49435+ gr_clear_chroot_entries(tsk);
49436+ kill = !atomic_dec_return(&fs->users);
49437 write_unlock(&fs->lock);
49438 task_unlock(tsk);
49439 if (kill)
49440@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49441 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49442 /* We don't need to lock fs - think why ;-) */
49443 if (fs) {
49444- fs->users = 1;
49445+ atomic_set(&fs->users, 1);
49446 fs->in_exec = 0;
49447 rwlock_init(&fs->lock);
49448 fs->umask = old->umask;
49449@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49450
49451 task_lock(current);
49452 write_lock(&fs->lock);
49453- kill = !--fs->users;
49454+ kill = !atomic_dec_return(&fs->users);
49455 current->fs = new_fs;
49456+ gr_set_chroot_entries(current, &new_fs->root);
49457 write_unlock(&fs->lock);
49458 task_unlock(current);
49459
49460@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
49461
49462 /* to be mentioned only in INIT_TASK */
49463 struct fs_struct init_fs = {
49464- .users = 1,
49465+ .users = ATOMIC_INIT(1),
49466 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49467 .umask = 0022,
49468 };
49469@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49470 task_lock(current);
49471
49472 write_lock(&init_fs.lock);
49473- init_fs.users++;
49474+ atomic_inc(&init_fs.users);
49475 write_unlock(&init_fs.lock);
49476
49477 write_lock(&fs->lock);
49478 current->fs = &init_fs;
49479- kill = !--fs->users;
49480+ gr_set_chroot_entries(current, &current->fs->root);
49481+ kill = !atomic_dec_return(&fs->users);
49482 write_unlock(&fs->lock);
49483
49484 task_unlock(current);
49485diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49486index 9905350..02eaec4 100644
49487--- a/fs/fscache/cookie.c
49488+++ b/fs/fscache/cookie.c
49489@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49490 parent ? (char *) parent->def->name : "<no-parent>",
49491 def->name, netfs_data);
49492
49493- fscache_stat(&fscache_n_acquires);
49494+ fscache_stat_unchecked(&fscache_n_acquires);
49495
49496 /* if there's no parent cookie, then we don't create one here either */
49497 if (!parent) {
49498- fscache_stat(&fscache_n_acquires_null);
49499+ fscache_stat_unchecked(&fscache_n_acquires_null);
49500 _leave(" [no parent]");
49501 return NULL;
49502 }
49503@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49504 /* allocate and initialise a cookie */
49505 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49506 if (!cookie) {
49507- fscache_stat(&fscache_n_acquires_oom);
49508+ fscache_stat_unchecked(&fscache_n_acquires_oom);
49509 _leave(" [ENOMEM]");
49510 return NULL;
49511 }
49512@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49513
49514 switch (cookie->def->type) {
49515 case FSCACHE_COOKIE_TYPE_INDEX:
49516- fscache_stat(&fscache_n_cookie_index);
49517+ fscache_stat_unchecked(&fscache_n_cookie_index);
49518 break;
49519 case FSCACHE_COOKIE_TYPE_DATAFILE:
49520- fscache_stat(&fscache_n_cookie_data);
49521+ fscache_stat_unchecked(&fscache_n_cookie_data);
49522 break;
49523 default:
49524- fscache_stat(&fscache_n_cookie_special);
49525+ fscache_stat_unchecked(&fscache_n_cookie_special);
49526 break;
49527 }
49528
49529@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49530 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49531 atomic_dec(&parent->n_children);
49532 __fscache_cookie_put(cookie);
49533- fscache_stat(&fscache_n_acquires_nobufs);
49534+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49535 _leave(" = NULL");
49536 return NULL;
49537 }
49538 }
49539
49540- fscache_stat(&fscache_n_acquires_ok);
49541+ fscache_stat_unchecked(&fscache_n_acquires_ok);
49542 _leave(" = %p", cookie);
49543 return cookie;
49544 }
49545@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49546 cache = fscache_select_cache_for_object(cookie->parent);
49547 if (!cache) {
49548 up_read(&fscache_addremove_sem);
49549- fscache_stat(&fscache_n_acquires_no_cache);
49550+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49551 _leave(" = -ENOMEDIUM [no cache]");
49552 return -ENOMEDIUM;
49553 }
49554@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49555 object = cache->ops->alloc_object(cache, cookie);
49556 fscache_stat_d(&fscache_n_cop_alloc_object);
49557 if (IS_ERR(object)) {
49558- fscache_stat(&fscache_n_object_no_alloc);
49559+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
49560 ret = PTR_ERR(object);
49561 goto error;
49562 }
49563
49564- fscache_stat(&fscache_n_object_alloc);
49565+ fscache_stat_unchecked(&fscache_n_object_alloc);
49566
49567 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49568
49569@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49570 struct fscache_object *object;
49571 struct hlist_node *_p;
49572
49573- fscache_stat(&fscache_n_updates);
49574+ fscache_stat_unchecked(&fscache_n_updates);
49575
49576 if (!cookie) {
49577- fscache_stat(&fscache_n_updates_null);
49578+ fscache_stat_unchecked(&fscache_n_updates_null);
49579 _leave(" [no cookie]");
49580 return;
49581 }
49582@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49583 struct fscache_object *object;
49584 unsigned long event;
49585
49586- fscache_stat(&fscache_n_relinquishes);
49587+ fscache_stat_unchecked(&fscache_n_relinquishes);
49588 if (retire)
49589- fscache_stat(&fscache_n_relinquishes_retire);
49590+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49591
49592 if (!cookie) {
49593- fscache_stat(&fscache_n_relinquishes_null);
49594+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
49595 _leave(" [no cookie]");
49596 return;
49597 }
49598@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49599
49600 /* wait for the cookie to finish being instantiated (or to fail) */
49601 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49602- fscache_stat(&fscache_n_relinquishes_waitcrt);
49603+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49604 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49605 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49606 }
49607diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49608index edd7434..0725e66 100644
49609--- a/fs/fscache/internal.h
49610+++ b/fs/fscache/internal.h
49611@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49612 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49613 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49614
49615-extern atomic_t fscache_n_op_pend;
49616-extern atomic_t fscache_n_op_run;
49617-extern atomic_t fscache_n_op_enqueue;
49618-extern atomic_t fscache_n_op_deferred_release;
49619-extern atomic_t fscache_n_op_release;
49620-extern atomic_t fscache_n_op_gc;
49621-extern atomic_t fscache_n_op_cancelled;
49622-extern atomic_t fscache_n_op_rejected;
49623+extern atomic_unchecked_t fscache_n_op_pend;
49624+extern atomic_unchecked_t fscache_n_op_run;
49625+extern atomic_unchecked_t fscache_n_op_enqueue;
49626+extern atomic_unchecked_t fscache_n_op_deferred_release;
49627+extern atomic_unchecked_t fscache_n_op_release;
49628+extern atomic_unchecked_t fscache_n_op_gc;
49629+extern atomic_unchecked_t fscache_n_op_cancelled;
49630+extern atomic_unchecked_t fscache_n_op_rejected;
49631
49632-extern atomic_t fscache_n_attr_changed;
49633-extern atomic_t fscache_n_attr_changed_ok;
49634-extern atomic_t fscache_n_attr_changed_nobufs;
49635-extern atomic_t fscache_n_attr_changed_nomem;
49636-extern atomic_t fscache_n_attr_changed_calls;
49637+extern atomic_unchecked_t fscache_n_attr_changed;
49638+extern atomic_unchecked_t fscache_n_attr_changed_ok;
49639+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49640+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49641+extern atomic_unchecked_t fscache_n_attr_changed_calls;
49642
49643-extern atomic_t fscache_n_allocs;
49644-extern atomic_t fscache_n_allocs_ok;
49645-extern atomic_t fscache_n_allocs_wait;
49646-extern atomic_t fscache_n_allocs_nobufs;
49647-extern atomic_t fscache_n_allocs_intr;
49648-extern atomic_t fscache_n_allocs_object_dead;
49649-extern atomic_t fscache_n_alloc_ops;
49650-extern atomic_t fscache_n_alloc_op_waits;
49651+extern atomic_unchecked_t fscache_n_allocs;
49652+extern atomic_unchecked_t fscache_n_allocs_ok;
49653+extern atomic_unchecked_t fscache_n_allocs_wait;
49654+extern atomic_unchecked_t fscache_n_allocs_nobufs;
49655+extern atomic_unchecked_t fscache_n_allocs_intr;
49656+extern atomic_unchecked_t fscache_n_allocs_object_dead;
49657+extern atomic_unchecked_t fscache_n_alloc_ops;
49658+extern atomic_unchecked_t fscache_n_alloc_op_waits;
49659
49660-extern atomic_t fscache_n_retrievals;
49661-extern atomic_t fscache_n_retrievals_ok;
49662-extern atomic_t fscache_n_retrievals_wait;
49663-extern atomic_t fscache_n_retrievals_nodata;
49664-extern atomic_t fscache_n_retrievals_nobufs;
49665-extern atomic_t fscache_n_retrievals_intr;
49666-extern atomic_t fscache_n_retrievals_nomem;
49667-extern atomic_t fscache_n_retrievals_object_dead;
49668-extern atomic_t fscache_n_retrieval_ops;
49669-extern atomic_t fscache_n_retrieval_op_waits;
49670+extern atomic_unchecked_t fscache_n_retrievals;
49671+extern atomic_unchecked_t fscache_n_retrievals_ok;
49672+extern atomic_unchecked_t fscache_n_retrievals_wait;
49673+extern atomic_unchecked_t fscache_n_retrievals_nodata;
49674+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
49675+extern atomic_unchecked_t fscache_n_retrievals_intr;
49676+extern atomic_unchecked_t fscache_n_retrievals_nomem;
49677+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
49678+extern atomic_unchecked_t fscache_n_retrieval_ops;
49679+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
49680
49681-extern atomic_t fscache_n_stores;
49682-extern atomic_t fscache_n_stores_ok;
49683-extern atomic_t fscache_n_stores_again;
49684-extern atomic_t fscache_n_stores_nobufs;
49685-extern atomic_t fscache_n_stores_oom;
49686-extern atomic_t fscache_n_store_ops;
49687-extern atomic_t fscache_n_store_calls;
49688-extern atomic_t fscache_n_store_pages;
49689-extern atomic_t fscache_n_store_radix_deletes;
49690-extern atomic_t fscache_n_store_pages_over_limit;
49691+extern atomic_unchecked_t fscache_n_stores;
49692+extern atomic_unchecked_t fscache_n_stores_ok;
49693+extern atomic_unchecked_t fscache_n_stores_again;
49694+extern atomic_unchecked_t fscache_n_stores_nobufs;
49695+extern atomic_unchecked_t fscache_n_stores_oom;
49696+extern atomic_unchecked_t fscache_n_store_ops;
49697+extern atomic_unchecked_t fscache_n_store_calls;
49698+extern atomic_unchecked_t fscache_n_store_pages;
49699+extern atomic_unchecked_t fscache_n_store_radix_deletes;
49700+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
49701
49702-extern atomic_t fscache_n_store_vmscan_not_storing;
49703-extern atomic_t fscache_n_store_vmscan_gone;
49704-extern atomic_t fscache_n_store_vmscan_busy;
49705-extern atomic_t fscache_n_store_vmscan_cancelled;
49706+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49707+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
49708+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
49709+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49710
49711-extern atomic_t fscache_n_marks;
49712-extern atomic_t fscache_n_uncaches;
49713+extern atomic_unchecked_t fscache_n_marks;
49714+extern atomic_unchecked_t fscache_n_uncaches;
49715
49716-extern atomic_t fscache_n_acquires;
49717-extern atomic_t fscache_n_acquires_null;
49718-extern atomic_t fscache_n_acquires_no_cache;
49719-extern atomic_t fscache_n_acquires_ok;
49720-extern atomic_t fscache_n_acquires_nobufs;
49721-extern atomic_t fscache_n_acquires_oom;
49722+extern atomic_unchecked_t fscache_n_acquires;
49723+extern atomic_unchecked_t fscache_n_acquires_null;
49724+extern atomic_unchecked_t fscache_n_acquires_no_cache;
49725+extern atomic_unchecked_t fscache_n_acquires_ok;
49726+extern atomic_unchecked_t fscache_n_acquires_nobufs;
49727+extern atomic_unchecked_t fscache_n_acquires_oom;
49728
49729-extern atomic_t fscache_n_updates;
49730-extern atomic_t fscache_n_updates_null;
49731-extern atomic_t fscache_n_updates_run;
49732+extern atomic_unchecked_t fscache_n_updates;
49733+extern atomic_unchecked_t fscache_n_updates_null;
49734+extern atomic_unchecked_t fscache_n_updates_run;
49735
49736-extern atomic_t fscache_n_relinquishes;
49737-extern atomic_t fscache_n_relinquishes_null;
49738-extern atomic_t fscache_n_relinquishes_waitcrt;
49739-extern atomic_t fscache_n_relinquishes_retire;
49740+extern atomic_unchecked_t fscache_n_relinquishes;
49741+extern atomic_unchecked_t fscache_n_relinquishes_null;
49742+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49743+extern atomic_unchecked_t fscache_n_relinquishes_retire;
49744
49745-extern atomic_t fscache_n_cookie_index;
49746-extern atomic_t fscache_n_cookie_data;
49747-extern atomic_t fscache_n_cookie_special;
49748+extern atomic_unchecked_t fscache_n_cookie_index;
49749+extern atomic_unchecked_t fscache_n_cookie_data;
49750+extern atomic_unchecked_t fscache_n_cookie_special;
49751
49752-extern atomic_t fscache_n_object_alloc;
49753-extern atomic_t fscache_n_object_no_alloc;
49754-extern atomic_t fscache_n_object_lookups;
49755-extern atomic_t fscache_n_object_lookups_negative;
49756-extern atomic_t fscache_n_object_lookups_positive;
49757-extern atomic_t fscache_n_object_lookups_timed_out;
49758-extern atomic_t fscache_n_object_created;
49759-extern atomic_t fscache_n_object_avail;
49760-extern atomic_t fscache_n_object_dead;
49761+extern atomic_unchecked_t fscache_n_object_alloc;
49762+extern atomic_unchecked_t fscache_n_object_no_alloc;
49763+extern atomic_unchecked_t fscache_n_object_lookups;
49764+extern atomic_unchecked_t fscache_n_object_lookups_negative;
49765+extern atomic_unchecked_t fscache_n_object_lookups_positive;
49766+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
49767+extern atomic_unchecked_t fscache_n_object_created;
49768+extern atomic_unchecked_t fscache_n_object_avail;
49769+extern atomic_unchecked_t fscache_n_object_dead;
49770
49771-extern atomic_t fscache_n_checkaux_none;
49772-extern atomic_t fscache_n_checkaux_okay;
49773-extern atomic_t fscache_n_checkaux_update;
49774-extern atomic_t fscache_n_checkaux_obsolete;
49775+extern atomic_unchecked_t fscache_n_checkaux_none;
49776+extern atomic_unchecked_t fscache_n_checkaux_okay;
49777+extern atomic_unchecked_t fscache_n_checkaux_update;
49778+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
49779
49780 extern atomic_t fscache_n_cop_alloc_object;
49781 extern atomic_t fscache_n_cop_lookup_object;
49782@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
49783 atomic_inc(stat);
49784 }
49785
49786+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
49787+{
49788+ atomic_inc_unchecked(stat);
49789+}
49790+
49791 static inline void fscache_stat_d(atomic_t *stat)
49792 {
49793 atomic_dec(stat);
49794@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
49795
49796 #define __fscache_stat(stat) (NULL)
49797 #define fscache_stat(stat) do {} while (0)
49798+#define fscache_stat_unchecked(stat) do {} while (0)
49799 #define fscache_stat_d(stat) do {} while (0)
49800 #endif
49801
49802diff --git a/fs/fscache/object.c b/fs/fscache/object.c
49803index e513ac5..e888d34 100644
49804--- a/fs/fscache/object.c
49805+++ b/fs/fscache/object.c
49806@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49807 /* update the object metadata on disk */
49808 case FSCACHE_OBJECT_UPDATING:
49809 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
49810- fscache_stat(&fscache_n_updates_run);
49811+ fscache_stat_unchecked(&fscache_n_updates_run);
49812 fscache_stat(&fscache_n_cop_update_object);
49813 object->cache->ops->update_object(object);
49814 fscache_stat_d(&fscache_n_cop_update_object);
49815@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49816 spin_lock(&object->lock);
49817 object->state = FSCACHE_OBJECT_DEAD;
49818 spin_unlock(&object->lock);
49819- fscache_stat(&fscache_n_object_dead);
49820+ fscache_stat_unchecked(&fscache_n_object_dead);
49821 goto terminal_transit;
49822
49823 /* handle the parent cache of this object being withdrawn from
49824@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49825 spin_lock(&object->lock);
49826 object->state = FSCACHE_OBJECT_DEAD;
49827 spin_unlock(&object->lock);
49828- fscache_stat(&fscache_n_object_dead);
49829+ fscache_stat_unchecked(&fscache_n_object_dead);
49830 goto terminal_transit;
49831
49832 /* complain about the object being woken up once it is
49833@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49834 parent->cookie->def->name, cookie->def->name,
49835 object->cache->tag->name);
49836
49837- fscache_stat(&fscache_n_object_lookups);
49838+ fscache_stat_unchecked(&fscache_n_object_lookups);
49839 fscache_stat(&fscache_n_cop_lookup_object);
49840 ret = object->cache->ops->lookup_object(object);
49841 fscache_stat_d(&fscache_n_cop_lookup_object);
49842@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49843 if (ret == -ETIMEDOUT) {
49844 /* probably stuck behind another object, so move this one to
49845 * the back of the queue */
49846- fscache_stat(&fscache_n_object_lookups_timed_out);
49847+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
49848 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49849 }
49850
49851@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
49852
49853 spin_lock(&object->lock);
49854 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49855- fscache_stat(&fscache_n_object_lookups_negative);
49856+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
49857
49858 /* transit here to allow write requests to begin stacking up
49859 * and read requests to begin returning ENODATA */
49860@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
49861 * result, in which case there may be data available */
49862 spin_lock(&object->lock);
49863 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49864- fscache_stat(&fscache_n_object_lookups_positive);
49865+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
49866
49867 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
49868
49869@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
49870 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49871 } else {
49872 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
49873- fscache_stat(&fscache_n_object_created);
49874+ fscache_stat_unchecked(&fscache_n_object_created);
49875
49876 object->state = FSCACHE_OBJECT_AVAILABLE;
49877 spin_unlock(&object->lock);
49878@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
49879 fscache_enqueue_dependents(object);
49880
49881 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
49882- fscache_stat(&fscache_n_object_avail);
49883+ fscache_stat_unchecked(&fscache_n_object_avail);
49884
49885 _leave("");
49886 }
49887@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49888 enum fscache_checkaux result;
49889
49890 if (!object->cookie->def->check_aux) {
49891- fscache_stat(&fscache_n_checkaux_none);
49892+ fscache_stat_unchecked(&fscache_n_checkaux_none);
49893 return FSCACHE_CHECKAUX_OKAY;
49894 }
49895
49896@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49897 switch (result) {
49898 /* entry okay as is */
49899 case FSCACHE_CHECKAUX_OKAY:
49900- fscache_stat(&fscache_n_checkaux_okay);
49901+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
49902 break;
49903
49904 /* entry requires update */
49905 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
49906- fscache_stat(&fscache_n_checkaux_update);
49907+ fscache_stat_unchecked(&fscache_n_checkaux_update);
49908 break;
49909
49910 /* entry requires deletion */
49911 case FSCACHE_CHECKAUX_OBSOLETE:
49912- fscache_stat(&fscache_n_checkaux_obsolete);
49913+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
49914 break;
49915
49916 default:
49917diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
49918index 313e79a..775240f 100644
49919--- a/fs/fscache/operation.c
49920+++ b/fs/fscache/operation.c
49921@@ -16,7 +16,7 @@
49922 #include <linux/seq_file.h>
49923 #include "internal.h"
49924
49925-atomic_t fscache_op_debug_id;
49926+atomic_unchecked_t fscache_op_debug_id;
49927 EXPORT_SYMBOL(fscache_op_debug_id);
49928
49929 /**
49930@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
49931 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
49932 ASSERTCMP(atomic_read(&op->usage), >, 0);
49933
49934- fscache_stat(&fscache_n_op_enqueue);
49935+ fscache_stat_unchecked(&fscache_n_op_enqueue);
49936 switch (op->flags & FSCACHE_OP_TYPE) {
49937 case FSCACHE_OP_FAST:
49938 _debug("queue fast");
49939@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
49940 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
49941 if (op->processor)
49942 fscache_enqueue_operation(op);
49943- fscache_stat(&fscache_n_op_run);
49944+ fscache_stat_unchecked(&fscache_n_op_run);
49945 }
49946
49947 /*
49948@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49949 if (object->n_ops > 0) {
49950 atomic_inc(&op->usage);
49951 list_add_tail(&op->pend_link, &object->pending_ops);
49952- fscache_stat(&fscache_n_op_pend);
49953+ fscache_stat_unchecked(&fscache_n_op_pend);
49954 } else if (!list_empty(&object->pending_ops)) {
49955 atomic_inc(&op->usage);
49956 list_add_tail(&op->pend_link, &object->pending_ops);
49957- fscache_stat(&fscache_n_op_pend);
49958+ fscache_stat_unchecked(&fscache_n_op_pend);
49959 fscache_start_operations(object);
49960 } else {
49961 ASSERTCMP(object->n_in_progress, ==, 0);
49962@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49963 object->n_exclusive++; /* reads and writes must wait */
49964 atomic_inc(&op->usage);
49965 list_add_tail(&op->pend_link, &object->pending_ops);
49966- fscache_stat(&fscache_n_op_pend);
49967+ fscache_stat_unchecked(&fscache_n_op_pend);
49968 ret = 0;
49969 } else {
49970 /* not allowed to submit ops in any other state */
49971@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
49972 if (object->n_exclusive > 0) {
49973 atomic_inc(&op->usage);
49974 list_add_tail(&op->pend_link, &object->pending_ops);
49975- fscache_stat(&fscache_n_op_pend);
49976+ fscache_stat_unchecked(&fscache_n_op_pend);
49977 } else if (!list_empty(&object->pending_ops)) {
49978 atomic_inc(&op->usage);
49979 list_add_tail(&op->pend_link, &object->pending_ops);
49980- fscache_stat(&fscache_n_op_pend);
49981+ fscache_stat_unchecked(&fscache_n_op_pend);
49982 fscache_start_operations(object);
49983 } else {
49984 ASSERTCMP(object->n_exclusive, ==, 0);
49985@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
49986 object->n_ops++;
49987 atomic_inc(&op->usage);
49988 list_add_tail(&op->pend_link, &object->pending_ops);
49989- fscache_stat(&fscache_n_op_pend);
49990+ fscache_stat_unchecked(&fscache_n_op_pend);
49991 ret = 0;
49992 } else if (object->state == FSCACHE_OBJECT_DYING ||
49993 object->state == FSCACHE_OBJECT_LC_DYING ||
49994 object->state == FSCACHE_OBJECT_WITHDRAWING) {
49995- fscache_stat(&fscache_n_op_rejected);
49996+ fscache_stat_unchecked(&fscache_n_op_rejected);
49997 ret = -ENOBUFS;
49998 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
49999 fscache_report_unexpected_submission(object, op, ostate);
50000@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
50001
50002 ret = -EBUSY;
50003 if (!list_empty(&op->pend_link)) {
50004- fscache_stat(&fscache_n_op_cancelled);
50005+ fscache_stat_unchecked(&fscache_n_op_cancelled);
50006 list_del_init(&op->pend_link);
50007 object->n_ops--;
50008 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
50009@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
50010 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
50011 BUG();
50012
50013- fscache_stat(&fscache_n_op_release);
50014+ fscache_stat_unchecked(&fscache_n_op_release);
50015
50016 if (op->release) {
50017 op->release(op);
50018@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50019 * lock, and defer it otherwise */
50020 if (!spin_trylock(&object->lock)) {
50021 _debug("defer put");
50022- fscache_stat(&fscache_n_op_deferred_release);
50023+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
50024
50025 cache = object->cache;
50026 spin_lock(&cache->op_gc_list_lock);
50027@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50028
50029 _debug("GC DEFERRED REL OBJ%x OP%x",
50030 object->debug_id, op->debug_id);
50031- fscache_stat(&fscache_n_op_gc);
50032+ fscache_stat_unchecked(&fscache_n_op_gc);
50033
50034 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50035
50036diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50037index c598ea4..6aac13e 100644
50038--- a/fs/fscache/page.c
50039+++ b/fs/fscache/page.c
50040@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50041 val = radix_tree_lookup(&cookie->stores, page->index);
50042 if (!val) {
50043 rcu_read_unlock();
50044- fscache_stat(&fscache_n_store_vmscan_not_storing);
50045+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50046 __fscache_uncache_page(cookie, page);
50047 return true;
50048 }
50049@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50050 spin_unlock(&cookie->stores_lock);
50051
50052 if (xpage) {
50053- fscache_stat(&fscache_n_store_vmscan_cancelled);
50054- fscache_stat(&fscache_n_store_radix_deletes);
50055+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50056+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50057 ASSERTCMP(xpage, ==, page);
50058 } else {
50059- fscache_stat(&fscache_n_store_vmscan_gone);
50060+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50061 }
50062
50063 wake_up_bit(&cookie->flags, 0);
50064@@ -106,7 +106,7 @@ page_busy:
50065 /* we might want to wait here, but that could deadlock the allocator as
50066 * the slow-work threads writing to the cache may all end up sleeping
50067 * on memory allocation */
50068- fscache_stat(&fscache_n_store_vmscan_busy);
50069+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50070 return false;
50071 }
50072 EXPORT_SYMBOL(__fscache_maybe_release_page);
50073@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50074 FSCACHE_COOKIE_STORING_TAG);
50075 if (!radix_tree_tag_get(&cookie->stores, page->index,
50076 FSCACHE_COOKIE_PENDING_TAG)) {
50077- fscache_stat(&fscache_n_store_radix_deletes);
50078+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50079 xpage = radix_tree_delete(&cookie->stores, page->index);
50080 }
50081 spin_unlock(&cookie->stores_lock);
50082@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50083
50084 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50085
50086- fscache_stat(&fscache_n_attr_changed_calls);
50087+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50088
50089 if (fscache_object_is_active(object)) {
50090 fscache_set_op_state(op, "CallFS");
50091@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50092
50093 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50094
50095- fscache_stat(&fscache_n_attr_changed);
50096+ fscache_stat_unchecked(&fscache_n_attr_changed);
50097
50098 op = kzalloc(sizeof(*op), GFP_KERNEL);
50099 if (!op) {
50100- fscache_stat(&fscache_n_attr_changed_nomem);
50101+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50102 _leave(" = -ENOMEM");
50103 return -ENOMEM;
50104 }
50105@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50106 if (fscache_submit_exclusive_op(object, op) < 0)
50107 goto nobufs;
50108 spin_unlock(&cookie->lock);
50109- fscache_stat(&fscache_n_attr_changed_ok);
50110+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50111 fscache_put_operation(op);
50112 _leave(" = 0");
50113 return 0;
50114@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50115 nobufs:
50116 spin_unlock(&cookie->lock);
50117 kfree(op);
50118- fscache_stat(&fscache_n_attr_changed_nobufs);
50119+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50120 _leave(" = %d", -ENOBUFS);
50121 return -ENOBUFS;
50122 }
50123@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50124 /* allocate a retrieval operation and attempt to submit it */
50125 op = kzalloc(sizeof(*op), GFP_NOIO);
50126 if (!op) {
50127- fscache_stat(&fscache_n_retrievals_nomem);
50128+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50129 return NULL;
50130 }
50131
50132@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50133 return 0;
50134 }
50135
50136- fscache_stat(&fscache_n_retrievals_wait);
50137+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
50138
50139 jif = jiffies;
50140 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50141 fscache_wait_bit_interruptible,
50142 TASK_INTERRUPTIBLE) != 0) {
50143- fscache_stat(&fscache_n_retrievals_intr);
50144+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50145 _leave(" = -ERESTARTSYS");
50146 return -ERESTARTSYS;
50147 }
50148@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50149 */
50150 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50151 struct fscache_retrieval *op,
50152- atomic_t *stat_op_waits,
50153- atomic_t *stat_object_dead)
50154+ atomic_unchecked_t *stat_op_waits,
50155+ atomic_unchecked_t *stat_object_dead)
50156 {
50157 int ret;
50158
50159@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50160 goto check_if_dead;
50161
50162 _debug(">>> WT");
50163- fscache_stat(stat_op_waits);
50164+ fscache_stat_unchecked(stat_op_waits);
50165 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50166 fscache_wait_bit_interruptible,
50167 TASK_INTERRUPTIBLE) < 0) {
50168@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50169
50170 check_if_dead:
50171 if (unlikely(fscache_object_is_dead(object))) {
50172- fscache_stat(stat_object_dead);
50173+ fscache_stat_unchecked(stat_object_dead);
50174 return -ENOBUFS;
50175 }
50176 return 0;
50177@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50178
50179 _enter("%p,%p,,,", cookie, page);
50180
50181- fscache_stat(&fscache_n_retrievals);
50182+ fscache_stat_unchecked(&fscache_n_retrievals);
50183
50184 if (hlist_empty(&cookie->backing_objects))
50185 goto nobufs;
50186@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50187 goto nobufs_unlock;
50188 spin_unlock(&cookie->lock);
50189
50190- fscache_stat(&fscache_n_retrieval_ops);
50191+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50192
50193 /* pin the netfs read context in case we need to do the actual netfs
50194 * read because we've encountered a cache read failure */
50195@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50196
50197 error:
50198 if (ret == -ENOMEM)
50199- fscache_stat(&fscache_n_retrievals_nomem);
50200+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50201 else if (ret == -ERESTARTSYS)
50202- fscache_stat(&fscache_n_retrievals_intr);
50203+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50204 else if (ret == -ENODATA)
50205- fscache_stat(&fscache_n_retrievals_nodata);
50206+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50207 else if (ret < 0)
50208- fscache_stat(&fscache_n_retrievals_nobufs);
50209+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50210 else
50211- fscache_stat(&fscache_n_retrievals_ok);
50212+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50213
50214 fscache_put_retrieval(op);
50215 _leave(" = %d", ret);
50216@@ -453,7 +453,7 @@ nobufs_unlock:
50217 spin_unlock(&cookie->lock);
50218 kfree(op);
50219 nobufs:
50220- fscache_stat(&fscache_n_retrievals_nobufs);
50221+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50222 _leave(" = -ENOBUFS");
50223 return -ENOBUFS;
50224 }
50225@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50226
50227 _enter("%p,,%d,,,", cookie, *nr_pages);
50228
50229- fscache_stat(&fscache_n_retrievals);
50230+ fscache_stat_unchecked(&fscache_n_retrievals);
50231
50232 if (hlist_empty(&cookie->backing_objects))
50233 goto nobufs;
50234@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50235 goto nobufs_unlock;
50236 spin_unlock(&cookie->lock);
50237
50238- fscache_stat(&fscache_n_retrieval_ops);
50239+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50240
50241 /* pin the netfs read context in case we need to do the actual netfs
50242 * read because we've encountered a cache read failure */
50243@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50244
50245 error:
50246 if (ret == -ENOMEM)
50247- fscache_stat(&fscache_n_retrievals_nomem);
50248+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50249 else if (ret == -ERESTARTSYS)
50250- fscache_stat(&fscache_n_retrievals_intr);
50251+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50252 else if (ret == -ENODATA)
50253- fscache_stat(&fscache_n_retrievals_nodata);
50254+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50255 else if (ret < 0)
50256- fscache_stat(&fscache_n_retrievals_nobufs);
50257+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50258 else
50259- fscache_stat(&fscache_n_retrievals_ok);
50260+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50261
50262 fscache_put_retrieval(op);
50263 _leave(" = %d", ret);
50264@@ -570,7 +570,7 @@ nobufs_unlock:
50265 spin_unlock(&cookie->lock);
50266 kfree(op);
50267 nobufs:
50268- fscache_stat(&fscache_n_retrievals_nobufs);
50269+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50270 _leave(" = -ENOBUFS");
50271 return -ENOBUFS;
50272 }
50273@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50274
50275 _enter("%p,%p,,,", cookie, page);
50276
50277- fscache_stat(&fscache_n_allocs);
50278+ fscache_stat_unchecked(&fscache_n_allocs);
50279
50280 if (hlist_empty(&cookie->backing_objects))
50281 goto nobufs;
50282@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50283 goto nobufs_unlock;
50284 spin_unlock(&cookie->lock);
50285
50286- fscache_stat(&fscache_n_alloc_ops);
50287+ fscache_stat_unchecked(&fscache_n_alloc_ops);
50288
50289 ret = fscache_wait_for_retrieval_activation(
50290 object, op,
50291@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50292
50293 error:
50294 if (ret == -ERESTARTSYS)
50295- fscache_stat(&fscache_n_allocs_intr);
50296+ fscache_stat_unchecked(&fscache_n_allocs_intr);
50297 else if (ret < 0)
50298- fscache_stat(&fscache_n_allocs_nobufs);
50299+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50300 else
50301- fscache_stat(&fscache_n_allocs_ok);
50302+ fscache_stat_unchecked(&fscache_n_allocs_ok);
50303
50304 fscache_put_retrieval(op);
50305 _leave(" = %d", ret);
50306@@ -651,7 +651,7 @@ nobufs_unlock:
50307 spin_unlock(&cookie->lock);
50308 kfree(op);
50309 nobufs:
50310- fscache_stat(&fscache_n_allocs_nobufs);
50311+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50312 _leave(" = -ENOBUFS");
50313 return -ENOBUFS;
50314 }
50315@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50316
50317 spin_lock(&cookie->stores_lock);
50318
50319- fscache_stat(&fscache_n_store_calls);
50320+ fscache_stat_unchecked(&fscache_n_store_calls);
50321
50322 /* find a page to store */
50323 page = NULL;
50324@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50325 page = results[0];
50326 _debug("gang %d [%lx]", n, page->index);
50327 if (page->index > op->store_limit) {
50328- fscache_stat(&fscache_n_store_pages_over_limit);
50329+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50330 goto superseded;
50331 }
50332
50333@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50334
50335 if (page) {
50336 fscache_set_op_state(&op->op, "Store");
50337- fscache_stat(&fscache_n_store_pages);
50338+ fscache_stat_unchecked(&fscache_n_store_pages);
50339 fscache_stat(&fscache_n_cop_write_page);
50340 ret = object->cache->ops->write_page(op, page);
50341 fscache_stat_d(&fscache_n_cop_write_page);
50342@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50343 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50344 ASSERT(PageFsCache(page));
50345
50346- fscache_stat(&fscache_n_stores);
50347+ fscache_stat_unchecked(&fscache_n_stores);
50348
50349 op = kzalloc(sizeof(*op), GFP_NOIO);
50350 if (!op)
50351@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50352 spin_unlock(&cookie->stores_lock);
50353 spin_unlock(&object->lock);
50354
50355- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50356+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50357 op->store_limit = object->store_limit;
50358
50359 if (fscache_submit_op(object, &op->op) < 0)
50360@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50361
50362 spin_unlock(&cookie->lock);
50363 radix_tree_preload_end();
50364- fscache_stat(&fscache_n_store_ops);
50365- fscache_stat(&fscache_n_stores_ok);
50366+ fscache_stat_unchecked(&fscache_n_store_ops);
50367+ fscache_stat_unchecked(&fscache_n_stores_ok);
50368
50369 /* the slow work queue now carries its own ref on the object */
50370 fscache_put_operation(&op->op);
50371@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50372 return 0;
50373
50374 already_queued:
50375- fscache_stat(&fscache_n_stores_again);
50376+ fscache_stat_unchecked(&fscache_n_stores_again);
50377 already_pending:
50378 spin_unlock(&cookie->stores_lock);
50379 spin_unlock(&object->lock);
50380 spin_unlock(&cookie->lock);
50381 radix_tree_preload_end();
50382 kfree(op);
50383- fscache_stat(&fscache_n_stores_ok);
50384+ fscache_stat_unchecked(&fscache_n_stores_ok);
50385 _leave(" = 0");
50386 return 0;
50387
50388@@ -886,14 +886,14 @@ nobufs:
50389 spin_unlock(&cookie->lock);
50390 radix_tree_preload_end();
50391 kfree(op);
50392- fscache_stat(&fscache_n_stores_nobufs);
50393+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
50394 _leave(" = -ENOBUFS");
50395 return -ENOBUFS;
50396
50397 nomem_free:
50398 kfree(op);
50399 nomem:
50400- fscache_stat(&fscache_n_stores_oom);
50401+ fscache_stat_unchecked(&fscache_n_stores_oom);
50402 _leave(" = -ENOMEM");
50403 return -ENOMEM;
50404 }
50405@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50406 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50407 ASSERTCMP(page, !=, NULL);
50408
50409- fscache_stat(&fscache_n_uncaches);
50410+ fscache_stat_unchecked(&fscache_n_uncaches);
50411
50412 /* cache withdrawal may beat us to it */
50413 if (!PageFsCache(page))
50414@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50415 unsigned long loop;
50416
50417 #ifdef CONFIG_FSCACHE_STATS
50418- atomic_add(pagevec->nr, &fscache_n_marks);
50419+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50420 #endif
50421
50422 for (loop = 0; loop < pagevec->nr; loop++) {
50423diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50424index 46435f3..8cddf18 100644
50425--- a/fs/fscache/stats.c
50426+++ b/fs/fscache/stats.c
50427@@ -18,95 +18,95 @@
50428 /*
50429 * operation counters
50430 */
50431-atomic_t fscache_n_op_pend;
50432-atomic_t fscache_n_op_run;
50433-atomic_t fscache_n_op_enqueue;
50434-atomic_t fscache_n_op_requeue;
50435-atomic_t fscache_n_op_deferred_release;
50436-atomic_t fscache_n_op_release;
50437-atomic_t fscache_n_op_gc;
50438-atomic_t fscache_n_op_cancelled;
50439-atomic_t fscache_n_op_rejected;
50440+atomic_unchecked_t fscache_n_op_pend;
50441+atomic_unchecked_t fscache_n_op_run;
50442+atomic_unchecked_t fscache_n_op_enqueue;
50443+atomic_unchecked_t fscache_n_op_requeue;
50444+atomic_unchecked_t fscache_n_op_deferred_release;
50445+atomic_unchecked_t fscache_n_op_release;
50446+atomic_unchecked_t fscache_n_op_gc;
50447+atomic_unchecked_t fscache_n_op_cancelled;
50448+atomic_unchecked_t fscache_n_op_rejected;
50449
50450-atomic_t fscache_n_attr_changed;
50451-atomic_t fscache_n_attr_changed_ok;
50452-atomic_t fscache_n_attr_changed_nobufs;
50453-atomic_t fscache_n_attr_changed_nomem;
50454-atomic_t fscache_n_attr_changed_calls;
50455+atomic_unchecked_t fscache_n_attr_changed;
50456+atomic_unchecked_t fscache_n_attr_changed_ok;
50457+atomic_unchecked_t fscache_n_attr_changed_nobufs;
50458+atomic_unchecked_t fscache_n_attr_changed_nomem;
50459+atomic_unchecked_t fscache_n_attr_changed_calls;
50460
50461-atomic_t fscache_n_allocs;
50462-atomic_t fscache_n_allocs_ok;
50463-atomic_t fscache_n_allocs_wait;
50464-atomic_t fscache_n_allocs_nobufs;
50465-atomic_t fscache_n_allocs_intr;
50466-atomic_t fscache_n_allocs_object_dead;
50467-atomic_t fscache_n_alloc_ops;
50468-atomic_t fscache_n_alloc_op_waits;
50469+atomic_unchecked_t fscache_n_allocs;
50470+atomic_unchecked_t fscache_n_allocs_ok;
50471+atomic_unchecked_t fscache_n_allocs_wait;
50472+atomic_unchecked_t fscache_n_allocs_nobufs;
50473+atomic_unchecked_t fscache_n_allocs_intr;
50474+atomic_unchecked_t fscache_n_allocs_object_dead;
50475+atomic_unchecked_t fscache_n_alloc_ops;
50476+atomic_unchecked_t fscache_n_alloc_op_waits;
50477
50478-atomic_t fscache_n_retrievals;
50479-atomic_t fscache_n_retrievals_ok;
50480-atomic_t fscache_n_retrievals_wait;
50481-atomic_t fscache_n_retrievals_nodata;
50482-atomic_t fscache_n_retrievals_nobufs;
50483-atomic_t fscache_n_retrievals_intr;
50484-atomic_t fscache_n_retrievals_nomem;
50485-atomic_t fscache_n_retrievals_object_dead;
50486-atomic_t fscache_n_retrieval_ops;
50487-atomic_t fscache_n_retrieval_op_waits;
50488+atomic_unchecked_t fscache_n_retrievals;
50489+atomic_unchecked_t fscache_n_retrievals_ok;
50490+atomic_unchecked_t fscache_n_retrievals_wait;
50491+atomic_unchecked_t fscache_n_retrievals_nodata;
50492+atomic_unchecked_t fscache_n_retrievals_nobufs;
50493+atomic_unchecked_t fscache_n_retrievals_intr;
50494+atomic_unchecked_t fscache_n_retrievals_nomem;
50495+atomic_unchecked_t fscache_n_retrievals_object_dead;
50496+atomic_unchecked_t fscache_n_retrieval_ops;
50497+atomic_unchecked_t fscache_n_retrieval_op_waits;
50498
50499-atomic_t fscache_n_stores;
50500-atomic_t fscache_n_stores_ok;
50501-atomic_t fscache_n_stores_again;
50502-atomic_t fscache_n_stores_nobufs;
50503-atomic_t fscache_n_stores_oom;
50504-atomic_t fscache_n_store_ops;
50505-atomic_t fscache_n_store_calls;
50506-atomic_t fscache_n_store_pages;
50507-atomic_t fscache_n_store_radix_deletes;
50508-atomic_t fscache_n_store_pages_over_limit;
50509+atomic_unchecked_t fscache_n_stores;
50510+atomic_unchecked_t fscache_n_stores_ok;
50511+atomic_unchecked_t fscache_n_stores_again;
50512+atomic_unchecked_t fscache_n_stores_nobufs;
50513+atomic_unchecked_t fscache_n_stores_oom;
50514+atomic_unchecked_t fscache_n_store_ops;
50515+atomic_unchecked_t fscache_n_store_calls;
50516+atomic_unchecked_t fscache_n_store_pages;
50517+atomic_unchecked_t fscache_n_store_radix_deletes;
50518+atomic_unchecked_t fscache_n_store_pages_over_limit;
50519
50520-atomic_t fscache_n_store_vmscan_not_storing;
50521-atomic_t fscache_n_store_vmscan_gone;
50522-atomic_t fscache_n_store_vmscan_busy;
50523-atomic_t fscache_n_store_vmscan_cancelled;
50524+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50525+atomic_unchecked_t fscache_n_store_vmscan_gone;
50526+atomic_unchecked_t fscache_n_store_vmscan_busy;
50527+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50528
50529-atomic_t fscache_n_marks;
50530-atomic_t fscache_n_uncaches;
50531+atomic_unchecked_t fscache_n_marks;
50532+atomic_unchecked_t fscache_n_uncaches;
50533
50534-atomic_t fscache_n_acquires;
50535-atomic_t fscache_n_acquires_null;
50536-atomic_t fscache_n_acquires_no_cache;
50537-atomic_t fscache_n_acquires_ok;
50538-atomic_t fscache_n_acquires_nobufs;
50539-atomic_t fscache_n_acquires_oom;
50540+atomic_unchecked_t fscache_n_acquires;
50541+atomic_unchecked_t fscache_n_acquires_null;
50542+atomic_unchecked_t fscache_n_acquires_no_cache;
50543+atomic_unchecked_t fscache_n_acquires_ok;
50544+atomic_unchecked_t fscache_n_acquires_nobufs;
50545+atomic_unchecked_t fscache_n_acquires_oom;
50546
50547-atomic_t fscache_n_updates;
50548-atomic_t fscache_n_updates_null;
50549-atomic_t fscache_n_updates_run;
50550+atomic_unchecked_t fscache_n_updates;
50551+atomic_unchecked_t fscache_n_updates_null;
50552+atomic_unchecked_t fscache_n_updates_run;
50553
50554-atomic_t fscache_n_relinquishes;
50555-atomic_t fscache_n_relinquishes_null;
50556-atomic_t fscache_n_relinquishes_waitcrt;
50557-atomic_t fscache_n_relinquishes_retire;
50558+atomic_unchecked_t fscache_n_relinquishes;
50559+atomic_unchecked_t fscache_n_relinquishes_null;
50560+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50561+atomic_unchecked_t fscache_n_relinquishes_retire;
50562
50563-atomic_t fscache_n_cookie_index;
50564-atomic_t fscache_n_cookie_data;
50565-atomic_t fscache_n_cookie_special;
50566+atomic_unchecked_t fscache_n_cookie_index;
50567+atomic_unchecked_t fscache_n_cookie_data;
50568+atomic_unchecked_t fscache_n_cookie_special;
50569
50570-atomic_t fscache_n_object_alloc;
50571-atomic_t fscache_n_object_no_alloc;
50572-atomic_t fscache_n_object_lookups;
50573-atomic_t fscache_n_object_lookups_negative;
50574-atomic_t fscache_n_object_lookups_positive;
50575-atomic_t fscache_n_object_lookups_timed_out;
50576-atomic_t fscache_n_object_created;
50577-atomic_t fscache_n_object_avail;
50578-atomic_t fscache_n_object_dead;
50579+atomic_unchecked_t fscache_n_object_alloc;
50580+atomic_unchecked_t fscache_n_object_no_alloc;
50581+atomic_unchecked_t fscache_n_object_lookups;
50582+atomic_unchecked_t fscache_n_object_lookups_negative;
50583+atomic_unchecked_t fscache_n_object_lookups_positive;
50584+atomic_unchecked_t fscache_n_object_lookups_timed_out;
50585+atomic_unchecked_t fscache_n_object_created;
50586+atomic_unchecked_t fscache_n_object_avail;
50587+atomic_unchecked_t fscache_n_object_dead;
50588
50589-atomic_t fscache_n_checkaux_none;
50590-atomic_t fscache_n_checkaux_okay;
50591-atomic_t fscache_n_checkaux_update;
50592-atomic_t fscache_n_checkaux_obsolete;
50593+atomic_unchecked_t fscache_n_checkaux_none;
50594+atomic_unchecked_t fscache_n_checkaux_okay;
50595+atomic_unchecked_t fscache_n_checkaux_update;
50596+atomic_unchecked_t fscache_n_checkaux_obsolete;
50597
50598 atomic_t fscache_n_cop_alloc_object;
50599 atomic_t fscache_n_cop_lookup_object;
50600@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50601 seq_puts(m, "FS-Cache statistics\n");
50602
50603 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50604- atomic_read(&fscache_n_cookie_index),
50605- atomic_read(&fscache_n_cookie_data),
50606- atomic_read(&fscache_n_cookie_special));
50607+ atomic_read_unchecked(&fscache_n_cookie_index),
50608+ atomic_read_unchecked(&fscache_n_cookie_data),
50609+ atomic_read_unchecked(&fscache_n_cookie_special));
50610
50611 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50612- atomic_read(&fscache_n_object_alloc),
50613- atomic_read(&fscache_n_object_no_alloc),
50614- atomic_read(&fscache_n_object_avail),
50615- atomic_read(&fscache_n_object_dead));
50616+ atomic_read_unchecked(&fscache_n_object_alloc),
50617+ atomic_read_unchecked(&fscache_n_object_no_alloc),
50618+ atomic_read_unchecked(&fscache_n_object_avail),
50619+ atomic_read_unchecked(&fscache_n_object_dead));
50620 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50621- atomic_read(&fscache_n_checkaux_none),
50622- atomic_read(&fscache_n_checkaux_okay),
50623- atomic_read(&fscache_n_checkaux_update),
50624- atomic_read(&fscache_n_checkaux_obsolete));
50625+ atomic_read_unchecked(&fscache_n_checkaux_none),
50626+ atomic_read_unchecked(&fscache_n_checkaux_okay),
50627+ atomic_read_unchecked(&fscache_n_checkaux_update),
50628+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50629
50630 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50631- atomic_read(&fscache_n_marks),
50632- atomic_read(&fscache_n_uncaches));
50633+ atomic_read_unchecked(&fscache_n_marks),
50634+ atomic_read_unchecked(&fscache_n_uncaches));
50635
50636 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50637 " oom=%u\n",
50638- atomic_read(&fscache_n_acquires),
50639- atomic_read(&fscache_n_acquires_null),
50640- atomic_read(&fscache_n_acquires_no_cache),
50641- atomic_read(&fscache_n_acquires_ok),
50642- atomic_read(&fscache_n_acquires_nobufs),
50643- atomic_read(&fscache_n_acquires_oom));
50644+ atomic_read_unchecked(&fscache_n_acquires),
50645+ atomic_read_unchecked(&fscache_n_acquires_null),
50646+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
50647+ atomic_read_unchecked(&fscache_n_acquires_ok),
50648+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
50649+ atomic_read_unchecked(&fscache_n_acquires_oom));
50650
50651 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50652- atomic_read(&fscache_n_object_lookups),
50653- atomic_read(&fscache_n_object_lookups_negative),
50654- atomic_read(&fscache_n_object_lookups_positive),
50655- atomic_read(&fscache_n_object_lookups_timed_out),
50656- atomic_read(&fscache_n_object_created));
50657+ atomic_read_unchecked(&fscache_n_object_lookups),
50658+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
50659+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
50660+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50661+ atomic_read_unchecked(&fscache_n_object_created));
50662
50663 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50664- atomic_read(&fscache_n_updates),
50665- atomic_read(&fscache_n_updates_null),
50666- atomic_read(&fscache_n_updates_run));
50667+ atomic_read_unchecked(&fscache_n_updates),
50668+ atomic_read_unchecked(&fscache_n_updates_null),
50669+ atomic_read_unchecked(&fscache_n_updates_run));
50670
50671 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
50672- atomic_read(&fscache_n_relinquishes),
50673- atomic_read(&fscache_n_relinquishes_null),
50674- atomic_read(&fscache_n_relinquishes_waitcrt),
50675- atomic_read(&fscache_n_relinquishes_retire));
50676+ atomic_read_unchecked(&fscache_n_relinquishes),
50677+ atomic_read_unchecked(&fscache_n_relinquishes_null),
50678+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
50679+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
50680
50681 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
50682- atomic_read(&fscache_n_attr_changed),
50683- atomic_read(&fscache_n_attr_changed_ok),
50684- atomic_read(&fscache_n_attr_changed_nobufs),
50685- atomic_read(&fscache_n_attr_changed_nomem),
50686- atomic_read(&fscache_n_attr_changed_calls));
50687+ atomic_read_unchecked(&fscache_n_attr_changed),
50688+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
50689+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
50690+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
50691+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
50692
50693 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
50694- atomic_read(&fscache_n_allocs),
50695- atomic_read(&fscache_n_allocs_ok),
50696- atomic_read(&fscache_n_allocs_wait),
50697- atomic_read(&fscache_n_allocs_nobufs),
50698- atomic_read(&fscache_n_allocs_intr));
50699+ atomic_read_unchecked(&fscache_n_allocs),
50700+ atomic_read_unchecked(&fscache_n_allocs_ok),
50701+ atomic_read_unchecked(&fscache_n_allocs_wait),
50702+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
50703+ atomic_read_unchecked(&fscache_n_allocs_intr));
50704 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
50705- atomic_read(&fscache_n_alloc_ops),
50706- atomic_read(&fscache_n_alloc_op_waits),
50707- atomic_read(&fscache_n_allocs_object_dead));
50708+ atomic_read_unchecked(&fscache_n_alloc_ops),
50709+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
50710+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
50711
50712 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
50713 " int=%u oom=%u\n",
50714- atomic_read(&fscache_n_retrievals),
50715- atomic_read(&fscache_n_retrievals_ok),
50716- atomic_read(&fscache_n_retrievals_wait),
50717- atomic_read(&fscache_n_retrievals_nodata),
50718- atomic_read(&fscache_n_retrievals_nobufs),
50719- atomic_read(&fscache_n_retrievals_intr),
50720- atomic_read(&fscache_n_retrievals_nomem));
50721+ atomic_read_unchecked(&fscache_n_retrievals),
50722+ atomic_read_unchecked(&fscache_n_retrievals_ok),
50723+ atomic_read_unchecked(&fscache_n_retrievals_wait),
50724+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
50725+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
50726+ atomic_read_unchecked(&fscache_n_retrievals_intr),
50727+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
50728 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
50729- atomic_read(&fscache_n_retrieval_ops),
50730- atomic_read(&fscache_n_retrieval_op_waits),
50731- atomic_read(&fscache_n_retrievals_object_dead));
50732+ atomic_read_unchecked(&fscache_n_retrieval_ops),
50733+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
50734+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
50735
50736 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
50737- atomic_read(&fscache_n_stores),
50738- atomic_read(&fscache_n_stores_ok),
50739- atomic_read(&fscache_n_stores_again),
50740- atomic_read(&fscache_n_stores_nobufs),
50741- atomic_read(&fscache_n_stores_oom));
50742+ atomic_read_unchecked(&fscache_n_stores),
50743+ atomic_read_unchecked(&fscache_n_stores_ok),
50744+ atomic_read_unchecked(&fscache_n_stores_again),
50745+ atomic_read_unchecked(&fscache_n_stores_nobufs),
50746+ atomic_read_unchecked(&fscache_n_stores_oom));
50747 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
50748- atomic_read(&fscache_n_store_ops),
50749- atomic_read(&fscache_n_store_calls),
50750- atomic_read(&fscache_n_store_pages),
50751- atomic_read(&fscache_n_store_radix_deletes),
50752- atomic_read(&fscache_n_store_pages_over_limit));
50753+ atomic_read_unchecked(&fscache_n_store_ops),
50754+ atomic_read_unchecked(&fscache_n_store_calls),
50755+ atomic_read_unchecked(&fscache_n_store_pages),
50756+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
50757+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
50758
50759 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
50760- atomic_read(&fscache_n_store_vmscan_not_storing),
50761- atomic_read(&fscache_n_store_vmscan_gone),
50762- atomic_read(&fscache_n_store_vmscan_busy),
50763- atomic_read(&fscache_n_store_vmscan_cancelled));
50764+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
50765+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
50766+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
50767+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
50768
50769 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
50770- atomic_read(&fscache_n_op_pend),
50771- atomic_read(&fscache_n_op_run),
50772- atomic_read(&fscache_n_op_enqueue),
50773- atomic_read(&fscache_n_op_cancelled),
50774- atomic_read(&fscache_n_op_rejected));
50775+ atomic_read_unchecked(&fscache_n_op_pend),
50776+ atomic_read_unchecked(&fscache_n_op_run),
50777+ atomic_read_unchecked(&fscache_n_op_enqueue),
50778+ atomic_read_unchecked(&fscache_n_op_cancelled),
50779+ atomic_read_unchecked(&fscache_n_op_rejected));
50780 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
50781- atomic_read(&fscache_n_op_deferred_release),
50782- atomic_read(&fscache_n_op_release),
50783- atomic_read(&fscache_n_op_gc));
50784+ atomic_read_unchecked(&fscache_n_op_deferred_release),
50785+ atomic_read_unchecked(&fscache_n_op_release),
50786+ atomic_read_unchecked(&fscache_n_op_gc));
50787
50788 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
50789 atomic_read(&fscache_n_cop_alloc_object),
50790diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
50791index de792dc..448b532 100644
50792--- a/fs/fuse/cuse.c
50793+++ b/fs/fuse/cuse.c
50794@@ -576,10 +576,12 @@ static int __init cuse_init(void)
50795 INIT_LIST_HEAD(&cuse_conntbl[i]);
50796
50797 /* inherit and extend fuse_dev_operations */
50798- cuse_channel_fops = fuse_dev_operations;
50799- cuse_channel_fops.owner = THIS_MODULE;
50800- cuse_channel_fops.open = cuse_channel_open;
50801- cuse_channel_fops.release = cuse_channel_release;
50802+ pax_open_kernel();
50803+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
50804+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
50805+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
50806+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
50807+ pax_close_kernel();
50808
50809 cuse_class = class_create(THIS_MODULE, "cuse");
50810 if (IS_ERR(cuse_class))
50811diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
50812index 1facb39..7f48557 100644
50813--- a/fs/fuse/dev.c
50814+++ b/fs/fuse/dev.c
50815@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50816 {
50817 struct fuse_notify_inval_entry_out outarg;
50818 int err = -EINVAL;
50819- char buf[FUSE_NAME_MAX+1];
50820+ char *buf = NULL;
50821 struct qstr name;
50822
50823 if (size < sizeof(outarg))
50824@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50825 if (outarg.namelen > FUSE_NAME_MAX)
50826 goto err;
50827
50828+ err = -ENOMEM;
50829+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
50830+ if (!buf)
50831+ goto err;
50832+
50833 err = -EINVAL;
50834 if (size != sizeof(outarg) + outarg.namelen + 1)
50835 goto err;
50836@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50837
50838 down_read(&fc->killsb);
50839 err = -ENOENT;
50840- if (!fc->sb)
50841- goto err_unlock;
50842-
50843- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50844-
50845-err_unlock:
50846+ if (fc->sb)
50847+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50848 up_read(&fc->killsb);
50849+ kfree(buf);
50850 return err;
50851
50852 err:
50853 fuse_copy_finish(cs);
50854+ kfree(buf);
50855 return err;
50856 }
50857
50858diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
50859index 4787ae6..73efff7 100644
50860--- a/fs/fuse/dir.c
50861+++ b/fs/fuse/dir.c
50862@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
50863 return link;
50864 }
50865
50866-static void free_link(char *link)
50867+static void free_link(const char *link)
50868 {
50869 if (!IS_ERR(link))
50870 free_page((unsigned long) link);
50871diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
50872index 247436c..e650ccb 100644
50873--- a/fs/gfs2/ops_inode.c
50874+++ b/fs/gfs2/ops_inode.c
50875@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
50876 unsigned int x;
50877 int error;
50878
50879+ pax_track_stack();
50880+
50881 if (ndentry->d_inode) {
50882 nip = GFS2_I(ndentry->d_inode);
50883 if (ip == nip)
50884diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
50885index 4463297..4fed53b 100644
50886--- a/fs/gfs2/sys.c
50887+++ b/fs/gfs2/sys.c
50888@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
50889 return a->store ? a->store(sdp, buf, len) : len;
50890 }
50891
50892-static struct sysfs_ops gfs2_attr_ops = {
50893+static const struct sysfs_ops gfs2_attr_ops = {
50894 .show = gfs2_attr_show,
50895 .store = gfs2_attr_store,
50896 };
50897@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
50898 return 0;
50899 }
50900
50901-static struct kset_uevent_ops gfs2_uevent_ops = {
50902+static const struct kset_uevent_ops gfs2_uevent_ops = {
50903 .uevent = gfs2_uevent,
50904 };
50905
50906diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
50907index f6874ac..7cd98a8 100644
50908--- a/fs/hfsplus/catalog.c
50909+++ b/fs/hfsplus/catalog.c
50910@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
50911 int err;
50912 u16 type;
50913
50914+ pax_track_stack();
50915+
50916 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
50917 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
50918 if (err)
50919@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
50920 int entry_size;
50921 int err;
50922
50923+ pax_track_stack();
50924+
50925 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
50926 sb = dir->i_sb;
50927 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
50928@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
50929 int entry_size, type;
50930 int err = 0;
50931
50932+ pax_track_stack();
50933+
50934 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
50935 dst_dir->i_ino, dst_name->name);
50936 sb = src_dir->i_sb;
50937diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
50938index 5f40236..dac3421 100644
50939--- a/fs/hfsplus/dir.c
50940+++ b/fs/hfsplus/dir.c
50941@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
50942 struct hfsplus_readdir_data *rd;
50943 u16 type;
50944
50945+ pax_track_stack();
50946+
50947 if (filp->f_pos >= inode->i_size)
50948 return 0;
50949
50950diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
50951index 1bcf597..905a251 100644
50952--- a/fs/hfsplus/inode.c
50953+++ b/fs/hfsplus/inode.c
50954@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
50955 int res = 0;
50956 u16 type;
50957
50958+ pax_track_stack();
50959+
50960 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
50961
50962 HFSPLUS_I(inode).dev = 0;
50963@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
50964 struct hfs_find_data fd;
50965 hfsplus_cat_entry entry;
50966
50967+ pax_track_stack();
50968+
50969 if (HFSPLUS_IS_RSRC(inode))
50970 main_inode = HFSPLUS_I(inode).rsrc_inode;
50971
50972diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
50973index f457d2c..7ef4ad5 100644
50974--- a/fs/hfsplus/ioctl.c
50975+++ b/fs/hfsplus/ioctl.c
50976@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
50977 struct hfsplus_cat_file *file;
50978 int res;
50979
50980+ pax_track_stack();
50981+
50982 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50983 return -EOPNOTSUPP;
50984
50985@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
50986 struct hfsplus_cat_file *file;
50987 ssize_t res = 0;
50988
50989+ pax_track_stack();
50990+
50991 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50992 return -EOPNOTSUPP;
50993
50994diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
50995index 43022f3..7298079 100644
50996--- a/fs/hfsplus/super.c
50997+++ b/fs/hfsplus/super.c
50998@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
50999 struct nls_table *nls = NULL;
51000 int err = -EINVAL;
51001
51002+ pax_track_stack();
51003+
51004 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
51005 if (!sbi)
51006 return -ENOMEM;
51007diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
51008index 87a1258..5694d91 100644
51009--- a/fs/hugetlbfs/inode.c
51010+++ b/fs/hugetlbfs/inode.c
51011@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51012 .kill_sb = kill_litter_super,
51013 };
51014
51015-static struct vfsmount *hugetlbfs_vfsmount;
51016+struct vfsmount *hugetlbfs_vfsmount;
51017
51018 static int can_do_hugetlb_shm(void)
51019 {
51020diff --git a/fs/ioctl.c b/fs/ioctl.c
51021index 6c75110..19d2c3c 100644
51022--- a/fs/ioctl.c
51023+++ b/fs/ioctl.c
51024@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51025 u64 phys, u64 len, u32 flags)
51026 {
51027 struct fiemap_extent extent;
51028- struct fiemap_extent *dest = fieinfo->fi_extents_start;
51029+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51030
51031 /* only count the extents */
51032 if (fieinfo->fi_extents_max == 0) {
51033@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51034
51035 fieinfo.fi_flags = fiemap.fm_flags;
51036 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51037- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51038+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51039
51040 if (fiemap.fm_extent_count != 0 &&
51041 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51042@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51043 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51044 fiemap.fm_flags = fieinfo.fi_flags;
51045 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51046- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51047+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51048 error = -EFAULT;
51049
51050 return error;
51051diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51052index b0435dd..81ee0be 100644
51053--- a/fs/jbd/checkpoint.c
51054+++ b/fs/jbd/checkpoint.c
51055@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51056 tid_t this_tid;
51057 int result;
51058
51059+ pax_track_stack();
51060+
51061 jbd_debug(1, "Start checkpoint\n");
51062
51063 /*
51064diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51065index 546d153..736896c 100644
51066--- a/fs/jffs2/compr_rtime.c
51067+++ b/fs/jffs2/compr_rtime.c
51068@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51069 int outpos = 0;
51070 int pos=0;
51071
51072+ pax_track_stack();
51073+
51074 memset(positions,0,sizeof(positions));
51075
51076 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51077@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51078 int outpos = 0;
51079 int pos=0;
51080
51081+ pax_track_stack();
51082+
51083 memset(positions,0,sizeof(positions));
51084
51085 while (outpos<destlen) {
51086diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51087index 170d289..3254b98 100644
51088--- a/fs/jffs2/compr_rubin.c
51089+++ b/fs/jffs2/compr_rubin.c
51090@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51091 int ret;
51092 uint32_t mysrclen, mydstlen;
51093
51094+ pax_track_stack();
51095+
51096 mysrclen = *sourcelen;
51097 mydstlen = *dstlen - 8;
51098
51099diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51100index b47679b..00d65d3 100644
51101--- a/fs/jffs2/erase.c
51102+++ b/fs/jffs2/erase.c
51103@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51104 struct jffs2_unknown_node marker = {
51105 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51106 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51107- .totlen = cpu_to_je32(c->cleanmarker_size)
51108+ .totlen = cpu_to_je32(c->cleanmarker_size),
51109+ .hdr_crc = cpu_to_je32(0)
51110 };
51111
51112 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51113diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51114index 5ef7bac..4fd1e3c 100644
51115--- a/fs/jffs2/wbuf.c
51116+++ b/fs/jffs2/wbuf.c
51117@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51118 {
51119 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51120 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51121- .totlen = constant_cpu_to_je32(8)
51122+ .totlen = constant_cpu_to_je32(8),
51123+ .hdr_crc = constant_cpu_to_je32(0)
51124 };
51125
51126 /*
51127diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51128index 082e844..52012a1 100644
51129--- a/fs/jffs2/xattr.c
51130+++ b/fs/jffs2/xattr.c
51131@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51132
51133 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51134
51135+ pax_track_stack();
51136+
51137 /* Phase.1 : Merge same xref */
51138 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51139 xref_tmphash[i] = NULL;
51140diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51141index 2234c73..f6e6e6b 100644
51142--- a/fs/jfs/super.c
51143+++ b/fs/jfs/super.c
51144@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51145
51146 jfs_inode_cachep =
51147 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51148- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51149+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51150 init_once);
51151 if (jfs_inode_cachep == NULL)
51152 return -ENOMEM;
51153diff --git a/fs/libfs.c b/fs/libfs.c
51154index ba36e93..3153fce 100644
51155--- a/fs/libfs.c
51156+++ b/fs/libfs.c
51157@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51158
51159 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51160 struct dentry *next;
51161+ char d_name[sizeof(next->d_iname)];
51162+ const unsigned char *name;
51163+
51164 next = list_entry(p, struct dentry, d_u.d_child);
51165 if (d_unhashed(next) || !next->d_inode)
51166 continue;
51167
51168 spin_unlock(&dcache_lock);
51169- if (filldir(dirent, next->d_name.name,
51170+ name = next->d_name.name;
51171+ if (name == next->d_iname) {
51172+ memcpy(d_name, name, next->d_name.len);
51173+ name = d_name;
51174+ }
51175+ if (filldir(dirent, name,
51176 next->d_name.len, filp->f_pos,
51177 next->d_inode->i_ino,
51178 dt_type(next->d_inode)) < 0)
51179diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51180index c325a83..d15b07b 100644
51181--- a/fs/lockd/clntproc.c
51182+++ b/fs/lockd/clntproc.c
51183@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51184 /*
51185 * Cookie counter for NLM requests
51186 */
51187-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51188+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51189
51190 void nlmclnt_next_cookie(struct nlm_cookie *c)
51191 {
51192- u32 cookie = atomic_inc_return(&nlm_cookie);
51193+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51194
51195 memcpy(c->data, &cookie, 4);
51196 c->len=4;
51197@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51198 struct nlm_rqst reqst, *req;
51199 int status;
51200
51201+ pax_track_stack();
51202+
51203 req = &reqst;
51204 memset(req, 0, sizeof(*req));
51205 locks_init_lock(&req->a_args.lock.fl);
51206diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51207index 1a54ae1..6a16c27 100644
51208--- a/fs/lockd/svc.c
51209+++ b/fs/lockd/svc.c
51210@@ -43,7 +43,7 @@
51211
51212 static struct svc_program nlmsvc_program;
51213
51214-struct nlmsvc_binding * nlmsvc_ops;
51215+const struct nlmsvc_binding * nlmsvc_ops;
51216 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51217
51218 static DEFINE_MUTEX(nlmsvc_mutex);
51219diff --git a/fs/locks.c b/fs/locks.c
51220index a8794f2..4041e55 100644
51221--- a/fs/locks.c
51222+++ b/fs/locks.c
51223@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51224
51225 static struct kmem_cache *filelock_cache __read_mostly;
51226
51227+static void locks_init_lock_always(struct file_lock *fl)
51228+{
51229+ fl->fl_next = NULL;
51230+ fl->fl_fasync = NULL;
51231+ fl->fl_owner = NULL;
51232+ fl->fl_pid = 0;
51233+ fl->fl_nspid = NULL;
51234+ fl->fl_file = NULL;
51235+ fl->fl_flags = 0;
51236+ fl->fl_type = 0;
51237+ fl->fl_start = fl->fl_end = 0;
51238+}
51239+
51240 /* Allocate an empty lock structure. */
51241 static struct file_lock *locks_alloc_lock(void)
51242 {
51243- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51244+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51245+
51246+ if (fl)
51247+ locks_init_lock_always(fl);
51248+
51249+ return fl;
51250 }
51251
51252 void locks_release_private(struct file_lock *fl)
51253@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51254 INIT_LIST_HEAD(&fl->fl_link);
51255 INIT_LIST_HEAD(&fl->fl_block);
51256 init_waitqueue_head(&fl->fl_wait);
51257- fl->fl_next = NULL;
51258- fl->fl_fasync = NULL;
51259- fl->fl_owner = NULL;
51260- fl->fl_pid = 0;
51261- fl->fl_nspid = NULL;
51262- fl->fl_file = NULL;
51263- fl->fl_flags = 0;
51264- fl->fl_type = 0;
51265- fl->fl_start = fl->fl_end = 0;
51266 fl->fl_ops = NULL;
51267 fl->fl_lmops = NULL;
51268+ locks_init_lock_always(fl);
51269 }
51270
51271 EXPORT_SYMBOL(locks_init_lock);
51272@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51273 return;
51274
51275 if (filp->f_op && filp->f_op->flock) {
51276- struct file_lock fl = {
51277+ struct file_lock flock = {
51278 .fl_pid = current->tgid,
51279 .fl_file = filp,
51280 .fl_flags = FL_FLOCK,
51281 .fl_type = F_UNLCK,
51282 .fl_end = OFFSET_MAX,
51283 };
51284- filp->f_op->flock(filp, F_SETLKW, &fl);
51285- if (fl.fl_ops && fl.fl_ops->fl_release_private)
51286- fl.fl_ops->fl_release_private(&fl);
51287+ filp->f_op->flock(filp, F_SETLKW, &flock);
51288+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
51289+ flock.fl_ops->fl_release_private(&flock);
51290 }
51291
51292 lock_kernel();
51293diff --git a/fs/mbcache.c b/fs/mbcache.c
51294index ec88ff3..b843a82 100644
51295--- a/fs/mbcache.c
51296+++ b/fs/mbcache.c
51297@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51298 if (!cache)
51299 goto fail;
51300 cache->c_name = name;
51301- cache->c_op.free = NULL;
51302+ *(void **)&cache->c_op.free = NULL;
51303 if (cache_op)
51304- cache->c_op.free = cache_op->free;
51305+ *(void **)&cache->c_op.free = cache_op->free;
51306 atomic_set(&cache->c_entry_count, 0);
51307 cache->c_bucket_bits = bucket_bits;
51308 #ifdef MB_CACHE_INDEXES_COUNT
51309diff --git a/fs/namei.c b/fs/namei.c
51310index b0afbd4..8d065a1 100644
51311--- a/fs/namei.c
51312+++ b/fs/namei.c
51313@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51314 return ret;
51315
51316 /*
51317+ * Searching includes executable on directories, else just read.
51318+ */
51319+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51320+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51321+ if (capable(CAP_DAC_READ_SEARCH))
51322+ return 0;
51323+
51324+ /*
51325 * Read/write DACs are always overridable.
51326 * Executable DACs are overridable if at least one exec bit is set.
51327 */
51328@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51329 if (capable(CAP_DAC_OVERRIDE))
51330 return 0;
51331
51332- /*
51333- * Searching includes executable on directories, else just read.
51334- */
51335- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51336- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51337- if (capable(CAP_DAC_READ_SEARCH))
51338- return 0;
51339-
51340 return -EACCES;
51341 }
51342
51343@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51344 if (!ret)
51345 goto ok;
51346
51347- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51348+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51349+ capable(CAP_DAC_OVERRIDE))
51350 goto ok;
51351
51352 return ret;
51353@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51354 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51355 error = PTR_ERR(cookie);
51356 if (!IS_ERR(cookie)) {
51357- char *s = nd_get_link(nd);
51358+ const char *s = nd_get_link(nd);
51359 error = 0;
51360 if (s)
51361 error = __vfs_follow_link(nd, s);
51362@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51363 err = security_inode_follow_link(path->dentry, nd);
51364 if (err)
51365 goto loop;
51366+
51367+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51368+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51369+ err = -EACCES;
51370+ goto loop;
51371+ }
51372+
51373 current->link_count++;
51374 current->total_link_count++;
51375 nd->depth++;
51376@@ -1016,11 +1024,19 @@ return_reval:
51377 break;
51378 }
51379 return_base:
51380+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51381+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51382+ path_put(&nd->path);
51383+ return -ENOENT;
51384+ }
51385 return 0;
51386 out_dput:
51387 path_put_conditional(&next, nd);
51388 break;
51389 }
51390+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51391+ err = -ENOENT;
51392+
51393 path_put(&nd->path);
51394 return_err:
51395 return err;
51396@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51397 int retval = path_init(dfd, name, flags, nd);
51398 if (!retval)
51399 retval = path_walk(name, nd);
51400- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51401- nd->path.dentry->d_inode))
51402- audit_inode(name, nd->path.dentry);
51403+
51404+ if (likely(!retval)) {
51405+ if (nd->path.dentry && nd->path.dentry->d_inode) {
51406+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51407+ retval = -ENOENT;
51408+ if (!audit_dummy_context())
51409+ audit_inode(name, nd->path.dentry);
51410+ }
51411+ }
51412 if (nd->root.mnt) {
51413 path_put(&nd->root);
51414 nd->root.mnt = NULL;
51415 }
51416+
51417 return retval;
51418 }
51419
51420@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51421 if (error)
51422 goto err_out;
51423
51424+
51425+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51426+ error = -EPERM;
51427+ goto err_out;
51428+ }
51429+ if (gr_handle_rawio(inode)) {
51430+ error = -EPERM;
51431+ goto err_out;
51432+ }
51433+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51434+ error = -EACCES;
51435+ goto err_out;
51436+ }
51437+
51438 if (flag & O_TRUNC) {
51439 error = get_write_access(inode);
51440 if (error)
51441@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51442 {
51443 int error;
51444 struct dentry *dir = nd->path.dentry;
51445+ int acc_mode = ACC_MODE(flag);
51446+
51447+ if (flag & O_TRUNC)
51448+ acc_mode |= MAY_WRITE;
51449+ if (flag & O_APPEND)
51450+ acc_mode |= MAY_APPEND;
51451+
51452+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51453+ error = -EACCES;
51454+ goto out_unlock;
51455+ }
51456
51457 if (!IS_POSIXACL(dir->d_inode))
51458 mode &= ~current_umask();
51459@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51460 if (error)
51461 goto out_unlock;
51462 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51463+ if (!error)
51464+ gr_handle_create(path->dentry, nd->path.mnt);
51465 out_unlock:
51466 mutex_unlock(&dir->d_inode->i_mutex);
51467 dput(nd->path.dentry);
51468@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51469 &nd, flag);
51470 if (error)
51471 return ERR_PTR(error);
51472+
51473+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51474+ error = -EPERM;
51475+ goto exit;
51476+ }
51477+
51478+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51479+ error = -EPERM;
51480+ goto exit;
51481+ }
51482+
51483+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51484+ error = -EACCES;
51485+ goto exit;
51486+ }
51487+
51488 goto ok;
51489 }
51490
51491@@ -1795,6 +1861,19 @@ do_last:
51492 /*
51493 * It already exists.
51494 */
51495+
51496+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51497+ error = -ENOENT;
51498+ goto exit_mutex_unlock;
51499+ }
51500+
51501+ /* only check if O_CREAT is specified, all other checks need
51502+ to go into may_open */
51503+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51504+ error = -EACCES;
51505+ goto exit_mutex_unlock;
51506+ }
51507+
51508 mutex_unlock(&dir->d_inode->i_mutex);
51509 audit_inode(pathname, path.dentry);
51510
51511@@ -1887,6 +1966,13 @@ do_link:
51512 error = security_inode_follow_link(path.dentry, &nd);
51513 if (error)
51514 goto exit_dput;
51515+
51516+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51517+ path.dentry, nd.path.mnt)) {
51518+ error = -EACCES;
51519+ goto exit_dput;
51520+ }
51521+
51522 error = __do_follow_link(&path, &nd);
51523 if (error) {
51524 /* Does someone understand code flow here? Or it is only
51525@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51526 }
51527 return dentry;
51528 eexist:
51529+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51530+ dput(dentry);
51531+ return ERR_PTR(-ENOENT);
51532+ }
51533 dput(dentry);
51534 dentry = ERR_PTR(-EEXIST);
51535 fail:
51536@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51537 error = may_mknod(mode);
51538 if (error)
51539 goto out_dput;
51540+
51541+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51542+ error = -EPERM;
51543+ goto out_dput;
51544+ }
51545+
51546+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51547+ error = -EACCES;
51548+ goto out_dput;
51549+ }
51550+
51551 error = mnt_want_write(nd.path.mnt);
51552 if (error)
51553 goto out_dput;
51554@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51555 }
51556 out_drop_write:
51557 mnt_drop_write(nd.path.mnt);
51558+
51559+ if (!error)
51560+ gr_handle_create(dentry, nd.path.mnt);
51561 out_dput:
51562 dput(dentry);
51563 out_unlock:
51564@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51565 if (IS_ERR(dentry))
51566 goto out_unlock;
51567
51568+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51569+ error = -EACCES;
51570+ goto out_dput;
51571+ }
51572+
51573 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51574 mode &= ~current_umask();
51575 error = mnt_want_write(nd.path.mnt);
51576@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51577 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51578 out_drop_write:
51579 mnt_drop_write(nd.path.mnt);
51580+
51581+ if (!error)
51582+ gr_handle_create(dentry, nd.path.mnt);
51583+
51584 out_dput:
51585 dput(dentry);
51586 out_unlock:
51587@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51588 char * name;
51589 struct dentry *dentry;
51590 struct nameidata nd;
51591+ ino_t saved_ino = 0;
51592+ dev_t saved_dev = 0;
51593
51594 error = user_path_parent(dfd, pathname, &nd, &name);
51595 if (error)
51596@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51597 error = PTR_ERR(dentry);
51598 if (IS_ERR(dentry))
51599 goto exit2;
51600+
51601+ if (dentry->d_inode != NULL) {
51602+ saved_ino = dentry->d_inode->i_ino;
51603+ saved_dev = gr_get_dev_from_dentry(dentry);
51604+
51605+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51606+ error = -EACCES;
51607+ goto exit3;
51608+ }
51609+ }
51610+
51611 error = mnt_want_write(nd.path.mnt);
51612 if (error)
51613 goto exit3;
51614@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51615 if (error)
51616 goto exit4;
51617 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51618+ if (!error && (saved_dev || saved_ino))
51619+ gr_handle_delete(saved_ino, saved_dev);
51620 exit4:
51621 mnt_drop_write(nd.path.mnt);
51622 exit3:
51623@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51624 struct dentry *dentry;
51625 struct nameidata nd;
51626 struct inode *inode = NULL;
51627+ ino_t saved_ino = 0;
51628+ dev_t saved_dev = 0;
51629
51630 error = user_path_parent(dfd, pathname, &nd, &name);
51631 if (error)
51632@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51633 if (nd.last.name[nd.last.len])
51634 goto slashes;
51635 inode = dentry->d_inode;
51636- if (inode)
51637+ if (inode) {
51638+ if (inode->i_nlink <= 1) {
51639+ saved_ino = inode->i_ino;
51640+ saved_dev = gr_get_dev_from_dentry(dentry);
51641+ }
51642+
51643 atomic_inc(&inode->i_count);
51644+
51645+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51646+ error = -EACCES;
51647+ goto exit2;
51648+ }
51649+ }
51650 error = mnt_want_write(nd.path.mnt);
51651 if (error)
51652 goto exit2;
51653@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51654 if (error)
51655 goto exit3;
51656 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51657+ if (!error && (saved_ino || saved_dev))
51658+ gr_handle_delete(saved_ino, saved_dev);
51659 exit3:
51660 mnt_drop_write(nd.path.mnt);
51661 exit2:
51662@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51663 if (IS_ERR(dentry))
51664 goto out_unlock;
51665
51666+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51667+ error = -EACCES;
51668+ goto out_dput;
51669+ }
51670+
51671 error = mnt_want_write(nd.path.mnt);
51672 if (error)
51673 goto out_dput;
51674@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51675 if (error)
51676 goto out_drop_write;
51677 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
51678+ if (!error)
51679+ gr_handle_create(dentry, nd.path.mnt);
51680 out_drop_write:
51681 mnt_drop_write(nd.path.mnt);
51682 out_dput:
51683@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51684 error = PTR_ERR(new_dentry);
51685 if (IS_ERR(new_dentry))
51686 goto out_unlock;
51687+
51688+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
51689+ old_path.dentry->d_inode,
51690+ old_path.dentry->d_inode->i_mode, to)) {
51691+ error = -EACCES;
51692+ goto out_dput;
51693+ }
51694+
51695+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
51696+ old_path.dentry, old_path.mnt, to)) {
51697+ error = -EACCES;
51698+ goto out_dput;
51699+ }
51700+
51701 error = mnt_want_write(nd.path.mnt);
51702 if (error)
51703 goto out_dput;
51704@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51705 if (error)
51706 goto out_drop_write;
51707 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
51708+ if (!error)
51709+ gr_handle_create(new_dentry, nd.path.mnt);
51710 out_drop_write:
51711 mnt_drop_write(nd.path.mnt);
51712 out_dput:
51713@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51714 char *to;
51715 int error;
51716
51717+ pax_track_stack();
51718+
51719 error = user_path_parent(olddfd, oldname, &oldnd, &from);
51720 if (error)
51721 goto exit;
51722@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51723 if (new_dentry == trap)
51724 goto exit5;
51725
51726+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
51727+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
51728+ to);
51729+ if (error)
51730+ goto exit5;
51731+
51732 error = mnt_want_write(oldnd.path.mnt);
51733 if (error)
51734 goto exit5;
51735@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51736 goto exit6;
51737 error = vfs_rename(old_dir->d_inode, old_dentry,
51738 new_dir->d_inode, new_dentry);
51739+ if (!error)
51740+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
51741+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
51742 exit6:
51743 mnt_drop_write(oldnd.path.mnt);
51744 exit5:
51745@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
51746
51747 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
51748 {
51749+ char tmpbuf[64];
51750+ const char *newlink;
51751 int len;
51752
51753 len = PTR_ERR(link);
51754@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
51755 len = strlen(link);
51756 if (len > (unsigned) buflen)
51757 len = buflen;
51758- if (copy_to_user(buffer, link, len))
51759+
51760+ if (len < sizeof(tmpbuf)) {
51761+ memcpy(tmpbuf, link, len);
51762+ newlink = tmpbuf;
51763+ } else
51764+ newlink = link;
51765+
51766+ if (copy_to_user(buffer, newlink, len))
51767 len = -EFAULT;
51768 out:
51769 return len;
51770diff --git a/fs/namespace.c b/fs/namespace.c
51771index 2beb0fb..11a95a5 100644
51772--- a/fs/namespace.c
51773+++ b/fs/namespace.c
51774@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51775 if (!(sb->s_flags & MS_RDONLY))
51776 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
51777 up_write(&sb->s_umount);
51778+
51779+ gr_log_remount(mnt->mnt_devname, retval);
51780+
51781 return retval;
51782 }
51783
51784@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51785 security_sb_umount_busy(mnt);
51786 up_write(&namespace_sem);
51787 release_mounts(&umount_list);
51788+
51789+ gr_log_unmount(mnt->mnt_devname, retval);
51790+
51791 return retval;
51792 }
51793
51794@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51795 if (retval)
51796 goto dput_out;
51797
51798+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
51799+ retval = -EPERM;
51800+ goto dput_out;
51801+ }
51802+
51803+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
51804+ retval = -EPERM;
51805+ goto dput_out;
51806+ }
51807+
51808 if (flags & MS_REMOUNT)
51809 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
51810 data_page);
51811@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51812 dev_name, data_page);
51813 dput_out:
51814 path_put(&path);
51815+
51816+ gr_log_mount(dev_name, dir_name, retval);
51817+
51818 return retval;
51819 }
51820
51821@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
51822 goto out1;
51823 }
51824
51825+ if (gr_handle_chroot_pivot()) {
51826+ error = -EPERM;
51827+ path_put(&old);
51828+ goto out1;
51829+ }
51830+
51831 read_lock(&current->fs->lock);
51832 root = current->fs->root;
51833 path_get(&current->fs->root);
51834diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
51835index b8b5b30..2bd9ccb 100644
51836--- a/fs/ncpfs/dir.c
51837+++ b/fs/ncpfs/dir.c
51838@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
51839 int res, val = 0, len;
51840 __u8 __name[NCP_MAXPATHLEN + 1];
51841
51842+ pax_track_stack();
51843+
51844 parent = dget_parent(dentry);
51845 dir = parent->d_inode;
51846
51847@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
51848 int error, res, len;
51849 __u8 __name[NCP_MAXPATHLEN + 1];
51850
51851+ pax_track_stack();
51852+
51853 lock_kernel();
51854 error = -EIO;
51855 if (!ncp_conn_valid(server))
51856@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
51857 int error, result, len;
51858 int opmode;
51859 __u8 __name[NCP_MAXPATHLEN + 1];
51860-
51861+
51862 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
51863 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
51864
51865+ pax_track_stack();
51866+
51867 error = -EIO;
51868 lock_kernel();
51869 if (!ncp_conn_valid(server))
51870@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51871 int error, len;
51872 __u8 __name[NCP_MAXPATHLEN + 1];
51873
51874+ pax_track_stack();
51875+
51876 DPRINTK("ncp_mkdir: making %s/%s\n",
51877 dentry->d_parent->d_name.name, dentry->d_name.name);
51878
51879@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51880 if (!ncp_conn_valid(server))
51881 goto out;
51882
51883+ pax_track_stack();
51884+
51885 ncp_age_dentry(server, dentry);
51886 len = sizeof(__name);
51887 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
51888@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
51889 int old_len, new_len;
51890 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
51891
51892+ pax_track_stack();
51893+
51894 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
51895 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
51896 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
51897diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
51898index cf98da1..da890a9 100644
51899--- a/fs/ncpfs/inode.c
51900+++ b/fs/ncpfs/inode.c
51901@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
51902 #endif
51903 struct ncp_entry_info finfo;
51904
51905+ pax_track_stack();
51906+
51907 data.wdog_pid = NULL;
51908 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
51909 if (!server)
51910diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
51911index bfaef7b..e9d03ca 100644
51912--- a/fs/nfs/inode.c
51913+++ b/fs/nfs/inode.c
51914@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
51915 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
51916 nfsi->attrtimeo_timestamp = jiffies;
51917
51918- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
51919+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
51920 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
51921 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
51922 else
51923@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
51924 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
51925 }
51926
51927-static atomic_long_t nfs_attr_generation_counter;
51928+static atomic_long_unchecked_t nfs_attr_generation_counter;
51929
51930 static unsigned long nfs_read_attr_generation_counter(void)
51931 {
51932- return atomic_long_read(&nfs_attr_generation_counter);
51933+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
51934 }
51935
51936 unsigned long nfs_inc_attr_generation_counter(void)
51937 {
51938- return atomic_long_inc_return(&nfs_attr_generation_counter);
51939+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
51940 }
51941
51942 void nfs_fattr_init(struct nfs_fattr *fattr)
51943diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
51944index cc2f505..f6a236f 100644
51945--- a/fs/nfsd/lockd.c
51946+++ b/fs/nfsd/lockd.c
51947@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
51948 fput(filp);
51949 }
51950
51951-static struct nlmsvc_binding nfsd_nlm_ops = {
51952+static const struct nlmsvc_binding nfsd_nlm_ops = {
51953 .fopen = nlm_fopen, /* open file for locking */
51954 .fclose = nlm_fclose, /* close file */
51955 };
51956diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
51957index cfc3391..dcc083a 100644
51958--- a/fs/nfsd/nfs4state.c
51959+++ b/fs/nfsd/nfs4state.c
51960@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
51961 unsigned int cmd;
51962 int err;
51963
51964+ pax_track_stack();
51965+
51966 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
51967 (long long) lock->lk_offset,
51968 (long long) lock->lk_length);
51969diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
51970index 4a82a96..0d5fb49 100644
51971--- a/fs/nfsd/nfs4xdr.c
51972+++ b/fs/nfsd/nfs4xdr.c
51973@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
51974 struct nfsd4_compoundres *resp = rqstp->rq_resp;
51975 u32 minorversion = resp->cstate.minorversion;
51976
51977+ pax_track_stack();
51978+
51979 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
51980 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
51981 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
51982diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
51983index 2e09588..596421d 100644
51984--- a/fs/nfsd/vfs.c
51985+++ b/fs/nfsd/vfs.c
51986@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51987 } else {
51988 oldfs = get_fs();
51989 set_fs(KERNEL_DS);
51990- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
51991+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
51992 set_fs(oldfs);
51993 }
51994
51995@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51996
51997 /* Write the data. */
51998 oldfs = get_fs(); set_fs(KERNEL_DS);
51999- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
52000+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
52001 set_fs(oldfs);
52002 if (host_err < 0)
52003 goto out_nfserr;
52004@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
52005 */
52006
52007 oldfs = get_fs(); set_fs(KERNEL_DS);
52008- host_err = inode->i_op->readlink(dentry, buf, *lenp);
52009+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
52010 set_fs(oldfs);
52011
52012 if (host_err < 0)
52013diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52014index f6af760..d0adf34 100644
52015--- a/fs/nilfs2/ioctl.c
52016+++ b/fs/nilfs2/ioctl.c
52017@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52018 unsigned int cmd, void __user *argp)
52019 {
52020 struct nilfs_argv argv[5];
52021- const static size_t argsz[5] = {
52022+ static const size_t argsz[5] = {
52023 sizeof(struct nilfs_vdesc),
52024 sizeof(struct nilfs_period),
52025 sizeof(__u64),
52026@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52027 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52028 goto out_free;
52029
52030+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52031+ goto out_free;
52032+
52033 len = argv[n].v_size * argv[n].v_nmembs;
52034 base = (void __user *)(unsigned long)argv[n].v_base;
52035 if (len == 0) {
52036diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52037index 7e54e52..9337248 100644
52038--- a/fs/notify/dnotify/dnotify.c
52039+++ b/fs/notify/dnotify/dnotify.c
52040@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52041 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52042 }
52043
52044-static struct fsnotify_ops dnotify_fsnotify_ops = {
52045+static const struct fsnotify_ops dnotify_fsnotify_ops = {
52046 .handle_event = dnotify_handle_event,
52047 .should_send_event = dnotify_should_send_event,
52048 .free_group_priv = NULL,
52049diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52050index b8bf53b..c518688 100644
52051--- a/fs/notify/notification.c
52052+++ b/fs/notify/notification.c
52053@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52054 * get set to 0 so it will never get 'freed'
52055 */
52056 static struct fsnotify_event q_overflow_event;
52057-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52058+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52059
52060 /**
52061 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52062@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52063 */
52064 u32 fsnotify_get_cookie(void)
52065 {
52066- return atomic_inc_return(&fsnotify_sync_cookie);
52067+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52068 }
52069 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52070
52071diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52072index 5a9e344..0f8cd28 100644
52073--- a/fs/ntfs/dir.c
52074+++ b/fs/ntfs/dir.c
52075@@ -1328,7 +1328,7 @@ find_next_index_buffer:
52076 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52077 ~(s64)(ndir->itype.index.block_size - 1)));
52078 /* Bounds checks. */
52079- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52080+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52081 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52082 "inode 0x%lx or driver bug.", vdir->i_ino);
52083 goto err_out;
52084diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52085index 663c0e3..b6868e9 100644
52086--- a/fs/ntfs/file.c
52087+++ b/fs/ntfs/file.c
52088@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52089 #endif /* NTFS_RW */
52090 };
52091
52092-const struct file_operations ntfs_empty_file_ops = {};
52093+const struct file_operations ntfs_empty_file_ops __read_only;
52094
52095-const struct inode_operations ntfs_empty_inode_ops = {};
52096+const struct inode_operations ntfs_empty_inode_ops __read_only;
52097diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52098index 1cd2934..880b5d2 100644
52099--- a/fs/ocfs2/cluster/masklog.c
52100+++ b/fs/ocfs2/cluster/masklog.c
52101@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52102 return mlog_mask_store(mlog_attr->mask, buf, count);
52103 }
52104
52105-static struct sysfs_ops mlog_attr_ops = {
52106+static const struct sysfs_ops mlog_attr_ops = {
52107 .show = mlog_show,
52108 .store = mlog_store,
52109 };
52110diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52111index ac10f83..2cd2607 100644
52112--- a/fs/ocfs2/localalloc.c
52113+++ b/fs/ocfs2/localalloc.c
52114@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52115 goto bail;
52116 }
52117
52118- atomic_inc(&osb->alloc_stats.moves);
52119+ atomic_inc_unchecked(&osb->alloc_stats.moves);
52120
52121 status = 0;
52122 bail:
52123diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52124index f010b22..9f9ed34 100644
52125--- a/fs/ocfs2/namei.c
52126+++ b/fs/ocfs2/namei.c
52127@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52128 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52129 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52130
52131+ pax_track_stack();
52132+
52133 /* At some point it might be nice to break this function up a
52134 * bit. */
52135
52136diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52137index d963d86..914cfbd 100644
52138--- a/fs/ocfs2/ocfs2.h
52139+++ b/fs/ocfs2/ocfs2.h
52140@@ -217,11 +217,11 @@ enum ocfs2_vol_state
52141
52142 struct ocfs2_alloc_stats
52143 {
52144- atomic_t moves;
52145- atomic_t local_data;
52146- atomic_t bitmap_data;
52147- atomic_t bg_allocs;
52148- atomic_t bg_extends;
52149+ atomic_unchecked_t moves;
52150+ atomic_unchecked_t local_data;
52151+ atomic_unchecked_t bitmap_data;
52152+ atomic_unchecked_t bg_allocs;
52153+ atomic_unchecked_t bg_extends;
52154 };
52155
52156 enum ocfs2_local_alloc_state
52157diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52158index 79b5dac..d322952 100644
52159--- a/fs/ocfs2/suballoc.c
52160+++ b/fs/ocfs2/suballoc.c
52161@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52162 mlog_errno(status);
52163 goto bail;
52164 }
52165- atomic_inc(&osb->alloc_stats.bg_extends);
52166+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52167
52168 /* You should never ask for this much metadata */
52169 BUG_ON(bits_wanted >
52170@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52171 mlog_errno(status);
52172 goto bail;
52173 }
52174- atomic_inc(&osb->alloc_stats.bg_allocs);
52175+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52176
52177 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52178 ac->ac_bits_given += (*num_bits);
52179@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52180 mlog_errno(status);
52181 goto bail;
52182 }
52183- atomic_inc(&osb->alloc_stats.bg_allocs);
52184+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52185
52186 BUG_ON(num_bits != 1);
52187
52188@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52189 cluster_start,
52190 num_clusters);
52191 if (!status)
52192- atomic_inc(&osb->alloc_stats.local_data);
52193+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
52194 } else {
52195 if (min_clusters > (osb->bitmap_cpg - 1)) {
52196 /* The only paths asking for contiguousness
52197@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52198 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52199 bg_blkno,
52200 bg_bit_off);
52201- atomic_inc(&osb->alloc_stats.bitmap_data);
52202+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52203 }
52204 }
52205 if (status < 0) {
52206diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52207index 9f55be4..a3f8048 100644
52208--- a/fs/ocfs2/super.c
52209+++ b/fs/ocfs2/super.c
52210@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52211 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52212 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52213 "Stats",
52214- atomic_read(&osb->alloc_stats.bitmap_data),
52215- atomic_read(&osb->alloc_stats.local_data),
52216- atomic_read(&osb->alloc_stats.bg_allocs),
52217- atomic_read(&osb->alloc_stats.moves),
52218- atomic_read(&osb->alloc_stats.bg_extends));
52219+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52220+ atomic_read_unchecked(&osb->alloc_stats.local_data),
52221+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52222+ atomic_read_unchecked(&osb->alloc_stats.moves),
52223+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52224
52225 out += snprintf(buf + out, len - out,
52226 "%10s => State: %u Descriptor: %llu Size: %u bits "
52227@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52228 spin_lock_init(&osb->osb_xattr_lock);
52229 ocfs2_init_inode_steal_slot(osb);
52230
52231- atomic_set(&osb->alloc_stats.moves, 0);
52232- atomic_set(&osb->alloc_stats.local_data, 0);
52233- atomic_set(&osb->alloc_stats.bitmap_data, 0);
52234- atomic_set(&osb->alloc_stats.bg_allocs, 0);
52235- atomic_set(&osb->alloc_stats.bg_extends, 0);
52236+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52237+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52238+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52239+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52240+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52241
52242 /* Copy the blockcheck stats from the superblock probe */
52243 osb->osb_ecc_stats = *stats;
52244diff --git a/fs/open.c b/fs/open.c
52245index 4f01e06..091f6c3 100644
52246--- a/fs/open.c
52247+++ b/fs/open.c
52248@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52249 error = locks_verify_truncate(inode, NULL, length);
52250 if (!error)
52251 error = security_path_truncate(&path, length, 0);
52252+
52253+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52254+ error = -EACCES;
52255+
52256 if (!error) {
52257 vfs_dq_init(inode);
52258 error = do_truncate(path.dentry, length, 0, NULL);
52259@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52260 if (__mnt_is_readonly(path.mnt))
52261 res = -EROFS;
52262
52263+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52264+ res = -EACCES;
52265+
52266 out_path_release:
52267 path_put(&path);
52268 out:
52269@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52270 if (error)
52271 goto dput_and_out;
52272
52273+ gr_log_chdir(path.dentry, path.mnt);
52274+
52275 set_fs_pwd(current->fs, &path);
52276
52277 dput_and_out:
52278@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52279 goto out_putf;
52280
52281 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52282+
52283+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52284+ error = -EPERM;
52285+
52286+ if (!error)
52287+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52288+
52289 if (!error)
52290 set_fs_pwd(current->fs, &file->f_path);
52291 out_putf:
52292@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52293 if (!capable(CAP_SYS_CHROOT))
52294 goto dput_and_out;
52295
52296+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52297+ goto dput_and_out;
52298+
52299 set_fs_root(current->fs, &path);
52300+
52301+ gr_handle_chroot_chdir(&path);
52302+
52303 error = 0;
52304 dput_and_out:
52305 path_put(&path);
52306@@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52307 err = mnt_want_write_file(file);
52308 if (err)
52309 goto out_putf;
52310+
52311 mutex_lock(&inode->i_mutex);
52312+
52313+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
52314+ err = -EACCES;
52315+ goto out_unlock;
52316+ }
52317+
52318 if (mode == (mode_t) -1)
52319 mode = inode->i_mode;
52320+
52321+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
52322+ err = -EPERM;
52323+ goto out_unlock;
52324+ }
52325+
52326 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52327 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52328 err = notify_change(dentry, &newattrs);
52329+
52330+out_unlock:
52331 mutex_unlock(&inode->i_mutex);
52332 mnt_drop_write(file->f_path.mnt);
52333 out_putf:
52334@@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52335 error = mnt_want_write(path.mnt);
52336 if (error)
52337 goto dput_and_out;
52338+
52339 mutex_lock(&inode->i_mutex);
52340+
52341+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
52342+ error = -EACCES;
52343+ goto out_unlock;
52344+ }
52345+
52346 if (mode == (mode_t) -1)
52347 mode = inode->i_mode;
52348+
52349+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
52350+ error = -EACCES;
52351+ goto out_unlock;
52352+ }
52353+
52354 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52355 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52356 error = notify_change(path.dentry, &newattrs);
52357+
52358+out_unlock:
52359 mutex_unlock(&inode->i_mutex);
52360 mnt_drop_write(path.mnt);
52361 dput_and_out:
52362@@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52363 return sys_fchmodat(AT_FDCWD, filename, mode);
52364 }
52365
52366-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52367+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52368 {
52369 struct inode *inode = dentry->d_inode;
52370 int error;
52371 struct iattr newattrs;
52372
52373+ if (!gr_acl_handle_chown(dentry, mnt))
52374+ return -EACCES;
52375+
52376 newattrs.ia_valid = ATTR_CTIME;
52377 if (user != (uid_t) -1) {
52378 newattrs.ia_valid |= ATTR_UID;
52379@@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52380 error = mnt_want_write(path.mnt);
52381 if (error)
52382 goto out_release;
52383- error = chown_common(path.dentry, user, group);
52384+ error = chown_common(path.dentry, user, group, path.mnt);
52385 mnt_drop_write(path.mnt);
52386 out_release:
52387 path_put(&path);
52388@@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52389 error = mnt_want_write(path.mnt);
52390 if (error)
52391 goto out_release;
52392- error = chown_common(path.dentry, user, group);
52393+ error = chown_common(path.dentry, user, group, path.mnt);
52394 mnt_drop_write(path.mnt);
52395 out_release:
52396 path_put(&path);
52397@@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52398 error = mnt_want_write(path.mnt);
52399 if (error)
52400 goto out_release;
52401- error = chown_common(path.dentry, user, group);
52402+ error = chown_common(path.dentry, user, group, path.mnt);
52403 mnt_drop_write(path.mnt);
52404 out_release:
52405 path_put(&path);
52406@@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52407 goto out_fput;
52408 dentry = file->f_path.dentry;
52409 audit_inode(NULL, dentry);
52410- error = chown_common(dentry, user, group);
52411+ error = chown_common(dentry, user, group, file->f_path.mnt);
52412 mnt_drop_write(file->f_path.mnt);
52413 out_fput:
52414 fput(file);
52415@@ -1036,7 +1091,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52416 if (!IS_ERR(tmp)) {
52417 fd = get_unused_fd_flags(flags);
52418 if (fd >= 0) {
52419- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52420+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52421 if (IS_ERR(f)) {
52422 put_unused_fd(fd);
52423 fd = PTR_ERR(f);
52424diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52425index 6ab70f4..f4103d1 100644
52426--- a/fs/partitions/efi.c
52427+++ b/fs/partitions/efi.c
52428@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52429 if (!bdev || !gpt)
52430 return NULL;
52431
52432+ if (!le32_to_cpu(gpt->num_partition_entries))
52433+ return NULL;
52434+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52435+ if (!pte)
52436+ return NULL;
52437+
52438 count = le32_to_cpu(gpt->num_partition_entries) *
52439 le32_to_cpu(gpt->sizeof_partition_entry);
52440- if (!count)
52441- return NULL;
52442- pte = kzalloc(count, GFP_KERNEL);
52443- if (!pte)
52444- return NULL;
52445-
52446 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52447 (u8 *) pte,
52448 count) < count) {
52449diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52450index dd6efdb..3babc6c 100644
52451--- a/fs/partitions/ldm.c
52452+++ b/fs/partitions/ldm.c
52453@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52454 ldm_error ("A VBLK claims to have %d parts.", num);
52455 return false;
52456 }
52457+
52458 if (rec >= num) {
52459 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52460 return false;
52461@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52462 goto found;
52463 }
52464
52465- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52466+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52467 if (!f) {
52468 ldm_crit ("Out of memory.");
52469 return false;
52470diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52471index 5765198..7f8e9e0 100644
52472--- a/fs/partitions/mac.c
52473+++ b/fs/partitions/mac.c
52474@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52475 return 0; /* not a MacOS disk */
52476 }
52477 blocks_in_map = be32_to_cpu(part->map_count);
52478- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52479- put_dev_sector(sect);
52480- return 0;
52481- }
52482 printk(" [mac]");
52483+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52484+ put_dev_sector(sect);
52485+ return 0;
52486+ }
52487 for (slot = 1; slot <= blocks_in_map; ++slot) {
52488 int pos = slot * secsize;
52489 put_dev_sector(sect);
52490diff --git a/fs/pipe.c b/fs/pipe.c
52491index d0cc080..8a6f211 100644
52492--- a/fs/pipe.c
52493+++ b/fs/pipe.c
52494@@ -401,9 +401,9 @@ redo:
52495 }
52496 if (bufs) /* More to do? */
52497 continue;
52498- if (!pipe->writers)
52499+ if (!atomic_read(&pipe->writers))
52500 break;
52501- if (!pipe->waiting_writers) {
52502+ if (!atomic_read(&pipe->waiting_writers)) {
52503 /* syscall merging: Usually we must not sleep
52504 * if O_NONBLOCK is set, or if we got some data.
52505 * But if a writer sleeps in kernel space, then
52506@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52507 mutex_lock(&inode->i_mutex);
52508 pipe = inode->i_pipe;
52509
52510- if (!pipe->readers) {
52511+ if (!atomic_read(&pipe->readers)) {
52512 send_sig(SIGPIPE, current, 0);
52513 ret = -EPIPE;
52514 goto out;
52515@@ -511,7 +511,7 @@ redo1:
52516 for (;;) {
52517 int bufs;
52518
52519- if (!pipe->readers) {
52520+ if (!atomic_read(&pipe->readers)) {
52521 send_sig(SIGPIPE, current, 0);
52522 if (!ret)
52523 ret = -EPIPE;
52524@@ -597,9 +597,9 @@ redo2:
52525 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52526 do_wakeup = 0;
52527 }
52528- pipe->waiting_writers++;
52529+ atomic_inc(&pipe->waiting_writers);
52530 pipe_wait(pipe);
52531- pipe->waiting_writers--;
52532+ atomic_dec(&pipe->waiting_writers);
52533 }
52534 out:
52535 mutex_unlock(&inode->i_mutex);
52536@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52537 mask = 0;
52538 if (filp->f_mode & FMODE_READ) {
52539 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52540- if (!pipe->writers && filp->f_version != pipe->w_counter)
52541+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52542 mask |= POLLHUP;
52543 }
52544
52545@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52546 * Most Unices do not set POLLERR for FIFOs but on Linux they
52547 * behave exactly like pipes for poll().
52548 */
52549- if (!pipe->readers)
52550+ if (!atomic_read(&pipe->readers))
52551 mask |= POLLERR;
52552 }
52553
52554@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52555
52556 mutex_lock(&inode->i_mutex);
52557 pipe = inode->i_pipe;
52558- pipe->readers -= decr;
52559- pipe->writers -= decw;
52560+ atomic_sub(decr, &pipe->readers);
52561+ atomic_sub(decw, &pipe->writers);
52562
52563- if (!pipe->readers && !pipe->writers) {
52564+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52565 free_pipe_info(inode);
52566 } else {
52567 wake_up_interruptible_sync(&pipe->wait);
52568@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52569
52570 if (inode->i_pipe) {
52571 ret = 0;
52572- inode->i_pipe->readers++;
52573+ atomic_inc(&inode->i_pipe->readers);
52574 }
52575
52576 mutex_unlock(&inode->i_mutex);
52577@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52578
52579 if (inode->i_pipe) {
52580 ret = 0;
52581- inode->i_pipe->writers++;
52582+ atomic_inc(&inode->i_pipe->writers);
52583 }
52584
52585 mutex_unlock(&inode->i_mutex);
52586@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52587 if (inode->i_pipe) {
52588 ret = 0;
52589 if (filp->f_mode & FMODE_READ)
52590- inode->i_pipe->readers++;
52591+ atomic_inc(&inode->i_pipe->readers);
52592 if (filp->f_mode & FMODE_WRITE)
52593- inode->i_pipe->writers++;
52594+ atomic_inc(&inode->i_pipe->writers);
52595 }
52596
52597 mutex_unlock(&inode->i_mutex);
52598@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52599 inode->i_pipe = NULL;
52600 }
52601
52602-static struct vfsmount *pipe_mnt __read_mostly;
52603+struct vfsmount *pipe_mnt __read_mostly;
52604 static int pipefs_delete_dentry(struct dentry *dentry)
52605 {
52606 /*
52607@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52608 goto fail_iput;
52609 inode->i_pipe = pipe;
52610
52611- pipe->readers = pipe->writers = 1;
52612+ atomic_set(&pipe->readers, 1);
52613+ atomic_set(&pipe->writers, 1);
52614 inode->i_fop = &rdwr_pipefifo_fops;
52615
52616 /*
52617diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52618index 50f8f06..c5755df 100644
52619--- a/fs/proc/Kconfig
52620+++ b/fs/proc/Kconfig
52621@@ -30,12 +30,12 @@ config PROC_FS
52622
52623 config PROC_KCORE
52624 bool "/proc/kcore support" if !ARM
52625- depends on PROC_FS && MMU
52626+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
52627
52628 config PROC_VMCORE
52629 bool "/proc/vmcore support (EXPERIMENTAL)"
52630- depends on PROC_FS && CRASH_DUMP
52631- default y
52632+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
52633+ default n
52634 help
52635 Exports the dump image of crashed kernel in ELF format.
52636
52637@@ -59,8 +59,8 @@ config PROC_SYSCTL
52638 limited in memory.
52639
52640 config PROC_PAGE_MONITOR
52641- default y
52642- depends on PROC_FS && MMU
52643+ default n
52644+ depends on PROC_FS && MMU && !GRKERNSEC
52645 bool "Enable /proc page monitoring" if EMBEDDED
52646 help
52647 Various /proc files exist to monitor process memory utilization:
52648diff --git a/fs/proc/array.c b/fs/proc/array.c
52649index c5ef152..24a1b87 100644
52650--- a/fs/proc/array.c
52651+++ b/fs/proc/array.c
52652@@ -60,6 +60,7 @@
52653 #include <linux/tty.h>
52654 #include <linux/string.h>
52655 #include <linux/mman.h>
52656+#include <linux/grsecurity.h>
52657 #include <linux/proc_fs.h>
52658 #include <linux/ioport.h>
52659 #include <linux/uaccess.h>
52660@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
52661 p->nivcsw);
52662 }
52663
52664+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52665+static inline void task_pax(struct seq_file *m, struct task_struct *p)
52666+{
52667+ if (p->mm)
52668+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
52669+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
52670+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
52671+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
52672+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
52673+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
52674+ else
52675+ seq_printf(m, "PaX:\t-----\n");
52676+}
52677+#endif
52678+
52679 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52680 struct pid *pid, struct task_struct *task)
52681 {
52682@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52683 task_cap(m, task);
52684 cpuset_task_status_allowed(m, task);
52685 task_context_switch_counts(m, task);
52686+
52687+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52688+ task_pax(m, task);
52689+#endif
52690+
52691+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
52692+ task_grsec_rbac(m, task);
52693+#endif
52694+
52695 return 0;
52696 }
52697
52698+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52699+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52700+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52701+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52702+#endif
52703+
52704 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52705 struct pid *pid, struct task_struct *task, int whole)
52706 {
52707@@ -358,9 +389,18 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52708 cputime_t cutime, cstime, utime, stime;
52709 cputime_t cgtime, gtime;
52710 unsigned long rsslim = 0;
52711- char tcomm[sizeof(task->comm)];
52712+ char tcomm[sizeof(task->comm)] = { 0 };
52713 unsigned long flags;
52714
52715+ pax_track_stack();
52716+
52717+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52718+ if (current->exec_id != m->exec_id) {
52719+ gr_log_badprocpid("stat");
52720+ return 0;
52721+ }
52722+#endif
52723+
52724 state = *get_task_state(task);
52725 vsize = eip = esp = 0;
52726 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
52727@@ -433,6 +473,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52728 gtime = task_gtime(task);
52729 }
52730
52731+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52732+ if (PAX_RAND_FLAGS(mm)) {
52733+ eip = 0;
52734+ esp = 0;
52735+ wchan = 0;
52736+ }
52737+#endif
52738+#ifdef CONFIG_GRKERNSEC_HIDESYM
52739+ wchan = 0;
52740+ eip =0;
52741+ esp =0;
52742+#endif
52743+
52744 /* scale priority and nice values from timeslices to -20..20 */
52745 /* to make it look like a "normal" Unix priority/nice value */
52746 priority = task_prio(task);
52747@@ -473,9 +526,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52748 vsize,
52749 mm ? get_mm_rss(mm) : 0,
52750 rsslim,
52751+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52752+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
52753+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
52754+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
52755+#else
52756 mm ? (permitted ? mm->start_code : 1) : 0,
52757 mm ? (permitted ? mm->end_code : 1) : 0,
52758 (permitted && mm) ? mm->start_stack : 0,
52759+#endif
52760 esp,
52761 eip,
52762 /* The signal information here is obsolete.
52763@@ -519,6 +578,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52764 int size = 0, resident = 0, shared = 0, text = 0, lib = 0, data = 0;
52765 struct mm_struct *mm = get_task_mm(task);
52766
52767+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52768+ if (current->exec_id != m->exec_id) {
52769+ gr_log_badprocpid("statm");
52770+ return 0;
52771+ }
52772+#endif
52773+
52774 if (mm) {
52775 size = task_statm(mm, &shared, &text, &data, &resident);
52776 mmput(mm);
52777@@ -528,3 +594,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52778
52779 return 0;
52780 }
52781+
52782+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52783+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
52784+{
52785+ u32 curr_ip = 0;
52786+ unsigned long flags;
52787+
52788+ if (lock_task_sighand(task, &flags)) {
52789+ curr_ip = task->signal->curr_ip;
52790+ unlock_task_sighand(task, &flags);
52791+ }
52792+
52793+ return sprintf(buffer, "%pI4\n", &curr_ip);
52794+}
52795+#endif
52796diff --git a/fs/proc/base.c b/fs/proc/base.c
52797index 67f7dc0..67ab883 100644
52798--- a/fs/proc/base.c
52799+++ b/fs/proc/base.c
52800@@ -102,6 +102,22 @@ struct pid_entry {
52801 union proc_op op;
52802 };
52803
52804+struct getdents_callback {
52805+ struct linux_dirent __user * current_dir;
52806+ struct linux_dirent __user * previous;
52807+ struct file * file;
52808+ int count;
52809+ int error;
52810+};
52811+
52812+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
52813+ loff_t offset, u64 ino, unsigned int d_type)
52814+{
52815+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
52816+ buf->error = -EINVAL;
52817+ return 0;
52818+}
52819+
52820 #define NOD(NAME, MODE, IOP, FOP, OP) { \
52821 .name = (NAME), \
52822 .len = sizeof(NAME) - 1, \
52823@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
52824 if (task == current)
52825 return 0;
52826
52827+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
52828+ return -EPERM;
52829+
52830 /*
52831 * If current is actively ptrace'ing, and would also be
52832 * permitted to freshly attach with ptrace now, permit it.
52833@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
52834 if (!mm->arg_end)
52835 goto out_mm; /* Shh! No looking before we're done */
52836
52837+ if (gr_acl_handle_procpidmem(task))
52838+ goto out_mm;
52839+
52840 len = mm->arg_end - mm->arg_start;
52841
52842 if (len > PAGE_SIZE)
52843@@ -287,12 +309,28 @@ out:
52844 return res;
52845 }
52846
52847+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52848+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52849+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52850+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52851+#endif
52852+
52853 static int proc_pid_auxv(struct task_struct *task, char *buffer)
52854 {
52855 int res = 0;
52856 struct mm_struct *mm = get_task_mm(task);
52857 if (mm) {
52858 unsigned int nwords = 0;
52859+
52860+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52861+ /* allow if we're currently ptracing this task */
52862+ if (PAX_RAND_FLAGS(mm) &&
52863+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
52864+ mmput(mm);
52865+ return 0;
52866+ }
52867+#endif
52868+
52869 do {
52870 nwords += 2;
52871 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
52872@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
52873 }
52874
52875
52876-#ifdef CONFIG_KALLSYMS
52877+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52878 /*
52879 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
52880 * Returns the resolved symbol. If that fails, simply return the address.
52881@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
52882 mutex_unlock(&task->cred_guard_mutex);
52883 }
52884
52885-#ifdef CONFIG_STACKTRACE
52886+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52887
52888 #define MAX_STACK_TRACE_DEPTH 64
52889
52890@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
52891 return count;
52892 }
52893
52894-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52895+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52896 static int proc_pid_syscall(struct task_struct *task, char *buffer)
52897 {
52898 long nr;
52899@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
52900 /************************************************************************/
52901
52902 /* permission checks */
52903-static int proc_fd_access_allowed(struct inode *inode)
52904+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
52905 {
52906 struct task_struct *task;
52907 int allowed = 0;
52908@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
52909 */
52910 task = get_proc_task(inode);
52911 if (task) {
52912- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52913+ if (log)
52914+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
52915+ else
52916+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52917 put_task_struct(task);
52918 }
52919 return allowed;
52920@@ -809,6 +850,8 @@ static int mem_open(struct inode* inode, struct file* file)
52921 return 0;
52922 }
52923
52924+static int task_dumpable(struct task_struct *task);
52925+
52926 static ssize_t mem_read(struct file * file, char __user * buf,
52927 size_t count, loff_t *ppos)
52928 {
52929@@ -824,6 +867,12 @@ static ssize_t mem_read(struct file * file, char __user * buf,
52930 if (check_mem_permission(task))
52931 goto out;
52932
52933+ // XXX: temporary workaround
52934+ if (!task_dumpable(task) && task == current) {
52935+ ret = -EACCES;
52936+ goto out;
52937+ }
52938+
52939 ret = -ENOMEM;
52940 page = (char *)__get_free_page(GFP_TEMPORARY);
52941 if (!page)
52942@@ -963,6 +1012,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
52943 if (!task)
52944 goto out_no_task;
52945
52946+ if (gr_acl_handle_procpidmem(task))
52947+ goto out;
52948+
52949 if (!ptrace_may_access(task, PTRACE_MODE_READ))
52950 goto out;
52951
52952@@ -1377,7 +1429,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
52953 path_put(&nd->path);
52954
52955 /* Are we allowed to snoop on the tasks file descriptors? */
52956- if (!proc_fd_access_allowed(inode))
52957+ if (!proc_fd_access_allowed(inode,0))
52958 goto out;
52959
52960 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
52961@@ -1417,8 +1469,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
52962 struct path path;
52963
52964 /* Are we allowed to snoop on the tasks file descriptors? */
52965- if (!proc_fd_access_allowed(inode))
52966- goto out;
52967+ /* logging this is needed for learning on chromium to work properly,
52968+ but we don't want to flood the logs from 'ps' which does a readlink
52969+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
52970+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
52971+ */
52972+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
52973+ if (!proc_fd_access_allowed(inode,0))
52974+ goto out;
52975+ } else {
52976+ if (!proc_fd_access_allowed(inode,1))
52977+ goto out;
52978+ }
52979
52980 error = PROC_I(inode)->op.proc_get_link(inode, &path);
52981 if (error)
52982@@ -1483,7 +1545,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
52983 rcu_read_lock();
52984 cred = __task_cred(task);
52985 inode->i_uid = cred->euid;
52986+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52987+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52988+#else
52989 inode->i_gid = cred->egid;
52990+#endif
52991 rcu_read_unlock();
52992 }
52993 security_task_to_inode(task, inode);
52994@@ -1501,6 +1567,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52995 struct inode *inode = dentry->d_inode;
52996 struct task_struct *task;
52997 const struct cred *cred;
52998+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52999+ const struct cred *tmpcred = current_cred();
53000+#endif
53001
53002 generic_fillattr(inode, stat);
53003
53004@@ -1508,13 +1577,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
53005 stat->uid = 0;
53006 stat->gid = 0;
53007 task = pid_task(proc_pid(inode), PIDTYPE_PID);
53008+
53009+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
53010+ rcu_read_unlock();
53011+ return -ENOENT;
53012+ }
53013+
53014 if (task) {
53015+ cred = __task_cred(task);
53016+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53017+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
53018+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53019+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53020+#endif
53021+ ) {
53022+#endif
53023 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53024+#ifdef CONFIG_GRKERNSEC_PROC_USER
53025+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53026+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53027+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53028+#endif
53029 task_dumpable(task)) {
53030- cred = __task_cred(task);
53031 stat->uid = cred->euid;
53032+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53033+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53034+#else
53035 stat->gid = cred->egid;
53036+#endif
53037 }
53038+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53039+ } else {
53040+ rcu_read_unlock();
53041+ return -ENOENT;
53042+ }
53043+#endif
53044 }
53045 rcu_read_unlock();
53046 return 0;
53047@@ -1545,11 +1642,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53048
53049 if (task) {
53050 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53051+#ifdef CONFIG_GRKERNSEC_PROC_USER
53052+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53053+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53054+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53055+#endif
53056 task_dumpable(task)) {
53057 rcu_read_lock();
53058 cred = __task_cred(task);
53059 inode->i_uid = cred->euid;
53060+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53061+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53062+#else
53063 inode->i_gid = cred->egid;
53064+#endif
53065 rcu_read_unlock();
53066 } else {
53067 inode->i_uid = 0;
53068@@ -1670,7 +1776,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53069 int fd = proc_fd(inode);
53070
53071 if (task) {
53072- files = get_files_struct(task);
53073+ if (!gr_acl_handle_procpidmem(task))
53074+ files = get_files_struct(task);
53075 put_task_struct(task);
53076 }
53077 if (files) {
53078@@ -1922,12 +2029,22 @@ static const struct file_operations proc_fd_operations = {
53079 static int proc_fd_permission(struct inode *inode, int mask)
53080 {
53081 int rv;
53082+ struct task_struct *task;
53083
53084 rv = generic_permission(inode, mask, NULL);
53085- if (rv == 0)
53086- return 0;
53087+
53088 if (task_pid(current) == proc_pid(inode))
53089 rv = 0;
53090+
53091+ task = get_proc_task(inode);
53092+ if (task == NULL)
53093+ return rv;
53094+
53095+ if (gr_acl_handle_procpidmem(task))
53096+ rv = -EACCES;
53097+
53098+ put_task_struct(task);
53099+
53100 return rv;
53101 }
53102
53103@@ -2036,6 +2153,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53104 if (!task)
53105 goto out_no_task;
53106
53107+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53108+ goto out;
53109+
53110 /*
53111 * Yes, it does not scale. And it should not. Don't add
53112 * new entries into /proc/<tgid>/ without very good reasons.
53113@@ -2080,6 +2200,9 @@ static int proc_pident_readdir(struct file *filp,
53114 if (!task)
53115 goto out_no_task;
53116
53117+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53118+ goto out;
53119+
53120 ret = 0;
53121 i = filp->f_pos;
53122 switch (i) {
53123@@ -2347,7 +2470,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53124 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53125 void *cookie)
53126 {
53127- char *s = nd_get_link(nd);
53128+ const char *s = nd_get_link(nd);
53129 if (!IS_ERR(s))
53130 __putname(s);
53131 }
53132@@ -2553,7 +2676,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53133 #ifdef CONFIG_SCHED_DEBUG
53134 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53135 #endif
53136-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53137+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53138 INF("syscall", S_IRUGO, proc_pid_syscall),
53139 #endif
53140 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53141@@ -2578,10 +2701,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53142 #ifdef CONFIG_SECURITY
53143 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53144 #endif
53145-#ifdef CONFIG_KALLSYMS
53146+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53147 INF("wchan", S_IRUGO, proc_pid_wchan),
53148 #endif
53149-#ifdef CONFIG_STACKTRACE
53150+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53151 ONE("stack", S_IRUGO, proc_pid_stack),
53152 #endif
53153 #ifdef CONFIG_SCHEDSTATS
53154@@ -2611,6 +2734,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53155 #ifdef CONFIG_TASK_IO_ACCOUNTING
53156 INF("io", S_IRUSR, proc_tgid_io_accounting),
53157 #endif
53158+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53159+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53160+#endif
53161 };
53162
53163 static int proc_tgid_base_readdir(struct file * filp,
53164@@ -2735,7 +2861,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53165 if (!inode)
53166 goto out;
53167
53168+#ifdef CONFIG_GRKERNSEC_PROC_USER
53169+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53170+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53171+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53172+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53173+#else
53174 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53175+#endif
53176 inode->i_op = &proc_tgid_base_inode_operations;
53177 inode->i_fop = &proc_tgid_base_operations;
53178 inode->i_flags|=S_IMMUTABLE;
53179@@ -2777,7 +2910,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53180 if (!task)
53181 goto out;
53182
53183+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53184+ goto out_put_task;
53185+
53186 result = proc_pid_instantiate(dir, dentry, task, NULL);
53187+out_put_task:
53188 put_task_struct(task);
53189 out:
53190 return result;
53191@@ -2842,6 +2979,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53192 {
53193 unsigned int nr;
53194 struct task_struct *reaper;
53195+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53196+ const struct cred *tmpcred = current_cred();
53197+ const struct cred *itercred;
53198+#endif
53199+ filldir_t __filldir = filldir;
53200 struct tgid_iter iter;
53201 struct pid_namespace *ns;
53202
53203@@ -2865,8 +3007,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53204 for (iter = next_tgid(ns, iter);
53205 iter.task;
53206 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53207+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53208+ rcu_read_lock();
53209+ itercred = __task_cred(iter.task);
53210+#endif
53211+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53212+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53213+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53214+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53215+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53216+#endif
53217+ )
53218+#endif
53219+ )
53220+ __filldir = &gr_fake_filldir;
53221+ else
53222+ __filldir = filldir;
53223+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53224+ rcu_read_unlock();
53225+#endif
53226 filp->f_pos = iter.tgid + TGID_OFFSET;
53227- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53228+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53229 put_task_struct(iter.task);
53230 goto out;
53231 }
53232@@ -2892,7 +3053,7 @@ static const struct pid_entry tid_base_stuff[] = {
53233 #ifdef CONFIG_SCHED_DEBUG
53234 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53235 #endif
53236-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53237+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53238 INF("syscall", S_IRUGO, proc_pid_syscall),
53239 #endif
53240 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53241@@ -2916,10 +3077,10 @@ static const struct pid_entry tid_base_stuff[] = {
53242 #ifdef CONFIG_SECURITY
53243 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53244 #endif
53245-#ifdef CONFIG_KALLSYMS
53246+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53247 INF("wchan", S_IRUGO, proc_pid_wchan),
53248 #endif
53249-#ifdef CONFIG_STACKTRACE
53250+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53251 ONE("stack", S_IRUGO, proc_pid_stack),
53252 #endif
53253 #ifdef CONFIG_SCHEDSTATS
53254diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53255index 82676e3..5f8518a 100644
53256--- a/fs/proc/cmdline.c
53257+++ b/fs/proc/cmdline.c
53258@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53259
53260 static int __init proc_cmdline_init(void)
53261 {
53262+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53263+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53264+#else
53265 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53266+#endif
53267 return 0;
53268 }
53269 module_init(proc_cmdline_init);
53270diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53271index 59ee7da..469b4b6 100644
53272--- a/fs/proc/devices.c
53273+++ b/fs/proc/devices.c
53274@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53275
53276 static int __init proc_devices_init(void)
53277 {
53278+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53279+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53280+#else
53281 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53282+#endif
53283 return 0;
53284 }
53285 module_init(proc_devices_init);
53286diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53287index d78ade3..81767f9 100644
53288--- a/fs/proc/inode.c
53289+++ b/fs/proc/inode.c
53290@@ -18,12 +18,19 @@
53291 #include <linux/module.h>
53292 #include <linux/smp_lock.h>
53293 #include <linux/sysctl.h>
53294+#include <linux/grsecurity.h>
53295
53296 #include <asm/system.h>
53297 #include <asm/uaccess.h>
53298
53299 #include "internal.h"
53300
53301+#ifdef CONFIG_PROC_SYSCTL
53302+extern const struct inode_operations proc_sys_inode_operations;
53303+extern const struct inode_operations proc_sys_dir_operations;
53304+#endif
53305+
53306+
53307 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53308 {
53309 atomic_inc(&de->count);
53310@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53311 de_put(de);
53312 if (PROC_I(inode)->sysctl)
53313 sysctl_head_put(PROC_I(inode)->sysctl);
53314+
53315+#ifdef CONFIG_PROC_SYSCTL
53316+ if (inode->i_op == &proc_sys_inode_operations ||
53317+ inode->i_op == &proc_sys_dir_operations)
53318+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53319+#endif
53320+
53321 clear_inode(inode);
53322 }
53323
53324@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53325 if (de->mode) {
53326 inode->i_mode = de->mode;
53327 inode->i_uid = de->uid;
53328+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53329+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53330+#else
53331 inode->i_gid = de->gid;
53332+#endif
53333 }
53334 if (de->size)
53335 inode->i_size = de->size;
53336diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53337index 753ca37..26bcf3b 100644
53338--- a/fs/proc/internal.h
53339+++ b/fs/proc/internal.h
53340@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53341 struct pid *pid, struct task_struct *task);
53342 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53343 struct pid *pid, struct task_struct *task);
53344+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53345+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53346+#endif
53347 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53348
53349 extern const struct file_operations proc_maps_operations;
53350diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53351index b442dac..aab29cb 100644
53352--- a/fs/proc/kcore.c
53353+++ b/fs/proc/kcore.c
53354@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53355 off_t offset = 0;
53356 struct kcore_list *m;
53357
53358+ pax_track_stack();
53359+
53360 /* setup ELF header */
53361 elf = (struct elfhdr *) bufp;
53362 bufp += sizeof(struct elfhdr);
53363@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53364 * the addresses in the elf_phdr on our list.
53365 */
53366 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53367- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53368+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53369+ if (tsz > buflen)
53370 tsz = buflen;
53371-
53372+
53373 while (buflen) {
53374 struct kcore_list *m;
53375
53376@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53377 kfree(elf_buf);
53378 } else {
53379 if (kern_addr_valid(start)) {
53380- unsigned long n;
53381+ char *elf_buf;
53382+ mm_segment_t oldfs;
53383
53384- n = copy_to_user(buffer, (char *)start, tsz);
53385- /*
53386- * We cannot distingush between fault on source
53387- * and fault on destination. When this happens
53388- * we clear too and hope it will trigger the
53389- * EFAULT again.
53390- */
53391- if (n) {
53392- if (clear_user(buffer + tsz - n,
53393- n))
53394+ elf_buf = kmalloc(tsz, GFP_KERNEL);
53395+ if (!elf_buf)
53396+ return -ENOMEM;
53397+ oldfs = get_fs();
53398+ set_fs(KERNEL_DS);
53399+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53400+ set_fs(oldfs);
53401+ if (copy_to_user(buffer, elf_buf, tsz)) {
53402+ kfree(elf_buf);
53403 return -EFAULT;
53404+ }
53405 }
53406+ set_fs(oldfs);
53407+ kfree(elf_buf);
53408 } else {
53409 if (clear_user(buffer, tsz))
53410 return -EFAULT;
53411@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53412
53413 static int open_kcore(struct inode *inode, struct file *filp)
53414 {
53415+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53416+ return -EPERM;
53417+#endif
53418 if (!capable(CAP_SYS_RAWIO))
53419 return -EPERM;
53420 if (kcore_need_update)
53421diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53422index 7ca7834..cfe90a4 100644
53423--- a/fs/proc/kmsg.c
53424+++ b/fs/proc/kmsg.c
53425@@ -12,37 +12,37 @@
53426 #include <linux/poll.h>
53427 #include <linux/proc_fs.h>
53428 #include <linux/fs.h>
53429+#include <linux/syslog.h>
53430
53431 #include <asm/uaccess.h>
53432 #include <asm/io.h>
53433
53434 extern wait_queue_head_t log_wait;
53435
53436-extern int do_syslog(int type, char __user *bug, int count);
53437-
53438 static int kmsg_open(struct inode * inode, struct file * file)
53439 {
53440- return do_syslog(1,NULL,0);
53441+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53442 }
53443
53444 static int kmsg_release(struct inode * inode, struct file * file)
53445 {
53446- (void) do_syslog(0,NULL,0);
53447+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53448 return 0;
53449 }
53450
53451 static ssize_t kmsg_read(struct file *file, char __user *buf,
53452 size_t count, loff_t *ppos)
53453 {
53454- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
53455+ if ((file->f_flags & O_NONBLOCK) &&
53456+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53457 return -EAGAIN;
53458- return do_syslog(2, buf, count);
53459+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
53460 }
53461
53462 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
53463 {
53464 poll_wait(file, &log_wait, wait);
53465- if (do_syslog(9, NULL, 0))
53466+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53467 return POLLIN | POLLRDNORM;
53468 return 0;
53469 }
53470diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
53471index a65239c..ad1182a 100644
53472--- a/fs/proc/meminfo.c
53473+++ b/fs/proc/meminfo.c
53474@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53475 unsigned long pages[NR_LRU_LISTS];
53476 int lru;
53477
53478+ pax_track_stack();
53479+
53480 /*
53481 * display in kilobytes.
53482 */
53483@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53484 vmi.used >> 10,
53485 vmi.largest_chunk >> 10
53486 #ifdef CONFIG_MEMORY_FAILURE
53487- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
53488+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
53489 #endif
53490 );
53491
53492diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
53493index 9fe7d7e..cdb62c9 100644
53494--- a/fs/proc/nommu.c
53495+++ b/fs/proc/nommu.c
53496@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53497 if (len < 1)
53498 len = 1;
53499 seq_printf(m, "%*c", len, ' ');
53500- seq_path(m, &file->f_path, "");
53501+ seq_path(m, &file->f_path, "\n\\");
53502 }
53503
53504 seq_putc(m, '\n');
53505diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53506index 04d1270..25e1173 100644
53507--- a/fs/proc/proc_net.c
53508+++ b/fs/proc/proc_net.c
53509@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53510 struct task_struct *task;
53511 struct nsproxy *ns;
53512 struct net *net = NULL;
53513+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53514+ const struct cred *cred = current_cred();
53515+#endif
53516+
53517+#ifdef CONFIG_GRKERNSEC_PROC_USER
53518+ if (cred->fsuid)
53519+ return net;
53520+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53521+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53522+ return net;
53523+#endif
53524
53525 rcu_read_lock();
53526 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53527diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53528index f667e8a..55f4d96 100644
53529--- a/fs/proc/proc_sysctl.c
53530+++ b/fs/proc/proc_sysctl.c
53531@@ -7,11 +7,13 @@
53532 #include <linux/security.h>
53533 #include "internal.h"
53534
53535+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53536+
53537 static const struct dentry_operations proc_sys_dentry_operations;
53538 static const struct file_operations proc_sys_file_operations;
53539-static const struct inode_operations proc_sys_inode_operations;
53540+const struct inode_operations proc_sys_inode_operations;
53541 static const struct file_operations proc_sys_dir_file_operations;
53542-static const struct inode_operations proc_sys_dir_operations;
53543+const struct inode_operations proc_sys_dir_operations;
53544
53545 static struct inode *proc_sys_make_inode(struct super_block *sb,
53546 struct ctl_table_header *head, struct ctl_table *table)
53547@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53548 if (!p)
53549 goto out;
53550
53551+ if (gr_handle_sysctl(p, MAY_EXEC))
53552+ goto out;
53553+
53554 err = ERR_PTR(-ENOMEM);
53555 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53556 if (h)
53557@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53558
53559 err = NULL;
53560 dentry->d_op = &proc_sys_dentry_operations;
53561+
53562+ gr_handle_proc_create(dentry, inode);
53563+
53564 d_add(dentry, inode);
53565
53566 out:
53567@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53568 return -ENOMEM;
53569 } else {
53570 child->d_op = &proc_sys_dentry_operations;
53571+
53572+ gr_handle_proc_create(child, inode);
53573+
53574 d_add(child, inode);
53575 }
53576 } else {
53577@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
53578 if (*pos < file->f_pos)
53579 continue;
53580
53581+ if (gr_handle_sysctl(table, 0))
53582+ continue;
53583+
53584 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
53585 if (res)
53586 return res;
53587@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
53588 if (IS_ERR(head))
53589 return PTR_ERR(head);
53590
53591+ if (table && gr_handle_sysctl(table, MAY_EXEC))
53592+ return -ENOENT;
53593+
53594 generic_fillattr(inode, stat);
53595 if (table)
53596 stat->mode = (stat->mode & S_IFMT) | table->mode;
53597@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
53598 };
53599
53600 static const struct file_operations proc_sys_dir_file_operations = {
53601+ .read = generic_read_dir,
53602 .readdir = proc_sys_readdir,
53603 .llseek = generic_file_llseek,
53604 };
53605
53606-static const struct inode_operations proc_sys_inode_operations = {
53607+const struct inode_operations proc_sys_inode_operations = {
53608 .permission = proc_sys_permission,
53609 .setattr = proc_sys_setattr,
53610 .getattr = proc_sys_getattr,
53611 };
53612
53613-static const struct inode_operations proc_sys_dir_operations = {
53614+const struct inode_operations proc_sys_dir_operations = {
53615 .lookup = proc_sys_lookup,
53616 .permission = proc_sys_permission,
53617 .setattr = proc_sys_setattr,
53618diff --git a/fs/proc/root.c b/fs/proc/root.c
53619index b080b79..d957e63 100644
53620--- a/fs/proc/root.c
53621+++ b/fs/proc/root.c
53622@@ -134,7 +134,15 @@ void __init proc_root_init(void)
53623 #ifdef CONFIG_PROC_DEVICETREE
53624 proc_device_tree_init();
53625 #endif
53626+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53627+#ifdef CONFIG_GRKERNSEC_PROC_USER
53628+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
53629+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53630+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53631+#endif
53632+#else
53633 proc_mkdir("bus", NULL);
53634+#endif
53635 proc_sys_init();
53636 }
53637
53638diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
53639index 3b7b82a..4b420b0 100644
53640--- a/fs/proc/task_mmu.c
53641+++ b/fs/proc/task_mmu.c
53642@@ -8,6 +8,7 @@
53643 #include <linux/mempolicy.h>
53644 #include <linux/swap.h>
53645 #include <linux/swapops.h>
53646+#include <linux/grsecurity.h>
53647
53648 #include <asm/elf.h>
53649 #include <asm/uaccess.h>
53650@@ -46,15 +47,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53651 "VmStk:\t%8lu kB\n"
53652 "VmExe:\t%8lu kB\n"
53653 "VmLib:\t%8lu kB\n"
53654- "VmPTE:\t%8lu kB\n",
53655- hiwater_vm << (PAGE_SHIFT-10),
53656+ "VmPTE:\t%8lu kB\n"
53657+
53658+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53659+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
53660+#endif
53661+
53662+ ,hiwater_vm << (PAGE_SHIFT-10),
53663 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
53664 mm->locked_vm << (PAGE_SHIFT-10),
53665 hiwater_rss << (PAGE_SHIFT-10),
53666 total_rss << (PAGE_SHIFT-10),
53667 data << (PAGE_SHIFT-10),
53668 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
53669- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
53670+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
53671+
53672+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53673+ , mm->context.user_cs_base, mm->context.user_cs_limit
53674+#endif
53675+
53676+ );
53677 }
53678
53679 unsigned long task_vsize(struct mm_struct *mm)
53680@@ -175,7 +187,8 @@ static void m_stop(struct seq_file *m, void *v)
53681 struct proc_maps_private *priv = m->private;
53682 struct vm_area_struct *vma = v;
53683
53684- vma_stop(priv, vma);
53685+ if (!IS_ERR(vma))
53686+ vma_stop(priv, vma);
53687 if (priv->task)
53688 put_task_struct(priv->task);
53689 }
53690@@ -199,6 +212,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
53691 return ret;
53692 }
53693
53694+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53695+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53696+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53697+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53698+#endif
53699+
53700 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53701 {
53702 struct mm_struct *mm = vma->vm_mm;
53703@@ -206,7 +225,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53704 int flags = vma->vm_flags;
53705 unsigned long ino = 0;
53706 unsigned long long pgoff = 0;
53707- unsigned long start;
53708 dev_t dev = 0;
53709 int len;
53710
53711@@ -217,20 +235,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53712 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
53713 }
53714
53715- /* We don't show the stack guard page in /proc/maps */
53716- start = vma->vm_start;
53717- if (vma->vm_flags & VM_GROWSDOWN)
53718- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
53719- start += PAGE_SIZE;
53720-
53721 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
53722- start,
53723+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53724+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
53725+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
53726+#else
53727+ vma->vm_start,
53728 vma->vm_end,
53729+#endif
53730 flags & VM_READ ? 'r' : '-',
53731 flags & VM_WRITE ? 'w' : '-',
53732 flags & VM_EXEC ? 'x' : '-',
53733 flags & VM_MAYSHARE ? 's' : 'p',
53734+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53735+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
53736+#else
53737 pgoff,
53738+#endif
53739 MAJOR(dev), MINOR(dev), ino, &len);
53740
53741 /*
53742@@ -239,7 +260,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53743 */
53744 if (file) {
53745 pad_len_spaces(m, len);
53746- seq_path(m, &file->f_path, "\n");
53747+ seq_path(m, &file->f_path, "\n\\");
53748 } else {
53749 const char *name = arch_vma_name(vma);
53750 if (!name) {
53751@@ -247,8 +268,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53752 if (vma->vm_start <= mm->brk &&
53753 vma->vm_end >= mm->start_brk) {
53754 name = "[heap]";
53755- } else if (vma->vm_start <= mm->start_stack &&
53756- vma->vm_end >= mm->start_stack) {
53757+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
53758+ (vma->vm_start <= mm->start_stack &&
53759+ vma->vm_end >= mm->start_stack)) {
53760 name = "[stack]";
53761 }
53762 } else {
53763@@ -269,6 +291,13 @@ static int show_map(struct seq_file *m, void *v)
53764 struct proc_maps_private *priv = m->private;
53765 struct task_struct *task = priv->task;
53766
53767+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53768+ if (current->exec_id != m->exec_id) {
53769+ gr_log_badprocpid("maps");
53770+ return 0;
53771+ }
53772+#endif
53773+
53774 show_map_vma(m, vma);
53775
53776 if (m->count < m->size) /* vma is copied successfully */
53777@@ -390,10 +419,23 @@ static int show_smap(struct seq_file *m, void *v)
53778 .private = &mss,
53779 };
53780
53781+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53782+ if (current->exec_id != m->exec_id) {
53783+ gr_log_badprocpid("smaps");
53784+ return 0;
53785+ }
53786+#endif
53787 memset(&mss, 0, sizeof mss);
53788- mss.vma = vma;
53789- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53790- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53791+
53792+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53793+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
53794+#endif
53795+ mss.vma = vma;
53796+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53797+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53798+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53799+ }
53800+#endif
53801
53802 show_map_vma(m, vma);
53803
53804@@ -409,7 +451,11 @@ static int show_smap(struct seq_file *m, void *v)
53805 "Swap: %8lu kB\n"
53806 "KernelPageSize: %8lu kB\n"
53807 "MMUPageSize: %8lu kB\n",
53808+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53809+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
53810+#else
53811 (vma->vm_end - vma->vm_start) >> 10,
53812+#endif
53813 mss.resident >> 10,
53814 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
53815 mss.shared_clean >> 10,
53816diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
53817index 8f5c05d..c99c76d 100644
53818--- a/fs/proc/task_nommu.c
53819+++ b/fs/proc/task_nommu.c
53820@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53821 else
53822 bytes += kobjsize(mm);
53823
53824- if (current->fs && current->fs->users > 1)
53825+ if (current->fs && atomic_read(&current->fs->users) > 1)
53826 sbytes += kobjsize(current->fs);
53827 else
53828 bytes += kobjsize(current->fs);
53829@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
53830 if (len < 1)
53831 len = 1;
53832 seq_printf(m, "%*c", len, ' ');
53833- seq_path(m, &file->f_path, "");
53834+ seq_path(m, &file->f_path, "\n\\");
53835 }
53836
53837 seq_putc(m, '\n');
53838diff --git a/fs/readdir.c b/fs/readdir.c
53839index 7723401..30059a6 100644
53840--- a/fs/readdir.c
53841+++ b/fs/readdir.c
53842@@ -16,6 +16,7 @@
53843 #include <linux/security.h>
53844 #include <linux/syscalls.h>
53845 #include <linux/unistd.h>
53846+#include <linux/namei.h>
53847
53848 #include <asm/uaccess.h>
53849
53850@@ -67,6 +68,7 @@ struct old_linux_dirent {
53851
53852 struct readdir_callback {
53853 struct old_linux_dirent __user * dirent;
53854+ struct file * file;
53855 int result;
53856 };
53857
53858@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
53859 buf->result = -EOVERFLOW;
53860 return -EOVERFLOW;
53861 }
53862+
53863+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53864+ return 0;
53865+
53866 buf->result++;
53867 dirent = buf->dirent;
53868 if (!access_ok(VERIFY_WRITE, dirent,
53869@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
53870
53871 buf.result = 0;
53872 buf.dirent = dirent;
53873+ buf.file = file;
53874
53875 error = vfs_readdir(file, fillonedir, &buf);
53876 if (buf.result)
53877@@ -142,6 +149,7 @@ struct linux_dirent {
53878 struct getdents_callback {
53879 struct linux_dirent __user * current_dir;
53880 struct linux_dirent __user * previous;
53881+ struct file * file;
53882 int count;
53883 int error;
53884 };
53885@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
53886 buf->error = -EOVERFLOW;
53887 return -EOVERFLOW;
53888 }
53889+
53890+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53891+ return 0;
53892+
53893 dirent = buf->previous;
53894 if (dirent) {
53895 if (__put_user(offset, &dirent->d_off))
53896@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
53897 buf.previous = NULL;
53898 buf.count = count;
53899 buf.error = 0;
53900+ buf.file = file;
53901
53902 error = vfs_readdir(file, filldir, &buf);
53903 if (error >= 0)
53904@@ -228,6 +241,7 @@ out:
53905 struct getdents_callback64 {
53906 struct linux_dirent64 __user * current_dir;
53907 struct linux_dirent64 __user * previous;
53908+ struct file *file;
53909 int count;
53910 int error;
53911 };
53912@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
53913 buf->error = -EINVAL; /* only used if we fail.. */
53914 if (reclen > buf->count)
53915 return -EINVAL;
53916+
53917+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53918+ return 0;
53919+
53920 dirent = buf->previous;
53921 if (dirent) {
53922 if (__put_user(offset, &dirent->d_off))
53923@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53924
53925 buf.current_dir = dirent;
53926 buf.previous = NULL;
53927+ buf.file = file;
53928 buf.count = count;
53929 buf.error = 0;
53930
53931@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53932 error = buf.error;
53933 lastdirent = buf.previous;
53934 if (lastdirent) {
53935- typeof(lastdirent->d_off) d_off = file->f_pos;
53936+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
53937 if (__put_user(d_off, &lastdirent->d_off))
53938 error = -EFAULT;
53939 else
53940diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
53941index d42c30c..4fd8718 100644
53942--- a/fs/reiserfs/dir.c
53943+++ b/fs/reiserfs/dir.c
53944@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
53945 struct reiserfs_dir_entry de;
53946 int ret = 0;
53947
53948+ pax_track_stack();
53949+
53950 reiserfs_write_lock(inode->i_sb);
53951
53952 reiserfs_check_lock_depth(inode->i_sb, "readdir");
53953diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
53954index 128d3f7..8840d44 100644
53955--- a/fs/reiserfs/do_balan.c
53956+++ b/fs/reiserfs/do_balan.c
53957@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
53958 return;
53959 }
53960
53961- atomic_inc(&(fs_generation(tb->tb_sb)));
53962+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
53963 do_balance_starts(tb);
53964
53965 /* balance leaf returns 0 except if combining L R and S into
53966diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
53967index 72cb1cc..d0e3181 100644
53968--- a/fs/reiserfs/item_ops.c
53969+++ b/fs/reiserfs/item_ops.c
53970@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
53971 vi->vi_index, vi->vi_type, vi->vi_ih);
53972 }
53973
53974-static struct item_operations stat_data_ops = {
53975+static const struct item_operations stat_data_ops = {
53976 .bytes_number = sd_bytes_number,
53977 .decrement_key = sd_decrement_key,
53978 .is_left_mergeable = sd_is_left_mergeable,
53979@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
53980 vi->vi_index, vi->vi_type, vi->vi_ih);
53981 }
53982
53983-static struct item_operations direct_ops = {
53984+static const struct item_operations direct_ops = {
53985 .bytes_number = direct_bytes_number,
53986 .decrement_key = direct_decrement_key,
53987 .is_left_mergeable = direct_is_left_mergeable,
53988@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
53989 vi->vi_index, vi->vi_type, vi->vi_ih);
53990 }
53991
53992-static struct item_operations indirect_ops = {
53993+static const struct item_operations indirect_ops = {
53994 .bytes_number = indirect_bytes_number,
53995 .decrement_key = indirect_decrement_key,
53996 .is_left_mergeable = indirect_is_left_mergeable,
53997@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
53998 printk("\n");
53999 }
54000
54001-static struct item_operations direntry_ops = {
54002+static const struct item_operations direntry_ops = {
54003 .bytes_number = direntry_bytes_number,
54004 .decrement_key = direntry_decrement_key,
54005 .is_left_mergeable = direntry_is_left_mergeable,
54006@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
54007 "Invalid item type observed, run fsck ASAP");
54008 }
54009
54010-static struct item_operations errcatch_ops = {
54011+static const struct item_operations errcatch_ops = {
54012 errcatch_bytes_number,
54013 errcatch_decrement_key,
54014 errcatch_is_left_mergeable,
54015@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
54016 #error Item types must use disk-format assigned values.
54017 #endif
54018
54019-struct item_operations *item_ops[TYPE_ANY + 1] = {
54020+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
54021 &stat_data_ops,
54022 &indirect_ops,
54023 &direct_ops,
54024diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
54025index b5fe0aa..e0e25c4 100644
54026--- a/fs/reiserfs/journal.c
54027+++ b/fs/reiserfs/journal.c
54028@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
54029 struct buffer_head *bh;
54030 int i, j;
54031
54032+ pax_track_stack();
54033+
54034 bh = __getblk(dev, block, bufsize);
54035 if (buffer_uptodate(bh))
54036 return (bh);
54037diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
54038index 2715791..b8996db 100644
54039--- a/fs/reiserfs/namei.c
54040+++ b/fs/reiserfs/namei.c
54041@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
54042 unsigned long savelink = 1;
54043 struct timespec ctime;
54044
54045+ pax_track_stack();
54046+
54047 /* three balancings: (1) old name removal, (2) new name insertion
54048 and (3) maybe "save" link insertion
54049 stat data updates: (1) old directory,
54050diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
54051index 9229e55..3d2e3b7 100644
54052--- a/fs/reiserfs/procfs.c
54053+++ b/fs/reiserfs/procfs.c
54054@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
54055 "SMALL_TAILS " : "NO_TAILS ",
54056 replay_only(sb) ? "REPLAY_ONLY " : "",
54057 convert_reiserfs(sb) ? "CONV " : "",
54058- atomic_read(&r->s_generation_counter),
54059+ atomic_read_unchecked(&r->s_generation_counter),
54060 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
54061 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54062 SF(s_good_search_by_key_reada), SF(s_bmaps),
54063@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54064 struct journal_params *jp = &rs->s_v1.s_journal;
54065 char b[BDEVNAME_SIZE];
54066
54067+ pax_track_stack();
54068+
54069 seq_printf(m, /* on-disk fields */
54070 "jp_journal_1st_block: \t%i\n"
54071 "jp_journal_dev: \t%s[%x]\n"
54072diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54073index d036ee5..4c7dca1 100644
54074--- a/fs/reiserfs/stree.c
54075+++ b/fs/reiserfs/stree.c
54076@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54077 int iter = 0;
54078 #endif
54079
54080+ pax_track_stack();
54081+
54082 BUG_ON(!th->t_trans_id);
54083
54084 init_tb_struct(th, &s_del_balance, sb, path,
54085@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54086 int retval;
54087 int quota_cut_bytes = 0;
54088
54089+ pax_track_stack();
54090+
54091 BUG_ON(!th->t_trans_id);
54092
54093 le_key2cpu_key(&cpu_key, key);
54094@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54095 int quota_cut_bytes;
54096 loff_t tail_pos = 0;
54097
54098+ pax_track_stack();
54099+
54100 BUG_ON(!th->t_trans_id);
54101
54102 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54103@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54104 int retval;
54105 int fs_gen;
54106
54107+ pax_track_stack();
54108+
54109 BUG_ON(!th->t_trans_id);
54110
54111 fs_gen = get_generation(inode->i_sb);
54112@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54113 int fs_gen = 0;
54114 int quota_bytes = 0;
54115
54116+ pax_track_stack();
54117+
54118 BUG_ON(!th->t_trans_id);
54119
54120 if (inode) { /* Do we count quotas for item? */
54121diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54122index 7cb1285..c726cd0 100644
54123--- a/fs/reiserfs/super.c
54124+++ b/fs/reiserfs/super.c
54125@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54126 {.option_name = NULL}
54127 };
54128
54129+ pax_track_stack();
54130+
54131 *blocks = 0;
54132 if (!options || !*options)
54133 /* use default configuration: create tails, journaling on, no
54134diff --git a/fs/select.c b/fs/select.c
54135index fd38ce2..f5381b8 100644
54136--- a/fs/select.c
54137+++ b/fs/select.c
54138@@ -20,6 +20,7 @@
54139 #include <linux/module.h>
54140 #include <linux/slab.h>
54141 #include <linux/poll.h>
54142+#include <linux/security.h>
54143 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54144 #include <linux/file.h>
54145 #include <linux/fdtable.h>
54146@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54147 int retval, i, timed_out = 0;
54148 unsigned long slack = 0;
54149
54150+ pax_track_stack();
54151+
54152 rcu_read_lock();
54153 retval = max_select_fd(n, fds);
54154 rcu_read_unlock();
54155@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54156 /* Allocate small arguments on the stack to save memory and be faster */
54157 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54158
54159+ pax_track_stack();
54160+
54161 ret = -EINVAL;
54162 if (n < 0)
54163 goto out_nofds;
54164@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54165 struct poll_list *walk = head;
54166 unsigned long todo = nfds;
54167
54168+ pax_track_stack();
54169+
54170+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54171 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54172 return -EINVAL;
54173
54174diff --git a/fs/seq_file.c b/fs/seq_file.c
54175index eae7d9d..12c71e3 100644
54176--- a/fs/seq_file.c
54177+++ b/fs/seq_file.c
54178@@ -40,6 +40,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
54179 memset(p, 0, sizeof(*p));
54180 mutex_init(&p->lock);
54181 p->op = op;
54182+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
54183+ p->exec_id = current->exec_id;
54184+#endif
54185
54186 /*
54187 * Wrappers around seq_open(e.g. swaps_open) need to be
54188@@ -76,7 +79,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54189 return 0;
54190 }
54191 if (!m->buf) {
54192- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54193+ m->size = PAGE_SIZE;
54194+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54195 if (!m->buf)
54196 return -ENOMEM;
54197 }
54198@@ -116,7 +120,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54199 Eoverflow:
54200 m->op->stop(m, p);
54201 kfree(m->buf);
54202- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54203+ m->size <<= 1;
54204+ m->buf = kmalloc(m->size, GFP_KERNEL);
54205 return !m->buf ? -ENOMEM : -EAGAIN;
54206 }
54207
54208@@ -169,7 +174,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54209 m->version = file->f_version;
54210 /* grab buffer if we didn't have one */
54211 if (!m->buf) {
54212- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54213+ m->size = PAGE_SIZE;
54214+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54215 if (!m->buf)
54216 goto Enomem;
54217 }
54218@@ -210,7 +216,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54219 goto Fill;
54220 m->op->stop(m, p);
54221 kfree(m->buf);
54222- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54223+ m->size <<= 1;
54224+ m->buf = kmalloc(m->size, GFP_KERNEL);
54225 if (!m->buf)
54226 goto Enomem;
54227 m->count = 0;
54228@@ -551,7 +558,7 @@ static void single_stop(struct seq_file *p, void *v)
54229 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54230 void *data)
54231 {
54232- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54233+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54234 int res = -ENOMEM;
54235
54236 if (op) {
54237diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54238index 71c29b6..54694dd 100644
54239--- a/fs/smbfs/proc.c
54240+++ b/fs/smbfs/proc.c
54241@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54242
54243 out:
54244 if (server->local_nls != NULL && server->remote_nls != NULL)
54245- server->ops->convert = convert_cp;
54246+ *(void **)&server->ops->convert = convert_cp;
54247 else
54248- server->ops->convert = convert_memcpy;
54249+ *(void **)&server->ops->convert = convert_memcpy;
54250
54251 smb_unlock_server(server);
54252 return n;
54253@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54254
54255 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54256 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54257- server->ops->getattr = smb_proc_getattr_core;
54258+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
54259 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54260- server->ops->getattr = smb_proc_getattr_ff;
54261+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54262 }
54263
54264 /* Decode server capabilities */
54265@@ -3439,7 +3439,7 @@ out:
54266 static void
54267 install_ops(struct smb_ops *dst, struct smb_ops *src)
54268 {
54269- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54270+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54271 }
54272
54273 /* < LANMAN2 */
54274diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54275index 00b2909..2ace383 100644
54276--- a/fs/smbfs/symlink.c
54277+++ b/fs/smbfs/symlink.c
54278@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54279
54280 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54281 {
54282- char *s = nd_get_link(nd);
54283+ const char *s = nd_get_link(nd);
54284 if (!IS_ERR(s))
54285 __putname(s);
54286 }
54287diff --git a/fs/splice.c b/fs/splice.c
54288index bb92b7c..5aa72b0 100644
54289--- a/fs/splice.c
54290+++ b/fs/splice.c
54291@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54292 pipe_lock(pipe);
54293
54294 for (;;) {
54295- if (!pipe->readers) {
54296+ if (!atomic_read(&pipe->readers)) {
54297 send_sig(SIGPIPE, current, 0);
54298 if (!ret)
54299 ret = -EPIPE;
54300@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54301 do_wakeup = 0;
54302 }
54303
54304- pipe->waiting_writers++;
54305+ atomic_inc(&pipe->waiting_writers);
54306 pipe_wait(pipe);
54307- pipe->waiting_writers--;
54308+ atomic_dec(&pipe->waiting_writers);
54309 }
54310
54311 pipe_unlock(pipe);
54312@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54313 .spd_release = spd_release_page,
54314 };
54315
54316+ pax_track_stack();
54317+
54318 index = *ppos >> PAGE_CACHE_SHIFT;
54319 loff = *ppos & ~PAGE_CACHE_MASK;
54320 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54321@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54322 old_fs = get_fs();
54323 set_fs(get_ds());
54324 /* The cast to a user pointer is valid due to the set_fs() */
54325- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54326+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54327 set_fs(old_fs);
54328
54329 return res;
54330@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54331 old_fs = get_fs();
54332 set_fs(get_ds());
54333 /* The cast to a user pointer is valid due to the set_fs() */
54334- res = vfs_write(file, (const char __user *)buf, count, &pos);
54335+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54336 set_fs(old_fs);
54337
54338 return res;
54339@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54340 .spd_release = spd_release_page,
54341 };
54342
54343+ pax_track_stack();
54344+
54345 index = *ppos >> PAGE_CACHE_SHIFT;
54346 offset = *ppos & ~PAGE_CACHE_MASK;
54347 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54348@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54349 goto err;
54350
54351 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54352- vec[i].iov_base = (void __user *) page_address(page);
54353+ vec[i].iov_base = (__force void __user *) page_address(page);
54354 vec[i].iov_len = this_len;
54355 pages[i] = page;
54356 spd.nr_pages++;
54357@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54358 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54359 {
54360 while (!pipe->nrbufs) {
54361- if (!pipe->writers)
54362+ if (!atomic_read(&pipe->writers))
54363 return 0;
54364
54365- if (!pipe->waiting_writers && sd->num_spliced)
54366+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54367 return 0;
54368
54369 if (sd->flags & SPLICE_F_NONBLOCK)
54370@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54371 * out of the pipe right after the splice_to_pipe(). So set
54372 * PIPE_READERS appropriately.
54373 */
54374- pipe->readers = 1;
54375+ atomic_set(&pipe->readers, 1);
54376
54377 current->splice_pipe = pipe;
54378 }
54379@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54380 .spd_release = spd_release_page,
54381 };
54382
54383+ pax_track_stack();
54384+
54385 pipe = pipe_info(file->f_path.dentry->d_inode);
54386 if (!pipe)
54387 return -EBADF;
54388@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54389 ret = -ERESTARTSYS;
54390 break;
54391 }
54392- if (!pipe->writers)
54393+ if (!atomic_read(&pipe->writers))
54394 break;
54395- if (!pipe->waiting_writers) {
54396+ if (!atomic_read(&pipe->waiting_writers)) {
54397 if (flags & SPLICE_F_NONBLOCK) {
54398 ret = -EAGAIN;
54399 break;
54400@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54401 pipe_lock(pipe);
54402
54403 while (pipe->nrbufs >= PIPE_BUFFERS) {
54404- if (!pipe->readers) {
54405+ if (!atomic_read(&pipe->readers)) {
54406 send_sig(SIGPIPE, current, 0);
54407 ret = -EPIPE;
54408 break;
54409@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54410 ret = -ERESTARTSYS;
54411 break;
54412 }
54413- pipe->waiting_writers++;
54414+ atomic_inc(&pipe->waiting_writers);
54415 pipe_wait(pipe);
54416- pipe->waiting_writers--;
54417+ atomic_dec(&pipe->waiting_writers);
54418 }
54419
54420 pipe_unlock(pipe);
54421@@ -1786,14 +1792,14 @@ retry:
54422 pipe_double_lock(ipipe, opipe);
54423
54424 do {
54425- if (!opipe->readers) {
54426+ if (!atomic_read(&opipe->readers)) {
54427 send_sig(SIGPIPE, current, 0);
54428 if (!ret)
54429 ret = -EPIPE;
54430 break;
54431 }
54432
54433- if (!ipipe->nrbufs && !ipipe->writers)
54434+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54435 break;
54436
54437 /*
54438@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54439 pipe_double_lock(ipipe, opipe);
54440
54441 do {
54442- if (!opipe->readers) {
54443+ if (!atomic_read(&opipe->readers)) {
54444 send_sig(SIGPIPE, current, 0);
54445 if (!ret)
54446 ret = -EPIPE;
54447@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54448 * return EAGAIN if we have the potential of some data in the
54449 * future, otherwise just return 0
54450 */
54451- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
54452+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
54453 ret = -EAGAIN;
54454
54455 pipe_unlock(ipipe);
54456diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
54457index e020183..18d64b4 100644
54458--- a/fs/sysfs/dir.c
54459+++ b/fs/sysfs/dir.c
54460@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
54461 struct sysfs_dirent *sd;
54462 int rc;
54463
54464+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54465+ const char *parent_name = parent_sd->s_name;
54466+
54467+ mode = S_IFDIR | S_IRWXU;
54468+
54469+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
54470+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
54471+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
54472+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
54473+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
54474+#endif
54475+
54476 /* allocate */
54477 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
54478 if (!sd)
54479diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
54480index 7118a38..70af853 100644
54481--- a/fs/sysfs/file.c
54482+++ b/fs/sysfs/file.c
54483@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
54484
54485 struct sysfs_open_dirent {
54486 atomic_t refcnt;
54487- atomic_t event;
54488+ atomic_unchecked_t event;
54489 wait_queue_head_t poll;
54490 struct list_head buffers; /* goes through sysfs_buffer.list */
54491 };
54492@@ -53,7 +53,7 @@ struct sysfs_buffer {
54493 size_t count;
54494 loff_t pos;
54495 char * page;
54496- struct sysfs_ops * ops;
54497+ const struct sysfs_ops * ops;
54498 struct mutex mutex;
54499 int needs_read_fill;
54500 int event;
54501@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54502 {
54503 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54504 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54505- struct sysfs_ops * ops = buffer->ops;
54506+ const struct sysfs_ops * ops = buffer->ops;
54507 int ret = 0;
54508 ssize_t count;
54509
54510@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54511 if (!sysfs_get_active_two(attr_sd))
54512 return -ENODEV;
54513
54514- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
54515+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
54516 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
54517
54518 sysfs_put_active_two(attr_sd);
54519@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
54520 {
54521 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54522 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54523- struct sysfs_ops * ops = buffer->ops;
54524+ const struct sysfs_ops * ops = buffer->ops;
54525 int rc;
54526
54527 /* need attr_sd for attr and ops, its parent for kobj */
54528@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
54529 return -ENOMEM;
54530
54531 atomic_set(&new_od->refcnt, 0);
54532- atomic_set(&new_od->event, 1);
54533+ atomic_set_unchecked(&new_od->event, 1);
54534 init_waitqueue_head(&new_od->poll);
54535 INIT_LIST_HEAD(&new_od->buffers);
54536 goto retry;
54537@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
54538 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
54539 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54540 struct sysfs_buffer *buffer;
54541- struct sysfs_ops *ops;
54542+ const struct sysfs_ops *ops;
54543 int error = -EACCES;
54544 char *p;
54545
54546@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
54547
54548 sysfs_put_active_two(attr_sd);
54549
54550- if (buffer->event != atomic_read(&od->event))
54551+ if (buffer->event != atomic_read_unchecked(&od->event))
54552 goto trigger;
54553
54554 return DEFAULT_POLLMASK;
54555@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54556
54557 od = sd->s_attr.open;
54558 if (od) {
54559- atomic_inc(&od->event);
54560+ atomic_inc_unchecked(&od->event);
54561 wake_up_interruptible(&od->poll);
54562 }
54563
54564diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54565index c5081ad..342ea86 100644
54566--- a/fs/sysfs/symlink.c
54567+++ b/fs/sysfs/symlink.c
54568@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54569
54570 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
54571 {
54572- char *page = nd_get_link(nd);
54573+ const char *page = nd_get_link(nd);
54574 if (!IS_ERR(page))
54575 free_page((unsigned long)page);
54576 }
54577diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
54578index 1e06853..b06d325 100644
54579--- a/fs/udf/balloc.c
54580+++ b/fs/udf/balloc.c
54581@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
54582
54583 mutex_lock(&sbi->s_alloc_mutex);
54584 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54585- if (bloc->logicalBlockNum < 0 ||
54586- (bloc->logicalBlockNum + count) >
54587- partmap->s_partition_len) {
54588+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54589 udf_debug("%d < %d || %d + %d > %d\n",
54590 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
54591 count, partmap->s_partition_len);
54592@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
54593
54594 mutex_lock(&sbi->s_alloc_mutex);
54595 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54596- if (bloc->logicalBlockNum < 0 ||
54597- (bloc->logicalBlockNum + count) >
54598- partmap->s_partition_len) {
54599+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54600 udf_debug("%d < %d || %d + %d > %d\n",
54601 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
54602 partmap->s_partition_len);
54603diff --git a/fs/udf/inode.c b/fs/udf/inode.c
54604index 6d24c2c..fff470f 100644
54605--- a/fs/udf/inode.c
54606+++ b/fs/udf/inode.c
54607@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
54608 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
54609 int lastblock = 0;
54610
54611+ pax_track_stack();
54612+
54613 prev_epos.offset = udf_file_entry_alloc_offset(inode);
54614 prev_epos.block = iinfo->i_location;
54615 prev_epos.bh = NULL;
54616diff --git a/fs/udf/misc.c b/fs/udf/misc.c
54617index 9215700..bf1f68e 100644
54618--- a/fs/udf/misc.c
54619+++ b/fs/udf/misc.c
54620@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
54621
54622 u8 udf_tag_checksum(const struct tag *t)
54623 {
54624- u8 *data = (u8 *)t;
54625+ const u8 *data = (const u8 *)t;
54626 u8 checksum = 0;
54627 int i;
54628 for (i = 0; i < sizeof(struct tag); ++i)
54629diff --git a/fs/utimes.c b/fs/utimes.c
54630index e4c75db..b4df0e0 100644
54631--- a/fs/utimes.c
54632+++ b/fs/utimes.c
54633@@ -1,6 +1,7 @@
54634 #include <linux/compiler.h>
54635 #include <linux/file.h>
54636 #include <linux/fs.h>
54637+#include <linux/security.h>
54638 #include <linux/linkage.h>
54639 #include <linux/mount.h>
54640 #include <linux/namei.h>
54641@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
54642 goto mnt_drop_write_and_out;
54643 }
54644 }
54645+
54646+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
54647+ error = -EACCES;
54648+ goto mnt_drop_write_and_out;
54649+ }
54650+
54651 mutex_lock(&inode->i_mutex);
54652 error = notify_change(path->dentry, &newattrs);
54653 mutex_unlock(&inode->i_mutex);
54654diff --git a/fs/xattr.c b/fs/xattr.c
54655index 6d4f6d3..cda3958 100644
54656--- a/fs/xattr.c
54657+++ b/fs/xattr.c
54658@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
54659 * Extended attribute SET operations
54660 */
54661 static long
54662-setxattr(struct dentry *d, const char __user *name, const void __user *value,
54663+setxattr(struct path *path, const char __user *name, const void __user *value,
54664 size_t size, int flags)
54665 {
54666 int error;
54667@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
54668 return PTR_ERR(kvalue);
54669 }
54670
54671- error = vfs_setxattr(d, kname, kvalue, size, flags);
54672+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
54673+ error = -EACCES;
54674+ goto out;
54675+ }
54676+
54677+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
54678+out:
54679 kfree(kvalue);
54680 return error;
54681 }
54682@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
54683 return error;
54684 error = mnt_want_write(path.mnt);
54685 if (!error) {
54686- error = setxattr(path.dentry, name, value, size, flags);
54687+ error = setxattr(&path, name, value, size, flags);
54688 mnt_drop_write(path.mnt);
54689 }
54690 path_put(&path);
54691@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
54692 return error;
54693 error = mnt_want_write(path.mnt);
54694 if (!error) {
54695- error = setxattr(path.dentry, name, value, size, flags);
54696+ error = setxattr(&path, name, value, size, flags);
54697 mnt_drop_write(path.mnt);
54698 }
54699 path_put(&path);
54700@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
54701 const void __user *,value, size_t, size, int, flags)
54702 {
54703 struct file *f;
54704- struct dentry *dentry;
54705 int error = -EBADF;
54706
54707 f = fget(fd);
54708 if (!f)
54709 return error;
54710- dentry = f->f_path.dentry;
54711- audit_inode(NULL, dentry);
54712+ audit_inode(NULL, f->f_path.dentry);
54713 error = mnt_want_write_file(f);
54714 if (!error) {
54715- error = setxattr(dentry, name, value, size, flags);
54716+ error = setxattr(&f->f_path, name, value, size, flags);
54717 mnt_drop_write(f->f_path.mnt);
54718 }
54719 fput(f);
54720diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
54721index c6ad7c7..f2847a7 100644
54722--- a/fs/xattr_acl.c
54723+++ b/fs/xattr_acl.c
54724@@ -17,8 +17,8 @@
54725 struct posix_acl *
54726 posix_acl_from_xattr(const void *value, size_t size)
54727 {
54728- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
54729- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
54730+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
54731+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
54732 int count;
54733 struct posix_acl *acl;
54734 struct posix_acl_entry *acl_e;
54735diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
54736index 942362f..88f96f5 100644
54737--- a/fs/xfs/linux-2.6/xfs_ioctl.c
54738+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
54739@@ -134,7 +134,7 @@ xfs_find_handle(
54740 }
54741
54742 error = -EFAULT;
54743- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
54744+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
54745 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
54746 goto out_put;
54747
54748@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
54749 if (IS_ERR(dentry))
54750 return PTR_ERR(dentry);
54751
54752- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
54753+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
54754 if (!kbuf)
54755 goto out_dput;
54756
54757@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
54758 xfs_mount_t *mp,
54759 void __user *arg)
54760 {
54761- xfs_fsop_geom_t fsgeo;
54762+ xfs_fsop_geom_t fsgeo;
54763 int error;
54764
54765 error = xfs_fs_geometry(mp, &fsgeo, 3);
54766diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
54767index bad485a..479bd32 100644
54768--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
54769+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
54770@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
54771 xfs_fsop_geom_t fsgeo;
54772 int error;
54773
54774+ memset(&fsgeo, 0, sizeof(fsgeo));
54775 error = xfs_fs_geometry(mp, &fsgeo, 3);
54776 if (error)
54777 return -error;
54778diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
54779index 1f3b4b8..6102f6d 100644
54780--- a/fs/xfs/linux-2.6/xfs_iops.c
54781+++ b/fs/xfs/linux-2.6/xfs_iops.c
54782@@ -468,7 +468,7 @@ xfs_vn_put_link(
54783 struct nameidata *nd,
54784 void *p)
54785 {
54786- char *s = nd_get_link(nd);
54787+ const char *s = nd_get_link(nd);
54788
54789 if (!IS_ERR(s))
54790 kfree(s);
54791diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
54792index 8971fb0..5fc1eb2 100644
54793--- a/fs/xfs/xfs_bmap.c
54794+++ b/fs/xfs/xfs_bmap.c
54795@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
54796 int nmap,
54797 int ret_nmap);
54798 #else
54799-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
54800+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
54801 #endif /* DEBUG */
54802
54803 #if defined(XFS_RW_TRACE)
54804diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
54805index e89734e..5e84d8d 100644
54806--- a/fs/xfs/xfs_dir2_sf.c
54807+++ b/fs/xfs/xfs_dir2_sf.c
54808@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
54809 }
54810
54811 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
54812- if (filldir(dirent, sfep->name, sfep->namelen,
54813+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
54814+ char name[sfep->namelen];
54815+ memcpy(name, sfep->name, sfep->namelen);
54816+ if (filldir(dirent, name, sfep->namelen,
54817+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
54818+ *offset = off & 0x7fffffff;
54819+ return 0;
54820+ }
54821+ } else if (filldir(dirent, sfep->name, sfep->namelen,
54822 off & 0x7fffffff, ino, DT_UNKNOWN)) {
54823 *offset = off & 0x7fffffff;
54824 return 0;
54825diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
54826index 8f32f50..b6a41e8 100644
54827--- a/fs/xfs/xfs_vnodeops.c
54828+++ b/fs/xfs/xfs_vnodeops.c
54829@@ -564,13 +564,18 @@ xfs_readlink(
54830
54831 xfs_ilock(ip, XFS_ILOCK_SHARED);
54832
54833- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
54834- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
54835-
54836 pathlen = ip->i_d.di_size;
54837 if (!pathlen)
54838 goto out;
54839
54840+ if (pathlen > MAXPATHLEN) {
54841+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
54842+ __func__, (unsigned long long)ip->i_ino, pathlen);
54843+ ASSERT(0);
54844+ error = XFS_ERROR(EFSCORRUPTED);
54845+ goto out;
54846+ }
54847+
54848 if (ip->i_df.if_flags & XFS_IFINLINE) {
54849 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
54850 link[pathlen] = '\0';
54851diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
54852new file mode 100644
54853index 0000000..9ac9020
54854--- /dev/null
54855+++ b/grsecurity/Kconfig
54856@@ -0,0 +1,1072 @@
54857+#
54858+# grecurity configuration
54859+#
54860+
54861+menu "Grsecurity"
54862+
54863+config GRKERNSEC
54864+ bool "Grsecurity"
54865+ select CRYPTO
54866+ select CRYPTO_SHA256
54867+ help
54868+ If you say Y here, you will be able to configure many features
54869+ that will enhance the security of your system. It is highly
54870+ recommended that you say Y here and read through the help
54871+ for each option so that you fully understand the features and
54872+ can evaluate their usefulness for your machine.
54873+
54874+choice
54875+ prompt "Security Level"
54876+ depends on GRKERNSEC
54877+ default GRKERNSEC_CUSTOM
54878+
54879+config GRKERNSEC_LOW
54880+ bool "Low"
54881+ select GRKERNSEC_LINK
54882+ select GRKERNSEC_FIFO
54883+ select GRKERNSEC_RANDNET
54884+ select GRKERNSEC_DMESG
54885+ select GRKERNSEC_CHROOT
54886+ select GRKERNSEC_CHROOT_CHDIR
54887+
54888+ help
54889+ If you choose this option, several of the grsecurity options will
54890+ be enabled that will give you greater protection against a number
54891+ of attacks, while assuring that none of your software will have any
54892+ conflicts with the additional security measures. If you run a lot
54893+ of unusual software, or you are having problems with the higher
54894+ security levels, you should say Y here. With this option, the
54895+ following features are enabled:
54896+
54897+ - Linking restrictions
54898+ - FIFO restrictions
54899+ - Restricted dmesg
54900+ - Enforced chdir("/") on chroot
54901+ - Runtime module disabling
54902+
54903+config GRKERNSEC_MEDIUM
54904+ bool "Medium"
54905+ select PAX
54906+ select PAX_EI_PAX
54907+ select PAX_PT_PAX_FLAGS
54908+ select PAX_HAVE_ACL_FLAGS
54909+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54910+ select GRKERNSEC_CHROOT
54911+ select GRKERNSEC_CHROOT_SYSCTL
54912+ select GRKERNSEC_LINK
54913+ select GRKERNSEC_FIFO
54914+ select GRKERNSEC_DMESG
54915+ select GRKERNSEC_RANDNET
54916+ select GRKERNSEC_FORKFAIL
54917+ select GRKERNSEC_TIME
54918+ select GRKERNSEC_SIGNAL
54919+ select GRKERNSEC_CHROOT
54920+ select GRKERNSEC_CHROOT_UNIX
54921+ select GRKERNSEC_CHROOT_MOUNT
54922+ select GRKERNSEC_CHROOT_PIVOT
54923+ select GRKERNSEC_CHROOT_DOUBLE
54924+ select GRKERNSEC_CHROOT_CHDIR
54925+ select GRKERNSEC_CHROOT_MKNOD
54926+ select GRKERNSEC_PROC
54927+ select GRKERNSEC_PROC_USERGROUP
54928+ select PAX_RANDUSTACK
54929+ select PAX_ASLR
54930+ select PAX_RANDMMAP
54931+ select PAX_REFCOUNT if (X86 || SPARC64)
54932+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54933+
54934+ help
54935+ If you say Y here, several features in addition to those included
54936+ in the low additional security level will be enabled. These
54937+ features provide even more security to your system, though in rare
54938+ cases they may be incompatible with very old or poorly written
54939+ software. If you enable this option, make sure that your auth
54940+ service (identd) is running as gid 1001. With this option,
54941+ the following features (in addition to those provided in the
54942+ low additional security level) will be enabled:
54943+
54944+ - Failed fork logging
54945+ - Time change logging
54946+ - Signal logging
54947+ - Deny mounts in chroot
54948+ - Deny double chrooting
54949+ - Deny sysctl writes in chroot
54950+ - Deny mknod in chroot
54951+ - Deny access to abstract AF_UNIX sockets out of chroot
54952+ - Deny pivot_root in chroot
54953+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
54954+ - /proc restrictions with special GID set to 10 (usually wheel)
54955+ - Address Space Layout Randomization (ASLR)
54956+ - Prevent exploitation of most refcount overflows
54957+ - Bounds checking of copying between the kernel and userland
54958+
54959+config GRKERNSEC_HIGH
54960+ bool "High"
54961+ select GRKERNSEC_LINK
54962+ select GRKERNSEC_FIFO
54963+ select GRKERNSEC_DMESG
54964+ select GRKERNSEC_FORKFAIL
54965+ select GRKERNSEC_TIME
54966+ select GRKERNSEC_SIGNAL
54967+ select GRKERNSEC_CHROOT
54968+ select GRKERNSEC_CHROOT_SHMAT
54969+ select GRKERNSEC_CHROOT_UNIX
54970+ select GRKERNSEC_CHROOT_MOUNT
54971+ select GRKERNSEC_CHROOT_FCHDIR
54972+ select GRKERNSEC_CHROOT_PIVOT
54973+ select GRKERNSEC_CHROOT_DOUBLE
54974+ select GRKERNSEC_CHROOT_CHDIR
54975+ select GRKERNSEC_CHROOT_MKNOD
54976+ select GRKERNSEC_CHROOT_CAPS
54977+ select GRKERNSEC_CHROOT_SYSCTL
54978+ select GRKERNSEC_CHROOT_FINDTASK
54979+ select GRKERNSEC_SYSFS_RESTRICT
54980+ select GRKERNSEC_PROC
54981+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54982+ select GRKERNSEC_HIDESYM
54983+ select GRKERNSEC_BRUTE
54984+ select GRKERNSEC_PROC_USERGROUP
54985+ select GRKERNSEC_KMEM
54986+ select GRKERNSEC_RESLOG
54987+ select GRKERNSEC_RANDNET
54988+ select GRKERNSEC_PROC_ADD
54989+ select GRKERNSEC_CHROOT_CHMOD
54990+ select GRKERNSEC_CHROOT_NICE
54991+ select GRKERNSEC_SETXID
54992+ select GRKERNSEC_AUDIT_MOUNT
54993+ select GRKERNSEC_MODHARDEN if (MODULES)
54994+ select GRKERNSEC_HARDEN_PTRACE
54995+ select GRKERNSEC_PTRACE_READEXEC
54996+ select GRKERNSEC_VM86 if (X86_32)
54997+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
54998+ select PAX
54999+ select PAX_RANDUSTACK
55000+ select PAX_ASLR
55001+ select PAX_RANDMMAP
55002+ select PAX_NOEXEC
55003+ select PAX_MPROTECT
55004+ select PAX_EI_PAX
55005+ select PAX_PT_PAX_FLAGS
55006+ select PAX_HAVE_ACL_FLAGS
55007+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
55008+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
55009+ select PAX_RANDKSTACK if (X86_TSC && X86)
55010+ select PAX_SEGMEXEC if (X86_32)
55011+ select PAX_PAGEEXEC
55012+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
55013+ select PAX_EMUTRAMP if (PARISC)
55014+ select PAX_EMUSIGRT if (PARISC)
55015+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
55016+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
55017+ select PAX_REFCOUNT if (X86 || SPARC64)
55018+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
55019+ help
55020+ If you say Y here, many of the features of grsecurity will be
55021+ enabled, which will protect you against many kinds of attacks
55022+ against your system. The heightened security comes at a cost
55023+ of an increased chance of incompatibilities with rare software
55024+ on your machine. Since this security level enables PaX, you should
55025+ view <http://pax.grsecurity.net> and read about the PaX
55026+ project. While you are there, download chpax and run it on
55027+ binaries that cause problems with PaX. Also remember that
55028+ since the /proc restrictions are enabled, you must run your
55029+ identd as gid 1001. This security level enables the following
55030+ features in addition to those listed in the low and medium
55031+ security levels:
55032+
55033+ - Additional /proc restrictions
55034+ - Chmod restrictions in chroot
55035+ - No signals, ptrace, or viewing of processes outside of chroot
55036+ - Capability restrictions in chroot
55037+ - Deny fchdir out of chroot
55038+ - Priority restrictions in chroot
55039+ - Segmentation-based implementation of PaX
55040+ - Mprotect restrictions
55041+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
55042+ - Kernel stack randomization
55043+ - Mount/unmount/remount logging
55044+ - Kernel symbol hiding
55045+ - Hardening of module auto-loading
55046+ - Ptrace restrictions
55047+ - Restricted vm86 mode
55048+ - Restricted sysfs/debugfs
55049+ - Active kernel exploit response
55050+
55051+config GRKERNSEC_CUSTOM
55052+ bool "Custom"
55053+ help
55054+ If you say Y here, you will be able to configure every grsecurity
55055+ option, which allows you to enable many more features that aren't
55056+ covered in the basic security levels. These additional features
55057+ include TPE, socket restrictions, and the sysctl system for
55058+ grsecurity. It is advised that you read through the help for
55059+ each option to determine its usefulness in your situation.
55060+
55061+endchoice
55062+
55063+menu "Address Space Protection"
55064+depends on GRKERNSEC
55065+
55066+config GRKERNSEC_KMEM
55067+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
55068+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
55069+ help
55070+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
55071+ be written to or read from to modify or leak the contents of the running
55072+ kernel. /dev/port will also not be allowed to be opened. If you have module
55073+ support disabled, enabling this will close up four ways that are
55074+ currently used to insert malicious code into the running kernel.
55075+ Even with all these features enabled, we still highly recommend that
55076+ you use the RBAC system, as it is still possible for an attacker to
55077+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55078+ If you are not using XFree86, you may be able to stop this additional
55079+ case by enabling the 'Disable privileged I/O' option. Though nothing
55080+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55081+ but only to video memory, which is the only writing we allow in this
55082+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55083+ not be allowed to mprotect it with PROT_WRITE later.
55084+ It is highly recommended that you say Y here if you meet all the
55085+ conditions above.
55086+
55087+config GRKERNSEC_VM86
55088+ bool "Restrict VM86 mode"
55089+ depends on X86_32
55090+
55091+ help
55092+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55093+ make use of a special execution mode on 32bit x86 processors called
55094+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55095+ video cards and will still work with this option enabled. The purpose
55096+ of the option is to prevent exploitation of emulation errors in
55097+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55098+ Nearly all users should be able to enable this option.
55099+
55100+config GRKERNSEC_IO
55101+ bool "Disable privileged I/O"
55102+ depends on X86
55103+ select RTC_CLASS
55104+ select RTC_INTF_DEV
55105+ select RTC_DRV_CMOS
55106+
55107+ help
55108+ If you say Y here, all ioperm and iopl calls will return an error.
55109+ Ioperm and iopl can be used to modify the running kernel.
55110+ Unfortunately, some programs need this access to operate properly,
55111+ the most notable of which are XFree86 and hwclock. hwclock can be
55112+ remedied by having RTC support in the kernel, so real-time
55113+ clock support is enabled if this option is enabled, to ensure
55114+ that hwclock operates correctly. XFree86 still will not
55115+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55116+ IF YOU USE XFree86. If you use XFree86 and you still want to
55117+ protect your kernel against modification, use the RBAC system.
55118+
55119+config GRKERNSEC_PROC_MEMMAP
55120+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55121+ default y if (PAX_NOEXEC || PAX_ASLR)
55122+ depends on PAX_NOEXEC || PAX_ASLR
55123+ help
55124+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55125+ give no information about the addresses of its mappings if
55126+ PaX features that rely on random addresses are enabled on the task.
55127+ In addition to sanitizing this information and disabling other
55128+ dangerous sources of information, this option causes reads of sensitive
55129+ /proc/<pid> entries where the file descriptor was opened in a different
55130+ task than the one performing the read. Such attempts are logged.
55131+ If you use PaX it is greatly recommended that you say Y here as it
55132+ closes up a hole that makes the full ASLR useless for suid
55133+ binaries.
55134+
55135+config GRKERNSEC_BRUTE
55136+ bool "Deter exploit bruteforcing"
55137+ help
55138+ If you say Y here, attempts to bruteforce exploits against forking
55139+ daemons such as apache or sshd, as well as against suid/sgid binaries
55140+ will be deterred. When a child of a forking daemon is killed by PaX
55141+ or crashes due to an illegal instruction or other suspicious signal,
55142+ the parent process will be delayed 30 seconds upon every subsequent
55143+ fork until the administrator is able to assess the situation and
55144+ restart the daemon.
55145+ In the suid/sgid case, the attempt is logged, the user has all their
55146+ processes terminated, and they are prevented from executing any further
55147+ processes for 15 minutes.
55148+ It is recommended that you also enable signal logging in the auditing
55149+ section so that logs are generated when a process triggers a suspicious
55150+ signal.
55151+ If the sysctl option is enabled, a sysctl option with name
55152+ "deter_bruteforce" is created.
55153+
55154+config GRKERNSEC_MODHARDEN
55155+ bool "Harden module auto-loading"
55156+ depends on MODULES
55157+ help
55158+ If you say Y here, module auto-loading in response to use of some
55159+ feature implemented by an unloaded module will be restricted to
55160+ root users. Enabling this option helps defend against attacks
55161+ by unprivileged users who abuse the auto-loading behavior to
55162+ cause a vulnerable module to load that is then exploited.
55163+
55164+ If this option prevents a legitimate use of auto-loading for a
55165+ non-root user, the administrator can execute modprobe manually
55166+ with the exact name of the module mentioned in the alert log.
55167+ Alternatively, the administrator can add the module to the list
55168+ of modules loaded at boot by modifying init scripts.
55169+
55170+ Modification of init scripts will most likely be needed on
55171+ Ubuntu servers with encrypted home directory support enabled,
55172+ as the first non-root user logging in will cause the ecb(aes),
55173+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55174+
55175+config GRKERNSEC_HIDESYM
55176+ bool "Hide kernel symbols"
55177+ help
55178+ If you say Y here, getting information on loaded modules, and
55179+ displaying all kernel symbols through a syscall will be restricted
55180+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55181+ /proc/kallsyms will be restricted to the root user. The RBAC
55182+ system can hide that entry even from root.
55183+
55184+ This option also prevents leaking of kernel addresses through
55185+ several /proc entries.
55186+
55187+ Note that this option is only effective provided the following
55188+ conditions are met:
55189+ 1) The kernel using grsecurity is not precompiled by some distribution
55190+ 2) You have also enabled GRKERNSEC_DMESG
55191+ 3) You are using the RBAC system and hiding other files such as your
55192+ kernel image and System.map. Alternatively, enabling this option
55193+ causes the permissions on /boot, /lib/modules, and the kernel
55194+ source directory to change at compile time to prevent
55195+ reading by non-root users.
55196+ If the above conditions are met, this option will aid in providing a
55197+ useful protection against local kernel exploitation of overflows
55198+ and arbitrary read/write vulnerabilities.
55199+
55200+config GRKERNSEC_KERN_LOCKOUT
55201+ bool "Active kernel exploit response"
55202+ depends on X86 || ARM || PPC || SPARC
55203+ help
55204+ If you say Y here, when a PaX alert is triggered due to suspicious
55205+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55206+ or an OOPs occurs due to bad memory accesses, instead of just
55207+ terminating the offending process (and potentially allowing
55208+ a subsequent exploit from the same user), we will take one of two
55209+ actions:
55210+ If the user was root, we will panic the system
55211+ If the user was non-root, we will log the attempt, terminate
55212+ all processes owned by the user, then prevent them from creating
55213+ any new processes until the system is restarted
55214+ This deters repeated kernel exploitation/bruteforcing attempts
55215+ and is useful for later forensics.
55216+
55217+endmenu
55218+menu "Role Based Access Control Options"
55219+depends on GRKERNSEC
55220+
55221+config GRKERNSEC_RBAC_DEBUG
55222+ bool
55223+
55224+config GRKERNSEC_NO_RBAC
55225+ bool "Disable RBAC system"
55226+ help
55227+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55228+ preventing the RBAC system from being enabled. You should only say Y
55229+ here if you have no intention of using the RBAC system, so as to prevent
55230+ an attacker with root access from misusing the RBAC system to hide files
55231+ and processes when loadable module support and /dev/[k]mem have been
55232+ locked down.
55233+
55234+config GRKERNSEC_ACL_HIDEKERN
55235+ bool "Hide kernel processes"
55236+ help
55237+ If you say Y here, all kernel threads will be hidden to all
55238+ processes but those whose subject has the "view hidden processes"
55239+ flag.
55240+
55241+config GRKERNSEC_ACL_MAXTRIES
55242+ int "Maximum tries before password lockout"
55243+ default 3
55244+ help
55245+ This option enforces the maximum number of times a user can attempt
55246+ to authorize themselves with the grsecurity RBAC system before being
55247+ denied the ability to attempt authorization again for a specified time.
55248+ The lower the number, the harder it will be to brute-force a password.
55249+
55250+config GRKERNSEC_ACL_TIMEOUT
55251+ int "Time to wait after max password tries, in seconds"
55252+ default 30
55253+ help
55254+ This option specifies the time the user must wait after attempting to
55255+ authorize to the RBAC system with the maximum number of invalid
55256+ passwords. The higher the number, the harder it will be to brute-force
55257+ a password.
55258+
55259+endmenu
55260+menu "Filesystem Protections"
55261+depends on GRKERNSEC
55262+
55263+config GRKERNSEC_PROC
55264+ bool "Proc restrictions"
55265+ help
55266+ If you say Y here, the permissions of the /proc filesystem
55267+ will be altered to enhance system security and privacy. You MUST
55268+ choose either a user only restriction or a user and group restriction.
55269+ Depending upon the option you choose, you can either restrict users to
55270+ see only the processes they themselves run, or choose a group that can
55271+ view all processes and files normally restricted to root if you choose
55272+ the "restrict to user only" option. NOTE: If you're running identd as
55273+ a non-root user, you will have to run it as the group you specify here.
55274+
55275+config GRKERNSEC_PROC_USER
55276+ bool "Restrict /proc to user only"
55277+ depends on GRKERNSEC_PROC
55278+ help
55279+ If you say Y here, non-root users will only be able to view their own
55280+ processes, and restricts them from viewing network-related information,
55281+ and viewing kernel symbol and module information.
55282+
55283+config GRKERNSEC_PROC_USERGROUP
55284+ bool "Allow special group"
55285+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55286+ help
55287+ If you say Y here, you will be able to select a group that will be
55288+ able to view all processes and network-related information. If you've
55289+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55290+ remain hidden. This option is useful if you want to run identd as
55291+ a non-root user.
55292+
55293+config GRKERNSEC_PROC_GID
55294+ int "GID for special group"
55295+ depends on GRKERNSEC_PROC_USERGROUP
55296+ default 1001
55297+
55298+config GRKERNSEC_PROC_ADD
55299+ bool "Additional restrictions"
55300+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55301+ help
55302+ If you say Y here, additional restrictions will be placed on
55303+ /proc that keep normal users from viewing device information and
55304+ slabinfo information that could be useful for exploits.
55305+
55306+config GRKERNSEC_LINK
55307+ bool "Linking restrictions"
55308+ help
55309+ If you say Y here, /tmp race exploits will be prevented, since users
55310+ will no longer be able to follow symlinks owned by other users in
55311+ world-writable +t directories (e.g. /tmp), unless the owner of the
55312+ symlink is the owner of the directory. users will also not be
55313+ able to hardlink to files they do not own. If the sysctl option is
55314+ enabled, a sysctl option with name "linking_restrictions" is created.
55315+
55316+config GRKERNSEC_FIFO
55317+ bool "FIFO restrictions"
55318+ help
55319+ If you say Y here, users will not be able to write to FIFOs they don't
55320+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55321+ the FIFO is the same owner of the directory it's held in. If the sysctl
55322+ option is enabled, a sysctl option with name "fifo_restrictions" is
55323+ created.
55324+
55325+config GRKERNSEC_SYSFS_RESTRICT
55326+ bool "Sysfs/debugfs restriction"
55327+ depends on SYSFS
55328+ help
55329+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55330+ any filesystem normally mounted under it (e.g. debugfs) will be
55331+ mostly accessible only by root. These filesystems generally provide access
55332+ to hardware and debug information that isn't appropriate for unprivileged
55333+ users of the system. Sysfs and debugfs have also become a large source
55334+ of new vulnerabilities, ranging from infoleaks to local compromise.
55335+ There has been very little oversight with an eye toward security involved
55336+ in adding new exporters of information to these filesystems, so their
55337+ use is discouraged.
55338+ For reasons of compatibility, a few directories have been whitelisted
55339+ for access by non-root users:
55340+ /sys/fs/selinux
55341+ /sys/fs/fuse
55342+ /sys/devices/system/cpu
55343+
55344+config GRKERNSEC_ROFS
55345+ bool "Runtime read-only mount protection"
55346+ help
55347+ If you say Y here, a sysctl option with name "romount_protect" will
55348+ be created. By setting this option to 1 at runtime, filesystems
55349+ will be protected in the following ways:
55350+ * No new writable mounts will be allowed
55351+ * Existing read-only mounts won't be able to be remounted read/write
55352+ * Write operations will be denied on all block devices
55353+ This option acts independently of grsec_lock: once it is set to 1,
55354+ it cannot be turned off. Therefore, please be mindful of the resulting
55355+ behavior if this option is enabled in an init script on a read-only
55356+ filesystem. This feature is mainly intended for secure embedded systems.
55357+
55358+config GRKERNSEC_CHROOT
55359+ bool "Chroot jail restrictions"
55360+ help
55361+ If you say Y here, you will be able to choose several options that will
55362+ make breaking out of a chrooted jail much more difficult. If you
55363+ encounter no software incompatibilities with the following options, it
55364+ is recommended that you enable each one.
55365+
55366+config GRKERNSEC_CHROOT_MOUNT
55367+ bool "Deny mounts"
55368+ depends on GRKERNSEC_CHROOT
55369+ help
55370+ If you say Y here, processes inside a chroot will not be able to
55371+ mount or remount filesystems. If the sysctl option is enabled, a
55372+ sysctl option with name "chroot_deny_mount" is created.
55373+
55374+config GRKERNSEC_CHROOT_DOUBLE
55375+ bool "Deny double-chroots"
55376+ depends on GRKERNSEC_CHROOT
55377+ help
55378+ If you say Y here, processes inside a chroot will not be able to chroot
55379+ again outside the chroot. This is a widely used method of breaking
55380+ out of a chroot jail and should not be allowed. If the sysctl
55381+ option is enabled, a sysctl option with name
55382+ "chroot_deny_chroot" is created.
55383+
55384+config GRKERNSEC_CHROOT_PIVOT
55385+ bool "Deny pivot_root in chroot"
55386+ depends on GRKERNSEC_CHROOT
55387+ help
55388+ If you say Y here, processes inside a chroot will not be able to use
55389+ a function called pivot_root() that was introduced in Linux 2.3.41. It
55390+ works similar to chroot in that it changes the root filesystem. This
55391+ function could be misused in a chrooted process to attempt to break out
55392+ of the chroot, and therefore should not be allowed. If the sysctl
55393+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
55394+ created.
55395+
55396+config GRKERNSEC_CHROOT_CHDIR
55397+ bool "Enforce chdir(\"/\") on all chroots"
55398+ depends on GRKERNSEC_CHROOT
55399+ help
55400+ If you say Y here, the current working directory of all newly-chrooted
55401+ applications will be set to the the root directory of the chroot.
55402+ The man page on chroot(2) states:
55403+ Note that this call does not change the current working
55404+ directory, so that `.' can be outside the tree rooted at
55405+ `/'. In particular, the super-user can escape from a
55406+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55407+
55408+ It is recommended that you say Y here, since it's not known to break
55409+ any software. If the sysctl option is enabled, a sysctl option with
55410+ name "chroot_enforce_chdir" is created.
55411+
55412+config GRKERNSEC_CHROOT_CHMOD
55413+ bool "Deny (f)chmod +s"
55414+ depends on GRKERNSEC_CHROOT
55415+ help
55416+ If you say Y here, processes inside a chroot will not be able to chmod
55417+ or fchmod files to make them have suid or sgid bits. This protects
55418+ against another published method of breaking a chroot. If the sysctl
55419+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
55420+ created.
55421+
55422+config GRKERNSEC_CHROOT_FCHDIR
55423+ bool "Deny fchdir out of chroot"
55424+ depends on GRKERNSEC_CHROOT
55425+ help
55426+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
55427+ to a file descriptor of the chrooting process that points to a directory
55428+ outside the filesystem will be stopped. If the sysctl option
55429+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55430+
55431+config GRKERNSEC_CHROOT_MKNOD
55432+ bool "Deny mknod"
55433+ depends on GRKERNSEC_CHROOT
55434+ help
55435+ If you say Y here, processes inside a chroot will not be allowed to
55436+ mknod. The problem with using mknod inside a chroot is that it
55437+ would allow an attacker to create a device entry that is the same
55438+ as one on the physical root of your system, which could range from
55439+ anything from the console device to a device for your harddrive (which
55440+ they could then use to wipe the drive or steal data). It is recommended
55441+ that you say Y here, unless you run into software incompatibilities.
55442+ If the sysctl option is enabled, a sysctl option with name
55443+ "chroot_deny_mknod" is created.
55444+
55445+config GRKERNSEC_CHROOT_SHMAT
55446+ bool "Deny shmat() out of chroot"
55447+ depends on GRKERNSEC_CHROOT
55448+ help
55449+ If you say Y here, processes inside a chroot will not be able to attach
55450+ to shared memory segments that were created outside of the chroot jail.
55451+ It is recommended that you say Y here. If the sysctl option is enabled,
55452+ a sysctl option with name "chroot_deny_shmat" is created.
55453+
55454+config GRKERNSEC_CHROOT_UNIX
55455+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
55456+ depends on GRKERNSEC_CHROOT
55457+ help
55458+ If you say Y here, processes inside a chroot will not be able to
55459+ connect to abstract (meaning not belonging to a filesystem) Unix
55460+ domain sockets that were bound outside of a chroot. It is recommended
55461+ that you say Y here. If the sysctl option is enabled, a sysctl option
55462+ with name "chroot_deny_unix" is created.
55463+
55464+config GRKERNSEC_CHROOT_FINDTASK
55465+ bool "Protect outside processes"
55466+ depends on GRKERNSEC_CHROOT
55467+ help
55468+ If you say Y here, processes inside a chroot will not be able to
55469+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55470+ getsid, or view any process outside of the chroot. If the sysctl
55471+ option is enabled, a sysctl option with name "chroot_findtask" is
55472+ created.
55473+
55474+config GRKERNSEC_CHROOT_NICE
55475+ bool "Restrict priority changes"
55476+ depends on GRKERNSEC_CHROOT
55477+ help
55478+ If you say Y here, processes inside a chroot will not be able to raise
55479+ the priority of processes in the chroot, or alter the priority of
55480+ processes outside the chroot. This provides more security than simply
55481+ removing CAP_SYS_NICE from the process' capability set. If the
55482+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55483+ is created.
55484+
55485+config GRKERNSEC_CHROOT_SYSCTL
55486+ bool "Deny sysctl writes"
55487+ depends on GRKERNSEC_CHROOT
55488+ help
55489+ If you say Y here, an attacker in a chroot will not be able to
55490+ write to sysctl entries, either by sysctl(2) or through a /proc
55491+ interface. It is strongly recommended that you say Y here. If the
55492+ sysctl option is enabled, a sysctl option with name
55493+ "chroot_deny_sysctl" is created.
55494+
55495+config GRKERNSEC_CHROOT_CAPS
55496+ bool "Capability restrictions"
55497+ depends on GRKERNSEC_CHROOT
55498+ help
55499+ If you say Y here, the capabilities on all processes within a
55500+ chroot jail will be lowered to stop module insertion, raw i/o,
55501+ system and net admin tasks, rebooting the system, modifying immutable
55502+ files, modifying IPC owned by another, and changing the system time.
55503+ This is left an option because it can break some apps. Disable this
55504+ if your chrooted apps are having problems performing those kinds of
55505+ tasks. If the sysctl option is enabled, a sysctl option with
55506+ name "chroot_caps" is created.
55507+
55508+endmenu
55509+menu "Kernel Auditing"
55510+depends on GRKERNSEC
55511+
55512+config GRKERNSEC_AUDIT_GROUP
55513+ bool "Single group for auditing"
55514+ help
55515+ If you say Y here, the exec, chdir, and (un)mount logging features
55516+ will only operate on a group you specify. This option is recommended
55517+ if you only want to watch certain users instead of having a large
55518+ amount of logs from the entire system. If the sysctl option is enabled,
55519+ a sysctl option with name "audit_group" is created.
55520+
55521+config GRKERNSEC_AUDIT_GID
55522+ int "GID for auditing"
55523+ depends on GRKERNSEC_AUDIT_GROUP
55524+ default 1007
55525+
55526+config GRKERNSEC_EXECLOG
55527+ bool "Exec logging"
55528+ help
55529+ If you say Y here, all execve() calls will be logged (since the
55530+ other exec*() calls are frontends to execve(), all execution
55531+ will be logged). Useful for shell-servers that like to keep track
55532+ of their users. If the sysctl option is enabled, a sysctl option with
55533+ name "exec_logging" is created.
55534+ WARNING: This option when enabled will produce a LOT of logs, especially
55535+ on an active system.
55536+
55537+config GRKERNSEC_RESLOG
55538+ bool "Resource logging"
55539+ help
55540+ If you say Y here, all attempts to overstep resource limits will
55541+ be logged with the resource name, the requested size, and the current
55542+ limit. It is highly recommended that you say Y here. If the sysctl
55543+ option is enabled, a sysctl option with name "resource_logging" is
55544+ created. If the RBAC system is enabled, the sysctl value is ignored.
55545+
55546+config GRKERNSEC_CHROOT_EXECLOG
55547+ bool "Log execs within chroot"
55548+ help
55549+ If you say Y here, all executions inside a chroot jail will be logged
55550+ to syslog. This can cause a large amount of logs if certain
55551+ applications (eg. djb's daemontools) are installed on the system, and
55552+ is therefore left as an option. If the sysctl option is enabled, a
55553+ sysctl option with name "chroot_execlog" is created.
55554+
55555+config GRKERNSEC_AUDIT_PTRACE
55556+ bool "Ptrace logging"
55557+ help
55558+ If you say Y here, all attempts to attach to a process via ptrace
55559+ will be logged. If the sysctl option is enabled, a sysctl option
55560+ with name "audit_ptrace" is created.
55561+
55562+config GRKERNSEC_AUDIT_CHDIR
55563+ bool "Chdir logging"
55564+ help
55565+ If you say Y here, all chdir() calls will be logged. If the sysctl
55566+ option is enabled, a sysctl option with name "audit_chdir" is created.
55567+
55568+config GRKERNSEC_AUDIT_MOUNT
55569+ bool "(Un)Mount logging"
55570+ help
55571+ If you say Y here, all mounts and unmounts will be logged. If the
55572+ sysctl option is enabled, a sysctl option with name "audit_mount" is
55573+ created.
55574+
55575+config GRKERNSEC_SIGNAL
55576+ bool "Signal logging"
55577+ help
55578+ If you say Y here, certain important signals will be logged, such as
55579+ SIGSEGV, which will as a result inform you of when a error in a program
55580+ occurred, which in some cases could mean a possible exploit attempt.
55581+ If the sysctl option is enabled, a sysctl option with name
55582+ "signal_logging" is created.
55583+
55584+config GRKERNSEC_FORKFAIL
55585+ bool "Fork failure logging"
55586+ help
55587+ If you say Y here, all failed fork() attempts will be logged.
55588+ This could suggest a fork bomb, or someone attempting to overstep
55589+ their process limit. If the sysctl option is enabled, a sysctl option
55590+ with name "forkfail_logging" is created.
55591+
55592+config GRKERNSEC_TIME
55593+ bool "Time change logging"
55594+ help
55595+ If you say Y here, any changes of the system clock will be logged.
55596+ If the sysctl option is enabled, a sysctl option with name
55597+ "timechange_logging" is created.
55598+
55599+config GRKERNSEC_PROC_IPADDR
55600+ bool "/proc/<pid>/ipaddr support"
55601+ help
55602+ If you say Y here, a new entry will be added to each /proc/<pid>
55603+ directory that contains the IP address of the person using the task.
55604+ The IP is carried across local TCP and AF_UNIX stream sockets.
55605+ This information can be useful for IDS/IPSes to perform remote response
55606+ to a local attack. The entry is readable by only the owner of the
55607+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
55608+ the RBAC system), and thus does not create privacy concerns.
55609+
55610+config GRKERNSEC_RWXMAP_LOG
55611+ bool 'Denied RWX mmap/mprotect logging'
55612+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
55613+ help
55614+ If you say Y here, calls to mmap() and mprotect() with explicit
55615+ usage of PROT_WRITE and PROT_EXEC together will be logged when
55616+ denied by the PAX_MPROTECT feature. If the sysctl option is
55617+ enabled, a sysctl option with name "rwxmap_logging" is created.
55618+
55619+config GRKERNSEC_AUDIT_TEXTREL
55620+ bool 'ELF text relocations logging (READ HELP)'
55621+ depends on PAX_MPROTECT
55622+ help
55623+ If you say Y here, text relocations will be logged with the filename
55624+ of the offending library or binary. The purpose of the feature is
55625+ to help Linux distribution developers get rid of libraries and
55626+ binaries that need text relocations which hinder the future progress
55627+ of PaX. Only Linux distribution developers should say Y here, and
55628+ never on a production machine, as this option creates an information
55629+ leak that could aid an attacker in defeating the randomization of
55630+ a single memory region. If the sysctl option is enabled, a sysctl
55631+ option with name "audit_textrel" is created.
55632+
55633+endmenu
55634+
55635+menu "Executable Protections"
55636+depends on GRKERNSEC
55637+
55638+config GRKERNSEC_DMESG
55639+ bool "Dmesg(8) restriction"
55640+ help
55641+ If you say Y here, non-root users will not be able to use dmesg(8)
55642+ to view up to the last 4kb of messages in the kernel's log buffer.
55643+ The kernel's log buffer often contains kernel addresses and other
55644+ identifying information useful to an attacker in fingerprinting a
55645+ system for a targeted exploit.
55646+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
55647+ created.
55648+
55649+config GRKERNSEC_HARDEN_PTRACE
55650+ bool "Deter ptrace-based process snooping"
55651+ help
55652+ If you say Y here, TTY sniffers and other malicious monitoring
55653+ programs implemented through ptrace will be defeated. If you
55654+ have been using the RBAC system, this option has already been
55655+ enabled for several years for all users, with the ability to make
55656+ fine-grained exceptions.
55657+
55658+ This option only affects the ability of non-root users to ptrace
55659+ processes that are not a descendent of the ptracing process.
55660+ This means that strace ./binary and gdb ./binary will still work,
55661+ but attaching to arbitrary processes will not. If the sysctl
55662+ option is enabled, a sysctl option with name "harden_ptrace" is
55663+ created.
55664+
55665+config GRKERNSEC_PTRACE_READEXEC
55666+ bool "Require read access to ptrace sensitive binaries"
55667+ help
55668+ If you say Y here, unprivileged users will not be able to ptrace unreadable
55669+ binaries. This option is useful in environments that
55670+ remove the read bits (e.g. file mode 4711) from suid binaries to
55671+ prevent infoleaking of their contents. This option adds
55672+ consistency to the use of that file mode, as the binary could normally
55673+ be read out when run without privileges while ptracing.
55674+
55675+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
55676+ is created.
55677+
55678+config GRKERNSEC_SETXID
55679+ bool "Enforce consistent multithreaded privileges"
55680+ help
55681+ If you say Y here, a change from a root uid to a non-root uid
55682+ in a multithreaded application will cause the resulting uids,
55683+ gids, supplementary groups, and capabilities in that thread
55684+ to be propagated to the other threads of the process. In most
55685+ cases this is unnecessary, as glibc will emulate this behavior
55686+ on behalf of the application. Other libcs do not act in the
55687+ same way, allowing the other threads of the process to continue
55688+ running with root privileges. If the sysctl option is enabled,
55689+ a sysctl option with name "consistent_setxid" is created.
55690+
55691+config GRKERNSEC_TPE
55692+ bool "Trusted Path Execution (TPE)"
55693+ help
55694+ If you say Y here, you will be able to choose a gid to add to the
55695+ supplementary groups of users you want to mark as "untrusted."
55696+ These users will not be able to execute any files that are not in
55697+ root-owned directories writable only by root. If the sysctl option
55698+ is enabled, a sysctl option with name "tpe" is created.
55699+
55700+config GRKERNSEC_TPE_ALL
55701+ bool "Partially restrict all non-root users"
55702+ depends on GRKERNSEC_TPE
55703+ help
55704+ If you say Y here, all non-root users will be covered under
55705+ a weaker TPE restriction. This is separate from, and in addition to,
55706+ the main TPE options that you have selected elsewhere. Thus, if a
55707+ "trusted" GID is chosen, this restriction applies to even that GID.
55708+ Under this restriction, all non-root users will only be allowed to
55709+ execute files in directories they own that are not group or
55710+ world-writable, or in directories owned by root and writable only by
55711+ root. If the sysctl option is enabled, a sysctl option with name
55712+ "tpe_restrict_all" is created.
55713+
55714+config GRKERNSEC_TPE_INVERT
55715+ bool "Invert GID option"
55716+ depends on GRKERNSEC_TPE
55717+ help
55718+ If you say Y here, the group you specify in the TPE configuration will
55719+ decide what group TPE restrictions will be *disabled* for. This
55720+ option is useful if you want TPE restrictions to be applied to most
55721+ users on the system. If the sysctl option is enabled, a sysctl option
55722+ with name "tpe_invert" is created. Unlike other sysctl options, this
55723+ entry will default to on for backward-compatibility.
55724+
55725+config GRKERNSEC_TPE_GID
55726+ int "GID for untrusted users"
55727+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55728+ default 1005
55729+ help
55730+ Setting this GID determines what group TPE restrictions will be
55731+ *enabled* for. If the sysctl option is enabled, a sysctl option
55732+ with name "tpe_gid" is created.
55733+
55734+config GRKERNSEC_TPE_GID
55735+ int "GID for trusted users"
55736+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55737+ default 1005
55738+ help
55739+ Setting this GID determines what group TPE restrictions will be
55740+ *disabled* for. If the sysctl option is enabled, a sysctl option
55741+ with name "tpe_gid" is created.
55742+
55743+endmenu
55744+menu "Network Protections"
55745+depends on GRKERNSEC
55746+
55747+config GRKERNSEC_RANDNET
55748+ bool "Larger entropy pools"
55749+ help
55750+ If you say Y here, the entropy pools used for many features of Linux
55751+ and grsecurity will be doubled in size. Since several grsecurity
55752+ features use additional randomness, it is recommended that you say Y
55753+ here. Saying Y here has a similar effect as modifying
55754+ /proc/sys/kernel/random/poolsize.
55755+
55756+config GRKERNSEC_BLACKHOLE
55757+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55758+ depends on NET
55759+ help
55760+ If you say Y here, neither TCP resets nor ICMP
55761+ destination-unreachable packets will be sent in response to packets
55762+ sent to ports for which no associated listening process exists.
55763+ This feature supports both IPV4 and IPV6 and exempts the
55764+ loopback interface from blackholing. Enabling this feature
55765+ makes a host more resilient to DoS attacks and reduces network
55766+ visibility against scanners.
55767+
55768+ The blackhole feature as-implemented is equivalent to the FreeBSD
55769+ blackhole feature, as it prevents RST responses to all packets, not
55770+ just SYNs. Under most application behavior this causes no
55771+ problems, but applications (like haproxy) may not close certain
55772+ connections in a way that cleanly terminates them on the remote
55773+ end, leaving the remote host in LAST_ACK state. Because of this
55774+ side-effect and to prevent intentional LAST_ACK DoSes, this
55775+ feature also adds automatic mitigation against such attacks.
55776+ The mitigation drastically reduces the amount of time a socket
55777+ can spend in LAST_ACK state. If you're using haproxy and not
55778+ all servers it connects to have this option enabled, consider
55779+ disabling this feature on the haproxy host.
55780+
55781+ If the sysctl option is enabled, two sysctl options with names
55782+ "ip_blackhole" and "lastack_retries" will be created.
55783+ While "ip_blackhole" takes the standard zero/non-zero on/off
55784+ toggle, "lastack_retries" uses the same kinds of values as
55785+ "tcp_retries1" and "tcp_retries2". The default value of 4
55786+ prevents a socket from lasting more than 45 seconds in LAST_ACK
55787+ state.
55788+
55789+config GRKERNSEC_SOCKET
55790+ bool "Socket restrictions"
55791+ depends on NET
55792+ help
55793+ If you say Y here, you will be able to choose from several options.
55794+ If you assign a GID on your system and add it to the supplementary
55795+ groups of users you want to restrict socket access to, this patch
55796+ will perform up to three things, based on the option(s) you choose.
55797+
55798+config GRKERNSEC_SOCKET_ALL
55799+ bool "Deny any sockets to group"
55800+ depends on GRKERNSEC_SOCKET
55801+ help
55802+ If you say Y here, you will be able to choose a GID of whose users will
55803+ be unable to connect to other hosts from your machine or run server
55804+ applications from your machine. If the sysctl option is enabled, a
55805+ sysctl option with name "socket_all" is created.
55806+
55807+config GRKERNSEC_SOCKET_ALL_GID
55808+ int "GID to deny all sockets for"
55809+ depends on GRKERNSEC_SOCKET_ALL
55810+ default 1004
55811+ help
55812+ Here you can choose the GID to disable socket access for. Remember to
55813+ add the users you want socket access disabled for to the GID
55814+ specified here. If the sysctl option is enabled, a sysctl option
55815+ with name "socket_all_gid" is created.
55816+
55817+config GRKERNSEC_SOCKET_CLIENT
55818+ bool "Deny client sockets to group"
55819+ depends on GRKERNSEC_SOCKET
55820+ help
55821+ If you say Y here, you will be able to choose a GID of whose users will
55822+ be unable to connect to other hosts from your machine, but will be
55823+ able to run servers. If this option is enabled, all users in the group
55824+ you specify will have to use passive mode when initiating ftp transfers
55825+ from the shell on your machine. If the sysctl option is enabled, a
55826+ sysctl option with name "socket_client" is created.
55827+
55828+config GRKERNSEC_SOCKET_CLIENT_GID
55829+ int "GID to deny client sockets for"
55830+ depends on GRKERNSEC_SOCKET_CLIENT
55831+ default 1003
55832+ help
55833+ Here you can choose the GID to disable client socket access for.
55834+ Remember to add the users you want client socket access disabled for to
55835+ the GID specified here. If the sysctl option is enabled, a sysctl
55836+ option with name "socket_client_gid" is created.
55837+
55838+config GRKERNSEC_SOCKET_SERVER
55839+ bool "Deny server sockets to group"
55840+ depends on GRKERNSEC_SOCKET
55841+ help
55842+ If you say Y here, you will be able to choose a GID of whose users will
55843+ be unable to run server applications from your machine. If the sysctl
55844+ option is enabled, a sysctl option with name "socket_server" is created.
55845+
55846+config GRKERNSEC_SOCKET_SERVER_GID
55847+ int "GID to deny server sockets for"
55848+ depends on GRKERNSEC_SOCKET_SERVER
55849+ default 1002
55850+ help
55851+ Here you can choose the GID to disable server socket access for.
55852+ Remember to add the users you want server socket access disabled for to
55853+ the GID specified here. If the sysctl option is enabled, a sysctl
55854+ option with name "socket_server_gid" is created.
55855+
55856+endmenu
55857+menu "Sysctl support"
55858+depends on GRKERNSEC && SYSCTL
55859+
55860+config GRKERNSEC_SYSCTL
55861+ bool "Sysctl support"
55862+ help
55863+ If you say Y here, you will be able to change the options that
55864+ grsecurity runs with at bootup, without having to recompile your
55865+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
55866+ to enable (1) or disable (0) various features. All the sysctl entries
55867+ are mutable until the "grsec_lock" entry is set to a non-zero value.
55868+ All features enabled in the kernel configuration are disabled at boot
55869+ if you do not say Y to the "Turn on features by default" option.
55870+ All options should be set at startup, and the grsec_lock entry should
55871+ be set to a non-zero value after all the options are set.
55872+ *THIS IS EXTREMELY IMPORTANT*
55873+
55874+config GRKERNSEC_SYSCTL_DISTRO
55875+ bool "Extra sysctl support for distro makers (READ HELP)"
55876+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
55877+ help
55878+ If you say Y here, additional sysctl options will be created
55879+ for features that affect processes running as root. Therefore,
55880+ it is critical when using this option that the grsec_lock entry be
55881+ enabled after boot. Only distros with prebuilt kernel packages
55882+ with this option enabled that can ensure grsec_lock is enabled
55883+ after boot should use this option.
55884+ *Failure to set grsec_lock after boot makes all grsec features
55885+ this option covers useless*
55886+
55887+ Currently this option creates the following sysctl entries:
55888+ "Disable Privileged I/O": "disable_priv_io"
55889+
55890+config GRKERNSEC_SYSCTL_ON
55891+ bool "Turn on features by default"
55892+ depends on GRKERNSEC_SYSCTL
55893+ help
55894+ If you say Y here, instead of having all features enabled in the
55895+ kernel configuration disabled at boot time, the features will be
55896+ enabled at boot time. It is recommended you say Y here unless
55897+ there is some reason you would want all sysctl-tunable features to
55898+ be disabled by default. As mentioned elsewhere, it is important
55899+ to enable the grsec_lock entry once you have finished modifying
55900+ the sysctl entries.
55901+
55902+endmenu
55903+menu "Logging Options"
55904+depends on GRKERNSEC
55905+
55906+config GRKERNSEC_FLOODTIME
55907+ int "Seconds in between log messages (minimum)"
55908+ default 10
55909+ help
55910+ This option allows you to enforce the number of seconds between
55911+ grsecurity log messages. The default should be suitable for most
55912+ people, however, if you choose to change it, choose a value small enough
55913+ to allow informative logs to be produced, but large enough to
55914+ prevent flooding.
55915+
55916+config GRKERNSEC_FLOODBURST
55917+ int "Number of messages in a burst (maximum)"
55918+ default 6
55919+ help
55920+ This option allows you to choose the maximum number of messages allowed
55921+ within the flood time interval you chose in a separate option. The
55922+ default should be suitable for most people, however if you find that
55923+ many of your logs are being interpreted as flooding, you may want to
55924+ raise this value.
55925+
55926+endmenu
55927+
55928+endmenu
55929diff --git a/grsecurity/Makefile b/grsecurity/Makefile
55930new file mode 100644
55931index 0000000..1b9afa9
55932--- /dev/null
55933+++ b/grsecurity/Makefile
55934@@ -0,0 +1,38 @@
55935+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
55936+# during 2001-2009 it has been completely redesigned by Brad Spengler
55937+# into an RBAC system
55938+#
55939+# All code in this directory and various hooks inserted throughout the kernel
55940+# are copyright Brad Spengler - Open Source Security, Inc., and released
55941+# under the GPL v2 or higher
55942+
55943+KBUILD_CFLAGS += -Werror
55944+
55945+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
55946+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
55947+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
55948+
55949+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
55950+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
55951+ gracl_learn.o grsec_log.o
55952+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
55953+
55954+ifdef CONFIG_NET
55955+obj-y += grsec_sock.o
55956+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
55957+endif
55958+
55959+ifndef CONFIG_GRKERNSEC
55960+obj-y += grsec_disabled.o
55961+endif
55962+
55963+ifdef CONFIG_GRKERNSEC_HIDESYM
55964+extra-y := grsec_hidesym.o
55965+$(obj)/grsec_hidesym.o:
55966+ @-chmod -f 500 /boot
55967+ @-chmod -f 500 /lib/modules
55968+ @-chmod -f 500 /lib64/modules
55969+ @-chmod -f 500 /lib32/modules
55970+ @-chmod -f 700 .
55971+ @echo ' grsec: protected kernel image paths'
55972+endif
55973diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
55974new file mode 100644
55975index 0000000..71cb167
55976--- /dev/null
55977+++ b/grsecurity/gracl.c
55978@@ -0,0 +1,4140 @@
55979+#include <linux/kernel.h>
55980+#include <linux/module.h>
55981+#include <linux/sched.h>
55982+#include <linux/mm.h>
55983+#include <linux/file.h>
55984+#include <linux/fs.h>
55985+#include <linux/namei.h>
55986+#include <linux/mount.h>
55987+#include <linux/tty.h>
55988+#include <linux/proc_fs.h>
55989+#include <linux/smp_lock.h>
55990+#include <linux/slab.h>
55991+#include <linux/vmalloc.h>
55992+#include <linux/types.h>
55993+#include <linux/sysctl.h>
55994+#include <linux/netdevice.h>
55995+#include <linux/ptrace.h>
55996+#include <linux/gracl.h>
55997+#include <linux/gralloc.h>
55998+#include <linux/security.h>
55999+#include <linux/grinternal.h>
56000+#include <linux/pid_namespace.h>
56001+#include <linux/fdtable.h>
56002+#include <linux/percpu.h>
56003+
56004+#include <asm/uaccess.h>
56005+#include <asm/errno.h>
56006+#include <asm/mman.h>
56007+
56008+static struct acl_role_db acl_role_set;
56009+static struct name_db name_set;
56010+static struct inodev_db inodev_set;
56011+
56012+/* for keeping track of userspace pointers used for subjects, so we
56013+ can share references in the kernel as well
56014+*/
56015+
56016+static struct dentry *real_root;
56017+static struct vfsmount *real_root_mnt;
56018+
56019+static struct acl_subj_map_db subj_map_set;
56020+
56021+static struct acl_role_label *default_role;
56022+
56023+static struct acl_role_label *role_list;
56024+
56025+static u16 acl_sp_role_value;
56026+
56027+extern char *gr_shared_page[4];
56028+static DEFINE_MUTEX(gr_dev_mutex);
56029+DEFINE_RWLOCK(gr_inode_lock);
56030+
56031+struct gr_arg *gr_usermode;
56032+
56033+static unsigned int gr_status __read_only = GR_STATUS_INIT;
56034+
56035+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
56036+extern void gr_clear_learn_entries(void);
56037+
56038+#ifdef CONFIG_GRKERNSEC_RESLOG
56039+extern void gr_log_resource(const struct task_struct *task,
56040+ const int res, const unsigned long wanted, const int gt);
56041+#endif
56042+
56043+unsigned char *gr_system_salt;
56044+unsigned char *gr_system_sum;
56045+
56046+static struct sprole_pw **acl_special_roles = NULL;
56047+static __u16 num_sprole_pws = 0;
56048+
56049+static struct acl_role_label *kernel_role = NULL;
56050+
56051+static unsigned int gr_auth_attempts = 0;
56052+static unsigned long gr_auth_expires = 0UL;
56053+
56054+#ifdef CONFIG_NET
56055+extern struct vfsmount *sock_mnt;
56056+#endif
56057+extern struct vfsmount *pipe_mnt;
56058+extern struct vfsmount *shm_mnt;
56059+#ifdef CONFIG_HUGETLBFS
56060+extern struct vfsmount *hugetlbfs_vfsmount;
56061+#endif
56062+
56063+static struct acl_object_label *fakefs_obj_rw;
56064+static struct acl_object_label *fakefs_obj_rwx;
56065+
56066+extern int gr_init_uidset(void);
56067+extern void gr_free_uidset(void);
56068+extern void gr_remove_uid(uid_t uid);
56069+extern int gr_find_uid(uid_t uid);
56070+
56071+__inline__ int
56072+gr_acl_is_enabled(void)
56073+{
56074+ return (gr_status & GR_READY);
56075+}
56076+
56077+#ifdef CONFIG_BTRFS_FS
56078+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56079+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56080+#endif
56081+
56082+static inline dev_t __get_dev(const struct dentry *dentry)
56083+{
56084+#ifdef CONFIG_BTRFS_FS
56085+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56086+ return get_btrfs_dev_from_inode(dentry->d_inode);
56087+ else
56088+#endif
56089+ return dentry->d_inode->i_sb->s_dev;
56090+}
56091+
56092+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56093+{
56094+ return __get_dev(dentry);
56095+}
56096+
56097+static char gr_task_roletype_to_char(struct task_struct *task)
56098+{
56099+ switch (task->role->roletype &
56100+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56101+ GR_ROLE_SPECIAL)) {
56102+ case GR_ROLE_DEFAULT:
56103+ return 'D';
56104+ case GR_ROLE_USER:
56105+ return 'U';
56106+ case GR_ROLE_GROUP:
56107+ return 'G';
56108+ case GR_ROLE_SPECIAL:
56109+ return 'S';
56110+ }
56111+
56112+ return 'X';
56113+}
56114+
56115+char gr_roletype_to_char(void)
56116+{
56117+ return gr_task_roletype_to_char(current);
56118+}
56119+
56120+__inline__ int
56121+gr_acl_tpe_check(void)
56122+{
56123+ if (unlikely(!(gr_status & GR_READY)))
56124+ return 0;
56125+ if (current->role->roletype & GR_ROLE_TPE)
56126+ return 1;
56127+ else
56128+ return 0;
56129+}
56130+
56131+int
56132+gr_handle_rawio(const struct inode *inode)
56133+{
56134+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56135+ if (inode && S_ISBLK(inode->i_mode) &&
56136+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56137+ !capable(CAP_SYS_RAWIO))
56138+ return 1;
56139+#endif
56140+ return 0;
56141+}
56142+
56143+static int
56144+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56145+{
56146+ if (likely(lena != lenb))
56147+ return 0;
56148+
56149+ return !memcmp(a, b, lena);
56150+}
56151+
56152+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56153+{
56154+ *buflen -= namelen;
56155+ if (*buflen < 0)
56156+ return -ENAMETOOLONG;
56157+ *buffer -= namelen;
56158+ memcpy(*buffer, str, namelen);
56159+ return 0;
56160+}
56161+
56162+/* this must be called with vfsmount_lock and dcache_lock held */
56163+
56164+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56165+ struct dentry *root, struct vfsmount *rootmnt,
56166+ char *buffer, int buflen)
56167+{
56168+ char * end = buffer+buflen;
56169+ char * retval;
56170+ int namelen;
56171+
56172+ *--end = '\0';
56173+ buflen--;
56174+
56175+ if (buflen < 1)
56176+ goto Elong;
56177+ /* Get '/' right */
56178+ retval = end-1;
56179+ *retval = '/';
56180+
56181+ for (;;) {
56182+ struct dentry * parent;
56183+
56184+ if (dentry == root && vfsmnt == rootmnt)
56185+ break;
56186+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56187+ /* Global root? */
56188+ if (vfsmnt->mnt_parent == vfsmnt)
56189+ goto global_root;
56190+ dentry = vfsmnt->mnt_mountpoint;
56191+ vfsmnt = vfsmnt->mnt_parent;
56192+ continue;
56193+ }
56194+ parent = dentry->d_parent;
56195+ prefetch(parent);
56196+ namelen = dentry->d_name.len;
56197+ buflen -= namelen + 1;
56198+ if (buflen < 0)
56199+ goto Elong;
56200+ end -= namelen;
56201+ memcpy(end, dentry->d_name.name, namelen);
56202+ *--end = '/';
56203+ retval = end;
56204+ dentry = parent;
56205+ }
56206+
56207+out:
56208+ return retval;
56209+
56210+global_root:
56211+ namelen = dentry->d_name.len;
56212+ buflen -= namelen;
56213+ if (buflen < 0)
56214+ goto Elong;
56215+ retval -= namelen-1; /* hit the slash */
56216+ memcpy(retval, dentry->d_name.name, namelen);
56217+ goto out;
56218+Elong:
56219+ retval = ERR_PTR(-ENAMETOOLONG);
56220+ goto out;
56221+}
56222+
56223+static char *
56224+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56225+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56226+{
56227+ char *retval;
56228+
56229+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56230+ if (unlikely(IS_ERR(retval)))
56231+ retval = strcpy(buf, "<path too long>");
56232+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56233+ retval[1] = '\0';
56234+
56235+ return retval;
56236+}
56237+
56238+static char *
56239+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56240+ char *buf, int buflen)
56241+{
56242+ char *res;
56243+
56244+ /* we can use real_root, real_root_mnt, because this is only called
56245+ by the RBAC system */
56246+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56247+
56248+ return res;
56249+}
56250+
56251+static char *
56252+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56253+ char *buf, int buflen)
56254+{
56255+ char *res;
56256+ struct dentry *root;
56257+ struct vfsmount *rootmnt;
56258+ struct task_struct *reaper = &init_task;
56259+
56260+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56261+ read_lock(&reaper->fs->lock);
56262+ root = dget(reaper->fs->root.dentry);
56263+ rootmnt = mntget(reaper->fs->root.mnt);
56264+ read_unlock(&reaper->fs->lock);
56265+
56266+ spin_lock(&dcache_lock);
56267+ spin_lock(&vfsmount_lock);
56268+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56269+ spin_unlock(&vfsmount_lock);
56270+ spin_unlock(&dcache_lock);
56271+
56272+ dput(root);
56273+ mntput(rootmnt);
56274+ return res;
56275+}
56276+
56277+static char *
56278+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56279+{
56280+ char *ret;
56281+ spin_lock(&dcache_lock);
56282+ spin_lock(&vfsmount_lock);
56283+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56284+ PAGE_SIZE);
56285+ spin_unlock(&vfsmount_lock);
56286+ spin_unlock(&dcache_lock);
56287+ return ret;
56288+}
56289+
56290+static char *
56291+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56292+{
56293+ char *ret;
56294+ char *buf;
56295+ int buflen;
56296+
56297+ spin_lock(&dcache_lock);
56298+ spin_lock(&vfsmount_lock);
56299+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56300+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56301+ buflen = (int)(ret - buf);
56302+ if (buflen >= 5)
56303+ prepend(&ret, &buflen, "/proc", 5);
56304+ else
56305+ ret = strcpy(buf, "<path too long>");
56306+ spin_unlock(&vfsmount_lock);
56307+ spin_unlock(&dcache_lock);
56308+ return ret;
56309+}
56310+
56311+char *
56312+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56313+{
56314+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56315+ PAGE_SIZE);
56316+}
56317+
56318+char *
56319+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56320+{
56321+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56322+ PAGE_SIZE);
56323+}
56324+
56325+char *
56326+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56327+{
56328+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56329+ PAGE_SIZE);
56330+}
56331+
56332+char *
56333+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56334+{
56335+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56336+ PAGE_SIZE);
56337+}
56338+
56339+char *
56340+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56341+{
56342+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56343+ PAGE_SIZE);
56344+}
56345+
56346+__inline__ __u32
56347+to_gr_audit(const __u32 reqmode)
56348+{
56349+ /* masks off auditable permission flags, then shifts them to create
56350+ auditing flags, and adds the special case of append auditing if
56351+ we're requesting write */
56352+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56353+}
56354+
56355+struct acl_subject_label *
56356+lookup_subject_map(const struct acl_subject_label *userp)
56357+{
56358+ unsigned int index = shash(userp, subj_map_set.s_size);
56359+ struct subject_map *match;
56360+
56361+ match = subj_map_set.s_hash[index];
56362+
56363+ while (match && match->user != userp)
56364+ match = match->next;
56365+
56366+ if (match != NULL)
56367+ return match->kernel;
56368+ else
56369+ return NULL;
56370+}
56371+
56372+static void
56373+insert_subj_map_entry(struct subject_map *subjmap)
56374+{
56375+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56376+ struct subject_map **curr;
56377+
56378+ subjmap->prev = NULL;
56379+
56380+ curr = &subj_map_set.s_hash[index];
56381+ if (*curr != NULL)
56382+ (*curr)->prev = subjmap;
56383+
56384+ subjmap->next = *curr;
56385+ *curr = subjmap;
56386+
56387+ return;
56388+}
56389+
56390+static struct acl_role_label *
56391+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
56392+ const gid_t gid)
56393+{
56394+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56395+ struct acl_role_label *match;
56396+ struct role_allowed_ip *ipp;
56397+ unsigned int x;
56398+ u32 curr_ip = task->signal->curr_ip;
56399+
56400+ task->signal->saved_ip = curr_ip;
56401+
56402+ match = acl_role_set.r_hash[index];
56403+
56404+ while (match) {
56405+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56406+ for (x = 0; x < match->domain_child_num; x++) {
56407+ if (match->domain_children[x] == uid)
56408+ goto found;
56409+ }
56410+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56411+ break;
56412+ match = match->next;
56413+ }
56414+found:
56415+ if (match == NULL) {
56416+ try_group:
56417+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56418+ match = acl_role_set.r_hash[index];
56419+
56420+ while (match) {
56421+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56422+ for (x = 0; x < match->domain_child_num; x++) {
56423+ if (match->domain_children[x] == gid)
56424+ goto found2;
56425+ }
56426+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56427+ break;
56428+ match = match->next;
56429+ }
56430+found2:
56431+ if (match == NULL)
56432+ match = default_role;
56433+ if (match->allowed_ips == NULL)
56434+ return match;
56435+ else {
56436+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56437+ if (likely
56438+ ((ntohl(curr_ip) & ipp->netmask) ==
56439+ (ntohl(ipp->addr) & ipp->netmask)))
56440+ return match;
56441+ }
56442+ match = default_role;
56443+ }
56444+ } else if (match->allowed_ips == NULL) {
56445+ return match;
56446+ } else {
56447+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56448+ if (likely
56449+ ((ntohl(curr_ip) & ipp->netmask) ==
56450+ (ntohl(ipp->addr) & ipp->netmask)))
56451+ return match;
56452+ }
56453+ goto try_group;
56454+ }
56455+
56456+ return match;
56457+}
56458+
56459+struct acl_subject_label *
56460+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
56461+ const struct acl_role_label *role)
56462+{
56463+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
56464+ struct acl_subject_label *match;
56465+
56466+ match = role->subj_hash[index];
56467+
56468+ while (match && (match->inode != ino || match->device != dev ||
56469+ (match->mode & GR_DELETED))) {
56470+ match = match->next;
56471+ }
56472+
56473+ if (match && !(match->mode & GR_DELETED))
56474+ return match;
56475+ else
56476+ return NULL;
56477+}
56478+
56479+struct acl_subject_label *
56480+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
56481+ const struct acl_role_label *role)
56482+{
56483+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
56484+ struct acl_subject_label *match;
56485+
56486+ match = role->subj_hash[index];
56487+
56488+ while (match && (match->inode != ino || match->device != dev ||
56489+ !(match->mode & GR_DELETED))) {
56490+ match = match->next;
56491+ }
56492+
56493+ if (match && (match->mode & GR_DELETED))
56494+ return match;
56495+ else
56496+ return NULL;
56497+}
56498+
56499+static struct acl_object_label *
56500+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
56501+ const struct acl_subject_label *subj)
56502+{
56503+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56504+ struct acl_object_label *match;
56505+
56506+ match = subj->obj_hash[index];
56507+
56508+ while (match && (match->inode != ino || match->device != dev ||
56509+ (match->mode & GR_DELETED))) {
56510+ match = match->next;
56511+ }
56512+
56513+ if (match && !(match->mode & GR_DELETED))
56514+ return match;
56515+ else
56516+ return NULL;
56517+}
56518+
56519+static struct acl_object_label *
56520+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
56521+ const struct acl_subject_label *subj)
56522+{
56523+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56524+ struct acl_object_label *match;
56525+
56526+ match = subj->obj_hash[index];
56527+
56528+ while (match && (match->inode != ino || match->device != dev ||
56529+ !(match->mode & GR_DELETED))) {
56530+ match = match->next;
56531+ }
56532+
56533+ if (match && (match->mode & GR_DELETED))
56534+ return match;
56535+
56536+ match = subj->obj_hash[index];
56537+
56538+ while (match && (match->inode != ino || match->device != dev ||
56539+ (match->mode & GR_DELETED))) {
56540+ match = match->next;
56541+ }
56542+
56543+ if (match && !(match->mode & GR_DELETED))
56544+ return match;
56545+ else
56546+ return NULL;
56547+}
56548+
56549+static struct name_entry *
56550+lookup_name_entry(const char *name)
56551+{
56552+ unsigned int len = strlen(name);
56553+ unsigned int key = full_name_hash(name, len);
56554+ unsigned int index = key % name_set.n_size;
56555+ struct name_entry *match;
56556+
56557+ match = name_set.n_hash[index];
56558+
56559+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
56560+ match = match->next;
56561+
56562+ return match;
56563+}
56564+
56565+static struct name_entry *
56566+lookup_name_entry_create(const char *name)
56567+{
56568+ unsigned int len = strlen(name);
56569+ unsigned int key = full_name_hash(name, len);
56570+ unsigned int index = key % name_set.n_size;
56571+ struct name_entry *match;
56572+
56573+ match = name_set.n_hash[index];
56574+
56575+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56576+ !match->deleted))
56577+ match = match->next;
56578+
56579+ if (match && match->deleted)
56580+ return match;
56581+
56582+ match = name_set.n_hash[index];
56583+
56584+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56585+ match->deleted))
56586+ match = match->next;
56587+
56588+ if (match && !match->deleted)
56589+ return match;
56590+ else
56591+ return NULL;
56592+}
56593+
56594+static struct inodev_entry *
56595+lookup_inodev_entry(const ino_t ino, const dev_t dev)
56596+{
56597+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
56598+ struct inodev_entry *match;
56599+
56600+ match = inodev_set.i_hash[index];
56601+
56602+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
56603+ match = match->next;
56604+
56605+ return match;
56606+}
56607+
56608+static void
56609+insert_inodev_entry(struct inodev_entry *entry)
56610+{
56611+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
56612+ inodev_set.i_size);
56613+ struct inodev_entry **curr;
56614+
56615+ entry->prev = NULL;
56616+
56617+ curr = &inodev_set.i_hash[index];
56618+ if (*curr != NULL)
56619+ (*curr)->prev = entry;
56620+
56621+ entry->next = *curr;
56622+ *curr = entry;
56623+
56624+ return;
56625+}
56626+
56627+static void
56628+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
56629+{
56630+ unsigned int index =
56631+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
56632+ struct acl_role_label **curr;
56633+ struct acl_role_label *tmp;
56634+
56635+ curr = &acl_role_set.r_hash[index];
56636+
56637+ /* if role was already inserted due to domains and already has
56638+ a role in the same bucket as it attached, then we need to
56639+ combine these two buckets
56640+ */
56641+ if (role->next) {
56642+ tmp = role->next;
56643+ while (tmp->next)
56644+ tmp = tmp->next;
56645+ tmp->next = *curr;
56646+ } else
56647+ role->next = *curr;
56648+ *curr = role;
56649+
56650+ return;
56651+}
56652+
56653+static void
56654+insert_acl_role_label(struct acl_role_label *role)
56655+{
56656+ int i;
56657+
56658+ if (role_list == NULL) {
56659+ role_list = role;
56660+ role->prev = NULL;
56661+ } else {
56662+ role->prev = role_list;
56663+ role_list = role;
56664+ }
56665+
56666+ /* used for hash chains */
56667+ role->next = NULL;
56668+
56669+ if (role->roletype & GR_ROLE_DOMAIN) {
56670+ for (i = 0; i < role->domain_child_num; i++)
56671+ __insert_acl_role_label(role, role->domain_children[i]);
56672+ } else
56673+ __insert_acl_role_label(role, role->uidgid);
56674+}
56675+
56676+static int
56677+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
56678+{
56679+ struct name_entry **curr, *nentry;
56680+ struct inodev_entry *ientry;
56681+ unsigned int len = strlen(name);
56682+ unsigned int key = full_name_hash(name, len);
56683+ unsigned int index = key % name_set.n_size;
56684+
56685+ curr = &name_set.n_hash[index];
56686+
56687+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
56688+ curr = &((*curr)->next);
56689+
56690+ if (*curr != NULL)
56691+ return 1;
56692+
56693+ nentry = acl_alloc(sizeof (struct name_entry));
56694+ if (nentry == NULL)
56695+ return 0;
56696+ ientry = acl_alloc(sizeof (struct inodev_entry));
56697+ if (ientry == NULL)
56698+ return 0;
56699+ ientry->nentry = nentry;
56700+
56701+ nentry->key = key;
56702+ nentry->name = name;
56703+ nentry->inode = inode;
56704+ nentry->device = device;
56705+ nentry->len = len;
56706+ nentry->deleted = deleted;
56707+
56708+ nentry->prev = NULL;
56709+ curr = &name_set.n_hash[index];
56710+ if (*curr != NULL)
56711+ (*curr)->prev = nentry;
56712+ nentry->next = *curr;
56713+ *curr = nentry;
56714+
56715+ /* insert us into the table searchable by inode/dev */
56716+ insert_inodev_entry(ientry);
56717+
56718+ return 1;
56719+}
56720+
56721+static void
56722+insert_acl_obj_label(struct acl_object_label *obj,
56723+ struct acl_subject_label *subj)
56724+{
56725+ unsigned int index =
56726+ fhash(obj->inode, obj->device, subj->obj_hash_size);
56727+ struct acl_object_label **curr;
56728+
56729+
56730+ obj->prev = NULL;
56731+
56732+ curr = &subj->obj_hash[index];
56733+ if (*curr != NULL)
56734+ (*curr)->prev = obj;
56735+
56736+ obj->next = *curr;
56737+ *curr = obj;
56738+
56739+ return;
56740+}
56741+
56742+static void
56743+insert_acl_subj_label(struct acl_subject_label *obj,
56744+ struct acl_role_label *role)
56745+{
56746+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
56747+ struct acl_subject_label **curr;
56748+
56749+ obj->prev = NULL;
56750+
56751+ curr = &role->subj_hash[index];
56752+ if (*curr != NULL)
56753+ (*curr)->prev = obj;
56754+
56755+ obj->next = *curr;
56756+ *curr = obj;
56757+
56758+ return;
56759+}
56760+
56761+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
56762+
56763+static void *
56764+create_table(__u32 * len, int elementsize)
56765+{
56766+ unsigned int table_sizes[] = {
56767+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
56768+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
56769+ 4194301, 8388593, 16777213, 33554393, 67108859
56770+ };
56771+ void *newtable = NULL;
56772+ unsigned int pwr = 0;
56773+
56774+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
56775+ table_sizes[pwr] <= *len)
56776+ pwr++;
56777+
56778+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
56779+ return newtable;
56780+
56781+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
56782+ newtable =
56783+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
56784+ else
56785+ newtable = vmalloc(table_sizes[pwr] * elementsize);
56786+
56787+ *len = table_sizes[pwr];
56788+
56789+ return newtable;
56790+}
56791+
56792+static int
56793+init_variables(const struct gr_arg *arg)
56794+{
56795+ struct task_struct *reaper = &init_task;
56796+ unsigned int stacksize;
56797+
56798+ subj_map_set.s_size = arg->role_db.num_subjects;
56799+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
56800+ name_set.n_size = arg->role_db.num_objects;
56801+ inodev_set.i_size = arg->role_db.num_objects;
56802+
56803+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
56804+ !name_set.n_size || !inodev_set.i_size)
56805+ return 1;
56806+
56807+ if (!gr_init_uidset())
56808+ return 1;
56809+
56810+ /* set up the stack that holds allocation info */
56811+
56812+ stacksize = arg->role_db.num_pointers + 5;
56813+
56814+ if (!acl_alloc_stack_init(stacksize))
56815+ return 1;
56816+
56817+ /* grab reference for the real root dentry and vfsmount */
56818+ read_lock(&reaper->fs->lock);
56819+ real_root = dget(reaper->fs->root.dentry);
56820+ real_root_mnt = mntget(reaper->fs->root.mnt);
56821+ read_unlock(&reaper->fs->lock);
56822+
56823+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56824+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
56825+#endif
56826+
56827+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
56828+ if (fakefs_obj_rw == NULL)
56829+ return 1;
56830+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
56831+
56832+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
56833+ if (fakefs_obj_rwx == NULL)
56834+ return 1;
56835+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
56836+
56837+ subj_map_set.s_hash =
56838+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
56839+ acl_role_set.r_hash =
56840+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
56841+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
56842+ inodev_set.i_hash =
56843+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
56844+
56845+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
56846+ !name_set.n_hash || !inodev_set.i_hash)
56847+ return 1;
56848+
56849+ memset(subj_map_set.s_hash, 0,
56850+ sizeof(struct subject_map *) * subj_map_set.s_size);
56851+ memset(acl_role_set.r_hash, 0,
56852+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
56853+ memset(name_set.n_hash, 0,
56854+ sizeof (struct name_entry *) * name_set.n_size);
56855+ memset(inodev_set.i_hash, 0,
56856+ sizeof (struct inodev_entry *) * inodev_set.i_size);
56857+
56858+ return 0;
56859+}
56860+
56861+/* free information not needed after startup
56862+ currently contains user->kernel pointer mappings for subjects
56863+*/
56864+
56865+static void
56866+free_init_variables(void)
56867+{
56868+ __u32 i;
56869+
56870+ if (subj_map_set.s_hash) {
56871+ for (i = 0; i < subj_map_set.s_size; i++) {
56872+ if (subj_map_set.s_hash[i]) {
56873+ kfree(subj_map_set.s_hash[i]);
56874+ subj_map_set.s_hash[i] = NULL;
56875+ }
56876+ }
56877+
56878+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
56879+ PAGE_SIZE)
56880+ kfree(subj_map_set.s_hash);
56881+ else
56882+ vfree(subj_map_set.s_hash);
56883+ }
56884+
56885+ return;
56886+}
56887+
56888+static void
56889+free_variables(void)
56890+{
56891+ struct acl_subject_label *s;
56892+ struct acl_role_label *r;
56893+ struct task_struct *task, *task2;
56894+ unsigned int x;
56895+
56896+ gr_clear_learn_entries();
56897+
56898+ read_lock(&tasklist_lock);
56899+ do_each_thread(task2, task) {
56900+ task->acl_sp_role = 0;
56901+ task->acl_role_id = 0;
56902+ task->acl = NULL;
56903+ task->role = NULL;
56904+ } while_each_thread(task2, task);
56905+ read_unlock(&tasklist_lock);
56906+
56907+ /* release the reference to the real root dentry and vfsmount */
56908+ if (real_root)
56909+ dput(real_root);
56910+ real_root = NULL;
56911+ if (real_root_mnt)
56912+ mntput(real_root_mnt);
56913+ real_root_mnt = NULL;
56914+
56915+ /* free all object hash tables */
56916+
56917+ FOR_EACH_ROLE_START(r)
56918+ if (r->subj_hash == NULL)
56919+ goto next_role;
56920+ FOR_EACH_SUBJECT_START(r, s, x)
56921+ if (s->obj_hash == NULL)
56922+ break;
56923+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56924+ kfree(s->obj_hash);
56925+ else
56926+ vfree(s->obj_hash);
56927+ FOR_EACH_SUBJECT_END(s, x)
56928+ FOR_EACH_NESTED_SUBJECT_START(r, s)
56929+ if (s->obj_hash == NULL)
56930+ break;
56931+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56932+ kfree(s->obj_hash);
56933+ else
56934+ vfree(s->obj_hash);
56935+ FOR_EACH_NESTED_SUBJECT_END(s)
56936+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
56937+ kfree(r->subj_hash);
56938+ else
56939+ vfree(r->subj_hash);
56940+ r->subj_hash = NULL;
56941+next_role:
56942+ FOR_EACH_ROLE_END(r)
56943+
56944+ acl_free_all();
56945+
56946+ if (acl_role_set.r_hash) {
56947+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
56948+ PAGE_SIZE)
56949+ kfree(acl_role_set.r_hash);
56950+ else
56951+ vfree(acl_role_set.r_hash);
56952+ }
56953+ if (name_set.n_hash) {
56954+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
56955+ PAGE_SIZE)
56956+ kfree(name_set.n_hash);
56957+ else
56958+ vfree(name_set.n_hash);
56959+ }
56960+
56961+ if (inodev_set.i_hash) {
56962+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
56963+ PAGE_SIZE)
56964+ kfree(inodev_set.i_hash);
56965+ else
56966+ vfree(inodev_set.i_hash);
56967+ }
56968+
56969+ gr_free_uidset();
56970+
56971+ memset(&name_set, 0, sizeof (struct name_db));
56972+ memset(&inodev_set, 0, sizeof (struct inodev_db));
56973+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
56974+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
56975+
56976+ default_role = NULL;
56977+ role_list = NULL;
56978+
56979+ return;
56980+}
56981+
56982+static __u32
56983+count_user_objs(struct acl_object_label *userp)
56984+{
56985+ struct acl_object_label o_tmp;
56986+ __u32 num = 0;
56987+
56988+ while (userp) {
56989+ if (copy_from_user(&o_tmp, userp,
56990+ sizeof (struct acl_object_label)))
56991+ break;
56992+
56993+ userp = o_tmp.prev;
56994+ num++;
56995+ }
56996+
56997+ return num;
56998+}
56999+
57000+static struct acl_subject_label *
57001+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
57002+
57003+static int
57004+copy_user_glob(struct acl_object_label *obj)
57005+{
57006+ struct acl_object_label *g_tmp, **guser;
57007+ unsigned int len;
57008+ char *tmp;
57009+
57010+ if (obj->globbed == NULL)
57011+ return 0;
57012+
57013+ guser = &obj->globbed;
57014+ while (*guser) {
57015+ g_tmp = (struct acl_object_label *)
57016+ acl_alloc(sizeof (struct acl_object_label));
57017+ if (g_tmp == NULL)
57018+ return -ENOMEM;
57019+
57020+ if (copy_from_user(g_tmp, *guser,
57021+ sizeof (struct acl_object_label)))
57022+ return -EFAULT;
57023+
57024+ len = strnlen_user(g_tmp->filename, PATH_MAX);
57025+
57026+ if (!len || len >= PATH_MAX)
57027+ return -EINVAL;
57028+
57029+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57030+ return -ENOMEM;
57031+
57032+ if (copy_from_user(tmp, g_tmp->filename, len))
57033+ return -EFAULT;
57034+ tmp[len-1] = '\0';
57035+ g_tmp->filename = tmp;
57036+
57037+ *guser = g_tmp;
57038+ guser = &(g_tmp->next);
57039+ }
57040+
57041+ return 0;
57042+}
57043+
57044+static int
57045+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
57046+ struct acl_role_label *role)
57047+{
57048+ struct acl_object_label *o_tmp;
57049+ unsigned int len;
57050+ int ret;
57051+ char *tmp;
57052+
57053+ while (userp) {
57054+ if ((o_tmp = (struct acl_object_label *)
57055+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
57056+ return -ENOMEM;
57057+
57058+ if (copy_from_user(o_tmp, userp,
57059+ sizeof (struct acl_object_label)))
57060+ return -EFAULT;
57061+
57062+ userp = o_tmp->prev;
57063+
57064+ len = strnlen_user(o_tmp->filename, PATH_MAX);
57065+
57066+ if (!len || len >= PATH_MAX)
57067+ return -EINVAL;
57068+
57069+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57070+ return -ENOMEM;
57071+
57072+ if (copy_from_user(tmp, o_tmp->filename, len))
57073+ return -EFAULT;
57074+ tmp[len-1] = '\0';
57075+ o_tmp->filename = tmp;
57076+
57077+ insert_acl_obj_label(o_tmp, subj);
57078+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57079+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57080+ return -ENOMEM;
57081+
57082+ ret = copy_user_glob(o_tmp);
57083+ if (ret)
57084+ return ret;
57085+
57086+ if (o_tmp->nested) {
57087+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57088+ if (IS_ERR(o_tmp->nested))
57089+ return PTR_ERR(o_tmp->nested);
57090+
57091+ /* insert into nested subject list */
57092+ o_tmp->nested->next = role->hash->first;
57093+ role->hash->first = o_tmp->nested;
57094+ }
57095+ }
57096+
57097+ return 0;
57098+}
57099+
57100+static __u32
57101+count_user_subjs(struct acl_subject_label *userp)
57102+{
57103+ struct acl_subject_label s_tmp;
57104+ __u32 num = 0;
57105+
57106+ while (userp) {
57107+ if (copy_from_user(&s_tmp, userp,
57108+ sizeof (struct acl_subject_label)))
57109+ break;
57110+
57111+ userp = s_tmp.prev;
57112+ /* do not count nested subjects against this count, since
57113+ they are not included in the hash table, but are
57114+ attached to objects. We have already counted
57115+ the subjects in userspace for the allocation
57116+ stack
57117+ */
57118+ if (!(s_tmp.mode & GR_NESTED))
57119+ num++;
57120+ }
57121+
57122+ return num;
57123+}
57124+
57125+static int
57126+copy_user_allowedips(struct acl_role_label *rolep)
57127+{
57128+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57129+
57130+ ruserip = rolep->allowed_ips;
57131+
57132+ while (ruserip) {
57133+ rlast = rtmp;
57134+
57135+ if ((rtmp = (struct role_allowed_ip *)
57136+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57137+ return -ENOMEM;
57138+
57139+ if (copy_from_user(rtmp, ruserip,
57140+ sizeof (struct role_allowed_ip)))
57141+ return -EFAULT;
57142+
57143+ ruserip = rtmp->prev;
57144+
57145+ if (!rlast) {
57146+ rtmp->prev = NULL;
57147+ rolep->allowed_ips = rtmp;
57148+ } else {
57149+ rlast->next = rtmp;
57150+ rtmp->prev = rlast;
57151+ }
57152+
57153+ if (!ruserip)
57154+ rtmp->next = NULL;
57155+ }
57156+
57157+ return 0;
57158+}
57159+
57160+static int
57161+copy_user_transitions(struct acl_role_label *rolep)
57162+{
57163+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
57164+
57165+ unsigned int len;
57166+ char *tmp;
57167+
57168+ rusertp = rolep->transitions;
57169+
57170+ while (rusertp) {
57171+ rlast = rtmp;
57172+
57173+ if ((rtmp = (struct role_transition *)
57174+ acl_alloc(sizeof (struct role_transition))) == NULL)
57175+ return -ENOMEM;
57176+
57177+ if (copy_from_user(rtmp, rusertp,
57178+ sizeof (struct role_transition)))
57179+ return -EFAULT;
57180+
57181+ rusertp = rtmp->prev;
57182+
57183+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57184+
57185+ if (!len || len >= GR_SPROLE_LEN)
57186+ return -EINVAL;
57187+
57188+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57189+ return -ENOMEM;
57190+
57191+ if (copy_from_user(tmp, rtmp->rolename, len))
57192+ return -EFAULT;
57193+ tmp[len-1] = '\0';
57194+ rtmp->rolename = tmp;
57195+
57196+ if (!rlast) {
57197+ rtmp->prev = NULL;
57198+ rolep->transitions = rtmp;
57199+ } else {
57200+ rlast->next = rtmp;
57201+ rtmp->prev = rlast;
57202+ }
57203+
57204+ if (!rusertp)
57205+ rtmp->next = NULL;
57206+ }
57207+
57208+ return 0;
57209+}
57210+
57211+static struct acl_subject_label *
57212+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57213+{
57214+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57215+ unsigned int len;
57216+ char *tmp;
57217+ __u32 num_objs;
57218+ struct acl_ip_label **i_tmp, *i_utmp2;
57219+ struct gr_hash_struct ghash;
57220+ struct subject_map *subjmap;
57221+ unsigned int i_num;
57222+ int err;
57223+
57224+ s_tmp = lookup_subject_map(userp);
57225+
57226+ /* we've already copied this subject into the kernel, just return
57227+ the reference to it, and don't copy it over again
57228+ */
57229+ if (s_tmp)
57230+ return(s_tmp);
57231+
57232+ if ((s_tmp = (struct acl_subject_label *)
57233+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57234+ return ERR_PTR(-ENOMEM);
57235+
57236+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57237+ if (subjmap == NULL)
57238+ return ERR_PTR(-ENOMEM);
57239+
57240+ subjmap->user = userp;
57241+ subjmap->kernel = s_tmp;
57242+ insert_subj_map_entry(subjmap);
57243+
57244+ if (copy_from_user(s_tmp, userp,
57245+ sizeof (struct acl_subject_label)))
57246+ return ERR_PTR(-EFAULT);
57247+
57248+ len = strnlen_user(s_tmp->filename, PATH_MAX);
57249+
57250+ if (!len || len >= PATH_MAX)
57251+ return ERR_PTR(-EINVAL);
57252+
57253+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57254+ return ERR_PTR(-ENOMEM);
57255+
57256+ if (copy_from_user(tmp, s_tmp->filename, len))
57257+ return ERR_PTR(-EFAULT);
57258+ tmp[len-1] = '\0';
57259+ s_tmp->filename = tmp;
57260+
57261+ if (!strcmp(s_tmp->filename, "/"))
57262+ role->root_label = s_tmp;
57263+
57264+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57265+ return ERR_PTR(-EFAULT);
57266+
57267+ /* copy user and group transition tables */
57268+
57269+ if (s_tmp->user_trans_num) {
57270+ uid_t *uidlist;
57271+
57272+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57273+ if (uidlist == NULL)
57274+ return ERR_PTR(-ENOMEM);
57275+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57276+ return ERR_PTR(-EFAULT);
57277+
57278+ s_tmp->user_transitions = uidlist;
57279+ }
57280+
57281+ if (s_tmp->group_trans_num) {
57282+ gid_t *gidlist;
57283+
57284+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57285+ if (gidlist == NULL)
57286+ return ERR_PTR(-ENOMEM);
57287+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57288+ return ERR_PTR(-EFAULT);
57289+
57290+ s_tmp->group_transitions = gidlist;
57291+ }
57292+
57293+ /* set up object hash table */
57294+ num_objs = count_user_objs(ghash.first);
57295+
57296+ s_tmp->obj_hash_size = num_objs;
57297+ s_tmp->obj_hash =
57298+ (struct acl_object_label **)
57299+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57300+
57301+ if (!s_tmp->obj_hash)
57302+ return ERR_PTR(-ENOMEM);
57303+
57304+ memset(s_tmp->obj_hash, 0,
57305+ s_tmp->obj_hash_size *
57306+ sizeof (struct acl_object_label *));
57307+
57308+ /* add in objects */
57309+ err = copy_user_objs(ghash.first, s_tmp, role);
57310+
57311+ if (err)
57312+ return ERR_PTR(err);
57313+
57314+ /* set pointer for parent subject */
57315+ if (s_tmp->parent_subject) {
57316+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57317+
57318+ if (IS_ERR(s_tmp2))
57319+ return s_tmp2;
57320+
57321+ s_tmp->parent_subject = s_tmp2;
57322+ }
57323+
57324+ /* add in ip acls */
57325+
57326+ if (!s_tmp->ip_num) {
57327+ s_tmp->ips = NULL;
57328+ goto insert;
57329+ }
57330+
57331+ i_tmp =
57332+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57333+ sizeof (struct acl_ip_label *));
57334+
57335+ if (!i_tmp)
57336+ return ERR_PTR(-ENOMEM);
57337+
57338+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57339+ *(i_tmp + i_num) =
57340+ (struct acl_ip_label *)
57341+ acl_alloc(sizeof (struct acl_ip_label));
57342+ if (!*(i_tmp + i_num))
57343+ return ERR_PTR(-ENOMEM);
57344+
57345+ if (copy_from_user
57346+ (&i_utmp2, s_tmp->ips + i_num,
57347+ sizeof (struct acl_ip_label *)))
57348+ return ERR_PTR(-EFAULT);
57349+
57350+ if (copy_from_user
57351+ (*(i_tmp + i_num), i_utmp2,
57352+ sizeof (struct acl_ip_label)))
57353+ return ERR_PTR(-EFAULT);
57354+
57355+ if ((*(i_tmp + i_num))->iface == NULL)
57356+ continue;
57357+
57358+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57359+ if (!len || len >= IFNAMSIZ)
57360+ return ERR_PTR(-EINVAL);
57361+ tmp = acl_alloc(len);
57362+ if (tmp == NULL)
57363+ return ERR_PTR(-ENOMEM);
57364+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57365+ return ERR_PTR(-EFAULT);
57366+ (*(i_tmp + i_num))->iface = tmp;
57367+ }
57368+
57369+ s_tmp->ips = i_tmp;
57370+
57371+insert:
57372+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57373+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57374+ return ERR_PTR(-ENOMEM);
57375+
57376+ return s_tmp;
57377+}
57378+
57379+static int
57380+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
57381+{
57382+ struct acl_subject_label s_pre;
57383+ struct acl_subject_label * ret;
57384+ int err;
57385+
57386+ while (userp) {
57387+ if (copy_from_user(&s_pre, userp,
57388+ sizeof (struct acl_subject_label)))
57389+ return -EFAULT;
57390+
57391+ /* do not add nested subjects here, add
57392+ while parsing objects
57393+ */
57394+
57395+ if (s_pre.mode & GR_NESTED) {
57396+ userp = s_pre.prev;
57397+ continue;
57398+ }
57399+
57400+ ret = do_copy_user_subj(userp, role);
57401+
57402+ err = PTR_ERR(ret);
57403+ if (IS_ERR(ret))
57404+ return err;
57405+
57406+ insert_acl_subj_label(ret, role);
57407+
57408+ userp = s_pre.prev;
57409+ }
57410+
57411+ return 0;
57412+}
57413+
57414+static int
57415+copy_user_acl(struct gr_arg *arg)
57416+{
57417+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57418+ struct sprole_pw *sptmp;
57419+ struct gr_hash_struct *ghash;
57420+ uid_t *domainlist;
57421+ unsigned int r_num;
57422+ unsigned int len;
57423+ char *tmp;
57424+ int err = 0;
57425+ __u16 i;
57426+ __u32 num_subjs;
57427+
57428+ /* we need a default and kernel role */
57429+ if (arg->role_db.num_roles < 2)
57430+ return -EINVAL;
57431+
57432+ /* copy special role authentication info from userspace */
57433+
57434+ num_sprole_pws = arg->num_sprole_pws;
57435+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57436+
57437+ if (!acl_special_roles) {
57438+ err = -ENOMEM;
57439+ goto cleanup;
57440+ }
57441+
57442+ for (i = 0; i < num_sprole_pws; i++) {
57443+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
57444+ if (!sptmp) {
57445+ err = -ENOMEM;
57446+ goto cleanup;
57447+ }
57448+ if (copy_from_user(sptmp, arg->sprole_pws + i,
57449+ sizeof (struct sprole_pw))) {
57450+ err = -EFAULT;
57451+ goto cleanup;
57452+ }
57453+
57454+ len =
57455+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
57456+
57457+ if (!len || len >= GR_SPROLE_LEN) {
57458+ err = -EINVAL;
57459+ goto cleanup;
57460+ }
57461+
57462+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
57463+ err = -ENOMEM;
57464+ goto cleanup;
57465+ }
57466+
57467+ if (copy_from_user(tmp, sptmp->rolename, len)) {
57468+ err = -EFAULT;
57469+ goto cleanup;
57470+ }
57471+ tmp[len-1] = '\0';
57472+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57473+ printk(KERN_ALERT "Copying special role %s\n", tmp);
57474+#endif
57475+ sptmp->rolename = tmp;
57476+ acl_special_roles[i] = sptmp;
57477+ }
57478+
57479+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
57480+
57481+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
57482+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
57483+
57484+ if (!r_tmp) {
57485+ err = -ENOMEM;
57486+ goto cleanup;
57487+ }
57488+
57489+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
57490+ sizeof (struct acl_role_label *))) {
57491+ err = -EFAULT;
57492+ goto cleanup;
57493+ }
57494+
57495+ if (copy_from_user(r_tmp, r_utmp2,
57496+ sizeof (struct acl_role_label))) {
57497+ err = -EFAULT;
57498+ goto cleanup;
57499+ }
57500+
57501+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
57502+
57503+ if (!len || len >= PATH_MAX) {
57504+ err = -EINVAL;
57505+ goto cleanup;
57506+ }
57507+
57508+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
57509+ err = -ENOMEM;
57510+ goto cleanup;
57511+ }
57512+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
57513+ err = -EFAULT;
57514+ goto cleanup;
57515+ }
57516+ tmp[len-1] = '\0';
57517+ r_tmp->rolename = tmp;
57518+
57519+ if (!strcmp(r_tmp->rolename, "default")
57520+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
57521+ default_role = r_tmp;
57522+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
57523+ kernel_role = r_tmp;
57524+ }
57525+
57526+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
57527+ err = -ENOMEM;
57528+ goto cleanup;
57529+ }
57530+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
57531+ err = -EFAULT;
57532+ goto cleanup;
57533+ }
57534+
57535+ r_tmp->hash = ghash;
57536+
57537+ num_subjs = count_user_subjs(r_tmp->hash->first);
57538+
57539+ r_tmp->subj_hash_size = num_subjs;
57540+ r_tmp->subj_hash =
57541+ (struct acl_subject_label **)
57542+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
57543+
57544+ if (!r_tmp->subj_hash) {
57545+ err = -ENOMEM;
57546+ goto cleanup;
57547+ }
57548+
57549+ err = copy_user_allowedips(r_tmp);
57550+ if (err)
57551+ goto cleanup;
57552+
57553+ /* copy domain info */
57554+ if (r_tmp->domain_children != NULL) {
57555+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
57556+ if (domainlist == NULL) {
57557+ err = -ENOMEM;
57558+ goto cleanup;
57559+ }
57560+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
57561+ err = -EFAULT;
57562+ goto cleanup;
57563+ }
57564+ r_tmp->domain_children = domainlist;
57565+ }
57566+
57567+ err = copy_user_transitions(r_tmp);
57568+ if (err)
57569+ goto cleanup;
57570+
57571+ memset(r_tmp->subj_hash, 0,
57572+ r_tmp->subj_hash_size *
57573+ sizeof (struct acl_subject_label *));
57574+
57575+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
57576+
57577+ if (err)
57578+ goto cleanup;
57579+
57580+ /* set nested subject list to null */
57581+ r_tmp->hash->first = NULL;
57582+
57583+ insert_acl_role_label(r_tmp);
57584+ }
57585+
57586+ goto return_err;
57587+ cleanup:
57588+ free_variables();
57589+ return_err:
57590+ return err;
57591+
57592+}
57593+
57594+static int
57595+gracl_init(struct gr_arg *args)
57596+{
57597+ int error = 0;
57598+
57599+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
57600+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
57601+
57602+ if (init_variables(args)) {
57603+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
57604+ error = -ENOMEM;
57605+ free_variables();
57606+ goto out;
57607+ }
57608+
57609+ error = copy_user_acl(args);
57610+ free_init_variables();
57611+ if (error) {
57612+ free_variables();
57613+ goto out;
57614+ }
57615+
57616+ if ((error = gr_set_acls(0))) {
57617+ free_variables();
57618+ goto out;
57619+ }
57620+
57621+ pax_open_kernel();
57622+ gr_status |= GR_READY;
57623+ pax_close_kernel();
57624+
57625+ out:
57626+ return error;
57627+}
57628+
57629+/* derived from glibc fnmatch() 0: match, 1: no match*/
57630+
57631+static int
57632+glob_match(const char *p, const char *n)
57633+{
57634+ char c;
57635+
57636+ while ((c = *p++) != '\0') {
57637+ switch (c) {
57638+ case '?':
57639+ if (*n == '\0')
57640+ return 1;
57641+ else if (*n == '/')
57642+ return 1;
57643+ break;
57644+ case '\\':
57645+ if (*n != c)
57646+ return 1;
57647+ break;
57648+ case '*':
57649+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
57650+ if (*n == '/')
57651+ return 1;
57652+ else if (c == '?') {
57653+ if (*n == '\0')
57654+ return 1;
57655+ else
57656+ ++n;
57657+ }
57658+ }
57659+ if (c == '\0') {
57660+ return 0;
57661+ } else {
57662+ const char *endp;
57663+
57664+ if ((endp = strchr(n, '/')) == NULL)
57665+ endp = n + strlen(n);
57666+
57667+ if (c == '[') {
57668+ for (--p; n < endp; ++n)
57669+ if (!glob_match(p, n))
57670+ return 0;
57671+ } else if (c == '/') {
57672+ while (*n != '\0' && *n != '/')
57673+ ++n;
57674+ if (*n == '/' && !glob_match(p, n + 1))
57675+ return 0;
57676+ } else {
57677+ for (--p; n < endp; ++n)
57678+ if (*n == c && !glob_match(p, n))
57679+ return 0;
57680+ }
57681+
57682+ return 1;
57683+ }
57684+ case '[':
57685+ {
57686+ int not;
57687+ char cold;
57688+
57689+ if (*n == '\0' || *n == '/')
57690+ return 1;
57691+
57692+ not = (*p == '!' || *p == '^');
57693+ if (not)
57694+ ++p;
57695+
57696+ c = *p++;
57697+ for (;;) {
57698+ unsigned char fn = (unsigned char)*n;
57699+
57700+ if (c == '\0')
57701+ return 1;
57702+ else {
57703+ if (c == fn)
57704+ goto matched;
57705+ cold = c;
57706+ c = *p++;
57707+
57708+ if (c == '-' && *p != ']') {
57709+ unsigned char cend = *p++;
57710+
57711+ if (cend == '\0')
57712+ return 1;
57713+
57714+ if (cold <= fn && fn <= cend)
57715+ goto matched;
57716+
57717+ c = *p++;
57718+ }
57719+ }
57720+
57721+ if (c == ']')
57722+ break;
57723+ }
57724+ if (!not)
57725+ return 1;
57726+ break;
57727+ matched:
57728+ while (c != ']') {
57729+ if (c == '\0')
57730+ return 1;
57731+
57732+ c = *p++;
57733+ }
57734+ if (not)
57735+ return 1;
57736+ }
57737+ break;
57738+ default:
57739+ if (c != *n)
57740+ return 1;
57741+ }
57742+
57743+ ++n;
57744+ }
57745+
57746+ if (*n == '\0')
57747+ return 0;
57748+
57749+ if (*n == '/')
57750+ return 0;
57751+
57752+ return 1;
57753+}
57754+
57755+static struct acl_object_label *
57756+chk_glob_label(struct acl_object_label *globbed,
57757+ struct dentry *dentry, struct vfsmount *mnt, char **path)
57758+{
57759+ struct acl_object_label *tmp;
57760+
57761+ if (*path == NULL)
57762+ *path = gr_to_filename_nolock(dentry, mnt);
57763+
57764+ tmp = globbed;
57765+
57766+ while (tmp) {
57767+ if (!glob_match(tmp->filename, *path))
57768+ return tmp;
57769+ tmp = tmp->next;
57770+ }
57771+
57772+ return NULL;
57773+}
57774+
57775+static struct acl_object_label *
57776+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57777+ const ino_t curr_ino, const dev_t curr_dev,
57778+ const struct acl_subject_label *subj, char **path, const int checkglob)
57779+{
57780+ struct acl_subject_label *tmpsubj;
57781+ struct acl_object_label *retval;
57782+ struct acl_object_label *retval2;
57783+
57784+ tmpsubj = (struct acl_subject_label *) subj;
57785+ read_lock(&gr_inode_lock);
57786+ do {
57787+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
57788+ if (retval) {
57789+ if (checkglob && retval->globbed) {
57790+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
57791+ (struct vfsmount *)orig_mnt, path);
57792+ if (retval2)
57793+ retval = retval2;
57794+ }
57795+ break;
57796+ }
57797+ } while ((tmpsubj = tmpsubj->parent_subject));
57798+ read_unlock(&gr_inode_lock);
57799+
57800+ return retval;
57801+}
57802+
57803+static __inline__ struct acl_object_label *
57804+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57805+ const struct dentry *curr_dentry,
57806+ const struct acl_subject_label *subj, char **path, const int checkglob)
57807+{
57808+ int newglob = checkglob;
57809+
57810+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
57811+ as we don't want a / * rule to match instead of the / object
57812+ don't do this for create lookups that call this function though, since they're looking up
57813+ on the parent and thus need globbing checks on all paths
57814+ */
57815+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
57816+ newglob = GR_NO_GLOB;
57817+
57818+ return __full_lookup(orig_dentry, orig_mnt,
57819+ curr_dentry->d_inode->i_ino,
57820+ __get_dev(curr_dentry), subj, path, newglob);
57821+}
57822+
57823+static struct acl_object_label *
57824+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57825+ const struct acl_subject_label *subj, char *path, const int checkglob)
57826+{
57827+ struct dentry *dentry = (struct dentry *) l_dentry;
57828+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57829+ struct acl_object_label *retval;
57830+
57831+ spin_lock(&dcache_lock);
57832+ spin_lock(&vfsmount_lock);
57833+
57834+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
57835+#ifdef CONFIG_NET
57836+ mnt == sock_mnt ||
57837+#endif
57838+#ifdef CONFIG_HUGETLBFS
57839+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
57840+#endif
57841+ /* ignore Eric Biederman */
57842+ IS_PRIVATE(l_dentry->d_inode))) {
57843+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
57844+ goto out;
57845+ }
57846+
57847+ for (;;) {
57848+ if (dentry == real_root && mnt == real_root_mnt)
57849+ break;
57850+
57851+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57852+ if (mnt->mnt_parent == mnt)
57853+ break;
57854+
57855+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57856+ if (retval != NULL)
57857+ goto out;
57858+
57859+ dentry = mnt->mnt_mountpoint;
57860+ mnt = mnt->mnt_parent;
57861+ continue;
57862+ }
57863+
57864+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57865+ if (retval != NULL)
57866+ goto out;
57867+
57868+ dentry = dentry->d_parent;
57869+ }
57870+
57871+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57872+
57873+ if (retval == NULL)
57874+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
57875+out:
57876+ spin_unlock(&vfsmount_lock);
57877+ spin_unlock(&dcache_lock);
57878+
57879+ BUG_ON(retval == NULL);
57880+
57881+ return retval;
57882+}
57883+
57884+static __inline__ struct acl_object_label *
57885+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57886+ const struct acl_subject_label *subj)
57887+{
57888+ char *path = NULL;
57889+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
57890+}
57891+
57892+static __inline__ struct acl_object_label *
57893+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57894+ const struct acl_subject_label *subj)
57895+{
57896+ char *path = NULL;
57897+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
57898+}
57899+
57900+static __inline__ struct acl_object_label *
57901+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57902+ const struct acl_subject_label *subj, char *path)
57903+{
57904+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
57905+}
57906+
57907+static struct acl_subject_label *
57908+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57909+ const struct acl_role_label *role)
57910+{
57911+ struct dentry *dentry = (struct dentry *) l_dentry;
57912+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57913+ struct acl_subject_label *retval;
57914+
57915+ spin_lock(&dcache_lock);
57916+ spin_lock(&vfsmount_lock);
57917+
57918+ for (;;) {
57919+ if (dentry == real_root && mnt == real_root_mnt)
57920+ break;
57921+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57922+ if (mnt->mnt_parent == mnt)
57923+ break;
57924+
57925+ read_lock(&gr_inode_lock);
57926+ retval =
57927+ lookup_acl_subj_label(dentry->d_inode->i_ino,
57928+ __get_dev(dentry), role);
57929+ read_unlock(&gr_inode_lock);
57930+ if (retval != NULL)
57931+ goto out;
57932+
57933+ dentry = mnt->mnt_mountpoint;
57934+ mnt = mnt->mnt_parent;
57935+ continue;
57936+ }
57937+
57938+ read_lock(&gr_inode_lock);
57939+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57940+ __get_dev(dentry), role);
57941+ read_unlock(&gr_inode_lock);
57942+ if (retval != NULL)
57943+ goto out;
57944+
57945+ dentry = dentry->d_parent;
57946+ }
57947+
57948+ read_lock(&gr_inode_lock);
57949+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57950+ __get_dev(dentry), role);
57951+ read_unlock(&gr_inode_lock);
57952+
57953+ if (unlikely(retval == NULL)) {
57954+ read_lock(&gr_inode_lock);
57955+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
57956+ __get_dev(real_root), role);
57957+ read_unlock(&gr_inode_lock);
57958+ }
57959+out:
57960+ spin_unlock(&vfsmount_lock);
57961+ spin_unlock(&dcache_lock);
57962+
57963+ BUG_ON(retval == NULL);
57964+
57965+ return retval;
57966+}
57967+
57968+static void
57969+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
57970+{
57971+ struct task_struct *task = current;
57972+ const struct cred *cred = current_cred();
57973+
57974+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57975+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57976+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57977+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
57978+
57979+ return;
57980+}
57981+
57982+static void
57983+gr_log_learn_sysctl(const char *path, const __u32 mode)
57984+{
57985+ struct task_struct *task = current;
57986+ const struct cred *cred = current_cred();
57987+
57988+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57989+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57990+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57991+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
57992+
57993+ return;
57994+}
57995+
57996+static void
57997+gr_log_learn_id_change(const char type, const unsigned int real,
57998+ const unsigned int effective, const unsigned int fs)
57999+{
58000+ struct task_struct *task = current;
58001+ const struct cred *cred = current_cred();
58002+
58003+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
58004+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
58005+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
58006+ type, real, effective, fs, &task->signal->saved_ip);
58007+
58008+ return;
58009+}
58010+
58011+__u32
58012+gr_search_file(const struct dentry * dentry, const __u32 mode,
58013+ const struct vfsmount * mnt)
58014+{
58015+ __u32 retval = mode;
58016+ struct acl_subject_label *curracl;
58017+ struct acl_object_label *currobj;
58018+
58019+ if (unlikely(!(gr_status & GR_READY)))
58020+ return (mode & ~GR_AUDITS);
58021+
58022+ curracl = current->acl;
58023+
58024+ currobj = chk_obj_label(dentry, mnt, curracl);
58025+ retval = currobj->mode & mode;
58026+
58027+ /* if we're opening a specified transfer file for writing
58028+ (e.g. /dev/initctl), then transfer our role to init
58029+ */
58030+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
58031+ current->role->roletype & GR_ROLE_PERSIST)) {
58032+ struct task_struct *task = init_pid_ns.child_reaper;
58033+
58034+ if (task->role != current->role) {
58035+ task->acl_sp_role = 0;
58036+ task->acl_role_id = current->acl_role_id;
58037+ task->role = current->role;
58038+ rcu_read_lock();
58039+ read_lock(&grsec_exec_file_lock);
58040+ gr_apply_subject_to_task(task);
58041+ read_unlock(&grsec_exec_file_lock);
58042+ rcu_read_unlock();
58043+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
58044+ }
58045+ }
58046+
58047+ if (unlikely
58048+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
58049+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
58050+ __u32 new_mode = mode;
58051+
58052+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58053+
58054+ retval = new_mode;
58055+
58056+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
58057+ new_mode |= GR_INHERIT;
58058+
58059+ if (!(mode & GR_NOLEARN))
58060+ gr_log_learn(dentry, mnt, new_mode);
58061+ }
58062+
58063+ return retval;
58064+}
58065+
58066+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
58067+ const struct dentry *parent,
58068+ const struct vfsmount *mnt)
58069+{
58070+ struct name_entry *match;
58071+ struct acl_object_label *matchpo;
58072+ struct acl_subject_label *curracl;
58073+ char *path;
58074+
58075+ if (unlikely(!(gr_status & GR_READY)))
58076+ return NULL;
58077+
58078+ preempt_disable();
58079+ path = gr_to_filename_rbac(new_dentry, mnt);
58080+ match = lookup_name_entry_create(path);
58081+
58082+ curracl = current->acl;
58083+
58084+ if (match) {
58085+ read_lock(&gr_inode_lock);
58086+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58087+ read_unlock(&gr_inode_lock);
58088+
58089+ if (matchpo) {
58090+ preempt_enable();
58091+ return matchpo;
58092+ }
58093+ }
58094+
58095+ // lookup parent
58096+
58097+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58098+
58099+ preempt_enable();
58100+ return matchpo;
58101+}
58102+
58103+__u32
58104+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58105+ const struct vfsmount * mnt, const __u32 mode)
58106+{
58107+ struct acl_object_label *matchpo;
58108+ __u32 retval;
58109+
58110+ if (unlikely(!(gr_status & GR_READY)))
58111+ return (mode & ~GR_AUDITS);
58112+
58113+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
58114+
58115+ retval = matchpo->mode & mode;
58116+
58117+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58118+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58119+ __u32 new_mode = mode;
58120+
58121+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58122+
58123+ gr_log_learn(new_dentry, mnt, new_mode);
58124+ return new_mode;
58125+ }
58126+
58127+ return retval;
58128+}
58129+
58130+__u32
58131+gr_check_link(const struct dentry * new_dentry,
58132+ const struct dentry * parent_dentry,
58133+ const struct vfsmount * parent_mnt,
58134+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58135+{
58136+ struct acl_object_label *obj;
58137+ __u32 oldmode, newmode;
58138+ __u32 needmode;
58139+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58140+ GR_DELETE | GR_INHERIT;
58141+
58142+ if (unlikely(!(gr_status & GR_READY)))
58143+ return (GR_CREATE | GR_LINK);
58144+
58145+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58146+ oldmode = obj->mode;
58147+
58148+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58149+ newmode = obj->mode;
58150+
58151+ needmode = newmode & checkmodes;
58152+
58153+ // old name for hardlink must have at least the permissions of the new name
58154+ if ((oldmode & needmode) != needmode)
58155+ goto bad;
58156+
58157+ // if old name had restrictions/auditing, make sure the new name does as well
58158+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58159+
58160+ // don't allow hardlinking of suid/sgid files without permission
58161+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58162+ needmode |= GR_SETID;
58163+
58164+ if ((newmode & needmode) != needmode)
58165+ goto bad;
58166+
58167+ // enforce minimum permissions
58168+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58169+ return newmode;
58170+bad:
58171+ needmode = oldmode;
58172+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58173+ needmode |= GR_SETID;
58174+
58175+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58176+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58177+ return (GR_CREATE | GR_LINK);
58178+ } else if (newmode & GR_SUPPRESS)
58179+ return GR_SUPPRESS;
58180+ else
58181+ return 0;
58182+}
58183+
58184+int
58185+gr_check_hidden_task(const struct task_struct *task)
58186+{
58187+ if (unlikely(!(gr_status & GR_READY)))
58188+ return 0;
58189+
58190+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58191+ return 1;
58192+
58193+ return 0;
58194+}
58195+
58196+int
58197+gr_check_protected_task(const struct task_struct *task)
58198+{
58199+ if (unlikely(!(gr_status & GR_READY) || !task))
58200+ return 0;
58201+
58202+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58203+ task->acl != current->acl)
58204+ return 1;
58205+
58206+ return 0;
58207+}
58208+
58209+int
58210+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58211+{
58212+ struct task_struct *p;
58213+ int ret = 0;
58214+
58215+ if (unlikely(!(gr_status & GR_READY) || !pid))
58216+ return ret;
58217+
58218+ read_lock(&tasklist_lock);
58219+ do_each_pid_task(pid, type, p) {
58220+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58221+ p->acl != current->acl) {
58222+ ret = 1;
58223+ goto out;
58224+ }
58225+ } while_each_pid_task(pid, type, p);
58226+out:
58227+ read_unlock(&tasklist_lock);
58228+
58229+ return ret;
58230+}
58231+
58232+void
58233+gr_copy_label(struct task_struct *tsk)
58234+{
58235+ tsk->signal->used_accept = 0;
58236+ tsk->acl_sp_role = 0;
58237+ tsk->acl_role_id = current->acl_role_id;
58238+ tsk->acl = current->acl;
58239+ tsk->role = current->role;
58240+ tsk->signal->curr_ip = current->signal->curr_ip;
58241+ tsk->signal->saved_ip = current->signal->saved_ip;
58242+ if (current->exec_file)
58243+ get_file(current->exec_file);
58244+ tsk->exec_file = current->exec_file;
58245+ tsk->is_writable = current->is_writable;
58246+ if (unlikely(current->signal->used_accept)) {
58247+ current->signal->curr_ip = 0;
58248+ current->signal->saved_ip = 0;
58249+ }
58250+
58251+ return;
58252+}
58253+
58254+static void
58255+gr_set_proc_res(struct task_struct *task)
58256+{
58257+ struct acl_subject_label *proc;
58258+ unsigned short i;
58259+
58260+ proc = task->acl;
58261+
58262+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58263+ return;
58264+
58265+ for (i = 0; i < RLIM_NLIMITS; i++) {
58266+ if (!(proc->resmask & (1 << i)))
58267+ continue;
58268+
58269+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58270+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58271+ }
58272+
58273+ return;
58274+}
58275+
58276+extern int __gr_process_user_ban(struct user_struct *user);
58277+
58278+int
58279+gr_check_user_change(int real, int effective, int fs)
58280+{
58281+ unsigned int i;
58282+ __u16 num;
58283+ uid_t *uidlist;
58284+ int curuid;
58285+ int realok = 0;
58286+ int effectiveok = 0;
58287+ int fsok = 0;
58288+
58289+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58290+ struct user_struct *user;
58291+
58292+ if (real == -1)
58293+ goto skipit;
58294+
58295+ user = find_user(real);
58296+ if (user == NULL)
58297+ goto skipit;
58298+
58299+ if (__gr_process_user_ban(user)) {
58300+ /* for find_user */
58301+ free_uid(user);
58302+ return 1;
58303+ }
58304+
58305+ /* for find_user */
58306+ free_uid(user);
58307+
58308+skipit:
58309+#endif
58310+
58311+ if (unlikely(!(gr_status & GR_READY)))
58312+ return 0;
58313+
58314+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58315+ gr_log_learn_id_change('u', real, effective, fs);
58316+
58317+ num = current->acl->user_trans_num;
58318+ uidlist = current->acl->user_transitions;
58319+
58320+ if (uidlist == NULL)
58321+ return 0;
58322+
58323+ if (real == -1)
58324+ realok = 1;
58325+ if (effective == -1)
58326+ effectiveok = 1;
58327+ if (fs == -1)
58328+ fsok = 1;
58329+
58330+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
58331+ for (i = 0; i < num; i++) {
58332+ curuid = (int)uidlist[i];
58333+ if (real == curuid)
58334+ realok = 1;
58335+ if (effective == curuid)
58336+ effectiveok = 1;
58337+ if (fs == curuid)
58338+ fsok = 1;
58339+ }
58340+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
58341+ for (i = 0; i < num; i++) {
58342+ curuid = (int)uidlist[i];
58343+ if (real == curuid)
58344+ break;
58345+ if (effective == curuid)
58346+ break;
58347+ if (fs == curuid)
58348+ break;
58349+ }
58350+ /* not in deny list */
58351+ if (i == num) {
58352+ realok = 1;
58353+ effectiveok = 1;
58354+ fsok = 1;
58355+ }
58356+ }
58357+
58358+ if (realok && effectiveok && fsok)
58359+ return 0;
58360+ else {
58361+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58362+ return 1;
58363+ }
58364+}
58365+
58366+int
58367+gr_check_group_change(int real, int effective, int fs)
58368+{
58369+ unsigned int i;
58370+ __u16 num;
58371+ gid_t *gidlist;
58372+ int curgid;
58373+ int realok = 0;
58374+ int effectiveok = 0;
58375+ int fsok = 0;
58376+
58377+ if (unlikely(!(gr_status & GR_READY)))
58378+ return 0;
58379+
58380+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58381+ gr_log_learn_id_change('g', real, effective, fs);
58382+
58383+ num = current->acl->group_trans_num;
58384+ gidlist = current->acl->group_transitions;
58385+
58386+ if (gidlist == NULL)
58387+ return 0;
58388+
58389+ if (real == -1)
58390+ realok = 1;
58391+ if (effective == -1)
58392+ effectiveok = 1;
58393+ if (fs == -1)
58394+ fsok = 1;
58395+
58396+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
58397+ for (i = 0; i < num; i++) {
58398+ curgid = (int)gidlist[i];
58399+ if (real == curgid)
58400+ realok = 1;
58401+ if (effective == curgid)
58402+ effectiveok = 1;
58403+ if (fs == curgid)
58404+ fsok = 1;
58405+ }
58406+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
58407+ for (i = 0; i < num; i++) {
58408+ curgid = (int)gidlist[i];
58409+ if (real == curgid)
58410+ break;
58411+ if (effective == curgid)
58412+ break;
58413+ if (fs == curgid)
58414+ break;
58415+ }
58416+ /* not in deny list */
58417+ if (i == num) {
58418+ realok = 1;
58419+ effectiveok = 1;
58420+ fsok = 1;
58421+ }
58422+ }
58423+
58424+ if (realok && effectiveok && fsok)
58425+ return 0;
58426+ else {
58427+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58428+ return 1;
58429+ }
58430+}
58431+
58432+void
58433+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58434+{
58435+ struct acl_role_label *role = task->role;
58436+ struct acl_subject_label *subj = NULL;
58437+ struct acl_object_label *obj;
58438+ struct file *filp;
58439+
58440+ if (unlikely(!(gr_status & GR_READY)))
58441+ return;
58442+
58443+ filp = task->exec_file;
58444+
58445+ /* kernel process, we'll give them the kernel role */
58446+ if (unlikely(!filp)) {
58447+ task->role = kernel_role;
58448+ task->acl = kernel_role->root_label;
58449+ return;
58450+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
58451+ role = lookup_acl_role_label(task, uid, gid);
58452+
58453+ /* perform subject lookup in possibly new role
58454+ we can use this result below in the case where role == task->role
58455+ */
58456+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
58457+
58458+ /* if we changed uid/gid, but result in the same role
58459+ and are using inheritance, don't lose the inherited subject
58460+ if current subject is other than what normal lookup
58461+ would result in, we arrived via inheritance, don't
58462+ lose subject
58463+ */
58464+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
58465+ (subj == task->acl)))
58466+ task->acl = subj;
58467+
58468+ task->role = role;
58469+
58470+ task->is_writable = 0;
58471+
58472+ /* ignore additional mmap checks for processes that are writable
58473+ by the default ACL */
58474+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58475+ if (unlikely(obj->mode & GR_WRITE))
58476+ task->is_writable = 1;
58477+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58478+ if (unlikely(obj->mode & GR_WRITE))
58479+ task->is_writable = 1;
58480+
58481+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58482+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58483+#endif
58484+
58485+ gr_set_proc_res(task);
58486+
58487+ return;
58488+}
58489+
58490+int
58491+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
58492+ const int unsafe_flags)
58493+{
58494+ struct task_struct *task = current;
58495+ struct acl_subject_label *newacl;
58496+ struct acl_object_label *obj;
58497+ __u32 retmode;
58498+
58499+ if (unlikely(!(gr_status & GR_READY)))
58500+ return 0;
58501+
58502+ newacl = chk_subj_label(dentry, mnt, task->role);
58503+
58504+ task_lock(task);
58505+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58506+ !(task->role->roletype & GR_ROLE_GOD) &&
58507+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
58508+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58509+ task_unlock(task);
58510+ if (unsafe_flags & LSM_UNSAFE_SHARE)
58511+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
58512+ else
58513+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58514+ return -EACCES;
58515+ }
58516+ task_unlock(task);
58517+
58518+ obj = chk_obj_label(dentry, mnt, task->acl);
58519+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
58520+
58521+ if (!(task->acl->mode & GR_INHERITLEARN) &&
58522+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
58523+ if (obj->nested)
58524+ task->acl = obj->nested;
58525+ else
58526+ task->acl = newacl;
58527+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
58528+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
58529+
58530+ task->is_writable = 0;
58531+
58532+ /* ignore additional mmap checks for processes that are writable
58533+ by the default ACL */
58534+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
58535+ if (unlikely(obj->mode & GR_WRITE))
58536+ task->is_writable = 1;
58537+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
58538+ if (unlikely(obj->mode & GR_WRITE))
58539+ task->is_writable = 1;
58540+
58541+ gr_set_proc_res(task);
58542+
58543+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58544+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58545+#endif
58546+ return 0;
58547+}
58548+
58549+/* always called with valid inodev ptr */
58550+static void
58551+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
58552+{
58553+ struct acl_object_label *matchpo;
58554+ struct acl_subject_label *matchps;
58555+ struct acl_subject_label *subj;
58556+ struct acl_role_label *role;
58557+ unsigned int x;
58558+
58559+ FOR_EACH_ROLE_START(role)
58560+ FOR_EACH_SUBJECT_START(role, subj, x)
58561+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
58562+ matchpo->mode |= GR_DELETED;
58563+ FOR_EACH_SUBJECT_END(subj,x)
58564+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58565+ if (subj->inode == ino && subj->device == dev)
58566+ subj->mode |= GR_DELETED;
58567+ FOR_EACH_NESTED_SUBJECT_END(subj)
58568+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
58569+ matchps->mode |= GR_DELETED;
58570+ FOR_EACH_ROLE_END(role)
58571+
58572+ inodev->nentry->deleted = 1;
58573+
58574+ return;
58575+}
58576+
58577+void
58578+gr_handle_delete(const ino_t ino, const dev_t dev)
58579+{
58580+ struct inodev_entry *inodev;
58581+
58582+ if (unlikely(!(gr_status & GR_READY)))
58583+ return;
58584+
58585+ write_lock(&gr_inode_lock);
58586+ inodev = lookup_inodev_entry(ino, dev);
58587+ if (inodev != NULL)
58588+ do_handle_delete(inodev, ino, dev);
58589+ write_unlock(&gr_inode_lock);
58590+
58591+ return;
58592+}
58593+
58594+static void
58595+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
58596+ const ino_t newinode, const dev_t newdevice,
58597+ struct acl_subject_label *subj)
58598+{
58599+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
58600+ struct acl_object_label *match;
58601+
58602+ match = subj->obj_hash[index];
58603+
58604+ while (match && (match->inode != oldinode ||
58605+ match->device != olddevice ||
58606+ !(match->mode & GR_DELETED)))
58607+ match = match->next;
58608+
58609+ if (match && (match->inode == oldinode)
58610+ && (match->device == olddevice)
58611+ && (match->mode & GR_DELETED)) {
58612+ if (match->prev == NULL) {
58613+ subj->obj_hash[index] = match->next;
58614+ if (match->next != NULL)
58615+ match->next->prev = NULL;
58616+ } else {
58617+ match->prev->next = match->next;
58618+ if (match->next != NULL)
58619+ match->next->prev = match->prev;
58620+ }
58621+ match->prev = NULL;
58622+ match->next = NULL;
58623+ match->inode = newinode;
58624+ match->device = newdevice;
58625+ match->mode &= ~GR_DELETED;
58626+
58627+ insert_acl_obj_label(match, subj);
58628+ }
58629+
58630+ return;
58631+}
58632+
58633+static void
58634+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
58635+ const ino_t newinode, const dev_t newdevice,
58636+ struct acl_role_label *role)
58637+{
58638+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
58639+ struct acl_subject_label *match;
58640+
58641+ match = role->subj_hash[index];
58642+
58643+ while (match && (match->inode != oldinode ||
58644+ match->device != olddevice ||
58645+ !(match->mode & GR_DELETED)))
58646+ match = match->next;
58647+
58648+ if (match && (match->inode == oldinode)
58649+ && (match->device == olddevice)
58650+ && (match->mode & GR_DELETED)) {
58651+ if (match->prev == NULL) {
58652+ role->subj_hash[index] = match->next;
58653+ if (match->next != NULL)
58654+ match->next->prev = NULL;
58655+ } else {
58656+ match->prev->next = match->next;
58657+ if (match->next != NULL)
58658+ match->next->prev = match->prev;
58659+ }
58660+ match->prev = NULL;
58661+ match->next = NULL;
58662+ match->inode = newinode;
58663+ match->device = newdevice;
58664+ match->mode &= ~GR_DELETED;
58665+
58666+ insert_acl_subj_label(match, role);
58667+ }
58668+
58669+ return;
58670+}
58671+
58672+static void
58673+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
58674+ const ino_t newinode, const dev_t newdevice)
58675+{
58676+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
58677+ struct inodev_entry *match;
58678+
58679+ match = inodev_set.i_hash[index];
58680+
58681+ while (match && (match->nentry->inode != oldinode ||
58682+ match->nentry->device != olddevice || !match->nentry->deleted))
58683+ match = match->next;
58684+
58685+ if (match && (match->nentry->inode == oldinode)
58686+ && (match->nentry->device == olddevice) &&
58687+ match->nentry->deleted) {
58688+ if (match->prev == NULL) {
58689+ inodev_set.i_hash[index] = match->next;
58690+ if (match->next != NULL)
58691+ match->next->prev = NULL;
58692+ } else {
58693+ match->prev->next = match->next;
58694+ if (match->next != NULL)
58695+ match->next->prev = match->prev;
58696+ }
58697+ match->prev = NULL;
58698+ match->next = NULL;
58699+ match->nentry->inode = newinode;
58700+ match->nentry->device = newdevice;
58701+ match->nentry->deleted = 0;
58702+
58703+ insert_inodev_entry(match);
58704+ }
58705+
58706+ return;
58707+}
58708+
58709+static void
58710+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
58711+{
58712+ struct acl_subject_label *subj;
58713+ struct acl_role_label *role;
58714+ unsigned int x;
58715+
58716+ FOR_EACH_ROLE_START(role)
58717+ update_acl_subj_label(matchn->inode, matchn->device,
58718+ inode, dev, role);
58719+
58720+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58721+ if ((subj->inode == inode) && (subj->device == dev)) {
58722+ subj->inode = inode;
58723+ subj->device = dev;
58724+ }
58725+ FOR_EACH_NESTED_SUBJECT_END(subj)
58726+ FOR_EACH_SUBJECT_START(role, subj, x)
58727+ update_acl_obj_label(matchn->inode, matchn->device,
58728+ inode, dev, subj);
58729+ FOR_EACH_SUBJECT_END(subj,x)
58730+ FOR_EACH_ROLE_END(role)
58731+
58732+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
58733+
58734+ return;
58735+}
58736+
58737+static void
58738+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
58739+ const struct vfsmount *mnt)
58740+{
58741+ ino_t ino = dentry->d_inode->i_ino;
58742+ dev_t dev = __get_dev(dentry);
58743+
58744+ __do_handle_create(matchn, ino, dev);
58745+
58746+ return;
58747+}
58748+
58749+void
58750+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
58751+{
58752+ struct name_entry *matchn;
58753+
58754+ if (unlikely(!(gr_status & GR_READY)))
58755+ return;
58756+
58757+ preempt_disable();
58758+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
58759+
58760+ if (unlikely((unsigned long)matchn)) {
58761+ write_lock(&gr_inode_lock);
58762+ do_handle_create(matchn, dentry, mnt);
58763+ write_unlock(&gr_inode_lock);
58764+ }
58765+ preempt_enable();
58766+
58767+ return;
58768+}
58769+
58770+void
58771+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
58772+{
58773+ struct name_entry *matchn;
58774+
58775+ if (unlikely(!(gr_status & GR_READY)))
58776+ return;
58777+
58778+ preempt_disable();
58779+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
58780+
58781+ if (unlikely((unsigned long)matchn)) {
58782+ write_lock(&gr_inode_lock);
58783+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
58784+ write_unlock(&gr_inode_lock);
58785+ }
58786+ preempt_enable();
58787+
58788+ return;
58789+}
58790+
58791+void
58792+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58793+ struct dentry *old_dentry,
58794+ struct dentry *new_dentry,
58795+ struct vfsmount *mnt, const __u8 replace)
58796+{
58797+ struct name_entry *matchn;
58798+ struct inodev_entry *inodev;
58799+ struct inode *inode = new_dentry->d_inode;
58800+ ino_t oldinode = old_dentry->d_inode->i_ino;
58801+ dev_t olddev = __get_dev(old_dentry);
58802+
58803+ /* vfs_rename swaps the name and parent link for old_dentry and
58804+ new_dentry
58805+ at this point, old_dentry has the new name, parent link, and inode
58806+ for the renamed file
58807+ if a file is being replaced by a rename, new_dentry has the inode
58808+ and name for the replaced file
58809+ */
58810+
58811+ if (unlikely(!(gr_status & GR_READY)))
58812+ return;
58813+
58814+ preempt_disable();
58815+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
58816+
58817+ /* we wouldn't have to check d_inode if it weren't for
58818+ NFS silly-renaming
58819+ */
58820+
58821+ write_lock(&gr_inode_lock);
58822+ if (unlikely(replace && inode)) {
58823+ ino_t newinode = inode->i_ino;
58824+ dev_t newdev = __get_dev(new_dentry);
58825+ inodev = lookup_inodev_entry(newinode, newdev);
58826+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
58827+ do_handle_delete(inodev, newinode, newdev);
58828+ }
58829+
58830+ inodev = lookup_inodev_entry(oldinode, olddev);
58831+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
58832+ do_handle_delete(inodev, oldinode, olddev);
58833+
58834+ if (unlikely((unsigned long)matchn))
58835+ do_handle_create(matchn, old_dentry, mnt);
58836+
58837+ write_unlock(&gr_inode_lock);
58838+ preempt_enable();
58839+
58840+ return;
58841+}
58842+
58843+static int
58844+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
58845+ unsigned char **sum)
58846+{
58847+ struct acl_role_label *r;
58848+ struct role_allowed_ip *ipp;
58849+ struct role_transition *trans;
58850+ unsigned int i;
58851+ int found = 0;
58852+ u32 curr_ip = current->signal->curr_ip;
58853+
58854+ current->signal->saved_ip = curr_ip;
58855+
58856+ /* check transition table */
58857+
58858+ for (trans = current->role->transitions; trans; trans = trans->next) {
58859+ if (!strcmp(rolename, trans->rolename)) {
58860+ found = 1;
58861+ break;
58862+ }
58863+ }
58864+
58865+ if (!found)
58866+ return 0;
58867+
58868+ /* handle special roles that do not require authentication
58869+ and check ip */
58870+
58871+ FOR_EACH_ROLE_START(r)
58872+ if (!strcmp(rolename, r->rolename) &&
58873+ (r->roletype & GR_ROLE_SPECIAL)) {
58874+ found = 0;
58875+ if (r->allowed_ips != NULL) {
58876+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
58877+ if ((ntohl(curr_ip) & ipp->netmask) ==
58878+ (ntohl(ipp->addr) & ipp->netmask))
58879+ found = 1;
58880+ }
58881+ } else
58882+ found = 2;
58883+ if (!found)
58884+ return 0;
58885+
58886+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
58887+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
58888+ *salt = NULL;
58889+ *sum = NULL;
58890+ return 1;
58891+ }
58892+ }
58893+ FOR_EACH_ROLE_END(r)
58894+
58895+ for (i = 0; i < num_sprole_pws; i++) {
58896+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
58897+ *salt = acl_special_roles[i]->salt;
58898+ *sum = acl_special_roles[i]->sum;
58899+ return 1;
58900+ }
58901+ }
58902+
58903+ return 0;
58904+}
58905+
58906+static void
58907+assign_special_role(char *rolename)
58908+{
58909+ struct acl_object_label *obj;
58910+ struct acl_role_label *r;
58911+ struct acl_role_label *assigned = NULL;
58912+ struct task_struct *tsk;
58913+ struct file *filp;
58914+
58915+ FOR_EACH_ROLE_START(r)
58916+ if (!strcmp(rolename, r->rolename) &&
58917+ (r->roletype & GR_ROLE_SPECIAL)) {
58918+ assigned = r;
58919+ break;
58920+ }
58921+ FOR_EACH_ROLE_END(r)
58922+
58923+ if (!assigned)
58924+ return;
58925+
58926+ read_lock(&tasklist_lock);
58927+ read_lock(&grsec_exec_file_lock);
58928+
58929+ tsk = current->real_parent;
58930+ if (tsk == NULL)
58931+ goto out_unlock;
58932+
58933+ filp = tsk->exec_file;
58934+ if (filp == NULL)
58935+ goto out_unlock;
58936+
58937+ tsk->is_writable = 0;
58938+
58939+ tsk->acl_sp_role = 1;
58940+ tsk->acl_role_id = ++acl_sp_role_value;
58941+ tsk->role = assigned;
58942+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
58943+
58944+ /* ignore additional mmap checks for processes that are writable
58945+ by the default ACL */
58946+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58947+ if (unlikely(obj->mode & GR_WRITE))
58948+ tsk->is_writable = 1;
58949+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
58950+ if (unlikely(obj->mode & GR_WRITE))
58951+ tsk->is_writable = 1;
58952+
58953+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58954+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
58955+#endif
58956+
58957+out_unlock:
58958+ read_unlock(&grsec_exec_file_lock);
58959+ read_unlock(&tasklist_lock);
58960+ return;
58961+}
58962+
58963+int gr_check_secure_terminal(struct task_struct *task)
58964+{
58965+ struct task_struct *p, *p2, *p3;
58966+ struct files_struct *files;
58967+ struct fdtable *fdt;
58968+ struct file *our_file = NULL, *file;
58969+ int i;
58970+
58971+ if (task->signal->tty == NULL)
58972+ return 1;
58973+
58974+ files = get_files_struct(task);
58975+ if (files != NULL) {
58976+ rcu_read_lock();
58977+ fdt = files_fdtable(files);
58978+ for (i=0; i < fdt->max_fds; i++) {
58979+ file = fcheck_files(files, i);
58980+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
58981+ get_file(file);
58982+ our_file = file;
58983+ }
58984+ }
58985+ rcu_read_unlock();
58986+ put_files_struct(files);
58987+ }
58988+
58989+ if (our_file == NULL)
58990+ return 1;
58991+
58992+ read_lock(&tasklist_lock);
58993+ do_each_thread(p2, p) {
58994+ files = get_files_struct(p);
58995+ if (files == NULL ||
58996+ (p->signal && p->signal->tty == task->signal->tty)) {
58997+ if (files != NULL)
58998+ put_files_struct(files);
58999+ continue;
59000+ }
59001+ rcu_read_lock();
59002+ fdt = files_fdtable(files);
59003+ for (i=0; i < fdt->max_fds; i++) {
59004+ file = fcheck_files(files, i);
59005+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
59006+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
59007+ p3 = task;
59008+ while (p3->pid > 0) {
59009+ if (p3 == p)
59010+ break;
59011+ p3 = p3->real_parent;
59012+ }
59013+ if (p3 == p)
59014+ break;
59015+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
59016+ gr_handle_alertkill(p);
59017+ rcu_read_unlock();
59018+ put_files_struct(files);
59019+ read_unlock(&tasklist_lock);
59020+ fput(our_file);
59021+ return 0;
59022+ }
59023+ }
59024+ rcu_read_unlock();
59025+ put_files_struct(files);
59026+ } while_each_thread(p2, p);
59027+ read_unlock(&tasklist_lock);
59028+
59029+ fput(our_file);
59030+ return 1;
59031+}
59032+
59033+ssize_t
59034+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
59035+{
59036+ struct gr_arg_wrapper uwrap;
59037+ unsigned char *sprole_salt = NULL;
59038+ unsigned char *sprole_sum = NULL;
59039+ int error = sizeof (struct gr_arg_wrapper);
59040+ int error2 = 0;
59041+
59042+ mutex_lock(&gr_dev_mutex);
59043+
59044+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
59045+ error = -EPERM;
59046+ goto out;
59047+ }
59048+
59049+ if (count != sizeof (struct gr_arg_wrapper)) {
59050+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
59051+ error = -EINVAL;
59052+ goto out;
59053+ }
59054+
59055+
59056+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
59057+ gr_auth_expires = 0;
59058+ gr_auth_attempts = 0;
59059+ }
59060+
59061+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
59062+ error = -EFAULT;
59063+ goto out;
59064+ }
59065+
59066+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
59067+ error = -EINVAL;
59068+ goto out;
59069+ }
59070+
59071+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
59072+ error = -EFAULT;
59073+ goto out;
59074+ }
59075+
59076+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59077+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59078+ time_after(gr_auth_expires, get_seconds())) {
59079+ error = -EBUSY;
59080+ goto out;
59081+ }
59082+
59083+ /* if non-root trying to do anything other than use a special role,
59084+ do not attempt authentication, do not count towards authentication
59085+ locking
59086+ */
59087+
59088+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59089+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59090+ current_uid()) {
59091+ error = -EPERM;
59092+ goto out;
59093+ }
59094+
59095+ /* ensure pw and special role name are null terminated */
59096+
59097+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59098+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59099+
59100+ /* Okay.
59101+ * We have our enough of the argument structure..(we have yet
59102+ * to copy_from_user the tables themselves) . Copy the tables
59103+ * only if we need them, i.e. for loading operations. */
59104+
59105+ switch (gr_usermode->mode) {
59106+ case GR_STATUS:
59107+ if (gr_status & GR_READY) {
59108+ error = 1;
59109+ if (!gr_check_secure_terminal(current))
59110+ error = 3;
59111+ } else
59112+ error = 2;
59113+ goto out;
59114+ case GR_SHUTDOWN:
59115+ if ((gr_status & GR_READY)
59116+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59117+ pax_open_kernel();
59118+ gr_status &= ~GR_READY;
59119+ pax_close_kernel();
59120+
59121+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59122+ free_variables();
59123+ memset(gr_usermode, 0, sizeof (struct gr_arg));
59124+ memset(gr_system_salt, 0, GR_SALT_LEN);
59125+ memset(gr_system_sum, 0, GR_SHA_LEN);
59126+ } else if (gr_status & GR_READY) {
59127+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59128+ error = -EPERM;
59129+ } else {
59130+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59131+ error = -EAGAIN;
59132+ }
59133+ break;
59134+ case GR_ENABLE:
59135+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59136+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59137+ else {
59138+ if (gr_status & GR_READY)
59139+ error = -EAGAIN;
59140+ else
59141+ error = error2;
59142+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59143+ }
59144+ break;
59145+ case GR_RELOAD:
59146+ if (!(gr_status & GR_READY)) {
59147+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59148+ error = -EAGAIN;
59149+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59150+ lock_kernel();
59151+
59152+ pax_open_kernel();
59153+ gr_status &= ~GR_READY;
59154+ pax_close_kernel();
59155+
59156+ free_variables();
59157+ if (!(error2 = gracl_init(gr_usermode))) {
59158+ unlock_kernel();
59159+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59160+ } else {
59161+ unlock_kernel();
59162+ error = error2;
59163+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59164+ }
59165+ } else {
59166+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59167+ error = -EPERM;
59168+ }
59169+ break;
59170+ case GR_SEGVMOD:
59171+ if (unlikely(!(gr_status & GR_READY))) {
59172+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59173+ error = -EAGAIN;
59174+ break;
59175+ }
59176+
59177+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59178+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59179+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59180+ struct acl_subject_label *segvacl;
59181+ segvacl =
59182+ lookup_acl_subj_label(gr_usermode->segv_inode,
59183+ gr_usermode->segv_device,
59184+ current->role);
59185+ if (segvacl) {
59186+ segvacl->crashes = 0;
59187+ segvacl->expires = 0;
59188+ }
59189+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59190+ gr_remove_uid(gr_usermode->segv_uid);
59191+ }
59192+ } else {
59193+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59194+ error = -EPERM;
59195+ }
59196+ break;
59197+ case GR_SPROLE:
59198+ case GR_SPROLEPAM:
59199+ if (unlikely(!(gr_status & GR_READY))) {
59200+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59201+ error = -EAGAIN;
59202+ break;
59203+ }
59204+
59205+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59206+ current->role->expires = 0;
59207+ current->role->auth_attempts = 0;
59208+ }
59209+
59210+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59211+ time_after(current->role->expires, get_seconds())) {
59212+ error = -EBUSY;
59213+ goto out;
59214+ }
59215+
59216+ if (lookup_special_role_auth
59217+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59218+ && ((!sprole_salt && !sprole_sum)
59219+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59220+ char *p = "";
59221+ assign_special_role(gr_usermode->sp_role);
59222+ read_lock(&tasklist_lock);
59223+ if (current->real_parent)
59224+ p = current->real_parent->role->rolename;
59225+ read_unlock(&tasklist_lock);
59226+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59227+ p, acl_sp_role_value);
59228+ } else {
59229+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59230+ error = -EPERM;
59231+ if(!(current->role->auth_attempts++))
59232+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59233+
59234+ goto out;
59235+ }
59236+ break;
59237+ case GR_UNSPROLE:
59238+ if (unlikely(!(gr_status & GR_READY))) {
59239+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59240+ error = -EAGAIN;
59241+ break;
59242+ }
59243+
59244+ if (current->role->roletype & GR_ROLE_SPECIAL) {
59245+ char *p = "";
59246+ int i = 0;
59247+
59248+ read_lock(&tasklist_lock);
59249+ if (current->real_parent) {
59250+ p = current->real_parent->role->rolename;
59251+ i = current->real_parent->acl_role_id;
59252+ }
59253+ read_unlock(&tasklist_lock);
59254+
59255+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59256+ gr_set_acls(1);
59257+ } else {
59258+ error = -EPERM;
59259+ goto out;
59260+ }
59261+ break;
59262+ default:
59263+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59264+ error = -EINVAL;
59265+ break;
59266+ }
59267+
59268+ if (error != -EPERM)
59269+ goto out;
59270+
59271+ if(!(gr_auth_attempts++))
59272+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59273+
59274+ out:
59275+ mutex_unlock(&gr_dev_mutex);
59276+ return error;
59277+}
59278+
59279+/* must be called with
59280+ rcu_read_lock();
59281+ read_lock(&tasklist_lock);
59282+ read_lock(&grsec_exec_file_lock);
59283+*/
59284+int gr_apply_subject_to_task(struct task_struct *task)
59285+{
59286+ struct acl_object_label *obj;
59287+ char *tmpname;
59288+ struct acl_subject_label *tmpsubj;
59289+ struct file *filp;
59290+ struct name_entry *nmatch;
59291+
59292+ filp = task->exec_file;
59293+ if (filp == NULL)
59294+ return 0;
59295+
59296+ /* the following is to apply the correct subject
59297+ on binaries running when the RBAC system
59298+ is enabled, when the binaries have been
59299+ replaced or deleted since their execution
59300+ -----
59301+ when the RBAC system starts, the inode/dev
59302+ from exec_file will be one the RBAC system
59303+ is unaware of. It only knows the inode/dev
59304+ of the present file on disk, or the absence
59305+ of it.
59306+ */
59307+ preempt_disable();
59308+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59309+
59310+ nmatch = lookup_name_entry(tmpname);
59311+ preempt_enable();
59312+ tmpsubj = NULL;
59313+ if (nmatch) {
59314+ if (nmatch->deleted)
59315+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59316+ else
59317+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59318+ if (tmpsubj != NULL)
59319+ task->acl = tmpsubj;
59320+ }
59321+ if (tmpsubj == NULL)
59322+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59323+ task->role);
59324+ if (task->acl) {
59325+ task->is_writable = 0;
59326+ /* ignore additional mmap checks for processes that are writable
59327+ by the default ACL */
59328+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59329+ if (unlikely(obj->mode & GR_WRITE))
59330+ task->is_writable = 1;
59331+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59332+ if (unlikely(obj->mode & GR_WRITE))
59333+ task->is_writable = 1;
59334+
59335+ gr_set_proc_res(task);
59336+
59337+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59338+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59339+#endif
59340+ } else {
59341+ return 1;
59342+ }
59343+
59344+ return 0;
59345+}
59346+
59347+int
59348+gr_set_acls(const int type)
59349+{
59350+ struct task_struct *task, *task2;
59351+ struct acl_role_label *role = current->role;
59352+ __u16 acl_role_id = current->acl_role_id;
59353+ const struct cred *cred;
59354+ int ret;
59355+
59356+ rcu_read_lock();
59357+ read_lock(&tasklist_lock);
59358+ read_lock(&grsec_exec_file_lock);
59359+ do_each_thread(task2, task) {
59360+ /* check to see if we're called from the exit handler,
59361+ if so, only replace ACLs that have inherited the admin
59362+ ACL */
59363+
59364+ if (type && (task->role != role ||
59365+ task->acl_role_id != acl_role_id))
59366+ continue;
59367+
59368+ task->acl_role_id = 0;
59369+ task->acl_sp_role = 0;
59370+
59371+ if (task->exec_file) {
59372+ cred = __task_cred(task);
59373+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
59374+
59375+ ret = gr_apply_subject_to_task(task);
59376+ if (ret) {
59377+ read_unlock(&grsec_exec_file_lock);
59378+ read_unlock(&tasklist_lock);
59379+ rcu_read_unlock();
59380+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
59381+ return ret;
59382+ }
59383+ } else {
59384+ // it's a kernel process
59385+ task->role = kernel_role;
59386+ task->acl = kernel_role->root_label;
59387+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
59388+ task->acl->mode &= ~GR_PROCFIND;
59389+#endif
59390+ }
59391+ } while_each_thread(task2, task);
59392+ read_unlock(&grsec_exec_file_lock);
59393+ read_unlock(&tasklist_lock);
59394+ rcu_read_unlock();
59395+
59396+ return 0;
59397+}
59398+
59399+void
59400+gr_learn_resource(const struct task_struct *task,
59401+ const int res, const unsigned long wanted, const int gt)
59402+{
59403+ struct acl_subject_label *acl;
59404+ const struct cred *cred;
59405+
59406+ if (unlikely((gr_status & GR_READY) &&
59407+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59408+ goto skip_reslog;
59409+
59410+#ifdef CONFIG_GRKERNSEC_RESLOG
59411+ gr_log_resource(task, res, wanted, gt);
59412+#endif
59413+ skip_reslog:
59414+
59415+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59416+ return;
59417+
59418+ acl = task->acl;
59419+
59420+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59421+ !(acl->resmask & (1 << (unsigned short) res))))
59422+ return;
59423+
59424+ if (wanted >= acl->res[res].rlim_cur) {
59425+ unsigned long res_add;
59426+
59427+ res_add = wanted;
59428+ switch (res) {
59429+ case RLIMIT_CPU:
59430+ res_add += GR_RLIM_CPU_BUMP;
59431+ break;
59432+ case RLIMIT_FSIZE:
59433+ res_add += GR_RLIM_FSIZE_BUMP;
59434+ break;
59435+ case RLIMIT_DATA:
59436+ res_add += GR_RLIM_DATA_BUMP;
59437+ break;
59438+ case RLIMIT_STACK:
59439+ res_add += GR_RLIM_STACK_BUMP;
59440+ break;
59441+ case RLIMIT_CORE:
59442+ res_add += GR_RLIM_CORE_BUMP;
59443+ break;
59444+ case RLIMIT_RSS:
59445+ res_add += GR_RLIM_RSS_BUMP;
59446+ break;
59447+ case RLIMIT_NPROC:
59448+ res_add += GR_RLIM_NPROC_BUMP;
59449+ break;
59450+ case RLIMIT_NOFILE:
59451+ res_add += GR_RLIM_NOFILE_BUMP;
59452+ break;
59453+ case RLIMIT_MEMLOCK:
59454+ res_add += GR_RLIM_MEMLOCK_BUMP;
59455+ break;
59456+ case RLIMIT_AS:
59457+ res_add += GR_RLIM_AS_BUMP;
59458+ break;
59459+ case RLIMIT_LOCKS:
59460+ res_add += GR_RLIM_LOCKS_BUMP;
59461+ break;
59462+ case RLIMIT_SIGPENDING:
59463+ res_add += GR_RLIM_SIGPENDING_BUMP;
59464+ break;
59465+ case RLIMIT_MSGQUEUE:
59466+ res_add += GR_RLIM_MSGQUEUE_BUMP;
59467+ break;
59468+ case RLIMIT_NICE:
59469+ res_add += GR_RLIM_NICE_BUMP;
59470+ break;
59471+ case RLIMIT_RTPRIO:
59472+ res_add += GR_RLIM_RTPRIO_BUMP;
59473+ break;
59474+ case RLIMIT_RTTIME:
59475+ res_add += GR_RLIM_RTTIME_BUMP;
59476+ break;
59477+ }
59478+
59479+ acl->res[res].rlim_cur = res_add;
59480+
59481+ if (wanted > acl->res[res].rlim_max)
59482+ acl->res[res].rlim_max = res_add;
59483+
59484+ /* only log the subject filename, since resource logging is supported for
59485+ single-subject learning only */
59486+ rcu_read_lock();
59487+ cred = __task_cred(task);
59488+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59489+ task->role->roletype, cred->uid, cred->gid, acl->filename,
59490+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
59491+ "", (unsigned long) res, &task->signal->saved_ip);
59492+ rcu_read_unlock();
59493+ }
59494+
59495+ return;
59496+}
59497+
59498+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
59499+void
59500+pax_set_initial_flags(struct linux_binprm *bprm)
59501+{
59502+ struct task_struct *task = current;
59503+ struct acl_subject_label *proc;
59504+ unsigned long flags;
59505+
59506+ if (unlikely(!(gr_status & GR_READY)))
59507+ return;
59508+
59509+ flags = pax_get_flags(task);
59510+
59511+ proc = task->acl;
59512+
59513+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
59514+ flags &= ~MF_PAX_PAGEEXEC;
59515+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
59516+ flags &= ~MF_PAX_SEGMEXEC;
59517+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
59518+ flags &= ~MF_PAX_RANDMMAP;
59519+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
59520+ flags &= ~MF_PAX_EMUTRAMP;
59521+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
59522+ flags &= ~MF_PAX_MPROTECT;
59523+
59524+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
59525+ flags |= MF_PAX_PAGEEXEC;
59526+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
59527+ flags |= MF_PAX_SEGMEXEC;
59528+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
59529+ flags |= MF_PAX_RANDMMAP;
59530+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
59531+ flags |= MF_PAX_EMUTRAMP;
59532+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
59533+ flags |= MF_PAX_MPROTECT;
59534+
59535+ pax_set_flags(task, flags);
59536+
59537+ return;
59538+}
59539+#endif
59540+
59541+#ifdef CONFIG_SYSCTL
59542+/* Eric Biederman likes breaking userland ABI and every inode-based security
59543+ system to save 35kb of memory */
59544+
59545+/* we modify the passed in filename, but adjust it back before returning */
59546+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
59547+{
59548+ struct name_entry *nmatch;
59549+ char *p, *lastp = NULL;
59550+ struct acl_object_label *obj = NULL, *tmp;
59551+ struct acl_subject_label *tmpsubj;
59552+ char c = '\0';
59553+
59554+ read_lock(&gr_inode_lock);
59555+
59556+ p = name + len - 1;
59557+ do {
59558+ nmatch = lookup_name_entry(name);
59559+ if (lastp != NULL)
59560+ *lastp = c;
59561+
59562+ if (nmatch == NULL)
59563+ goto next_component;
59564+ tmpsubj = current->acl;
59565+ do {
59566+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
59567+ if (obj != NULL) {
59568+ tmp = obj->globbed;
59569+ while (tmp) {
59570+ if (!glob_match(tmp->filename, name)) {
59571+ obj = tmp;
59572+ goto found_obj;
59573+ }
59574+ tmp = tmp->next;
59575+ }
59576+ goto found_obj;
59577+ }
59578+ } while ((tmpsubj = tmpsubj->parent_subject));
59579+next_component:
59580+ /* end case */
59581+ if (p == name)
59582+ break;
59583+
59584+ while (*p != '/')
59585+ p--;
59586+ if (p == name)
59587+ lastp = p + 1;
59588+ else {
59589+ lastp = p;
59590+ p--;
59591+ }
59592+ c = *lastp;
59593+ *lastp = '\0';
59594+ } while (1);
59595+found_obj:
59596+ read_unlock(&gr_inode_lock);
59597+ /* obj returned will always be non-null */
59598+ return obj;
59599+}
59600+
59601+/* returns 0 when allowing, non-zero on error
59602+ op of 0 is used for readdir, so we don't log the names of hidden files
59603+*/
59604+__u32
59605+gr_handle_sysctl(const struct ctl_table *table, const int op)
59606+{
59607+ ctl_table *tmp;
59608+ const char *proc_sys = "/proc/sys";
59609+ char *path;
59610+ struct acl_object_label *obj;
59611+ unsigned short len = 0, pos = 0, depth = 0, i;
59612+ __u32 err = 0;
59613+ __u32 mode = 0;
59614+
59615+ if (unlikely(!(gr_status & GR_READY)))
59616+ return 0;
59617+
59618+ /* for now, ignore operations on non-sysctl entries if it's not a
59619+ readdir*/
59620+ if (table->child != NULL && op != 0)
59621+ return 0;
59622+
59623+ mode |= GR_FIND;
59624+ /* it's only a read if it's an entry, read on dirs is for readdir */
59625+ if (op & MAY_READ)
59626+ mode |= GR_READ;
59627+ if (op & MAY_WRITE)
59628+ mode |= GR_WRITE;
59629+
59630+ preempt_disable();
59631+
59632+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
59633+
59634+ /* it's only a read/write if it's an actual entry, not a dir
59635+ (which are opened for readdir)
59636+ */
59637+
59638+ /* convert the requested sysctl entry into a pathname */
59639+
59640+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59641+ len += strlen(tmp->procname);
59642+ len++;
59643+ depth++;
59644+ }
59645+
59646+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
59647+ /* deny */
59648+ goto out;
59649+ }
59650+
59651+ memset(path, 0, PAGE_SIZE);
59652+
59653+ memcpy(path, proc_sys, strlen(proc_sys));
59654+
59655+ pos += strlen(proc_sys);
59656+
59657+ for (; depth > 0; depth--) {
59658+ path[pos] = '/';
59659+ pos++;
59660+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59661+ if (depth == i) {
59662+ memcpy(path + pos, tmp->procname,
59663+ strlen(tmp->procname));
59664+ pos += strlen(tmp->procname);
59665+ }
59666+ i++;
59667+ }
59668+ }
59669+
59670+ obj = gr_lookup_by_name(path, pos);
59671+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
59672+
59673+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
59674+ ((err & mode) != mode))) {
59675+ __u32 new_mode = mode;
59676+
59677+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59678+
59679+ err = 0;
59680+ gr_log_learn_sysctl(path, new_mode);
59681+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
59682+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
59683+ err = -ENOENT;
59684+ } else if (!(err & GR_FIND)) {
59685+ err = -ENOENT;
59686+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
59687+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
59688+ path, (mode & GR_READ) ? " reading" : "",
59689+ (mode & GR_WRITE) ? " writing" : "");
59690+ err = -EACCES;
59691+ } else if ((err & mode) != mode) {
59692+ err = -EACCES;
59693+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
59694+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
59695+ path, (mode & GR_READ) ? " reading" : "",
59696+ (mode & GR_WRITE) ? " writing" : "");
59697+ err = 0;
59698+ } else
59699+ err = 0;
59700+
59701+ out:
59702+ preempt_enable();
59703+
59704+ return err;
59705+}
59706+#endif
59707+
59708+int
59709+gr_handle_proc_ptrace(struct task_struct *task)
59710+{
59711+ struct file *filp;
59712+ struct task_struct *tmp = task;
59713+ struct task_struct *curtemp = current;
59714+ __u32 retmode;
59715+
59716+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59717+ if (unlikely(!(gr_status & GR_READY)))
59718+ return 0;
59719+#endif
59720+
59721+ read_lock(&tasklist_lock);
59722+ read_lock(&grsec_exec_file_lock);
59723+ filp = task->exec_file;
59724+
59725+ while (tmp->pid > 0) {
59726+ if (tmp == curtemp)
59727+ break;
59728+ tmp = tmp->real_parent;
59729+ }
59730+
59731+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59732+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
59733+ read_unlock(&grsec_exec_file_lock);
59734+ read_unlock(&tasklist_lock);
59735+ return 1;
59736+ }
59737+
59738+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59739+ if (!(gr_status & GR_READY)) {
59740+ read_unlock(&grsec_exec_file_lock);
59741+ read_unlock(&tasklist_lock);
59742+ return 0;
59743+ }
59744+#endif
59745+
59746+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
59747+ read_unlock(&grsec_exec_file_lock);
59748+ read_unlock(&tasklist_lock);
59749+
59750+ if (retmode & GR_NOPTRACE)
59751+ return 1;
59752+
59753+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
59754+ && (current->acl != task->acl || (current->acl != current->role->root_label
59755+ && current->pid != task->pid)))
59756+ return 1;
59757+
59758+ return 0;
59759+}
59760+
59761+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
59762+{
59763+ if (unlikely(!(gr_status & GR_READY)))
59764+ return;
59765+
59766+ if (!(current->role->roletype & GR_ROLE_GOD))
59767+ return;
59768+
59769+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
59770+ p->role->rolename, gr_task_roletype_to_char(p),
59771+ p->acl->filename);
59772+}
59773+
59774+int
59775+gr_handle_ptrace(struct task_struct *task, const long request)
59776+{
59777+ struct task_struct *tmp = task;
59778+ struct task_struct *curtemp = current;
59779+ __u32 retmode;
59780+
59781+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59782+ if (unlikely(!(gr_status & GR_READY)))
59783+ return 0;
59784+#endif
59785+
59786+ read_lock(&tasklist_lock);
59787+ while (tmp->pid > 0) {
59788+ if (tmp == curtemp)
59789+ break;
59790+ tmp = tmp->real_parent;
59791+ }
59792+
59793+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59794+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
59795+ read_unlock(&tasklist_lock);
59796+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59797+ return 1;
59798+ }
59799+ read_unlock(&tasklist_lock);
59800+
59801+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59802+ if (!(gr_status & GR_READY))
59803+ return 0;
59804+#endif
59805+
59806+ read_lock(&grsec_exec_file_lock);
59807+ if (unlikely(!task->exec_file)) {
59808+ read_unlock(&grsec_exec_file_lock);
59809+ return 0;
59810+ }
59811+
59812+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
59813+ read_unlock(&grsec_exec_file_lock);
59814+
59815+ if (retmode & GR_NOPTRACE) {
59816+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59817+ return 1;
59818+ }
59819+
59820+ if (retmode & GR_PTRACERD) {
59821+ switch (request) {
59822+ case PTRACE_POKETEXT:
59823+ case PTRACE_POKEDATA:
59824+ case PTRACE_POKEUSR:
59825+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
59826+ case PTRACE_SETREGS:
59827+ case PTRACE_SETFPREGS:
59828+#endif
59829+#ifdef CONFIG_X86
59830+ case PTRACE_SETFPXREGS:
59831+#endif
59832+#ifdef CONFIG_ALTIVEC
59833+ case PTRACE_SETVRREGS:
59834+#endif
59835+ return 1;
59836+ default:
59837+ return 0;
59838+ }
59839+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
59840+ !(current->role->roletype & GR_ROLE_GOD) &&
59841+ (current->acl != task->acl)) {
59842+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59843+ return 1;
59844+ }
59845+
59846+ return 0;
59847+}
59848+
59849+static int is_writable_mmap(const struct file *filp)
59850+{
59851+ struct task_struct *task = current;
59852+ struct acl_object_label *obj, *obj2;
59853+
59854+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
59855+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
59856+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59857+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
59858+ task->role->root_label);
59859+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
59860+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
59861+ return 1;
59862+ }
59863+ }
59864+ return 0;
59865+}
59866+
59867+int
59868+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
59869+{
59870+ __u32 mode;
59871+
59872+ if (unlikely(!file || !(prot & PROT_EXEC)))
59873+ return 1;
59874+
59875+ if (is_writable_mmap(file))
59876+ return 0;
59877+
59878+ mode =
59879+ gr_search_file(file->f_path.dentry,
59880+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59881+ file->f_path.mnt);
59882+
59883+ if (!gr_tpe_allow(file))
59884+ return 0;
59885+
59886+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59887+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59888+ return 0;
59889+ } else if (unlikely(!(mode & GR_EXEC))) {
59890+ return 0;
59891+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59892+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59893+ return 1;
59894+ }
59895+
59896+ return 1;
59897+}
59898+
59899+int
59900+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59901+{
59902+ __u32 mode;
59903+
59904+ if (unlikely(!file || !(prot & PROT_EXEC)))
59905+ return 1;
59906+
59907+ if (is_writable_mmap(file))
59908+ return 0;
59909+
59910+ mode =
59911+ gr_search_file(file->f_path.dentry,
59912+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59913+ file->f_path.mnt);
59914+
59915+ if (!gr_tpe_allow(file))
59916+ return 0;
59917+
59918+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59919+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59920+ return 0;
59921+ } else if (unlikely(!(mode & GR_EXEC))) {
59922+ return 0;
59923+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59924+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59925+ return 1;
59926+ }
59927+
59928+ return 1;
59929+}
59930+
59931+void
59932+gr_acl_handle_psacct(struct task_struct *task, const long code)
59933+{
59934+ unsigned long runtime;
59935+ unsigned long cputime;
59936+ unsigned int wday, cday;
59937+ __u8 whr, chr;
59938+ __u8 wmin, cmin;
59939+ __u8 wsec, csec;
59940+ struct timespec timeval;
59941+
59942+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
59943+ !(task->acl->mode & GR_PROCACCT)))
59944+ return;
59945+
59946+ do_posix_clock_monotonic_gettime(&timeval);
59947+ runtime = timeval.tv_sec - task->start_time.tv_sec;
59948+ wday = runtime / (3600 * 24);
59949+ runtime -= wday * (3600 * 24);
59950+ whr = runtime / 3600;
59951+ runtime -= whr * 3600;
59952+ wmin = runtime / 60;
59953+ runtime -= wmin * 60;
59954+ wsec = runtime;
59955+
59956+ cputime = (task->utime + task->stime) / HZ;
59957+ cday = cputime / (3600 * 24);
59958+ cputime -= cday * (3600 * 24);
59959+ chr = cputime / 3600;
59960+ cputime -= chr * 3600;
59961+ cmin = cputime / 60;
59962+ cputime -= cmin * 60;
59963+ csec = cputime;
59964+
59965+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
59966+
59967+ return;
59968+}
59969+
59970+void gr_set_kernel_label(struct task_struct *task)
59971+{
59972+ if (gr_status & GR_READY) {
59973+ task->role = kernel_role;
59974+ task->acl = kernel_role->root_label;
59975+ }
59976+ return;
59977+}
59978+
59979+#ifdef CONFIG_TASKSTATS
59980+int gr_is_taskstats_denied(int pid)
59981+{
59982+ struct task_struct *task;
59983+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59984+ const struct cred *cred;
59985+#endif
59986+ int ret = 0;
59987+
59988+ /* restrict taskstats viewing to un-chrooted root users
59989+ who have the 'view' subject flag if the RBAC system is enabled
59990+ */
59991+
59992+ rcu_read_lock();
59993+ read_lock(&tasklist_lock);
59994+ task = find_task_by_vpid(pid);
59995+ if (task) {
59996+#ifdef CONFIG_GRKERNSEC_CHROOT
59997+ if (proc_is_chrooted(task))
59998+ ret = -EACCES;
59999+#endif
60000+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60001+ cred = __task_cred(task);
60002+#ifdef CONFIG_GRKERNSEC_PROC_USER
60003+ if (cred->uid != 0)
60004+ ret = -EACCES;
60005+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60006+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
60007+ ret = -EACCES;
60008+#endif
60009+#endif
60010+ if (gr_status & GR_READY) {
60011+ if (!(task->acl->mode & GR_VIEW))
60012+ ret = -EACCES;
60013+ }
60014+ } else
60015+ ret = -ENOENT;
60016+
60017+ read_unlock(&tasklist_lock);
60018+ rcu_read_unlock();
60019+
60020+ return ret;
60021+}
60022+#endif
60023+
60024+/* AUXV entries are filled via a descendant of search_binary_handler
60025+ after we've already applied the subject for the target
60026+*/
60027+int gr_acl_enable_at_secure(void)
60028+{
60029+ if (unlikely(!(gr_status & GR_READY)))
60030+ return 0;
60031+
60032+ if (current->acl->mode & GR_ATSECURE)
60033+ return 1;
60034+
60035+ return 0;
60036+}
60037+
60038+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
60039+{
60040+ struct task_struct *task = current;
60041+ struct dentry *dentry = file->f_path.dentry;
60042+ struct vfsmount *mnt = file->f_path.mnt;
60043+ struct acl_object_label *obj, *tmp;
60044+ struct acl_subject_label *subj;
60045+ unsigned int bufsize;
60046+ int is_not_root;
60047+ char *path;
60048+ dev_t dev = __get_dev(dentry);
60049+
60050+ if (unlikely(!(gr_status & GR_READY)))
60051+ return 1;
60052+
60053+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60054+ return 1;
60055+
60056+ /* ignore Eric Biederman */
60057+ if (IS_PRIVATE(dentry->d_inode))
60058+ return 1;
60059+
60060+ subj = task->acl;
60061+ do {
60062+ obj = lookup_acl_obj_label(ino, dev, subj);
60063+ if (obj != NULL)
60064+ return (obj->mode & GR_FIND) ? 1 : 0;
60065+ } while ((subj = subj->parent_subject));
60066+
60067+ /* this is purely an optimization since we're looking for an object
60068+ for the directory we're doing a readdir on
60069+ if it's possible for any globbed object to match the entry we're
60070+ filling into the directory, then the object we find here will be
60071+ an anchor point with attached globbed objects
60072+ */
60073+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
60074+ if (obj->globbed == NULL)
60075+ return (obj->mode & GR_FIND) ? 1 : 0;
60076+
60077+ is_not_root = ((obj->filename[0] == '/') &&
60078+ (obj->filename[1] == '\0')) ? 0 : 1;
60079+ bufsize = PAGE_SIZE - namelen - is_not_root;
60080+
60081+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
60082+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60083+ return 1;
60084+
60085+ preempt_disable();
60086+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60087+ bufsize);
60088+
60089+ bufsize = strlen(path);
60090+
60091+ /* if base is "/", don't append an additional slash */
60092+ if (is_not_root)
60093+ *(path + bufsize) = '/';
60094+ memcpy(path + bufsize + is_not_root, name, namelen);
60095+ *(path + bufsize + namelen + is_not_root) = '\0';
60096+
60097+ tmp = obj->globbed;
60098+ while (tmp) {
60099+ if (!glob_match(tmp->filename, path)) {
60100+ preempt_enable();
60101+ return (tmp->mode & GR_FIND) ? 1 : 0;
60102+ }
60103+ tmp = tmp->next;
60104+ }
60105+ preempt_enable();
60106+ return (obj->mode & GR_FIND) ? 1 : 0;
60107+}
60108+
60109+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60110+EXPORT_SYMBOL(gr_acl_is_enabled);
60111+#endif
60112+EXPORT_SYMBOL(gr_learn_resource);
60113+EXPORT_SYMBOL(gr_set_kernel_label);
60114+#ifdef CONFIG_SECURITY
60115+EXPORT_SYMBOL(gr_check_user_change);
60116+EXPORT_SYMBOL(gr_check_group_change);
60117+#endif
60118+
60119diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60120new file mode 100644
60121index 0000000..34fefda
60122--- /dev/null
60123+++ b/grsecurity/gracl_alloc.c
60124@@ -0,0 +1,105 @@
60125+#include <linux/kernel.h>
60126+#include <linux/mm.h>
60127+#include <linux/slab.h>
60128+#include <linux/vmalloc.h>
60129+#include <linux/gracl.h>
60130+#include <linux/grsecurity.h>
60131+
60132+static unsigned long alloc_stack_next = 1;
60133+static unsigned long alloc_stack_size = 1;
60134+static void **alloc_stack;
60135+
60136+static __inline__ int
60137+alloc_pop(void)
60138+{
60139+ if (alloc_stack_next == 1)
60140+ return 0;
60141+
60142+ kfree(alloc_stack[alloc_stack_next - 2]);
60143+
60144+ alloc_stack_next--;
60145+
60146+ return 1;
60147+}
60148+
60149+static __inline__ int
60150+alloc_push(void *buf)
60151+{
60152+ if (alloc_stack_next >= alloc_stack_size)
60153+ return 1;
60154+
60155+ alloc_stack[alloc_stack_next - 1] = buf;
60156+
60157+ alloc_stack_next++;
60158+
60159+ return 0;
60160+}
60161+
60162+void *
60163+acl_alloc(unsigned long len)
60164+{
60165+ void *ret = NULL;
60166+
60167+ if (!len || len > PAGE_SIZE)
60168+ goto out;
60169+
60170+ ret = kmalloc(len, GFP_KERNEL);
60171+
60172+ if (ret) {
60173+ if (alloc_push(ret)) {
60174+ kfree(ret);
60175+ ret = NULL;
60176+ }
60177+ }
60178+
60179+out:
60180+ return ret;
60181+}
60182+
60183+void *
60184+acl_alloc_num(unsigned long num, unsigned long len)
60185+{
60186+ if (!len || (num > (PAGE_SIZE / len)))
60187+ return NULL;
60188+
60189+ return acl_alloc(num * len);
60190+}
60191+
60192+void
60193+acl_free_all(void)
60194+{
60195+ if (gr_acl_is_enabled() || !alloc_stack)
60196+ return;
60197+
60198+ while (alloc_pop()) ;
60199+
60200+ if (alloc_stack) {
60201+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60202+ kfree(alloc_stack);
60203+ else
60204+ vfree(alloc_stack);
60205+ }
60206+
60207+ alloc_stack = NULL;
60208+ alloc_stack_size = 1;
60209+ alloc_stack_next = 1;
60210+
60211+ return;
60212+}
60213+
60214+int
60215+acl_alloc_stack_init(unsigned long size)
60216+{
60217+ if ((size * sizeof (void *)) <= PAGE_SIZE)
60218+ alloc_stack =
60219+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60220+ else
60221+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
60222+
60223+ alloc_stack_size = size;
60224+
60225+ if (!alloc_stack)
60226+ return 0;
60227+ else
60228+ return 1;
60229+}
60230diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60231new file mode 100644
60232index 0000000..955ddfb
60233--- /dev/null
60234+++ b/grsecurity/gracl_cap.c
60235@@ -0,0 +1,101 @@
60236+#include <linux/kernel.h>
60237+#include <linux/module.h>
60238+#include <linux/sched.h>
60239+#include <linux/gracl.h>
60240+#include <linux/grsecurity.h>
60241+#include <linux/grinternal.h>
60242+
60243+extern const char *captab_log[];
60244+extern int captab_log_entries;
60245+
60246+int
60247+gr_acl_is_capable(const int cap)
60248+{
60249+ struct task_struct *task = current;
60250+ const struct cred *cred = current_cred();
60251+ struct acl_subject_label *curracl;
60252+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60253+ kernel_cap_t cap_audit = __cap_empty_set;
60254+
60255+ if (!gr_acl_is_enabled())
60256+ return 1;
60257+
60258+ curracl = task->acl;
60259+
60260+ cap_drop = curracl->cap_lower;
60261+ cap_mask = curracl->cap_mask;
60262+ cap_audit = curracl->cap_invert_audit;
60263+
60264+ while ((curracl = curracl->parent_subject)) {
60265+ /* if the cap isn't specified in the current computed mask but is specified in the
60266+ current level subject, and is lowered in the current level subject, then add
60267+ it to the set of dropped capabilities
60268+ otherwise, add the current level subject's mask to the current computed mask
60269+ */
60270+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60271+ cap_raise(cap_mask, cap);
60272+ if (cap_raised(curracl->cap_lower, cap))
60273+ cap_raise(cap_drop, cap);
60274+ if (cap_raised(curracl->cap_invert_audit, cap))
60275+ cap_raise(cap_audit, cap);
60276+ }
60277+ }
60278+
60279+ if (!cap_raised(cap_drop, cap)) {
60280+ if (cap_raised(cap_audit, cap))
60281+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60282+ return 1;
60283+ }
60284+
60285+ curracl = task->acl;
60286+
60287+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60288+ && cap_raised(cred->cap_effective, cap)) {
60289+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60290+ task->role->roletype, cred->uid,
60291+ cred->gid, task->exec_file ?
60292+ gr_to_filename(task->exec_file->f_path.dentry,
60293+ task->exec_file->f_path.mnt) : curracl->filename,
60294+ curracl->filename, 0UL,
60295+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60296+ return 1;
60297+ }
60298+
60299+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60300+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60301+ return 0;
60302+}
60303+
60304+int
60305+gr_acl_is_capable_nolog(const int cap)
60306+{
60307+ struct acl_subject_label *curracl;
60308+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60309+
60310+ if (!gr_acl_is_enabled())
60311+ return 1;
60312+
60313+ curracl = current->acl;
60314+
60315+ cap_drop = curracl->cap_lower;
60316+ cap_mask = curracl->cap_mask;
60317+
60318+ while ((curracl = curracl->parent_subject)) {
60319+ /* if the cap isn't specified in the current computed mask but is specified in the
60320+ current level subject, and is lowered in the current level subject, then add
60321+ it to the set of dropped capabilities
60322+ otherwise, add the current level subject's mask to the current computed mask
60323+ */
60324+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60325+ cap_raise(cap_mask, cap);
60326+ if (cap_raised(curracl->cap_lower, cap))
60327+ cap_raise(cap_drop, cap);
60328+ }
60329+ }
60330+
60331+ if (!cap_raised(cap_drop, cap))
60332+ return 1;
60333+
60334+ return 0;
60335+}
60336+
60337diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60338new file mode 100644
60339index 0000000..d5f210c
60340--- /dev/null
60341+++ b/grsecurity/gracl_fs.c
60342@@ -0,0 +1,433 @@
60343+#include <linux/kernel.h>
60344+#include <linux/sched.h>
60345+#include <linux/types.h>
60346+#include <linux/fs.h>
60347+#include <linux/file.h>
60348+#include <linux/stat.h>
60349+#include <linux/grsecurity.h>
60350+#include <linux/grinternal.h>
60351+#include <linux/gracl.h>
60352+
60353+__u32
60354+gr_acl_handle_hidden_file(const struct dentry * dentry,
60355+ const struct vfsmount * mnt)
60356+{
60357+ __u32 mode;
60358+
60359+ if (unlikely(!dentry->d_inode))
60360+ return GR_FIND;
60361+
60362+ mode =
60363+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
60364+
60365+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
60366+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60367+ return mode;
60368+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
60369+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60370+ return 0;
60371+ } else if (unlikely(!(mode & GR_FIND)))
60372+ return 0;
60373+
60374+ return GR_FIND;
60375+}
60376+
60377+__u32
60378+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60379+ int acc_mode)
60380+{
60381+ __u32 reqmode = GR_FIND;
60382+ __u32 mode;
60383+
60384+ if (unlikely(!dentry->d_inode))
60385+ return reqmode;
60386+
60387+ if (acc_mode & MAY_APPEND)
60388+ reqmode |= GR_APPEND;
60389+ else if (acc_mode & MAY_WRITE)
60390+ reqmode |= GR_WRITE;
60391+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60392+ reqmode |= GR_READ;
60393+
60394+ mode =
60395+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60396+ mnt);
60397+
60398+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60399+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60400+ reqmode & GR_READ ? " reading" : "",
60401+ reqmode & GR_WRITE ? " writing" : reqmode &
60402+ GR_APPEND ? " appending" : "");
60403+ return reqmode;
60404+ } else
60405+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60406+ {
60407+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60408+ reqmode & GR_READ ? " reading" : "",
60409+ reqmode & GR_WRITE ? " writing" : reqmode &
60410+ GR_APPEND ? " appending" : "");
60411+ return 0;
60412+ } else if (unlikely((mode & reqmode) != reqmode))
60413+ return 0;
60414+
60415+ return reqmode;
60416+}
60417+
60418+__u32
60419+gr_acl_handle_creat(const struct dentry * dentry,
60420+ const struct dentry * p_dentry,
60421+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60422+ const int imode)
60423+{
60424+ __u32 reqmode = GR_WRITE | GR_CREATE;
60425+ __u32 mode;
60426+
60427+ if (acc_mode & MAY_APPEND)
60428+ reqmode |= GR_APPEND;
60429+ // if a directory was required or the directory already exists, then
60430+ // don't count this open as a read
60431+ if ((acc_mode & MAY_READ) &&
60432+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
60433+ reqmode |= GR_READ;
60434+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
60435+ reqmode |= GR_SETID;
60436+
60437+ mode =
60438+ gr_check_create(dentry, p_dentry, p_mnt,
60439+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60440+
60441+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60442+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60443+ reqmode & GR_READ ? " reading" : "",
60444+ reqmode & GR_WRITE ? " writing" : reqmode &
60445+ GR_APPEND ? " appending" : "");
60446+ return reqmode;
60447+ } else
60448+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60449+ {
60450+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60451+ reqmode & GR_READ ? " reading" : "",
60452+ reqmode & GR_WRITE ? " writing" : reqmode &
60453+ GR_APPEND ? " appending" : "");
60454+ return 0;
60455+ } else if (unlikely((mode & reqmode) != reqmode))
60456+ return 0;
60457+
60458+ return reqmode;
60459+}
60460+
60461+__u32
60462+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
60463+ const int fmode)
60464+{
60465+ __u32 mode, reqmode = GR_FIND;
60466+
60467+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
60468+ reqmode |= GR_EXEC;
60469+ if (fmode & S_IWOTH)
60470+ reqmode |= GR_WRITE;
60471+ if (fmode & S_IROTH)
60472+ reqmode |= GR_READ;
60473+
60474+ mode =
60475+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60476+ mnt);
60477+
60478+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60479+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60480+ reqmode & GR_READ ? " reading" : "",
60481+ reqmode & GR_WRITE ? " writing" : "",
60482+ reqmode & GR_EXEC ? " executing" : "");
60483+ return reqmode;
60484+ } else
60485+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60486+ {
60487+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60488+ reqmode & GR_READ ? " reading" : "",
60489+ reqmode & GR_WRITE ? " writing" : "",
60490+ reqmode & GR_EXEC ? " executing" : "");
60491+ return 0;
60492+ } else if (unlikely((mode & reqmode) != reqmode))
60493+ return 0;
60494+
60495+ return reqmode;
60496+}
60497+
60498+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
60499+{
60500+ __u32 mode;
60501+
60502+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
60503+
60504+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60505+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
60506+ return mode;
60507+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60508+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
60509+ return 0;
60510+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
60511+ return 0;
60512+
60513+ return (reqmode);
60514+}
60515+
60516+__u32
60517+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60518+{
60519+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
60520+}
60521+
60522+__u32
60523+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
60524+{
60525+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
60526+}
60527+
60528+__u32
60529+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
60530+{
60531+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
60532+}
60533+
60534+__u32
60535+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
60536+{
60537+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
60538+}
60539+
60540+__u32
60541+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
60542+ mode_t mode)
60543+{
60544+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
60545+ return 1;
60546+
60547+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60548+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60549+ GR_FCHMOD_ACL_MSG);
60550+ } else {
60551+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
60552+ }
60553+}
60554+
60555+__u32
60556+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
60557+ mode_t mode)
60558+{
60559+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60560+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60561+ GR_CHMOD_ACL_MSG);
60562+ } else {
60563+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
60564+ }
60565+}
60566+
60567+__u32
60568+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
60569+{
60570+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
60571+}
60572+
60573+__u32
60574+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
60575+{
60576+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
60577+}
60578+
60579+__u32
60580+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
60581+{
60582+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
60583+}
60584+
60585+__u32
60586+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
60587+{
60588+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
60589+ GR_UNIXCONNECT_ACL_MSG);
60590+}
60591+
60592+/* hardlinks require at minimum create and link permission,
60593+ any additional privilege required is based on the
60594+ privilege of the file being linked to
60595+*/
60596+__u32
60597+gr_acl_handle_link(const struct dentry * new_dentry,
60598+ const struct dentry * parent_dentry,
60599+ const struct vfsmount * parent_mnt,
60600+ const struct dentry * old_dentry,
60601+ const struct vfsmount * old_mnt, const char *to)
60602+{
60603+ __u32 mode;
60604+ __u32 needmode = GR_CREATE | GR_LINK;
60605+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
60606+
60607+ mode =
60608+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
60609+ old_mnt);
60610+
60611+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
60612+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60613+ return mode;
60614+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60615+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60616+ return 0;
60617+ } else if (unlikely((mode & needmode) != needmode))
60618+ return 0;
60619+
60620+ return 1;
60621+}
60622+
60623+__u32
60624+gr_acl_handle_symlink(const struct dentry * new_dentry,
60625+ const struct dentry * parent_dentry,
60626+ const struct vfsmount * parent_mnt, const char *from)
60627+{
60628+ __u32 needmode = GR_WRITE | GR_CREATE;
60629+ __u32 mode;
60630+
60631+ mode =
60632+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
60633+ GR_CREATE | GR_AUDIT_CREATE |
60634+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
60635+
60636+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
60637+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60638+ return mode;
60639+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60640+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60641+ return 0;
60642+ } else if (unlikely((mode & needmode) != needmode))
60643+ return 0;
60644+
60645+ return (GR_WRITE | GR_CREATE);
60646+}
60647+
60648+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
60649+{
60650+ __u32 mode;
60651+
60652+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60653+
60654+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60655+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
60656+ return mode;
60657+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60658+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
60659+ return 0;
60660+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
60661+ return 0;
60662+
60663+ return (reqmode);
60664+}
60665+
60666+__u32
60667+gr_acl_handle_mknod(const struct dentry * new_dentry,
60668+ const struct dentry * parent_dentry,
60669+ const struct vfsmount * parent_mnt,
60670+ const int mode)
60671+{
60672+ __u32 reqmode = GR_WRITE | GR_CREATE;
60673+ if (unlikely(mode & (S_ISUID | S_ISGID)))
60674+ reqmode |= GR_SETID;
60675+
60676+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60677+ reqmode, GR_MKNOD_ACL_MSG);
60678+}
60679+
60680+__u32
60681+gr_acl_handle_mkdir(const struct dentry *new_dentry,
60682+ const struct dentry *parent_dentry,
60683+ const struct vfsmount *parent_mnt)
60684+{
60685+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60686+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
60687+}
60688+
60689+#define RENAME_CHECK_SUCCESS(old, new) \
60690+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
60691+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
60692+
60693+int
60694+gr_acl_handle_rename(struct dentry *new_dentry,
60695+ struct dentry *parent_dentry,
60696+ const struct vfsmount *parent_mnt,
60697+ struct dentry *old_dentry,
60698+ struct inode *old_parent_inode,
60699+ struct vfsmount *old_mnt, const char *newname)
60700+{
60701+ __u32 comp1, comp2;
60702+ int error = 0;
60703+
60704+ if (unlikely(!gr_acl_is_enabled()))
60705+ return 0;
60706+
60707+ if (!new_dentry->d_inode) {
60708+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
60709+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
60710+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
60711+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
60712+ GR_DELETE | GR_AUDIT_DELETE |
60713+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60714+ GR_SUPPRESS, old_mnt);
60715+ } else {
60716+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
60717+ GR_CREATE | GR_DELETE |
60718+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
60719+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60720+ GR_SUPPRESS, parent_mnt);
60721+ comp2 =
60722+ gr_search_file(old_dentry,
60723+ GR_READ | GR_WRITE | GR_AUDIT_READ |
60724+ GR_DELETE | GR_AUDIT_DELETE |
60725+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
60726+ }
60727+
60728+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
60729+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
60730+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60731+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
60732+ && !(comp2 & GR_SUPPRESS)) {
60733+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60734+ error = -EACCES;
60735+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
60736+ error = -EACCES;
60737+
60738+ return error;
60739+}
60740+
60741+void
60742+gr_acl_handle_exit(void)
60743+{
60744+ u16 id;
60745+ char *rolename;
60746+ struct file *exec_file;
60747+
60748+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
60749+ !(current->role->roletype & GR_ROLE_PERSIST))) {
60750+ id = current->acl_role_id;
60751+ rolename = current->role->rolename;
60752+ gr_set_acls(1);
60753+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
60754+ }
60755+
60756+ write_lock(&grsec_exec_file_lock);
60757+ exec_file = current->exec_file;
60758+ current->exec_file = NULL;
60759+ write_unlock(&grsec_exec_file_lock);
60760+
60761+ if (exec_file)
60762+ fput(exec_file);
60763+}
60764+
60765+int
60766+gr_acl_handle_procpidmem(const struct task_struct *task)
60767+{
60768+ if (unlikely(!gr_acl_is_enabled()))
60769+ return 0;
60770+
60771+ if (task != current && task->acl->mode & GR_PROTPROCFD)
60772+ return -EACCES;
60773+
60774+ return 0;
60775+}
60776diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
60777new file mode 100644
60778index 0000000..cd07b96
60779--- /dev/null
60780+++ b/grsecurity/gracl_ip.c
60781@@ -0,0 +1,382 @@
60782+#include <linux/kernel.h>
60783+#include <asm/uaccess.h>
60784+#include <asm/errno.h>
60785+#include <net/sock.h>
60786+#include <linux/file.h>
60787+#include <linux/fs.h>
60788+#include <linux/net.h>
60789+#include <linux/in.h>
60790+#include <linux/skbuff.h>
60791+#include <linux/ip.h>
60792+#include <linux/udp.h>
60793+#include <linux/smp_lock.h>
60794+#include <linux/types.h>
60795+#include <linux/sched.h>
60796+#include <linux/netdevice.h>
60797+#include <linux/inetdevice.h>
60798+#include <linux/gracl.h>
60799+#include <linux/grsecurity.h>
60800+#include <linux/grinternal.h>
60801+
60802+#define GR_BIND 0x01
60803+#define GR_CONNECT 0x02
60804+#define GR_INVERT 0x04
60805+#define GR_BINDOVERRIDE 0x08
60806+#define GR_CONNECTOVERRIDE 0x10
60807+#define GR_SOCK_FAMILY 0x20
60808+
60809+static const char * gr_protocols[IPPROTO_MAX] = {
60810+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
60811+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
60812+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
60813+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
60814+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
60815+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
60816+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
60817+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
60818+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
60819+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
60820+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
60821+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
60822+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
60823+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
60824+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
60825+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
60826+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
60827+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
60828+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
60829+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
60830+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
60831+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
60832+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
60833+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
60834+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
60835+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
60836+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
60837+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
60838+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
60839+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
60840+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
60841+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
60842+ };
60843+
60844+static const char * gr_socktypes[SOCK_MAX] = {
60845+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
60846+ "unknown:7", "unknown:8", "unknown:9", "packet"
60847+ };
60848+
60849+static const char * gr_sockfamilies[AF_MAX+1] = {
60850+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
60851+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
60852+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
60853+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
60854+ };
60855+
60856+const char *
60857+gr_proto_to_name(unsigned char proto)
60858+{
60859+ return gr_protocols[proto];
60860+}
60861+
60862+const char *
60863+gr_socktype_to_name(unsigned char type)
60864+{
60865+ return gr_socktypes[type];
60866+}
60867+
60868+const char *
60869+gr_sockfamily_to_name(unsigned char family)
60870+{
60871+ return gr_sockfamilies[family];
60872+}
60873+
60874+int
60875+gr_search_socket(const int domain, const int type, const int protocol)
60876+{
60877+ struct acl_subject_label *curr;
60878+ const struct cred *cred = current_cred();
60879+
60880+ if (unlikely(!gr_acl_is_enabled()))
60881+ goto exit;
60882+
60883+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
60884+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
60885+ goto exit; // let the kernel handle it
60886+
60887+ curr = current->acl;
60888+
60889+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
60890+ /* the family is allowed, if this is PF_INET allow it only if
60891+ the extra sock type/protocol checks pass */
60892+ if (domain == PF_INET)
60893+ goto inet_check;
60894+ goto exit;
60895+ } else {
60896+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60897+ __u32 fakeip = 0;
60898+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60899+ current->role->roletype, cred->uid,
60900+ cred->gid, current->exec_file ?
60901+ gr_to_filename(current->exec_file->f_path.dentry,
60902+ current->exec_file->f_path.mnt) :
60903+ curr->filename, curr->filename,
60904+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
60905+ &current->signal->saved_ip);
60906+ goto exit;
60907+ }
60908+ goto exit_fail;
60909+ }
60910+
60911+inet_check:
60912+ /* the rest of this checking is for IPv4 only */
60913+ if (!curr->ips)
60914+ goto exit;
60915+
60916+ if ((curr->ip_type & (1 << type)) &&
60917+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
60918+ goto exit;
60919+
60920+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60921+ /* we don't place acls on raw sockets , and sometimes
60922+ dgram/ip sockets are opened for ioctl and not
60923+ bind/connect, so we'll fake a bind learn log */
60924+ if (type == SOCK_RAW || type == SOCK_PACKET) {
60925+ __u32 fakeip = 0;
60926+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60927+ current->role->roletype, cred->uid,
60928+ cred->gid, current->exec_file ?
60929+ gr_to_filename(current->exec_file->f_path.dentry,
60930+ current->exec_file->f_path.mnt) :
60931+ curr->filename, curr->filename,
60932+ &fakeip, 0, type,
60933+ protocol, GR_CONNECT, &current->signal->saved_ip);
60934+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
60935+ __u32 fakeip = 0;
60936+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60937+ current->role->roletype, cred->uid,
60938+ cred->gid, current->exec_file ?
60939+ gr_to_filename(current->exec_file->f_path.dentry,
60940+ current->exec_file->f_path.mnt) :
60941+ curr->filename, curr->filename,
60942+ &fakeip, 0, type,
60943+ protocol, GR_BIND, &current->signal->saved_ip);
60944+ }
60945+ /* we'll log when they use connect or bind */
60946+ goto exit;
60947+ }
60948+
60949+exit_fail:
60950+ if (domain == PF_INET)
60951+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
60952+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
60953+ else
60954+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
60955+ gr_socktype_to_name(type), protocol);
60956+
60957+ return 0;
60958+exit:
60959+ return 1;
60960+}
60961+
60962+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
60963+{
60964+ if ((ip->mode & mode) &&
60965+ (ip_port >= ip->low) &&
60966+ (ip_port <= ip->high) &&
60967+ ((ntohl(ip_addr) & our_netmask) ==
60968+ (ntohl(our_addr) & our_netmask))
60969+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
60970+ && (ip->type & (1 << type))) {
60971+ if (ip->mode & GR_INVERT)
60972+ return 2; // specifically denied
60973+ else
60974+ return 1; // allowed
60975+ }
60976+
60977+ return 0; // not specifically allowed, may continue parsing
60978+}
60979+
60980+static int
60981+gr_search_connectbind(const int full_mode, struct sock *sk,
60982+ struct sockaddr_in *addr, const int type)
60983+{
60984+ char iface[IFNAMSIZ] = {0};
60985+ struct acl_subject_label *curr;
60986+ struct acl_ip_label *ip;
60987+ struct inet_sock *isk;
60988+ struct net_device *dev;
60989+ struct in_device *idev;
60990+ unsigned long i;
60991+ int ret;
60992+ int mode = full_mode & (GR_BIND | GR_CONNECT);
60993+ __u32 ip_addr = 0;
60994+ __u32 our_addr;
60995+ __u32 our_netmask;
60996+ char *p;
60997+ __u16 ip_port = 0;
60998+ const struct cred *cred = current_cred();
60999+
61000+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
61001+ return 0;
61002+
61003+ curr = current->acl;
61004+ isk = inet_sk(sk);
61005+
61006+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
61007+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
61008+ addr->sin_addr.s_addr = curr->inaddr_any_override;
61009+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
61010+ struct sockaddr_in saddr;
61011+ int err;
61012+
61013+ saddr.sin_family = AF_INET;
61014+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
61015+ saddr.sin_port = isk->sport;
61016+
61017+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61018+ if (err)
61019+ return err;
61020+
61021+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
61022+ if (err)
61023+ return err;
61024+ }
61025+
61026+ if (!curr->ips)
61027+ return 0;
61028+
61029+ ip_addr = addr->sin_addr.s_addr;
61030+ ip_port = ntohs(addr->sin_port);
61031+
61032+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
61033+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
61034+ current->role->roletype, cred->uid,
61035+ cred->gid, current->exec_file ?
61036+ gr_to_filename(current->exec_file->f_path.dentry,
61037+ current->exec_file->f_path.mnt) :
61038+ curr->filename, curr->filename,
61039+ &ip_addr, ip_port, type,
61040+ sk->sk_protocol, mode, &current->signal->saved_ip);
61041+ return 0;
61042+ }
61043+
61044+ for (i = 0; i < curr->ip_num; i++) {
61045+ ip = *(curr->ips + i);
61046+ if (ip->iface != NULL) {
61047+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
61048+ p = strchr(iface, ':');
61049+ if (p != NULL)
61050+ *p = '\0';
61051+ dev = dev_get_by_name(sock_net(sk), iface);
61052+ if (dev == NULL)
61053+ continue;
61054+ idev = in_dev_get(dev);
61055+ if (idev == NULL) {
61056+ dev_put(dev);
61057+ continue;
61058+ }
61059+ rcu_read_lock();
61060+ for_ifa(idev) {
61061+ if (!strcmp(ip->iface, ifa->ifa_label)) {
61062+ our_addr = ifa->ifa_address;
61063+ our_netmask = 0xffffffff;
61064+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61065+ if (ret == 1) {
61066+ rcu_read_unlock();
61067+ in_dev_put(idev);
61068+ dev_put(dev);
61069+ return 0;
61070+ } else if (ret == 2) {
61071+ rcu_read_unlock();
61072+ in_dev_put(idev);
61073+ dev_put(dev);
61074+ goto denied;
61075+ }
61076+ }
61077+ } endfor_ifa(idev);
61078+ rcu_read_unlock();
61079+ in_dev_put(idev);
61080+ dev_put(dev);
61081+ } else {
61082+ our_addr = ip->addr;
61083+ our_netmask = ip->netmask;
61084+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61085+ if (ret == 1)
61086+ return 0;
61087+ else if (ret == 2)
61088+ goto denied;
61089+ }
61090+ }
61091+
61092+denied:
61093+ if (mode == GR_BIND)
61094+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61095+ else if (mode == GR_CONNECT)
61096+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61097+
61098+ return -EACCES;
61099+}
61100+
61101+int
61102+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61103+{
61104+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61105+}
61106+
61107+int
61108+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61109+{
61110+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61111+}
61112+
61113+int gr_search_listen(struct socket *sock)
61114+{
61115+ struct sock *sk = sock->sk;
61116+ struct sockaddr_in addr;
61117+
61118+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61119+ addr.sin_port = inet_sk(sk)->sport;
61120+
61121+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61122+}
61123+
61124+int gr_search_accept(struct socket *sock)
61125+{
61126+ struct sock *sk = sock->sk;
61127+ struct sockaddr_in addr;
61128+
61129+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61130+ addr.sin_port = inet_sk(sk)->sport;
61131+
61132+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61133+}
61134+
61135+int
61136+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61137+{
61138+ if (addr)
61139+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61140+ else {
61141+ struct sockaddr_in sin;
61142+ const struct inet_sock *inet = inet_sk(sk);
61143+
61144+ sin.sin_addr.s_addr = inet->daddr;
61145+ sin.sin_port = inet->dport;
61146+
61147+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61148+ }
61149+}
61150+
61151+int
61152+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61153+{
61154+ struct sockaddr_in sin;
61155+
61156+ if (unlikely(skb->len < sizeof (struct udphdr)))
61157+ return 0; // skip this packet
61158+
61159+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61160+ sin.sin_port = udp_hdr(skb)->source;
61161+
61162+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61163+}
61164diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61165new file mode 100644
61166index 0000000..34bdd46
61167--- /dev/null
61168+++ b/grsecurity/gracl_learn.c
61169@@ -0,0 +1,208 @@
61170+#include <linux/kernel.h>
61171+#include <linux/mm.h>
61172+#include <linux/sched.h>
61173+#include <linux/poll.h>
61174+#include <linux/smp_lock.h>
61175+#include <linux/string.h>
61176+#include <linux/file.h>
61177+#include <linux/types.h>
61178+#include <linux/vmalloc.h>
61179+#include <linux/grinternal.h>
61180+
61181+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61182+ size_t count, loff_t *ppos);
61183+extern int gr_acl_is_enabled(void);
61184+
61185+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61186+static int gr_learn_attached;
61187+
61188+/* use a 512k buffer */
61189+#define LEARN_BUFFER_SIZE (512 * 1024)
61190+
61191+static DEFINE_SPINLOCK(gr_learn_lock);
61192+static DEFINE_MUTEX(gr_learn_user_mutex);
61193+
61194+/* we need to maintain two buffers, so that the kernel context of grlearn
61195+ uses a semaphore around the userspace copying, and the other kernel contexts
61196+ use a spinlock when copying into the buffer, since they cannot sleep
61197+*/
61198+static char *learn_buffer;
61199+static char *learn_buffer_user;
61200+static int learn_buffer_len;
61201+static int learn_buffer_user_len;
61202+
61203+static ssize_t
61204+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61205+{
61206+ DECLARE_WAITQUEUE(wait, current);
61207+ ssize_t retval = 0;
61208+
61209+ add_wait_queue(&learn_wait, &wait);
61210+ set_current_state(TASK_INTERRUPTIBLE);
61211+ do {
61212+ mutex_lock(&gr_learn_user_mutex);
61213+ spin_lock(&gr_learn_lock);
61214+ if (learn_buffer_len)
61215+ break;
61216+ spin_unlock(&gr_learn_lock);
61217+ mutex_unlock(&gr_learn_user_mutex);
61218+ if (file->f_flags & O_NONBLOCK) {
61219+ retval = -EAGAIN;
61220+ goto out;
61221+ }
61222+ if (signal_pending(current)) {
61223+ retval = -ERESTARTSYS;
61224+ goto out;
61225+ }
61226+
61227+ schedule();
61228+ } while (1);
61229+
61230+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61231+ learn_buffer_user_len = learn_buffer_len;
61232+ retval = learn_buffer_len;
61233+ learn_buffer_len = 0;
61234+
61235+ spin_unlock(&gr_learn_lock);
61236+
61237+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61238+ retval = -EFAULT;
61239+
61240+ mutex_unlock(&gr_learn_user_mutex);
61241+out:
61242+ set_current_state(TASK_RUNNING);
61243+ remove_wait_queue(&learn_wait, &wait);
61244+ return retval;
61245+}
61246+
61247+static unsigned int
61248+poll_learn(struct file * file, poll_table * wait)
61249+{
61250+ poll_wait(file, &learn_wait, wait);
61251+
61252+ if (learn_buffer_len)
61253+ return (POLLIN | POLLRDNORM);
61254+
61255+ return 0;
61256+}
61257+
61258+void
61259+gr_clear_learn_entries(void)
61260+{
61261+ char *tmp;
61262+
61263+ mutex_lock(&gr_learn_user_mutex);
61264+ spin_lock(&gr_learn_lock);
61265+ tmp = learn_buffer;
61266+ learn_buffer = NULL;
61267+ spin_unlock(&gr_learn_lock);
61268+ if (tmp)
61269+ vfree(tmp);
61270+ if (learn_buffer_user != NULL) {
61271+ vfree(learn_buffer_user);
61272+ learn_buffer_user = NULL;
61273+ }
61274+ learn_buffer_len = 0;
61275+ mutex_unlock(&gr_learn_user_mutex);
61276+
61277+ return;
61278+}
61279+
61280+void
61281+gr_add_learn_entry(const char *fmt, ...)
61282+{
61283+ va_list args;
61284+ unsigned int len;
61285+
61286+ if (!gr_learn_attached)
61287+ return;
61288+
61289+ spin_lock(&gr_learn_lock);
61290+
61291+ /* leave a gap at the end so we know when it's "full" but don't have to
61292+ compute the exact length of the string we're trying to append
61293+ */
61294+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61295+ spin_unlock(&gr_learn_lock);
61296+ wake_up_interruptible(&learn_wait);
61297+ return;
61298+ }
61299+ if (learn_buffer == NULL) {
61300+ spin_unlock(&gr_learn_lock);
61301+ return;
61302+ }
61303+
61304+ va_start(args, fmt);
61305+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61306+ va_end(args);
61307+
61308+ learn_buffer_len += len + 1;
61309+
61310+ spin_unlock(&gr_learn_lock);
61311+ wake_up_interruptible(&learn_wait);
61312+
61313+ return;
61314+}
61315+
61316+static int
61317+open_learn(struct inode *inode, struct file *file)
61318+{
61319+ if (file->f_mode & FMODE_READ && gr_learn_attached)
61320+ return -EBUSY;
61321+ if (file->f_mode & FMODE_READ) {
61322+ int retval = 0;
61323+ mutex_lock(&gr_learn_user_mutex);
61324+ if (learn_buffer == NULL)
61325+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61326+ if (learn_buffer_user == NULL)
61327+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61328+ if (learn_buffer == NULL) {
61329+ retval = -ENOMEM;
61330+ goto out_error;
61331+ }
61332+ if (learn_buffer_user == NULL) {
61333+ retval = -ENOMEM;
61334+ goto out_error;
61335+ }
61336+ learn_buffer_len = 0;
61337+ learn_buffer_user_len = 0;
61338+ gr_learn_attached = 1;
61339+out_error:
61340+ mutex_unlock(&gr_learn_user_mutex);
61341+ return retval;
61342+ }
61343+ return 0;
61344+}
61345+
61346+static int
61347+close_learn(struct inode *inode, struct file *file)
61348+{
61349+ if (file->f_mode & FMODE_READ) {
61350+ char *tmp = NULL;
61351+ mutex_lock(&gr_learn_user_mutex);
61352+ spin_lock(&gr_learn_lock);
61353+ tmp = learn_buffer;
61354+ learn_buffer = NULL;
61355+ spin_unlock(&gr_learn_lock);
61356+ if (tmp)
61357+ vfree(tmp);
61358+ if (learn_buffer_user != NULL) {
61359+ vfree(learn_buffer_user);
61360+ learn_buffer_user = NULL;
61361+ }
61362+ learn_buffer_len = 0;
61363+ learn_buffer_user_len = 0;
61364+ gr_learn_attached = 0;
61365+ mutex_unlock(&gr_learn_user_mutex);
61366+ }
61367+
61368+ return 0;
61369+}
61370+
61371+const struct file_operations grsec_fops = {
61372+ .read = read_learn,
61373+ .write = write_grsec_handler,
61374+ .open = open_learn,
61375+ .release = close_learn,
61376+ .poll = poll_learn,
61377+};
61378diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
61379new file mode 100644
61380index 0000000..70b2179
61381--- /dev/null
61382+++ b/grsecurity/gracl_res.c
61383@@ -0,0 +1,67 @@
61384+#include <linux/kernel.h>
61385+#include <linux/sched.h>
61386+#include <linux/gracl.h>
61387+#include <linux/grinternal.h>
61388+
61389+static const char *restab_log[] = {
61390+ [RLIMIT_CPU] = "RLIMIT_CPU",
61391+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61392+ [RLIMIT_DATA] = "RLIMIT_DATA",
61393+ [RLIMIT_STACK] = "RLIMIT_STACK",
61394+ [RLIMIT_CORE] = "RLIMIT_CORE",
61395+ [RLIMIT_RSS] = "RLIMIT_RSS",
61396+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
61397+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61398+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61399+ [RLIMIT_AS] = "RLIMIT_AS",
61400+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61401+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61402+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61403+ [RLIMIT_NICE] = "RLIMIT_NICE",
61404+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61405+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61406+ [GR_CRASH_RES] = "RLIMIT_CRASH"
61407+};
61408+
61409+void
61410+gr_log_resource(const struct task_struct *task,
61411+ const int res, const unsigned long wanted, const int gt)
61412+{
61413+ const struct cred *cred;
61414+ unsigned long rlim;
61415+
61416+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
61417+ return;
61418+
61419+ // not yet supported resource
61420+ if (unlikely(!restab_log[res]))
61421+ return;
61422+
61423+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61424+ rlim = task->signal->rlim[res].rlim_max;
61425+ else
61426+ rlim = task->signal->rlim[res].rlim_cur;
61427+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61428+ return;
61429+
61430+ rcu_read_lock();
61431+ cred = __task_cred(task);
61432+
61433+ if (res == RLIMIT_NPROC &&
61434+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
61435+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
61436+ goto out_rcu_unlock;
61437+ else if (res == RLIMIT_MEMLOCK &&
61438+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
61439+ goto out_rcu_unlock;
61440+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
61441+ goto out_rcu_unlock;
61442+ rcu_read_unlock();
61443+
61444+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
61445+
61446+ return;
61447+out_rcu_unlock:
61448+ rcu_read_unlock();
61449+ return;
61450+}
61451diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
61452new file mode 100644
61453index 0000000..1d1b734
61454--- /dev/null
61455+++ b/grsecurity/gracl_segv.c
61456@@ -0,0 +1,284 @@
61457+#include <linux/kernel.h>
61458+#include <linux/mm.h>
61459+#include <asm/uaccess.h>
61460+#include <asm/errno.h>
61461+#include <asm/mman.h>
61462+#include <net/sock.h>
61463+#include <linux/file.h>
61464+#include <linux/fs.h>
61465+#include <linux/net.h>
61466+#include <linux/in.h>
61467+#include <linux/smp_lock.h>
61468+#include <linux/slab.h>
61469+#include <linux/types.h>
61470+#include <linux/sched.h>
61471+#include <linux/timer.h>
61472+#include <linux/gracl.h>
61473+#include <linux/grsecurity.h>
61474+#include <linux/grinternal.h>
61475+
61476+static struct crash_uid *uid_set;
61477+static unsigned short uid_used;
61478+static DEFINE_SPINLOCK(gr_uid_lock);
61479+extern rwlock_t gr_inode_lock;
61480+extern struct acl_subject_label *
61481+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
61482+ struct acl_role_label *role);
61483+extern int gr_fake_force_sig(int sig, struct task_struct *t);
61484+
61485+int
61486+gr_init_uidset(void)
61487+{
61488+ uid_set =
61489+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
61490+ uid_used = 0;
61491+
61492+ return uid_set ? 1 : 0;
61493+}
61494+
61495+void
61496+gr_free_uidset(void)
61497+{
61498+ if (uid_set)
61499+ kfree(uid_set);
61500+
61501+ return;
61502+}
61503+
61504+int
61505+gr_find_uid(const uid_t uid)
61506+{
61507+ struct crash_uid *tmp = uid_set;
61508+ uid_t buid;
61509+ int low = 0, high = uid_used - 1, mid;
61510+
61511+ while (high >= low) {
61512+ mid = (low + high) >> 1;
61513+ buid = tmp[mid].uid;
61514+ if (buid == uid)
61515+ return mid;
61516+ if (buid > uid)
61517+ high = mid - 1;
61518+ if (buid < uid)
61519+ low = mid + 1;
61520+ }
61521+
61522+ return -1;
61523+}
61524+
61525+static __inline__ void
61526+gr_insertsort(void)
61527+{
61528+ unsigned short i, j;
61529+ struct crash_uid index;
61530+
61531+ for (i = 1; i < uid_used; i++) {
61532+ index = uid_set[i];
61533+ j = i;
61534+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
61535+ uid_set[j] = uid_set[j - 1];
61536+ j--;
61537+ }
61538+ uid_set[j] = index;
61539+ }
61540+
61541+ return;
61542+}
61543+
61544+static __inline__ void
61545+gr_insert_uid(const uid_t uid, const unsigned long expires)
61546+{
61547+ int loc;
61548+
61549+ if (uid_used == GR_UIDTABLE_MAX)
61550+ return;
61551+
61552+ loc = gr_find_uid(uid);
61553+
61554+ if (loc >= 0) {
61555+ uid_set[loc].expires = expires;
61556+ return;
61557+ }
61558+
61559+ uid_set[uid_used].uid = uid;
61560+ uid_set[uid_used].expires = expires;
61561+ uid_used++;
61562+
61563+ gr_insertsort();
61564+
61565+ return;
61566+}
61567+
61568+void
61569+gr_remove_uid(const unsigned short loc)
61570+{
61571+ unsigned short i;
61572+
61573+ for (i = loc + 1; i < uid_used; i++)
61574+ uid_set[i - 1] = uid_set[i];
61575+
61576+ uid_used--;
61577+
61578+ return;
61579+}
61580+
61581+int
61582+gr_check_crash_uid(const uid_t uid)
61583+{
61584+ int loc;
61585+ int ret = 0;
61586+
61587+ if (unlikely(!gr_acl_is_enabled()))
61588+ return 0;
61589+
61590+ spin_lock(&gr_uid_lock);
61591+ loc = gr_find_uid(uid);
61592+
61593+ if (loc < 0)
61594+ goto out_unlock;
61595+
61596+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
61597+ gr_remove_uid(loc);
61598+ else
61599+ ret = 1;
61600+
61601+out_unlock:
61602+ spin_unlock(&gr_uid_lock);
61603+ return ret;
61604+}
61605+
61606+static __inline__ int
61607+proc_is_setxid(const struct cred *cred)
61608+{
61609+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
61610+ cred->uid != cred->fsuid)
61611+ return 1;
61612+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
61613+ cred->gid != cred->fsgid)
61614+ return 1;
61615+
61616+ return 0;
61617+}
61618+
61619+void
61620+gr_handle_crash(struct task_struct *task, const int sig)
61621+{
61622+ struct acl_subject_label *curr;
61623+ struct task_struct *tsk, *tsk2;
61624+ const struct cred *cred;
61625+ const struct cred *cred2;
61626+
61627+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
61628+ return;
61629+
61630+ if (unlikely(!gr_acl_is_enabled()))
61631+ return;
61632+
61633+ curr = task->acl;
61634+
61635+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
61636+ return;
61637+
61638+ if (time_before_eq(curr->expires, get_seconds())) {
61639+ curr->expires = 0;
61640+ curr->crashes = 0;
61641+ }
61642+
61643+ curr->crashes++;
61644+
61645+ if (!curr->expires)
61646+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
61647+
61648+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61649+ time_after(curr->expires, get_seconds())) {
61650+ rcu_read_lock();
61651+ cred = __task_cred(task);
61652+ if (cred->uid && proc_is_setxid(cred)) {
61653+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61654+ spin_lock(&gr_uid_lock);
61655+ gr_insert_uid(cred->uid, curr->expires);
61656+ spin_unlock(&gr_uid_lock);
61657+ curr->expires = 0;
61658+ curr->crashes = 0;
61659+ read_lock(&tasklist_lock);
61660+ do_each_thread(tsk2, tsk) {
61661+ cred2 = __task_cred(tsk);
61662+ if (tsk != task && cred2->uid == cred->uid)
61663+ gr_fake_force_sig(SIGKILL, tsk);
61664+ } while_each_thread(tsk2, tsk);
61665+ read_unlock(&tasklist_lock);
61666+ } else {
61667+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61668+ read_lock(&tasklist_lock);
61669+ read_lock(&grsec_exec_file_lock);
61670+ do_each_thread(tsk2, tsk) {
61671+ if (likely(tsk != task)) {
61672+ // if this thread has the same subject as the one that triggered
61673+ // RES_CRASH and it's the same binary, kill it
61674+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
61675+ gr_fake_force_sig(SIGKILL, tsk);
61676+ }
61677+ } while_each_thread(tsk2, tsk);
61678+ read_unlock(&grsec_exec_file_lock);
61679+ read_unlock(&tasklist_lock);
61680+ }
61681+ rcu_read_unlock();
61682+ }
61683+
61684+ return;
61685+}
61686+
61687+int
61688+gr_check_crash_exec(const struct file *filp)
61689+{
61690+ struct acl_subject_label *curr;
61691+
61692+ if (unlikely(!gr_acl_is_enabled()))
61693+ return 0;
61694+
61695+ read_lock(&gr_inode_lock);
61696+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
61697+ filp->f_path.dentry->d_inode->i_sb->s_dev,
61698+ current->role);
61699+ read_unlock(&gr_inode_lock);
61700+
61701+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
61702+ (!curr->crashes && !curr->expires))
61703+ return 0;
61704+
61705+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61706+ time_after(curr->expires, get_seconds()))
61707+ return 1;
61708+ else if (time_before_eq(curr->expires, get_seconds())) {
61709+ curr->crashes = 0;
61710+ curr->expires = 0;
61711+ }
61712+
61713+ return 0;
61714+}
61715+
61716+void
61717+gr_handle_alertkill(struct task_struct *task)
61718+{
61719+ struct acl_subject_label *curracl;
61720+ __u32 curr_ip;
61721+ struct task_struct *p, *p2;
61722+
61723+ if (unlikely(!gr_acl_is_enabled()))
61724+ return;
61725+
61726+ curracl = task->acl;
61727+ curr_ip = task->signal->curr_ip;
61728+
61729+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
61730+ read_lock(&tasklist_lock);
61731+ do_each_thread(p2, p) {
61732+ if (p->signal->curr_ip == curr_ip)
61733+ gr_fake_force_sig(SIGKILL, p);
61734+ } while_each_thread(p2, p);
61735+ read_unlock(&tasklist_lock);
61736+ } else if (curracl->mode & GR_KILLPROC)
61737+ gr_fake_force_sig(SIGKILL, task);
61738+
61739+ return;
61740+}
61741diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
61742new file mode 100644
61743index 0000000..9d83a69
61744--- /dev/null
61745+++ b/grsecurity/gracl_shm.c
61746@@ -0,0 +1,40 @@
61747+#include <linux/kernel.h>
61748+#include <linux/mm.h>
61749+#include <linux/sched.h>
61750+#include <linux/file.h>
61751+#include <linux/ipc.h>
61752+#include <linux/gracl.h>
61753+#include <linux/grsecurity.h>
61754+#include <linux/grinternal.h>
61755+
61756+int
61757+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61758+ const time_t shm_createtime, const uid_t cuid, const int shmid)
61759+{
61760+ struct task_struct *task;
61761+
61762+ if (!gr_acl_is_enabled())
61763+ return 1;
61764+
61765+ rcu_read_lock();
61766+ read_lock(&tasklist_lock);
61767+
61768+ task = find_task_by_vpid(shm_cprid);
61769+
61770+ if (unlikely(!task))
61771+ task = find_task_by_vpid(shm_lapid);
61772+
61773+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
61774+ (task->pid == shm_lapid)) &&
61775+ (task->acl->mode & GR_PROTSHM) &&
61776+ (task->acl != current->acl))) {
61777+ read_unlock(&tasklist_lock);
61778+ rcu_read_unlock();
61779+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
61780+ return 0;
61781+ }
61782+ read_unlock(&tasklist_lock);
61783+ rcu_read_unlock();
61784+
61785+ return 1;
61786+}
61787diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
61788new file mode 100644
61789index 0000000..bc0be01
61790--- /dev/null
61791+++ b/grsecurity/grsec_chdir.c
61792@@ -0,0 +1,19 @@
61793+#include <linux/kernel.h>
61794+#include <linux/sched.h>
61795+#include <linux/fs.h>
61796+#include <linux/file.h>
61797+#include <linux/grsecurity.h>
61798+#include <linux/grinternal.h>
61799+
61800+void
61801+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
61802+{
61803+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61804+ if ((grsec_enable_chdir && grsec_enable_group &&
61805+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
61806+ !grsec_enable_group)) {
61807+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
61808+ }
61809+#endif
61810+ return;
61811+}
61812diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
61813new file mode 100644
61814index 0000000..197bdd5
61815--- /dev/null
61816+++ b/grsecurity/grsec_chroot.c
61817@@ -0,0 +1,386 @@
61818+#include <linux/kernel.h>
61819+#include <linux/module.h>
61820+#include <linux/sched.h>
61821+#include <linux/file.h>
61822+#include <linux/fs.h>
61823+#include <linux/mount.h>
61824+#include <linux/types.h>
61825+#include <linux/pid_namespace.h>
61826+#include <linux/grsecurity.h>
61827+#include <linux/grinternal.h>
61828+
61829+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
61830+{
61831+#ifdef CONFIG_GRKERNSEC
61832+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
61833+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
61834+ task->gr_is_chrooted = 1;
61835+ else
61836+ task->gr_is_chrooted = 0;
61837+
61838+ task->gr_chroot_dentry = path->dentry;
61839+#endif
61840+ return;
61841+}
61842+
61843+void gr_clear_chroot_entries(struct task_struct *task)
61844+{
61845+#ifdef CONFIG_GRKERNSEC
61846+ task->gr_is_chrooted = 0;
61847+ task->gr_chroot_dentry = NULL;
61848+#endif
61849+ return;
61850+}
61851+
61852+int
61853+gr_handle_chroot_unix(const pid_t pid)
61854+{
61855+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61856+ struct task_struct *p;
61857+
61858+ if (unlikely(!grsec_enable_chroot_unix))
61859+ return 1;
61860+
61861+ if (likely(!proc_is_chrooted(current)))
61862+ return 1;
61863+
61864+ rcu_read_lock();
61865+ read_lock(&tasklist_lock);
61866+
61867+ p = find_task_by_vpid_unrestricted(pid);
61868+ if (unlikely(p && !have_same_root(current, p))) {
61869+ read_unlock(&tasklist_lock);
61870+ rcu_read_unlock();
61871+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
61872+ return 0;
61873+ }
61874+ read_unlock(&tasklist_lock);
61875+ rcu_read_unlock();
61876+#endif
61877+ return 1;
61878+}
61879+
61880+int
61881+gr_handle_chroot_nice(void)
61882+{
61883+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61884+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
61885+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
61886+ return -EPERM;
61887+ }
61888+#endif
61889+ return 0;
61890+}
61891+
61892+int
61893+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
61894+{
61895+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61896+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
61897+ && proc_is_chrooted(current)) {
61898+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
61899+ return -EACCES;
61900+ }
61901+#endif
61902+ return 0;
61903+}
61904+
61905+int
61906+gr_handle_chroot_rawio(const struct inode *inode)
61907+{
61908+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61909+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
61910+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
61911+ return 1;
61912+#endif
61913+ return 0;
61914+}
61915+
61916+int
61917+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
61918+{
61919+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61920+ struct task_struct *p;
61921+ int ret = 0;
61922+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
61923+ return ret;
61924+
61925+ read_lock(&tasklist_lock);
61926+ do_each_pid_task(pid, type, p) {
61927+ if (!have_same_root(current, p)) {
61928+ ret = 1;
61929+ goto out;
61930+ }
61931+ } while_each_pid_task(pid, type, p);
61932+out:
61933+ read_unlock(&tasklist_lock);
61934+ return ret;
61935+#endif
61936+ return 0;
61937+}
61938+
61939+int
61940+gr_pid_is_chrooted(struct task_struct *p)
61941+{
61942+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61943+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
61944+ return 0;
61945+
61946+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
61947+ !have_same_root(current, p)) {
61948+ return 1;
61949+ }
61950+#endif
61951+ return 0;
61952+}
61953+
61954+EXPORT_SYMBOL(gr_pid_is_chrooted);
61955+
61956+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
61957+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
61958+{
61959+ struct dentry *dentry = (struct dentry *)u_dentry;
61960+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
61961+ struct dentry *realroot;
61962+ struct vfsmount *realrootmnt;
61963+ struct dentry *currentroot;
61964+ struct vfsmount *currentmnt;
61965+ struct task_struct *reaper = &init_task;
61966+ int ret = 1;
61967+
61968+ read_lock(&reaper->fs->lock);
61969+ realrootmnt = mntget(reaper->fs->root.mnt);
61970+ realroot = dget(reaper->fs->root.dentry);
61971+ read_unlock(&reaper->fs->lock);
61972+
61973+ read_lock(&current->fs->lock);
61974+ currentmnt = mntget(current->fs->root.mnt);
61975+ currentroot = dget(current->fs->root.dentry);
61976+ read_unlock(&current->fs->lock);
61977+
61978+ spin_lock(&dcache_lock);
61979+ for (;;) {
61980+ if (unlikely((dentry == realroot && mnt == realrootmnt)
61981+ || (dentry == currentroot && mnt == currentmnt)))
61982+ break;
61983+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
61984+ if (mnt->mnt_parent == mnt)
61985+ break;
61986+ dentry = mnt->mnt_mountpoint;
61987+ mnt = mnt->mnt_parent;
61988+ continue;
61989+ }
61990+ dentry = dentry->d_parent;
61991+ }
61992+ spin_unlock(&dcache_lock);
61993+
61994+ dput(currentroot);
61995+ mntput(currentmnt);
61996+
61997+ /* access is outside of chroot */
61998+ if (dentry == realroot && mnt == realrootmnt)
61999+ ret = 0;
62000+
62001+ dput(realroot);
62002+ mntput(realrootmnt);
62003+ return ret;
62004+}
62005+#endif
62006+
62007+int
62008+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
62009+{
62010+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
62011+ if (!grsec_enable_chroot_fchdir)
62012+ return 1;
62013+
62014+ if (!proc_is_chrooted(current))
62015+ return 1;
62016+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
62017+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
62018+ return 0;
62019+ }
62020+#endif
62021+ return 1;
62022+}
62023+
62024+int
62025+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62026+ const time_t shm_createtime)
62027+{
62028+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
62029+ struct task_struct *p;
62030+ time_t starttime;
62031+
62032+ if (unlikely(!grsec_enable_chroot_shmat))
62033+ return 1;
62034+
62035+ if (likely(!proc_is_chrooted(current)))
62036+ return 1;
62037+
62038+ rcu_read_lock();
62039+ read_lock(&tasklist_lock);
62040+
62041+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
62042+ starttime = p->start_time.tv_sec;
62043+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
62044+ if (have_same_root(current, p)) {
62045+ goto allow;
62046+ } else {
62047+ read_unlock(&tasklist_lock);
62048+ rcu_read_unlock();
62049+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62050+ return 0;
62051+ }
62052+ }
62053+ /* creator exited, pid reuse, fall through to next check */
62054+ }
62055+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
62056+ if (unlikely(!have_same_root(current, p))) {
62057+ read_unlock(&tasklist_lock);
62058+ rcu_read_unlock();
62059+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
62060+ return 0;
62061+ }
62062+ }
62063+
62064+allow:
62065+ read_unlock(&tasklist_lock);
62066+ rcu_read_unlock();
62067+#endif
62068+ return 1;
62069+}
62070+
62071+void
62072+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
62073+{
62074+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
62075+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
62076+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
62077+#endif
62078+ return;
62079+}
62080+
62081+int
62082+gr_handle_chroot_mknod(const struct dentry *dentry,
62083+ const struct vfsmount *mnt, const int mode)
62084+{
62085+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62086+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62087+ proc_is_chrooted(current)) {
62088+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62089+ return -EPERM;
62090+ }
62091+#endif
62092+ return 0;
62093+}
62094+
62095+int
62096+gr_handle_chroot_mount(const struct dentry *dentry,
62097+ const struct vfsmount *mnt, const char *dev_name)
62098+{
62099+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62100+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62101+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62102+ return -EPERM;
62103+ }
62104+#endif
62105+ return 0;
62106+}
62107+
62108+int
62109+gr_handle_chroot_pivot(void)
62110+{
62111+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62112+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62113+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62114+ return -EPERM;
62115+ }
62116+#endif
62117+ return 0;
62118+}
62119+
62120+int
62121+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62122+{
62123+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62124+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62125+ !gr_is_outside_chroot(dentry, mnt)) {
62126+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62127+ return -EPERM;
62128+ }
62129+#endif
62130+ return 0;
62131+}
62132+
62133+extern const char *captab_log[];
62134+extern int captab_log_entries;
62135+
62136+int
62137+gr_chroot_is_capable(const int cap)
62138+{
62139+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62140+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62141+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62142+ if (cap_raised(chroot_caps, cap)) {
62143+ const struct cred *creds = current_cred();
62144+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62145+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62146+ }
62147+ return 0;
62148+ }
62149+ }
62150+#endif
62151+ return 1;
62152+}
62153+
62154+int
62155+gr_chroot_is_capable_nolog(const int cap)
62156+{
62157+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62158+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62159+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62160+ if (cap_raised(chroot_caps, cap)) {
62161+ return 0;
62162+ }
62163+ }
62164+#endif
62165+ return 1;
62166+}
62167+
62168+int
62169+gr_handle_chroot_sysctl(const int op)
62170+{
62171+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62172+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62173+ && (op & MAY_WRITE))
62174+ return -EACCES;
62175+#endif
62176+ return 0;
62177+}
62178+
62179+void
62180+gr_handle_chroot_chdir(struct path *path)
62181+{
62182+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62183+ if (grsec_enable_chroot_chdir)
62184+ set_fs_pwd(current->fs, path);
62185+#endif
62186+ return;
62187+}
62188+
62189+int
62190+gr_handle_chroot_chmod(const struct dentry *dentry,
62191+ const struct vfsmount *mnt, const int mode)
62192+{
62193+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62194+ /* allow chmod +s on directories, but not on files */
62195+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62196+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62197+ proc_is_chrooted(current)) {
62198+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62199+ return -EPERM;
62200+ }
62201+#endif
62202+ return 0;
62203+}
62204diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62205new file mode 100644
62206index 0000000..b81db5b
62207--- /dev/null
62208+++ b/grsecurity/grsec_disabled.c
62209@@ -0,0 +1,439 @@
62210+#include <linux/kernel.h>
62211+#include <linux/module.h>
62212+#include <linux/sched.h>
62213+#include <linux/file.h>
62214+#include <linux/fs.h>
62215+#include <linux/kdev_t.h>
62216+#include <linux/net.h>
62217+#include <linux/in.h>
62218+#include <linux/ip.h>
62219+#include <linux/skbuff.h>
62220+#include <linux/sysctl.h>
62221+
62222+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62223+void
62224+pax_set_initial_flags(struct linux_binprm *bprm)
62225+{
62226+ return;
62227+}
62228+#endif
62229+
62230+#ifdef CONFIG_SYSCTL
62231+__u32
62232+gr_handle_sysctl(const struct ctl_table * table, const int op)
62233+{
62234+ return 0;
62235+}
62236+#endif
62237+
62238+#ifdef CONFIG_TASKSTATS
62239+int gr_is_taskstats_denied(int pid)
62240+{
62241+ return 0;
62242+}
62243+#endif
62244+
62245+int
62246+gr_acl_is_enabled(void)
62247+{
62248+ return 0;
62249+}
62250+
62251+void
62252+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62253+{
62254+ return;
62255+}
62256+
62257+int
62258+gr_handle_rawio(const struct inode *inode)
62259+{
62260+ return 0;
62261+}
62262+
62263+void
62264+gr_acl_handle_psacct(struct task_struct *task, const long code)
62265+{
62266+ return;
62267+}
62268+
62269+int
62270+gr_handle_ptrace(struct task_struct *task, const long request)
62271+{
62272+ return 0;
62273+}
62274+
62275+int
62276+gr_handle_proc_ptrace(struct task_struct *task)
62277+{
62278+ return 0;
62279+}
62280+
62281+void
62282+gr_learn_resource(const struct task_struct *task,
62283+ const int res, const unsigned long wanted, const int gt)
62284+{
62285+ return;
62286+}
62287+
62288+int
62289+gr_set_acls(const int type)
62290+{
62291+ return 0;
62292+}
62293+
62294+int
62295+gr_check_hidden_task(const struct task_struct *tsk)
62296+{
62297+ return 0;
62298+}
62299+
62300+int
62301+gr_check_protected_task(const struct task_struct *task)
62302+{
62303+ return 0;
62304+}
62305+
62306+int
62307+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62308+{
62309+ return 0;
62310+}
62311+
62312+void
62313+gr_copy_label(struct task_struct *tsk)
62314+{
62315+ return;
62316+}
62317+
62318+void
62319+gr_set_pax_flags(struct task_struct *task)
62320+{
62321+ return;
62322+}
62323+
62324+int
62325+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62326+ const int unsafe_share)
62327+{
62328+ return 0;
62329+}
62330+
62331+void
62332+gr_handle_delete(const ino_t ino, const dev_t dev)
62333+{
62334+ return;
62335+}
62336+
62337+void
62338+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62339+{
62340+ return;
62341+}
62342+
62343+void
62344+gr_handle_crash(struct task_struct *task, const int sig)
62345+{
62346+ return;
62347+}
62348+
62349+int
62350+gr_check_crash_exec(const struct file *filp)
62351+{
62352+ return 0;
62353+}
62354+
62355+int
62356+gr_check_crash_uid(const uid_t uid)
62357+{
62358+ return 0;
62359+}
62360+
62361+void
62362+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62363+ struct dentry *old_dentry,
62364+ struct dentry *new_dentry,
62365+ struct vfsmount *mnt, const __u8 replace)
62366+{
62367+ return;
62368+}
62369+
62370+int
62371+gr_search_socket(const int family, const int type, const int protocol)
62372+{
62373+ return 1;
62374+}
62375+
62376+int
62377+gr_search_connectbind(const int mode, const struct socket *sock,
62378+ const struct sockaddr_in *addr)
62379+{
62380+ return 0;
62381+}
62382+
62383+void
62384+gr_handle_alertkill(struct task_struct *task)
62385+{
62386+ return;
62387+}
62388+
62389+__u32
62390+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
62391+{
62392+ return 1;
62393+}
62394+
62395+__u32
62396+gr_acl_handle_hidden_file(const struct dentry * dentry,
62397+ const struct vfsmount * mnt)
62398+{
62399+ return 1;
62400+}
62401+
62402+__u32
62403+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62404+ int acc_mode)
62405+{
62406+ return 1;
62407+}
62408+
62409+__u32
62410+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62411+{
62412+ return 1;
62413+}
62414+
62415+__u32
62416+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62417+{
62418+ return 1;
62419+}
62420+
62421+int
62422+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62423+ unsigned int *vm_flags)
62424+{
62425+ return 1;
62426+}
62427+
62428+__u32
62429+gr_acl_handle_truncate(const struct dentry * dentry,
62430+ const struct vfsmount * mnt)
62431+{
62432+ return 1;
62433+}
62434+
62435+__u32
62436+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
62437+{
62438+ return 1;
62439+}
62440+
62441+__u32
62442+gr_acl_handle_access(const struct dentry * dentry,
62443+ const struct vfsmount * mnt, const int fmode)
62444+{
62445+ return 1;
62446+}
62447+
62448+__u32
62449+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
62450+ mode_t mode)
62451+{
62452+ return 1;
62453+}
62454+
62455+__u32
62456+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
62457+ mode_t mode)
62458+{
62459+ return 1;
62460+}
62461+
62462+__u32
62463+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
62464+{
62465+ return 1;
62466+}
62467+
62468+__u32
62469+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
62470+{
62471+ return 1;
62472+}
62473+
62474+void
62475+grsecurity_init(void)
62476+{
62477+ return;
62478+}
62479+
62480+__u32
62481+gr_acl_handle_mknod(const struct dentry * new_dentry,
62482+ const struct dentry * parent_dentry,
62483+ const struct vfsmount * parent_mnt,
62484+ const int mode)
62485+{
62486+ return 1;
62487+}
62488+
62489+__u32
62490+gr_acl_handle_mkdir(const struct dentry * new_dentry,
62491+ const struct dentry * parent_dentry,
62492+ const struct vfsmount * parent_mnt)
62493+{
62494+ return 1;
62495+}
62496+
62497+__u32
62498+gr_acl_handle_symlink(const struct dentry * new_dentry,
62499+ const struct dentry * parent_dentry,
62500+ const struct vfsmount * parent_mnt, const char *from)
62501+{
62502+ return 1;
62503+}
62504+
62505+__u32
62506+gr_acl_handle_link(const struct dentry * new_dentry,
62507+ const struct dentry * parent_dentry,
62508+ const struct vfsmount * parent_mnt,
62509+ const struct dentry * old_dentry,
62510+ const struct vfsmount * old_mnt, const char *to)
62511+{
62512+ return 1;
62513+}
62514+
62515+int
62516+gr_acl_handle_rename(const struct dentry *new_dentry,
62517+ const struct dentry *parent_dentry,
62518+ const struct vfsmount *parent_mnt,
62519+ const struct dentry *old_dentry,
62520+ const struct inode *old_parent_inode,
62521+ const struct vfsmount *old_mnt, const char *newname)
62522+{
62523+ return 0;
62524+}
62525+
62526+int
62527+gr_acl_handle_filldir(const struct file *file, const char *name,
62528+ const int namelen, const ino_t ino)
62529+{
62530+ return 1;
62531+}
62532+
62533+int
62534+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62535+ const time_t shm_createtime, const uid_t cuid, const int shmid)
62536+{
62537+ return 1;
62538+}
62539+
62540+int
62541+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
62542+{
62543+ return 0;
62544+}
62545+
62546+int
62547+gr_search_accept(const struct socket *sock)
62548+{
62549+ return 0;
62550+}
62551+
62552+int
62553+gr_search_listen(const struct socket *sock)
62554+{
62555+ return 0;
62556+}
62557+
62558+int
62559+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
62560+{
62561+ return 0;
62562+}
62563+
62564+__u32
62565+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
62566+{
62567+ return 1;
62568+}
62569+
62570+__u32
62571+gr_acl_handle_creat(const struct dentry * dentry,
62572+ const struct dentry * p_dentry,
62573+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62574+ const int imode)
62575+{
62576+ return 1;
62577+}
62578+
62579+void
62580+gr_acl_handle_exit(void)
62581+{
62582+ return;
62583+}
62584+
62585+int
62586+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62587+{
62588+ return 1;
62589+}
62590+
62591+void
62592+gr_set_role_label(const uid_t uid, const gid_t gid)
62593+{
62594+ return;
62595+}
62596+
62597+int
62598+gr_acl_handle_procpidmem(const struct task_struct *task)
62599+{
62600+ return 0;
62601+}
62602+
62603+int
62604+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
62605+{
62606+ return 0;
62607+}
62608+
62609+int
62610+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
62611+{
62612+ return 0;
62613+}
62614+
62615+void
62616+gr_set_kernel_label(struct task_struct *task)
62617+{
62618+ return;
62619+}
62620+
62621+int
62622+gr_check_user_change(int real, int effective, int fs)
62623+{
62624+ return 0;
62625+}
62626+
62627+int
62628+gr_check_group_change(int real, int effective, int fs)
62629+{
62630+ return 0;
62631+}
62632+
62633+int gr_acl_enable_at_secure(void)
62634+{
62635+ return 0;
62636+}
62637+
62638+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62639+{
62640+ return dentry->d_inode->i_sb->s_dev;
62641+}
62642+
62643+EXPORT_SYMBOL(gr_learn_resource);
62644+EXPORT_SYMBOL(gr_set_kernel_label);
62645+#ifdef CONFIG_SECURITY
62646+EXPORT_SYMBOL(gr_check_user_change);
62647+EXPORT_SYMBOL(gr_check_group_change);
62648+#endif
62649diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
62650new file mode 100644
62651index 0000000..a96e155
62652--- /dev/null
62653+++ b/grsecurity/grsec_exec.c
62654@@ -0,0 +1,204 @@
62655+#include <linux/kernel.h>
62656+#include <linux/sched.h>
62657+#include <linux/file.h>
62658+#include <linux/binfmts.h>
62659+#include <linux/smp_lock.h>
62660+#include <linux/fs.h>
62661+#include <linux/types.h>
62662+#include <linux/grdefs.h>
62663+#include <linux/grinternal.h>
62664+#include <linux/capability.h>
62665+#include <linux/compat.h>
62666+#include <linux/module.h>
62667+
62668+#include <asm/uaccess.h>
62669+
62670+#ifdef CONFIG_GRKERNSEC_EXECLOG
62671+static char gr_exec_arg_buf[132];
62672+static DEFINE_MUTEX(gr_exec_arg_mutex);
62673+#endif
62674+
62675+void
62676+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
62677+{
62678+#ifdef CONFIG_GRKERNSEC_EXECLOG
62679+ char *grarg = gr_exec_arg_buf;
62680+ unsigned int i, x, execlen = 0;
62681+ char c;
62682+
62683+ if (!((grsec_enable_execlog && grsec_enable_group &&
62684+ in_group_p(grsec_audit_gid))
62685+ || (grsec_enable_execlog && !grsec_enable_group)))
62686+ return;
62687+
62688+ mutex_lock(&gr_exec_arg_mutex);
62689+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62690+
62691+ if (unlikely(argv == NULL))
62692+ goto log;
62693+
62694+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62695+ const char __user *p;
62696+ unsigned int len;
62697+
62698+ if (copy_from_user(&p, argv + i, sizeof(p)))
62699+ goto log;
62700+ if (!p)
62701+ goto log;
62702+ len = strnlen_user(p, 128 - execlen);
62703+ if (len > 128 - execlen)
62704+ len = 128 - execlen;
62705+ else if (len > 0)
62706+ len--;
62707+ if (copy_from_user(grarg + execlen, p, len))
62708+ goto log;
62709+
62710+ /* rewrite unprintable characters */
62711+ for (x = 0; x < len; x++) {
62712+ c = *(grarg + execlen + x);
62713+ if (c < 32 || c > 126)
62714+ *(grarg + execlen + x) = ' ';
62715+ }
62716+
62717+ execlen += len;
62718+ *(grarg + execlen) = ' ';
62719+ *(grarg + execlen + 1) = '\0';
62720+ execlen++;
62721+ }
62722+
62723+ log:
62724+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62725+ bprm->file->f_path.mnt, grarg);
62726+ mutex_unlock(&gr_exec_arg_mutex);
62727+#endif
62728+ return;
62729+}
62730+
62731+#ifdef CONFIG_COMPAT
62732+void
62733+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
62734+{
62735+#ifdef CONFIG_GRKERNSEC_EXECLOG
62736+ char *grarg = gr_exec_arg_buf;
62737+ unsigned int i, x, execlen = 0;
62738+ char c;
62739+
62740+ if (!((grsec_enable_execlog && grsec_enable_group &&
62741+ in_group_p(grsec_audit_gid))
62742+ || (grsec_enable_execlog && !grsec_enable_group)))
62743+ return;
62744+
62745+ mutex_lock(&gr_exec_arg_mutex);
62746+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62747+
62748+ if (unlikely(argv == NULL))
62749+ goto log;
62750+
62751+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62752+ compat_uptr_t p;
62753+ unsigned int len;
62754+
62755+ if (get_user(p, argv + i))
62756+ goto log;
62757+ len = strnlen_user(compat_ptr(p), 128 - execlen);
62758+ if (len > 128 - execlen)
62759+ len = 128 - execlen;
62760+ else if (len > 0)
62761+ len--;
62762+ else
62763+ goto log;
62764+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
62765+ goto log;
62766+
62767+ /* rewrite unprintable characters */
62768+ for (x = 0; x < len; x++) {
62769+ c = *(grarg + execlen + x);
62770+ if (c < 32 || c > 126)
62771+ *(grarg + execlen + x) = ' ';
62772+ }
62773+
62774+ execlen += len;
62775+ *(grarg + execlen) = ' ';
62776+ *(grarg + execlen + 1) = '\0';
62777+ execlen++;
62778+ }
62779+
62780+ log:
62781+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62782+ bprm->file->f_path.mnt, grarg);
62783+ mutex_unlock(&gr_exec_arg_mutex);
62784+#endif
62785+ return;
62786+}
62787+#endif
62788+
62789+#ifdef CONFIG_GRKERNSEC
62790+extern int gr_acl_is_capable(const int cap);
62791+extern int gr_acl_is_capable_nolog(const int cap);
62792+extern int gr_chroot_is_capable(const int cap);
62793+extern int gr_chroot_is_capable_nolog(const int cap);
62794+#endif
62795+
62796+const char *captab_log[] = {
62797+ "CAP_CHOWN",
62798+ "CAP_DAC_OVERRIDE",
62799+ "CAP_DAC_READ_SEARCH",
62800+ "CAP_FOWNER",
62801+ "CAP_FSETID",
62802+ "CAP_KILL",
62803+ "CAP_SETGID",
62804+ "CAP_SETUID",
62805+ "CAP_SETPCAP",
62806+ "CAP_LINUX_IMMUTABLE",
62807+ "CAP_NET_BIND_SERVICE",
62808+ "CAP_NET_BROADCAST",
62809+ "CAP_NET_ADMIN",
62810+ "CAP_NET_RAW",
62811+ "CAP_IPC_LOCK",
62812+ "CAP_IPC_OWNER",
62813+ "CAP_SYS_MODULE",
62814+ "CAP_SYS_RAWIO",
62815+ "CAP_SYS_CHROOT",
62816+ "CAP_SYS_PTRACE",
62817+ "CAP_SYS_PACCT",
62818+ "CAP_SYS_ADMIN",
62819+ "CAP_SYS_BOOT",
62820+ "CAP_SYS_NICE",
62821+ "CAP_SYS_RESOURCE",
62822+ "CAP_SYS_TIME",
62823+ "CAP_SYS_TTY_CONFIG",
62824+ "CAP_MKNOD",
62825+ "CAP_LEASE",
62826+ "CAP_AUDIT_WRITE",
62827+ "CAP_AUDIT_CONTROL",
62828+ "CAP_SETFCAP",
62829+ "CAP_MAC_OVERRIDE",
62830+ "CAP_MAC_ADMIN"
62831+};
62832+
62833+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
62834+
62835+int gr_is_capable(const int cap)
62836+{
62837+#ifdef CONFIG_GRKERNSEC
62838+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
62839+ return 1;
62840+ return 0;
62841+#else
62842+ return 1;
62843+#endif
62844+}
62845+
62846+int gr_is_capable_nolog(const int cap)
62847+{
62848+#ifdef CONFIG_GRKERNSEC
62849+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
62850+ return 1;
62851+ return 0;
62852+#else
62853+ return 1;
62854+#endif
62855+}
62856+
62857+EXPORT_SYMBOL(gr_is_capable);
62858+EXPORT_SYMBOL(gr_is_capable_nolog);
62859diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
62860new file mode 100644
62861index 0000000..d3ee748
62862--- /dev/null
62863+++ b/grsecurity/grsec_fifo.c
62864@@ -0,0 +1,24 @@
62865+#include <linux/kernel.h>
62866+#include <linux/sched.h>
62867+#include <linux/fs.h>
62868+#include <linux/file.h>
62869+#include <linux/grinternal.h>
62870+
62871+int
62872+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
62873+ const struct dentry *dir, const int flag, const int acc_mode)
62874+{
62875+#ifdef CONFIG_GRKERNSEC_FIFO
62876+ const struct cred *cred = current_cred();
62877+
62878+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
62879+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
62880+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
62881+ (cred->fsuid != dentry->d_inode->i_uid)) {
62882+ if (!inode_permission(dentry->d_inode, acc_mode))
62883+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
62884+ return -EACCES;
62885+ }
62886+#endif
62887+ return 0;
62888+}
62889diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
62890new file mode 100644
62891index 0000000..8ca18bf
62892--- /dev/null
62893+++ b/grsecurity/grsec_fork.c
62894@@ -0,0 +1,23 @@
62895+#include <linux/kernel.h>
62896+#include <linux/sched.h>
62897+#include <linux/grsecurity.h>
62898+#include <linux/grinternal.h>
62899+#include <linux/errno.h>
62900+
62901+void
62902+gr_log_forkfail(const int retval)
62903+{
62904+#ifdef CONFIG_GRKERNSEC_FORKFAIL
62905+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
62906+ switch (retval) {
62907+ case -EAGAIN:
62908+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
62909+ break;
62910+ case -ENOMEM:
62911+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
62912+ break;
62913+ }
62914+ }
62915+#endif
62916+ return;
62917+}
62918diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
62919new file mode 100644
62920index 0000000..1e995d3
62921--- /dev/null
62922+++ b/grsecurity/grsec_init.c
62923@@ -0,0 +1,278 @@
62924+#include <linux/kernel.h>
62925+#include <linux/sched.h>
62926+#include <linux/mm.h>
62927+#include <linux/smp_lock.h>
62928+#include <linux/gracl.h>
62929+#include <linux/slab.h>
62930+#include <linux/vmalloc.h>
62931+#include <linux/percpu.h>
62932+#include <linux/module.h>
62933+
62934+int grsec_enable_ptrace_readexec;
62935+int grsec_enable_setxid;
62936+int grsec_enable_brute;
62937+int grsec_enable_link;
62938+int grsec_enable_dmesg;
62939+int grsec_enable_harden_ptrace;
62940+int grsec_enable_fifo;
62941+int grsec_enable_execlog;
62942+int grsec_enable_signal;
62943+int grsec_enable_forkfail;
62944+int grsec_enable_audit_ptrace;
62945+int grsec_enable_time;
62946+int grsec_enable_audit_textrel;
62947+int grsec_enable_group;
62948+int grsec_audit_gid;
62949+int grsec_enable_chdir;
62950+int grsec_enable_mount;
62951+int grsec_enable_rofs;
62952+int grsec_enable_chroot_findtask;
62953+int grsec_enable_chroot_mount;
62954+int grsec_enable_chroot_shmat;
62955+int grsec_enable_chroot_fchdir;
62956+int grsec_enable_chroot_double;
62957+int grsec_enable_chroot_pivot;
62958+int grsec_enable_chroot_chdir;
62959+int grsec_enable_chroot_chmod;
62960+int grsec_enable_chroot_mknod;
62961+int grsec_enable_chroot_nice;
62962+int grsec_enable_chroot_execlog;
62963+int grsec_enable_chroot_caps;
62964+int grsec_enable_chroot_sysctl;
62965+int grsec_enable_chroot_unix;
62966+int grsec_enable_tpe;
62967+int grsec_tpe_gid;
62968+int grsec_enable_blackhole;
62969+#ifdef CONFIG_IPV6_MODULE
62970+EXPORT_SYMBOL(grsec_enable_blackhole);
62971+#endif
62972+int grsec_lastack_retries;
62973+int grsec_enable_tpe_all;
62974+int grsec_enable_tpe_invert;
62975+int grsec_enable_socket_all;
62976+int grsec_socket_all_gid;
62977+int grsec_enable_socket_client;
62978+int grsec_socket_client_gid;
62979+int grsec_enable_socket_server;
62980+int grsec_socket_server_gid;
62981+int grsec_resource_logging;
62982+int grsec_disable_privio;
62983+int grsec_enable_log_rwxmaps;
62984+int grsec_lock;
62985+
62986+DEFINE_SPINLOCK(grsec_alert_lock);
62987+unsigned long grsec_alert_wtime = 0;
62988+unsigned long grsec_alert_fyet = 0;
62989+
62990+DEFINE_SPINLOCK(grsec_audit_lock);
62991+
62992+DEFINE_RWLOCK(grsec_exec_file_lock);
62993+
62994+char *gr_shared_page[4];
62995+
62996+char *gr_alert_log_fmt;
62997+char *gr_audit_log_fmt;
62998+char *gr_alert_log_buf;
62999+char *gr_audit_log_buf;
63000+
63001+extern struct gr_arg *gr_usermode;
63002+extern unsigned char *gr_system_salt;
63003+extern unsigned char *gr_system_sum;
63004+
63005+void __init
63006+grsecurity_init(void)
63007+{
63008+ int j;
63009+ /* create the per-cpu shared pages */
63010+
63011+#ifdef CONFIG_X86
63012+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
63013+#endif
63014+
63015+ for (j = 0; j < 4; j++) {
63016+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
63017+ if (gr_shared_page[j] == NULL) {
63018+ panic("Unable to allocate grsecurity shared page");
63019+ return;
63020+ }
63021+ }
63022+
63023+ /* allocate log buffers */
63024+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
63025+ if (!gr_alert_log_fmt) {
63026+ panic("Unable to allocate grsecurity alert log format buffer");
63027+ return;
63028+ }
63029+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
63030+ if (!gr_audit_log_fmt) {
63031+ panic("Unable to allocate grsecurity audit log format buffer");
63032+ return;
63033+ }
63034+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63035+ if (!gr_alert_log_buf) {
63036+ panic("Unable to allocate grsecurity alert log buffer");
63037+ return;
63038+ }
63039+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
63040+ if (!gr_audit_log_buf) {
63041+ panic("Unable to allocate grsecurity audit log buffer");
63042+ return;
63043+ }
63044+
63045+ /* allocate memory for authentication structure */
63046+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
63047+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
63048+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
63049+
63050+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
63051+ panic("Unable to allocate grsecurity authentication structure");
63052+ return;
63053+ }
63054+
63055+
63056+#ifdef CONFIG_GRKERNSEC_IO
63057+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
63058+ grsec_disable_privio = 1;
63059+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63060+ grsec_disable_privio = 1;
63061+#else
63062+ grsec_disable_privio = 0;
63063+#endif
63064+#endif
63065+
63066+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
63067+ /* for backward compatibility, tpe_invert always defaults to on if
63068+ enabled in the kernel
63069+ */
63070+ grsec_enable_tpe_invert = 1;
63071+#endif
63072+
63073+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
63074+#ifndef CONFIG_GRKERNSEC_SYSCTL
63075+ grsec_lock = 1;
63076+#endif
63077+
63078+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63079+ grsec_enable_audit_textrel = 1;
63080+#endif
63081+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63082+ grsec_enable_log_rwxmaps = 1;
63083+#endif
63084+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63085+ grsec_enable_group = 1;
63086+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63087+#endif
63088+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63089+ grsec_enable_chdir = 1;
63090+#endif
63091+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63092+ grsec_enable_harden_ptrace = 1;
63093+#endif
63094+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63095+ grsec_enable_mount = 1;
63096+#endif
63097+#ifdef CONFIG_GRKERNSEC_LINK
63098+ grsec_enable_link = 1;
63099+#endif
63100+#ifdef CONFIG_GRKERNSEC_BRUTE
63101+ grsec_enable_brute = 1;
63102+#endif
63103+#ifdef CONFIG_GRKERNSEC_DMESG
63104+ grsec_enable_dmesg = 1;
63105+#endif
63106+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63107+ grsec_enable_blackhole = 1;
63108+ grsec_lastack_retries = 4;
63109+#endif
63110+#ifdef CONFIG_GRKERNSEC_FIFO
63111+ grsec_enable_fifo = 1;
63112+#endif
63113+#ifdef CONFIG_GRKERNSEC_EXECLOG
63114+ grsec_enable_execlog = 1;
63115+#endif
63116+#ifdef CONFIG_GRKERNSEC_SETXID
63117+ grsec_enable_setxid = 1;
63118+#endif
63119+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63120+ grsec_enable_ptrace_readexec = 1;
63121+#endif
63122+#ifdef CONFIG_GRKERNSEC_SIGNAL
63123+ grsec_enable_signal = 1;
63124+#endif
63125+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63126+ grsec_enable_forkfail = 1;
63127+#endif
63128+#ifdef CONFIG_GRKERNSEC_TIME
63129+ grsec_enable_time = 1;
63130+#endif
63131+#ifdef CONFIG_GRKERNSEC_RESLOG
63132+ grsec_resource_logging = 1;
63133+#endif
63134+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63135+ grsec_enable_chroot_findtask = 1;
63136+#endif
63137+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63138+ grsec_enable_chroot_unix = 1;
63139+#endif
63140+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63141+ grsec_enable_chroot_mount = 1;
63142+#endif
63143+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63144+ grsec_enable_chroot_fchdir = 1;
63145+#endif
63146+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63147+ grsec_enable_chroot_shmat = 1;
63148+#endif
63149+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63150+ grsec_enable_audit_ptrace = 1;
63151+#endif
63152+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63153+ grsec_enable_chroot_double = 1;
63154+#endif
63155+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63156+ grsec_enable_chroot_pivot = 1;
63157+#endif
63158+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63159+ grsec_enable_chroot_chdir = 1;
63160+#endif
63161+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63162+ grsec_enable_chroot_chmod = 1;
63163+#endif
63164+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63165+ grsec_enable_chroot_mknod = 1;
63166+#endif
63167+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63168+ grsec_enable_chroot_nice = 1;
63169+#endif
63170+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63171+ grsec_enable_chroot_execlog = 1;
63172+#endif
63173+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63174+ grsec_enable_chroot_caps = 1;
63175+#endif
63176+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63177+ grsec_enable_chroot_sysctl = 1;
63178+#endif
63179+#ifdef CONFIG_GRKERNSEC_TPE
63180+ grsec_enable_tpe = 1;
63181+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63182+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63183+ grsec_enable_tpe_all = 1;
63184+#endif
63185+#endif
63186+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63187+ grsec_enable_socket_all = 1;
63188+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63189+#endif
63190+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63191+ grsec_enable_socket_client = 1;
63192+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63193+#endif
63194+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63195+ grsec_enable_socket_server = 1;
63196+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63197+#endif
63198+#endif
63199+
63200+ return;
63201+}
63202diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63203new file mode 100644
63204index 0000000..3efe141
63205--- /dev/null
63206+++ b/grsecurity/grsec_link.c
63207@@ -0,0 +1,43 @@
63208+#include <linux/kernel.h>
63209+#include <linux/sched.h>
63210+#include <linux/fs.h>
63211+#include <linux/file.h>
63212+#include <linux/grinternal.h>
63213+
63214+int
63215+gr_handle_follow_link(const struct inode *parent,
63216+ const struct inode *inode,
63217+ const struct dentry *dentry, const struct vfsmount *mnt)
63218+{
63219+#ifdef CONFIG_GRKERNSEC_LINK
63220+ const struct cred *cred = current_cred();
63221+
63222+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63223+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63224+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63225+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63226+ return -EACCES;
63227+ }
63228+#endif
63229+ return 0;
63230+}
63231+
63232+int
63233+gr_handle_hardlink(const struct dentry *dentry,
63234+ const struct vfsmount *mnt,
63235+ struct inode *inode, const int mode, const char *to)
63236+{
63237+#ifdef CONFIG_GRKERNSEC_LINK
63238+ const struct cred *cred = current_cred();
63239+
63240+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63241+ (!S_ISREG(mode) || (mode & S_ISUID) ||
63242+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63243+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63244+ !capable(CAP_FOWNER) && cred->uid) {
63245+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63246+ return -EPERM;
63247+ }
63248+#endif
63249+ return 0;
63250+}
63251diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63252new file mode 100644
63253index 0000000..a45d2e9
63254--- /dev/null
63255+++ b/grsecurity/grsec_log.c
63256@@ -0,0 +1,322 @@
63257+#include <linux/kernel.h>
63258+#include <linux/sched.h>
63259+#include <linux/file.h>
63260+#include <linux/tty.h>
63261+#include <linux/fs.h>
63262+#include <linux/grinternal.h>
63263+
63264+#ifdef CONFIG_TREE_PREEMPT_RCU
63265+#define DISABLE_PREEMPT() preempt_disable()
63266+#define ENABLE_PREEMPT() preempt_enable()
63267+#else
63268+#define DISABLE_PREEMPT()
63269+#define ENABLE_PREEMPT()
63270+#endif
63271+
63272+#define BEGIN_LOCKS(x) \
63273+ DISABLE_PREEMPT(); \
63274+ rcu_read_lock(); \
63275+ read_lock(&tasklist_lock); \
63276+ read_lock(&grsec_exec_file_lock); \
63277+ if (x != GR_DO_AUDIT) \
63278+ spin_lock(&grsec_alert_lock); \
63279+ else \
63280+ spin_lock(&grsec_audit_lock)
63281+
63282+#define END_LOCKS(x) \
63283+ if (x != GR_DO_AUDIT) \
63284+ spin_unlock(&grsec_alert_lock); \
63285+ else \
63286+ spin_unlock(&grsec_audit_lock); \
63287+ read_unlock(&grsec_exec_file_lock); \
63288+ read_unlock(&tasklist_lock); \
63289+ rcu_read_unlock(); \
63290+ ENABLE_PREEMPT(); \
63291+ if (x == GR_DONT_AUDIT) \
63292+ gr_handle_alertkill(current)
63293+
63294+enum {
63295+ FLOODING,
63296+ NO_FLOODING
63297+};
63298+
63299+extern char *gr_alert_log_fmt;
63300+extern char *gr_audit_log_fmt;
63301+extern char *gr_alert_log_buf;
63302+extern char *gr_audit_log_buf;
63303+
63304+static int gr_log_start(int audit)
63305+{
63306+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63307+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63308+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63309+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63310+ unsigned long curr_secs = get_seconds();
63311+
63312+ if (audit == GR_DO_AUDIT)
63313+ goto set_fmt;
63314+
63315+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63316+ grsec_alert_wtime = curr_secs;
63317+ grsec_alert_fyet = 0;
63318+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63319+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63320+ grsec_alert_fyet++;
63321+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63322+ grsec_alert_wtime = curr_secs;
63323+ grsec_alert_fyet++;
63324+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63325+ return FLOODING;
63326+ }
63327+ else return FLOODING;
63328+
63329+set_fmt:
63330+#endif
63331+ memset(buf, 0, PAGE_SIZE);
63332+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
63333+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63334+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63335+ } else if (current->signal->curr_ip) {
63336+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63337+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63338+ } else if (gr_acl_is_enabled()) {
63339+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63340+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63341+ } else {
63342+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
63343+ strcpy(buf, fmt);
63344+ }
63345+
63346+ return NO_FLOODING;
63347+}
63348+
63349+static void gr_log_middle(int audit, const char *msg, va_list ap)
63350+ __attribute__ ((format (printf, 2, 0)));
63351+
63352+static void gr_log_middle(int audit, const char *msg, va_list ap)
63353+{
63354+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63355+ unsigned int len = strlen(buf);
63356+
63357+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63358+
63359+ return;
63360+}
63361+
63362+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63363+ __attribute__ ((format (printf, 2, 3)));
63364+
63365+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63366+{
63367+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63368+ unsigned int len = strlen(buf);
63369+ va_list ap;
63370+
63371+ va_start(ap, msg);
63372+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63373+ va_end(ap);
63374+
63375+ return;
63376+}
63377+
63378+static void gr_log_end(int audit, int append_default)
63379+{
63380+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63381+
63382+ if (append_default) {
63383+ unsigned int len = strlen(buf);
63384+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
63385+ }
63386+
63387+ printk("%s\n", buf);
63388+
63389+ return;
63390+}
63391+
63392+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
63393+{
63394+ int logtype;
63395+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
63396+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
63397+ void *voidptr = NULL;
63398+ int num1 = 0, num2 = 0;
63399+ unsigned long ulong1 = 0, ulong2 = 0;
63400+ struct dentry *dentry = NULL;
63401+ struct vfsmount *mnt = NULL;
63402+ struct file *file = NULL;
63403+ struct task_struct *task = NULL;
63404+ const struct cred *cred, *pcred;
63405+ va_list ap;
63406+
63407+ BEGIN_LOCKS(audit);
63408+ logtype = gr_log_start(audit);
63409+ if (logtype == FLOODING) {
63410+ END_LOCKS(audit);
63411+ return;
63412+ }
63413+ va_start(ap, argtypes);
63414+ switch (argtypes) {
63415+ case GR_TTYSNIFF:
63416+ task = va_arg(ap, struct task_struct *);
63417+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63418+ break;
63419+ case GR_SYSCTL_HIDDEN:
63420+ str1 = va_arg(ap, char *);
63421+ gr_log_middle_varargs(audit, msg, result, str1);
63422+ break;
63423+ case GR_RBAC:
63424+ dentry = va_arg(ap, struct dentry *);
63425+ mnt = va_arg(ap, struct vfsmount *);
63426+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63427+ break;
63428+ case GR_RBAC_STR:
63429+ dentry = va_arg(ap, struct dentry *);
63430+ mnt = va_arg(ap, struct vfsmount *);
63431+ str1 = va_arg(ap, char *);
63432+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
63433+ break;
63434+ case GR_STR_RBAC:
63435+ str1 = va_arg(ap, char *);
63436+ dentry = va_arg(ap, struct dentry *);
63437+ mnt = va_arg(ap, struct vfsmount *);
63438+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
63439+ break;
63440+ case GR_RBAC_MODE2:
63441+ dentry = va_arg(ap, struct dentry *);
63442+ mnt = va_arg(ap, struct vfsmount *);
63443+ str1 = va_arg(ap, char *);
63444+ str2 = va_arg(ap, char *);
63445+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
63446+ break;
63447+ case GR_RBAC_MODE3:
63448+ dentry = va_arg(ap, struct dentry *);
63449+ mnt = va_arg(ap, struct vfsmount *);
63450+ str1 = va_arg(ap, char *);
63451+ str2 = va_arg(ap, char *);
63452+ str3 = va_arg(ap, char *);
63453+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
63454+ break;
63455+ case GR_FILENAME:
63456+ dentry = va_arg(ap, struct dentry *);
63457+ mnt = va_arg(ap, struct vfsmount *);
63458+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
63459+ break;
63460+ case GR_STR_FILENAME:
63461+ str1 = va_arg(ap, char *);
63462+ dentry = va_arg(ap, struct dentry *);
63463+ mnt = va_arg(ap, struct vfsmount *);
63464+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
63465+ break;
63466+ case GR_FILENAME_STR:
63467+ dentry = va_arg(ap, struct dentry *);
63468+ mnt = va_arg(ap, struct vfsmount *);
63469+ str1 = va_arg(ap, char *);
63470+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
63471+ break;
63472+ case GR_FILENAME_TWO_INT:
63473+ dentry = va_arg(ap, struct dentry *);
63474+ mnt = va_arg(ap, struct vfsmount *);
63475+ num1 = va_arg(ap, int);
63476+ num2 = va_arg(ap, int);
63477+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
63478+ break;
63479+ case GR_FILENAME_TWO_INT_STR:
63480+ dentry = va_arg(ap, struct dentry *);
63481+ mnt = va_arg(ap, struct vfsmount *);
63482+ num1 = va_arg(ap, int);
63483+ num2 = va_arg(ap, int);
63484+ str1 = va_arg(ap, char *);
63485+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
63486+ break;
63487+ case GR_TEXTREL:
63488+ file = va_arg(ap, struct file *);
63489+ ulong1 = va_arg(ap, unsigned long);
63490+ ulong2 = va_arg(ap, unsigned long);
63491+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
63492+ break;
63493+ case GR_PTRACE:
63494+ task = va_arg(ap, struct task_struct *);
63495+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
63496+ break;
63497+ case GR_RESOURCE:
63498+ task = va_arg(ap, struct task_struct *);
63499+ cred = __task_cred(task);
63500+ pcred = __task_cred(task->real_parent);
63501+ ulong1 = va_arg(ap, unsigned long);
63502+ str1 = va_arg(ap, char *);
63503+ ulong2 = va_arg(ap, unsigned long);
63504+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63505+ break;
63506+ case GR_CAP:
63507+ task = va_arg(ap, struct task_struct *);
63508+ cred = __task_cred(task);
63509+ pcred = __task_cred(task->real_parent);
63510+ str1 = va_arg(ap, char *);
63511+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63512+ break;
63513+ case GR_SIG:
63514+ str1 = va_arg(ap, char *);
63515+ voidptr = va_arg(ap, void *);
63516+ gr_log_middle_varargs(audit, msg, str1, voidptr);
63517+ break;
63518+ case GR_SIG2:
63519+ task = va_arg(ap, struct task_struct *);
63520+ cred = __task_cred(task);
63521+ pcred = __task_cred(task->real_parent);
63522+ num1 = va_arg(ap, int);
63523+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63524+ break;
63525+ case GR_CRASH1:
63526+ task = va_arg(ap, struct task_struct *);
63527+ cred = __task_cred(task);
63528+ pcred = __task_cred(task->real_parent);
63529+ ulong1 = va_arg(ap, unsigned long);
63530+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
63531+ break;
63532+ case GR_CRASH2:
63533+ task = va_arg(ap, struct task_struct *);
63534+ cred = __task_cred(task);
63535+ pcred = __task_cred(task->real_parent);
63536+ ulong1 = va_arg(ap, unsigned long);
63537+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
63538+ break;
63539+ case GR_RWXMAP:
63540+ file = va_arg(ap, struct file *);
63541+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
63542+ break;
63543+ case GR_PSACCT:
63544+ {
63545+ unsigned int wday, cday;
63546+ __u8 whr, chr;
63547+ __u8 wmin, cmin;
63548+ __u8 wsec, csec;
63549+ char cur_tty[64] = { 0 };
63550+ char parent_tty[64] = { 0 };
63551+
63552+ task = va_arg(ap, struct task_struct *);
63553+ wday = va_arg(ap, unsigned int);
63554+ cday = va_arg(ap, unsigned int);
63555+ whr = va_arg(ap, int);
63556+ chr = va_arg(ap, int);
63557+ wmin = va_arg(ap, int);
63558+ cmin = va_arg(ap, int);
63559+ wsec = va_arg(ap, int);
63560+ csec = va_arg(ap, int);
63561+ ulong1 = va_arg(ap, unsigned long);
63562+ cred = __task_cred(task);
63563+ pcred = __task_cred(task->real_parent);
63564+
63565+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63566+ }
63567+ break;
63568+ default:
63569+ gr_log_middle(audit, msg, ap);
63570+ }
63571+ va_end(ap);
63572+ // these don't need DEFAULTSECARGS printed on the end
63573+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
63574+ gr_log_end(audit, 0);
63575+ else
63576+ gr_log_end(audit, 1);
63577+ END_LOCKS(audit);
63578+}
63579diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
63580new file mode 100644
63581index 0000000..f536303
63582--- /dev/null
63583+++ b/grsecurity/grsec_mem.c
63584@@ -0,0 +1,40 @@
63585+#include <linux/kernel.h>
63586+#include <linux/sched.h>
63587+#include <linux/mm.h>
63588+#include <linux/mman.h>
63589+#include <linux/grinternal.h>
63590+
63591+void
63592+gr_handle_ioperm(void)
63593+{
63594+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
63595+ return;
63596+}
63597+
63598+void
63599+gr_handle_iopl(void)
63600+{
63601+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
63602+ return;
63603+}
63604+
63605+void
63606+gr_handle_mem_readwrite(u64 from, u64 to)
63607+{
63608+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
63609+ return;
63610+}
63611+
63612+void
63613+gr_handle_vm86(void)
63614+{
63615+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
63616+ return;
63617+}
63618+
63619+void
63620+gr_log_badprocpid(const char *entry)
63621+{
63622+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
63623+ return;
63624+}
63625diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
63626new file mode 100644
63627index 0000000..2131422
63628--- /dev/null
63629+++ b/grsecurity/grsec_mount.c
63630@@ -0,0 +1,62 @@
63631+#include <linux/kernel.h>
63632+#include <linux/sched.h>
63633+#include <linux/mount.h>
63634+#include <linux/grsecurity.h>
63635+#include <linux/grinternal.h>
63636+
63637+void
63638+gr_log_remount(const char *devname, const int retval)
63639+{
63640+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63641+ if (grsec_enable_mount && (retval >= 0))
63642+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
63643+#endif
63644+ return;
63645+}
63646+
63647+void
63648+gr_log_unmount(const char *devname, const int retval)
63649+{
63650+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63651+ if (grsec_enable_mount && (retval >= 0))
63652+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
63653+#endif
63654+ return;
63655+}
63656+
63657+void
63658+gr_log_mount(const char *from, const char *to, const int retval)
63659+{
63660+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63661+ if (grsec_enable_mount && (retval >= 0))
63662+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
63663+#endif
63664+ return;
63665+}
63666+
63667+int
63668+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
63669+{
63670+#ifdef CONFIG_GRKERNSEC_ROFS
63671+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
63672+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
63673+ return -EPERM;
63674+ } else
63675+ return 0;
63676+#endif
63677+ return 0;
63678+}
63679+
63680+int
63681+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
63682+{
63683+#ifdef CONFIG_GRKERNSEC_ROFS
63684+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
63685+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
63686+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
63687+ return -EPERM;
63688+ } else
63689+ return 0;
63690+#endif
63691+ return 0;
63692+}
63693diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
63694new file mode 100644
63695index 0000000..a3b12a0
63696--- /dev/null
63697+++ b/grsecurity/grsec_pax.c
63698@@ -0,0 +1,36 @@
63699+#include <linux/kernel.h>
63700+#include <linux/sched.h>
63701+#include <linux/mm.h>
63702+#include <linux/file.h>
63703+#include <linux/grinternal.h>
63704+#include <linux/grsecurity.h>
63705+
63706+void
63707+gr_log_textrel(struct vm_area_struct * vma)
63708+{
63709+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63710+ if (grsec_enable_audit_textrel)
63711+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
63712+#endif
63713+ return;
63714+}
63715+
63716+void
63717+gr_log_rwxmmap(struct file *file)
63718+{
63719+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63720+ if (grsec_enable_log_rwxmaps)
63721+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
63722+#endif
63723+ return;
63724+}
63725+
63726+void
63727+gr_log_rwxmprotect(struct file *file)
63728+{
63729+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63730+ if (grsec_enable_log_rwxmaps)
63731+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
63732+#endif
63733+ return;
63734+}
63735diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
63736new file mode 100644
63737index 0000000..78f8733
63738--- /dev/null
63739+++ b/grsecurity/grsec_ptrace.c
63740@@ -0,0 +1,30 @@
63741+#include <linux/kernel.h>
63742+#include <linux/sched.h>
63743+#include <linux/grinternal.h>
63744+#include <linux/security.h>
63745+
63746+void
63747+gr_audit_ptrace(struct task_struct *task)
63748+{
63749+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63750+ if (grsec_enable_audit_ptrace)
63751+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
63752+#endif
63753+ return;
63754+}
63755+
63756+int
63757+gr_ptrace_readexec(struct file *file, int unsafe_flags)
63758+{
63759+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63760+ const struct dentry *dentry = file->f_path.dentry;
63761+ const struct vfsmount *mnt = file->f_path.mnt;
63762+
63763+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
63764+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
63765+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
63766+ return -EACCES;
63767+ }
63768+#endif
63769+ return 0;
63770+}
63771diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
63772new file mode 100644
63773index 0000000..c648492
63774--- /dev/null
63775+++ b/grsecurity/grsec_sig.c
63776@@ -0,0 +1,206 @@
63777+#include <linux/kernel.h>
63778+#include <linux/sched.h>
63779+#include <linux/delay.h>
63780+#include <linux/grsecurity.h>
63781+#include <linux/grinternal.h>
63782+#include <linux/hardirq.h>
63783+
63784+char *signames[] = {
63785+ [SIGSEGV] = "Segmentation fault",
63786+ [SIGILL] = "Illegal instruction",
63787+ [SIGABRT] = "Abort",
63788+ [SIGBUS] = "Invalid alignment/Bus error"
63789+};
63790+
63791+void
63792+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
63793+{
63794+#ifdef CONFIG_GRKERNSEC_SIGNAL
63795+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
63796+ (sig == SIGABRT) || (sig == SIGBUS))) {
63797+ if (t->pid == current->pid) {
63798+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
63799+ } else {
63800+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
63801+ }
63802+ }
63803+#endif
63804+ return;
63805+}
63806+
63807+int
63808+gr_handle_signal(const struct task_struct *p, const int sig)
63809+{
63810+#ifdef CONFIG_GRKERNSEC
63811+ /* ignore the 0 signal for protected task checks */
63812+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
63813+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
63814+ return -EPERM;
63815+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
63816+ return -EPERM;
63817+ }
63818+#endif
63819+ return 0;
63820+}
63821+
63822+#ifdef CONFIG_GRKERNSEC
63823+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
63824+
63825+int gr_fake_force_sig(int sig, struct task_struct *t)
63826+{
63827+ unsigned long int flags;
63828+ int ret, blocked, ignored;
63829+ struct k_sigaction *action;
63830+
63831+ spin_lock_irqsave(&t->sighand->siglock, flags);
63832+ action = &t->sighand->action[sig-1];
63833+ ignored = action->sa.sa_handler == SIG_IGN;
63834+ blocked = sigismember(&t->blocked, sig);
63835+ if (blocked || ignored) {
63836+ action->sa.sa_handler = SIG_DFL;
63837+ if (blocked) {
63838+ sigdelset(&t->blocked, sig);
63839+ recalc_sigpending_and_wake(t);
63840+ }
63841+ }
63842+ if (action->sa.sa_handler == SIG_DFL)
63843+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
63844+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
63845+
63846+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
63847+
63848+ return ret;
63849+}
63850+#endif
63851+
63852+#ifdef CONFIG_GRKERNSEC_BRUTE
63853+#define GR_USER_BAN_TIME (15 * 60)
63854+
63855+static int __get_dumpable(unsigned long mm_flags)
63856+{
63857+ int ret;
63858+
63859+ ret = mm_flags & MMF_DUMPABLE_MASK;
63860+ return (ret >= 2) ? 2 : ret;
63861+}
63862+#endif
63863+
63864+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
63865+{
63866+#ifdef CONFIG_GRKERNSEC_BRUTE
63867+ uid_t uid = 0;
63868+
63869+ if (!grsec_enable_brute)
63870+ return;
63871+
63872+ rcu_read_lock();
63873+ read_lock(&tasklist_lock);
63874+ read_lock(&grsec_exec_file_lock);
63875+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
63876+ p->real_parent->brute = 1;
63877+ else {
63878+ const struct cred *cred = __task_cred(p), *cred2;
63879+ struct task_struct *tsk, *tsk2;
63880+
63881+ if (!__get_dumpable(mm_flags) && cred->uid) {
63882+ struct user_struct *user;
63883+
63884+ uid = cred->uid;
63885+
63886+ /* this is put upon execution past expiration */
63887+ user = find_user(uid);
63888+ if (user == NULL)
63889+ goto unlock;
63890+ user->banned = 1;
63891+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
63892+ if (user->ban_expires == ~0UL)
63893+ user->ban_expires--;
63894+
63895+ do_each_thread(tsk2, tsk) {
63896+ cred2 = __task_cred(tsk);
63897+ if (tsk != p && cred2->uid == uid)
63898+ gr_fake_force_sig(SIGKILL, tsk);
63899+ } while_each_thread(tsk2, tsk);
63900+ }
63901+ }
63902+unlock:
63903+ read_unlock(&grsec_exec_file_lock);
63904+ read_unlock(&tasklist_lock);
63905+ rcu_read_unlock();
63906+
63907+ if (uid)
63908+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
63909+#endif
63910+ return;
63911+}
63912+
63913+void gr_handle_brute_check(void)
63914+{
63915+#ifdef CONFIG_GRKERNSEC_BRUTE
63916+ if (current->brute)
63917+ msleep(30 * 1000);
63918+#endif
63919+ return;
63920+}
63921+
63922+void gr_handle_kernel_exploit(void)
63923+{
63924+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
63925+ const struct cred *cred;
63926+ struct task_struct *tsk, *tsk2;
63927+ struct user_struct *user;
63928+ uid_t uid;
63929+
63930+ if (in_irq() || in_serving_softirq() || in_nmi())
63931+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
63932+
63933+ uid = current_uid();
63934+
63935+ if (uid == 0)
63936+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
63937+ else {
63938+ /* kill all the processes of this user, hold a reference
63939+ to their creds struct, and prevent them from creating
63940+ another process until system reset
63941+ */
63942+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
63943+ /* we intentionally leak this ref */
63944+ user = get_uid(current->cred->user);
63945+ if (user) {
63946+ user->banned = 1;
63947+ user->ban_expires = ~0UL;
63948+ }
63949+
63950+ read_lock(&tasklist_lock);
63951+ do_each_thread(tsk2, tsk) {
63952+ cred = __task_cred(tsk);
63953+ if (cred->uid == uid)
63954+ gr_fake_force_sig(SIGKILL, tsk);
63955+ } while_each_thread(tsk2, tsk);
63956+ read_unlock(&tasklist_lock);
63957+ }
63958+#endif
63959+}
63960+
63961+int __gr_process_user_ban(struct user_struct *user)
63962+{
63963+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63964+ if (unlikely(user->banned)) {
63965+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
63966+ user->banned = 0;
63967+ user->ban_expires = 0;
63968+ free_uid(user);
63969+ } else
63970+ return -EPERM;
63971+ }
63972+#endif
63973+ return 0;
63974+}
63975+
63976+int gr_process_user_ban(void)
63977+{
63978+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63979+ return __gr_process_user_ban(current->cred->user);
63980+#endif
63981+ return 0;
63982+}
63983diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
63984new file mode 100644
63985index 0000000..7512ea9
63986--- /dev/null
63987+++ b/grsecurity/grsec_sock.c
63988@@ -0,0 +1,275 @@
63989+#include <linux/kernel.h>
63990+#include <linux/module.h>
63991+#include <linux/sched.h>
63992+#include <linux/file.h>
63993+#include <linux/net.h>
63994+#include <linux/in.h>
63995+#include <linux/ip.h>
63996+#include <net/sock.h>
63997+#include <net/inet_sock.h>
63998+#include <linux/grsecurity.h>
63999+#include <linux/grinternal.h>
64000+#include <linux/gracl.h>
64001+
64002+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
64003+EXPORT_SYMBOL(gr_cap_rtnetlink);
64004+
64005+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
64006+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
64007+
64008+EXPORT_SYMBOL(gr_search_udp_recvmsg);
64009+EXPORT_SYMBOL(gr_search_udp_sendmsg);
64010+
64011+#ifdef CONFIG_UNIX_MODULE
64012+EXPORT_SYMBOL(gr_acl_handle_unix);
64013+EXPORT_SYMBOL(gr_acl_handle_mknod);
64014+EXPORT_SYMBOL(gr_handle_chroot_unix);
64015+EXPORT_SYMBOL(gr_handle_create);
64016+#endif
64017+
64018+#ifdef CONFIG_GRKERNSEC
64019+#define gr_conn_table_size 32749
64020+struct conn_table_entry {
64021+ struct conn_table_entry *next;
64022+ struct signal_struct *sig;
64023+};
64024+
64025+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
64026+DEFINE_SPINLOCK(gr_conn_table_lock);
64027+
64028+extern const char * gr_socktype_to_name(unsigned char type);
64029+extern const char * gr_proto_to_name(unsigned char proto);
64030+extern const char * gr_sockfamily_to_name(unsigned char family);
64031+
64032+static __inline__ int
64033+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
64034+{
64035+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
64036+}
64037+
64038+static __inline__ int
64039+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
64040+ __u16 sport, __u16 dport)
64041+{
64042+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
64043+ sig->gr_sport == sport && sig->gr_dport == dport))
64044+ return 1;
64045+ else
64046+ return 0;
64047+}
64048+
64049+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
64050+{
64051+ struct conn_table_entry **match;
64052+ unsigned int index;
64053+
64054+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64055+ sig->gr_sport, sig->gr_dport,
64056+ gr_conn_table_size);
64057+
64058+ newent->sig = sig;
64059+
64060+ match = &gr_conn_table[index];
64061+ newent->next = *match;
64062+ *match = newent;
64063+
64064+ return;
64065+}
64066+
64067+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
64068+{
64069+ struct conn_table_entry *match, *last = NULL;
64070+ unsigned int index;
64071+
64072+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
64073+ sig->gr_sport, sig->gr_dport,
64074+ gr_conn_table_size);
64075+
64076+ match = gr_conn_table[index];
64077+ while (match && !conn_match(match->sig,
64078+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
64079+ sig->gr_dport)) {
64080+ last = match;
64081+ match = match->next;
64082+ }
64083+
64084+ if (match) {
64085+ if (last)
64086+ last->next = match->next;
64087+ else
64088+ gr_conn_table[index] = NULL;
64089+ kfree(match);
64090+ }
64091+
64092+ return;
64093+}
64094+
64095+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64096+ __u16 sport, __u16 dport)
64097+{
64098+ struct conn_table_entry *match;
64099+ unsigned int index;
64100+
64101+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64102+
64103+ match = gr_conn_table[index];
64104+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64105+ match = match->next;
64106+
64107+ if (match)
64108+ return match->sig;
64109+ else
64110+ return NULL;
64111+}
64112+
64113+#endif
64114+
64115+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64116+{
64117+#ifdef CONFIG_GRKERNSEC
64118+ struct signal_struct *sig = task->signal;
64119+ struct conn_table_entry *newent;
64120+
64121+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64122+ if (newent == NULL)
64123+ return;
64124+ /* no bh lock needed since we are called with bh disabled */
64125+ spin_lock(&gr_conn_table_lock);
64126+ gr_del_task_from_ip_table_nolock(sig);
64127+ sig->gr_saddr = inet->rcv_saddr;
64128+ sig->gr_daddr = inet->daddr;
64129+ sig->gr_sport = inet->sport;
64130+ sig->gr_dport = inet->dport;
64131+ gr_add_to_task_ip_table_nolock(sig, newent);
64132+ spin_unlock(&gr_conn_table_lock);
64133+#endif
64134+ return;
64135+}
64136+
64137+void gr_del_task_from_ip_table(struct task_struct *task)
64138+{
64139+#ifdef CONFIG_GRKERNSEC
64140+ spin_lock_bh(&gr_conn_table_lock);
64141+ gr_del_task_from_ip_table_nolock(task->signal);
64142+ spin_unlock_bh(&gr_conn_table_lock);
64143+#endif
64144+ return;
64145+}
64146+
64147+void
64148+gr_attach_curr_ip(const struct sock *sk)
64149+{
64150+#ifdef CONFIG_GRKERNSEC
64151+ struct signal_struct *p, *set;
64152+ const struct inet_sock *inet = inet_sk(sk);
64153+
64154+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64155+ return;
64156+
64157+ set = current->signal;
64158+
64159+ spin_lock_bh(&gr_conn_table_lock);
64160+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64161+ inet->dport, inet->sport);
64162+ if (unlikely(p != NULL)) {
64163+ set->curr_ip = p->curr_ip;
64164+ set->used_accept = 1;
64165+ gr_del_task_from_ip_table_nolock(p);
64166+ spin_unlock_bh(&gr_conn_table_lock);
64167+ return;
64168+ }
64169+ spin_unlock_bh(&gr_conn_table_lock);
64170+
64171+ set->curr_ip = inet->daddr;
64172+ set->used_accept = 1;
64173+#endif
64174+ return;
64175+}
64176+
64177+int
64178+gr_handle_sock_all(const int family, const int type, const int protocol)
64179+{
64180+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64181+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64182+ (family != AF_UNIX)) {
64183+ if (family == AF_INET)
64184+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64185+ else
64186+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64187+ return -EACCES;
64188+ }
64189+#endif
64190+ return 0;
64191+}
64192+
64193+int
64194+gr_handle_sock_server(const struct sockaddr *sck)
64195+{
64196+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64197+ if (grsec_enable_socket_server &&
64198+ in_group_p(grsec_socket_server_gid) &&
64199+ sck && (sck->sa_family != AF_UNIX) &&
64200+ (sck->sa_family != AF_LOCAL)) {
64201+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64202+ return -EACCES;
64203+ }
64204+#endif
64205+ return 0;
64206+}
64207+
64208+int
64209+gr_handle_sock_server_other(const struct sock *sck)
64210+{
64211+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64212+ if (grsec_enable_socket_server &&
64213+ in_group_p(grsec_socket_server_gid) &&
64214+ sck && (sck->sk_family != AF_UNIX) &&
64215+ (sck->sk_family != AF_LOCAL)) {
64216+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64217+ return -EACCES;
64218+ }
64219+#endif
64220+ return 0;
64221+}
64222+
64223+int
64224+gr_handle_sock_client(const struct sockaddr *sck)
64225+{
64226+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64227+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64228+ sck && (sck->sa_family != AF_UNIX) &&
64229+ (sck->sa_family != AF_LOCAL)) {
64230+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64231+ return -EACCES;
64232+ }
64233+#endif
64234+ return 0;
64235+}
64236+
64237+kernel_cap_t
64238+gr_cap_rtnetlink(struct sock *sock)
64239+{
64240+#ifdef CONFIG_GRKERNSEC
64241+ if (!gr_acl_is_enabled())
64242+ return current_cap();
64243+ else if (sock->sk_protocol == NETLINK_ISCSI &&
64244+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64245+ gr_is_capable(CAP_SYS_ADMIN))
64246+ return current_cap();
64247+ else if (sock->sk_protocol == NETLINK_AUDIT &&
64248+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64249+ gr_is_capable(CAP_AUDIT_WRITE) &&
64250+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64251+ gr_is_capable(CAP_AUDIT_CONTROL))
64252+ return current_cap();
64253+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64254+ ((sock->sk_protocol == NETLINK_ROUTE) ?
64255+ gr_is_capable_nolog(CAP_NET_ADMIN) :
64256+ gr_is_capable(CAP_NET_ADMIN)))
64257+ return current_cap();
64258+ else
64259+ return __cap_empty_set;
64260+#else
64261+ return current_cap();
64262+#endif
64263+}
64264diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64265new file mode 100644
64266index 0000000..31f3258
64267--- /dev/null
64268+++ b/grsecurity/grsec_sysctl.c
64269@@ -0,0 +1,499 @@
64270+#include <linux/kernel.h>
64271+#include <linux/sched.h>
64272+#include <linux/sysctl.h>
64273+#include <linux/grsecurity.h>
64274+#include <linux/grinternal.h>
64275+
64276+int
64277+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64278+{
64279+#ifdef CONFIG_GRKERNSEC_SYSCTL
64280+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64281+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64282+ return -EACCES;
64283+ }
64284+#endif
64285+ return 0;
64286+}
64287+
64288+#ifdef CONFIG_GRKERNSEC_ROFS
64289+static int __maybe_unused one = 1;
64290+#endif
64291+
64292+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64293+ctl_table grsecurity_table[] = {
64294+#ifdef CONFIG_GRKERNSEC_SYSCTL
64295+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64296+#ifdef CONFIG_GRKERNSEC_IO
64297+ {
64298+ .ctl_name = CTL_UNNUMBERED,
64299+ .procname = "disable_priv_io",
64300+ .data = &grsec_disable_privio,
64301+ .maxlen = sizeof(int),
64302+ .mode = 0600,
64303+ .proc_handler = &proc_dointvec,
64304+ },
64305+#endif
64306+#endif
64307+#ifdef CONFIG_GRKERNSEC_LINK
64308+ {
64309+ .ctl_name = CTL_UNNUMBERED,
64310+ .procname = "linking_restrictions",
64311+ .data = &grsec_enable_link,
64312+ .maxlen = sizeof(int),
64313+ .mode = 0600,
64314+ .proc_handler = &proc_dointvec,
64315+ },
64316+#endif
64317+#ifdef CONFIG_GRKERNSEC_BRUTE
64318+ {
64319+ .ctl_name = CTL_UNNUMBERED,
64320+ .procname = "deter_bruteforce",
64321+ .data = &grsec_enable_brute,
64322+ .maxlen = sizeof(int),
64323+ .mode = 0600,
64324+ .proc_handler = &proc_dointvec,
64325+ },
64326+#endif
64327+#ifdef CONFIG_GRKERNSEC_FIFO
64328+ {
64329+ .ctl_name = CTL_UNNUMBERED,
64330+ .procname = "fifo_restrictions",
64331+ .data = &grsec_enable_fifo,
64332+ .maxlen = sizeof(int),
64333+ .mode = 0600,
64334+ .proc_handler = &proc_dointvec,
64335+ },
64336+#endif
64337+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64338+ {
64339+ .ctl_name = CTL_UNNUMBERED,
64340+ .procname = "ptrace_readexec",
64341+ .data = &grsec_enable_ptrace_readexec,
64342+ .maxlen = sizeof(int),
64343+ .mode = 0600,
64344+ .proc_handler = &proc_dointvec,
64345+ },
64346+#endif
64347+#ifdef CONFIG_GRKERNSEC_SETXID
64348+ {
64349+ .ctl_name = CTL_UNNUMBERED,
64350+ .procname = "consistent_setxid",
64351+ .data = &grsec_enable_setxid,
64352+ .maxlen = sizeof(int),
64353+ .mode = 0600,
64354+ .proc_handler = &proc_dointvec,
64355+ },
64356+#endif
64357+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64358+ {
64359+ .ctl_name = CTL_UNNUMBERED,
64360+ .procname = "ip_blackhole",
64361+ .data = &grsec_enable_blackhole,
64362+ .maxlen = sizeof(int),
64363+ .mode = 0600,
64364+ .proc_handler = &proc_dointvec,
64365+ },
64366+ {
64367+ .ctl_name = CTL_UNNUMBERED,
64368+ .procname = "lastack_retries",
64369+ .data = &grsec_lastack_retries,
64370+ .maxlen = sizeof(int),
64371+ .mode = 0600,
64372+ .proc_handler = &proc_dointvec,
64373+ },
64374+#endif
64375+#ifdef CONFIG_GRKERNSEC_EXECLOG
64376+ {
64377+ .ctl_name = CTL_UNNUMBERED,
64378+ .procname = "exec_logging",
64379+ .data = &grsec_enable_execlog,
64380+ .maxlen = sizeof(int),
64381+ .mode = 0600,
64382+ .proc_handler = &proc_dointvec,
64383+ },
64384+#endif
64385+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64386+ {
64387+ .ctl_name = CTL_UNNUMBERED,
64388+ .procname = "rwxmap_logging",
64389+ .data = &grsec_enable_log_rwxmaps,
64390+ .maxlen = sizeof(int),
64391+ .mode = 0600,
64392+ .proc_handler = &proc_dointvec,
64393+ },
64394+#endif
64395+#ifdef CONFIG_GRKERNSEC_SIGNAL
64396+ {
64397+ .ctl_name = CTL_UNNUMBERED,
64398+ .procname = "signal_logging",
64399+ .data = &grsec_enable_signal,
64400+ .maxlen = sizeof(int),
64401+ .mode = 0600,
64402+ .proc_handler = &proc_dointvec,
64403+ },
64404+#endif
64405+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64406+ {
64407+ .ctl_name = CTL_UNNUMBERED,
64408+ .procname = "forkfail_logging",
64409+ .data = &grsec_enable_forkfail,
64410+ .maxlen = sizeof(int),
64411+ .mode = 0600,
64412+ .proc_handler = &proc_dointvec,
64413+ },
64414+#endif
64415+#ifdef CONFIG_GRKERNSEC_TIME
64416+ {
64417+ .ctl_name = CTL_UNNUMBERED,
64418+ .procname = "timechange_logging",
64419+ .data = &grsec_enable_time,
64420+ .maxlen = sizeof(int),
64421+ .mode = 0600,
64422+ .proc_handler = &proc_dointvec,
64423+ },
64424+#endif
64425+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64426+ {
64427+ .ctl_name = CTL_UNNUMBERED,
64428+ .procname = "chroot_deny_shmat",
64429+ .data = &grsec_enable_chroot_shmat,
64430+ .maxlen = sizeof(int),
64431+ .mode = 0600,
64432+ .proc_handler = &proc_dointvec,
64433+ },
64434+#endif
64435+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64436+ {
64437+ .ctl_name = CTL_UNNUMBERED,
64438+ .procname = "chroot_deny_unix",
64439+ .data = &grsec_enable_chroot_unix,
64440+ .maxlen = sizeof(int),
64441+ .mode = 0600,
64442+ .proc_handler = &proc_dointvec,
64443+ },
64444+#endif
64445+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64446+ {
64447+ .ctl_name = CTL_UNNUMBERED,
64448+ .procname = "chroot_deny_mount",
64449+ .data = &grsec_enable_chroot_mount,
64450+ .maxlen = sizeof(int),
64451+ .mode = 0600,
64452+ .proc_handler = &proc_dointvec,
64453+ },
64454+#endif
64455+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64456+ {
64457+ .ctl_name = CTL_UNNUMBERED,
64458+ .procname = "chroot_deny_fchdir",
64459+ .data = &grsec_enable_chroot_fchdir,
64460+ .maxlen = sizeof(int),
64461+ .mode = 0600,
64462+ .proc_handler = &proc_dointvec,
64463+ },
64464+#endif
64465+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64466+ {
64467+ .ctl_name = CTL_UNNUMBERED,
64468+ .procname = "chroot_deny_chroot",
64469+ .data = &grsec_enable_chroot_double,
64470+ .maxlen = sizeof(int),
64471+ .mode = 0600,
64472+ .proc_handler = &proc_dointvec,
64473+ },
64474+#endif
64475+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64476+ {
64477+ .ctl_name = CTL_UNNUMBERED,
64478+ .procname = "chroot_deny_pivot",
64479+ .data = &grsec_enable_chroot_pivot,
64480+ .maxlen = sizeof(int),
64481+ .mode = 0600,
64482+ .proc_handler = &proc_dointvec,
64483+ },
64484+#endif
64485+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64486+ {
64487+ .ctl_name = CTL_UNNUMBERED,
64488+ .procname = "chroot_enforce_chdir",
64489+ .data = &grsec_enable_chroot_chdir,
64490+ .maxlen = sizeof(int),
64491+ .mode = 0600,
64492+ .proc_handler = &proc_dointvec,
64493+ },
64494+#endif
64495+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64496+ {
64497+ .ctl_name = CTL_UNNUMBERED,
64498+ .procname = "chroot_deny_chmod",
64499+ .data = &grsec_enable_chroot_chmod,
64500+ .maxlen = sizeof(int),
64501+ .mode = 0600,
64502+ .proc_handler = &proc_dointvec,
64503+ },
64504+#endif
64505+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64506+ {
64507+ .ctl_name = CTL_UNNUMBERED,
64508+ .procname = "chroot_deny_mknod",
64509+ .data = &grsec_enable_chroot_mknod,
64510+ .maxlen = sizeof(int),
64511+ .mode = 0600,
64512+ .proc_handler = &proc_dointvec,
64513+ },
64514+#endif
64515+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64516+ {
64517+ .ctl_name = CTL_UNNUMBERED,
64518+ .procname = "chroot_restrict_nice",
64519+ .data = &grsec_enable_chroot_nice,
64520+ .maxlen = sizeof(int),
64521+ .mode = 0600,
64522+ .proc_handler = &proc_dointvec,
64523+ },
64524+#endif
64525+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64526+ {
64527+ .ctl_name = CTL_UNNUMBERED,
64528+ .procname = "chroot_execlog",
64529+ .data = &grsec_enable_chroot_execlog,
64530+ .maxlen = sizeof(int),
64531+ .mode = 0600,
64532+ .proc_handler = &proc_dointvec,
64533+ },
64534+#endif
64535+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64536+ {
64537+ .ctl_name = CTL_UNNUMBERED,
64538+ .procname = "chroot_caps",
64539+ .data = &grsec_enable_chroot_caps,
64540+ .maxlen = sizeof(int),
64541+ .mode = 0600,
64542+ .proc_handler = &proc_dointvec,
64543+ },
64544+#endif
64545+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64546+ {
64547+ .ctl_name = CTL_UNNUMBERED,
64548+ .procname = "chroot_deny_sysctl",
64549+ .data = &grsec_enable_chroot_sysctl,
64550+ .maxlen = sizeof(int),
64551+ .mode = 0600,
64552+ .proc_handler = &proc_dointvec,
64553+ },
64554+#endif
64555+#ifdef CONFIG_GRKERNSEC_TPE
64556+ {
64557+ .ctl_name = CTL_UNNUMBERED,
64558+ .procname = "tpe",
64559+ .data = &grsec_enable_tpe,
64560+ .maxlen = sizeof(int),
64561+ .mode = 0600,
64562+ .proc_handler = &proc_dointvec,
64563+ },
64564+ {
64565+ .ctl_name = CTL_UNNUMBERED,
64566+ .procname = "tpe_gid",
64567+ .data = &grsec_tpe_gid,
64568+ .maxlen = sizeof(int),
64569+ .mode = 0600,
64570+ .proc_handler = &proc_dointvec,
64571+ },
64572+#endif
64573+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64574+ {
64575+ .ctl_name = CTL_UNNUMBERED,
64576+ .procname = "tpe_invert",
64577+ .data = &grsec_enable_tpe_invert,
64578+ .maxlen = sizeof(int),
64579+ .mode = 0600,
64580+ .proc_handler = &proc_dointvec,
64581+ },
64582+#endif
64583+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64584+ {
64585+ .ctl_name = CTL_UNNUMBERED,
64586+ .procname = "tpe_restrict_all",
64587+ .data = &grsec_enable_tpe_all,
64588+ .maxlen = sizeof(int),
64589+ .mode = 0600,
64590+ .proc_handler = &proc_dointvec,
64591+ },
64592+#endif
64593+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64594+ {
64595+ .ctl_name = CTL_UNNUMBERED,
64596+ .procname = "socket_all",
64597+ .data = &grsec_enable_socket_all,
64598+ .maxlen = sizeof(int),
64599+ .mode = 0600,
64600+ .proc_handler = &proc_dointvec,
64601+ },
64602+ {
64603+ .ctl_name = CTL_UNNUMBERED,
64604+ .procname = "socket_all_gid",
64605+ .data = &grsec_socket_all_gid,
64606+ .maxlen = sizeof(int),
64607+ .mode = 0600,
64608+ .proc_handler = &proc_dointvec,
64609+ },
64610+#endif
64611+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64612+ {
64613+ .ctl_name = CTL_UNNUMBERED,
64614+ .procname = "socket_client",
64615+ .data = &grsec_enable_socket_client,
64616+ .maxlen = sizeof(int),
64617+ .mode = 0600,
64618+ .proc_handler = &proc_dointvec,
64619+ },
64620+ {
64621+ .ctl_name = CTL_UNNUMBERED,
64622+ .procname = "socket_client_gid",
64623+ .data = &grsec_socket_client_gid,
64624+ .maxlen = sizeof(int),
64625+ .mode = 0600,
64626+ .proc_handler = &proc_dointvec,
64627+ },
64628+#endif
64629+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64630+ {
64631+ .ctl_name = CTL_UNNUMBERED,
64632+ .procname = "socket_server",
64633+ .data = &grsec_enable_socket_server,
64634+ .maxlen = sizeof(int),
64635+ .mode = 0600,
64636+ .proc_handler = &proc_dointvec,
64637+ },
64638+ {
64639+ .ctl_name = CTL_UNNUMBERED,
64640+ .procname = "socket_server_gid",
64641+ .data = &grsec_socket_server_gid,
64642+ .maxlen = sizeof(int),
64643+ .mode = 0600,
64644+ .proc_handler = &proc_dointvec,
64645+ },
64646+#endif
64647+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64648+ {
64649+ .ctl_name = CTL_UNNUMBERED,
64650+ .procname = "audit_group",
64651+ .data = &grsec_enable_group,
64652+ .maxlen = sizeof(int),
64653+ .mode = 0600,
64654+ .proc_handler = &proc_dointvec,
64655+ },
64656+ {
64657+ .ctl_name = CTL_UNNUMBERED,
64658+ .procname = "audit_gid",
64659+ .data = &grsec_audit_gid,
64660+ .maxlen = sizeof(int),
64661+ .mode = 0600,
64662+ .proc_handler = &proc_dointvec,
64663+ },
64664+#endif
64665+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64666+ {
64667+ .ctl_name = CTL_UNNUMBERED,
64668+ .procname = "audit_chdir",
64669+ .data = &grsec_enable_chdir,
64670+ .maxlen = sizeof(int),
64671+ .mode = 0600,
64672+ .proc_handler = &proc_dointvec,
64673+ },
64674+#endif
64675+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64676+ {
64677+ .ctl_name = CTL_UNNUMBERED,
64678+ .procname = "audit_mount",
64679+ .data = &grsec_enable_mount,
64680+ .maxlen = sizeof(int),
64681+ .mode = 0600,
64682+ .proc_handler = &proc_dointvec,
64683+ },
64684+#endif
64685+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64686+ {
64687+ .ctl_name = CTL_UNNUMBERED,
64688+ .procname = "audit_textrel",
64689+ .data = &grsec_enable_audit_textrel,
64690+ .maxlen = sizeof(int),
64691+ .mode = 0600,
64692+ .proc_handler = &proc_dointvec,
64693+ },
64694+#endif
64695+#ifdef CONFIG_GRKERNSEC_DMESG
64696+ {
64697+ .ctl_name = CTL_UNNUMBERED,
64698+ .procname = "dmesg",
64699+ .data = &grsec_enable_dmesg,
64700+ .maxlen = sizeof(int),
64701+ .mode = 0600,
64702+ .proc_handler = &proc_dointvec,
64703+ },
64704+#endif
64705+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64706+ {
64707+ .ctl_name = CTL_UNNUMBERED,
64708+ .procname = "chroot_findtask",
64709+ .data = &grsec_enable_chroot_findtask,
64710+ .maxlen = sizeof(int),
64711+ .mode = 0600,
64712+ .proc_handler = &proc_dointvec,
64713+ },
64714+#endif
64715+#ifdef CONFIG_GRKERNSEC_RESLOG
64716+ {
64717+ .ctl_name = CTL_UNNUMBERED,
64718+ .procname = "resource_logging",
64719+ .data = &grsec_resource_logging,
64720+ .maxlen = sizeof(int),
64721+ .mode = 0600,
64722+ .proc_handler = &proc_dointvec,
64723+ },
64724+#endif
64725+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64726+ {
64727+ .ctl_name = CTL_UNNUMBERED,
64728+ .procname = "audit_ptrace",
64729+ .data = &grsec_enable_audit_ptrace,
64730+ .maxlen = sizeof(int),
64731+ .mode = 0600,
64732+ .proc_handler = &proc_dointvec,
64733+ },
64734+#endif
64735+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64736+ {
64737+ .ctl_name = CTL_UNNUMBERED,
64738+ .procname = "harden_ptrace",
64739+ .data = &grsec_enable_harden_ptrace,
64740+ .maxlen = sizeof(int),
64741+ .mode = 0600,
64742+ .proc_handler = &proc_dointvec,
64743+ },
64744+#endif
64745+ {
64746+ .ctl_name = CTL_UNNUMBERED,
64747+ .procname = "grsec_lock",
64748+ .data = &grsec_lock,
64749+ .maxlen = sizeof(int),
64750+ .mode = 0600,
64751+ .proc_handler = &proc_dointvec,
64752+ },
64753+#endif
64754+#ifdef CONFIG_GRKERNSEC_ROFS
64755+ {
64756+ .ctl_name = CTL_UNNUMBERED,
64757+ .procname = "romount_protect",
64758+ .data = &grsec_enable_rofs,
64759+ .maxlen = sizeof(int),
64760+ .mode = 0600,
64761+ .proc_handler = &proc_dointvec_minmax,
64762+ .extra1 = &one,
64763+ .extra2 = &one,
64764+ },
64765+#endif
64766+ { .ctl_name = 0 }
64767+};
64768+#endif
64769diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
64770new file mode 100644
64771index 0000000..0dc13c3
64772--- /dev/null
64773+++ b/grsecurity/grsec_time.c
64774@@ -0,0 +1,16 @@
64775+#include <linux/kernel.h>
64776+#include <linux/sched.h>
64777+#include <linux/grinternal.h>
64778+#include <linux/module.h>
64779+
64780+void
64781+gr_log_timechange(void)
64782+{
64783+#ifdef CONFIG_GRKERNSEC_TIME
64784+ if (grsec_enable_time)
64785+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
64786+#endif
64787+ return;
64788+}
64789+
64790+EXPORT_SYMBOL(gr_log_timechange);
64791diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
64792new file mode 100644
64793index 0000000..07e0dc0
64794--- /dev/null
64795+++ b/grsecurity/grsec_tpe.c
64796@@ -0,0 +1,73 @@
64797+#include <linux/kernel.h>
64798+#include <linux/sched.h>
64799+#include <linux/file.h>
64800+#include <linux/fs.h>
64801+#include <linux/grinternal.h>
64802+
64803+extern int gr_acl_tpe_check(void);
64804+
64805+int
64806+gr_tpe_allow(const struct file *file)
64807+{
64808+#ifdef CONFIG_GRKERNSEC
64809+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
64810+ const struct cred *cred = current_cred();
64811+ char *msg = NULL;
64812+ char *msg2 = NULL;
64813+
64814+ // never restrict root
64815+ if (!cred->uid)
64816+ return 1;
64817+
64818+ if (grsec_enable_tpe) {
64819+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64820+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
64821+ msg = "not being in trusted group";
64822+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
64823+ msg = "being in untrusted group";
64824+#else
64825+ if (in_group_p(grsec_tpe_gid))
64826+ msg = "being in untrusted group";
64827+#endif
64828+ }
64829+ if (!msg && gr_acl_tpe_check())
64830+ msg = "being in untrusted role";
64831+
64832+ // not in any affected group/role
64833+ if (!msg)
64834+ goto next_check;
64835+
64836+ if (inode->i_uid)
64837+ msg2 = "file in non-root-owned directory";
64838+ else if (inode->i_mode & S_IWOTH)
64839+ msg2 = "file in world-writable directory";
64840+ else if (inode->i_mode & S_IWGRP)
64841+ msg2 = "file in group-writable directory";
64842+
64843+ if (msg && msg2) {
64844+ char fullmsg[70] = {0};
64845+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
64846+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
64847+ return 0;
64848+ }
64849+ msg = NULL;
64850+next_check:
64851+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64852+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
64853+ return 1;
64854+
64855+ if (inode->i_uid && (inode->i_uid != cred->uid))
64856+ msg = "directory not owned by user";
64857+ else if (inode->i_mode & S_IWOTH)
64858+ msg = "file in world-writable directory";
64859+ else if (inode->i_mode & S_IWGRP)
64860+ msg = "file in group-writable directory";
64861+
64862+ if (msg) {
64863+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
64864+ return 0;
64865+ }
64866+#endif
64867+#endif
64868+ return 1;
64869+}
64870diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
64871new file mode 100644
64872index 0000000..9f7b1ac
64873--- /dev/null
64874+++ b/grsecurity/grsum.c
64875@@ -0,0 +1,61 @@
64876+#include <linux/err.h>
64877+#include <linux/kernel.h>
64878+#include <linux/sched.h>
64879+#include <linux/mm.h>
64880+#include <linux/scatterlist.h>
64881+#include <linux/crypto.h>
64882+#include <linux/gracl.h>
64883+
64884+
64885+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
64886+#error "crypto and sha256 must be built into the kernel"
64887+#endif
64888+
64889+int
64890+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
64891+{
64892+ char *p;
64893+ struct crypto_hash *tfm;
64894+ struct hash_desc desc;
64895+ struct scatterlist sg;
64896+ unsigned char temp_sum[GR_SHA_LEN];
64897+ volatile int retval = 0;
64898+ volatile int dummy = 0;
64899+ unsigned int i;
64900+
64901+ sg_init_table(&sg, 1);
64902+
64903+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
64904+ if (IS_ERR(tfm)) {
64905+ /* should never happen, since sha256 should be built in */
64906+ return 1;
64907+ }
64908+
64909+ desc.tfm = tfm;
64910+ desc.flags = 0;
64911+
64912+ crypto_hash_init(&desc);
64913+
64914+ p = salt;
64915+ sg_set_buf(&sg, p, GR_SALT_LEN);
64916+ crypto_hash_update(&desc, &sg, sg.length);
64917+
64918+ p = entry->pw;
64919+ sg_set_buf(&sg, p, strlen(p));
64920+
64921+ crypto_hash_update(&desc, &sg, sg.length);
64922+
64923+ crypto_hash_final(&desc, temp_sum);
64924+
64925+ memset(entry->pw, 0, GR_PW_LEN);
64926+
64927+ for (i = 0; i < GR_SHA_LEN; i++)
64928+ if (sum[i] != temp_sum[i])
64929+ retval = 1;
64930+ else
64931+ dummy = 1; // waste a cycle
64932+
64933+ crypto_free_hash(tfm);
64934+
64935+ return retval;
64936+}
64937diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
64938index 3cd9ccd..fe16d47 100644
64939--- a/include/acpi/acpi_bus.h
64940+++ b/include/acpi/acpi_bus.h
64941@@ -107,7 +107,7 @@ struct acpi_device_ops {
64942 acpi_op_bind bind;
64943 acpi_op_unbind unbind;
64944 acpi_op_notify notify;
64945-};
64946+} __no_const;
64947
64948 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
64949
64950diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
64951index f4906f6..71feb73 100644
64952--- a/include/acpi/acpi_drivers.h
64953+++ b/include/acpi/acpi_drivers.h
64954@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
64955 Dock Station
64956 -------------------------------------------------------------------------- */
64957 struct acpi_dock_ops {
64958- acpi_notify_handler handler;
64959- acpi_notify_handler uevent;
64960+ const acpi_notify_handler handler;
64961+ const acpi_notify_handler uevent;
64962 };
64963
64964 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
64965@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
64966 extern int register_dock_notifier(struct notifier_block *nb);
64967 extern void unregister_dock_notifier(struct notifier_block *nb);
64968 extern int register_hotplug_dock_device(acpi_handle handle,
64969- struct acpi_dock_ops *ops,
64970+ const struct acpi_dock_ops *ops,
64971 void *context);
64972 extern void unregister_hotplug_dock_device(acpi_handle handle);
64973 #else
64974@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
64975 {
64976 }
64977 static inline int register_hotplug_dock_device(acpi_handle handle,
64978- struct acpi_dock_ops *ops,
64979+ const struct acpi_dock_ops *ops,
64980 void *context)
64981 {
64982 return -ENODEV;
64983diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
64984index b7babf0..a9ac9fc 100644
64985--- a/include/asm-generic/atomic-long.h
64986+++ b/include/asm-generic/atomic-long.h
64987@@ -22,6 +22,12 @@
64988
64989 typedef atomic64_t atomic_long_t;
64990
64991+#ifdef CONFIG_PAX_REFCOUNT
64992+typedef atomic64_unchecked_t atomic_long_unchecked_t;
64993+#else
64994+typedef atomic64_t atomic_long_unchecked_t;
64995+#endif
64996+
64997 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
64998
64999 static inline long atomic_long_read(atomic_long_t *l)
65000@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65001 return (long)atomic64_read(v);
65002 }
65003
65004+#ifdef CONFIG_PAX_REFCOUNT
65005+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65006+{
65007+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65008+
65009+ return (long)atomic64_read_unchecked(v);
65010+}
65011+#endif
65012+
65013 static inline void atomic_long_set(atomic_long_t *l, long i)
65014 {
65015 atomic64_t *v = (atomic64_t *)l;
65016@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65017 atomic64_set(v, i);
65018 }
65019
65020+#ifdef CONFIG_PAX_REFCOUNT
65021+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65022+{
65023+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65024+
65025+ atomic64_set_unchecked(v, i);
65026+}
65027+#endif
65028+
65029 static inline void atomic_long_inc(atomic_long_t *l)
65030 {
65031 atomic64_t *v = (atomic64_t *)l;
65032@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65033 atomic64_inc(v);
65034 }
65035
65036+#ifdef CONFIG_PAX_REFCOUNT
65037+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65038+{
65039+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65040+
65041+ atomic64_inc_unchecked(v);
65042+}
65043+#endif
65044+
65045 static inline void atomic_long_dec(atomic_long_t *l)
65046 {
65047 atomic64_t *v = (atomic64_t *)l;
65048@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65049 atomic64_dec(v);
65050 }
65051
65052+#ifdef CONFIG_PAX_REFCOUNT
65053+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65054+{
65055+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65056+
65057+ atomic64_dec_unchecked(v);
65058+}
65059+#endif
65060+
65061 static inline void atomic_long_add(long i, atomic_long_t *l)
65062 {
65063 atomic64_t *v = (atomic64_t *)l;
65064@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65065 atomic64_add(i, v);
65066 }
65067
65068+#ifdef CONFIG_PAX_REFCOUNT
65069+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65070+{
65071+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65072+
65073+ atomic64_add_unchecked(i, v);
65074+}
65075+#endif
65076+
65077 static inline void atomic_long_sub(long i, atomic_long_t *l)
65078 {
65079 atomic64_t *v = (atomic64_t *)l;
65080@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65081 return (long)atomic64_inc_return(v);
65082 }
65083
65084+#ifdef CONFIG_PAX_REFCOUNT
65085+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65086+{
65087+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65088+
65089+ return (long)atomic64_inc_return_unchecked(v);
65090+}
65091+#endif
65092+
65093 static inline long atomic_long_dec_return(atomic_long_t *l)
65094 {
65095 atomic64_t *v = (atomic64_t *)l;
65096@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65097
65098 typedef atomic_t atomic_long_t;
65099
65100+#ifdef CONFIG_PAX_REFCOUNT
65101+typedef atomic_unchecked_t atomic_long_unchecked_t;
65102+#else
65103+typedef atomic_t atomic_long_unchecked_t;
65104+#endif
65105+
65106 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65107 static inline long atomic_long_read(atomic_long_t *l)
65108 {
65109@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65110 return (long)atomic_read(v);
65111 }
65112
65113+#ifdef CONFIG_PAX_REFCOUNT
65114+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65115+{
65116+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65117+
65118+ return (long)atomic_read_unchecked(v);
65119+}
65120+#endif
65121+
65122 static inline void atomic_long_set(atomic_long_t *l, long i)
65123 {
65124 atomic_t *v = (atomic_t *)l;
65125@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65126 atomic_set(v, i);
65127 }
65128
65129+#ifdef CONFIG_PAX_REFCOUNT
65130+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65131+{
65132+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65133+
65134+ atomic_set_unchecked(v, i);
65135+}
65136+#endif
65137+
65138 static inline void atomic_long_inc(atomic_long_t *l)
65139 {
65140 atomic_t *v = (atomic_t *)l;
65141@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65142 atomic_inc(v);
65143 }
65144
65145+#ifdef CONFIG_PAX_REFCOUNT
65146+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65147+{
65148+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65149+
65150+ atomic_inc_unchecked(v);
65151+}
65152+#endif
65153+
65154 static inline void atomic_long_dec(atomic_long_t *l)
65155 {
65156 atomic_t *v = (atomic_t *)l;
65157@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65158 atomic_dec(v);
65159 }
65160
65161+#ifdef CONFIG_PAX_REFCOUNT
65162+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65163+{
65164+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65165+
65166+ atomic_dec_unchecked(v);
65167+}
65168+#endif
65169+
65170 static inline void atomic_long_add(long i, atomic_long_t *l)
65171 {
65172 atomic_t *v = (atomic_t *)l;
65173@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65174 atomic_add(i, v);
65175 }
65176
65177+#ifdef CONFIG_PAX_REFCOUNT
65178+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65179+{
65180+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65181+
65182+ atomic_add_unchecked(i, v);
65183+}
65184+#endif
65185+
65186 static inline void atomic_long_sub(long i, atomic_long_t *l)
65187 {
65188 atomic_t *v = (atomic_t *)l;
65189@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65190 return (long)atomic_inc_return(v);
65191 }
65192
65193+#ifdef CONFIG_PAX_REFCOUNT
65194+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65195+{
65196+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65197+
65198+ return (long)atomic_inc_return_unchecked(v);
65199+}
65200+#endif
65201+
65202 static inline long atomic_long_dec_return(atomic_long_t *l)
65203 {
65204 atomic_t *v = (atomic_t *)l;
65205@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65206
65207 #endif /* BITS_PER_LONG == 64 */
65208
65209+#ifdef CONFIG_PAX_REFCOUNT
65210+static inline void pax_refcount_needs_these_functions(void)
65211+{
65212+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
65213+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65214+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65215+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65216+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65217+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65218+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65219+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65220+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65221+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65222+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65223+
65224+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65225+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65226+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65227+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65228+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65229+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65230+}
65231+#else
65232+#define atomic_read_unchecked(v) atomic_read(v)
65233+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65234+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65235+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65236+#define atomic_inc_unchecked(v) atomic_inc(v)
65237+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65238+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65239+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65240+#define atomic_dec_unchecked(v) atomic_dec(v)
65241+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65242+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65243+
65244+#define atomic_long_read_unchecked(v) atomic_long_read(v)
65245+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65246+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65247+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65248+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65249+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65250+#endif
65251+
65252 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65253diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65254index b18ce4f..2ee2843 100644
65255--- a/include/asm-generic/atomic64.h
65256+++ b/include/asm-generic/atomic64.h
65257@@ -16,6 +16,8 @@ typedef struct {
65258 long long counter;
65259 } atomic64_t;
65260
65261+typedef atomic64_t atomic64_unchecked_t;
65262+
65263 #define ATOMIC64_INIT(i) { (i) }
65264
65265 extern long long atomic64_read(const atomic64_t *v);
65266@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65267 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65268 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65269
65270+#define atomic64_read_unchecked(v) atomic64_read(v)
65271+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65272+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65273+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65274+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65275+#define atomic64_inc_unchecked(v) atomic64_inc(v)
65276+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65277+#define atomic64_dec_unchecked(v) atomic64_dec(v)
65278+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65279+
65280 #endif /* _ASM_GENERIC_ATOMIC64_H */
65281diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65282index d48ddf0..656a0ac 100644
65283--- a/include/asm-generic/bug.h
65284+++ b/include/asm-generic/bug.h
65285@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65286
65287 #else /* !CONFIG_BUG */
65288 #ifndef HAVE_ARCH_BUG
65289-#define BUG() do {} while(0)
65290+#define BUG() do { for (;;) ; } while(0)
65291 #endif
65292
65293 #ifndef HAVE_ARCH_BUG_ON
65294-#define BUG_ON(condition) do { if (condition) ; } while(0)
65295+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65296 #endif
65297
65298 #ifndef HAVE_ARCH_WARN_ON
65299diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65300index 1bfcfe5..e04c5c9 100644
65301--- a/include/asm-generic/cache.h
65302+++ b/include/asm-generic/cache.h
65303@@ -6,7 +6,7 @@
65304 * cache lines need to provide their own cache.h.
65305 */
65306
65307-#define L1_CACHE_SHIFT 5
65308-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65309+#define L1_CACHE_SHIFT 5UL
65310+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65311
65312 #endif /* __ASM_GENERIC_CACHE_H */
65313diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65314index 6920695..41038bc 100644
65315--- a/include/asm-generic/dma-mapping-common.h
65316+++ b/include/asm-generic/dma-mapping-common.h
65317@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65318 enum dma_data_direction dir,
65319 struct dma_attrs *attrs)
65320 {
65321- struct dma_map_ops *ops = get_dma_ops(dev);
65322+ const struct dma_map_ops *ops = get_dma_ops(dev);
65323 dma_addr_t addr;
65324
65325 kmemcheck_mark_initialized(ptr, size);
65326@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65327 enum dma_data_direction dir,
65328 struct dma_attrs *attrs)
65329 {
65330- struct dma_map_ops *ops = get_dma_ops(dev);
65331+ const struct dma_map_ops *ops = get_dma_ops(dev);
65332
65333 BUG_ON(!valid_dma_direction(dir));
65334 if (ops->unmap_page)
65335@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65336 int nents, enum dma_data_direction dir,
65337 struct dma_attrs *attrs)
65338 {
65339- struct dma_map_ops *ops = get_dma_ops(dev);
65340+ const struct dma_map_ops *ops = get_dma_ops(dev);
65341 int i, ents;
65342 struct scatterlist *s;
65343
65344@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65345 int nents, enum dma_data_direction dir,
65346 struct dma_attrs *attrs)
65347 {
65348- struct dma_map_ops *ops = get_dma_ops(dev);
65349+ const struct dma_map_ops *ops = get_dma_ops(dev);
65350
65351 BUG_ON(!valid_dma_direction(dir));
65352 debug_dma_unmap_sg(dev, sg, nents, dir);
65353@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65354 size_t offset, size_t size,
65355 enum dma_data_direction dir)
65356 {
65357- struct dma_map_ops *ops = get_dma_ops(dev);
65358+ const struct dma_map_ops *ops = get_dma_ops(dev);
65359 dma_addr_t addr;
65360
65361 kmemcheck_mark_initialized(page_address(page) + offset, size);
65362@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65363 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65364 size_t size, enum dma_data_direction dir)
65365 {
65366- struct dma_map_ops *ops = get_dma_ops(dev);
65367+ const struct dma_map_ops *ops = get_dma_ops(dev);
65368
65369 BUG_ON(!valid_dma_direction(dir));
65370 if (ops->unmap_page)
65371@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65372 size_t size,
65373 enum dma_data_direction dir)
65374 {
65375- struct dma_map_ops *ops = get_dma_ops(dev);
65376+ const struct dma_map_ops *ops = get_dma_ops(dev);
65377
65378 BUG_ON(!valid_dma_direction(dir));
65379 if (ops->sync_single_for_cpu)
65380@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
65381 dma_addr_t addr, size_t size,
65382 enum dma_data_direction dir)
65383 {
65384- struct dma_map_ops *ops = get_dma_ops(dev);
65385+ const struct dma_map_ops *ops = get_dma_ops(dev);
65386
65387 BUG_ON(!valid_dma_direction(dir));
65388 if (ops->sync_single_for_device)
65389@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
65390 size_t size,
65391 enum dma_data_direction dir)
65392 {
65393- struct dma_map_ops *ops = get_dma_ops(dev);
65394+ const struct dma_map_ops *ops = get_dma_ops(dev);
65395
65396 BUG_ON(!valid_dma_direction(dir));
65397 if (ops->sync_single_range_for_cpu) {
65398@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
65399 size_t size,
65400 enum dma_data_direction dir)
65401 {
65402- struct dma_map_ops *ops = get_dma_ops(dev);
65403+ const struct dma_map_ops *ops = get_dma_ops(dev);
65404
65405 BUG_ON(!valid_dma_direction(dir));
65406 if (ops->sync_single_range_for_device) {
65407@@ -155,7 +155,7 @@ static inline void
65408 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
65409 int nelems, enum dma_data_direction dir)
65410 {
65411- struct dma_map_ops *ops = get_dma_ops(dev);
65412+ const struct dma_map_ops *ops = get_dma_ops(dev);
65413
65414 BUG_ON(!valid_dma_direction(dir));
65415 if (ops->sync_sg_for_cpu)
65416@@ -167,7 +167,7 @@ static inline void
65417 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
65418 int nelems, enum dma_data_direction dir)
65419 {
65420- struct dma_map_ops *ops = get_dma_ops(dev);
65421+ const struct dma_map_ops *ops = get_dma_ops(dev);
65422
65423 BUG_ON(!valid_dma_direction(dir));
65424 if (ops->sync_sg_for_device)
65425diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65426index 0d68a1e..b74a761 100644
65427--- a/include/asm-generic/emergency-restart.h
65428+++ b/include/asm-generic/emergency-restart.h
65429@@ -1,7 +1,7 @@
65430 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65431 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65432
65433-static inline void machine_emergency_restart(void)
65434+static inline __noreturn void machine_emergency_restart(void)
65435 {
65436 machine_restart(NULL);
65437 }
65438diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
65439index 3c2344f..4590a7d 100644
65440--- a/include/asm-generic/futex.h
65441+++ b/include/asm-generic/futex.h
65442@@ -6,7 +6,7 @@
65443 #include <asm/errno.h>
65444
65445 static inline int
65446-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65447+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
65448 {
65449 int op = (encoded_op >> 28) & 7;
65450 int cmp = (encoded_op >> 24) & 15;
65451@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65452 }
65453
65454 static inline int
65455-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
65456+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
65457 {
65458 return -ENOSYS;
65459 }
65460diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
65461index 1ca3efc..e3dc852 100644
65462--- a/include/asm-generic/int-l64.h
65463+++ b/include/asm-generic/int-l64.h
65464@@ -46,6 +46,8 @@ typedef unsigned int u32;
65465 typedef signed long s64;
65466 typedef unsigned long u64;
65467
65468+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
65469+
65470 #define S8_C(x) x
65471 #define U8_C(x) x ## U
65472 #define S16_C(x) x
65473diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
65474index f394147..b6152b9 100644
65475--- a/include/asm-generic/int-ll64.h
65476+++ b/include/asm-generic/int-ll64.h
65477@@ -51,6 +51,8 @@ typedef unsigned int u32;
65478 typedef signed long long s64;
65479 typedef unsigned long long u64;
65480
65481+typedef unsigned long long intoverflow_t;
65482+
65483 #define S8_C(x) x
65484 #define U8_C(x) x ## U
65485 #define S16_C(x) x
65486diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65487index e5f234a..cdb16b3 100644
65488--- a/include/asm-generic/kmap_types.h
65489+++ b/include/asm-generic/kmap_types.h
65490@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
65491 KMAP_D(16) KM_IRQ_PTE,
65492 KMAP_D(17) KM_NMI,
65493 KMAP_D(18) KM_NMI_PTE,
65494-KMAP_D(19) KM_TYPE_NR
65495+KMAP_D(19) KM_CLEARPAGE,
65496+KMAP_D(20) KM_TYPE_NR
65497 };
65498
65499 #undef KMAP_D
65500diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
65501index 725612b..9cc513a 100644
65502--- a/include/asm-generic/pgtable-nopmd.h
65503+++ b/include/asm-generic/pgtable-nopmd.h
65504@@ -1,14 +1,19 @@
65505 #ifndef _PGTABLE_NOPMD_H
65506 #define _PGTABLE_NOPMD_H
65507
65508-#ifndef __ASSEMBLY__
65509-
65510 #include <asm-generic/pgtable-nopud.h>
65511
65512-struct mm_struct;
65513-
65514 #define __PAGETABLE_PMD_FOLDED
65515
65516+#define PMD_SHIFT PUD_SHIFT
65517+#define PTRS_PER_PMD 1
65518+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
65519+#define PMD_MASK (~(PMD_SIZE-1))
65520+
65521+#ifndef __ASSEMBLY__
65522+
65523+struct mm_struct;
65524+
65525 /*
65526 * Having the pmd type consist of a pud gets the size right, and allows
65527 * us to conceptually access the pud entry that this pmd is folded into
65528@@ -16,11 +21,6 @@ struct mm_struct;
65529 */
65530 typedef struct { pud_t pud; } pmd_t;
65531
65532-#define PMD_SHIFT PUD_SHIFT
65533-#define PTRS_PER_PMD 1
65534-#define PMD_SIZE (1UL << PMD_SHIFT)
65535-#define PMD_MASK (~(PMD_SIZE-1))
65536-
65537 /*
65538 * The "pud_xxx()" functions here are trivial for a folded two-level
65539 * setup: the pmd is never bad, and a pmd always exists (as it's folded
65540diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
65541index 810431d..ccc3638 100644
65542--- a/include/asm-generic/pgtable-nopud.h
65543+++ b/include/asm-generic/pgtable-nopud.h
65544@@ -1,10 +1,15 @@
65545 #ifndef _PGTABLE_NOPUD_H
65546 #define _PGTABLE_NOPUD_H
65547
65548-#ifndef __ASSEMBLY__
65549-
65550 #define __PAGETABLE_PUD_FOLDED
65551
65552+#define PUD_SHIFT PGDIR_SHIFT
65553+#define PTRS_PER_PUD 1
65554+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
65555+#define PUD_MASK (~(PUD_SIZE-1))
65556+
65557+#ifndef __ASSEMBLY__
65558+
65559 /*
65560 * Having the pud type consist of a pgd gets the size right, and allows
65561 * us to conceptually access the pgd entry that this pud is folded into
65562@@ -12,11 +17,6 @@
65563 */
65564 typedef struct { pgd_t pgd; } pud_t;
65565
65566-#define PUD_SHIFT PGDIR_SHIFT
65567-#define PTRS_PER_PUD 1
65568-#define PUD_SIZE (1UL << PUD_SHIFT)
65569-#define PUD_MASK (~(PUD_SIZE-1))
65570-
65571 /*
65572 * The "pgd_xxx()" functions here are trivial for a folded two-level
65573 * setup: the pud is never bad, and a pud always exists (as it's folded
65574diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
65575index e2bd73e..fea8ed3 100644
65576--- a/include/asm-generic/pgtable.h
65577+++ b/include/asm-generic/pgtable.h
65578@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
65579 unsigned long size);
65580 #endif
65581
65582+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
65583+static inline unsigned long pax_open_kernel(void) { return 0; }
65584+#endif
65585+
65586+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
65587+static inline unsigned long pax_close_kernel(void) { return 0; }
65588+#endif
65589+
65590 #endif /* !__ASSEMBLY__ */
65591
65592 #endif /* _ASM_GENERIC_PGTABLE_H */
65593diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
65594index b6e818f..21aa58a 100644
65595--- a/include/asm-generic/vmlinux.lds.h
65596+++ b/include/asm-generic/vmlinux.lds.h
65597@@ -199,6 +199,7 @@
65598 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
65599 VMLINUX_SYMBOL(__start_rodata) = .; \
65600 *(.rodata) *(.rodata.*) \
65601+ *(.data.read_only) \
65602 *(__vermagic) /* Kernel version magic */ \
65603 *(__markers_strings) /* Markers: strings */ \
65604 *(__tracepoints_strings)/* Tracepoints: strings */ \
65605@@ -656,22 +657,24 @@
65606 * section in the linker script will go there too. @phdr should have
65607 * a leading colon.
65608 *
65609- * Note that this macros defines __per_cpu_load as an absolute symbol.
65610+ * Note that this macros defines per_cpu_load as an absolute symbol.
65611 * If there is no need to put the percpu section at a predetermined
65612 * address, use PERCPU().
65613 */
65614 #define PERCPU_VADDR(vaddr, phdr) \
65615- VMLINUX_SYMBOL(__per_cpu_load) = .; \
65616- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
65617+ per_cpu_load = .; \
65618+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
65619 - LOAD_OFFSET) { \
65620+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
65621 VMLINUX_SYMBOL(__per_cpu_start) = .; \
65622 *(.data.percpu.first) \
65623- *(.data.percpu.page_aligned) \
65624 *(.data.percpu) \
65625+ . = ALIGN(PAGE_SIZE); \
65626+ *(.data.percpu.page_aligned) \
65627 *(.data.percpu.shared_aligned) \
65628 VMLINUX_SYMBOL(__per_cpu_end) = .; \
65629 } phdr \
65630- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
65631+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
65632
65633 /**
65634 * PERCPU - define output section for percpu area, simple version
65635diff --git a/include/drm/drmP.h b/include/drm/drmP.h
65636index ebab6a6..351dba1 100644
65637--- a/include/drm/drmP.h
65638+++ b/include/drm/drmP.h
65639@@ -71,6 +71,7 @@
65640 #include <linux/workqueue.h>
65641 #include <linux/poll.h>
65642 #include <asm/pgalloc.h>
65643+#include <asm/local.h>
65644 #include "drm.h"
65645
65646 #include <linux/idr.h>
65647@@ -814,7 +815,7 @@ struct drm_driver {
65648 void (*vgaarb_irq)(struct drm_device *dev, bool state);
65649
65650 /* Driver private ops for this object */
65651- struct vm_operations_struct *gem_vm_ops;
65652+ const struct vm_operations_struct *gem_vm_ops;
65653
65654 int major;
65655 int minor;
65656@@ -917,7 +918,7 @@ struct drm_device {
65657
65658 /** \name Usage Counters */
65659 /*@{ */
65660- int open_count; /**< Outstanding files open */
65661+ local_t open_count; /**< Outstanding files open */
65662 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
65663 atomic_t vma_count; /**< Outstanding vma areas open */
65664 int buf_use; /**< Buffers in use -- cannot alloc */
65665@@ -928,7 +929,7 @@ struct drm_device {
65666 /*@{ */
65667 unsigned long counters;
65668 enum drm_stat_type types[15];
65669- atomic_t counts[15];
65670+ atomic_unchecked_t counts[15];
65671 /*@} */
65672
65673 struct list_head filelist;
65674@@ -1016,7 +1017,7 @@ struct drm_device {
65675 struct pci_controller *hose;
65676 #endif
65677 struct drm_sg_mem *sg; /**< Scatter gather memory */
65678- unsigned int num_crtcs; /**< Number of CRTCs on this device */
65679+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
65680 void *dev_private; /**< device private data */
65681 void *mm_private;
65682 struct address_space *dev_mapping;
65683@@ -1042,11 +1043,11 @@ struct drm_device {
65684 spinlock_t object_name_lock;
65685 struct idr object_name_idr;
65686 atomic_t object_count;
65687- atomic_t object_memory;
65688+ atomic_unchecked_t object_memory;
65689 atomic_t pin_count;
65690- atomic_t pin_memory;
65691+ atomic_unchecked_t pin_memory;
65692 atomic_t gtt_count;
65693- atomic_t gtt_memory;
65694+ atomic_unchecked_t gtt_memory;
65695 uint32_t gtt_total;
65696 uint32_t invalidate_domains; /* domains pending invalidation */
65697 uint32_t flush_domains; /* domains pending flush */
65698diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
65699index b29e201..3413cc9 100644
65700--- a/include/drm/drm_crtc_helper.h
65701+++ b/include/drm/drm_crtc_helper.h
65702@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
65703
65704 /* reload the current crtc LUT */
65705 void (*load_lut)(struct drm_crtc *crtc);
65706-};
65707+} __no_const;
65708
65709 struct drm_encoder_helper_funcs {
65710 void (*dpms)(struct drm_encoder *encoder, int mode);
65711@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
65712 struct drm_connector *connector);
65713 /* disable encoder when not in use - more explicit than dpms off */
65714 void (*disable)(struct drm_encoder *encoder);
65715-};
65716+} __no_const;
65717
65718 struct drm_connector_helper_funcs {
65719 int (*get_modes)(struct drm_connector *connector);
65720diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
65721index b199170..6f9e64c 100644
65722--- a/include/drm/ttm/ttm_memory.h
65723+++ b/include/drm/ttm/ttm_memory.h
65724@@ -47,7 +47,7 @@
65725
65726 struct ttm_mem_shrink {
65727 int (*do_shrink) (struct ttm_mem_shrink *);
65728-};
65729+} __no_const;
65730
65731 /**
65732 * struct ttm_mem_global - Global memory accounting structure.
65733diff --git a/include/linux/a.out.h b/include/linux/a.out.h
65734index e86dfca..40cc55f 100644
65735--- a/include/linux/a.out.h
65736+++ b/include/linux/a.out.h
65737@@ -39,6 +39,14 @@ enum machine_type {
65738 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
65739 };
65740
65741+/* Constants for the N_FLAGS field */
65742+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65743+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
65744+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
65745+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
65746+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65747+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65748+
65749 #if !defined (N_MAGIC)
65750 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
65751 #endif
65752diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
65753index 817b237..62c10bc 100644
65754--- a/include/linux/atmdev.h
65755+++ b/include/linux/atmdev.h
65756@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
65757 #endif
65758
65759 struct k_atm_aal_stats {
65760-#define __HANDLE_ITEM(i) atomic_t i
65761+#define __HANDLE_ITEM(i) atomic_unchecked_t i
65762 __AAL_STAT_ITEMS
65763 #undef __HANDLE_ITEM
65764 };
65765diff --git a/include/linux/backlight.h b/include/linux/backlight.h
65766index 0f5f578..8c4f884 100644
65767--- a/include/linux/backlight.h
65768+++ b/include/linux/backlight.h
65769@@ -36,18 +36,18 @@ struct backlight_device;
65770 struct fb_info;
65771
65772 struct backlight_ops {
65773- unsigned int options;
65774+ const unsigned int options;
65775
65776 #define BL_CORE_SUSPENDRESUME (1 << 0)
65777
65778 /* Notify the backlight driver some property has changed */
65779- int (*update_status)(struct backlight_device *);
65780+ int (* const update_status)(struct backlight_device *);
65781 /* Return the current backlight brightness (accounting for power,
65782 fb_blank etc.) */
65783- int (*get_brightness)(struct backlight_device *);
65784+ int (* const get_brightness)(struct backlight_device *);
65785 /* Check if given framebuffer device is the one bound to this backlight;
65786 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
65787- int (*check_fb)(struct fb_info *);
65788+ int (* const check_fb)(struct fb_info *);
65789 };
65790
65791 /* This structure defines all the properties of a backlight */
65792@@ -86,7 +86,7 @@ struct backlight_device {
65793 registered this device has been unloaded, and if class_get_devdata()
65794 points to something in the body of that driver, it is also invalid. */
65795 struct mutex ops_lock;
65796- struct backlight_ops *ops;
65797+ const struct backlight_ops *ops;
65798
65799 /* The framebuffer notifier block */
65800 struct notifier_block fb_notif;
65801@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
65802 }
65803
65804 extern struct backlight_device *backlight_device_register(const char *name,
65805- struct device *dev, void *devdata, struct backlight_ops *ops);
65806+ struct device *dev, void *devdata, const struct backlight_ops *ops);
65807 extern void backlight_device_unregister(struct backlight_device *bd);
65808 extern void backlight_force_update(struct backlight_device *bd,
65809 enum backlight_update_reason reason);
65810diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
65811index a3d802e..482f69c 100644
65812--- a/include/linux/binfmts.h
65813+++ b/include/linux/binfmts.h
65814@@ -83,6 +83,7 @@ struct linux_binfmt {
65815 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
65816 int (*load_shlib)(struct file *);
65817 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
65818+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
65819 unsigned long min_coredump; /* minimal dump size */
65820 int hasvdso;
65821 };
65822diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
65823index 5eb6cb0..a2906d2 100644
65824--- a/include/linux/blkdev.h
65825+++ b/include/linux/blkdev.h
65826@@ -1281,7 +1281,7 @@ struct block_device_operations {
65827 int (*revalidate_disk) (struct gendisk *);
65828 int (*getgeo)(struct block_device *, struct hd_geometry *);
65829 struct module *owner;
65830-};
65831+} __do_const;
65832
65833 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
65834 unsigned long);
65835diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
65836index 3b73b99..629d21b 100644
65837--- a/include/linux/blktrace_api.h
65838+++ b/include/linux/blktrace_api.h
65839@@ -160,7 +160,7 @@ struct blk_trace {
65840 struct dentry *dir;
65841 struct dentry *dropped_file;
65842 struct dentry *msg_file;
65843- atomic_t dropped;
65844+ atomic_unchecked_t dropped;
65845 };
65846
65847 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
65848diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
65849index 83195fb..0b0f77d 100644
65850--- a/include/linux/byteorder/little_endian.h
65851+++ b/include/linux/byteorder/little_endian.h
65852@@ -42,51 +42,51 @@
65853
65854 static inline __le64 __cpu_to_le64p(const __u64 *p)
65855 {
65856- return (__force __le64)*p;
65857+ return (__force const __le64)*p;
65858 }
65859 static inline __u64 __le64_to_cpup(const __le64 *p)
65860 {
65861- return (__force __u64)*p;
65862+ return (__force const __u64)*p;
65863 }
65864 static inline __le32 __cpu_to_le32p(const __u32 *p)
65865 {
65866- return (__force __le32)*p;
65867+ return (__force const __le32)*p;
65868 }
65869 static inline __u32 __le32_to_cpup(const __le32 *p)
65870 {
65871- return (__force __u32)*p;
65872+ return (__force const __u32)*p;
65873 }
65874 static inline __le16 __cpu_to_le16p(const __u16 *p)
65875 {
65876- return (__force __le16)*p;
65877+ return (__force const __le16)*p;
65878 }
65879 static inline __u16 __le16_to_cpup(const __le16 *p)
65880 {
65881- return (__force __u16)*p;
65882+ return (__force const __u16)*p;
65883 }
65884 static inline __be64 __cpu_to_be64p(const __u64 *p)
65885 {
65886- return (__force __be64)__swab64p(p);
65887+ return (__force const __be64)__swab64p(p);
65888 }
65889 static inline __u64 __be64_to_cpup(const __be64 *p)
65890 {
65891- return __swab64p((__u64 *)p);
65892+ return __swab64p((const __u64 *)p);
65893 }
65894 static inline __be32 __cpu_to_be32p(const __u32 *p)
65895 {
65896- return (__force __be32)__swab32p(p);
65897+ return (__force const __be32)__swab32p(p);
65898 }
65899 static inline __u32 __be32_to_cpup(const __be32 *p)
65900 {
65901- return __swab32p((__u32 *)p);
65902+ return __swab32p((const __u32 *)p);
65903 }
65904 static inline __be16 __cpu_to_be16p(const __u16 *p)
65905 {
65906- return (__force __be16)__swab16p(p);
65907+ return (__force const __be16)__swab16p(p);
65908 }
65909 static inline __u16 __be16_to_cpup(const __be16 *p)
65910 {
65911- return __swab16p((__u16 *)p);
65912+ return __swab16p((const __u16 *)p);
65913 }
65914 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
65915 #define __le64_to_cpus(x) do { (void)(x); } while (0)
65916diff --git a/include/linux/cache.h b/include/linux/cache.h
65917index 97e2488..e7576b9 100644
65918--- a/include/linux/cache.h
65919+++ b/include/linux/cache.h
65920@@ -16,6 +16,10 @@
65921 #define __read_mostly
65922 #endif
65923
65924+#ifndef __read_only
65925+#define __read_only __read_mostly
65926+#endif
65927+
65928 #ifndef ____cacheline_aligned
65929 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
65930 #endif
65931diff --git a/include/linux/capability.h b/include/linux/capability.h
65932index c8f2a5f7..1618a5c 100644
65933--- a/include/linux/capability.h
65934+++ b/include/linux/capability.h
65935@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
65936 (security_real_capable_noaudit((t), (cap)) == 0)
65937
65938 extern int capable(int cap);
65939+int capable_nolog(int cap);
65940
65941 /* audit system wants to get cap info from files as well */
65942 struct dentry;
65943diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
65944index 450fa59..86019fb 100644
65945--- a/include/linux/compiler-gcc4.h
65946+++ b/include/linux/compiler-gcc4.h
65947@@ -36,4 +36,16 @@
65948 the kernel context */
65949 #define __cold __attribute__((__cold__))
65950
65951+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
65952+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
65953+#define __bos0(ptr) __bos((ptr), 0)
65954+#define __bos1(ptr) __bos((ptr), 1)
65955+
65956+#if __GNUC_MINOR__ >= 5
65957+#ifdef CONSTIFY_PLUGIN
65958+#define __no_const __attribute__((no_const))
65959+#define __do_const __attribute__((do_const))
65960+#endif
65961+#endif
65962+
65963 #endif
65964diff --git a/include/linux/compiler.h b/include/linux/compiler.h
65965index 04fb513..fd6477b 100644
65966--- a/include/linux/compiler.h
65967+++ b/include/linux/compiler.h
65968@@ -5,11 +5,14 @@
65969
65970 #ifdef __CHECKER__
65971 # define __user __attribute__((noderef, address_space(1)))
65972+# define __force_user __force __user
65973 # define __kernel /* default address space */
65974+# define __force_kernel __force __kernel
65975 # define __safe __attribute__((safe))
65976 # define __force __attribute__((force))
65977 # define __nocast __attribute__((nocast))
65978 # define __iomem __attribute__((noderef, address_space(2)))
65979+# define __force_iomem __force __iomem
65980 # define __acquires(x) __attribute__((context(x,0,1)))
65981 # define __releases(x) __attribute__((context(x,1,0)))
65982 # define __acquire(x) __context__(x,1)
65983@@ -17,13 +20,34 @@
65984 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
65985 extern void __chk_user_ptr(const volatile void __user *);
65986 extern void __chk_io_ptr(const volatile void __iomem *);
65987+#elif defined(CHECKER_PLUGIN)
65988+//# define __user
65989+//# define __force_user
65990+//# define __kernel
65991+//# define __force_kernel
65992+# define __safe
65993+# define __force
65994+# define __nocast
65995+# define __iomem
65996+# define __force_iomem
65997+# define __chk_user_ptr(x) (void)0
65998+# define __chk_io_ptr(x) (void)0
65999+# define __builtin_warning(x, y...) (1)
66000+# define __acquires(x)
66001+# define __releases(x)
66002+# define __acquire(x) (void)0
66003+# define __release(x) (void)0
66004+# define __cond_lock(x,c) (c)
66005 #else
66006 # define __user
66007+# define __force_user
66008 # define __kernel
66009+# define __force_kernel
66010 # define __safe
66011 # define __force
66012 # define __nocast
66013 # define __iomem
66014+# define __force_iomem
66015 # define __chk_user_ptr(x) (void)0
66016 # define __chk_io_ptr(x) (void)0
66017 # define __builtin_warning(x, y...) (1)
66018@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66019 # define __attribute_const__ /* unimplemented */
66020 #endif
66021
66022+#ifndef __no_const
66023+# define __no_const
66024+#endif
66025+
66026+#ifndef __do_const
66027+# define __do_const
66028+#endif
66029+
66030 /*
66031 * Tell gcc if a function is cold. The compiler will assume any path
66032 * directly leading to the call is unlikely.
66033@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66034 #define __cold
66035 #endif
66036
66037+#ifndef __alloc_size
66038+#define __alloc_size(...)
66039+#endif
66040+
66041+#ifndef __bos
66042+#define __bos(ptr, arg)
66043+#endif
66044+
66045+#ifndef __bos0
66046+#define __bos0(ptr)
66047+#endif
66048+
66049+#ifndef __bos1
66050+#define __bos1(ptr)
66051+#endif
66052+
66053 /* Simple shorthand for a section definition */
66054 #ifndef __section
66055 # define __section(S) __attribute__ ((__section__(#S)))
66056@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
66057 * use is to mediate communication between process-level code and irq/NMI
66058 * handlers, all running on the same CPU.
66059 */
66060-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
66061+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
66062+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
66063
66064 #endif /* __LINUX_COMPILER_H */
66065diff --git a/include/linux/crypto.h b/include/linux/crypto.h
66066index fd92988..a3164bd 100644
66067--- a/include/linux/crypto.h
66068+++ b/include/linux/crypto.h
66069@@ -394,7 +394,7 @@ struct cipher_tfm {
66070 const u8 *key, unsigned int keylen);
66071 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66072 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
66073-};
66074+} __no_const;
66075
66076 struct hash_tfm {
66077 int (*init)(struct hash_desc *desc);
66078@@ -415,13 +415,13 @@ struct compress_tfm {
66079 int (*cot_decompress)(struct crypto_tfm *tfm,
66080 const u8 *src, unsigned int slen,
66081 u8 *dst, unsigned int *dlen);
66082-};
66083+} __no_const;
66084
66085 struct rng_tfm {
66086 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66087 unsigned int dlen);
66088 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66089-};
66090+} __no_const;
66091
66092 #define crt_ablkcipher crt_u.ablkcipher
66093 #define crt_aead crt_u.aead
66094diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66095index 30b93b2..cd7a8db 100644
66096--- a/include/linux/dcache.h
66097+++ b/include/linux/dcache.h
66098@@ -119,6 +119,8 @@ struct dentry {
66099 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66100 };
66101
66102+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66103+
66104 /*
66105 * dentry->d_lock spinlock nesting subclasses:
66106 *
66107diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66108index 3e9bd6a..f4e1aa0 100644
66109--- a/include/linux/decompress/mm.h
66110+++ b/include/linux/decompress/mm.h
66111@@ -78,7 +78,7 @@ static void free(void *where)
66112 * warnings when not needed (indeed large_malloc / large_free are not
66113 * needed by inflate */
66114
66115-#define malloc(a) kmalloc(a, GFP_KERNEL)
66116+#define malloc(a) kmalloc((a), GFP_KERNEL)
66117 #define free(a) kfree(a)
66118
66119 #define large_malloc(a) vmalloc(a)
66120diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66121index 91b7618..92a93d32 100644
66122--- a/include/linux/dma-mapping.h
66123+++ b/include/linux/dma-mapping.h
66124@@ -16,51 +16,51 @@ enum dma_data_direction {
66125 };
66126
66127 struct dma_map_ops {
66128- void* (*alloc_coherent)(struct device *dev, size_t size,
66129+ void* (* const alloc_coherent)(struct device *dev, size_t size,
66130 dma_addr_t *dma_handle, gfp_t gfp);
66131- void (*free_coherent)(struct device *dev, size_t size,
66132+ void (* const free_coherent)(struct device *dev, size_t size,
66133 void *vaddr, dma_addr_t dma_handle);
66134- dma_addr_t (*map_page)(struct device *dev, struct page *page,
66135+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66136 unsigned long offset, size_t size,
66137 enum dma_data_direction dir,
66138 struct dma_attrs *attrs);
66139- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66140+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66141 size_t size, enum dma_data_direction dir,
66142 struct dma_attrs *attrs);
66143- int (*map_sg)(struct device *dev, struct scatterlist *sg,
66144+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66145 int nents, enum dma_data_direction dir,
66146 struct dma_attrs *attrs);
66147- void (*unmap_sg)(struct device *dev,
66148+ void (* const unmap_sg)(struct device *dev,
66149 struct scatterlist *sg, int nents,
66150 enum dma_data_direction dir,
66151 struct dma_attrs *attrs);
66152- void (*sync_single_for_cpu)(struct device *dev,
66153+ void (* const sync_single_for_cpu)(struct device *dev,
66154 dma_addr_t dma_handle, size_t size,
66155 enum dma_data_direction dir);
66156- void (*sync_single_for_device)(struct device *dev,
66157+ void (* const sync_single_for_device)(struct device *dev,
66158 dma_addr_t dma_handle, size_t size,
66159 enum dma_data_direction dir);
66160- void (*sync_single_range_for_cpu)(struct device *dev,
66161+ void (* const sync_single_range_for_cpu)(struct device *dev,
66162 dma_addr_t dma_handle,
66163 unsigned long offset,
66164 size_t size,
66165 enum dma_data_direction dir);
66166- void (*sync_single_range_for_device)(struct device *dev,
66167+ void (* const sync_single_range_for_device)(struct device *dev,
66168 dma_addr_t dma_handle,
66169 unsigned long offset,
66170 size_t size,
66171 enum dma_data_direction dir);
66172- void (*sync_sg_for_cpu)(struct device *dev,
66173+ void (* const sync_sg_for_cpu)(struct device *dev,
66174 struct scatterlist *sg, int nents,
66175 enum dma_data_direction dir);
66176- void (*sync_sg_for_device)(struct device *dev,
66177+ void (* const sync_sg_for_device)(struct device *dev,
66178 struct scatterlist *sg, int nents,
66179 enum dma_data_direction dir);
66180- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66181- int (*dma_supported)(struct device *dev, u64 mask);
66182+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66183+ int (* const dma_supported)(struct device *dev, u64 mask);
66184 int (*set_dma_mask)(struct device *dev, u64 mask);
66185 int is_phys;
66186-};
66187+} __do_const;
66188
66189 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66190
66191diff --git a/include/linux/dst.h b/include/linux/dst.h
66192index e26fed8..b976d9f 100644
66193--- a/include/linux/dst.h
66194+++ b/include/linux/dst.h
66195@@ -380,7 +380,7 @@ struct dst_node
66196 struct thread_pool *pool;
66197
66198 /* Transaction IDs live here */
66199- atomic_long_t gen;
66200+ atomic_long_unchecked_t gen;
66201
66202 /*
66203 * How frequently and how many times transaction
66204diff --git a/include/linux/elf.h b/include/linux/elf.h
66205index 90a4ed0..d652617 100644
66206--- a/include/linux/elf.h
66207+++ b/include/linux/elf.h
66208@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66209 #define PT_GNU_EH_FRAME 0x6474e550
66210
66211 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66212+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66213+
66214+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66215+
66216+/* Constants for the e_flags field */
66217+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66218+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66219+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66220+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66221+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66222+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66223
66224 /* These constants define the different elf file types */
66225 #define ET_NONE 0
66226@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66227 #define DT_DEBUG 21
66228 #define DT_TEXTREL 22
66229 #define DT_JMPREL 23
66230+#define DT_FLAGS 30
66231+ #define DF_TEXTREL 0x00000004
66232 #define DT_ENCODING 32
66233 #define OLD_DT_LOOS 0x60000000
66234 #define DT_LOOS 0x6000000d
66235@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66236 #define PF_W 0x2
66237 #define PF_X 0x1
66238
66239+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66240+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66241+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66242+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66243+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66244+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66245+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66246+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66247+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66248+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66249+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66250+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66251+
66252 typedef struct elf32_phdr{
66253 Elf32_Word p_type;
66254 Elf32_Off p_offset;
66255@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66256 #define EI_OSABI 7
66257 #define EI_PAD 8
66258
66259+#define EI_PAX 14
66260+
66261 #define ELFMAG0 0x7f /* EI_MAG */
66262 #define ELFMAG1 'E'
66263 #define ELFMAG2 'L'
66264@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66265 #define elf_phdr elf32_phdr
66266 #define elf_note elf32_note
66267 #define elf_addr_t Elf32_Off
66268+#define elf_dyn Elf32_Dyn
66269
66270 #else
66271
66272@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66273 #define elf_phdr elf64_phdr
66274 #define elf_note elf64_note
66275 #define elf_addr_t Elf64_Off
66276+#define elf_dyn Elf64_Dyn
66277
66278 #endif
66279
66280diff --git a/include/linux/fs.h b/include/linux/fs.h
66281index 1b9a47a..6fe2934 100644
66282--- a/include/linux/fs.h
66283+++ b/include/linux/fs.h
66284@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66285 unsigned long, unsigned long);
66286
66287 struct address_space_operations {
66288- int (*writepage)(struct page *page, struct writeback_control *wbc);
66289- int (*readpage)(struct file *, struct page *);
66290- void (*sync_page)(struct page *);
66291+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
66292+ int (* const readpage)(struct file *, struct page *);
66293+ void (* const sync_page)(struct page *);
66294
66295 /* Write back some dirty pages from this mapping. */
66296- int (*writepages)(struct address_space *, struct writeback_control *);
66297+ int (* const writepages)(struct address_space *, struct writeback_control *);
66298
66299 /* Set a page dirty. Return true if this dirtied it */
66300- int (*set_page_dirty)(struct page *page);
66301+ int (* const set_page_dirty)(struct page *page);
66302
66303- int (*readpages)(struct file *filp, struct address_space *mapping,
66304+ int (* const readpages)(struct file *filp, struct address_space *mapping,
66305 struct list_head *pages, unsigned nr_pages);
66306
66307- int (*write_begin)(struct file *, struct address_space *mapping,
66308+ int (* const write_begin)(struct file *, struct address_space *mapping,
66309 loff_t pos, unsigned len, unsigned flags,
66310 struct page **pagep, void **fsdata);
66311- int (*write_end)(struct file *, struct address_space *mapping,
66312+ int (* const write_end)(struct file *, struct address_space *mapping,
66313 loff_t pos, unsigned len, unsigned copied,
66314 struct page *page, void *fsdata);
66315
66316 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66317- sector_t (*bmap)(struct address_space *, sector_t);
66318- void (*invalidatepage) (struct page *, unsigned long);
66319- int (*releasepage) (struct page *, gfp_t);
66320- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66321+ sector_t (* const bmap)(struct address_space *, sector_t);
66322+ void (* const invalidatepage) (struct page *, unsigned long);
66323+ int (* const releasepage) (struct page *, gfp_t);
66324+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66325 loff_t offset, unsigned long nr_segs);
66326- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66327+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66328 void **, unsigned long *);
66329 /* migrate the contents of a page to the specified target */
66330- int (*migratepage) (struct address_space *,
66331+ int (* const migratepage) (struct address_space *,
66332 struct page *, struct page *);
66333- int (*launder_page) (struct page *);
66334- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66335+ int (* const launder_page) (struct page *);
66336+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66337 unsigned long);
66338- int (*error_remove_page)(struct address_space *, struct page *);
66339+ int (* const error_remove_page)(struct address_space *, struct page *);
66340 };
66341
66342 /*
66343@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66344 typedef struct files_struct *fl_owner_t;
66345
66346 struct file_lock_operations {
66347- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66348- void (*fl_release_private)(struct file_lock *);
66349+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66350+ void (* const fl_release_private)(struct file_lock *);
66351 };
66352
66353 struct lock_manager_operations {
66354- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66355- void (*fl_notify)(struct file_lock *); /* unblock callback */
66356- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
66357- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66358- void (*fl_release_private)(struct file_lock *);
66359- void (*fl_break)(struct file_lock *);
66360- int (*fl_mylease)(struct file_lock *, struct file_lock *);
66361- int (*fl_change)(struct file_lock **, int);
66362+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
66363+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
66364+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
66365+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66366+ void (* const fl_release_private)(struct file_lock *);
66367+ void (* const fl_break)(struct file_lock *);
66368+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
66369+ int (* const fl_change)(struct file_lock **, int);
66370 };
66371
66372 struct lock_manager {
66373@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
66374 unsigned int fi_flags; /* Flags as passed from user */
66375 unsigned int fi_extents_mapped; /* Number of mapped extents */
66376 unsigned int fi_extents_max; /* Size of fiemap_extent array */
66377- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
66378+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
66379 * array */
66380 };
66381 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
66382@@ -1512,7 +1512,8 @@ struct file_operations {
66383 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
66384 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
66385 int (*setlease)(struct file *, long, struct file_lock **);
66386-};
66387+} __do_const;
66388+typedef struct file_operations __no_const file_operations_no_const;
66389
66390 struct inode_operations {
66391 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
66392@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
66393 unsigned long, loff_t *);
66394
66395 struct super_operations {
66396- struct inode *(*alloc_inode)(struct super_block *sb);
66397- void (*destroy_inode)(struct inode *);
66398+ struct inode *(* const alloc_inode)(struct super_block *sb);
66399+ void (* const destroy_inode)(struct inode *);
66400
66401- void (*dirty_inode) (struct inode *);
66402- int (*write_inode) (struct inode *, int);
66403- void (*drop_inode) (struct inode *);
66404- void (*delete_inode) (struct inode *);
66405- void (*put_super) (struct super_block *);
66406- void (*write_super) (struct super_block *);
66407- int (*sync_fs)(struct super_block *sb, int wait);
66408- int (*freeze_fs) (struct super_block *);
66409- int (*unfreeze_fs) (struct super_block *);
66410- int (*statfs) (struct dentry *, struct kstatfs *);
66411- int (*remount_fs) (struct super_block *, int *, char *);
66412- void (*clear_inode) (struct inode *);
66413- void (*umount_begin) (struct super_block *);
66414+ void (* const dirty_inode) (struct inode *);
66415+ int (* const write_inode) (struct inode *, int);
66416+ void (* const drop_inode) (struct inode *);
66417+ void (* const delete_inode) (struct inode *);
66418+ void (* const put_super) (struct super_block *);
66419+ void (* const write_super) (struct super_block *);
66420+ int (* const sync_fs)(struct super_block *sb, int wait);
66421+ int (* const freeze_fs) (struct super_block *);
66422+ int (* const unfreeze_fs) (struct super_block *);
66423+ int (* const statfs) (struct dentry *, struct kstatfs *);
66424+ int (* const remount_fs) (struct super_block *, int *, char *);
66425+ void (* const clear_inode) (struct inode *);
66426+ void (* const umount_begin) (struct super_block *);
66427
66428- int (*show_options)(struct seq_file *, struct vfsmount *);
66429- int (*show_stats)(struct seq_file *, struct vfsmount *);
66430+ int (* const show_options)(struct seq_file *, struct vfsmount *);
66431+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
66432 #ifdef CONFIG_QUOTA
66433- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
66434- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66435+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
66436+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66437 #endif
66438- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66439+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66440 };
66441
66442 /*
66443diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66444index 78a05bf..2a7d3e1 100644
66445--- a/include/linux/fs_struct.h
66446+++ b/include/linux/fs_struct.h
66447@@ -4,7 +4,7 @@
66448 #include <linux/path.h>
66449
66450 struct fs_struct {
66451- int users;
66452+ atomic_t users;
66453 rwlock_t lock;
66454 int umask;
66455 int in_exec;
66456diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66457index 7be0c6f..2f63a2b 100644
66458--- a/include/linux/fscache-cache.h
66459+++ b/include/linux/fscache-cache.h
66460@@ -116,7 +116,7 @@ struct fscache_operation {
66461 #endif
66462 };
66463
66464-extern atomic_t fscache_op_debug_id;
66465+extern atomic_unchecked_t fscache_op_debug_id;
66466 extern const struct slow_work_ops fscache_op_slow_work_ops;
66467
66468 extern void fscache_enqueue_operation(struct fscache_operation *);
66469@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66470 fscache_operation_release_t release)
66471 {
66472 atomic_set(&op->usage, 1);
66473- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66474+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66475 op->release = release;
66476 INIT_LIST_HEAD(&op->pend_link);
66477 fscache_set_op_state(op, "Init");
66478diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
66479index 4d6f47b..00bcedb 100644
66480--- a/include/linux/fsnotify_backend.h
66481+++ b/include/linux/fsnotify_backend.h
66482@@ -86,6 +86,7 @@ struct fsnotify_ops {
66483 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
66484 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
66485 };
66486+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
66487
66488 /*
66489 * A group is a "thing" that wants to receive notification about filesystem
66490diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66491index 4ec5e67..42f1eb9 100644
66492--- a/include/linux/ftrace_event.h
66493+++ b/include/linux/ftrace_event.h
66494@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
66495 int filter_type);
66496 extern int trace_define_common_fields(struct ftrace_event_call *call);
66497
66498-#define is_signed_type(type) (((type)(-1)) < 0)
66499+#define is_signed_type(type) (((type)(-1)) < (type)1)
66500
66501 int trace_set_clr_event(const char *system, const char *event, int set);
66502
66503diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66504index 297df45..b6a74ff 100644
66505--- a/include/linux/genhd.h
66506+++ b/include/linux/genhd.h
66507@@ -161,7 +161,7 @@ struct gendisk {
66508
66509 struct timer_rand_state *random;
66510
66511- atomic_t sync_io; /* RAID */
66512+ atomic_unchecked_t sync_io; /* RAID */
66513 struct work_struct async_notify;
66514 #ifdef CONFIG_BLK_DEV_INTEGRITY
66515 struct blk_integrity *integrity;
66516diff --git a/include/linux/gracl.h b/include/linux/gracl.h
66517new file mode 100644
66518index 0000000..0dc3943
66519--- /dev/null
66520+++ b/include/linux/gracl.h
66521@@ -0,0 +1,317 @@
66522+#ifndef GR_ACL_H
66523+#define GR_ACL_H
66524+
66525+#include <linux/grdefs.h>
66526+#include <linux/resource.h>
66527+#include <linux/capability.h>
66528+#include <linux/dcache.h>
66529+#include <asm/resource.h>
66530+
66531+/* Major status information */
66532+
66533+#define GR_VERSION "grsecurity 2.2.2"
66534+#define GRSECURITY_VERSION 0x2202
66535+
66536+enum {
66537+ GR_SHUTDOWN = 0,
66538+ GR_ENABLE = 1,
66539+ GR_SPROLE = 2,
66540+ GR_RELOAD = 3,
66541+ GR_SEGVMOD = 4,
66542+ GR_STATUS = 5,
66543+ GR_UNSPROLE = 6,
66544+ GR_PASSSET = 7,
66545+ GR_SPROLEPAM = 8,
66546+};
66547+
66548+/* Password setup definitions
66549+ * kernel/grhash.c */
66550+enum {
66551+ GR_PW_LEN = 128,
66552+ GR_SALT_LEN = 16,
66553+ GR_SHA_LEN = 32,
66554+};
66555+
66556+enum {
66557+ GR_SPROLE_LEN = 64,
66558+};
66559+
66560+enum {
66561+ GR_NO_GLOB = 0,
66562+ GR_REG_GLOB,
66563+ GR_CREATE_GLOB
66564+};
66565+
66566+#define GR_NLIMITS 32
66567+
66568+/* Begin Data Structures */
66569+
66570+struct sprole_pw {
66571+ unsigned char *rolename;
66572+ unsigned char salt[GR_SALT_LEN];
66573+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
66574+};
66575+
66576+struct name_entry {
66577+ __u32 key;
66578+ ino_t inode;
66579+ dev_t device;
66580+ char *name;
66581+ __u16 len;
66582+ __u8 deleted;
66583+ struct name_entry *prev;
66584+ struct name_entry *next;
66585+};
66586+
66587+struct inodev_entry {
66588+ struct name_entry *nentry;
66589+ struct inodev_entry *prev;
66590+ struct inodev_entry *next;
66591+};
66592+
66593+struct acl_role_db {
66594+ struct acl_role_label **r_hash;
66595+ __u32 r_size;
66596+};
66597+
66598+struct inodev_db {
66599+ struct inodev_entry **i_hash;
66600+ __u32 i_size;
66601+};
66602+
66603+struct name_db {
66604+ struct name_entry **n_hash;
66605+ __u32 n_size;
66606+};
66607+
66608+struct crash_uid {
66609+ uid_t uid;
66610+ unsigned long expires;
66611+};
66612+
66613+struct gr_hash_struct {
66614+ void **table;
66615+ void **nametable;
66616+ void *first;
66617+ __u32 table_size;
66618+ __u32 used_size;
66619+ int type;
66620+};
66621+
66622+/* Userspace Grsecurity ACL data structures */
66623+
66624+struct acl_subject_label {
66625+ char *filename;
66626+ ino_t inode;
66627+ dev_t device;
66628+ __u32 mode;
66629+ kernel_cap_t cap_mask;
66630+ kernel_cap_t cap_lower;
66631+ kernel_cap_t cap_invert_audit;
66632+
66633+ struct rlimit res[GR_NLIMITS];
66634+ __u32 resmask;
66635+
66636+ __u8 user_trans_type;
66637+ __u8 group_trans_type;
66638+ uid_t *user_transitions;
66639+ gid_t *group_transitions;
66640+ __u16 user_trans_num;
66641+ __u16 group_trans_num;
66642+
66643+ __u32 sock_families[2];
66644+ __u32 ip_proto[8];
66645+ __u32 ip_type;
66646+ struct acl_ip_label **ips;
66647+ __u32 ip_num;
66648+ __u32 inaddr_any_override;
66649+
66650+ __u32 crashes;
66651+ unsigned long expires;
66652+
66653+ struct acl_subject_label *parent_subject;
66654+ struct gr_hash_struct *hash;
66655+ struct acl_subject_label *prev;
66656+ struct acl_subject_label *next;
66657+
66658+ struct acl_object_label **obj_hash;
66659+ __u32 obj_hash_size;
66660+ __u16 pax_flags;
66661+};
66662+
66663+struct role_allowed_ip {
66664+ __u32 addr;
66665+ __u32 netmask;
66666+
66667+ struct role_allowed_ip *prev;
66668+ struct role_allowed_ip *next;
66669+};
66670+
66671+struct role_transition {
66672+ char *rolename;
66673+
66674+ struct role_transition *prev;
66675+ struct role_transition *next;
66676+};
66677+
66678+struct acl_role_label {
66679+ char *rolename;
66680+ uid_t uidgid;
66681+ __u16 roletype;
66682+
66683+ __u16 auth_attempts;
66684+ unsigned long expires;
66685+
66686+ struct acl_subject_label *root_label;
66687+ struct gr_hash_struct *hash;
66688+
66689+ struct acl_role_label *prev;
66690+ struct acl_role_label *next;
66691+
66692+ struct role_transition *transitions;
66693+ struct role_allowed_ip *allowed_ips;
66694+ uid_t *domain_children;
66695+ __u16 domain_child_num;
66696+
66697+ struct acl_subject_label **subj_hash;
66698+ __u32 subj_hash_size;
66699+};
66700+
66701+struct user_acl_role_db {
66702+ struct acl_role_label **r_table;
66703+ __u32 num_pointers; /* Number of allocations to track */
66704+ __u32 num_roles; /* Number of roles */
66705+ __u32 num_domain_children; /* Number of domain children */
66706+ __u32 num_subjects; /* Number of subjects */
66707+ __u32 num_objects; /* Number of objects */
66708+};
66709+
66710+struct acl_object_label {
66711+ char *filename;
66712+ ino_t inode;
66713+ dev_t device;
66714+ __u32 mode;
66715+
66716+ struct acl_subject_label *nested;
66717+ struct acl_object_label *globbed;
66718+
66719+ /* next two structures not used */
66720+
66721+ struct acl_object_label *prev;
66722+ struct acl_object_label *next;
66723+};
66724+
66725+struct acl_ip_label {
66726+ char *iface;
66727+ __u32 addr;
66728+ __u32 netmask;
66729+ __u16 low, high;
66730+ __u8 mode;
66731+ __u32 type;
66732+ __u32 proto[8];
66733+
66734+ /* next two structures not used */
66735+
66736+ struct acl_ip_label *prev;
66737+ struct acl_ip_label *next;
66738+};
66739+
66740+struct gr_arg {
66741+ struct user_acl_role_db role_db;
66742+ unsigned char pw[GR_PW_LEN];
66743+ unsigned char salt[GR_SALT_LEN];
66744+ unsigned char sum[GR_SHA_LEN];
66745+ unsigned char sp_role[GR_SPROLE_LEN];
66746+ struct sprole_pw *sprole_pws;
66747+ dev_t segv_device;
66748+ ino_t segv_inode;
66749+ uid_t segv_uid;
66750+ __u16 num_sprole_pws;
66751+ __u16 mode;
66752+};
66753+
66754+struct gr_arg_wrapper {
66755+ struct gr_arg *arg;
66756+ __u32 version;
66757+ __u32 size;
66758+};
66759+
66760+struct subject_map {
66761+ struct acl_subject_label *user;
66762+ struct acl_subject_label *kernel;
66763+ struct subject_map *prev;
66764+ struct subject_map *next;
66765+};
66766+
66767+struct acl_subj_map_db {
66768+ struct subject_map **s_hash;
66769+ __u32 s_size;
66770+};
66771+
66772+/* End Data Structures Section */
66773+
66774+/* Hash functions generated by empirical testing by Brad Spengler
66775+ Makes good use of the low bits of the inode. Generally 0-1 times
66776+ in loop for successful match. 0-3 for unsuccessful match.
66777+ Shift/add algorithm with modulus of table size and an XOR*/
66778+
66779+static __inline__ unsigned int
66780+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
66781+{
66782+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
66783+}
66784+
66785+ static __inline__ unsigned int
66786+shash(const struct acl_subject_label *userp, const unsigned int sz)
66787+{
66788+ return ((const unsigned long)userp % sz);
66789+}
66790+
66791+static __inline__ unsigned int
66792+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
66793+{
66794+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
66795+}
66796+
66797+static __inline__ unsigned int
66798+nhash(const char *name, const __u16 len, const unsigned int sz)
66799+{
66800+ return full_name_hash((const unsigned char *)name, len) % sz;
66801+}
66802+
66803+#define FOR_EACH_ROLE_START(role) \
66804+ role = role_list; \
66805+ while (role) {
66806+
66807+#define FOR_EACH_ROLE_END(role) \
66808+ role = role->prev; \
66809+ }
66810+
66811+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
66812+ subj = NULL; \
66813+ iter = 0; \
66814+ while (iter < role->subj_hash_size) { \
66815+ if (subj == NULL) \
66816+ subj = role->subj_hash[iter]; \
66817+ if (subj == NULL) { \
66818+ iter++; \
66819+ continue; \
66820+ }
66821+
66822+#define FOR_EACH_SUBJECT_END(subj,iter) \
66823+ subj = subj->next; \
66824+ if (subj == NULL) \
66825+ iter++; \
66826+ }
66827+
66828+
66829+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
66830+ subj = role->hash->first; \
66831+ while (subj != NULL) {
66832+
66833+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
66834+ subj = subj->next; \
66835+ }
66836+
66837+#endif
66838+
66839diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
66840new file mode 100644
66841index 0000000..323ecf2
66842--- /dev/null
66843+++ b/include/linux/gralloc.h
66844@@ -0,0 +1,9 @@
66845+#ifndef __GRALLOC_H
66846+#define __GRALLOC_H
66847+
66848+void acl_free_all(void);
66849+int acl_alloc_stack_init(unsigned long size);
66850+void *acl_alloc(unsigned long len);
66851+void *acl_alloc_num(unsigned long num, unsigned long len);
66852+
66853+#endif
66854diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
66855new file mode 100644
66856index 0000000..70d6cd5
66857--- /dev/null
66858+++ b/include/linux/grdefs.h
66859@@ -0,0 +1,140 @@
66860+#ifndef GRDEFS_H
66861+#define GRDEFS_H
66862+
66863+/* Begin grsecurity status declarations */
66864+
66865+enum {
66866+ GR_READY = 0x01,
66867+ GR_STATUS_INIT = 0x00 // disabled state
66868+};
66869+
66870+/* Begin ACL declarations */
66871+
66872+/* Role flags */
66873+
66874+enum {
66875+ GR_ROLE_USER = 0x0001,
66876+ GR_ROLE_GROUP = 0x0002,
66877+ GR_ROLE_DEFAULT = 0x0004,
66878+ GR_ROLE_SPECIAL = 0x0008,
66879+ GR_ROLE_AUTH = 0x0010,
66880+ GR_ROLE_NOPW = 0x0020,
66881+ GR_ROLE_GOD = 0x0040,
66882+ GR_ROLE_LEARN = 0x0080,
66883+ GR_ROLE_TPE = 0x0100,
66884+ GR_ROLE_DOMAIN = 0x0200,
66885+ GR_ROLE_PAM = 0x0400,
66886+ GR_ROLE_PERSIST = 0x800
66887+};
66888+
66889+/* ACL Subject and Object mode flags */
66890+enum {
66891+ GR_DELETED = 0x80000000
66892+};
66893+
66894+/* ACL Object-only mode flags */
66895+enum {
66896+ GR_READ = 0x00000001,
66897+ GR_APPEND = 0x00000002,
66898+ GR_WRITE = 0x00000004,
66899+ GR_EXEC = 0x00000008,
66900+ GR_FIND = 0x00000010,
66901+ GR_INHERIT = 0x00000020,
66902+ GR_SETID = 0x00000040,
66903+ GR_CREATE = 0x00000080,
66904+ GR_DELETE = 0x00000100,
66905+ GR_LINK = 0x00000200,
66906+ GR_AUDIT_READ = 0x00000400,
66907+ GR_AUDIT_APPEND = 0x00000800,
66908+ GR_AUDIT_WRITE = 0x00001000,
66909+ GR_AUDIT_EXEC = 0x00002000,
66910+ GR_AUDIT_FIND = 0x00004000,
66911+ GR_AUDIT_INHERIT= 0x00008000,
66912+ GR_AUDIT_SETID = 0x00010000,
66913+ GR_AUDIT_CREATE = 0x00020000,
66914+ GR_AUDIT_DELETE = 0x00040000,
66915+ GR_AUDIT_LINK = 0x00080000,
66916+ GR_PTRACERD = 0x00100000,
66917+ GR_NOPTRACE = 0x00200000,
66918+ GR_SUPPRESS = 0x00400000,
66919+ GR_NOLEARN = 0x00800000,
66920+ GR_INIT_TRANSFER= 0x01000000
66921+};
66922+
66923+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
66924+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
66925+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
66926+
66927+/* ACL subject-only mode flags */
66928+enum {
66929+ GR_KILL = 0x00000001,
66930+ GR_VIEW = 0x00000002,
66931+ GR_PROTECTED = 0x00000004,
66932+ GR_LEARN = 0x00000008,
66933+ GR_OVERRIDE = 0x00000010,
66934+ /* just a placeholder, this mode is only used in userspace */
66935+ GR_DUMMY = 0x00000020,
66936+ GR_PROTSHM = 0x00000040,
66937+ GR_KILLPROC = 0x00000080,
66938+ GR_KILLIPPROC = 0x00000100,
66939+ /* just a placeholder, this mode is only used in userspace */
66940+ GR_NOTROJAN = 0x00000200,
66941+ GR_PROTPROCFD = 0x00000400,
66942+ GR_PROCACCT = 0x00000800,
66943+ GR_RELAXPTRACE = 0x00001000,
66944+ GR_NESTED = 0x00002000,
66945+ GR_INHERITLEARN = 0x00004000,
66946+ GR_PROCFIND = 0x00008000,
66947+ GR_POVERRIDE = 0x00010000,
66948+ GR_KERNELAUTH = 0x00020000,
66949+ GR_ATSECURE = 0x00040000,
66950+ GR_SHMEXEC = 0x00080000
66951+};
66952+
66953+enum {
66954+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
66955+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
66956+ GR_PAX_ENABLE_MPROTECT = 0x0004,
66957+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
66958+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
66959+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
66960+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
66961+ GR_PAX_DISABLE_MPROTECT = 0x0400,
66962+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
66963+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
66964+};
66965+
66966+enum {
66967+ GR_ID_USER = 0x01,
66968+ GR_ID_GROUP = 0x02,
66969+};
66970+
66971+enum {
66972+ GR_ID_ALLOW = 0x01,
66973+ GR_ID_DENY = 0x02,
66974+};
66975+
66976+#define GR_CRASH_RES 31
66977+#define GR_UIDTABLE_MAX 500
66978+
66979+/* begin resource learning section */
66980+enum {
66981+ GR_RLIM_CPU_BUMP = 60,
66982+ GR_RLIM_FSIZE_BUMP = 50000,
66983+ GR_RLIM_DATA_BUMP = 10000,
66984+ GR_RLIM_STACK_BUMP = 1000,
66985+ GR_RLIM_CORE_BUMP = 10000,
66986+ GR_RLIM_RSS_BUMP = 500000,
66987+ GR_RLIM_NPROC_BUMP = 1,
66988+ GR_RLIM_NOFILE_BUMP = 5,
66989+ GR_RLIM_MEMLOCK_BUMP = 50000,
66990+ GR_RLIM_AS_BUMP = 500000,
66991+ GR_RLIM_LOCKS_BUMP = 2,
66992+ GR_RLIM_SIGPENDING_BUMP = 5,
66993+ GR_RLIM_MSGQUEUE_BUMP = 10000,
66994+ GR_RLIM_NICE_BUMP = 1,
66995+ GR_RLIM_RTPRIO_BUMP = 1,
66996+ GR_RLIM_RTTIME_BUMP = 1000000
66997+};
66998+
66999+#endif
67000diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
67001new file mode 100644
67002index 0000000..3826b91
67003--- /dev/null
67004+++ b/include/linux/grinternal.h
67005@@ -0,0 +1,219 @@
67006+#ifndef __GRINTERNAL_H
67007+#define __GRINTERNAL_H
67008+
67009+#ifdef CONFIG_GRKERNSEC
67010+
67011+#include <linux/fs.h>
67012+#include <linux/mnt_namespace.h>
67013+#include <linux/nsproxy.h>
67014+#include <linux/gracl.h>
67015+#include <linux/grdefs.h>
67016+#include <linux/grmsg.h>
67017+
67018+void gr_add_learn_entry(const char *fmt, ...)
67019+ __attribute__ ((format (printf, 1, 2)));
67020+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
67021+ const struct vfsmount *mnt);
67022+__u32 gr_check_create(const struct dentry *new_dentry,
67023+ const struct dentry *parent,
67024+ const struct vfsmount *mnt, const __u32 mode);
67025+int gr_check_protected_task(const struct task_struct *task);
67026+__u32 to_gr_audit(const __u32 reqmode);
67027+int gr_set_acls(const int type);
67028+int gr_apply_subject_to_task(struct task_struct *task);
67029+int gr_acl_is_enabled(void);
67030+char gr_roletype_to_char(void);
67031+
67032+void gr_handle_alertkill(struct task_struct *task);
67033+char *gr_to_filename(const struct dentry *dentry,
67034+ const struct vfsmount *mnt);
67035+char *gr_to_filename1(const struct dentry *dentry,
67036+ const struct vfsmount *mnt);
67037+char *gr_to_filename2(const struct dentry *dentry,
67038+ const struct vfsmount *mnt);
67039+char *gr_to_filename3(const struct dentry *dentry,
67040+ const struct vfsmount *mnt);
67041+
67042+extern int grsec_enable_ptrace_readexec;
67043+extern int grsec_enable_harden_ptrace;
67044+extern int grsec_enable_link;
67045+extern int grsec_enable_fifo;
67046+extern int grsec_enable_shm;
67047+extern int grsec_enable_execlog;
67048+extern int grsec_enable_signal;
67049+extern int grsec_enable_audit_ptrace;
67050+extern int grsec_enable_forkfail;
67051+extern int grsec_enable_time;
67052+extern int grsec_enable_rofs;
67053+extern int grsec_enable_chroot_shmat;
67054+extern int grsec_enable_chroot_mount;
67055+extern int grsec_enable_chroot_double;
67056+extern int grsec_enable_chroot_pivot;
67057+extern int grsec_enable_chroot_chdir;
67058+extern int grsec_enable_chroot_chmod;
67059+extern int grsec_enable_chroot_mknod;
67060+extern int grsec_enable_chroot_fchdir;
67061+extern int grsec_enable_chroot_nice;
67062+extern int grsec_enable_chroot_execlog;
67063+extern int grsec_enable_chroot_caps;
67064+extern int grsec_enable_chroot_sysctl;
67065+extern int grsec_enable_chroot_unix;
67066+extern int grsec_enable_tpe;
67067+extern int grsec_tpe_gid;
67068+extern int grsec_enable_tpe_all;
67069+extern int grsec_enable_tpe_invert;
67070+extern int grsec_enable_socket_all;
67071+extern int grsec_socket_all_gid;
67072+extern int grsec_enable_socket_client;
67073+extern int grsec_socket_client_gid;
67074+extern int grsec_enable_socket_server;
67075+extern int grsec_socket_server_gid;
67076+extern int grsec_audit_gid;
67077+extern int grsec_enable_group;
67078+extern int grsec_enable_audit_textrel;
67079+extern int grsec_enable_log_rwxmaps;
67080+extern int grsec_enable_mount;
67081+extern int grsec_enable_chdir;
67082+extern int grsec_resource_logging;
67083+extern int grsec_enable_blackhole;
67084+extern int grsec_lastack_retries;
67085+extern int grsec_enable_brute;
67086+extern int grsec_lock;
67087+
67088+extern spinlock_t grsec_alert_lock;
67089+extern unsigned long grsec_alert_wtime;
67090+extern unsigned long grsec_alert_fyet;
67091+
67092+extern spinlock_t grsec_audit_lock;
67093+
67094+extern rwlock_t grsec_exec_file_lock;
67095+
67096+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67097+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67098+ (tsk)->exec_file->f_vfsmnt) : "/")
67099+
67100+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67101+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67102+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67103+
67104+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67105+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
67106+ (tsk)->exec_file->f_vfsmnt) : "/")
67107+
67108+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67109+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67110+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67111+
67112+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67113+
67114+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67115+
67116+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67117+ (task)->pid, (cred)->uid, \
67118+ (cred)->euid, (cred)->gid, (cred)->egid, \
67119+ gr_parent_task_fullpath(task), \
67120+ (task)->real_parent->comm, (task)->real_parent->pid, \
67121+ (pcred)->uid, (pcred)->euid, \
67122+ (pcred)->gid, (pcred)->egid
67123+
67124+#define GR_CHROOT_CAPS {{ \
67125+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67126+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67127+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67128+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67129+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67130+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67131+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
67132+
67133+#define security_learn(normal_msg,args...) \
67134+({ \
67135+ read_lock(&grsec_exec_file_lock); \
67136+ gr_add_learn_entry(normal_msg "\n", ## args); \
67137+ read_unlock(&grsec_exec_file_lock); \
67138+})
67139+
67140+enum {
67141+ GR_DO_AUDIT,
67142+ GR_DONT_AUDIT,
67143+ GR_DONT_AUDIT_GOOD
67144+};
67145+
67146+enum {
67147+ GR_TTYSNIFF,
67148+ GR_RBAC,
67149+ GR_RBAC_STR,
67150+ GR_STR_RBAC,
67151+ GR_RBAC_MODE2,
67152+ GR_RBAC_MODE3,
67153+ GR_FILENAME,
67154+ GR_SYSCTL_HIDDEN,
67155+ GR_NOARGS,
67156+ GR_ONE_INT,
67157+ GR_ONE_INT_TWO_STR,
67158+ GR_ONE_STR,
67159+ GR_STR_INT,
67160+ GR_TWO_STR_INT,
67161+ GR_TWO_INT,
67162+ GR_TWO_U64,
67163+ GR_THREE_INT,
67164+ GR_FIVE_INT_TWO_STR,
67165+ GR_TWO_STR,
67166+ GR_THREE_STR,
67167+ GR_FOUR_STR,
67168+ GR_STR_FILENAME,
67169+ GR_FILENAME_STR,
67170+ GR_FILENAME_TWO_INT,
67171+ GR_FILENAME_TWO_INT_STR,
67172+ GR_TEXTREL,
67173+ GR_PTRACE,
67174+ GR_RESOURCE,
67175+ GR_CAP,
67176+ GR_SIG,
67177+ GR_SIG2,
67178+ GR_CRASH1,
67179+ GR_CRASH2,
67180+ GR_PSACCT,
67181+ GR_RWXMAP
67182+};
67183+
67184+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67185+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67186+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67187+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67188+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67189+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67190+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67191+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67192+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67193+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67194+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67195+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67196+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67197+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67198+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67199+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67200+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67201+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67202+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67203+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67204+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67205+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67206+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67207+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67208+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67209+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67210+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67211+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67212+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67213+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67214+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67215+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67216+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67217+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67218+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67219+
67220+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67221+
67222+#endif
67223+
67224+#endif
67225diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67226new file mode 100644
67227index 0000000..8b9ed56
67228--- /dev/null
67229+++ b/include/linux/grmsg.h
67230@@ -0,0 +1,110 @@
67231+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67232+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67233+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67234+#define GR_STOPMOD_MSG "denied modification of module state by "
67235+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67236+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67237+#define GR_IOPERM_MSG "denied use of ioperm() by "
67238+#define GR_IOPL_MSG "denied use of iopl() by "
67239+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67240+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67241+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67242+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67243+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67244+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67245+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67246+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67247+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67248+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67249+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67250+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67251+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67252+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67253+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67254+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67255+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67256+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67257+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67258+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67259+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67260+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67261+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67262+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67263+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67264+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67265+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67266+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67267+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67268+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67269+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67270+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67271+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67272+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67273+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67274+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
67275+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67276+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67277+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67278+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67279+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67280+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67281+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67282+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67283+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67284+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67285+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67286+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67287+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67288+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67289+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67290+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67291+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67292+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67293+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67294+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67295+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67296+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67297+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67298+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67299+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67300+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67301+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67302+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67303+#define GR_FAILFORK_MSG "failed fork with errno %s by "
67304+#define GR_NICE_CHROOT_MSG "denied priority change by "
67305+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67306+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67307+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67308+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67309+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67310+#define GR_TIME_MSG "time set by "
67311+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67312+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67313+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67314+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67315+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67316+#define GR_BIND_MSG "denied bind() by "
67317+#define GR_CONNECT_MSG "denied connect() by "
67318+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67319+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67320+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67321+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67322+#define GR_CAP_ACL_MSG "use of %s denied for "
67323+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67324+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67325+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67326+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67327+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67328+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67329+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67330+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67331+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67332+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67333+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67334+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67335+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67336+#define GR_VM86_MSG "denied use of vm86 by "
67337+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67338+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67339+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67340+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
67341diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67342new file mode 100644
67343index 0000000..bb1e366
67344--- /dev/null
67345+++ b/include/linux/grsecurity.h
67346@@ -0,0 +1,219 @@
67347+#ifndef GR_SECURITY_H
67348+#define GR_SECURITY_H
67349+#include <linux/fs.h>
67350+#include <linux/fs_struct.h>
67351+#include <linux/binfmts.h>
67352+#include <linux/gracl.h>
67353+#include <linux/compat.h>
67354+
67355+/* notify of brain-dead configs */
67356+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67357+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67358+#endif
67359+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67360+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67361+#endif
67362+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67363+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67364+#endif
67365+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67366+#error "CONFIG_PAX enabled, but no PaX options are enabled."
67367+#endif
67368+
67369+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
67370+void gr_handle_brute_check(void);
67371+void gr_handle_kernel_exploit(void);
67372+int gr_process_user_ban(void);
67373+
67374+char gr_roletype_to_char(void);
67375+
67376+int gr_acl_enable_at_secure(void);
67377+
67378+int gr_check_user_change(int real, int effective, int fs);
67379+int gr_check_group_change(int real, int effective, int fs);
67380+
67381+void gr_del_task_from_ip_table(struct task_struct *p);
67382+
67383+int gr_pid_is_chrooted(struct task_struct *p);
67384+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67385+int gr_handle_chroot_nice(void);
67386+int gr_handle_chroot_sysctl(const int op);
67387+int gr_handle_chroot_setpriority(struct task_struct *p,
67388+ const int niceval);
67389+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67390+int gr_handle_chroot_chroot(const struct dentry *dentry,
67391+ const struct vfsmount *mnt);
67392+void gr_handle_chroot_chdir(struct path *path);
67393+int gr_handle_chroot_chmod(const struct dentry *dentry,
67394+ const struct vfsmount *mnt, const int mode);
67395+int gr_handle_chroot_mknod(const struct dentry *dentry,
67396+ const struct vfsmount *mnt, const int mode);
67397+int gr_handle_chroot_mount(const struct dentry *dentry,
67398+ const struct vfsmount *mnt,
67399+ const char *dev_name);
67400+int gr_handle_chroot_pivot(void);
67401+int gr_handle_chroot_unix(const pid_t pid);
67402+
67403+int gr_handle_rawio(const struct inode *inode);
67404+
67405+void gr_handle_ioperm(void);
67406+void gr_handle_iopl(void);
67407+
67408+int gr_tpe_allow(const struct file *file);
67409+
67410+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67411+void gr_clear_chroot_entries(struct task_struct *task);
67412+
67413+void gr_log_forkfail(const int retval);
67414+void gr_log_timechange(void);
67415+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67416+void gr_log_chdir(const struct dentry *dentry,
67417+ const struct vfsmount *mnt);
67418+void gr_log_chroot_exec(const struct dentry *dentry,
67419+ const struct vfsmount *mnt);
67420+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
67421+#ifdef CONFIG_COMPAT
67422+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
67423+#endif
67424+void gr_log_remount(const char *devname, const int retval);
67425+void gr_log_unmount(const char *devname, const int retval);
67426+void gr_log_mount(const char *from, const char *to, const int retval);
67427+void gr_log_textrel(struct vm_area_struct *vma);
67428+void gr_log_rwxmmap(struct file *file);
67429+void gr_log_rwxmprotect(struct file *file);
67430+
67431+int gr_handle_follow_link(const struct inode *parent,
67432+ const struct inode *inode,
67433+ const struct dentry *dentry,
67434+ const struct vfsmount *mnt);
67435+int gr_handle_fifo(const struct dentry *dentry,
67436+ const struct vfsmount *mnt,
67437+ const struct dentry *dir, const int flag,
67438+ const int acc_mode);
67439+int gr_handle_hardlink(const struct dentry *dentry,
67440+ const struct vfsmount *mnt,
67441+ struct inode *inode,
67442+ const int mode, const char *to);
67443+
67444+int gr_is_capable(const int cap);
67445+int gr_is_capable_nolog(const int cap);
67446+void gr_learn_resource(const struct task_struct *task, const int limit,
67447+ const unsigned long wanted, const int gt);
67448+void gr_copy_label(struct task_struct *tsk);
67449+void gr_handle_crash(struct task_struct *task, const int sig);
67450+int gr_handle_signal(const struct task_struct *p, const int sig);
67451+int gr_check_crash_uid(const uid_t uid);
67452+int gr_check_protected_task(const struct task_struct *task);
67453+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
67454+int gr_acl_handle_mmap(const struct file *file,
67455+ const unsigned long prot);
67456+int gr_acl_handle_mprotect(const struct file *file,
67457+ const unsigned long prot);
67458+int gr_check_hidden_task(const struct task_struct *tsk);
67459+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
67460+ const struct vfsmount *mnt);
67461+__u32 gr_acl_handle_utime(const struct dentry *dentry,
67462+ const struct vfsmount *mnt);
67463+__u32 gr_acl_handle_access(const struct dentry *dentry,
67464+ const struct vfsmount *mnt, const int fmode);
67465+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
67466+ const struct vfsmount *mnt, mode_t mode);
67467+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
67468+ const struct vfsmount *mnt, mode_t mode);
67469+__u32 gr_acl_handle_chown(const struct dentry *dentry,
67470+ const struct vfsmount *mnt);
67471+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
67472+ const struct vfsmount *mnt);
67473+int gr_handle_ptrace(struct task_struct *task, const long request);
67474+int gr_handle_proc_ptrace(struct task_struct *task);
67475+__u32 gr_acl_handle_execve(const struct dentry *dentry,
67476+ const struct vfsmount *mnt);
67477+int gr_check_crash_exec(const struct file *filp);
67478+int gr_acl_is_enabled(void);
67479+void gr_set_kernel_label(struct task_struct *task);
67480+void gr_set_role_label(struct task_struct *task, const uid_t uid,
67481+ const gid_t gid);
67482+int gr_set_proc_label(const struct dentry *dentry,
67483+ const struct vfsmount *mnt,
67484+ const int unsafe_flags);
67485+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
67486+ const struct vfsmount *mnt);
67487+__u32 gr_acl_handle_open(const struct dentry *dentry,
67488+ const struct vfsmount *mnt, int acc_mode);
67489+__u32 gr_acl_handle_creat(const struct dentry *dentry,
67490+ const struct dentry *p_dentry,
67491+ const struct vfsmount *p_mnt,
67492+ int open_flags, int acc_mode, const int imode);
67493+void gr_handle_create(const struct dentry *dentry,
67494+ const struct vfsmount *mnt);
67495+void gr_handle_proc_create(const struct dentry *dentry,
67496+ const struct inode *inode);
67497+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
67498+ const struct dentry *parent_dentry,
67499+ const struct vfsmount *parent_mnt,
67500+ const int mode);
67501+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
67502+ const struct dentry *parent_dentry,
67503+ const struct vfsmount *parent_mnt);
67504+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
67505+ const struct vfsmount *mnt);
67506+void gr_handle_delete(const ino_t ino, const dev_t dev);
67507+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
67508+ const struct vfsmount *mnt);
67509+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
67510+ const struct dentry *parent_dentry,
67511+ const struct vfsmount *parent_mnt,
67512+ const char *from);
67513+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
67514+ const struct dentry *parent_dentry,
67515+ const struct vfsmount *parent_mnt,
67516+ const struct dentry *old_dentry,
67517+ const struct vfsmount *old_mnt, const char *to);
67518+int gr_acl_handle_rename(struct dentry *new_dentry,
67519+ struct dentry *parent_dentry,
67520+ const struct vfsmount *parent_mnt,
67521+ struct dentry *old_dentry,
67522+ struct inode *old_parent_inode,
67523+ struct vfsmount *old_mnt, const char *newname);
67524+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67525+ struct dentry *old_dentry,
67526+ struct dentry *new_dentry,
67527+ struct vfsmount *mnt, const __u8 replace);
67528+__u32 gr_check_link(const struct dentry *new_dentry,
67529+ const struct dentry *parent_dentry,
67530+ const struct vfsmount *parent_mnt,
67531+ const struct dentry *old_dentry,
67532+ const struct vfsmount *old_mnt);
67533+int gr_acl_handle_filldir(const struct file *file, const char *name,
67534+ const unsigned int namelen, const ino_t ino);
67535+
67536+__u32 gr_acl_handle_unix(const struct dentry *dentry,
67537+ const struct vfsmount *mnt);
67538+void gr_acl_handle_exit(void);
67539+void gr_acl_handle_psacct(struct task_struct *task, const long code);
67540+int gr_acl_handle_procpidmem(const struct task_struct *task);
67541+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
67542+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
67543+void gr_audit_ptrace(struct task_struct *task);
67544+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
67545+
67546+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
67547+
67548+#ifdef CONFIG_GRKERNSEC
67549+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
67550+void gr_handle_vm86(void);
67551+void gr_handle_mem_readwrite(u64 from, u64 to);
67552+
67553+void gr_log_badprocpid(const char *entry);
67554+
67555+extern int grsec_enable_dmesg;
67556+extern int grsec_disable_privio;
67557+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67558+extern int grsec_enable_chroot_findtask;
67559+#endif
67560+#ifdef CONFIG_GRKERNSEC_SETXID
67561+extern int grsec_enable_setxid;
67562+#endif
67563+#endif
67564+
67565+#endif
67566diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
67567index 6a87154..a3ce57b 100644
67568--- a/include/linux/hdpu_features.h
67569+++ b/include/linux/hdpu_features.h
67570@@ -3,7 +3,7 @@
67571 struct cpustate_t {
67572 spinlock_t lock;
67573 int excl;
67574- int open_count;
67575+ atomic_t open_count;
67576 unsigned char cached_val;
67577 int inited;
67578 unsigned long *set_addr;
67579diff --git a/include/linux/highmem.h b/include/linux/highmem.h
67580index 211ff44..00ab6d7 100644
67581--- a/include/linux/highmem.h
67582+++ b/include/linux/highmem.h
67583@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
67584 kunmap_atomic(kaddr, KM_USER0);
67585 }
67586
67587+static inline void sanitize_highpage(struct page *page)
67588+{
67589+ void *kaddr;
67590+ unsigned long flags;
67591+
67592+ local_irq_save(flags);
67593+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
67594+ clear_page(kaddr);
67595+ kunmap_atomic(kaddr, KM_CLEARPAGE);
67596+ local_irq_restore(flags);
67597+}
67598+
67599 static inline void zero_user_segments(struct page *page,
67600 unsigned start1, unsigned end1,
67601 unsigned start2, unsigned end2)
67602diff --git a/include/linux/i2c.h b/include/linux/i2c.h
67603index 7b40cda..24eb44e 100644
67604--- a/include/linux/i2c.h
67605+++ b/include/linux/i2c.h
67606@@ -325,6 +325,7 @@ struct i2c_algorithm {
67607 /* To determine what the adapter supports */
67608 u32 (*functionality) (struct i2c_adapter *);
67609 };
67610+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
67611
67612 /*
67613 * i2c_adapter is the structure used to identify a physical i2c bus along
67614diff --git a/include/linux/i2o.h b/include/linux/i2o.h
67615index 4c4e57d..f3c5303 100644
67616--- a/include/linux/i2o.h
67617+++ b/include/linux/i2o.h
67618@@ -564,7 +564,7 @@ struct i2o_controller {
67619 struct i2o_device *exec; /* Executive */
67620 #if BITS_PER_LONG == 64
67621 spinlock_t context_list_lock; /* lock for context_list */
67622- atomic_t context_list_counter; /* needed for unique contexts */
67623+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
67624 struct list_head context_list; /* list of context id's
67625 and pointers */
67626 #endif
67627diff --git a/include/linux/init_task.h b/include/linux/init_task.h
67628index 21a6f5d..dc42eab 100644
67629--- a/include/linux/init_task.h
67630+++ b/include/linux/init_task.h
67631@@ -83,6 +83,12 @@ extern struct group_info init_groups;
67632 #define INIT_IDS
67633 #endif
67634
67635+#ifdef CONFIG_X86
67636+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
67637+#else
67638+#define INIT_TASK_THREAD_INFO
67639+#endif
67640+
67641 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
67642 /*
67643 * Because of the reduced scope of CAP_SETPCAP when filesystem
67644@@ -156,6 +162,7 @@ extern struct cred init_cred;
67645 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
67646 .comm = "swapper", \
67647 .thread = INIT_THREAD, \
67648+ INIT_TASK_THREAD_INFO \
67649 .fs = &init_fs, \
67650 .files = &init_files, \
67651 .signal = &init_signals, \
67652diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
67653index 4f0a72a..a849599 100644
67654--- a/include/linux/intel-iommu.h
67655+++ b/include/linux/intel-iommu.h
67656@@ -296,7 +296,7 @@ struct iommu_flush {
67657 u8 fm, u64 type);
67658 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
67659 unsigned int size_order, u64 type);
67660-};
67661+} __no_const;
67662
67663 enum {
67664 SR_DMAR_FECTL_REG,
67665diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
67666index c739150..be577b5 100644
67667--- a/include/linux/interrupt.h
67668+++ b/include/linux/interrupt.h
67669@@ -369,7 +369,7 @@ enum
67670 /* map softirq index to softirq name. update 'softirq_to_name' in
67671 * kernel/softirq.c when adding a new softirq.
67672 */
67673-extern char *softirq_to_name[NR_SOFTIRQS];
67674+extern const char * const softirq_to_name[NR_SOFTIRQS];
67675
67676 /* softirq mask and active fields moved to irq_cpustat_t in
67677 * asm/hardirq.h to get better cache usage. KAO
67678@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
67679
67680 struct softirq_action
67681 {
67682- void (*action)(struct softirq_action *);
67683+ void (*action)(void);
67684 };
67685
67686 asmlinkage void do_softirq(void);
67687 asmlinkage void __do_softirq(void);
67688-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
67689+extern void open_softirq(int nr, void (*action)(void));
67690 extern void softirq_init(void);
67691 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
67692 extern void raise_softirq_irqoff(unsigned int nr);
67693diff --git a/include/linux/irq.h b/include/linux/irq.h
67694index 9e5f45a..025865b 100644
67695--- a/include/linux/irq.h
67696+++ b/include/linux/irq.h
67697@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
67698 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
67699 bool boot)
67700 {
67701+#ifdef CONFIG_CPUMASK_OFFSTACK
67702 gfp_t gfp = GFP_ATOMIC;
67703
67704 if (boot)
67705 gfp = GFP_NOWAIT;
67706
67707-#ifdef CONFIG_CPUMASK_OFFSTACK
67708 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
67709 return false;
67710
67711diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
67712index 7922742..27306a2 100644
67713--- a/include/linux/kallsyms.h
67714+++ b/include/linux/kallsyms.h
67715@@ -15,7 +15,8 @@
67716
67717 struct module;
67718
67719-#ifdef CONFIG_KALLSYMS
67720+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
67721+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67722 /* Lookup the address for a symbol. Returns 0 if not found. */
67723 unsigned long kallsyms_lookup_name(const char *name);
67724
67725@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
67726 /* Stupid that this does nothing, but I didn't create this mess. */
67727 #define __print_symbol(fmt, addr)
67728 #endif /*CONFIG_KALLSYMS*/
67729+#else /* when included by kallsyms.c, vsnprintf.c, or
67730+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
67731+extern void __print_symbol(const char *fmt, unsigned long address);
67732+extern int sprint_symbol(char *buffer, unsigned long address);
67733+const char *kallsyms_lookup(unsigned long addr,
67734+ unsigned long *symbolsize,
67735+ unsigned long *offset,
67736+ char **modname, char *namebuf);
67737+#endif
67738
67739 /* This macro allows us to keep printk typechecking */
67740 static void __check_printsym_format(const char *fmt, ...)
67741diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
67742index 6adcc29..13369e8 100644
67743--- a/include/linux/kgdb.h
67744+++ b/include/linux/kgdb.h
67745@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
67746
67747 extern int kgdb_connected;
67748
67749-extern atomic_t kgdb_setting_breakpoint;
67750-extern atomic_t kgdb_cpu_doing_single_step;
67751+extern atomic_unchecked_t kgdb_setting_breakpoint;
67752+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
67753
67754 extern struct task_struct *kgdb_usethread;
67755 extern struct task_struct *kgdb_contthread;
67756@@ -235,7 +235,7 @@ struct kgdb_arch {
67757 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
67758 void (*remove_all_hw_break)(void);
67759 void (*correct_hw_break)(void);
67760-};
67761+} __do_const;
67762
67763 /**
67764 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
67765@@ -257,14 +257,14 @@ struct kgdb_io {
67766 int (*init) (void);
67767 void (*pre_exception) (void);
67768 void (*post_exception) (void);
67769-};
67770+} __do_const;
67771
67772-extern struct kgdb_arch arch_kgdb_ops;
67773+extern const struct kgdb_arch arch_kgdb_ops;
67774
67775 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
67776
67777-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
67778-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
67779+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
67780+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
67781
67782 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
67783 extern int kgdb_mem2hex(char *mem, char *buf, int count);
67784diff --git a/include/linux/kmod.h b/include/linux/kmod.h
67785index 384ca8b..83dd97d 100644
67786--- a/include/linux/kmod.h
67787+++ b/include/linux/kmod.h
67788@@ -31,6 +31,8 @@
67789 * usually useless though. */
67790 extern int __request_module(bool wait, const char *name, ...) \
67791 __attribute__((format(printf, 2, 3)));
67792+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
67793+ __attribute__((format(printf, 3, 4)));
67794 #define request_module(mod...) __request_module(true, mod)
67795 #define request_module_nowait(mod...) __request_module(false, mod)
67796 #define try_then_request_module(x, mod...) \
67797diff --git a/include/linux/kobject.h b/include/linux/kobject.h
67798index 58ae8e0..3950d3c 100644
67799--- a/include/linux/kobject.h
67800+++ b/include/linux/kobject.h
67801@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
67802
67803 struct kobj_type {
67804 void (*release)(struct kobject *kobj);
67805- struct sysfs_ops *sysfs_ops;
67806+ const struct sysfs_ops *sysfs_ops;
67807 struct attribute **default_attrs;
67808 };
67809
67810@@ -118,9 +118,9 @@ struct kobj_uevent_env {
67811 };
67812
67813 struct kset_uevent_ops {
67814- int (*filter)(struct kset *kset, struct kobject *kobj);
67815- const char *(*name)(struct kset *kset, struct kobject *kobj);
67816- int (*uevent)(struct kset *kset, struct kobject *kobj,
67817+ int (* const filter)(struct kset *kset, struct kobject *kobj);
67818+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
67819+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
67820 struct kobj_uevent_env *env);
67821 };
67822
67823@@ -132,7 +132,7 @@ struct kobj_attribute {
67824 const char *buf, size_t count);
67825 };
67826
67827-extern struct sysfs_ops kobj_sysfs_ops;
67828+extern const struct sysfs_ops kobj_sysfs_ops;
67829
67830 /**
67831 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
67832@@ -155,14 +155,14 @@ struct kset {
67833 struct list_head list;
67834 spinlock_t list_lock;
67835 struct kobject kobj;
67836- struct kset_uevent_ops *uevent_ops;
67837+ const struct kset_uevent_ops *uevent_ops;
67838 };
67839
67840 extern void kset_init(struct kset *kset);
67841 extern int __must_check kset_register(struct kset *kset);
67842 extern void kset_unregister(struct kset *kset);
67843 extern struct kset * __must_check kset_create_and_add(const char *name,
67844- struct kset_uevent_ops *u,
67845+ const struct kset_uevent_ops *u,
67846 struct kobject *parent_kobj);
67847
67848 static inline struct kset *to_kset(struct kobject *kobj)
67849diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
67850index c728a50..752d821 100644
67851--- a/include/linux/kvm_host.h
67852+++ b/include/linux/kvm_host.h
67853@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
67854 void vcpu_load(struct kvm_vcpu *vcpu);
67855 void vcpu_put(struct kvm_vcpu *vcpu);
67856
67857-int kvm_init(void *opaque, unsigned int vcpu_size,
67858+int kvm_init(const void *opaque, unsigned int vcpu_size,
67859 struct module *module);
67860 void kvm_exit(void);
67861
67862@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
67863 struct kvm_guest_debug *dbg);
67864 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
67865
67866-int kvm_arch_init(void *opaque);
67867+int kvm_arch_init(const void *opaque);
67868 void kvm_arch_exit(void);
67869
67870 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
67871diff --git a/include/linux/libata.h b/include/linux/libata.h
67872index a069916..223edde 100644
67873--- a/include/linux/libata.h
67874+++ b/include/linux/libata.h
67875@@ -525,11 +525,11 @@ struct ata_ioports {
67876
67877 struct ata_host {
67878 spinlock_t lock;
67879- struct device *dev;
67880+ struct device *dev;
67881 void __iomem * const *iomap;
67882 unsigned int n_ports;
67883 void *private_data;
67884- struct ata_port_operations *ops;
67885+ const struct ata_port_operations *ops;
67886 unsigned long flags;
67887 #ifdef CONFIG_ATA_ACPI
67888 acpi_handle acpi_handle;
67889@@ -710,7 +710,7 @@ struct ata_link {
67890
67891 struct ata_port {
67892 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
67893- struct ata_port_operations *ops;
67894+ const struct ata_port_operations *ops;
67895 spinlock_t *lock;
67896 /* Flags owned by the EH context. Only EH should touch these once the
67897 port is active */
67898@@ -884,7 +884,7 @@ struct ata_port_operations {
67899 * fields must be pointers.
67900 */
67901 const struct ata_port_operations *inherits;
67902-};
67903+} __do_const;
67904
67905 struct ata_port_info {
67906 unsigned long flags;
67907@@ -892,7 +892,7 @@ struct ata_port_info {
67908 unsigned long pio_mask;
67909 unsigned long mwdma_mask;
67910 unsigned long udma_mask;
67911- struct ata_port_operations *port_ops;
67912+ const struct ata_port_operations *port_ops;
67913 void *private_data;
67914 };
67915
67916@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
67917 extern const unsigned long sata_deb_timing_hotplug[];
67918 extern const unsigned long sata_deb_timing_long[];
67919
67920-extern struct ata_port_operations ata_dummy_port_ops;
67921+extern const struct ata_port_operations ata_dummy_port_ops;
67922 extern const struct ata_port_info ata_dummy_port_info;
67923
67924 static inline const unsigned long *
67925@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
67926 struct scsi_host_template *sht);
67927 extern void ata_host_detach(struct ata_host *host);
67928 extern void ata_host_init(struct ata_host *, struct device *,
67929- unsigned long, struct ata_port_operations *);
67930+ unsigned long, const struct ata_port_operations *);
67931 extern int ata_scsi_detect(struct scsi_host_template *sht);
67932 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
67933 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
67934diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
67935index fbc48f8..0886e57 100644
67936--- a/include/linux/lockd/bind.h
67937+++ b/include/linux/lockd/bind.h
67938@@ -23,13 +23,13 @@ struct svc_rqst;
67939 * This is the set of functions for lockd->nfsd communication
67940 */
67941 struct nlmsvc_binding {
67942- __be32 (*fopen)(struct svc_rqst *,
67943+ __be32 (* const fopen)(struct svc_rqst *,
67944 struct nfs_fh *,
67945 struct file **);
67946- void (*fclose)(struct file *);
67947+ void (* const fclose)(struct file *);
67948 };
67949
67950-extern struct nlmsvc_binding * nlmsvc_ops;
67951+extern const struct nlmsvc_binding * nlmsvc_ops;
67952
67953 /*
67954 * Similar to nfs_client_initdata, but without the NFS-specific
67955diff --git a/include/linux/mca.h b/include/linux/mca.h
67956index 3797270..7765ede 100644
67957--- a/include/linux/mca.h
67958+++ b/include/linux/mca.h
67959@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
67960 int region);
67961 void * (*mca_transform_memory)(struct mca_device *,
67962 void *memory);
67963-};
67964+} __no_const;
67965
67966 struct mca_bus {
67967 u64 default_dma_mask;
67968diff --git a/include/linux/memory.h b/include/linux/memory.h
67969index 37fa19b..b597c85 100644
67970--- a/include/linux/memory.h
67971+++ b/include/linux/memory.h
67972@@ -108,7 +108,7 @@ struct memory_accessor {
67973 size_t count);
67974 ssize_t (*write)(struct memory_accessor *, const char *buf,
67975 off_t offset, size_t count);
67976-};
67977+} __no_const;
67978
67979 /*
67980 * Kernel text modification mutex, used for code patching. Users of this lock
67981diff --git a/include/linux/mm.h b/include/linux/mm.h
67982index 11e5be6..1ff2423 100644
67983--- a/include/linux/mm.h
67984+++ b/include/linux/mm.h
67985@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
67986
67987 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
67988 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
67989+
67990+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67991+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
67992+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
67993+#else
67994 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
67995+#endif
67996+
67997 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
67998 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
67999
68000@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
68001 int set_page_dirty_lock(struct page *page);
68002 int clear_page_dirty_for_io(struct page *page);
68003
68004-/* Is the vma a continuation of the stack vma above it? */
68005-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
68006-{
68007- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
68008-}
68009-
68010 extern unsigned long move_page_tables(struct vm_area_struct *vma,
68011 unsigned long old_addr, struct vm_area_struct *new_vma,
68012 unsigned long new_addr, unsigned long len);
68013@@ -890,6 +891,8 @@ struct shrinker {
68014 extern void register_shrinker(struct shrinker *);
68015 extern void unregister_shrinker(struct shrinker *);
68016
68017+pgprot_t vm_get_page_prot(unsigned long vm_flags);
68018+
68019 int vma_wants_writenotify(struct vm_area_struct *vma);
68020
68021 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
68022@@ -1162,6 +1165,7 @@ out:
68023 }
68024
68025 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
68026+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
68027
68028 extern unsigned long do_brk(unsigned long, unsigned long);
68029
68030@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
68031 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
68032 struct vm_area_struct **pprev);
68033
68034+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
68035+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
68036+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
68037+
68038 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
68039 NULL if none. Assume start_addr < end_addr. */
68040 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
68041@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
68042 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
68043 }
68044
68045-pgprot_t vm_get_page_prot(unsigned long vm_flags);
68046 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
68047 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
68048 unsigned long pfn, unsigned long size, pgprot_t);
68049@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
68050 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
68051 extern int sysctl_memory_failure_early_kill;
68052 extern int sysctl_memory_failure_recovery;
68053-extern atomic_long_t mce_bad_pages;
68054+extern atomic_long_unchecked_t mce_bad_pages;
68055+
68056+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
68057+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
68058+#else
68059+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
68060+#endif
68061
68062 #endif /* __KERNEL__ */
68063 #endif /* _LINUX_MM_H */
68064diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
68065index 9d12ed5..6d9707a 100644
68066--- a/include/linux/mm_types.h
68067+++ b/include/linux/mm_types.h
68068@@ -186,6 +186,8 @@ struct vm_area_struct {
68069 #ifdef CONFIG_NUMA
68070 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
68071 #endif
68072+
68073+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
68074 };
68075
68076 struct core_thread {
68077@@ -287,6 +289,24 @@ struct mm_struct {
68078 #ifdef CONFIG_MMU_NOTIFIER
68079 struct mmu_notifier_mm *mmu_notifier_mm;
68080 #endif
68081+
68082+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68083+ unsigned long pax_flags;
68084+#endif
68085+
68086+#ifdef CONFIG_PAX_DLRESOLVE
68087+ unsigned long call_dl_resolve;
68088+#endif
68089+
68090+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68091+ unsigned long call_syscall;
68092+#endif
68093+
68094+#ifdef CONFIG_PAX_ASLR
68095+ unsigned long delta_mmap; /* randomized offset */
68096+ unsigned long delta_stack; /* randomized offset */
68097+#endif
68098+
68099 };
68100
68101 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68102diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68103index 4e02ee2..afb159e 100644
68104--- a/include/linux/mmu_notifier.h
68105+++ b/include/linux/mmu_notifier.h
68106@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68107 */
68108 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68109 ({ \
68110- pte_t __pte; \
68111+ pte_t ___pte; \
68112 struct vm_area_struct *___vma = __vma; \
68113 unsigned long ___address = __address; \
68114- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68115+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68116 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68117- __pte; \
68118+ ___pte; \
68119 })
68120
68121 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68122diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68123index 6c31a2a..4b0e930 100644
68124--- a/include/linux/mmzone.h
68125+++ b/include/linux/mmzone.h
68126@@ -350,7 +350,7 @@ struct zone {
68127 unsigned long flags; /* zone flags, see below */
68128
68129 /* Zone statistics */
68130- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68131+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68132
68133 /*
68134 * prev_priority holds the scanning priority for this zone. It is
68135diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68136index f58e9d8..3503935 100644
68137--- a/include/linux/mod_devicetable.h
68138+++ b/include/linux/mod_devicetable.h
68139@@ -12,7 +12,7 @@
68140 typedef unsigned long kernel_ulong_t;
68141 #endif
68142
68143-#define PCI_ANY_ID (~0)
68144+#define PCI_ANY_ID ((__u16)~0)
68145
68146 struct pci_device_id {
68147 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68148@@ -131,7 +131,7 @@ struct usb_device_id {
68149 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68150 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68151
68152-#define HID_ANY_ID (~0)
68153+#define HID_ANY_ID (~0U)
68154
68155 struct hid_device_id {
68156 __u16 bus;
68157diff --git a/include/linux/module.h b/include/linux/module.h
68158index 482efc8..642032b 100644
68159--- a/include/linux/module.h
68160+++ b/include/linux/module.h
68161@@ -16,6 +16,7 @@
68162 #include <linux/kobject.h>
68163 #include <linux/moduleparam.h>
68164 #include <linux/tracepoint.h>
68165+#include <linux/fs.h>
68166
68167 #include <asm/local.h>
68168 #include <asm/module.h>
68169@@ -287,16 +288,16 @@ struct module
68170 int (*init)(void);
68171
68172 /* If this is non-NULL, vfree after init() returns */
68173- void *module_init;
68174+ void *module_init_rx, *module_init_rw;
68175
68176 /* Here is the actual code + data, vfree'd on unload. */
68177- void *module_core;
68178+ void *module_core_rx, *module_core_rw;
68179
68180 /* Here are the sizes of the init and core sections */
68181- unsigned int init_size, core_size;
68182+ unsigned int init_size_rw, core_size_rw;
68183
68184 /* The size of the executable code in each section. */
68185- unsigned int init_text_size, core_text_size;
68186+ unsigned int init_size_rx, core_size_rx;
68187
68188 /* Arch-specific module values */
68189 struct mod_arch_specific arch;
68190@@ -345,6 +346,10 @@ struct module
68191 #ifdef CONFIG_EVENT_TRACING
68192 struct ftrace_event_call *trace_events;
68193 unsigned int num_trace_events;
68194+ struct file_operations trace_id;
68195+ struct file_operations trace_enable;
68196+ struct file_operations trace_format;
68197+ struct file_operations trace_filter;
68198 #endif
68199 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68200 unsigned long *ftrace_callsites;
68201@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68202 bool is_module_address(unsigned long addr);
68203 bool is_module_text_address(unsigned long addr);
68204
68205+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68206+{
68207+
68208+#ifdef CONFIG_PAX_KERNEXEC
68209+ if (ktla_ktva(addr) >= (unsigned long)start &&
68210+ ktla_ktva(addr) < (unsigned long)start + size)
68211+ return 1;
68212+#endif
68213+
68214+ return ((void *)addr >= start && (void *)addr < start + size);
68215+}
68216+
68217+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68218+{
68219+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68220+}
68221+
68222+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68223+{
68224+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68225+}
68226+
68227+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68228+{
68229+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68230+}
68231+
68232+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68233+{
68234+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68235+}
68236+
68237 static inline int within_module_core(unsigned long addr, struct module *mod)
68238 {
68239- return (unsigned long)mod->module_core <= addr &&
68240- addr < (unsigned long)mod->module_core + mod->core_size;
68241+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68242 }
68243
68244 static inline int within_module_init(unsigned long addr, struct module *mod)
68245 {
68246- return (unsigned long)mod->module_init <= addr &&
68247- addr < (unsigned long)mod->module_init + mod->init_size;
68248+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68249 }
68250
68251 /* Search for module by name: must hold module_mutex. */
68252diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68253index c1f40c2..682ca53 100644
68254--- a/include/linux/moduleloader.h
68255+++ b/include/linux/moduleloader.h
68256@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68257 sections. Returns NULL on failure. */
68258 void *module_alloc(unsigned long size);
68259
68260+#ifdef CONFIG_PAX_KERNEXEC
68261+void *module_alloc_exec(unsigned long size);
68262+#else
68263+#define module_alloc_exec(x) module_alloc(x)
68264+#endif
68265+
68266 /* Free memory returned from module_alloc. */
68267 void module_free(struct module *mod, void *module_region);
68268
68269+#ifdef CONFIG_PAX_KERNEXEC
68270+void module_free_exec(struct module *mod, void *module_region);
68271+#else
68272+#define module_free_exec(x, y) module_free((x), (y))
68273+#endif
68274+
68275 /* Apply the given relocation to the (simplified) ELF. Return -error
68276 or 0. */
68277 int apply_relocate(Elf_Shdr *sechdrs,
68278diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68279index 82a9124..8a5f622 100644
68280--- a/include/linux/moduleparam.h
68281+++ b/include/linux/moduleparam.h
68282@@ -132,7 +132,7 @@ struct kparam_array
68283
68284 /* Actually copy string: maxlen param is usually sizeof(string). */
68285 #define module_param_string(name, string, len, perm) \
68286- static const struct kparam_string __param_string_##name \
68287+ static const struct kparam_string __param_string_##name __used \
68288 = { len, string }; \
68289 __module_param_call(MODULE_PARAM_PREFIX, name, \
68290 param_set_copystring, param_get_string, \
68291@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68292
68293 /* Comma-separated array: *nump is set to number they actually specified. */
68294 #define module_param_array_named(name, array, type, nump, perm) \
68295- static const struct kparam_array __param_arr_##name \
68296+ static const struct kparam_array __param_arr_##name __used \
68297 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68298 sizeof(array[0]), array }; \
68299 __module_param_call(MODULE_PARAM_PREFIX, name, \
68300diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68301index 878cab4..c92cb3e 100644
68302--- a/include/linux/mutex.h
68303+++ b/include/linux/mutex.h
68304@@ -51,7 +51,7 @@ struct mutex {
68305 spinlock_t wait_lock;
68306 struct list_head wait_list;
68307 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68308- struct thread_info *owner;
68309+ struct task_struct *owner;
68310 #endif
68311 #ifdef CONFIG_DEBUG_MUTEXES
68312 const char *name;
68313diff --git a/include/linux/namei.h b/include/linux/namei.h
68314index ec0f607..d19e675 100644
68315--- a/include/linux/namei.h
68316+++ b/include/linux/namei.h
68317@@ -22,7 +22,7 @@ struct nameidata {
68318 unsigned int flags;
68319 int last_type;
68320 unsigned depth;
68321- char *saved_names[MAX_NESTED_LINKS + 1];
68322+ const char *saved_names[MAX_NESTED_LINKS + 1];
68323
68324 /* Intent data */
68325 union {
68326@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68327 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68328 extern void unlock_rename(struct dentry *, struct dentry *);
68329
68330-static inline void nd_set_link(struct nameidata *nd, char *path)
68331+static inline void nd_set_link(struct nameidata *nd, const char *path)
68332 {
68333 nd->saved_names[nd->depth] = path;
68334 }
68335
68336-static inline char *nd_get_link(struct nameidata *nd)
68337+static inline const char *nd_get_link(const struct nameidata *nd)
68338 {
68339 return nd->saved_names[nd->depth];
68340 }
68341diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
68342index 9d7e8f7..04428c5 100644
68343--- a/include/linux/netdevice.h
68344+++ b/include/linux/netdevice.h
68345@@ -637,6 +637,7 @@ struct net_device_ops {
68346 u16 xid);
68347 #endif
68348 };
68349+typedef struct net_device_ops __no_const net_device_ops_no_const;
68350
68351 /*
68352 * The DEVICE structure.
68353diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
68354new file mode 100644
68355index 0000000..33f4af8
68356--- /dev/null
68357+++ b/include/linux/netfilter/xt_gradm.h
68358@@ -0,0 +1,9 @@
68359+#ifndef _LINUX_NETFILTER_XT_GRADM_H
68360+#define _LINUX_NETFILTER_XT_GRADM_H 1
68361+
68362+struct xt_gradm_mtinfo {
68363+ __u16 flags;
68364+ __u16 invflags;
68365+};
68366+
68367+#endif
68368diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
68369index b359c4a..c08b334 100644
68370--- a/include/linux/nodemask.h
68371+++ b/include/linux/nodemask.h
68372@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
68373
68374 #define any_online_node(mask) \
68375 ({ \
68376- int node; \
68377- for_each_node_mask(node, (mask)) \
68378- if (node_online(node)) \
68379+ int __node; \
68380+ for_each_node_mask(__node, (mask)) \
68381+ if (node_online(__node)) \
68382 break; \
68383- node; \
68384+ __node; \
68385 })
68386
68387 #define num_online_nodes() num_node_state(N_ONLINE)
68388diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
68389index 5171639..7cf4235 100644
68390--- a/include/linux/oprofile.h
68391+++ b/include/linux/oprofile.h
68392@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
68393 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
68394 char const * name, ulong * val);
68395
68396-/** Create a file for read-only access to an atomic_t. */
68397+/** Create a file for read-only access to an atomic_unchecked_t. */
68398 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
68399- char const * name, atomic_t * val);
68400+ char const * name, atomic_unchecked_t * val);
68401
68402 /** create a directory */
68403 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
68404diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
68405index 3c62ed4..8924c7c 100644
68406--- a/include/linux/pagemap.h
68407+++ b/include/linux/pagemap.h
68408@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
68409 if (((unsigned long)uaddr & PAGE_MASK) !=
68410 ((unsigned long)end & PAGE_MASK))
68411 ret = __get_user(c, end);
68412+ (void)c;
68413 }
68414+ (void)c;
68415 return ret;
68416 }
68417
68418diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
68419index 81c9689..a567a55 100644
68420--- a/include/linux/perf_event.h
68421+++ b/include/linux/perf_event.h
68422@@ -476,7 +476,7 @@ struct hw_perf_event {
68423 struct hrtimer hrtimer;
68424 };
68425 };
68426- atomic64_t prev_count;
68427+ atomic64_unchecked_t prev_count;
68428 u64 sample_period;
68429 u64 last_period;
68430 atomic64_t period_left;
68431@@ -557,7 +557,7 @@ struct perf_event {
68432 const struct pmu *pmu;
68433
68434 enum perf_event_active_state state;
68435- atomic64_t count;
68436+ atomic64_unchecked_t count;
68437
68438 /*
68439 * These are the total time in nanoseconds that the event
68440@@ -595,8 +595,8 @@ struct perf_event {
68441 * These accumulate total time (in nanoseconds) that children
68442 * events have been enabled and running, respectively.
68443 */
68444- atomic64_t child_total_time_enabled;
68445- atomic64_t child_total_time_running;
68446+ atomic64_unchecked_t child_total_time_enabled;
68447+ atomic64_unchecked_t child_total_time_running;
68448
68449 /*
68450 * Protect attach/detach and child_list:
68451diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
68452index b43a9e0..b77d869 100644
68453--- a/include/linux/pipe_fs_i.h
68454+++ b/include/linux/pipe_fs_i.h
68455@@ -46,9 +46,9 @@ struct pipe_inode_info {
68456 wait_queue_head_t wait;
68457 unsigned int nrbufs, curbuf;
68458 struct page *tmp_page;
68459- unsigned int readers;
68460- unsigned int writers;
68461- unsigned int waiting_writers;
68462+ atomic_t readers;
68463+ atomic_t writers;
68464+ atomic_t waiting_writers;
68465 unsigned int r_counter;
68466 unsigned int w_counter;
68467 struct fasync_struct *fasync_readers;
68468diff --git a/include/linux/poison.h b/include/linux/poison.h
68469index 34066ff..e95d744 100644
68470--- a/include/linux/poison.h
68471+++ b/include/linux/poison.h
68472@@ -19,8 +19,8 @@
68473 * under normal circumstances, used to verify that nobody uses
68474 * non-initialized list entries.
68475 */
68476-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
68477-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
68478+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
68479+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
68480
68481 /********** include/linux/timer.h **********/
68482 /*
68483diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
68484index 4f71bf4..77ffa64 100644
68485--- a/include/linux/posix-timers.h
68486+++ b/include/linux/posix-timers.h
68487@@ -67,7 +67,7 @@ struct k_itimer {
68488 };
68489
68490 struct k_clock {
68491- int res; /* in nanoseconds */
68492+ const int res; /* in nanoseconds */
68493 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
68494 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
68495 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
68496diff --git a/include/linux/preempt.h b/include/linux/preempt.h
68497index 72b1a10..13303a9 100644
68498--- a/include/linux/preempt.h
68499+++ b/include/linux/preempt.h
68500@@ -110,7 +110,7 @@ struct preempt_ops {
68501 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
68502 void (*sched_out)(struct preempt_notifier *notifier,
68503 struct task_struct *next);
68504-};
68505+} __no_const;
68506
68507 /**
68508 * preempt_notifier - key for installing preemption notifiers
68509diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
68510index 379eaed..1bf73e3 100644
68511--- a/include/linux/proc_fs.h
68512+++ b/include/linux/proc_fs.h
68513@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
68514 return proc_create_data(name, mode, parent, proc_fops, NULL);
68515 }
68516
68517+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
68518+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
68519+{
68520+#ifdef CONFIG_GRKERNSEC_PROC_USER
68521+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
68522+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68523+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
68524+#else
68525+ return proc_create_data(name, mode, parent, proc_fops, NULL);
68526+#endif
68527+}
68528+
68529+
68530 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
68531 mode_t mode, struct proc_dir_entry *base,
68532 read_proc_t *read_proc, void * data)
68533@@ -256,7 +269,7 @@ union proc_op {
68534 int (*proc_show)(struct seq_file *m,
68535 struct pid_namespace *ns, struct pid *pid,
68536 struct task_struct *task);
68537-};
68538+} __no_const;
68539
68540 struct ctl_table_header;
68541 struct ctl_table;
68542diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
68543index 7456d7d..6c1cfc9 100644
68544--- a/include/linux/ptrace.h
68545+++ b/include/linux/ptrace.h
68546@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
68547 extern void exit_ptrace(struct task_struct *tracer);
68548 #define PTRACE_MODE_READ 1
68549 #define PTRACE_MODE_ATTACH 2
68550-/* Returns 0 on success, -errno on denial. */
68551-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
68552 /* Returns true on success, false on denial. */
68553 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
68554+/* Returns true on success, false on denial. */
68555+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
68556
68557 static inline int ptrace_reparented(struct task_struct *child)
68558 {
68559diff --git a/include/linux/random.h b/include/linux/random.h
68560index 2948046..3262567 100644
68561--- a/include/linux/random.h
68562+++ b/include/linux/random.h
68563@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
68564 u32 random32(void);
68565 void srandom32(u32 seed);
68566
68567+static inline unsigned long pax_get_random_long(void)
68568+{
68569+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
68570+}
68571+
68572 #endif /* __KERNEL___ */
68573
68574 #endif /* _LINUX_RANDOM_H */
68575diff --git a/include/linux/reboot.h b/include/linux/reboot.h
68576index 988e55f..17cb4ef 100644
68577--- a/include/linux/reboot.h
68578+++ b/include/linux/reboot.h
68579@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
68580 * Architecture-specific implementations of sys_reboot commands.
68581 */
68582
68583-extern void machine_restart(char *cmd);
68584-extern void machine_halt(void);
68585-extern void machine_power_off(void);
68586+extern void machine_restart(char *cmd) __noreturn;
68587+extern void machine_halt(void) __noreturn;
68588+extern void machine_power_off(void) __noreturn;
68589
68590 extern void machine_shutdown(void);
68591 struct pt_regs;
68592@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
68593 */
68594
68595 extern void kernel_restart_prepare(char *cmd);
68596-extern void kernel_restart(char *cmd);
68597-extern void kernel_halt(void);
68598-extern void kernel_power_off(void);
68599+extern void kernel_restart(char *cmd) __noreturn;
68600+extern void kernel_halt(void) __noreturn;
68601+extern void kernel_power_off(void) __noreturn;
68602
68603 void ctrl_alt_del(void);
68604
68605@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
68606 * Emergency restart, callable from an interrupt handler.
68607 */
68608
68609-extern void emergency_restart(void);
68610+extern void emergency_restart(void) __noreturn;
68611 #include <asm/emergency-restart.h>
68612
68613 #endif
68614diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
68615index dd31e7b..5b03c5c 100644
68616--- a/include/linux/reiserfs_fs.h
68617+++ b/include/linux/reiserfs_fs.h
68618@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68619 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
68620
68621 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68622-#define get_generation(s) atomic_read (&fs_generation(s))
68623+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68624 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68625 #define __fs_changed(gen,s) (gen != get_generation (s))
68626 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
68627@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
68628 */
68629
68630 struct item_operations {
68631- int (*bytes_number) (struct item_head * ih, int block_size);
68632- void (*decrement_key) (struct cpu_key *);
68633- int (*is_left_mergeable) (struct reiserfs_key * ih,
68634+ int (* const bytes_number) (struct item_head * ih, int block_size);
68635+ void (* const decrement_key) (struct cpu_key *);
68636+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
68637 unsigned long bsize);
68638- void (*print_item) (struct item_head *, char *item);
68639- void (*check_item) (struct item_head *, char *item);
68640+ void (* const print_item) (struct item_head *, char *item);
68641+ void (* const check_item) (struct item_head *, char *item);
68642
68643- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68644+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68645 int is_affected, int insert_size);
68646- int (*check_left) (struct virtual_item * vi, int free,
68647+ int (* const check_left) (struct virtual_item * vi, int free,
68648 int start_skip, int end_skip);
68649- int (*check_right) (struct virtual_item * vi, int free);
68650- int (*part_size) (struct virtual_item * vi, int from, int to);
68651- int (*unit_num) (struct virtual_item * vi);
68652- void (*print_vi) (struct virtual_item * vi);
68653+ int (* const check_right) (struct virtual_item * vi, int free);
68654+ int (* const part_size) (struct virtual_item * vi, int from, int to);
68655+ int (* const unit_num) (struct virtual_item * vi);
68656+ void (* const print_vi) (struct virtual_item * vi);
68657 };
68658
68659-extern struct item_operations *item_ops[TYPE_ANY + 1];
68660+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
68661
68662 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
68663 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
68664diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
68665index dab68bb..0688727 100644
68666--- a/include/linux/reiserfs_fs_sb.h
68667+++ b/include/linux/reiserfs_fs_sb.h
68668@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
68669 /* Comment? -Hans */
68670 wait_queue_head_t s_wait;
68671 /* To be obsoleted soon by per buffer seals.. -Hans */
68672- atomic_t s_generation_counter; // increased by one every time the
68673+ atomic_unchecked_t s_generation_counter; // increased by one every time the
68674 // tree gets re-balanced
68675 unsigned long s_properties; /* File system properties. Currently holds
68676 on-disk FS format */
68677diff --git a/include/linux/relay.h b/include/linux/relay.h
68678index 14a86bc..17d0700 100644
68679--- a/include/linux/relay.h
68680+++ b/include/linux/relay.h
68681@@ -159,7 +159,7 @@ struct rchan_callbacks
68682 * The callback should return 0 if successful, negative if not.
68683 */
68684 int (*remove_buf_file)(struct dentry *dentry);
68685-};
68686+} __no_const;
68687
68688 /*
68689 * CONFIG_RELAY kernel API, kernel/relay.c
68690diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
68691index 3392c59..a746428 100644
68692--- a/include/linux/rfkill.h
68693+++ b/include/linux/rfkill.h
68694@@ -144,6 +144,7 @@ struct rfkill_ops {
68695 void (*query)(struct rfkill *rfkill, void *data);
68696 int (*set_block)(void *data, bool blocked);
68697 };
68698+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
68699
68700 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
68701 /**
68702diff --git a/include/linux/sched.h b/include/linux/sched.h
68703index 71849bf..03ceae8 100644
68704--- a/include/linux/sched.h
68705+++ b/include/linux/sched.h
68706@@ -101,6 +101,7 @@ struct bio;
68707 struct fs_struct;
68708 struct bts_context;
68709 struct perf_event_context;
68710+struct linux_binprm;
68711
68712 /*
68713 * List of flags we want to share for kernel threads,
68714@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
68715 extern signed long schedule_timeout_uninterruptible(signed long timeout);
68716 asmlinkage void __schedule(void);
68717 asmlinkage void schedule(void);
68718-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
68719+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
68720
68721 struct nsproxy;
68722 struct user_namespace;
68723@@ -371,9 +372,12 @@ struct user_namespace;
68724 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
68725
68726 extern int sysctl_max_map_count;
68727+extern unsigned long sysctl_heap_stack_gap;
68728
68729 #include <linux/aio.h>
68730
68731+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
68732+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
68733 extern unsigned long
68734 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
68735 unsigned long, unsigned long);
68736@@ -666,6 +670,16 @@ struct signal_struct {
68737 struct tty_audit_buf *tty_audit_buf;
68738 #endif
68739
68740+#ifdef CONFIG_GRKERNSEC
68741+ u32 curr_ip;
68742+ u32 saved_ip;
68743+ u32 gr_saddr;
68744+ u32 gr_daddr;
68745+ u16 gr_sport;
68746+ u16 gr_dport;
68747+ u8 used_accept:1;
68748+#endif
68749+
68750 int oom_adj; /* OOM kill score adjustment (bit shift) */
68751 };
68752
68753@@ -723,6 +737,11 @@ struct user_struct {
68754 struct key *session_keyring; /* UID's default session keyring */
68755 #endif
68756
68757+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
68758+ unsigned int banned;
68759+ unsigned long ban_expires;
68760+#endif
68761+
68762 /* Hash table maintenance information */
68763 struct hlist_node uidhash_node;
68764 uid_t uid;
68765@@ -1328,8 +1347,8 @@ struct task_struct {
68766 struct list_head thread_group;
68767
68768 struct completion *vfork_done; /* for vfork() */
68769- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
68770- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68771+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
68772+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68773
68774 cputime_t utime, stime, utimescaled, stimescaled;
68775 cputime_t gtime;
68776@@ -1343,16 +1362,6 @@ struct task_struct {
68777 struct task_cputime cputime_expires;
68778 struct list_head cpu_timers[3];
68779
68780-/* process credentials */
68781- const struct cred *real_cred; /* objective and real subjective task
68782- * credentials (COW) */
68783- const struct cred *cred; /* effective (overridable) subjective task
68784- * credentials (COW) */
68785- struct mutex cred_guard_mutex; /* guard against foreign influences on
68786- * credential calculations
68787- * (notably. ptrace) */
68788- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68789-
68790 char comm[TASK_COMM_LEN]; /* executable name excluding path
68791 - access with [gs]et_task_comm (which lock
68792 it with task_lock())
68793@@ -1369,6 +1378,10 @@ struct task_struct {
68794 #endif
68795 /* CPU-specific state of this task */
68796 struct thread_struct thread;
68797+/* thread_info moved to task_struct */
68798+#ifdef CONFIG_X86
68799+ struct thread_info tinfo;
68800+#endif
68801 /* filesystem information */
68802 struct fs_struct *fs;
68803 /* open file information */
68804@@ -1436,6 +1449,15 @@ struct task_struct {
68805 int hardirq_context;
68806 int softirq_context;
68807 #endif
68808+
68809+/* process credentials */
68810+ const struct cred *real_cred; /* objective and real subjective task
68811+ * credentials (COW) */
68812+ struct mutex cred_guard_mutex; /* guard against foreign influences on
68813+ * credential calculations
68814+ * (notably. ptrace) */
68815+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68816+
68817 #ifdef CONFIG_LOCKDEP
68818 # define MAX_LOCK_DEPTH 48UL
68819 u64 curr_chain_key;
68820@@ -1456,6 +1478,9 @@ struct task_struct {
68821
68822 struct backing_dev_info *backing_dev_info;
68823
68824+ const struct cred *cred; /* effective (overridable) subjective task
68825+ * credentials (COW) */
68826+
68827 struct io_context *io_context;
68828
68829 unsigned long ptrace_message;
68830@@ -1519,6 +1544,27 @@ struct task_struct {
68831 unsigned long default_timer_slack_ns;
68832
68833 struct list_head *scm_work_list;
68834+
68835+#ifdef CONFIG_GRKERNSEC
68836+ /* grsecurity */
68837+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68838+ long long exec_id;
68839+#endif
68840+#ifdef CONFIG_GRKERNSEC_SETXID
68841+ const struct cred *delayed_cred;
68842+#endif
68843+ struct dentry *gr_chroot_dentry;
68844+ struct acl_subject_label *acl;
68845+ struct acl_role_label *role;
68846+ struct file *exec_file;
68847+ u16 acl_role_id;
68848+ /* is this the task that authenticated to the special role */
68849+ u8 acl_sp_role;
68850+ u8 is_writable;
68851+ u8 brute;
68852+ u8 gr_is_chrooted;
68853+#endif
68854+
68855 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
68856 /* Index of current stored adress in ret_stack */
68857 int curr_ret_stack;
68858@@ -1542,6 +1588,57 @@ struct task_struct {
68859 #endif /* CONFIG_TRACING */
68860 };
68861
68862+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
68863+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
68864+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
68865+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
68866+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
68867+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
68868+
68869+#ifdef CONFIG_PAX_SOFTMODE
68870+extern int pax_softmode;
68871+#endif
68872+
68873+extern int pax_check_flags(unsigned long *);
68874+
68875+/* if tsk != current then task_lock must be held on it */
68876+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68877+static inline unsigned long pax_get_flags(struct task_struct *tsk)
68878+{
68879+ if (likely(tsk->mm))
68880+ return tsk->mm->pax_flags;
68881+ else
68882+ return 0UL;
68883+}
68884+
68885+/* if tsk != current then task_lock must be held on it */
68886+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
68887+{
68888+ if (likely(tsk->mm)) {
68889+ tsk->mm->pax_flags = flags;
68890+ return 0;
68891+ }
68892+ return -EINVAL;
68893+}
68894+#endif
68895+
68896+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68897+extern void pax_set_initial_flags(struct linux_binprm *bprm);
68898+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
68899+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68900+#endif
68901+
68902+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
68903+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
68904+extern void pax_report_refcount_overflow(struct pt_regs *regs);
68905+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
68906+
68907+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
68908+extern void pax_track_stack(void);
68909+#else
68910+static inline void pax_track_stack(void) {}
68911+#endif
68912+
68913 /* Future-safe accessor for struct task_struct's cpus_allowed. */
68914 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
68915
68916@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
68917 #define PF_DUMPCORE 0x00000200 /* dumped core */
68918 #define PF_SIGNALED 0x00000400 /* killed by a signal */
68919 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
68920-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
68921+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
68922 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
68923 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
68924 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
68925@@ -1978,7 +2075,9 @@ void yield(void);
68926 extern struct exec_domain default_exec_domain;
68927
68928 union thread_union {
68929+#ifndef CONFIG_X86
68930 struct thread_info thread_info;
68931+#endif
68932 unsigned long stack[THREAD_SIZE/sizeof(long)];
68933 };
68934
68935@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns;
68936 */
68937
68938 extern struct task_struct *find_task_by_vpid(pid_t nr);
68939+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
68940 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
68941 struct pid_namespace *ns);
68942
68943@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
68944 extern void exit_itimers(struct signal_struct *);
68945 extern void flush_itimer_signals(void);
68946
68947-extern NORET_TYPE void do_group_exit(int);
68948+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
68949
68950 extern void daemonize(const char *, ...);
68951 extern int allow_signal(int);
68952@@ -2284,13 +2384,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
68953
68954 #endif
68955
68956-static inline int object_is_on_stack(void *obj)
68957+static inline int object_starts_on_stack(void *obj)
68958 {
68959- void *stack = task_stack_page(current);
68960+ const void *stack = task_stack_page(current);
68961
68962 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
68963 }
68964
68965+#ifdef CONFIG_PAX_USERCOPY
68966+extern int object_is_on_stack(const void *obj, unsigned long len);
68967+#endif
68968+
68969 extern void thread_info_cache_init(void);
68970
68971 #ifdef CONFIG_DEBUG_STACK_USAGE
68972diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
68973index 1ee2c05..81b7ec4 100644
68974--- a/include/linux/screen_info.h
68975+++ b/include/linux/screen_info.h
68976@@ -42,7 +42,8 @@ struct screen_info {
68977 __u16 pages; /* 0x32 */
68978 __u16 vesa_attributes; /* 0x34 */
68979 __u32 capabilities; /* 0x36 */
68980- __u8 _reserved[6]; /* 0x3a */
68981+ __u16 vesapm_size; /* 0x3a */
68982+ __u8 _reserved[4]; /* 0x3c */
68983 } __attribute__((packed));
68984
68985 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
68986diff --git a/include/linux/security.h b/include/linux/security.h
68987index d40d23f..d739b08 100644
68988--- a/include/linux/security.h
68989+++ b/include/linux/security.h
68990@@ -34,6 +34,7 @@
68991 #include <linux/key.h>
68992 #include <linux/xfrm.h>
68993 #include <linux/gfp.h>
68994+#include <linux/grsecurity.h>
68995 #include <net/flow.h>
68996
68997 /* Maximum number of letters for an LSM name string */
68998@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
68999 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
69000 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
69001 extern int cap_task_setnice(struct task_struct *p, int nice);
69002-extern int cap_syslog(int type);
69003+extern int cap_syslog(int type, bool from_file);
69004 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
69005
69006 struct msghdr;
69007@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
69008 * logging to the console.
69009 * See the syslog(2) manual page for an explanation of the @type values.
69010 * @type contains the type of action.
69011+ * @from_file indicates the context of action (if it came from /proc).
69012 * Return 0 if permission is granted.
69013 * @settime:
69014 * Check permission to change the system time.
69015@@ -1445,7 +1447,7 @@ struct security_operations {
69016 int (*sysctl) (struct ctl_table *table, int op);
69017 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
69018 int (*quota_on) (struct dentry *dentry);
69019- int (*syslog) (int type);
69020+ int (*syslog) (int type, bool from_file);
69021 int (*settime) (struct timespec *ts, struct timezone *tz);
69022 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
69023
69024@@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
69025 int security_sysctl(struct ctl_table *table, int op);
69026 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
69027 int security_quota_on(struct dentry *dentry);
69028-int security_syslog(int type);
69029+int security_syslog(int type, bool from_file);
69030 int security_settime(struct timespec *ts, struct timezone *tz);
69031 int security_vm_enough_memory(long pages);
69032 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
69033@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
69034 return 0;
69035 }
69036
69037-static inline int security_syslog(int type)
69038+static inline int security_syslog(int type, bool from_file)
69039 {
69040- return cap_syslog(type);
69041+ return cap_syslog(type, from_file);
69042 }
69043
69044 static inline int security_settime(struct timespec *ts, struct timezone *tz)
69045diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
69046index 8366d8f..898f3c6 100644
69047--- a/include/linux/seq_file.h
69048+++ b/include/linux/seq_file.h
69049@@ -23,6 +23,9 @@ struct seq_file {
69050 u64 version;
69051 struct mutex lock;
69052 const struct seq_operations *op;
69053+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
69054+ long long exec_id;
69055+#endif
69056 void *private;
69057 };
69058
69059@@ -32,6 +35,7 @@ struct seq_operations {
69060 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
69061 int (*show) (struct seq_file *m, void *v);
69062 };
69063+typedef struct seq_operations __no_const seq_operations_no_const;
69064
69065 #define SEQ_SKIP 1
69066
69067diff --git a/include/linux/shm.h b/include/linux/shm.h
69068index eca6235..c7417ed 100644
69069--- a/include/linux/shm.h
69070+++ b/include/linux/shm.h
69071@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
69072 pid_t shm_cprid;
69073 pid_t shm_lprid;
69074 struct user_struct *mlock_user;
69075+#ifdef CONFIG_GRKERNSEC
69076+ time_t shm_createtime;
69077+ pid_t shm_lapid;
69078+#endif
69079 };
69080
69081 /* shm_mode upper byte flags */
69082diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
69083index bcdd660..6e12e11 100644
69084--- a/include/linux/skbuff.h
69085+++ b/include/linux/skbuff.h
69086@@ -14,6 +14,7 @@
69087 #ifndef _LINUX_SKBUFF_H
69088 #define _LINUX_SKBUFF_H
69089
69090+#include <linux/const.h>
69091 #include <linux/kernel.h>
69092 #include <linux/kmemcheck.h>
69093 #include <linux/compiler.h>
69094@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
69095 */
69096 static inline int skb_queue_empty(const struct sk_buff_head *list)
69097 {
69098- return list->next == (struct sk_buff *)list;
69099+ return list->next == (const struct sk_buff *)list;
69100 }
69101
69102 /**
69103@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69104 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69105 const struct sk_buff *skb)
69106 {
69107- return (skb->next == (struct sk_buff *) list);
69108+ return (skb->next == (const struct sk_buff *) list);
69109 }
69110
69111 /**
69112@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69113 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69114 const struct sk_buff *skb)
69115 {
69116- return (skb->prev == (struct sk_buff *) list);
69117+ return (skb->prev == (const struct sk_buff *) list);
69118 }
69119
69120 /**
69121@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69122 * headroom, you should not reduce this.
69123 */
69124 #ifndef NET_SKB_PAD
69125-#define NET_SKB_PAD 32
69126+#define NET_SKB_PAD (_AC(32,UL))
69127 #endif
69128
69129 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69130diff --git a/include/linux/slab.h b/include/linux/slab.h
69131index 2da8372..a3be824 100644
69132--- a/include/linux/slab.h
69133+++ b/include/linux/slab.h
69134@@ -11,12 +11,20 @@
69135
69136 #include <linux/gfp.h>
69137 #include <linux/types.h>
69138+#include <linux/err.h>
69139
69140 /*
69141 * Flags to pass to kmem_cache_create().
69142 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69143 */
69144 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69145+
69146+#ifdef CONFIG_PAX_USERCOPY
69147+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69148+#else
69149+#define SLAB_USERCOPY 0x00000000UL
69150+#endif
69151+
69152 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69153 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69154 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69155@@ -82,10 +90,13 @@
69156 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69157 * Both make kfree a no-op.
69158 */
69159-#define ZERO_SIZE_PTR ((void *)16)
69160+#define ZERO_SIZE_PTR \
69161+({ \
69162+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69163+ (void *)(-MAX_ERRNO-1L); \
69164+})
69165
69166-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69167- (unsigned long)ZERO_SIZE_PTR)
69168+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69169
69170 /*
69171 * struct kmem_cache related prototypes
69172@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69173 void kfree(const void *);
69174 void kzfree(const void *);
69175 size_t ksize(const void *);
69176+void check_object_size(const void *ptr, unsigned long n, bool to);
69177
69178 /*
69179 * Allocator specific definitions. These are mainly used to establish optimized
69180@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69181
69182 void __init kmem_cache_init_late(void);
69183
69184+#define kmalloc(x, y) \
69185+({ \
69186+ void *___retval; \
69187+ intoverflow_t ___x = (intoverflow_t)x; \
69188+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69189+ ___retval = NULL; \
69190+ else \
69191+ ___retval = kmalloc((size_t)___x, (y)); \
69192+ ___retval; \
69193+})
69194+
69195+#define kmalloc_node(x, y, z) \
69196+({ \
69197+ void *___retval; \
69198+ intoverflow_t ___x = (intoverflow_t)x; \
69199+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69200+ ___retval = NULL; \
69201+ else \
69202+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
69203+ ___retval; \
69204+})
69205+
69206+#define kzalloc(x, y) \
69207+({ \
69208+ void *___retval; \
69209+ intoverflow_t ___x = (intoverflow_t)x; \
69210+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69211+ ___retval = NULL; \
69212+ else \
69213+ ___retval = kzalloc((size_t)___x, (y)); \
69214+ ___retval; \
69215+})
69216+
69217 #endif /* _LINUX_SLAB_H */
69218diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69219index 850d057..d9dfe3c 100644
69220--- a/include/linux/slab_def.h
69221+++ b/include/linux/slab_def.h
69222@@ -69,10 +69,10 @@ struct kmem_cache {
69223 unsigned long node_allocs;
69224 unsigned long node_frees;
69225 unsigned long node_overflow;
69226- atomic_t allochit;
69227- atomic_t allocmiss;
69228- atomic_t freehit;
69229- atomic_t freemiss;
69230+ atomic_unchecked_t allochit;
69231+ atomic_unchecked_t allocmiss;
69232+ atomic_unchecked_t freehit;
69233+ atomic_unchecked_t freemiss;
69234
69235 /*
69236 * If debugging is enabled, then the allocator can add additional
69237diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69238index 5ad70a6..57f9f65 100644
69239--- a/include/linux/slub_def.h
69240+++ b/include/linux/slub_def.h
69241@@ -86,7 +86,7 @@ struct kmem_cache {
69242 struct kmem_cache_order_objects max;
69243 struct kmem_cache_order_objects min;
69244 gfp_t allocflags; /* gfp flags to use on each alloc */
69245- int refcount; /* Refcount for slab cache destroy */
69246+ atomic_t refcount; /* Refcount for slab cache destroy */
69247 void (*ctor)(void *);
69248 int inuse; /* Offset to metadata */
69249 int align; /* Alignment */
69250@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69251 #endif
69252
69253 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69254-void *__kmalloc(size_t size, gfp_t flags);
69255+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69256
69257 #ifdef CONFIG_KMEMTRACE
69258 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69259diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69260index 67ad11f..0bbd8af 100644
69261--- a/include/linux/sonet.h
69262+++ b/include/linux/sonet.h
69263@@ -61,7 +61,7 @@ struct sonet_stats {
69264 #include <asm/atomic.h>
69265
69266 struct k_sonet_stats {
69267-#define __HANDLE_ITEM(i) atomic_t i
69268+#define __HANDLE_ITEM(i) atomic_unchecked_t i
69269 __SONET_ITEMS
69270 #undef __HANDLE_ITEM
69271 };
69272diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69273index 6f52b4d..5500323 100644
69274--- a/include/linux/sunrpc/cache.h
69275+++ b/include/linux/sunrpc/cache.h
69276@@ -125,7 +125,7 @@ struct cache_detail {
69277 */
69278 struct cache_req {
69279 struct cache_deferred_req *(*defer)(struct cache_req *req);
69280-};
69281+} __no_const;
69282 /* this must be embedded in a deferred_request that is being
69283 * delayed awaiting cache-fill
69284 */
69285diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69286index 8ed9642..101ceab 100644
69287--- a/include/linux/sunrpc/clnt.h
69288+++ b/include/linux/sunrpc/clnt.h
69289@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69290 {
69291 switch (sap->sa_family) {
69292 case AF_INET:
69293- return ntohs(((struct sockaddr_in *)sap)->sin_port);
69294+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69295 case AF_INET6:
69296- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69297+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69298 }
69299 return 0;
69300 }
69301@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69302 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69303 const struct sockaddr *src)
69304 {
69305- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69306+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69307 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
69308
69309 dsin->sin_family = ssin->sin_family;
69310@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
69311 if (sa->sa_family != AF_INET6)
69312 return 0;
69313
69314- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
69315+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
69316 }
69317
69318 #endif /* __KERNEL__ */
69319diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
69320index c14fe86..393245e 100644
69321--- a/include/linux/sunrpc/svc_rdma.h
69322+++ b/include/linux/sunrpc/svc_rdma.h
69323@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
69324 extern unsigned int svcrdma_max_requests;
69325 extern unsigned int svcrdma_max_req_size;
69326
69327-extern atomic_t rdma_stat_recv;
69328-extern atomic_t rdma_stat_read;
69329-extern atomic_t rdma_stat_write;
69330-extern atomic_t rdma_stat_sq_starve;
69331-extern atomic_t rdma_stat_rq_starve;
69332-extern atomic_t rdma_stat_rq_poll;
69333-extern atomic_t rdma_stat_rq_prod;
69334-extern atomic_t rdma_stat_sq_poll;
69335-extern atomic_t rdma_stat_sq_prod;
69336+extern atomic_unchecked_t rdma_stat_recv;
69337+extern atomic_unchecked_t rdma_stat_read;
69338+extern atomic_unchecked_t rdma_stat_write;
69339+extern atomic_unchecked_t rdma_stat_sq_starve;
69340+extern atomic_unchecked_t rdma_stat_rq_starve;
69341+extern atomic_unchecked_t rdma_stat_rq_poll;
69342+extern atomic_unchecked_t rdma_stat_rq_prod;
69343+extern atomic_unchecked_t rdma_stat_sq_poll;
69344+extern atomic_unchecked_t rdma_stat_sq_prod;
69345
69346 #define RPCRDMA_VERSION 1
69347
69348diff --git a/include/linux/suspend.h b/include/linux/suspend.h
69349index 5e781d8..1e62818 100644
69350--- a/include/linux/suspend.h
69351+++ b/include/linux/suspend.h
69352@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
69353 * which require special recovery actions in that situation.
69354 */
69355 struct platform_suspend_ops {
69356- int (*valid)(suspend_state_t state);
69357- int (*begin)(suspend_state_t state);
69358- int (*prepare)(void);
69359- int (*prepare_late)(void);
69360- int (*enter)(suspend_state_t state);
69361- void (*wake)(void);
69362- void (*finish)(void);
69363- void (*end)(void);
69364- void (*recover)(void);
69365+ int (* const valid)(suspend_state_t state);
69366+ int (* const begin)(suspend_state_t state);
69367+ int (* const prepare)(void);
69368+ int (* const prepare_late)(void);
69369+ int (* const enter)(suspend_state_t state);
69370+ void (* const wake)(void);
69371+ void (* const finish)(void);
69372+ void (* const end)(void);
69373+ void (* const recover)(void);
69374 };
69375
69376 #ifdef CONFIG_SUSPEND
69377@@ -120,7 +120,7 @@ struct platform_suspend_ops {
69378 * suspend_set_ops - set platform dependent suspend operations
69379 * @ops: The new suspend operations to set.
69380 */
69381-extern void suspend_set_ops(struct platform_suspend_ops *ops);
69382+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
69383 extern int suspend_valid_only_mem(suspend_state_t state);
69384
69385 /**
69386@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
69387 #else /* !CONFIG_SUSPEND */
69388 #define suspend_valid_only_mem NULL
69389
69390-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
69391+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
69392 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
69393 #endif /* !CONFIG_SUSPEND */
69394
69395@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
69396 * platforms which require special recovery actions in that situation.
69397 */
69398 struct platform_hibernation_ops {
69399- int (*begin)(void);
69400- void (*end)(void);
69401- int (*pre_snapshot)(void);
69402- void (*finish)(void);
69403- int (*prepare)(void);
69404- int (*enter)(void);
69405- void (*leave)(void);
69406- int (*pre_restore)(void);
69407- void (*restore_cleanup)(void);
69408- void (*recover)(void);
69409+ int (* const begin)(void);
69410+ void (* const end)(void);
69411+ int (* const pre_snapshot)(void);
69412+ void (* const finish)(void);
69413+ int (* const prepare)(void);
69414+ int (* const enter)(void);
69415+ void (* const leave)(void);
69416+ int (* const pre_restore)(void);
69417+ void (* const restore_cleanup)(void);
69418+ void (* const recover)(void);
69419 };
69420
69421 #ifdef CONFIG_HIBERNATION
69422@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
69423 extern void swsusp_unset_page_free(struct page *);
69424 extern unsigned long get_safe_page(gfp_t gfp_mask);
69425
69426-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
69427+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
69428 extern int hibernate(void);
69429 extern bool system_entering_hibernation(void);
69430 #else /* CONFIG_HIBERNATION */
69431@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
69432 static inline void swsusp_set_page_free(struct page *p) {}
69433 static inline void swsusp_unset_page_free(struct page *p) {}
69434
69435-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
69436+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
69437 static inline int hibernate(void) { return -ENOSYS; }
69438 static inline bool system_entering_hibernation(void) { return false; }
69439 #endif /* CONFIG_HIBERNATION */
69440diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
69441index 0eb6942..a805cb6 100644
69442--- a/include/linux/sysctl.h
69443+++ b/include/linux/sysctl.h
69444@@ -164,7 +164,11 @@ enum
69445 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
69446 };
69447
69448-
69449+#ifdef CONFIG_PAX_SOFTMODE
69450+enum {
69451+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
69452+};
69453+#endif
69454
69455 /* CTL_VM names: */
69456 enum
69457@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
69458
69459 extern int proc_dostring(struct ctl_table *, int,
69460 void __user *, size_t *, loff_t *);
69461+extern int proc_dostring_modpriv(struct ctl_table *, int,
69462+ void __user *, size_t *, loff_t *);
69463 extern int proc_dointvec(struct ctl_table *, int,
69464 void __user *, size_t *, loff_t *);
69465 extern int proc_dointvec_minmax(struct ctl_table *, int,
69466@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
69467
69468 extern ctl_handler sysctl_data;
69469 extern ctl_handler sysctl_string;
69470+extern ctl_handler sysctl_string_modpriv;
69471 extern ctl_handler sysctl_intvec;
69472 extern ctl_handler sysctl_jiffies;
69473 extern ctl_handler sysctl_ms_jiffies;
69474diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
69475index 9d68fed..71f02cc 100644
69476--- a/include/linux/sysfs.h
69477+++ b/include/linux/sysfs.h
69478@@ -75,8 +75,8 @@ struct bin_attribute {
69479 };
69480
69481 struct sysfs_ops {
69482- ssize_t (*show)(struct kobject *, struct attribute *,char *);
69483- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
69484+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
69485+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
69486 };
69487
69488 struct sysfs_dirent;
69489diff --git a/include/linux/syslog.h b/include/linux/syslog.h
69490new file mode 100644
69491index 0000000..3891139
69492--- /dev/null
69493+++ b/include/linux/syslog.h
69494@@ -0,0 +1,52 @@
69495+/* Syslog internals
69496+ *
69497+ * Copyright 2010 Canonical, Ltd.
69498+ * Author: Kees Cook <kees.cook@canonical.com>
69499+ *
69500+ * This program is free software; you can redistribute it and/or modify
69501+ * it under the terms of the GNU General Public License as published by
69502+ * the Free Software Foundation; either version 2, or (at your option)
69503+ * any later version.
69504+ *
69505+ * This program is distributed in the hope that it will be useful,
69506+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
69507+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69508+ * GNU General Public License for more details.
69509+ *
69510+ * You should have received a copy of the GNU General Public License
69511+ * along with this program; see the file COPYING. If not, write to
69512+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
69513+ */
69514+
69515+#ifndef _LINUX_SYSLOG_H
69516+#define _LINUX_SYSLOG_H
69517+
69518+/* Close the log. Currently a NOP. */
69519+#define SYSLOG_ACTION_CLOSE 0
69520+/* Open the log. Currently a NOP. */
69521+#define SYSLOG_ACTION_OPEN 1
69522+/* Read from the log. */
69523+#define SYSLOG_ACTION_READ 2
69524+/* Read all messages remaining in the ring buffer. */
69525+#define SYSLOG_ACTION_READ_ALL 3
69526+/* Read and clear all messages remaining in the ring buffer */
69527+#define SYSLOG_ACTION_READ_CLEAR 4
69528+/* Clear ring buffer. */
69529+#define SYSLOG_ACTION_CLEAR 5
69530+/* Disable printk's to console */
69531+#define SYSLOG_ACTION_CONSOLE_OFF 6
69532+/* Enable printk's to console */
69533+#define SYSLOG_ACTION_CONSOLE_ON 7
69534+/* Set level of messages printed to console */
69535+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
69536+/* Return number of unread characters in the log buffer */
69537+#define SYSLOG_ACTION_SIZE_UNREAD 9
69538+/* Return size of the log buffer */
69539+#define SYSLOG_ACTION_SIZE_BUFFER 10
69540+
69541+#define SYSLOG_FROM_CALL 0
69542+#define SYSLOG_FROM_FILE 1
69543+
69544+int do_syslog(int type, char __user *buf, int count, bool from_file);
69545+
69546+#endif /* _LINUX_SYSLOG_H */
69547diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
69548index a8cc4e1..98d3b85 100644
69549--- a/include/linux/thread_info.h
69550+++ b/include/linux/thread_info.h
69551@@ -23,7 +23,7 @@ struct restart_block {
69552 };
69553 /* For futex_wait and futex_wait_requeue_pi */
69554 struct {
69555- u32 *uaddr;
69556+ u32 __user *uaddr;
69557 u32 val;
69558 u32 flags;
69559 u32 bitset;
69560diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
69561index 1eb44a9..f582df3 100644
69562--- a/include/linux/tracehook.h
69563+++ b/include/linux/tracehook.h
69564@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
69565 /*
69566 * ptrace report for syscall entry and exit looks identical.
69567 */
69568-static inline void ptrace_report_syscall(struct pt_regs *regs)
69569+static inline int ptrace_report_syscall(struct pt_regs *regs)
69570 {
69571 int ptrace = task_ptrace(current);
69572
69573 if (!(ptrace & PT_PTRACED))
69574- return;
69575+ return 0;
69576
69577 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
69578
69579@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
69580 send_sig(current->exit_code, current, 1);
69581 current->exit_code = 0;
69582 }
69583+
69584+ return fatal_signal_pending(current);
69585 }
69586
69587 /**
69588@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
69589 static inline __must_check int tracehook_report_syscall_entry(
69590 struct pt_regs *regs)
69591 {
69592- ptrace_report_syscall(regs);
69593- return 0;
69594+ return ptrace_report_syscall(regs);
69595 }
69596
69597 /**
69598diff --git a/include/linux/tty.h b/include/linux/tty.h
69599index e9c57e9..ee6d489 100644
69600--- a/include/linux/tty.h
69601+++ b/include/linux/tty.h
69602@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
69603 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
69604 extern void tty_ldisc_enable(struct tty_struct *tty);
69605
69606-
69607 /* n_tty.c */
69608 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
69609
69610diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
69611index 0c4ee9b..9f7c426 100644
69612--- a/include/linux/tty_ldisc.h
69613+++ b/include/linux/tty_ldisc.h
69614@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
69615
69616 struct module *owner;
69617
69618- int refcount;
69619+ atomic_t refcount;
69620 };
69621
69622 struct tty_ldisc {
69623diff --git a/include/linux/types.h b/include/linux/types.h
69624index c42724f..d190eee 100644
69625--- a/include/linux/types.h
69626+++ b/include/linux/types.h
69627@@ -191,10 +191,26 @@ typedef struct {
69628 volatile int counter;
69629 } atomic_t;
69630
69631+#ifdef CONFIG_PAX_REFCOUNT
69632+typedef struct {
69633+ volatile int counter;
69634+} atomic_unchecked_t;
69635+#else
69636+typedef atomic_t atomic_unchecked_t;
69637+#endif
69638+
69639 #ifdef CONFIG_64BIT
69640 typedef struct {
69641 volatile long counter;
69642 } atomic64_t;
69643+
69644+#ifdef CONFIG_PAX_REFCOUNT
69645+typedef struct {
69646+ volatile long counter;
69647+} atomic64_unchecked_t;
69648+#else
69649+typedef atomic64_t atomic64_unchecked_t;
69650+#endif
69651 #endif
69652
69653 struct ustat {
69654diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
69655index 6b58367..53a3e8e 100644
69656--- a/include/linux/uaccess.h
69657+++ b/include/linux/uaccess.h
69658@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69659 long ret; \
69660 mm_segment_t old_fs = get_fs(); \
69661 \
69662- set_fs(KERNEL_DS); \
69663 pagefault_disable(); \
69664- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
69665- pagefault_enable(); \
69666+ set_fs(KERNEL_DS); \
69667+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
69668 set_fs(old_fs); \
69669+ pagefault_enable(); \
69670 ret; \
69671 })
69672
69673@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69674 * Safely read from address @src to the buffer at @dst. If a kernel fault
69675 * happens, handle that and return -EFAULT.
69676 */
69677-extern long probe_kernel_read(void *dst, void *src, size_t size);
69678+extern long probe_kernel_read(void *dst, const void *src, size_t size);
69679
69680 /*
69681 * probe_kernel_write(): safely attempt to write to a location
69682@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
69683 * Safely write to address @dst from the buffer at @src. If a kernel fault
69684 * happens, handle that and return -EFAULT.
69685 */
69686-extern long probe_kernel_write(void *dst, void *src, size_t size);
69687+extern long probe_kernel_write(void *dst, const void *src, size_t size);
69688
69689 #endif /* __LINUX_UACCESS_H__ */
69690diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
69691index 99c1b4d..bb94261 100644
69692--- a/include/linux/unaligned/access_ok.h
69693+++ b/include/linux/unaligned/access_ok.h
69694@@ -6,32 +6,32 @@
69695
69696 static inline u16 get_unaligned_le16(const void *p)
69697 {
69698- return le16_to_cpup((__le16 *)p);
69699+ return le16_to_cpup((const __le16 *)p);
69700 }
69701
69702 static inline u32 get_unaligned_le32(const void *p)
69703 {
69704- return le32_to_cpup((__le32 *)p);
69705+ return le32_to_cpup((const __le32 *)p);
69706 }
69707
69708 static inline u64 get_unaligned_le64(const void *p)
69709 {
69710- return le64_to_cpup((__le64 *)p);
69711+ return le64_to_cpup((const __le64 *)p);
69712 }
69713
69714 static inline u16 get_unaligned_be16(const void *p)
69715 {
69716- return be16_to_cpup((__be16 *)p);
69717+ return be16_to_cpup((const __be16 *)p);
69718 }
69719
69720 static inline u32 get_unaligned_be32(const void *p)
69721 {
69722- return be32_to_cpup((__be32 *)p);
69723+ return be32_to_cpup((const __be32 *)p);
69724 }
69725
69726 static inline u64 get_unaligned_be64(const void *p)
69727 {
69728- return be64_to_cpup((__be64 *)p);
69729+ return be64_to_cpup((const __be64 *)p);
69730 }
69731
69732 static inline void put_unaligned_le16(u16 val, void *p)
69733diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
69734index 79b9837..b5a56f9 100644
69735--- a/include/linux/vermagic.h
69736+++ b/include/linux/vermagic.h
69737@@ -26,9 +26,35 @@
69738 #define MODULE_ARCH_VERMAGIC ""
69739 #endif
69740
69741+#ifdef CONFIG_PAX_REFCOUNT
69742+#define MODULE_PAX_REFCOUNT "REFCOUNT "
69743+#else
69744+#define MODULE_PAX_REFCOUNT ""
69745+#endif
69746+
69747+#ifdef CONSTIFY_PLUGIN
69748+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
69749+#else
69750+#define MODULE_CONSTIFY_PLUGIN ""
69751+#endif
69752+
69753+#ifdef STACKLEAK_PLUGIN
69754+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
69755+#else
69756+#define MODULE_STACKLEAK_PLUGIN ""
69757+#endif
69758+
69759+#ifdef CONFIG_GRKERNSEC
69760+#define MODULE_GRSEC "GRSEC "
69761+#else
69762+#define MODULE_GRSEC ""
69763+#endif
69764+
69765 #define VERMAGIC_STRING \
69766 UTS_RELEASE " " \
69767 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
69768 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
69769- MODULE_ARCH_VERMAGIC
69770+ MODULE_ARCH_VERMAGIC \
69771+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
69772+ MODULE_GRSEC
69773
69774diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
69775index 819a634..462ac12 100644
69776--- a/include/linux/vmalloc.h
69777+++ b/include/linux/vmalloc.h
69778@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
69779 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
69780 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
69781 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
69782+
69783+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69784+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
69785+#endif
69786+
69787 /* bits [20..32] reserved for arch specific ioremap internals */
69788
69789 /*
69790@@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
69791
69792 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
69793
69794+#define vmalloc(x) \
69795+({ \
69796+ void *___retval; \
69797+ intoverflow_t ___x = (intoverflow_t)x; \
69798+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
69799+ ___retval = NULL; \
69800+ else \
69801+ ___retval = vmalloc((unsigned long)___x); \
69802+ ___retval; \
69803+})
69804+
69805+#define __vmalloc(x, y, z) \
69806+({ \
69807+ void *___retval; \
69808+ intoverflow_t ___x = (intoverflow_t)x; \
69809+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
69810+ ___retval = NULL; \
69811+ else \
69812+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
69813+ ___retval; \
69814+})
69815+
69816+#define vmalloc_user(x) \
69817+({ \
69818+ void *___retval; \
69819+ intoverflow_t ___x = (intoverflow_t)x; \
69820+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
69821+ ___retval = NULL; \
69822+ else \
69823+ ___retval = vmalloc_user((unsigned long)___x); \
69824+ ___retval; \
69825+})
69826+
69827+#define vmalloc_exec(x) \
69828+({ \
69829+ void *___retval; \
69830+ intoverflow_t ___x = (intoverflow_t)x; \
69831+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
69832+ ___retval = NULL; \
69833+ else \
69834+ ___retval = vmalloc_exec((unsigned long)___x); \
69835+ ___retval; \
69836+})
69837+
69838+#define vmalloc_node(x, y) \
69839+({ \
69840+ void *___retval; \
69841+ intoverflow_t ___x = (intoverflow_t)x; \
69842+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
69843+ ___retval = NULL; \
69844+ else \
69845+ ___retval = vmalloc_node((unsigned long)___x, (y));\
69846+ ___retval; \
69847+})
69848+
69849+#define vmalloc_32(x) \
69850+({ \
69851+ void *___retval; \
69852+ intoverflow_t ___x = (intoverflow_t)x; \
69853+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
69854+ ___retval = NULL; \
69855+ else \
69856+ ___retval = vmalloc_32((unsigned long)___x); \
69857+ ___retval; \
69858+})
69859+
69860+#define vmalloc_32_user(x) \
69861+({ \
69862+ void *___retval; \
69863+ intoverflow_t ___x = (intoverflow_t)x; \
69864+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
69865+ ___retval = NULL; \
69866+ else \
69867+ ___retval = vmalloc_32_user((unsigned long)___x);\
69868+ ___retval; \
69869+})
69870+
69871 #endif /* _LINUX_VMALLOC_H */
69872diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
69873index 13070d6..aa4159a 100644
69874--- a/include/linux/vmstat.h
69875+++ b/include/linux/vmstat.h
69876@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
69877 /*
69878 * Zone based page accounting with per cpu differentials.
69879 */
69880-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69881+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69882
69883 static inline void zone_page_state_add(long x, struct zone *zone,
69884 enum zone_stat_item item)
69885 {
69886- atomic_long_add(x, &zone->vm_stat[item]);
69887- atomic_long_add(x, &vm_stat[item]);
69888+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
69889+ atomic_long_add_unchecked(x, &vm_stat[item]);
69890 }
69891
69892 static inline unsigned long global_page_state(enum zone_stat_item item)
69893 {
69894- long x = atomic_long_read(&vm_stat[item]);
69895+ long x = atomic_long_read_unchecked(&vm_stat[item]);
69896 #ifdef CONFIG_SMP
69897 if (x < 0)
69898 x = 0;
69899@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
69900 static inline unsigned long zone_page_state(struct zone *zone,
69901 enum zone_stat_item item)
69902 {
69903- long x = atomic_long_read(&zone->vm_stat[item]);
69904+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69905 #ifdef CONFIG_SMP
69906 if (x < 0)
69907 x = 0;
69908@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
69909 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
69910 enum zone_stat_item item)
69911 {
69912- long x = atomic_long_read(&zone->vm_stat[item]);
69913+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69914
69915 #ifdef CONFIG_SMP
69916 int cpu;
69917@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
69918
69919 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
69920 {
69921- atomic_long_inc(&zone->vm_stat[item]);
69922- atomic_long_inc(&vm_stat[item]);
69923+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
69924+ atomic_long_inc_unchecked(&vm_stat[item]);
69925 }
69926
69927 static inline void __inc_zone_page_state(struct page *page,
69928@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
69929
69930 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
69931 {
69932- atomic_long_dec(&zone->vm_stat[item]);
69933- atomic_long_dec(&vm_stat[item]);
69934+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
69935+ atomic_long_dec_unchecked(&vm_stat[item]);
69936 }
69937
69938 static inline void __dec_zone_page_state(struct page *page,
69939diff --git a/include/linux/xattr.h b/include/linux/xattr.h
69940index 5c84af8..1a3b6e2 100644
69941--- a/include/linux/xattr.h
69942+++ b/include/linux/xattr.h
69943@@ -33,6 +33,11 @@
69944 #define XATTR_USER_PREFIX "user."
69945 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
69946
69947+/* User namespace */
69948+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
69949+#define XATTR_PAX_FLAGS_SUFFIX "flags"
69950+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
69951+
69952 struct inode;
69953 struct dentry;
69954
69955diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
69956index eed5fcc..5080d24 100644
69957--- a/include/media/saa7146_vv.h
69958+++ b/include/media/saa7146_vv.h
69959@@ -167,7 +167,7 @@ struct saa7146_ext_vv
69960 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
69961
69962 /* the extension can override this */
69963- struct v4l2_ioctl_ops ops;
69964+ v4l2_ioctl_ops_no_const ops;
69965 /* pointer to the saa7146 core ops */
69966 const struct v4l2_ioctl_ops *core_ops;
69967
69968diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
69969index 73c9867..2da8837 100644
69970--- a/include/media/v4l2-dev.h
69971+++ b/include/media/v4l2-dev.h
69972@@ -34,7 +34,7 @@ struct v4l2_device;
69973 #define V4L2_FL_UNREGISTERED (0)
69974
69975 struct v4l2_file_operations {
69976- struct module *owner;
69977+ struct module * const owner;
69978 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
69979 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
69980 unsigned int (*poll) (struct file *, struct poll_table_struct *);
69981@@ -46,6 +46,7 @@ struct v4l2_file_operations {
69982 int (*open) (struct file *);
69983 int (*release) (struct file *);
69984 };
69985+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
69986
69987 /*
69988 * Newer version of video_device, handled by videodev2.c
69989diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
69990index 5d5d550..f559ef1 100644
69991--- a/include/media/v4l2-device.h
69992+++ b/include/media/v4l2-device.h
69993@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
69994 this function returns 0. If the name ends with a digit (e.g. cx18),
69995 then the name will be set to cx18-0 since cx180 looks really odd. */
69996 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
69997- atomic_t *instance);
69998+ atomic_unchecked_t *instance);
69999
70000 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
70001 Since the parent disappears this ensures that v4l2_dev doesn't have an
70002diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
70003index 7a4529d..7244290 100644
70004--- a/include/media/v4l2-ioctl.h
70005+++ b/include/media/v4l2-ioctl.h
70006@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
70007 long (*vidioc_default) (struct file *file, void *fh,
70008 int cmd, void *arg);
70009 };
70010+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
70011
70012
70013 /* v4l debugging and diagnostics */
70014diff --git a/include/net/flow.h b/include/net/flow.h
70015index 809970b..c3df4f3 100644
70016--- a/include/net/flow.h
70017+++ b/include/net/flow.h
70018@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
70019 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
70020 u8 dir, flow_resolve_t resolver);
70021 extern void flow_cache_flush(void);
70022-extern atomic_t flow_cache_genid;
70023+extern atomic_unchecked_t flow_cache_genid;
70024
70025 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
70026 {
70027diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
70028index 15e1f8fe..668837c 100644
70029--- a/include/net/inetpeer.h
70030+++ b/include/net/inetpeer.h
70031@@ -24,7 +24,7 @@ struct inet_peer
70032 __u32 dtime; /* the time of last use of not
70033 * referenced entries */
70034 atomic_t refcnt;
70035- atomic_t rid; /* Frag reception counter */
70036+ atomic_unchecked_t rid; /* Frag reception counter */
70037 __u32 tcp_ts;
70038 unsigned long tcp_ts_stamp;
70039 };
70040diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
70041index 98978e7..2243a3d 100644
70042--- a/include/net/ip_vs.h
70043+++ b/include/net/ip_vs.h
70044@@ -365,7 +365,7 @@ struct ip_vs_conn {
70045 struct ip_vs_conn *control; /* Master control connection */
70046 atomic_t n_control; /* Number of controlled ones */
70047 struct ip_vs_dest *dest; /* real server */
70048- atomic_t in_pkts; /* incoming packet counter */
70049+ atomic_unchecked_t in_pkts; /* incoming packet counter */
70050
70051 /* packet transmitter for different forwarding methods. If it
70052 mangles the packet, it must return NF_DROP or better NF_STOLEN,
70053@@ -466,7 +466,7 @@ struct ip_vs_dest {
70054 union nf_inet_addr addr; /* IP address of the server */
70055 __be16 port; /* port number of the server */
70056 volatile unsigned flags; /* dest status flags */
70057- atomic_t conn_flags; /* flags to copy to conn */
70058+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
70059 atomic_t weight; /* server weight */
70060
70061 atomic_t refcnt; /* reference counter */
70062diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
70063index 69b610a..fe3962c 100644
70064--- a/include/net/irda/ircomm_core.h
70065+++ b/include/net/irda/ircomm_core.h
70066@@ -51,7 +51,7 @@ typedef struct {
70067 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
70068 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
70069 struct ircomm_info *);
70070-} call_t;
70071+} __no_const call_t;
70072
70073 struct ircomm_cb {
70074 irda_queue_t queue;
70075diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
70076index eea2e61..08c692d 100644
70077--- a/include/net/irda/ircomm_tty.h
70078+++ b/include/net/irda/ircomm_tty.h
70079@@ -35,6 +35,7 @@
70080 #include <linux/termios.h>
70081 #include <linux/timer.h>
70082 #include <linux/tty.h> /* struct tty_struct */
70083+#include <asm/local.h>
70084
70085 #include <net/irda/irias_object.h>
70086 #include <net/irda/ircomm_core.h>
70087@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
70088 unsigned short close_delay;
70089 unsigned short closing_wait; /* time to wait before closing */
70090
70091- int open_count;
70092- int blocked_open; /* # of blocked opens */
70093+ local_t open_count;
70094+ local_t blocked_open; /* # of blocked opens */
70095
70096 /* Protect concurent access to :
70097 * o self->open_count
70098diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
70099index f82a1e8..82d81e8 100644
70100--- a/include/net/iucv/af_iucv.h
70101+++ b/include/net/iucv/af_iucv.h
70102@@ -87,7 +87,7 @@ struct iucv_sock {
70103 struct iucv_sock_list {
70104 struct hlist_head head;
70105 rwlock_t lock;
70106- atomic_t autobind_name;
70107+ atomic_unchecked_t autobind_name;
70108 };
70109
70110 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70111diff --git a/include/net/lapb.h b/include/net/lapb.h
70112index 96cb5dd..25e8d4f 100644
70113--- a/include/net/lapb.h
70114+++ b/include/net/lapb.h
70115@@ -95,7 +95,7 @@ struct lapb_cb {
70116 struct sk_buff_head write_queue;
70117 struct sk_buff_head ack_queue;
70118 unsigned char window;
70119- struct lapb_register_struct callbacks;
70120+ struct lapb_register_struct *callbacks;
70121
70122 /* FRMR control information */
70123 struct lapb_frame frmr_data;
70124diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70125index 3817fda..cdb2343 100644
70126--- a/include/net/neighbour.h
70127+++ b/include/net/neighbour.h
70128@@ -131,7 +131,7 @@ struct neigh_ops
70129 int (*connected_output)(struct sk_buff*);
70130 int (*hh_output)(struct sk_buff*);
70131 int (*queue_xmit)(struct sk_buff*);
70132-};
70133+} __do_const;
70134
70135 struct pneigh_entry
70136 {
70137diff --git a/include/net/netlink.h b/include/net/netlink.h
70138index c344646..4778c71 100644
70139--- a/include/net/netlink.h
70140+++ b/include/net/netlink.h
70141@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70142 {
70143 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70144 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70145- nlh->nlmsg_len <= remaining);
70146+ nlh->nlmsg_len <= (unsigned int)remaining);
70147 }
70148
70149 /**
70150@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70151 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70152 {
70153 if (mark)
70154- skb_trim(skb, (unsigned char *) mark - skb->data);
70155+ skb_trim(skb, (const unsigned char *) mark - skb->data);
70156 }
70157
70158 /**
70159diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70160index 9a4b8b7..e49e077 100644
70161--- a/include/net/netns/ipv4.h
70162+++ b/include/net/netns/ipv4.h
70163@@ -54,7 +54,7 @@ struct netns_ipv4 {
70164 int current_rt_cache_rebuild_count;
70165
70166 struct timer_list rt_secret_timer;
70167- atomic_t rt_genid;
70168+ atomic_unchecked_t rt_genid;
70169
70170 #ifdef CONFIG_IP_MROUTE
70171 struct sock *mroute_sk;
70172diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70173index 8a6d529..171f401 100644
70174--- a/include/net/sctp/sctp.h
70175+++ b/include/net/sctp/sctp.h
70176@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70177
70178 #else /* SCTP_DEBUG */
70179
70180-#define SCTP_DEBUG_PRINTK(whatever...)
70181-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70182+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70183+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70184 #define SCTP_ENABLE_DEBUG
70185 #define SCTP_DISABLE_DEBUG
70186 #define SCTP_ASSERT(expr, str, func)
70187diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70188index d97f689..f3b90ab 100644
70189--- a/include/net/secure_seq.h
70190+++ b/include/net/secure_seq.h
70191@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70192 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70193 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70194 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70195- __be16 dport);
70196+ __be16 dport);
70197 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70198 __be16 sport, __be16 dport);
70199 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70200- __be16 sport, __be16 dport);
70201+ __be16 sport, __be16 dport);
70202 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70203- __be16 sport, __be16 dport);
70204+ __be16 sport, __be16 dport);
70205 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70206- __be16 sport, __be16 dport);
70207+ __be16 sport, __be16 dport);
70208
70209 #endif /* _NET_SECURE_SEQ */
70210diff --git a/include/net/sock.h b/include/net/sock.h
70211index 9f96394..76fc9c7 100644
70212--- a/include/net/sock.h
70213+++ b/include/net/sock.h
70214@@ -272,7 +272,7 @@ struct sock {
70215 rwlock_t sk_callback_lock;
70216 int sk_err,
70217 sk_err_soft;
70218- atomic_t sk_drops;
70219+ atomic_unchecked_t sk_drops;
70220 unsigned short sk_ack_backlog;
70221 unsigned short sk_max_ack_backlog;
70222 __u32 sk_priority;
70223@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70224 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70225 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70226 #else
70227-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70228+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70229 int inc)
70230 {
70231 }
70232diff --git a/include/net/tcp.h b/include/net/tcp.h
70233index 6cfe18b..dd21acb 100644
70234--- a/include/net/tcp.h
70235+++ b/include/net/tcp.h
70236@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70237 struct tcp_seq_afinfo {
70238 char *name;
70239 sa_family_t family;
70240- struct file_operations seq_fops;
70241- struct seq_operations seq_ops;
70242+ file_operations_no_const seq_fops;
70243+ seq_operations_no_const seq_ops;
70244 };
70245
70246 struct tcp_iter_state {
70247diff --git a/include/net/udp.h b/include/net/udp.h
70248index f98abd2..b4b042f 100644
70249--- a/include/net/udp.h
70250+++ b/include/net/udp.h
70251@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70252 char *name;
70253 sa_family_t family;
70254 struct udp_table *udp_table;
70255- struct file_operations seq_fops;
70256- struct seq_operations seq_ops;
70257+ file_operations_no_const seq_fops;
70258+ seq_operations_no_const seq_ops;
70259 };
70260
70261 struct udp_iter_state {
70262diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70263index cbb822e..e9c1cbe 100644
70264--- a/include/rdma/iw_cm.h
70265+++ b/include/rdma/iw_cm.h
70266@@ -129,7 +129,7 @@ struct iw_cm_verbs {
70267 int backlog);
70268
70269 int (*destroy_listen)(struct iw_cm_id *cm_id);
70270-};
70271+} __no_const;
70272
70273 /**
70274 * iw_create_cm_id - Create an IW CM identifier.
70275diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70276index 09a124b..caa8ca8 100644
70277--- a/include/scsi/libfc.h
70278+++ b/include/scsi/libfc.h
70279@@ -675,6 +675,7 @@ struct libfc_function_template {
70280 */
70281 void (*disc_stop_final) (struct fc_lport *);
70282 };
70283+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70284
70285 /* information used by the discovery layer */
70286 struct fc_disc {
70287@@ -707,7 +708,7 @@ struct fc_lport {
70288 struct fc_disc disc;
70289
70290 /* Operational Information */
70291- struct libfc_function_template tt;
70292+ libfc_function_template_no_const tt;
70293 u8 link_up;
70294 u8 qfull;
70295 enum fc_lport_state state;
70296diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70297index de8e180..f15e0d7 100644
70298--- a/include/scsi/scsi_device.h
70299+++ b/include/scsi/scsi_device.h
70300@@ -156,9 +156,9 @@ struct scsi_device {
70301 unsigned int max_device_blocked; /* what device_blocked counts down from */
70302 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70303
70304- atomic_t iorequest_cnt;
70305- atomic_t iodone_cnt;
70306- atomic_t ioerr_cnt;
70307+ atomic_unchecked_t iorequest_cnt;
70308+ atomic_unchecked_t iodone_cnt;
70309+ atomic_unchecked_t ioerr_cnt;
70310
70311 struct device sdev_gendev,
70312 sdev_dev;
70313diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
70314index fc50bd6..81ba9cb 100644
70315--- a/include/scsi/scsi_transport_fc.h
70316+++ b/include/scsi/scsi_transport_fc.h
70317@@ -708,7 +708,7 @@ struct fc_function_template {
70318 unsigned long show_host_system_hostname:1;
70319
70320 unsigned long disable_target_scan:1;
70321-};
70322+} __do_const;
70323
70324
70325 /**
70326diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
70327index 3dae3f7..8440d6f 100644
70328--- a/include/sound/ac97_codec.h
70329+++ b/include/sound/ac97_codec.h
70330@@ -419,15 +419,15 @@
70331 struct snd_ac97;
70332
70333 struct snd_ac97_build_ops {
70334- int (*build_3d) (struct snd_ac97 *ac97);
70335- int (*build_specific) (struct snd_ac97 *ac97);
70336- int (*build_spdif) (struct snd_ac97 *ac97);
70337- int (*build_post_spdif) (struct snd_ac97 *ac97);
70338+ int (* const build_3d) (struct snd_ac97 *ac97);
70339+ int (* const build_specific) (struct snd_ac97 *ac97);
70340+ int (* const build_spdif) (struct snd_ac97 *ac97);
70341+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
70342 #ifdef CONFIG_PM
70343- void (*suspend) (struct snd_ac97 *ac97);
70344- void (*resume) (struct snd_ac97 *ac97);
70345+ void (* const suspend) (struct snd_ac97 *ac97);
70346+ void (* const resume) (struct snd_ac97 *ac97);
70347 #endif
70348- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70349+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70350 };
70351
70352 struct snd_ac97_bus_ops {
70353@@ -477,7 +477,7 @@ struct snd_ac97_template {
70354
70355 struct snd_ac97 {
70356 /* -- lowlevel (hardware) driver specific -- */
70357- struct snd_ac97_build_ops * build_ops;
70358+ const struct snd_ac97_build_ops * build_ops;
70359 void *private_data;
70360 void (*private_free) (struct snd_ac97 *ac97);
70361 /* --- */
70362diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
70363index 891cf1a..a94ba2b 100644
70364--- a/include/sound/ak4xxx-adda.h
70365+++ b/include/sound/ak4xxx-adda.h
70366@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
70367 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
70368 unsigned char val);
70369 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
70370-};
70371+} __no_const;
70372
70373 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
70374
70375diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
70376index 8c05e47..2b5df97 100644
70377--- a/include/sound/hwdep.h
70378+++ b/include/sound/hwdep.h
70379@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
70380 struct snd_hwdep_dsp_status *status);
70381 int (*dsp_load)(struct snd_hwdep *hw,
70382 struct snd_hwdep_dsp_image *image);
70383-};
70384+} __no_const;
70385
70386 struct snd_hwdep {
70387 struct snd_card *card;
70388diff --git a/include/sound/info.h b/include/sound/info.h
70389index 112e894..6fda5b5 100644
70390--- a/include/sound/info.h
70391+++ b/include/sound/info.h
70392@@ -44,7 +44,7 @@ struct snd_info_entry_text {
70393 struct snd_info_buffer *buffer);
70394 void (*write)(struct snd_info_entry *entry,
70395 struct snd_info_buffer *buffer);
70396-};
70397+} __no_const;
70398
70399 struct snd_info_entry_ops {
70400 int (*open)(struct snd_info_entry *entry,
70401diff --git a/include/sound/pcm.h b/include/sound/pcm.h
70402index de6d981..590a550 100644
70403--- a/include/sound/pcm.h
70404+++ b/include/sound/pcm.h
70405@@ -80,6 +80,7 @@ struct snd_pcm_ops {
70406 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
70407 int (*ack)(struct snd_pcm_substream *substream);
70408 };
70409+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
70410
70411 /*
70412 *
70413diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
70414index 736eac7..fe8a80f 100644
70415--- a/include/sound/sb16_csp.h
70416+++ b/include/sound/sb16_csp.h
70417@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
70418 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
70419 int (*csp_stop) (struct snd_sb_csp * p);
70420 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
70421-};
70422+} __no_const;
70423
70424 /*
70425 * CSP private data
70426diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
70427index 444cd6b..3327cc5 100644
70428--- a/include/sound/ymfpci.h
70429+++ b/include/sound/ymfpci.h
70430@@ -358,7 +358,7 @@ struct snd_ymfpci {
70431 spinlock_t reg_lock;
70432 spinlock_t voice_lock;
70433 wait_queue_head_t interrupt_sleep;
70434- atomic_t interrupt_sleep_count;
70435+ atomic_unchecked_t interrupt_sleep_count;
70436 struct snd_info_entry *proc_entry;
70437 const struct firmware *dsp_microcode;
70438 const struct firmware *controller_microcode;
70439diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
70440index b89f9db..f097b38 100644
70441--- a/include/trace/events/irq.h
70442+++ b/include/trace/events/irq.h
70443@@ -34,7 +34,7 @@
70444 */
70445 TRACE_EVENT(irq_handler_entry,
70446
70447- TP_PROTO(int irq, struct irqaction *action),
70448+ TP_PROTO(int irq, const struct irqaction *action),
70449
70450 TP_ARGS(irq, action),
70451
70452@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
70453 */
70454 TRACE_EVENT(irq_handler_exit,
70455
70456- TP_PROTO(int irq, struct irqaction *action, int ret),
70457+ TP_PROTO(int irq, const struct irqaction *action, int ret),
70458
70459 TP_ARGS(irq, action, ret),
70460
70461@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
70462 */
70463 TRACE_EVENT(softirq_entry,
70464
70465- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70466+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70467
70468 TP_ARGS(h, vec),
70469
70470@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
70471 */
70472 TRACE_EVENT(softirq_exit,
70473
70474- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70475+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70476
70477 TP_ARGS(h, vec),
70478
70479diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
70480index 0993a22..32ba2fe 100644
70481--- a/include/video/uvesafb.h
70482+++ b/include/video/uvesafb.h
70483@@ -177,6 +177,7 @@ struct uvesafb_par {
70484 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
70485 u8 pmi_setpal; /* PMI for palette changes */
70486 u16 *pmi_base; /* protected mode interface location */
70487+ u8 *pmi_code; /* protected mode code location */
70488 void *pmi_start;
70489 void *pmi_pal;
70490 u8 *vbe_state_orig; /*
70491diff --git a/init/Kconfig b/init/Kconfig
70492index d72691b..3996e54 100644
70493--- a/init/Kconfig
70494+++ b/init/Kconfig
70495@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
70496
70497 config COMPAT_BRK
70498 bool "Disable heap randomization"
70499- default y
70500+ default n
70501 help
70502 Randomizing heap placement makes heap exploits harder, but it
70503 also breaks ancient binaries (including anything libc5 based).
70504diff --git a/init/do_mounts.c b/init/do_mounts.c
70505index bb008d0..4fa3933 100644
70506--- a/init/do_mounts.c
70507+++ b/init/do_mounts.c
70508@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
70509
70510 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
70511 {
70512- int err = sys_mount(name, "/root", fs, flags, data);
70513+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
70514 if (err)
70515 return err;
70516
70517- sys_chdir("/root");
70518+ sys_chdir((__force const char __user *)"/root");
70519 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
70520 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
70521 current->fs->pwd.mnt->mnt_sb->s_type->name,
70522@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
70523 va_start(args, fmt);
70524 vsprintf(buf, fmt, args);
70525 va_end(args);
70526- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
70527+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
70528 if (fd >= 0) {
70529 sys_ioctl(fd, FDEJECT, 0);
70530 sys_close(fd);
70531 }
70532 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
70533- fd = sys_open("/dev/console", O_RDWR, 0);
70534+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
70535 if (fd >= 0) {
70536 sys_ioctl(fd, TCGETS, (long)&termios);
70537 termios.c_lflag &= ~ICANON;
70538 sys_ioctl(fd, TCSETSF, (long)&termios);
70539- sys_read(fd, &c, 1);
70540+ sys_read(fd, (char __user *)&c, 1);
70541 termios.c_lflag |= ICANON;
70542 sys_ioctl(fd, TCSETSF, (long)&termios);
70543 sys_close(fd);
70544@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
70545 mount_root();
70546 out:
70547 devtmpfs_mount("dev");
70548- sys_mount(".", "/", NULL, MS_MOVE, NULL);
70549- sys_chroot(".");
70550+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
70551+ sys_chroot((__force char __user *)".");
70552 }
70553diff --git a/init/do_mounts.h b/init/do_mounts.h
70554index f5b978a..69dbfe8 100644
70555--- a/init/do_mounts.h
70556+++ b/init/do_mounts.h
70557@@ -15,15 +15,15 @@ extern int root_mountflags;
70558
70559 static inline int create_dev(char *name, dev_t dev)
70560 {
70561- sys_unlink(name);
70562- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
70563+ sys_unlink((char __force_user *)name);
70564+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
70565 }
70566
70567 #if BITS_PER_LONG == 32
70568 static inline u32 bstat(char *name)
70569 {
70570 struct stat64 stat;
70571- if (sys_stat64(name, &stat) != 0)
70572+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
70573 return 0;
70574 if (!S_ISBLK(stat.st_mode))
70575 return 0;
70576@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
70577 static inline u32 bstat(char *name)
70578 {
70579 struct stat stat;
70580- if (sys_newstat(name, &stat) != 0)
70581+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
70582 return 0;
70583 if (!S_ISBLK(stat.st_mode))
70584 return 0;
70585diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
70586index 614241b..4da046b 100644
70587--- a/init/do_mounts_initrd.c
70588+++ b/init/do_mounts_initrd.c
70589@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
70590 sys_close(old_fd);sys_close(root_fd);
70591 sys_close(0);sys_close(1);sys_close(2);
70592 sys_setsid();
70593- (void) sys_open("/dev/console",O_RDWR,0);
70594+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
70595 (void) sys_dup(0);
70596 (void) sys_dup(0);
70597 return kernel_execve(shell, argv, envp_init);
70598@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
70599 create_dev("/dev/root.old", Root_RAM0);
70600 /* mount initrd on rootfs' /root */
70601 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
70602- sys_mkdir("/old", 0700);
70603- root_fd = sys_open("/", 0, 0);
70604- old_fd = sys_open("/old", 0, 0);
70605+ sys_mkdir((const char __force_user *)"/old", 0700);
70606+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
70607+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
70608 /* move initrd over / and chdir/chroot in initrd root */
70609- sys_chdir("/root");
70610- sys_mount(".", "/", NULL, MS_MOVE, NULL);
70611- sys_chroot(".");
70612+ sys_chdir((const char __force_user *)"/root");
70613+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
70614+ sys_chroot((const char __force_user *)".");
70615
70616 /*
70617 * In case that a resume from disk is carried out by linuxrc or one of
70618@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
70619
70620 /* move initrd to rootfs' /old */
70621 sys_fchdir(old_fd);
70622- sys_mount("/", ".", NULL, MS_MOVE, NULL);
70623+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
70624 /* switch root and cwd back to / of rootfs */
70625 sys_fchdir(root_fd);
70626- sys_chroot(".");
70627+ sys_chroot((const char __force_user *)".");
70628 sys_close(old_fd);
70629 sys_close(root_fd);
70630
70631 if (new_decode_dev(real_root_dev) == Root_RAM0) {
70632- sys_chdir("/old");
70633+ sys_chdir((const char __force_user *)"/old");
70634 return;
70635 }
70636
70637@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
70638 mount_root();
70639
70640 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
70641- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
70642+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
70643 if (!error)
70644 printk("okay\n");
70645 else {
70646- int fd = sys_open("/dev/root.old", O_RDWR, 0);
70647+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
70648 if (error == -ENOENT)
70649 printk("/initrd does not exist. Ignored.\n");
70650 else
70651 printk("failed\n");
70652 printk(KERN_NOTICE "Unmounting old root\n");
70653- sys_umount("/old", MNT_DETACH);
70654+ sys_umount((char __force_user *)"/old", MNT_DETACH);
70655 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
70656 if (fd < 0) {
70657 error = fd;
70658@@ -119,11 +119,11 @@ int __init initrd_load(void)
70659 * mounted in the normal path.
70660 */
70661 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
70662- sys_unlink("/initrd.image");
70663+ sys_unlink((const char __force_user *)"/initrd.image");
70664 handle_initrd();
70665 return 1;
70666 }
70667 }
70668- sys_unlink("/initrd.image");
70669+ sys_unlink((const char __force_user *)"/initrd.image");
70670 return 0;
70671 }
70672diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
70673index 69aebbf..c0bf6a7 100644
70674--- a/init/do_mounts_md.c
70675+++ b/init/do_mounts_md.c
70676@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
70677 partitioned ? "_d" : "", minor,
70678 md_setup_args[ent].device_names);
70679
70680- fd = sys_open(name, 0, 0);
70681+ fd = sys_open((char __force_user *)name, 0, 0);
70682 if (fd < 0) {
70683 printk(KERN_ERR "md: open failed - cannot start "
70684 "array %s\n", name);
70685@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
70686 * array without it
70687 */
70688 sys_close(fd);
70689- fd = sys_open(name, 0, 0);
70690+ fd = sys_open((char __force_user *)name, 0, 0);
70691 sys_ioctl(fd, BLKRRPART, 0);
70692 }
70693 sys_close(fd);
70694@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
70695
70696 wait_for_device_probe();
70697
70698- fd = sys_open("/dev/md0", 0, 0);
70699+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
70700 if (fd >= 0) {
70701 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
70702 sys_close(fd);
70703diff --git a/init/initramfs.c b/init/initramfs.c
70704index 1fd59b8..a01b079 100644
70705--- a/init/initramfs.c
70706+++ b/init/initramfs.c
70707@@ -74,7 +74,7 @@ static void __init free_hash(void)
70708 }
70709 }
70710
70711-static long __init do_utime(char __user *filename, time_t mtime)
70712+static long __init do_utime(__force char __user *filename, time_t mtime)
70713 {
70714 struct timespec t[2];
70715
70716@@ -109,7 +109,7 @@ static void __init dir_utime(void)
70717 struct dir_entry *de, *tmp;
70718 list_for_each_entry_safe(de, tmp, &dir_list, list) {
70719 list_del(&de->list);
70720- do_utime(de->name, de->mtime);
70721+ do_utime((char __force_user *)de->name, de->mtime);
70722 kfree(de->name);
70723 kfree(de);
70724 }
70725@@ -271,7 +271,7 @@ static int __init maybe_link(void)
70726 if (nlink >= 2) {
70727 char *old = find_link(major, minor, ino, mode, collected);
70728 if (old)
70729- return (sys_link(old, collected) < 0) ? -1 : 1;
70730+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
70731 }
70732 return 0;
70733 }
70734@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
70735 {
70736 struct stat st;
70737
70738- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
70739+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
70740 if (S_ISDIR(st.st_mode))
70741- sys_rmdir(path);
70742+ sys_rmdir((char __force_user *)path);
70743 else
70744- sys_unlink(path);
70745+ sys_unlink((char __force_user *)path);
70746 }
70747 }
70748
70749@@ -305,7 +305,7 @@ static int __init do_name(void)
70750 int openflags = O_WRONLY|O_CREAT;
70751 if (ml != 1)
70752 openflags |= O_TRUNC;
70753- wfd = sys_open(collected, openflags, mode);
70754+ wfd = sys_open((char __force_user *)collected, openflags, mode);
70755
70756 if (wfd >= 0) {
70757 sys_fchown(wfd, uid, gid);
70758@@ -317,17 +317,17 @@ static int __init do_name(void)
70759 }
70760 }
70761 } else if (S_ISDIR(mode)) {
70762- sys_mkdir(collected, mode);
70763- sys_chown(collected, uid, gid);
70764- sys_chmod(collected, mode);
70765+ sys_mkdir((char __force_user *)collected, mode);
70766+ sys_chown((char __force_user *)collected, uid, gid);
70767+ sys_chmod((char __force_user *)collected, mode);
70768 dir_add(collected, mtime);
70769 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
70770 S_ISFIFO(mode) || S_ISSOCK(mode)) {
70771 if (maybe_link() == 0) {
70772- sys_mknod(collected, mode, rdev);
70773- sys_chown(collected, uid, gid);
70774- sys_chmod(collected, mode);
70775- do_utime(collected, mtime);
70776+ sys_mknod((char __force_user *)collected, mode, rdev);
70777+ sys_chown((char __force_user *)collected, uid, gid);
70778+ sys_chmod((char __force_user *)collected, mode);
70779+ do_utime((char __force_user *)collected, mtime);
70780 }
70781 }
70782 return 0;
70783@@ -336,15 +336,15 @@ static int __init do_name(void)
70784 static int __init do_copy(void)
70785 {
70786 if (count >= body_len) {
70787- sys_write(wfd, victim, body_len);
70788+ sys_write(wfd, (char __force_user *)victim, body_len);
70789 sys_close(wfd);
70790- do_utime(vcollected, mtime);
70791+ do_utime((char __force_user *)vcollected, mtime);
70792 kfree(vcollected);
70793 eat(body_len);
70794 state = SkipIt;
70795 return 0;
70796 } else {
70797- sys_write(wfd, victim, count);
70798+ sys_write(wfd, (char __force_user *)victim, count);
70799 body_len -= count;
70800 eat(count);
70801 return 1;
70802@@ -355,9 +355,9 @@ static int __init do_symlink(void)
70803 {
70804 collected[N_ALIGN(name_len) + body_len] = '\0';
70805 clean_path(collected, 0);
70806- sys_symlink(collected + N_ALIGN(name_len), collected);
70807- sys_lchown(collected, uid, gid);
70808- do_utime(collected, mtime);
70809+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
70810+ sys_lchown((char __force_user *)collected, uid, gid);
70811+ do_utime((char __force_user *)collected, mtime);
70812 state = SkipIt;
70813 next_state = Reset;
70814 return 0;
70815diff --git a/init/main.c b/init/main.c
70816index 1eb4bd5..da8c6f5 100644
70817--- a/init/main.c
70818+++ b/init/main.c
70819@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
70820 #ifdef CONFIG_TC
70821 extern void tc_init(void);
70822 #endif
70823+extern void grsecurity_init(void);
70824
70825 enum system_states system_state __read_mostly;
70826 EXPORT_SYMBOL(system_state);
70827@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
70828
70829 __setup("reset_devices", set_reset_devices);
70830
70831+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
70832+extern char pax_enter_kernel_user[];
70833+extern char pax_exit_kernel_user[];
70834+extern pgdval_t clone_pgd_mask;
70835+#endif
70836+
70837+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
70838+static int __init setup_pax_nouderef(char *str)
70839+{
70840+#ifdef CONFIG_X86_32
70841+ unsigned int cpu;
70842+ struct desc_struct *gdt;
70843+
70844+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
70845+ gdt = get_cpu_gdt_table(cpu);
70846+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
70847+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
70848+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
70849+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
70850+ }
70851+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
70852+#else
70853+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
70854+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
70855+ clone_pgd_mask = ~(pgdval_t)0UL;
70856+#endif
70857+
70858+ return 0;
70859+}
70860+early_param("pax_nouderef", setup_pax_nouderef);
70861+#endif
70862+
70863+#ifdef CONFIG_PAX_SOFTMODE
70864+int pax_softmode;
70865+
70866+static int __init setup_pax_softmode(char *str)
70867+{
70868+ get_option(&str, &pax_softmode);
70869+ return 1;
70870+}
70871+__setup("pax_softmode=", setup_pax_softmode);
70872+#endif
70873+
70874 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
70875 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
70876 static const char *panic_later, *panic_param;
70877@@ -705,52 +749,53 @@ int initcall_debug;
70878 core_param(initcall_debug, initcall_debug, bool, 0644);
70879
70880 static char msgbuf[64];
70881-static struct boot_trace_call call;
70882-static struct boot_trace_ret ret;
70883+static struct boot_trace_call trace_call;
70884+static struct boot_trace_ret trace_ret;
70885
70886 int do_one_initcall(initcall_t fn)
70887 {
70888 int count = preempt_count();
70889 ktime_t calltime, delta, rettime;
70890+ const char *msg1 = "", *msg2 = "";
70891
70892 if (initcall_debug) {
70893- call.caller = task_pid_nr(current);
70894- printk("calling %pF @ %i\n", fn, call.caller);
70895+ trace_call.caller = task_pid_nr(current);
70896+ printk("calling %pF @ %i\n", fn, trace_call.caller);
70897 calltime = ktime_get();
70898- trace_boot_call(&call, fn);
70899+ trace_boot_call(&trace_call, fn);
70900 enable_boot_trace();
70901 }
70902
70903- ret.result = fn();
70904+ trace_ret.result = fn();
70905
70906 if (initcall_debug) {
70907 disable_boot_trace();
70908 rettime = ktime_get();
70909 delta = ktime_sub(rettime, calltime);
70910- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70911- trace_boot_ret(&ret, fn);
70912+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70913+ trace_boot_ret(&trace_ret, fn);
70914 printk("initcall %pF returned %d after %Ld usecs\n", fn,
70915- ret.result, ret.duration);
70916+ trace_ret.result, trace_ret.duration);
70917 }
70918
70919 msgbuf[0] = 0;
70920
70921- if (ret.result && ret.result != -ENODEV && initcall_debug)
70922- sprintf(msgbuf, "error code %d ", ret.result);
70923+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
70924+ sprintf(msgbuf, "error code %d ", trace_ret.result);
70925
70926 if (preempt_count() != count) {
70927- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
70928+ msg1 = " preemption imbalance";
70929 preempt_count() = count;
70930 }
70931 if (irqs_disabled()) {
70932- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
70933+ msg2 = " disabled interrupts";
70934 local_irq_enable();
70935 }
70936- if (msgbuf[0]) {
70937- printk("initcall %pF returned with %s\n", fn, msgbuf);
70938+ if (msgbuf[0] || *msg1 || *msg2) {
70939+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
70940 }
70941
70942- return ret.result;
70943+ return trace_ret.result;
70944 }
70945
70946
70947@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
70948 if (!ramdisk_execute_command)
70949 ramdisk_execute_command = "/init";
70950
70951- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
70952+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
70953 ramdisk_execute_command = NULL;
70954 prepare_namespace();
70955 }
70956
70957+ grsecurity_init();
70958+
70959 /*
70960 * Ok, we have completed the initial bootup, and
70961 * we're essentially up and running. Get rid of the
70962diff --git a/init/noinitramfs.c b/init/noinitramfs.c
70963index f4c1a3a..96c19bd 100644
70964--- a/init/noinitramfs.c
70965+++ b/init/noinitramfs.c
70966@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
70967 {
70968 int err;
70969
70970- err = sys_mkdir("/dev", 0755);
70971+ err = sys_mkdir((const char __user *)"/dev", 0755);
70972 if (err < 0)
70973 goto out;
70974
70975@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
70976 if (err < 0)
70977 goto out;
70978
70979- err = sys_mkdir("/root", 0700);
70980+ err = sys_mkdir((const char __user *)"/root", 0700);
70981 if (err < 0)
70982 goto out;
70983
70984diff --git a/ipc/mqueue.c b/ipc/mqueue.c
70985index d01bc14..8df81db 100644
70986--- a/ipc/mqueue.c
70987+++ b/ipc/mqueue.c
70988@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
70989 mq_bytes = (mq_msg_tblsz +
70990 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
70991
70992+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
70993 spin_lock(&mq_lock);
70994 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
70995 u->mq_bytes + mq_bytes >
70996diff --git a/ipc/msg.c b/ipc/msg.c
70997index 779f762..4af9e36 100644
70998--- a/ipc/msg.c
70999+++ b/ipc/msg.c
71000@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
71001 return security_msg_queue_associate(msq, msgflg);
71002 }
71003
71004+static struct ipc_ops msg_ops = {
71005+ .getnew = newque,
71006+ .associate = msg_security,
71007+ .more_checks = NULL
71008+};
71009+
71010 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
71011 {
71012 struct ipc_namespace *ns;
71013- struct ipc_ops msg_ops;
71014 struct ipc_params msg_params;
71015
71016 ns = current->nsproxy->ipc_ns;
71017
71018- msg_ops.getnew = newque;
71019- msg_ops.associate = msg_security;
71020- msg_ops.more_checks = NULL;
71021-
71022 msg_params.key = key;
71023 msg_params.flg = msgflg;
71024
71025diff --git a/ipc/sem.c b/ipc/sem.c
71026index b781007..f738b04 100644
71027--- a/ipc/sem.c
71028+++ b/ipc/sem.c
71029@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
71030 return 0;
71031 }
71032
71033+static struct ipc_ops sem_ops = {
71034+ .getnew = newary,
71035+ .associate = sem_security,
71036+ .more_checks = sem_more_checks
71037+};
71038+
71039 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71040 {
71041 struct ipc_namespace *ns;
71042- struct ipc_ops sem_ops;
71043 struct ipc_params sem_params;
71044
71045 ns = current->nsproxy->ipc_ns;
71046@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
71047 if (nsems < 0 || nsems > ns->sc_semmsl)
71048 return -EINVAL;
71049
71050- sem_ops.getnew = newary;
71051- sem_ops.associate = sem_security;
71052- sem_ops.more_checks = sem_more_checks;
71053-
71054 sem_params.key = key;
71055 sem_params.flg = semflg;
71056 sem_params.u.nsems = nsems;
71057@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
71058 ushort* sem_io = fast_sem_io;
71059 int nsems;
71060
71061+ pax_track_stack();
71062+
71063 sma = sem_lock_check(ns, semid);
71064 if (IS_ERR(sma))
71065 return PTR_ERR(sma);
71066@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
71067 unsigned long jiffies_left = 0;
71068 struct ipc_namespace *ns;
71069
71070+ pax_track_stack();
71071+
71072 ns = current->nsproxy->ipc_ns;
71073
71074 if (nsops < 1 || semid < 0)
71075diff --git a/ipc/shm.c b/ipc/shm.c
71076index d30732c..e4992cd 100644
71077--- a/ipc/shm.c
71078+++ b/ipc/shm.c
71079@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
71080 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
71081 #endif
71082
71083+#ifdef CONFIG_GRKERNSEC
71084+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71085+ const time_t shm_createtime, const uid_t cuid,
71086+ const int shmid);
71087+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
71088+ const time_t shm_createtime);
71089+#endif
71090+
71091 void shm_init_ns(struct ipc_namespace *ns)
71092 {
71093 ns->shm_ctlmax = SHMMAX;
71094@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
71095 shp->shm_lprid = 0;
71096 shp->shm_atim = shp->shm_dtim = 0;
71097 shp->shm_ctim = get_seconds();
71098+#ifdef CONFIG_GRKERNSEC
71099+ {
71100+ struct timespec timeval;
71101+ do_posix_clock_monotonic_gettime(&timeval);
71102+
71103+ shp->shm_createtime = timeval.tv_sec;
71104+ }
71105+#endif
71106 shp->shm_segsz = size;
71107 shp->shm_nattch = 0;
71108 shp->shm_file = file;
71109@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71110 return 0;
71111 }
71112
71113+static struct ipc_ops shm_ops = {
71114+ .getnew = newseg,
71115+ .associate = shm_security,
71116+ .more_checks = shm_more_checks
71117+};
71118+
71119 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71120 {
71121 struct ipc_namespace *ns;
71122- struct ipc_ops shm_ops;
71123 struct ipc_params shm_params;
71124
71125 ns = current->nsproxy->ipc_ns;
71126
71127- shm_ops.getnew = newseg;
71128- shm_ops.associate = shm_security;
71129- shm_ops.more_checks = shm_more_checks;
71130-
71131 shm_params.key = key;
71132 shm_params.flg = shmflg;
71133 shm_params.u.size = size;
71134@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71135 f_mode = FMODE_READ | FMODE_WRITE;
71136 }
71137 if (shmflg & SHM_EXEC) {
71138+
71139+#ifdef CONFIG_PAX_MPROTECT
71140+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
71141+ goto out;
71142+#endif
71143+
71144 prot |= PROT_EXEC;
71145 acc_mode |= S_IXUGO;
71146 }
71147@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71148 if (err)
71149 goto out_unlock;
71150
71151+#ifdef CONFIG_GRKERNSEC
71152+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71153+ shp->shm_perm.cuid, shmid) ||
71154+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71155+ err = -EACCES;
71156+ goto out_unlock;
71157+ }
71158+#endif
71159+
71160 path.dentry = dget(shp->shm_file->f_path.dentry);
71161 path.mnt = shp->shm_file->f_path.mnt;
71162 shp->shm_nattch++;
71163+#ifdef CONFIG_GRKERNSEC
71164+ shp->shm_lapid = current->pid;
71165+#endif
71166 size = i_size_read(path.dentry->d_inode);
71167 shm_unlock(shp);
71168
71169diff --git a/kernel/acct.c b/kernel/acct.c
71170index a6605ca..ca91111 100644
71171--- a/kernel/acct.c
71172+++ b/kernel/acct.c
71173@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71174 */
71175 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71176 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71177- file->f_op->write(file, (char *)&ac,
71178+ file->f_op->write(file, (char __force_user *)&ac,
71179 sizeof(acct_t), &file->f_pos);
71180 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71181 set_fs(fs);
71182diff --git a/kernel/audit.c b/kernel/audit.c
71183index 5feed23..48415fd 100644
71184--- a/kernel/audit.c
71185+++ b/kernel/audit.c
71186@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71187 3) suppressed due to audit_rate_limit
71188 4) suppressed due to audit_backlog_limit
71189 */
71190-static atomic_t audit_lost = ATOMIC_INIT(0);
71191+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71192
71193 /* The netlink socket. */
71194 static struct sock *audit_sock;
71195@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71196 unsigned long now;
71197 int print;
71198
71199- atomic_inc(&audit_lost);
71200+ atomic_inc_unchecked(&audit_lost);
71201
71202 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71203
71204@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71205 printk(KERN_WARNING
71206 "audit: audit_lost=%d audit_rate_limit=%d "
71207 "audit_backlog_limit=%d\n",
71208- atomic_read(&audit_lost),
71209+ atomic_read_unchecked(&audit_lost),
71210 audit_rate_limit,
71211 audit_backlog_limit);
71212 audit_panic(message);
71213@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71214 status_set.pid = audit_pid;
71215 status_set.rate_limit = audit_rate_limit;
71216 status_set.backlog_limit = audit_backlog_limit;
71217- status_set.lost = atomic_read(&audit_lost);
71218+ status_set.lost = atomic_read_unchecked(&audit_lost);
71219 status_set.backlog = skb_queue_len(&audit_skb_queue);
71220 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71221 &status_set, sizeof(status_set));
71222@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71223 spin_unlock_irq(&tsk->sighand->siglock);
71224 }
71225 read_unlock(&tasklist_lock);
71226- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71227- &s, sizeof(s));
71228+
71229+ if (!err)
71230+ audit_send_reply(NETLINK_CB(skb).pid, seq,
71231+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71232 break;
71233 }
71234 case AUDIT_TTY_SET: {
71235@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71236 avail = audit_expand(ab,
71237 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71238 if (!avail)
71239- goto out;
71240+ goto out_va_end;
71241 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71242 }
71243- va_end(args2);
71244 if (len > 0)
71245 skb_put(skb, len);
71246+out_va_end:
71247+ va_end(args2);
71248 out:
71249 return;
71250 }
71251diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71252index 267e484..ac41bc3 100644
71253--- a/kernel/auditsc.c
71254+++ b/kernel/auditsc.c
71255@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71256 struct audit_buffer **ab,
71257 struct audit_aux_data_execve *axi)
71258 {
71259- int i;
71260- size_t len, len_sent = 0;
71261+ int i, len;
71262+ size_t len_sent = 0;
71263 const char __user *p;
71264 char *buf;
71265
71266@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71267 }
71268
71269 /* global counter which is incremented every time something logs in */
71270-static atomic_t session_id = ATOMIC_INIT(0);
71271+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71272
71273 /**
71274 * audit_set_loginuid - set a task's audit_context loginuid
71275@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71276 */
71277 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71278 {
71279- unsigned int sessionid = atomic_inc_return(&session_id);
71280+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71281 struct audit_context *context = task->audit_context;
71282
71283 if (context && context->in_syscall) {
71284diff --git a/kernel/capability.c b/kernel/capability.c
71285index 8a944f5..db5001e 100644
71286--- a/kernel/capability.c
71287+++ b/kernel/capability.c
71288@@ -305,10 +305,26 @@ int capable(int cap)
71289 BUG();
71290 }
71291
71292- if (security_capable(cap) == 0) {
71293+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71294 current->flags |= PF_SUPERPRIV;
71295 return 1;
71296 }
71297 return 0;
71298 }
71299+
71300+int capable_nolog(int cap)
71301+{
71302+ if (unlikely(!cap_valid(cap))) {
71303+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71304+ BUG();
71305+ }
71306+
71307+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
71308+ current->flags |= PF_SUPERPRIV;
71309+ return 1;
71310+ }
71311+ return 0;
71312+}
71313+
71314 EXPORT_SYMBOL(capable);
71315+EXPORT_SYMBOL(capable_nolog);
71316diff --git a/kernel/cgroup.c b/kernel/cgroup.c
71317index 1fbcc74..7000012 100644
71318--- a/kernel/cgroup.c
71319+++ b/kernel/cgroup.c
71320@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
71321 struct hlist_head *hhead;
71322 struct cg_cgroup_link *link;
71323
71324+ pax_track_stack();
71325+
71326 /* First see if we already have a cgroup group that matches
71327 * the desired set */
71328 read_lock(&css_set_lock);
71329diff --git a/kernel/compat.c b/kernel/compat.c
71330index 8bc5578..186e44a 100644
71331--- a/kernel/compat.c
71332+++ b/kernel/compat.c
71333@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
71334 mm_segment_t oldfs;
71335 long ret;
71336
71337- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
71338+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
71339 oldfs = get_fs();
71340 set_fs(KERNEL_DS);
71341 ret = hrtimer_nanosleep_restart(restart);
71342@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
71343 oldfs = get_fs();
71344 set_fs(KERNEL_DS);
71345 ret = hrtimer_nanosleep(&tu,
71346- rmtp ? (struct timespec __user *)&rmt : NULL,
71347+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
71348 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
71349 set_fs(oldfs);
71350
71351@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
71352 mm_segment_t old_fs = get_fs();
71353
71354 set_fs(KERNEL_DS);
71355- ret = sys_sigpending((old_sigset_t __user *) &s);
71356+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
71357 set_fs(old_fs);
71358 if (ret == 0)
71359 ret = put_user(s, set);
71360@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
71361 old_fs = get_fs();
71362 set_fs(KERNEL_DS);
71363 ret = sys_sigprocmask(how,
71364- set ? (old_sigset_t __user *) &s : NULL,
71365- oset ? (old_sigset_t __user *) &s : NULL);
71366+ set ? (old_sigset_t __force_user *) &s : NULL,
71367+ oset ? (old_sigset_t __force_user *) &s : NULL);
71368 set_fs(old_fs);
71369 if (ret == 0)
71370 if (oset)
71371@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
71372 mm_segment_t old_fs = get_fs();
71373
71374 set_fs(KERNEL_DS);
71375- ret = sys_old_getrlimit(resource, &r);
71376+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
71377 set_fs(old_fs);
71378
71379 if (!ret) {
71380@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
71381 mm_segment_t old_fs = get_fs();
71382
71383 set_fs(KERNEL_DS);
71384- ret = sys_getrusage(who, (struct rusage __user *) &r);
71385+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
71386 set_fs(old_fs);
71387
71388 if (ret)
71389@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
71390 set_fs (KERNEL_DS);
71391 ret = sys_wait4(pid,
71392 (stat_addr ?
71393- (unsigned int __user *) &status : NULL),
71394- options, (struct rusage __user *) &r);
71395+ (unsigned int __force_user *) &status : NULL),
71396+ options, (struct rusage __force_user *) &r);
71397 set_fs (old_fs);
71398
71399 if (ret > 0) {
71400@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
71401 memset(&info, 0, sizeof(info));
71402
71403 set_fs(KERNEL_DS);
71404- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
71405- uru ? (struct rusage __user *)&ru : NULL);
71406+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
71407+ uru ? (struct rusage __force_user *)&ru : NULL);
71408 set_fs(old_fs);
71409
71410 if ((ret < 0) || (info.si_signo == 0))
71411@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
71412 oldfs = get_fs();
71413 set_fs(KERNEL_DS);
71414 err = sys_timer_settime(timer_id, flags,
71415- (struct itimerspec __user *) &newts,
71416- (struct itimerspec __user *) &oldts);
71417+ (struct itimerspec __force_user *) &newts,
71418+ (struct itimerspec __force_user *) &oldts);
71419 set_fs(oldfs);
71420 if (!err && old && put_compat_itimerspec(old, &oldts))
71421 return -EFAULT;
71422@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
71423 oldfs = get_fs();
71424 set_fs(KERNEL_DS);
71425 err = sys_timer_gettime(timer_id,
71426- (struct itimerspec __user *) &ts);
71427+ (struct itimerspec __force_user *) &ts);
71428 set_fs(oldfs);
71429 if (!err && put_compat_itimerspec(setting, &ts))
71430 return -EFAULT;
71431@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
71432 oldfs = get_fs();
71433 set_fs(KERNEL_DS);
71434 err = sys_clock_settime(which_clock,
71435- (struct timespec __user *) &ts);
71436+ (struct timespec __force_user *) &ts);
71437 set_fs(oldfs);
71438 return err;
71439 }
71440@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
71441 oldfs = get_fs();
71442 set_fs(KERNEL_DS);
71443 err = sys_clock_gettime(which_clock,
71444- (struct timespec __user *) &ts);
71445+ (struct timespec __force_user *) &ts);
71446 set_fs(oldfs);
71447 if (!err && put_compat_timespec(&ts, tp))
71448 return -EFAULT;
71449@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
71450 oldfs = get_fs();
71451 set_fs(KERNEL_DS);
71452 err = sys_clock_getres(which_clock,
71453- (struct timespec __user *) &ts);
71454+ (struct timespec __force_user *) &ts);
71455 set_fs(oldfs);
71456 if (!err && tp && put_compat_timespec(&ts, tp))
71457 return -EFAULT;
71458@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
71459 long err;
71460 mm_segment_t oldfs;
71461 struct timespec tu;
71462- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
71463+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
71464
71465- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
71466+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
71467 oldfs = get_fs();
71468 set_fs(KERNEL_DS);
71469 err = clock_nanosleep_restart(restart);
71470@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
71471 oldfs = get_fs();
71472 set_fs(KERNEL_DS);
71473 err = sys_clock_nanosleep(which_clock, flags,
71474- (struct timespec __user *) &in,
71475- (struct timespec __user *) &out);
71476+ (struct timespec __force_user *) &in,
71477+ (struct timespec __force_user *) &out);
71478 set_fs(oldfs);
71479
71480 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
71481diff --git a/kernel/configs.c b/kernel/configs.c
71482index abaee68..047facd 100644
71483--- a/kernel/configs.c
71484+++ b/kernel/configs.c
71485@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
71486 struct proc_dir_entry *entry;
71487
71488 /* create the current config file */
71489+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
71490+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
71491+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
71492+ &ikconfig_file_ops);
71493+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71494+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
71495+ &ikconfig_file_ops);
71496+#endif
71497+#else
71498 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
71499 &ikconfig_file_ops);
71500+#endif
71501+
71502 if (!entry)
71503 return -ENOMEM;
71504
71505diff --git a/kernel/cpu.c b/kernel/cpu.c
71506index 3f2f04f..4e53ded 100644
71507--- a/kernel/cpu.c
71508+++ b/kernel/cpu.c
71509@@ -20,7 +20,7 @@
71510 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
71511 static DEFINE_MUTEX(cpu_add_remove_lock);
71512
71513-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
71514+static RAW_NOTIFIER_HEAD(cpu_chain);
71515
71516 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
71517 * Should always be manipulated under cpu_add_remove_lock
71518diff --git a/kernel/cred.c b/kernel/cred.c
71519index 0b5b5fc..f7fe51a 100644
71520--- a/kernel/cred.c
71521+++ b/kernel/cred.c
71522@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
71523 */
71524 void __put_cred(struct cred *cred)
71525 {
71526+ pax_track_stack();
71527+
71528 kdebug("__put_cred(%p{%d,%d})", cred,
71529 atomic_read(&cred->usage),
71530 read_cred_subscribers(cred));
71531@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
71532 {
71533 struct cred *cred;
71534
71535+ pax_track_stack();
71536+
71537 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
71538 atomic_read(&tsk->cred->usage),
71539 read_cred_subscribers(tsk->cred));
71540@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
71541 validate_creds(cred);
71542 put_cred(cred);
71543 }
71544+
71545+#ifdef CONFIG_GRKERNSEC_SETXID
71546+ cred = (struct cred *) tsk->delayed_cred;
71547+ if (cred) {
71548+ tsk->delayed_cred = NULL;
71549+ validate_creds(cred);
71550+ put_cred(cred);
71551+ }
71552+#endif
71553 }
71554
71555 /**
71556@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
71557 {
71558 const struct cred *cred;
71559
71560+ pax_track_stack();
71561+
71562 rcu_read_lock();
71563
71564 do {
71565@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
71566 {
71567 struct cred *new;
71568
71569+ pax_track_stack();
71570+
71571 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
71572 if (!new)
71573 return NULL;
71574@@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
71575 const struct cred *old;
71576 struct cred *new;
71577
71578+ pax_track_stack();
71579+
71580 validate_process_creds();
71581
71582 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71583@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
71584 struct thread_group_cred *tgcred = NULL;
71585 struct cred *new;
71586
71587+ pax_track_stack();
71588+
71589 #ifdef CONFIG_KEYS
71590 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
71591 if (!tgcred)
71592@@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
71593 struct cred *new;
71594 int ret;
71595
71596+ pax_track_stack();
71597+
71598 mutex_init(&p->cred_guard_mutex);
71599
71600 if (
71601@@ -523,11 +546,13 @@ error_put:
71602 * Always returns 0 thus allowing this function to be tail-called at the end
71603 * of, say, sys_setgid().
71604 */
71605-int commit_creds(struct cred *new)
71606+static int __commit_creds(struct cred *new)
71607 {
71608 struct task_struct *task = current;
71609 const struct cred *old = task->real_cred;
71610
71611+ pax_track_stack();
71612+
71613 kdebug("commit_creds(%p{%d,%d})", new,
71614 atomic_read(&new->usage),
71615 read_cred_subscribers(new));
71616@@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
71617
71618 get_cred(new); /* we will require a ref for the subj creds too */
71619
71620+ gr_set_role_label(task, new->uid, new->gid);
71621+
71622 /* dumpability changes */
71623 if (old->euid != new->euid ||
71624 old->egid != new->egid ||
71625@@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
71626 key_fsgid_changed(task);
71627
71628 /* do it
71629- * - What if a process setreuid()'s and this brings the
71630- * new uid over his NPROC rlimit? We can check this now
71631- * cheaply with the new uid cache, so if it matters
71632- * we should be checking for it. -DaveM
71633+ * RLIMIT_NPROC limits on user->processes have already been checked
71634+ * in set_user().
71635 */
71636 alter_cred_subscribers(new, 2);
71637 if (new->user != old->user)
71638@@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
71639 put_cred(old);
71640 return 0;
71641 }
71642+
71643+#ifdef CONFIG_GRKERNSEC_SETXID
71644+extern int set_user(struct cred *new);
71645+
71646+void gr_delayed_cred_worker(void)
71647+{
71648+ const struct cred *new = current->delayed_cred;
71649+ struct cred *ncred;
71650+
71651+ current->delayed_cred = NULL;
71652+
71653+ if (current_uid() && new != NULL) {
71654+ // from doing get_cred on it when queueing this
71655+ put_cred(new);
71656+ return;
71657+ } else if (new == NULL)
71658+ return;
71659+
71660+ ncred = prepare_creds();
71661+ if (!ncred)
71662+ goto die;
71663+ // uids
71664+ ncred->uid = new->uid;
71665+ ncred->euid = new->euid;
71666+ ncred->suid = new->suid;
71667+ ncred->fsuid = new->fsuid;
71668+ // gids
71669+ ncred->gid = new->gid;
71670+ ncred->egid = new->egid;
71671+ ncred->sgid = new->sgid;
71672+ ncred->fsgid = new->fsgid;
71673+ // groups
71674+ if (set_groups(ncred, new->group_info) < 0) {
71675+ abort_creds(ncred);
71676+ goto die;
71677+ }
71678+ // caps
71679+ ncred->securebits = new->securebits;
71680+ ncred->cap_inheritable = new->cap_inheritable;
71681+ ncred->cap_permitted = new->cap_permitted;
71682+ ncred->cap_effective = new->cap_effective;
71683+ ncred->cap_bset = new->cap_bset;
71684+
71685+ if (set_user(ncred)) {
71686+ abort_creds(ncred);
71687+ goto die;
71688+ }
71689+
71690+ // from doing get_cred on it when queueing this
71691+ put_cred(new);
71692+
71693+ __commit_creds(ncred);
71694+ return;
71695+die:
71696+ // from doing get_cred on it when queueing this
71697+ put_cred(new);
71698+ do_group_exit(SIGKILL);
71699+}
71700+#endif
71701+
71702+int commit_creds(struct cred *new)
71703+{
71704+#ifdef CONFIG_GRKERNSEC_SETXID
71705+ struct task_struct *t;
71706+
71707+ /* we won't get called with tasklist_lock held for writing
71708+ and interrupts disabled as the cred struct in that case is
71709+ init_cred
71710+ */
71711+ if (grsec_enable_setxid && !current_is_single_threaded() &&
71712+ !current_uid() && new->uid) {
71713+ rcu_read_lock();
71714+ read_lock(&tasklist_lock);
71715+ for (t = next_thread(current); t != current;
71716+ t = next_thread(t)) {
71717+ if (t->delayed_cred == NULL) {
71718+ t->delayed_cred = get_cred(new);
71719+ set_tsk_need_resched(t);
71720+ }
71721+ }
71722+ read_unlock(&tasklist_lock);
71723+ rcu_read_unlock();
71724+ }
71725+#endif
71726+ return __commit_creds(new);
71727+}
71728+
71729 EXPORT_SYMBOL(commit_creds);
71730
71731+
71732 /**
71733 * abort_creds - Discard a set of credentials and unlock the current task
71734 * @new: The credentials that were going to be applied
71735@@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
71736 */
71737 void abort_creds(struct cred *new)
71738 {
71739+ pax_track_stack();
71740+
71741 kdebug("abort_creds(%p{%d,%d})", new,
71742 atomic_read(&new->usage),
71743 read_cred_subscribers(new));
71744@@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
71745 {
71746 const struct cred *old = current->cred;
71747
71748+ pax_track_stack();
71749+
71750 kdebug("override_creds(%p{%d,%d})", new,
71751 atomic_read(&new->usage),
71752 read_cred_subscribers(new));
71753@@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
71754 {
71755 const struct cred *override = current->cred;
71756
71757+ pax_track_stack();
71758+
71759 kdebug("revert_creds(%p{%d,%d})", old,
71760 atomic_read(&old->usage),
71761 read_cred_subscribers(old));
71762@@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
71763 const struct cred *old;
71764 struct cred *new;
71765
71766+ pax_track_stack();
71767+
71768 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71769 if (!new)
71770 return NULL;
71771@@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
71772 */
71773 int set_security_override(struct cred *new, u32 secid)
71774 {
71775+ pax_track_stack();
71776+
71777 return security_kernel_act_as(new, secid);
71778 }
71779 EXPORT_SYMBOL(set_security_override);
71780@@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
71781 u32 secid;
71782 int ret;
71783
71784+ pax_track_stack();
71785+
71786 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
71787 if (ret < 0)
71788 return ret;
71789diff --git a/kernel/exit.c b/kernel/exit.c
71790index 0f8fae3..9344a56 100644
71791--- a/kernel/exit.c
71792+++ b/kernel/exit.c
71793@@ -55,6 +55,10 @@
71794 #include <asm/pgtable.h>
71795 #include <asm/mmu_context.h>
71796
71797+#ifdef CONFIG_GRKERNSEC
71798+extern rwlock_t grsec_exec_file_lock;
71799+#endif
71800+
71801 static void exit_mm(struct task_struct * tsk);
71802
71803 static void __unhash_process(struct task_struct *p)
71804@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
71805 struct task_struct *leader;
71806 int zap_leader;
71807 repeat:
71808+#ifdef CONFIG_NET
71809+ gr_del_task_from_ip_table(p);
71810+#endif
71811+
71812 tracehook_prepare_release_task(p);
71813 /* don't need to get the RCU readlock here - the process is dead and
71814 * can't be modifying its own credentials */
71815@@ -397,7 +405,7 @@ int allow_signal(int sig)
71816 * know it'll be handled, so that they don't get converted to
71817 * SIGKILL or just silently dropped.
71818 */
71819- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
71820+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
71821 recalc_sigpending();
71822 spin_unlock_irq(&current->sighand->siglock);
71823 return 0;
71824@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
71825 vsnprintf(current->comm, sizeof(current->comm), name, args);
71826 va_end(args);
71827
71828+#ifdef CONFIG_GRKERNSEC
71829+ write_lock(&grsec_exec_file_lock);
71830+ if (current->exec_file) {
71831+ fput(current->exec_file);
71832+ current->exec_file = NULL;
71833+ }
71834+ write_unlock(&grsec_exec_file_lock);
71835+#endif
71836+
71837+ gr_set_kernel_label(current);
71838+
71839 /*
71840 * If we were started as result of loading a module, close all of the
71841 * user space pages. We don't need them, and if we didn't close them
71842@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
71843 struct task_struct *tsk = current;
71844 int group_dead;
71845
71846- profile_task_exit(tsk);
71847-
71848- WARN_ON(atomic_read(&tsk->fs_excl));
71849-
71850+ /*
71851+ * Check this first since set_fs() below depends on
71852+ * current_thread_info(), which we better not access when we're in
71853+ * interrupt context. Other than that, we want to do the set_fs()
71854+ * as early as possible.
71855+ */
71856 if (unlikely(in_interrupt()))
71857 panic("Aiee, killing interrupt handler!");
71858- if (unlikely(!tsk->pid))
71859- panic("Attempted to kill the idle task!");
71860
71861 /*
71862- * If do_exit is called because this processes oopsed, it's possible
71863+ * If do_exit is called because this processes Oops'ed, it's possible
71864 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
71865 * continuing. Amongst other possible reasons, this is to prevent
71866 * mm_release()->clear_child_tid() from writing to a user-controlled
71867@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
71868 */
71869 set_fs(USER_DS);
71870
71871+ profile_task_exit(tsk);
71872+
71873+ WARN_ON(atomic_read(&tsk->fs_excl));
71874+
71875+ if (unlikely(!tsk->pid))
71876+ panic("Attempted to kill the idle task!");
71877+
71878 tracehook_report_exit(&code);
71879
71880 validate_creds_for_do_exit(tsk);
71881@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
71882 tsk->exit_code = code;
71883 taskstats_exit(tsk, group_dead);
71884
71885+ gr_acl_handle_psacct(tsk, code);
71886+ gr_acl_handle_exit();
71887+
71888 exit_mm(tsk);
71889
71890 if (group_dead)
71891@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
71892
71893 if (unlikely(wo->wo_flags & WNOWAIT)) {
71894 int exit_code = p->exit_code;
71895- int why, status;
71896+ int why;
71897
71898 get_task_struct(p);
71899 read_unlock(&tasklist_lock);
71900diff --git a/kernel/fork.c b/kernel/fork.c
71901index 4bde56f..29a9bab 100644
71902--- a/kernel/fork.c
71903+++ b/kernel/fork.c
71904@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
71905 *stackend = STACK_END_MAGIC; /* for overflow detection */
71906
71907 #ifdef CONFIG_CC_STACKPROTECTOR
71908- tsk->stack_canary = get_random_int();
71909+ tsk->stack_canary = pax_get_random_long();
71910 #endif
71911
71912 /* One for us, one for whoever does the "release_task()" (usually parent) */
71913@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71914 mm->locked_vm = 0;
71915 mm->mmap = NULL;
71916 mm->mmap_cache = NULL;
71917- mm->free_area_cache = oldmm->mmap_base;
71918- mm->cached_hole_size = ~0UL;
71919+ mm->free_area_cache = oldmm->free_area_cache;
71920+ mm->cached_hole_size = oldmm->cached_hole_size;
71921 mm->map_count = 0;
71922 cpumask_clear(mm_cpumask(mm));
71923 mm->mm_rb = RB_ROOT;
71924@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71925 tmp->vm_flags &= ~VM_LOCKED;
71926 tmp->vm_mm = mm;
71927 tmp->vm_next = tmp->vm_prev = NULL;
71928+ tmp->vm_mirror = NULL;
71929 anon_vma_link(tmp);
71930 file = tmp->vm_file;
71931 if (file) {
71932@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71933 if (retval)
71934 goto out;
71935 }
71936+
71937+#ifdef CONFIG_PAX_SEGMEXEC
71938+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
71939+ struct vm_area_struct *mpnt_m;
71940+
71941+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
71942+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
71943+
71944+ if (!mpnt->vm_mirror)
71945+ continue;
71946+
71947+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
71948+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
71949+ mpnt->vm_mirror = mpnt_m;
71950+ } else {
71951+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
71952+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
71953+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
71954+ mpnt->vm_mirror->vm_mirror = mpnt;
71955+ }
71956+ }
71957+ BUG_ON(mpnt_m);
71958+ }
71959+#endif
71960+
71961 /* a new mm has just been created */
71962 arch_dup_mmap(oldmm, mm);
71963 retval = 0;
71964@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
71965 write_unlock(&fs->lock);
71966 return -EAGAIN;
71967 }
71968- fs->users++;
71969+ atomic_inc(&fs->users);
71970 write_unlock(&fs->lock);
71971 return 0;
71972 }
71973 tsk->fs = copy_fs_struct(fs);
71974 if (!tsk->fs)
71975 return -ENOMEM;
71976+ gr_set_chroot_entries(tsk, &tsk->fs->root);
71977 return 0;
71978 }
71979
71980@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
71981 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
71982 #endif
71983 retval = -EAGAIN;
71984+
71985+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
71986+
71987 if (atomic_read(&p->real_cred->user->processes) >=
71988 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
71989- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
71990- p->real_cred->user != INIT_USER)
71991+ if (p->real_cred->user != INIT_USER &&
71992+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
71993 goto bad_fork_free;
71994 }
71995+ current->flags &= ~PF_NPROC_EXCEEDED;
71996
71997 retval = copy_creds(p, clone_flags);
71998 if (retval < 0)
71999@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
72000 goto bad_fork_free_pid;
72001 }
72002
72003+ gr_copy_label(p);
72004+
72005 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
72006 /*
72007 * Clear TID on mm_release()?
72008@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
72009 bad_fork_free:
72010 free_task(p);
72011 fork_out:
72012+ gr_log_forkfail(retval);
72013+
72014 return ERR_PTR(retval);
72015 }
72016
72017@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
72018 if (clone_flags & CLONE_PARENT_SETTID)
72019 put_user(nr, parent_tidptr);
72020
72021+ gr_handle_brute_check();
72022+
72023 if (clone_flags & CLONE_VFORK) {
72024 p->vfork_done = &vfork;
72025 init_completion(&vfork);
72026@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
72027 return 0;
72028
72029 /* don't need lock here; in the worst case we'll do useless copy */
72030- if (fs->users == 1)
72031+ if (atomic_read(&fs->users) == 1)
72032 return 0;
72033
72034 *new_fsp = copy_fs_struct(fs);
72035@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
72036 fs = current->fs;
72037 write_lock(&fs->lock);
72038 current->fs = new_fs;
72039- if (--fs->users)
72040+ gr_set_chroot_entries(current, &current->fs->root);
72041+ if (atomic_dec_return(&fs->users))
72042 new_fs = NULL;
72043 else
72044 new_fs = fs;
72045diff --git a/kernel/futex.c b/kernel/futex.c
72046index fb98c9f..333faec 100644
72047--- a/kernel/futex.c
72048+++ b/kernel/futex.c
72049@@ -54,6 +54,7 @@
72050 #include <linux/mount.h>
72051 #include <linux/pagemap.h>
72052 #include <linux/syscalls.h>
72053+#include <linux/ptrace.h>
72054 #include <linux/signal.h>
72055 #include <linux/module.h>
72056 #include <linux/magic.h>
72057@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
72058 struct page *page;
72059 int err, ro = 0;
72060
72061+#ifdef CONFIG_PAX_SEGMEXEC
72062+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
72063+ return -EFAULT;
72064+#endif
72065+
72066 /*
72067 * The futex address must be "naturally" aligned.
72068 */
72069@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
72070 struct futex_q q;
72071 int ret;
72072
72073+ pax_track_stack();
72074+
72075 if (!bitset)
72076 return -EINVAL;
72077
72078@@ -1871,7 +1879,7 @@ retry:
72079
72080 restart = &current_thread_info()->restart_block;
72081 restart->fn = futex_wait_restart;
72082- restart->futex.uaddr = (u32 *)uaddr;
72083+ restart->futex.uaddr = uaddr;
72084 restart->futex.val = val;
72085 restart->futex.time = abs_time->tv64;
72086 restart->futex.bitset = bitset;
72087@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
72088 struct futex_q q;
72089 int res, ret;
72090
72091+ pax_track_stack();
72092+
72093 if (!bitset)
72094 return -EINVAL;
72095
72096@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
72097 if (!p)
72098 goto err_unlock;
72099 ret = -EPERM;
72100+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72101+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72102+ goto err_unlock;
72103+#endif
72104 pcred = __task_cred(p);
72105 if (cred->euid != pcred->euid &&
72106 cred->euid != pcred->uid &&
72107@@ -2489,7 +2503,7 @@ retry:
72108 */
72109 static inline int fetch_robust_entry(struct robust_list __user **entry,
72110 struct robust_list __user * __user *head,
72111- int *pi)
72112+ unsigned int *pi)
72113 {
72114 unsigned long uentry;
72115
72116@@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72117 {
72118 u32 curval;
72119 int i;
72120+ mm_segment_t oldfs;
72121
72122 /*
72123 * This will fail and we want it. Some arch implementations do
72124@@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72125 * implementation, the non functional ones will return
72126 * -ENOSYS.
72127 */
72128+ oldfs = get_fs();
72129+ set_fs(USER_DS);
72130 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72131+ set_fs(oldfs);
72132 if (curval == -EFAULT)
72133 futex_cmpxchg_enabled = 1;
72134
72135diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72136index 2357165..eb25501 100644
72137--- a/kernel/futex_compat.c
72138+++ b/kernel/futex_compat.c
72139@@ -10,6 +10,7 @@
72140 #include <linux/compat.h>
72141 #include <linux/nsproxy.h>
72142 #include <linux/futex.h>
72143+#include <linux/ptrace.h>
72144
72145 #include <asm/uaccess.h>
72146
72147@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72148 {
72149 struct compat_robust_list_head __user *head;
72150 unsigned long ret;
72151- const struct cred *cred = current_cred(), *pcred;
72152+ const struct cred *cred = current_cred();
72153+ const struct cred *pcred;
72154
72155 if (!futex_cmpxchg_enabled)
72156 return -ENOSYS;
72157@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72158 if (!p)
72159 goto err_unlock;
72160 ret = -EPERM;
72161+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72162+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72163+ goto err_unlock;
72164+#endif
72165 pcred = __task_cred(p);
72166 if (cred->euid != pcred->euid &&
72167 cred->euid != pcred->uid &&
72168diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72169index 9b22d03..6295b62 100644
72170--- a/kernel/gcov/base.c
72171+++ b/kernel/gcov/base.c
72172@@ -102,11 +102,6 @@ void gcov_enable_events(void)
72173 }
72174
72175 #ifdef CONFIG_MODULES
72176-static inline int within(void *addr, void *start, unsigned long size)
72177-{
72178- return ((addr >= start) && (addr < start + size));
72179-}
72180-
72181 /* Update list and generate events when modules are unloaded. */
72182 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72183 void *data)
72184@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72185 prev = NULL;
72186 /* Remove entries located in module from linked list. */
72187 for (info = gcov_info_head; info; info = info->next) {
72188- if (within(info, mod->module_core, mod->core_size)) {
72189+ if (within_module_core_rw((unsigned long)info, mod)) {
72190 if (prev)
72191 prev->next = info->next;
72192 else
72193diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72194index a6e9d00..a0da4f9 100644
72195--- a/kernel/hrtimer.c
72196+++ b/kernel/hrtimer.c
72197@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72198 local_irq_restore(flags);
72199 }
72200
72201-static void run_hrtimer_softirq(struct softirq_action *h)
72202+static void run_hrtimer_softirq(void)
72203 {
72204 hrtimer_peek_ahead_timers();
72205 }
72206diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72207index 8b6b8b6..6bc87df 100644
72208--- a/kernel/kallsyms.c
72209+++ b/kernel/kallsyms.c
72210@@ -11,6 +11,9 @@
72211 * Changed the compression method from stem compression to "table lookup"
72212 * compression (see scripts/kallsyms.c for a more complete description)
72213 */
72214+#ifdef CONFIG_GRKERNSEC_HIDESYM
72215+#define __INCLUDED_BY_HIDESYM 1
72216+#endif
72217 #include <linux/kallsyms.h>
72218 #include <linux/module.h>
72219 #include <linux/init.h>
72220@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72221
72222 static inline int is_kernel_inittext(unsigned long addr)
72223 {
72224+ if (system_state != SYSTEM_BOOTING)
72225+ return 0;
72226+
72227 if (addr >= (unsigned long)_sinittext
72228 && addr <= (unsigned long)_einittext)
72229 return 1;
72230 return 0;
72231 }
72232
72233+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72234+#ifdef CONFIG_MODULES
72235+static inline int is_module_text(unsigned long addr)
72236+{
72237+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72238+ return 1;
72239+
72240+ addr = ktla_ktva(addr);
72241+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72242+}
72243+#else
72244+static inline int is_module_text(unsigned long addr)
72245+{
72246+ return 0;
72247+}
72248+#endif
72249+#endif
72250+
72251 static inline int is_kernel_text(unsigned long addr)
72252 {
72253 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72254@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72255
72256 static inline int is_kernel(unsigned long addr)
72257 {
72258+
72259+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72260+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
72261+ return 1;
72262+
72263+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72264+#else
72265 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72266+#endif
72267+
72268 return 1;
72269 return in_gate_area_no_task(addr);
72270 }
72271
72272 static int is_ksym_addr(unsigned long addr)
72273 {
72274+
72275+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72276+ if (is_module_text(addr))
72277+ return 0;
72278+#endif
72279+
72280 if (all_var)
72281 return is_kernel(addr);
72282
72283@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
72284
72285 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
72286 {
72287- iter->name[0] = '\0';
72288 iter->nameoff = get_symbol_offset(new_pos);
72289 iter->pos = new_pos;
72290 }
72291@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
72292 {
72293 struct kallsym_iter *iter = m->private;
72294
72295+#ifdef CONFIG_GRKERNSEC_HIDESYM
72296+ if (current_uid())
72297+ return 0;
72298+#endif
72299+
72300 /* Some debugging symbols have no name. Ignore them. */
72301 if (!iter->name[0])
72302 return 0;
72303@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
72304 struct kallsym_iter *iter;
72305 int ret;
72306
72307- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
72308+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
72309 if (!iter)
72310 return -ENOMEM;
72311 reset_iter(iter, 0);
72312diff --git a/kernel/kexec.c b/kernel/kexec.c
72313index f336e21..9c1c20b 100644
72314--- a/kernel/kexec.c
72315+++ b/kernel/kexec.c
72316@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
72317 unsigned long flags)
72318 {
72319 struct compat_kexec_segment in;
72320- struct kexec_segment out, __user *ksegments;
72321+ struct kexec_segment out;
72322+ struct kexec_segment __user *ksegments;
72323 unsigned long i, result;
72324
72325 /* Don't allow clients that don't understand the native
72326diff --git a/kernel/kgdb.c b/kernel/kgdb.c
72327index 53dae4b..9ba3743 100644
72328--- a/kernel/kgdb.c
72329+++ b/kernel/kgdb.c
72330@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
72331 /* Guard for recursive entry */
72332 static int exception_level;
72333
72334-static struct kgdb_io *kgdb_io_ops;
72335+static const struct kgdb_io *kgdb_io_ops;
72336 static DEFINE_SPINLOCK(kgdb_registration_lock);
72337
72338 /* kgdb console driver is loaded */
72339@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
72340 */
72341 static atomic_t passive_cpu_wait[NR_CPUS];
72342 static atomic_t cpu_in_kgdb[NR_CPUS];
72343-atomic_t kgdb_setting_breakpoint;
72344+atomic_unchecked_t kgdb_setting_breakpoint;
72345
72346 struct task_struct *kgdb_usethread;
72347 struct task_struct *kgdb_contthread;
72348@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
72349 sizeof(unsigned long)];
72350
72351 /* to keep track of the CPU which is doing the single stepping*/
72352-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72353+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72354
72355 /*
72356 * If you are debugging a problem where roundup (the collection of
72357@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
72358 return 0;
72359 if (kgdb_connected)
72360 return 1;
72361- if (atomic_read(&kgdb_setting_breakpoint))
72362+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
72363 return 1;
72364 if (print_wait)
72365 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
72366@@ -1426,8 +1426,8 @@ acquirelock:
72367 * instance of the exception handler wanted to come into the
72368 * debugger on a different CPU via a single step
72369 */
72370- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
72371- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
72372+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
72373+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
72374
72375 atomic_set(&kgdb_active, -1);
72376 touch_softlockup_watchdog();
72377@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
72378 *
72379 * Register it with the KGDB core.
72380 */
72381-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
72382+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
72383 {
72384 int err;
72385
72386@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
72387 *
72388 * Unregister it with the KGDB core.
72389 */
72390-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
72391+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
72392 {
72393 BUG_ON(kgdb_connected);
72394
72395@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
72396 */
72397 void kgdb_breakpoint(void)
72398 {
72399- atomic_set(&kgdb_setting_breakpoint, 1);
72400+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
72401 wmb(); /* Sync point before breakpoint */
72402 arch_kgdb_breakpoint();
72403 wmb(); /* Sync point after breakpoint */
72404- atomic_set(&kgdb_setting_breakpoint, 0);
72405+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
72406 }
72407 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
72408
72409diff --git a/kernel/kmod.c b/kernel/kmod.c
72410index d206078..e27ba6a 100644
72411--- a/kernel/kmod.c
72412+++ b/kernel/kmod.c
72413@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
72414 * If module auto-loading support is disabled then this function
72415 * becomes a no-operation.
72416 */
72417-int __request_module(bool wait, const char *fmt, ...)
72418+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
72419 {
72420- va_list args;
72421 char module_name[MODULE_NAME_LEN];
72422 unsigned int max_modprobes;
72423 int ret;
72424- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
72425+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
72426 static char *envp[] = { "HOME=/",
72427 "TERM=linux",
72428 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
72429@@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
72430 if (ret)
72431 return ret;
72432
72433- va_start(args, fmt);
72434- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
72435- va_end(args);
72436+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
72437 if (ret >= MODULE_NAME_LEN)
72438 return -ENAMETOOLONG;
72439
72440+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72441+ if (!current_uid()) {
72442+ /* hack to workaround consolekit/udisks stupidity */
72443+ read_lock(&tasklist_lock);
72444+ if (!strcmp(current->comm, "mount") &&
72445+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
72446+ read_unlock(&tasklist_lock);
72447+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
72448+ return -EPERM;
72449+ }
72450+ read_unlock(&tasklist_lock);
72451+ }
72452+#endif
72453+
72454 /* If modprobe needs a service that is in a module, we get a recursive
72455 * loop. Limit the number of running kmod threads to max_threads/2 or
72456 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
72457@@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
72458 atomic_dec(&kmod_concurrent);
72459 return ret;
72460 }
72461+
72462+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
72463+{
72464+ va_list args;
72465+ int ret;
72466+
72467+ va_start(args, fmt);
72468+ ret = ____request_module(wait, module_param, fmt, args);
72469+ va_end(args);
72470+
72471+ return ret;
72472+}
72473+
72474+int __request_module(bool wait, const char *fmt, ...)
72475+{
72476+ va_list args;
72477+ int ret;
72478+
72479+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72480+ if (current_uid()) {
72481+ char module_param[MODULE_NAME_LEN];
72482+
72483+ memset(module_param, 0, sizeof(module_param));
72484+
72485+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
72486+
72487+ va_start(args, fmt);
72488+ ret = ____request_module(wait, module_param, fmt, args);
72489+ va_end(args);
72490+
72491+ return ret;
72492+ }
72493+#endif
72494+
72495+ va_start(args, fmt);
72496+ ret = ____request_module(wait, NULL, fmt, args);
72497+ va_end(args);
72498+
72499+ return ret;
72500+}
72501+
72502+
72503 EXPORT_SYMBOL(__request_module);
72504 #endif /* CONFIG_MODULES */
72505
72506@@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
72507 *
72508 * Thus the __user pointer cast is valid here.
72509 */
72510- sys_wait4(pid, (int __user *)&ret, 0, NULL);
72511+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
72512
72513 /*
72514 * If ret is 0, either ____call_usermodehelper failed and the
72515diff --git a/kernel/kprobes.c b/kernel/kprobes.c
72516index 176d825..77fa8ea 100644
72517--- a/kernel/kprobes.c
72518+++ b/kernel/kprobes.c
72519@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
72520 * kernel image and loaded module images reside. This is required
72521 * so x86_64 can correctly handle the %rip-relative fixups.
72522 */
72523- kip->insns = module_alloc(PAGE_SIZE);
72524+ kip->insns = module_alloc_exec(PAGE_SIZE);
72525 if (!kip->insns) {
72526 kfree(kip);
72527 return NULL;
72528@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
72529 */
72530 if (!list_is_singular(&kprobe_insn_pages)) {
72531 list_del(&kip->list);
72532- module_free(NULL, kip->insns);
72533+ module_free_exec(NULL, kip->insns);
72534 kfree(kip);
72535 }
72536 return 1;
72537@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
72538 {
72539 int i, err = 0;
72540 unsigned long offset = 0, size = 0;
72541- char *modname, namebuf[128];
72542+ char *modname, namebuf[KSYM_NAME_LEN];
72543 const char *symbol_name;
72544 void *addr;
72545 struct kprobe_blackpoint *kb;
72546@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
72547 const char *sym = NULL;
72548 unsigned int i = *(loff_t *) v;
72549 unsigned long offset = 0;
72550- char *modname, namebuf[128];
72551+ char *modname, namebuf[KSYM_NAME_LEN];
72552
72553 head = &kprobe_table[i];
72554 preempt_disable();
72555diff --git a/kernel/lockdep.c b/kernel/lockdep.c
72556index d86fe89..d12fc66 100644
72557--- a/kernel/lockdep.c
72558+++ b/kernel/lockdep.c
72559@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
72560 /*
72561 * Various lockdep statistics:
72562 */
72563-atomic_t chain_lookup_hits;
72564-atomic_t chain_lookup_misses;
72565-atomic_t hardirqs_on_events;
72566-atomic_t hardirqs_off_events;
72567-atomic_t redundant_hardirqs_on;
72568-atomic_t redundant_hardirqs_off;
72569-atomic_t softirqs_on_events;
72570-atomic_t softirqs_off_events;
72571-atomic_t redundant_softirqs_on;
72572-atomic_t redundant_softirqs_off;
72573-atomic_t nr_unused_locks;
72574-atomic_t nr_cyclic_checks;
72575-atomic_t nr_find_usage_forwards_checks;
72576-atomic_t nr_find_usage_backwards_checks;
72577+atomic_unchecked_t chain_lookup_hits;
72578+atomic_unchecked_t chain_lookup_misses;
72579+atomic_unchecked_t hardirqs_on_events;
72580+atomic_unchecked_t hardirqs_off_events;
72581+atomic_unchecked_t redundant_hardirqs_on;
72582+atomic_unchecked_t redundant_hardirqs_off;
72583+atomic_unchecked_t softirqs_on_events;
72584+atomic_unchecked_t softirqs_off_events;
72585+atomic_unchecked_t redundant_softirqs_on;
72586+atomic_unchecked_t redundant_softirqs_off;
72587+atomic_unchecked_t nr_unused_locks;
72588+atomic_unchecked_t nr_cyclic_checks;
72589+atomic_unchecked_t nr_find_usage_forwards_checks;
72590+atomic_unchecked_t nr_find_usage_backwards_checks;
72591 #endif
72592
72593 /*
72594@@ -577,6 +577,10 @@ static int static_obj(void *obj)
72595 int i;
72596 #endif
72597
72598+#ifdef CONFIG_PAX_KERNEXEC
72599+ start = ktla_ktva(start);
72600+#endif
72601+
72602 /*
72603 * static variable?
72604 */
72605@@ -592,8 +596,7 @@ static int static_obj(void *obj)
72606 */
72607 for_each_possible_cpu(i) {
72608 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
72609- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
72610- + per_cpu_offset(i);
72611+ end = start + PERCPU_ENOUGH_ROOM;
72612
72613 if ((addr >= start) && (addr < end))
72614 return 1;
72615@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
72616 if (!static_obj(lock->key)) {
72617 debug_locks_off();
72618 printk("INFO: trying to register non-static key.\n");
72619+ printk("lock:%pS key:%pS.\n", lock, lock->key);
72620 printk("the code is fine but needs lockdep annotation.\n");
72621 printk("turning off the locking correctness validator.\n");
72622 dump_stack();
72623@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
72624 if (!class)
72625 return 0;
72626 }
72627- debug_atomic_inc((atomic_t *)&class->ops);
72628+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
72629 if (very_verbose(class)) {
72630 printk("\nacquire class [%p] %s", class->key, class->name);
72631 if (class->name_version > 1)
72632diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
72633index a2ee95a..092f0f2 100644
72634--- a/kernel/lockdep_internals.h
72635+++ b/kernel/lockdep_internals.h
72636@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
72637 /*
72638 * Various lockdep statistics:
72639 */
72640-extern atomic_t chain_lookup_hits;
72641-extern atomic_t chain_lookup_misses;
72642-extern atomic_t hardirqs_on_events;
72643-extern atomic_t hardirqs_off_events;
72644-extern atomic_t redundant_hardirqs_on;
72645-extern atomic_t redundant_hardirqs_off;
72646-extern atomic_t softirqs_on_events;
72647-extern atomic_t softirqs_off_events;
72648-extern atomic_t redundant_softirqs_on;
72649-extern atomic_t redundant_softirqs_off;
72650-extern atomic_t nr_unused_locks;
72651-extern atomic_t nr_cyclic_checks;
72652-extern atomic_t nr_cyclic_check_recursions;
72653-extern atomic_t nr_find_usage_forwards_checks;
72654-extern atomic_t nr_find_usage_forwards_recursions;
72655-extern atomic_t nr_find_usage_backwards_checks;
72656-extern atomic_t nr_find_usage_backwards_recursions;
72657-# define debug_atomic_inc(ptr) atomic_inc(ptr)
72658-# define debug_atomic_dec(ptr) atomic_dec(ptr)
72659-# define debug_atomic_read(ptr) atomic_read(ptr)
72660+extern atomic_unchecked_t chain_lookup_hits;
72661+extern atomic_unchecked_t chain_lookup_misses;
72662+extern atomic_unchecked_t hardirqs_on_events;
72663+extern atomic_unchecked_t hardirqs_off_events;
72664+extern atomic_unchecked_t redundant_hardirqs_on;
72665+extern atomic_unchecked_t redundant_hardirqs_off;
72666+extern atomic_unchecked_t softirqs_on_events;
72667+extern atomic_unchecked_t softirqs_off_events;
72668+extern atomic_unchecked_t redundant_softirqs_on;
72669+extern atomic_unchecked_t redundant_softirqs_off;
72670+extern atomic_unchecked_t nr_unused_locks;
72671+extern atomic_unchecked_t nr_cyclic_checks;
72672+extern atomic_unchecked_t nr_cyclic_check_recursions;
72673+extern atomic_unchecked_t nr_find_usage_forwards_checks;
72674+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
72675+extern atomic_unchecked_t nr_find_usage_backwards_checks;
72676+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
72677+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
72678+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
72679+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
72680 #else
72681 # define debug_atomic_inc(ptr) do { } while (0)
72682 # define debug_atomic_dec(ptr) do { } while (0)
72683diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
72684index d4aba4f..02a353f 100644
72685--- a/kernel/lockdep_proc.c
72686+++ b/kernel/lockdep_proc.c
72687@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
72688
72689 static void print_name(struct seq_file *m, struct lock_class *class)
72690 {
72691- char str[128];
72692+ char str[KSYM_NAME_LEN];
72693 const char *name = class->name;
72694
72695 if (!name) {
72696diff --git a/kernel/module.c b/kernel/module.c
72697index 4b270e6..2226274 100644
72698--- a/kernel/module.c
72699+++ b/kernel/module.c
72700@@ -55,6 +55,7 @@
72701 #include <linux/async.h>
72702 #include <linux/percpu.h>
72703 #include <linux/kmemleak.h>
72704+#include <linux/grsecurity.h>
72705
72706 #define CREATE_TRACE_POINTS
72707 #include <trace/events/module.h>
72708@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
72709 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
72710
72711 /* Bounds of module allocation, for speeding __module_address */
72712-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
72713+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
72714+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
72715
72716 int register_module_notifier(struct notifier_block * nb)
72717 {
72718@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72719 return true;
72720
72721 list_for_each_entry_rcu(mod, &modules, list) {
72722- struct symsearch arr[] = {
72723+ struct symsearch modarr[] = {
72724 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
72725 NOT_GPL_ONLY, false },
72726 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
72727@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72728 #endif
72729 };
72730
72731- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
72732+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
72733 return true;
72734 }
72735 return false;
72736@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
72737 void *ptr;
72738 int cpu;
72739
72740- if (align > PAGE_SIZE) {
72741+ if (align-1 >= PAGE_SIZE) {
72742 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
72743 name, align, PAGE_SIZE);
72744 align = PAGE_SIZE;
72745@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
72746 * /sys/module/foo/sections stuff
72747 * J. Corbet <corbet@lwn.net>
72748 */
72749-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
72750+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72751
72752 static inline bool sect_empty(const Elf_Shdr *sect)
72753 {
72754@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
72755 destroy_params(mod->kp, mod->num_kp);
72756
72757 /* This may be NULL, but that's OK */
72758- module_free(mod, mod->module_init);
72759+ module_free(mod, mod->module_init_rw);
72760+ module_free_exec(mod, mod->module_init_rx);
72761 kfree(mod->args);
72762 if (mod->percpu)
72763 percpu_modfree(mod->percpu);
72764@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
72765 percpu_modfree(mod->refptr);
72766 #endif
72767 /* Free lock-classes: */
72768- lockdep_free_key_range(mod->module_core, mod->core_size);
72769+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
72770+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
72771
72772 /* Finally, free the core (containing the module structure) */
72773- module_free(mod, mod->module_core);
72774+ module_free_exec(mod, mod->module_core_rx);
72775+ module_free(mod, mod->module_core_rw);
72776
72777 #ifdef CONFIG_MPU
72778 update_protections(current->mm);
72779@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72780 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72781 int ret = 0;
72782 const struct kernel_symbol *ksym;
72783+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72784+ int is_fs_load = 0;
72785+ int register_filesystem_found = 0;
72786+ char *p;
72787+
72788+ p = strstr(mod->args, "grsec_modharden_fs");
72789+
72790+ if (p) {
72791+ char *endptr = p + strlen("grsec_modharden_fs");
72792+ /* copy \0 as well */
72793+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
72794+ is_fs_load = 1;
72795+ }
72796+#endif
72797+
72798
72799 for (i = 1; i < n; i++) {
72800+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72801+ const char *name = strtab + sym[i].st_name;
72802+
72803+ /* it's a real shame this will never get ripped and copied
72804+ upstream! ;(
72805+ */
72806+ if (is_fs_load && !strcmp(name, "register_filesystem"))
72807+ register_filesystem_found = 1;
72808+#endif
72809 switch (sym[i].st_shndx) {
72810 case SHN_COMMON:
72811 /* We compiled with -fno-common. These are not
72812@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72813 strtab + sym[i].st_name, mod);
72814 /* Ok if resolved. */
72815 if (ksym) {
72816+ pax_open_kernel();
72817 sym[i].st_value = ksym->value;
72818+ pax_close_kernel();
72819 break;
72820 }
72821
72822@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72823 secbase = (unsigned long)mod->percpu;
72824 else
72825 secbase = sechdrs[sym[i].st_shndx].sh_addr;
72826+ pax_open_kernel();
72827 sym[i].st_value += secbase;
72828+ pax_close_kernel();
72829 break;
72830 }
72831 }
72832
72833+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72834+ if (is_fs_load && !register_filesystem_found) {
72835+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
72836+ ret = -EPERM;
72837+ }
72838+#endif
72839+
72840 return ret;
72841 }
72842
72843@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
72844 || s->sh_entsize != ~0UL
72845 || strstarts(secstrings + s->sh_name, ".init"))
72846 continue;
72847- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
72848+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72849+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
72850+ else
72851+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
72852 DEBUGP("\t%s\n", secstrings + s->sh_name);
72853 }
72854- if (m == 0)
72855- mod->core_text_size = mod->core_size;
72856 }
72857
72858 DEBUGP("Init section allocation order:\n");
72859@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
72860 || s->sh_entsize != ~0UL
72861 || !strstarts(secstrings + s->sh_name, ".init"))
72862 continue;
72863- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
72864- | INIT_OFFSET_MASK);
72865+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72866+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
72867+ else
72868+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
72869+ s->sh_entsize |= INIT_OFFSET_MASK;
72870 DEBUGP("\t%s\n", secstrings + s->sh_name);
72871 }
72872- if (m == 0)
72873- mod->init_text_size = mod->init_size;
72874 }
72875 }
72876
72877@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
72878
72879 /* As per nm */
72880 static char elf_type(const Elf_Sym *sym,
72881- Elf_Shdr *sechdrs,
72882- const char *secstrings,
72883- struct module *mod)
72884+ const Elf_Shdr *sechdrs,
72885+ const char *secstrings)
72886 {
72887 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
72888 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
72889@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
72890
72891 /* Put symbol section at end of init part of module. */
72892 symsect->sh_flags |= SHF_ALLOC;
72893- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
72894+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
72895 symindex) | INIT_OFFSET_MASK;
72896 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
72897
72898@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
72899 }
72900
72901 /* Append room for core symbols at end of core part. */
72902- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
72903- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
72904+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
72905+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
72906
72907 /* Put string table section at end of init part of module. */
72908 strsect->sh_flags |= SHF_ALLOC;
72909- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
72910+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
72911 strindex) | INIT_OFFSET_MASK;
72912 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
72913
72914 /* Append room for core symbols' strings at end of core part. */
72915- *pstroffs = mod->core_size;
72916+ *pstroffs = mod->core_size_rx;
72917 __set_bit(0, strmap);
72918- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
72919+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
72920
72921 return symoffs;
72922 }
72923@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
72924 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72925 mod->strtab = (void *)sechdrs[strindex].sh_addr;
72926
72927+ pax_open_kernel();
72928+
72929 /* Set types up while we still have access to sections. */
72930 for (i = 0; i < mod->num_symtab; i++)
72931 mod->symtab[i].st_info
72932- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
72933+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
72934
72935- mod->core_symtab = dst = mod->module_core + symoffs;
72936+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
72937 src = mod->symtab;
72938 *dst = *src;
72939 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
72940@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
72941 }
72942 mod->core_num_syms = ndst;
72943
72944- mod->core_strtab = s = mod->module_core + stroffs;
72945+ mod->core_strtab = s = mod->module_core_rx + stroffs;
72946 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
72947 if (test_bit(i, strmap))
72948 *++s = mod->strtab[i];
72949+
72950+ pax_close_kernel();
72951 }
72952 #else
72953 static inline unsigned long layout_symtab(struct module *mod,
72954@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
72955 #endif
72956 }
72957
72958-static void *module_alloc_update_bounds(unsigned long size)
72959+static void *module_alloc_update_bounds_rw(unsigned long size)
72960 {
72961 void *ret = module_alloc(size);
72962
72963 if (ret) {
72964 /* Update module bounds. */
72965- if ((unsigned long)ret < module_addr_min)
72966- module_addr_min = (unsigned long)ret;
72967- if ((unsigned long)ret + size > module_addr_max)
72968- module_addr_max = (unsigned long)ret + size;
72969+ if ((unsigned long)ret < module_addr_min_rw)
72970+ module_addr_min_rw = (unsigned long)ret;
72971+ if ((unsigned long)ret + size > module_addr_max_rw)
72972+ module_addr_max_rw = (unsigned long)ret + size;
72973+ }
72974+ return ret;
72975+}
72976+
72977+static void *module_alloc_update_bounds_rx(unsigned long size)
72978+{
72979+ void *ret = module_alloc_exec(size);
72980+
72981+ if (ret) {
72982+ /* Update module bounds. */
72983+ if ((unsigned long)ret < module_addr_min_rx)
72984+ module_addr_min_rx = (unsigned long)ret;
72985+ if ((unsigned long)ret + size > module_addr_max_rx)
72986+ module_addr_max_rx = (unsigned long)ret + size;
72987 }
72988 return ret;
72989 }
72990@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
72991 unsigned int i;
72992
72993 /* only scan the sections containing data */
72994- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
72995- (unsigned long)mod->module_core,
72996+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
72997+ (unsigned long)mod->module_core_rw,
72998 sizeof(struct module), GFP_KERNEL);
72999
73000 for (i = 1; i < hdr->e_shnum; i++) {
73001@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
73002 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
73003 continue;
73004
73005- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
73006- (unsigned long)mod->module_core,
73007+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
73008+ (unsigned long)mod->module_core_rw,
73009 sechdrs[i].sh_size, GFP_KERNEL);
73010 }
73011 }
73012@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
73013 Elf_Ehdr *hdr;
73014 Elf_Shdr *sechdrs;
73015 char *secstrings, *args, *modmagic, *strtab = NULL;
73016- char *staging;
73017+ char *staging, *license;
73018 unsigned int i;
73019 unsigned int symindex = 0;
73020 unsigned int strindex = 0;
73021@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
73022 goto free_hdr;
73023 }
73024
73025+ license = get_modinfo(sechdrs, infoindex, "license");
73026+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
73027+ if (!license || !license_is_gpl_compatible(license)) {
73028+		err = -ENOEXEC;
73029+ goto free_hdr;
73030+ }
73031+#endif
73032+
73033 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
73034 /* This is allowed: modprobe --force will invalidate it. */
73035 if (!modmagic) {
73036@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
73037 secstrings, &stroffs, strmap);
73038
73039 /* Do the allocs. */
73040- ptr = module_alloc_update_bounds(mod->core_size);
73041+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
73042 /*
73043 * The pointer to this block is stored in the module structure
73044 * which is inside the block. Just mark it as not being a
73045@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
73046 err = -ENOMEM;
73047 goto free_percpu;
73048 }
73049- memset(ptr, 0, mod->core_size);
73050- mod->module_core = ptr;
73051+ memset(ptr, 0, mod->core_size_rw);
73052+ mod->module_core_rw = ptr;
73053
73054- ptr = module_alloc_update_bounds(mod->init_size);
73055+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
73056 /*
73057 * The pointer to this block is stored in the module structure
73058 * which is inside the block. This block doesn't need to be
73059 * scanned as it contains data and code that will be freed
73060 * after the module is initialized.
73061 */
73062- kmemleak_ignore(ptr);
73063- if (!ptr && mod->init_size) {
73064+ kmemleak_not_leak(ptr);
73065+ if (!ptr && mod->init_size_rw) {
73066 err = -ENOMEM;
73067- goto free_core;
73068+ goto free_core_rw;
73069 }
73070- memset(ptr, 0, mod->init_size);
73071- mod->module_init = ptr;
73072+ memset(ptr, 0, mod->init_size_rw);
73073+ mod->module_init_rw = ptr;
73074+
73075+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
73076+ kmemleak_not_leak(ptr);
73077+ if (!ptr) {
73078+ err = -ENOMEM;
73079+ goto free_init_rw;
73080+ }
73081+
73082+ pax_open_kernel();
73083+ memset(ptr, 0, mod->core_size_rx);
73084+ pax_close_kernel();
73085+ mod->module_core_rx = ptr;
73086+
73087+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
73088+ kmemleak_not_leak(ptr);
73089+ if (!ptr && mod->init_size_rx) {
73090+ err = -ENOMEM;
73091+ goto free_core_rx;
73092+ }
73093+
73094+ pax_open_kernel();
73095+ memset(ptr, 0, mod->init_size_rx);
73096+ pax_close_kernel();
73097+ mod->module_init_rx = ptr;
73098
73099 /* Transfer each section which specifies SHF_ALLOC */
73100 DEBUGP("final section addresses:\n");
73101@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73102 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73103 continue;
73104
73105- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73106- dest = mod->module_init
73107- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73108- else
73109- dest = mod->module_core + sechdrs[i].sh_entsize;
73110+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73111+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73112+ dest = mod->module_init_rw
73113+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73114+ else
73115+ dest = mod->module_init_rx
73116+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73117+ } else {
73118+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73119+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73120+ else
73121+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73122+ }
73123
73124- if (sechdrs[i].sh_type != SHT_NOBITS)
73125- memcpy(dest, (void *)sechdrs[i].sh_addr,
73126- sechdrs[i].sh_size);
73127+ if (sechdrs[i].sh_type != SHT_NOBITS) {
73128+
73129+#ifdef CONFIG_PAX_KERNEXEC
73130+#ifdef CONFIG_X86_64
73131+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73132+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73133+#endif
73134+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73135+ pax_open_kernel();
73136+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73137+ pax_close_kernel();
73138+ } else
73139+#endif
73140+
73141+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73142+ }
73143 /* Update sh_addr to point to copy in image. */
73144- sechdrs[i].sh_addr = (unsigned long)dest;
73145+
73146+#ifdef CONFIG_PAX_KERNEXEC
73147+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73148+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73149+ else
73150+#endif
73151+
73152+ sechdrs[i].sh_addr = (unsigned long)dest;
73153 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73154 }
73155 /* Module has been moved. */
73156@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73157 mod->name);
73158 if (!mod->refptr) {
73159 err = -ENOMEM;
73160- goto free_init;
73161+ goto free_init_rx;
73162 }
73163 #endif
73164 /* Now we've moved module, initialize linked lists, etc. */
73165@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73166 goto free_unload;
73167
73168 /* Set up license info based on the info section */
73169- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73170+ set_license(mod, license);
73171
73172 /*
73173 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73174@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73175 /* Set up MODINFO_ATTR fields */
73176 setup_modinfo(mod, sechdrs, infoindex);
73177
73178+ mod->args = args;
73179+
73180+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73181+ {
73182+ char *p, *p2;
73183+
73184+ if (strstr(mod->args, "grsec_modharden_netdev")) {
73185+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
73186+ err = -EPERM;
73187+ goto cleanup;
73188+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73189+ p += strlen("grsec_modharden_normal");
73190+ p2 = strstr(p, "_");
73191+ if (p2) {
73192+ *p2 = '\0';
73193+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73194+ *p2 = '_';
73195+ }
73196+ err = -EPERM;
73197+ goto cleanup;
73198+ }
73199+ }
73200+#endif
73201+
73202+
73203 /* Fix up syms, so that st_value is a pointer to location. */
73204 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73205 mod);
73206@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73207
73208 /* Now do relocations. */
73209 for (i = 1; i < hdr->e_shnum; i++) {
73210- const char *strtab = (char *)sechdrs[strindex].sh_addr;
73211 unsigned int info = sechdrs[i].sh_info;
73212+ strtab = (char *)sechdrs[strindex].sh_addr;
73213
73214 /* Not a valid relocation section? */
73215 if (info >= hdr->e_shnum)
73216@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73217 * Do it before processing of module parameters, so the module
73218 * can provide parameter accessor functions of its own.
73219 */
73220- if (mod->module_init)
73221- flush_icache_range((unsigned long)mod->module_init,
73222- (unsigned long)mod->module_init
73223- + mod->init_size);
73224- flush_icache_range((unsigned long)mod->module_core,
73225- (unsigned long)mod->module_core + mod->core_size);
73226+ if (mod->module_init_rx)
73227+ flush_icache_range((unsigned long)mod->module_init_rx,
73228+ (unsigned long)mod->module_init_rx
73229+ + mod->init_size_rx);
73230+ flush_icache_range((unsigned long)mod->module_core_rx,
73231+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
73232
73233 set_fs(old_fs);
73234
73235- mod->args = args;
73236 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73237 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73238 mod->name);
73239@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73240 free_unload:
73241 module_unload_free(mod);
73242 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73243+ free_init_rx:
73244 percpu_modfree(mod->refptr);
73245- free_init:
73246 #endif
73247- module_free(mod, mod->module_init);
73248- free_core:
73249- module_free(mod, mod->module_core);
73250+ module_free_exec(mod, mod->module_init_rx);
73251+ free_core_rx:
73252+ module_free_exec(mod, mod->module_core_rx);
73253+ free_init_rw:
73254+ module_free(mod, mod->module_init_rw);
73255+ free_core_rw:
73256+ module_free(mod, mod->module_core_rw);
73257 /* mod will be freed with core. Don't access it beyond this line! */
73258 free_percpu:
73259 if (percpu)
73260@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73261 mod->symtab = mod->core_symtab;
73262 mod->strtab = mod->core_strtab;
73263 #endif
73264- module_free(mod, mod->module_init);
73265- mod->module_init = NULL;
73266- mod->init_size = 0;
73267- mod->init_text_size = 0;
73268+ module_free(mod, mod->module_init_rw);
73269+ module_free_exec(mod, mod->module_init_rx);
73270+ mod->module_init_rw = NULL;
73271+ mod->module_init_rx = NULL;
73272+ mod->init_size_rw = 0;
73273+ mod->init_size_rx = 0;
73274 mutex_unlock(&module_mutex);
73275
73276 return 0;
73277@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
73278 unsigned long nextval;
73279
73280 /* At worse, next value is at end of module */
73281- if (within_module_init(addr, mod))
73282- nextval = (unsigned long)mod->module_init+mod->init_text_size;
73283+ if (within_module_init_rx(addr, mod))
73284+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
73285+ else if (within_module_init_rw(addr, mod))
73286+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
73287+ else if (within_module_core_rx(addr, mod))
73288+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
73289+ else if (within_module_core_rw(addr, mod))
73290+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
73291 else
73292- nextval = (unsigned long)mod->module_core+mod->core_text_size;
73293+ return NULL;
73294
73295 /* Scan for closest preceeding symbol, and next symbol. (ELF
73296 starts real symbols at 1). */
73297@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
73298 char buf[8];
73299
73300 seq_printf(m, "%s %u",
73301- mod->name, mod->init_size + mod->core_size);
73302+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
73303 print_unload_info(m, mod);
73304
73305 /* Informative for users. */
73306@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
73307 mod->state == MODULE_STATE_COMING ? "Loading":
73308 "Live");
73309 /* Used by oprofile and other similar tools. */
73310- seq_printf(m, " 0x%p", mod->module_core);
73311+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73312
73313 /* Taints info */
73314 if (mod->taints)
73315@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
73316
73317 static int __init proc_modules_init(void)
73318 {
73319+#ifndef CONFIG_GRKERNSEC_HIDESYM
73320+#ifdef CONFIG_GRKERNSEC_PROC_USER
73321+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73322+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73323+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
73324+#else
73325 proc_create("modules", 0, NULL, &proc_modules_operations);
73326+#endif
73327+#else
73328+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73329+#endif
73330 return 0;
73331 }
73332 module_init(proc_modules_init);
73333@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
73334 {
73335 struct module *mod;
73336
73337- if (addr < module_addr_min || addr > module_addr_max)
73338+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
73339+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
73340 return NULL;
73341
73342 list_for_each_entry_rcu(mod, &modules, list)
73343- if (within_module_core(addr, mod)
73344- || within_module_init(addr, mod))
73345+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
73346 return mod;
73347 return NULL;
73348 }
73349@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
73350 */
73351 struct module *__module_text_address(unsigned long addr)
73352 {
73353- struct module *mod = __module_address(addr);
73354+ struct module *mod;
73355+
73356+#ifdef CONFIG_X86_32
73357+ addr = ktla_ktva(addr);
73358+#endif
73359+
73360+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
73361+ return NULL;
73362+
73363+ mod = __module_address(addr);
73364+
73365 if (mod) {
73366 /* Make sure it's within the text section. */
73367- if (!within(addr, mod->module_init, mod->init_text_size)
73368- && !within(addr, mod->module_core, mod->core_text_size))
73369+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
73370 mod = NULL;
73371 }
73372 return mod;
73373diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
73374index ec815a9..fe46e99 100644
73375--- a/kernel/mutex-debug.c
73376+++ b/kernel/mutex-debug.c
73377@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
73378 }
73379
73380 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73381- struct thread_info *ti)
73382+ struct task_struct *task)
73383 {
73384 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
73385
73386 /* Mark the current thread as blocked on the lock: */
73387- ti->task->blocked_on = waiter;
73388+ task->blocked_on = waiter;
73389 }
73390
73391 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73392- struct thread_info *ti)
73393+ struct task_struct *task)
73394 {
73395 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
73396- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
73397- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
73398- ti->task->blocked_on = NULL;
73399+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
73400+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
73401+ task->blocked_on = NULL;
73402
73403 list_del_init(&waiter->list);
73404 waiter->task = NULL;
73405@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
73406 return;
73407
73408 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
73409- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
73410+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
73411 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
73412 mutex_clear_owner(lock);
73413 }
73414diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
73415index 6b2d735..372d3c4 100644
73416--- a/kernel/mutex-debug.h
73417+++ b/kernel/mutex-debug.h
73418@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
73419 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
73420 extern void debug_mutex_add_waiter(struct mutex *lock,
73421 struct mutex_waiter *waiter,
73422- struct thread_info *ti);
73423+ struct task_struct *task);
73424 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73425- struct thread_info *ti);
73426+ struct task_struct *task);
73427 extern void debug_mutex_unlock(struct mutex *lock);
73428 extern void debug_mutex_init(struct mutex *lock, const char *name,
73429 struct lock_class_key *key);
73430
73431 static inline void mutex_set_owner(struct mutex *lock)
73432 {
73433- lock->owner = current_thread_info();
73434+ lock->owner = current;
73435 }
73436
73437 static inline void mutex_clear_owner(struct mutex *lock)
73438diff --git a/kernel/mutex.c b/kernel/mutex.c
73439index f85644c..5ee9f77 100644
73440--- a/kernel/mutex.c
73441+++ b/kernel/mutex.c
73442@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73443 */
73444
73445 for (;;) {
73446- struct thread_info *owner;
73447+ struct task_struct *owner;
73448
73449 /*
73450 * If we own the BKL, then don't spin. The owner of
73451@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73452 spin_lock_mutex(&lock->wait_lock, flags);
73453
73454 debug_mutex_lock_common(lock, &waiter);
73455- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
73456+ debug_mutex_add_waiter(lock, &waiter, task);
73457
73458 /* add waiting tasks to the end of the waitqueue (FIFO): */
73459 list_add_tail(&waiter.list, &lock->wait_list);
73460@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73461 * TASK_UNINTERRUPTIBLE case.)
73462 */
73463 if (unlikely(signal_pending_state(state, task))) {
73464- mutex_remove_waiter(lock, &waiter,
73465- task_thread_info(task));
73466+ mutex_remove_waiter(lock, &waiter, task);
73467 mutex_release(&lock->dep_map, 1, ip);
73468 spin_unlock_mutex(&lock->wait_lock, flags);
73469
73470@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73471 done:
73472 lock_acquired(&lock->dep_map, ip);
73473 /* got the lock - rejoice! */
73474- mutex_remove_waiter(lock, &waiter, current_thread_info());
73475+ mutex_remove_waiter(lock, &waiter, task);
73476 mutex_set_owner(lock);
73477
73478 /* set it to 0 if there are no waiters left: */
73479diff --git a/kernel/mutex.h b/kernel/mutex.h
73480index 67578ca..4115fbf 100644
73481--- a/kernel/mutex.h
73482+++ b/kernel/mutex.h
73483@@ -19,7 +19,7 @@
73484 #ifdef CONFIG_SMP
73485 static inline void mutex_set_owner(struct mutex *lock)
73486 {
73487- lock->owner = current_thread_info();
73488+ lock->owner = current;
73489 }
73490
73491 static inline void mutex_clear_owner(struct mutex *lock)
73492diff --git a/kernel/panic.c b/kernel/panic.c
73493index 96b45d0..ff70a46 100644
73494--- a/kernel/panic.c
73495+++ b/kernel/panic.c
73496@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
73497 va_end(args);
73498 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
73499 #ifdef CONFIG_DEBUG_BUGVERBOSE
73500- dump_stack();
73501+ /*
73502+ * Avoid nested stack-dumping if a panic occurs during oops processing
73503+ */
73504+ if (!oops_in_progress)
73505+ dump_stack();
73506 #endif
73507
73508 /*
73509@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
73510 const char *board;
73511
73512 printk(KERN_WARNING "------------[ cut here ]------------\n");
73513- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
73514+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
73515 board = dmi_get_system_info(DMI_PRODUCT_NAME);
73516 if (board)
73517 printk(KERN_WARNING "Hardware name: %s\n", board);
73518@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
73519 */
73520 void __stack_chk_fail(void)
73521 {
73522- panic("stack-protector: Kernel stack is corrupted in: %p\n",
73523+ dump_stack();
73524+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
73525 __builtin_return_address(0));
73526 }
73527 EXPORT_SYMBOL(__stack_chk_fail);
73528diff --git a/kernel/params.c b/kernel/params.c
73529index d656c27..21e452c 100644
73530--- a/kernel/params.c
73531+++ b/kernel/params.c
73532@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
73533 return ret;
73534 }
73535
73536-static struct sysfs_ops module_sysfs_ops = {
73537+static const struct sysfs_ops module_sysfs_ops = {
73538 .show = module_attr_show,
73539 .store = module_attr_store,
73540 };
73541@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
73542 return 0;
73543 }
73544
73545-static struct kset_uevent_ops module_uevent_ops = {
73546+static const struct kset_uevent_ops module_uevent_ops = {
73547 .filter = uevent_filter,
73548 };
73549
73550diff --git a/kernel/perf_event.c b/kernel/perf_event.c
73551index 37ebc14..9c121d9 100644
73552--- a/kernel/perf_event.c
73553+++ b/kernel/perf_event.c
73554@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
73555 */
73556 int sysctl_perf_event_sample_rate __read_mostly = 100000;
73557
73558-static atomic64_t perf_event_id;
73559+static atomic64_unchecked_t perf_event_id;
73560
73561 /*
73562 * Lock for (sysadmin-configurable) event reservations:
73563@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
73564 * In order to keep per-task stats reliable we need to flip the event
73565 * values when we flip the contexts.
73566 */
73567- value = atomic64_read(&next_event->count);
73568- value = atomic64_xchg(&event->count, value);
73569- atomic64_set(&next_event->count, value);
73570+ value = atomic64_read_unchecked(&next_event->count);
73571+ value = atomic64_xchg_unchecked(&event->count, value);
73572+ atomic64_set_unchecked(&next_event->count, value);
73573
73574 swap(event->total_time_enabled, next_event->total_time_enabled);
73575 swap(event->total_time_running, next_event->total_time_running);
73576@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
73577 update_event_times(event);
73578 }
73579
73580- return atomic64_read(&event->count);
73581+ return atomic64_read_unchecked(&event->count);
73582 }
73583
73584 /*
73585@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
73586 values[n++] = 1 + leader->nr_siblings;
73587 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73588 values[n++] = leader->total_time_enabled +
73589- atomic64_read(&leader->child_total_time_enabled);
73590+ atomic64_read_unchecked(&leader->child_total_time_enabled);
73591 }
73592 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73593 values[n++] = leader->total_time_running +
73594- atomic64_read(&leader->child_total_time_running);
73595+ atomic64_read_unchecked(&leader->child_total_time_running);
73596 }
73597
73598 size = n * sizeof(u64);
73599@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
73600 values[n++] = perf_event_read_value(event);
73601 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73602 values[n++] = event->total_time_enabled +
73603- atomic64_read(&event->child_total_time_enabled);
73604+ atomic64_read_unchecked(&event->child_total_time_enabled);
73605 }
73606 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73607 values[n++] = event->total_time_running +
73608- atomic64_read(&event->child_total_time_running);
73609+ atomic64_read_unchecked(&event->child_total_time_running);
73610 }
73611 if (read_format & PERF_FORMAT_ID)
73612 values[n++] = primary_event_id(event);
73613@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
73614 static void perf_event_reset(struct perf_event *event)
73615 {
73616 (void)perf_event_read(event);
73617- atomic64_set(&event->count, 0);
73618+ atomic64_set_unchecked(&event->count, 0);
73619 perf_event_update_userpage(event);
73620 }
73621
73622@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
73623 ++userpg->lock;
73624 barrier();
73625 userpg->index = perf_event_index(event);
73626- userpg->offset = atomic64_read(&event->count);
73627+ userpg->offset = atomic64_read_unchecked(&event->count);
73628 if (event->state == PERF_EVENT_STATE_ACTIVE)
73629- userpg->offset -= atomic64_read(&event->hw.prev_count);
73630+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
73631
73632 userpg->time_enabled = event->total_time_enabled +
73633- atomic64_read(&event->child_total_time_enabled);
73634+ atomic64_read_unchecked(&event->child_total_time_enabled);
73635
73636 userpg->time_running = event->total_time_running +
73637- atomic64_read(&event->child_total_time_running);
73638+ atomic64_read_unchecked(&event->child_total_time_running);
73639
73640 barrier();
73641 ++userpg->lock;
73642@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
73643 u64 values[4];
73644 int n = 0;
73645
73646- values[n++] = atomic64_read(&event->count);
73647+ values[n++] = atomic64_read_unchecked(&event->count);
73648 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73649 values[n++] = event->total_time_enabled +
73650- atomic64_read(&event->child_total_time_enabled);
73651+ atomic64_read_unchecked(&event->child_total_time_enabled);
73652 }
73653 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73654 values[n++] = event->total_time_running +
73655- atomic64_read(&event->child_total_time_running);
73656+ atomic64_read_unchecked(&event->child_total_time_running);
73657 }
73658 if (read_format & PERF_FORMAT_ID)
73659 values[n++] = primary_event_id(event);
73660@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73661 if (leader != event)
73662 leader->pmu->read(leader);
73663
73664- values[n++] = atomic64_read(&leader->count);
73665+ values[n++] = atomic64_read_unchecked(&leader->count);
73666 if (read_format & PERF_FORMAT_ID)
73667 values[n++] = primary_event_id(leader);
73668
73669@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73670 if (sub != event)
73671 sub->pmu->read(sub);
73672
73673- values[n++] = atomic64_read(&sub->count);
73674+ values[n++] = atomic64_read_unchecked(&sub->count);
73675 if (read_format & PERF_FORMAT_ID)
73676 values[n++] = primary_event_id(sub);
73677
73678@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
73679 * need to add enough zero bytes after the string to handle
73680 * the 64bit alignment we do later.
73681 */
73682- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
73683+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
73684 if (!buf) {
73685 name = strncpy(tmp, "//enomem", sizeof(tmp));
73686 goto got_name;
73687 }
73688- name = d_path(&file->f_path, buf, PATH_MAX);
73689+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
73690 if (IS_ERR(name)) {
73691 name = strncpy(tmp, "//toolong", sizeof(tmp));
73692 goto got_name;
73693@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
73694 {
73695 struct hw_perf_event *hwc = &event->hw;
73696
73697- atomic64_add(nr, &event->count);
73698+ atomic64_add_unchecked(nr, &event->count);
73699
73700 if (!hwc->sample_period)
73701 return;
73702@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
73703 u64 now;
73704
73705 now = cpu_clock(cpu);
73706- prev = atomic64_read(&event->hw.prev_count);
73707- atomic64_set(&event->hw.prev_count, now);
73708- atomic64_add(now - prev, &event->count);
73709+ prev = atomic64_read_unchecked(&event->hw.prev_count);
73710+ atomic64_set_unchecked(&event->hw.prev_count, now);
73711+ atomic64_add_unchecked(now - prev, &event->count);
73712 }
73713
73714 static int cpu_clock_perf_event_enable(struct perf_event *event)
73715@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
73716 struct hw_perf_event *hwc = &event->hw;
73717 int cpu = raw_smp_processor_id();
73718
73719- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
73720+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
73721 perf_swevent_start_hrtimer(event);
73722
73723 return 0;
73724@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
73725 u64 prev;
73726 s64 delta;
73727
73728- prev = atomic64_xchg(&event->hw.prev_count, now);
73729+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
73730 delta = now - prev;
73731- atomic64_add(delta, &event->count);
73732+ atomic64_add_unchecked(delta, &event->count);
73733 }
73734
73735 static int task_clock_perf_event_enable(struct perf_event *event)
73736@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
73737
73738 now = event->ctx->time;
73739
73740- atomic64_set(&hwc->prev_count, now);
73741+ atomic64_set_unchecked(&hwc->prev_count, now);
73742
73743 perf_swevent_start_hrtimer(event);
73744
73745@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
73746 event->parent = parent_event;
73747
73748 event->ns = get_pid_ns(current->nsproxy->pid_ns);
73749- event->id = atomic64_inc_return(&perf_event_id);
73750+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
73751
73752 event->state = PERF_EVENT_STATE_INACTIVE;
73753
73754@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
73755 if (child_event->attr.inherit_stat)
73756 perf_event_read_event(child_event, child);
73757
73758- child_val = atomic64_read(&child_event->count);
73759+ child_val = atomic64_read_unchecked(&child_event->count);
73760
73761 /*
73762 * Add back the child's count to the parent's count:
73763 */
73764- atomic64_add(child_val, &parent_event->count);
73765- atomic64_add(child_event->total_time_enabled,
73766+ atomic64_add_unchecked(child_val, &parent_event->count);
73767+ atomic64_add_unchecked(child_event->total_time_enabled,
73768 &parent_event->child_total_time_enabled);
73769- atomic64_add(child_event->total_time_running,
73770+ atomic64_add_unchecked(child_event->total_time_running,
73771 &parent_event->child_total_time_running);
73772
73773 /*
73774diff --git a/kernel/pid.c b/kernel/pid.c
73775index fce7198..4f23a7e 100644
73776--- a/kernel/pid.c
73777+++ b/kernel/pid.c
73778@@ -33,6 +33,7 @@
73779 #include <linux/rculist.h>
73780 #include <linux/bootmem.h>
73781 #include <linux/hash.h>
73782+#include <linux/security.h>
73783 #include <linux/pid_namespace.h>
73784 #include <linux/init_task.h>
73785 #include <linux/syscalls.h>
73786@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
73787
73788 int pid_max = PID_MAX_DEFAULT;
73789
73790-#define RESERVED_PIDS 300
73791+#define RESERVED_PIDS 500
73792
73793 int pid_max_min = RESERVED_PIDS + 1;
73794 int pid_max_max = PID_MAX_LIMIT;
73795@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
73796 */
73797 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
73798 {
73799- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73800+ struct task_struct *task;
73801+
73802+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73803+
73804+ if (gr_pid_is_chrooted(task))
73805+ return NULL;
73806+
73807+ return task;
73808 }
73809
73810 struct task_struct *find_task_by_vpid(pid_t vnr)
73811@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
73812 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
73813 }
73814
73815+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
73816+{
73817+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
73818+}
73819+
73820 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
73821 {
73822 struct pid *pid;
73823diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
73824index 5c9dc22..d271117 100644
73825--- a/kernel/posix-cpu-timers.c
73826+++ b/kernel/posix-cpu-timers.c
73827@@ -6,6 +6,7 @@
73828 #include <linux/posix-timers.h>
73829 #include <linux/errno.h>
73830 #include <linux/math64.h>
73831+#include <linux/security.h>
73832 #include <asm/uaccess.h>
73833 #include <linux/kernel_stat.h>
73834 #include <trace/events/timer.h>
73835@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
73836
73837 static __init int init_posix_cpu_timers(void)
73838 {
73839- struct k_clock process = {
73840+ static struct k_clock process = {
73841 .clock_getres = process_cpu_clock_getres,
73842 .clock_get = process_cpu_clock_get,
73843 .clock_set = do_posix_clock_nosettime,
73844@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
73845 .nsleep = process_cpu_nsleep,
73846 .nsleep_restart = process_cpu_nsleep_restart,
73847 };
73848- struct k_clock thread = {
73849+ static struct k_clock thread = {
73850 .clock_getres = thread_cpu_clock_getres,
73851 .clock_get = thread_cpu_clock_get,
73852 .clock_set = do_posix_clock_nosettime,
73853diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
73854index 5e76d22..cf1baeb 100644
73855--- a/kernel/posix-timers.c
73856+++ b/kernel/posix-timers.c
73857@@ -42,6 +42,7 @@
73858 #include <linux/compiler.h>
73859 #include <linux/idr.h>
73860 #include <linux/posix-timers.h>
73861+#include <linux/grsecurity.h>
73862 #include <linux/syscalls.h>
73863 #include <linux/wait.h>
73864 #include <linux/workqueue.h>
73865@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
73866 * which we beg off on and pass to do_sys_settimeofday().
73867 */
73868
73869-static struct k_clock posix_clocks[MAX_CLOCKS];
73870+static struct k_clock *posix_clocks[MAX_CLOCKS];
73871
73872 /*
73873 * These ones are defined below.
73874@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
73875 */
73876 #define CLOCK_DISPATCH(clock, call, arglist) \
73877 ((clock) < 0 ? posix_cpu_##call arglist : \
73878- (posix_clocks[clock].call != NULL \
73879- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
73880+ (posix_clocks[clock]->call != NULL \
73881+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
73882
73883 /*
73884 * Default clock hook functions when the struct k_clock passed
73885@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
73886 struct timespec *tp)
73887 {
73888 tp->tv_sec = 0;
73889- tp->tv_nsec = posix_clocks[which_clock].res;
73890+ tp->tv_nsec = posix_clocks[which_clock]->res;
73891 return 0;
73892 }
73893
73894@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
73895 return 0;
73896 if ((unsigned) which_clock >= MAX_CLOCKS)
73897 return 1;
73898- if (posix_clocks[which_clock].clock_getres != NULL)
73899+ if (posix_clocks[which_clock] == NULL)
73900 return 0;
73901- if (posix_clocks[which_clock].res != 0)
73902+ if (posix_clocks[which_clock]->clock_getres != NULL)
73903+ return 0;
73904+ if (posix_clocks[which_clock]->res != 0)
73905 return 0;
73906 return 1;
73907 }
73908@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
73909 */
73910 static __init int init_posix_timers(void)
73911 {
73912- struct k_clock clock_realtime = {
73913+ static struct k_clock clock_realtime = {
73914 .clock_getres = hrtimer_get_res,
73915 };
73916- struct k_clock clock_monotonic = {
73917+ static struct k_clock clock_monotonic = {
73918 .clock_getres = hrtimer_get_res,
73919 .clock_get = posix_ktime_get_ts,
73920 .clock_set = do_posix_clock_nosettime,
73921 };
73922- struct k_clock clock_monotonic_raw = {
73923+ static struct k_clock clock_monotonic_raw = {
73924 .clock_getres = hrtimer_get_res,
73925 .clock_get = posix_get_monotonic_raw,
73926 .clock_set = do_posix_clock_nosettime,
73927 .timer_create = no_timer_create,
73928 .nsleep = no_nsleep,
73929 };
73930- struct k_clock clock_realtime_coarse = {
73931+ static struct k_clock clock_realtime_coarse = {
73932 .clock_getres = posix_get_coarse_res,
73933 .clock_get = posix_get_realtime_coarse,
73934 .clock_set = do_posix_clock_nosettime,
73935 .timer_create = no_timer_create,
73936 .nsleep = no_nsleep,
73937 };
73938- struct k_clock clock_monotonic_coarse = {
73939+ static struct k_clock clock_monotonic_coarse = {
73940 .clock_getres = posix_get_coarse_res,
73941 .clock_get = posix_get_monotonic_coarse,
73942 .clock_set = do_posix_clock_nosettime,
73943@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
73944 .nsleep = no_nsleep,
73945 };
73946
73947+ pax_track_stack();
73948+
73949 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
73950 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
73951 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
73952@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
73953 return;
73954 }
73955
73956- posix_clocks[clock_id] = *new_clock;
73957+ posix_clocks[clock_id] = new_clock;
73958 }
73959 EXPORT_SYMBOL_GPL(register_posix_clock);
73960
73961@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
73962 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
73963 return -EFAULT;
73964
73965+ /* only the CLOCK_REALTIME clock can be set, all other clocks
73966+ have their clock_set fptr set to a nosettime dummy function
73967+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
73968+ call common_clock_set, which calls do_sys_settimeofday, which
73969+ we hook
73970+ */
73971+
73972 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
73973 }
73974
73975diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
73976index 04a9e90..bc355aa 100644
73977--- a/kernel/power/hibernate.c
73978+++ b/kernel/power/hibernate.c
73979@@ -48,14 +48,14 @@ enum {
73980
73981 static int hibernation_mode = HIBERNATION_SHUTDOWN;
73982
73983-static struct platform_hibernation_ops *hibernation_ops;
73984+static const struct platform_hibernation_ops *hibernation_ops;
73985
73986 /**
73987 * hibernation_set_ops - set the global hibernate operations
73988 * @ops: the hibernation operations to use in subsequent hibernation transitions
73989 */
73990
73991-void hibernation_set_ops(struct platform_hibernation_ops *ops)
73992+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
73993 {
73994 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
73995 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
73996diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
73997index e8b3370..484c2e4 100644
73998--- a/kernel/power/poweroff.c
73999+++ b/kernel/power/poweroff.c
74000@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
74001 .enable_mask = SYSRQ_ENABLE_BOOT,
74002 };
74003
74004-static int pm_sysrq_init(void)
74005+static int __init pm_sysrq_init(void)
74006 {
74007 register_sysrq_key('o', &sysrq_poweroff_op);
74008 return 0;
74009diff --git a/kernel/power/process.c b/kernel/power/process.c
74010index e7cd671..56d5f459 100644
74011--- a/kernel/power/process.c
74012+++ b/kernel/power/process.c
74013@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
74014 struct timeval start, end;
74015 u64 elapsed_csecs64;
74016 unsigned int elapsed_csecs;
74017+ bool timedout = false;
74018
74019 do_gettimeofday(&start);
74020
74021 end_time = jiffies + TIMEOUT;
74022 do {
74023 todo = 0;
74024+ if (time_after(jiffies, end_time))
74025+ timedout = true;
74026 read_lock(&tasklist_lock);
74027 do_each_thread(g, p) {
74028 if (frozen(p) || !freezeable(p))
74029@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
74030 * It is "frozen enough". If the task does wake
74031 * up, it will immediately call try_to_freeze.
74032 */
74033- if (!task_is_stopped_or_traced(p) &&
74034- !freezer_should_skip(p))
74035+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
74036 todo++;
74037+ if (timedout) {
74038+ printk(KERN_ERR "Task refusing to freeze:\n");
74039+ sched_show_task(p);
74040+ }
74041+ }
74042 } while_each_thread(g, p);
74043 read_unlock(&tasklist_lock);
74044 yield(); /* Yield is okay here */
74045- if (time_after(jiffies, end_time))
74046- break;
74047- } while (todo);
74048+ } while (todo && !timedout);
74049
74050 do_gettimeofday(&end);
74051 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
74052diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
74053index 40dd021..fb30ceb 100644
74054--- a/kernel/power/suspend.c
74055+++ b/kernel/power/suspend.c
74056@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
74057 [PM_SUSPEND_MEM] = "mem",
74058 };
74059
74060-static struct platform_suspend_ops *suspend_ops;
74061+static const struct platform_suspend_ops *suspend_ops;
74062
74063 /**
74064 * suspend_set_ops - Set the global suspend method table.
74065 * @ops: Pointer to ops structure.
74066 */
74067-void suspend_set_ops(struct platform_suspend_ops *ops)
74068+void suspend_set_ops(const struct platform_suspend_ops *ops)
74069 {
74070 mutex_lock(&pm_mutex);
74071 suspend_ops = ops;
74072diff --git a/kernel/printk.c b/kernel/printk.c
74073index 4cade47..4d17900 100644
74074--- a/kernel/printk.c
74075+++ b/kernel/printk.c
74076@@ -33,6 +33,7 @@
74077 #include <linux/bootmem.h>
74078 #include <linux/syscalls.h>
74079 #include <linux/kexec.h>
74080+#include <linux/syslog.h>
74081
74082 #include <asm/uaccess.h>
74083
74084@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
74085 }
74086 #endif
74087
74088-/*
74089- * Commands to do_syslog:
74090- *
74091- * 0 -- Close the log. Currently a NOP.
74092- * 1 -- Open the log. Currently a NOP.
74093- * 2 -- Read from the log.
74094- * 3 -- Read all messages remaining in the ring buffer.
74095- * 4 -- Read and clear all messages remaining in the ring buffer
74096- * 5 -- Clear ring buffer.
74097- * 6 -- Disable printk's to console
74098- * 7 -- Enable printk's to console
74099- * 8 -- Set level of messages printed to console
74100- * 9 -- Return number of unread characters in the log buffer
74101- * 10 -- Return size of the log buffer
74102- */
74103-int do_syslog(int type, char __user *buf, int len)
74104+int do_syslog(int type, char __user *buf, int len, bool from_file)
74105 {
74106 unsigned i, j, limit, count;
74107 int do_clear = 0;
74108 char c;
74109 int error = 0;
74110
74111- error = security_syslog(type);
74112+#ifdef CONFIG_GRKERNSEC_DMESG
74113+ if (grsec_enable_dmesg &&
74114+ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74115+ !capable(CAP_SYS_ADMIN))
74116+ return -EPERM;
74117+#endif
74118+
74119+ error = security_syslog(type, from_file);
74120 if (error)
74121 return error;
74122
74123 switch (type) {
74124- case 0: /* Close log */
74125+ case SYSLOG_ACTION_CLOSE: /* Close log */
74126 break;
74127- case 1: /* Open log */
74128+ case SYSLOG_ACTION_OPEN: /* Open log */
74129 break;
74130- case 2: /* Read from log */
74131+ case SYSLOG_ACTION_READ: /* Read from log */
74132 error = -EINVAL;
74133 if (!buf || len < 0)
74134 goto out;
74135@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74136 if (!error)
74137 error = i;
74138 break;
74139- case 4: /* Read/clear last kernel messages */
74140+ /* Read/clear last kernel messages */
74141+ case SYSLOG_ACTION_READ_CLEAR:
74142 do_clear = 1;
74143 /* FALL THRU */
74144- case 3: /* Read last kernel messages */
74145+ /* Read last kernel messages */
74146+ case SYSLOG_ACTION_READ_ALL:
74147 error = -EINVAL;
74148 if (!buf || len < 0)
74149 goto out;
74150@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74151 }
74152 }
74153 break;
74154- case 5: /* Clear ring buffer */
74155+ /* Clear ring buffer */
74156+ case SYSLOG_ACTION_CLEAR:
74157 logged_chars = 0;
74158 break;
74159- case 6: /* Disable logging to console */
74160+ /* Disable logging to console */
74161+ case SYSLOG_ACTION_CONSOLE_OFF:
74162 if (saved_console_loglevel == -1)
74163 saved_console_loglevel = console_loglevel;
74164 console_loglevel = minimum_console_loglevel;
74165 break;
74166- case 7: /* Enable logging to console */
74167+ /* Enable logging to console */
74168+ case SYSLOG_ACTION_CONSOLE_ON:
74169 if (saved_console_loglevel != -1) {
74170 console_loglevel = saved_console_loglevel;
74171 saved_console_loglevel = -1;
74172 }
74173 break;
74174- case 8: /* Set level of messages printed to console */
74175+ /* Set level of messages printed to console */
74176+ case SYSLOG_ACTION_CONSOLE_LEVEL:
74177 error = -EINVAL;
74178 if (len < 1 || len > 8)
74179 goto out;
74180@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74181 saved_console_loglevel = -1;
74182 error = 0;
74183 break;
74184- case 9: /* Number of chars in the log buffer */
74185+ /* Number of chars in the log buffer */
74186+ case SYSLOG_ACTION_SIZE_UNREAD:
74187 error = log_end - log_start;
74188 break;
74189- case 10: /* Size of the log buffer */
74190+ /* Size of the log buffer */
74191+ case SYSLOG_ACTION_SIZE_BUFFER:
74192 error = log_buf_len;
74193 break;
74194 default:
74195@@ -415,7 +416,7 @@ out:
74196
74197 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74198 {
74199- return do_syslog(type, buf, len);
74200+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74201 }
74202
74203 /*
74204diff --git a/kernel/profile.c b/kernel/profile.c
74205index dfadc5b..7f59404 100644
74206--- a/kernel/profile.c
74207+++ b/kernel/profile.c
74208@@ -39,7 +39,7 @@ struct profile_hit {
74209 /* Oprofile timer tick hook */
74210 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74211
74212-static atomic_t *prof_buffer;
74213+static atomic_unchecked_t *prof_buffer;
74214 static unsigned long prof_len, prof_shift;
74215
74216 int prof_on __read_mostly;
74217@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74218 hits[i].pc = 0;
74219 continue;
74220 }
74221- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74222+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74223 hits[i].hits = hits[i].pc = 0;
74224 }
74225 }
74226@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74227 * Add the current hit(s) and flush the write-queue out
74228 * to the global buffer:
74229 */
74230- atomic_add(nr_hits, &prof_buffer[pc]);
74231+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74232 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74233- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74234+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74235 hits[i].pc = hits[i].hits = 0;
74236 }
74237 out:
74238@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74239 if (prof_on != type || !prof_buffer)
74240 return;
74241 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74242- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74243+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74244 }
74245 #endif /* !CONFIG_SMP */
74246 EXPORT_SYMBOL_GPL(profile_hits);
74247@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74248 return -EFAULT;
74249 buf++; p++; count--; read++;
74250 }
74251- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74252+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74253 if (copy_to_user(buf, (void *)pnt, count))
74254 return -EFAULT;
74255 read += count;
74256@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74257 }
74258 #endif
74259 profile_discard_flip_buffers();
74260- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74261+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74262 return count;
74263 }
74264
74265diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74266index 05625f6..733bf70 100644
74267--- a/kernel/ptrace.c
74268+++ b/kernel/ptrace.c
74269@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74270 return ret;
74271 }
74272
74273-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74274+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
74275+ unsigned int log)
74276 {
74277 const struct cred *cred = current_cred(), *tcred;
74278
74279@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74280 cred->gid != tcred->egid ||
74281 cred->gid != tcred->sgid ||
74282 cred->gid != tcred->gid) &&
74283- !capable(CAP_SYS_PTRACE)) {
74284+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74285+ (log && !capable(CAP_SYS_PTRACE)))
74286+ ) {
74287 rcu_read_unlock();
74288 return -EPERM;
74289 }
74290@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74291 smp_rmb();
74292 if (task->mm)
74293 dumpable = get_dumpable(task->mm);
74294- if (!dumpable && !capable(CAP_SYS_PTRACE))
74295+ if (!dumpable &&
74296+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74297+ (log && !capable(CAP_SYS_PTRACE))))
74298 return -EPERM;
74299
74300 return security_ptrace_access_check(task, mode);
74301@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
74302 {
74303 int err;
74304 task_lock(task);
74305- err = __ptrace_may_access(task, mode);
74306+ err = __ptrace_may_access(task, mode, 0);
74307+ task_unlock(task);
74308+ return !err;
74309+}
74310+
74311+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
74312+{
74313+ int err;
74314+ task_lock(task);
74315+ err = __ptrace_may_access(task, mode, 1);
74316 task_unlock(task);
74317 return !err;
74318 }
74319@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
74320 goto out;
74321
74322 task_lock(task);
74323- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
74324+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
74325 task_unlock(task);
74326 if (retval)
74327 goto unlock_creds;
74328@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
74329 goto unlock_tasklist;
74330
74331 task->ptrace = PT_PTRACED;
74332- if (capable(CAP_SYS_PTRACE))
74333+ if (capable_nolog(CAP_SYS_PTRACE))
74334 task->ptrace |= PT_PTRACE_CAP;
74335
74336 __ptrace_link(task, current);
74337@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
74338 {
74339 int copied = 0;
74340
74341+ pax_track_stack();
74342+
74343 while (len > 0) {
74344 char buf[128];
74345 int this_len, retval;
74346@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
74347 {
74348 int copied = 0;
74349
74350+ pax_track_stack();
74351+
74352 while (len > 0) {
74353 char buf[128];
74354 int this_len, retval;
74355@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
74356 int ret = -EIO;
74357 siginfo_t siginfo;
74358
74359+ pax_track_stack();
74360+
74361 switch (request) {
74362 case PTRACE_PEEKTEXT:
74363 case PTRACE_PEEKDATA:
74364@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
74365 ret = ptrace_setoptions(child, data);
74366 break;
74367 case PTRACE_GETEVENTMSG:
74368- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
74369+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
74370 break;
74371
74372 case PTRACE_GETSIGINFO:
74373 ret = ptrace_getsiginfo(child, &siginfo);
74374 if (!ret)
74375- ret = copy_siginfo_to_user((siginfo_t __user *) data,
74376+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
74377 &siginfo);
74378 break;
74379
74380 case PTRACE_SETSIGINFO:
74381- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
74382+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
74383 sizeof siginfo))
74384 ret = -EFAULT;
74385 else
74386@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
74387 goto out;
74388 }
74389
74390+ if (gr_handle_ptrace(child, request)) {
74391+ ret = -EPERM;
74392+ goto out_put_task_struct;
74393+ }
74394+
74395 if (request == PTRACE_ATTACH) {
74396 ret = ptrace_attach(child);
74397 /*
74398 * Some architectures need to do book-keeping after
74399 * a ptrace attach.
74400 */
74401- if (!ret)
74402+ if (!ret) {
74403 arch_ptrace_attach(child);
74404+ gr_audit_ptrace(child);
74405+ }
74406 goto out_put_task_struct;
74407 }
74408
74409@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
74410 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
74411 if (copied != sizeof(tmp))
74412 return -EIO;
74413- return put_user(tmp, (unsigned long __user *)data);
74414+ return put_user(tmp, (__force unsigned long __user *)data);
74415 }
74416
74417 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
74418@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
74419 siginfo_t siginfo;
74420 int ret;
74421
74422+ pax_track_stack();
74423+
74424 switch (request) {
74425 case PTRACE_PEEKTEXT:
74426 case PTRACE_PEEKDATA:
74427@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74428 goto out;
74429 }
74430
74431+ if (gr_handle_ptrace(child, request)) {
74432+ ret = -EPERM;
74433+ goto out_put_task_struct;
74434+ }
74435+
74436 if (request == PTRACE_ATTACH) {
74437 ret = ptrace_attach(child);
74438 /*
74439 * Some architectures need to do book-keeping after
74440 * a ptrace attach.
74441 */
74442- if (!ret)
74443+ if (!ret) {
74444 arch_ptrace_attach(child);
74445+ gr_audit_ptrace(child);
74446+ }
74447 goto out_put_task_struct;
74448 }
74449
74450diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
74451index 697c0a0..2402696 100644
74452--- a/kernel/rcutorture.c
74453+++ b/kernel/rcutorture.c
74454@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
74455 { 0 };
74456 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
74457 { 0 };
74458-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74459-static atomic_t n_rcu_torture_alloc;
74460-static atomic_t n_rcu_torture_alloc_fail;
74461-static atomic_t n_rcu_torture_free;
74462-static atomic_t n_rcu_torture_mberror;
74463-static atomic_t n_rcu_torture_error;
74464+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74465+static atomic_unchecked_t n_rcu_torture_alloc;
74466+static atomic_unchecked_t n_rcu_torture_alloc_fail;
74467+static atomic_unchecked_t n_rcu_torture_free;
74468+static atomic_unchecked_t n_rcu_torture_mberror;
74469+static atomic_unchecked_t n_rcu_torture_error;
74470 static long n_rcu_torture_timers;
74471 static struct list_head rcu_torture_removed;
74472 static cpumask_var_t shuffle_tmp_mask;
74473@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
74474
74475 spin_lock_bh(&rcu_torture_lock);
74476 if (list_empty(&rcu_torture_freelist)) {
74477- atomic_inc(&n_rcu_torture_alloc_fail);
74478+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
74479 spin_unlock_bh(&rcu_torture_lock);
74480 return NULL;
74481 }
74482- atomic_inc(&n_rcu_torture_alloc);
74483+ atomic_inc_unchecked(&n_rcu_torture_alloc);
74484 p = rcu_torture_freelist.next;
74485 list_del_init(p);
74486 spin_unlock_bh(&rcu_torture_lock);
74487@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
74488 static void
74489 rcu_torture_free(struct rcu_torture *p)
74490 {
74491- atomic_inc(&n_rcu_torture_free);
74492+ atomic_inc_unchecked(&n_rcu_torture_free);
74493 spin_lock_bh(&rcu_torture_lock);
74494 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
74495 spin_unlock_bh(&rcu_torture_lock);
74496@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
74497 i = rp->rtort_pipe_count;
74498 if (i > RCU_TORTURE_PIPE_LEN)
74499 i = RCU_TORTURE_PIPE_LEN;
74500- atomic_inc(&rcu_torture_wcount[i]);
74501+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74502 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74503 rp->rtort_mbtest = 0;
74504 rcu_torture_free(rp);
74505@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
74506 i = rp->rtort_pipe_count;
74507 if (i > RCU_TORTURE_PIPE_LEN)
74508 i = RCU_TORTURE_PIPE_LEN;
74509- atomic_inc(&rcu_torture_wcount[i]);
74510+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74511 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74512 rp->rtort_mbtest = 0;
74513 list_del(&rp->rtort_free);
74514@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
74515 i = old_rp->rtort_pipe_count;
74516 if (i > RCU_TORTURE_PIPE_LEN)
74517 i = RCU_TORTURE_PIPE_LEN;
74518- atomic_inc(&rcu_torture_wcount[i]);
74519+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74520 old_rp->rtort_pipe_count++;
74521 cur_ops->deferred_free(old_rp);
74522 }
74523@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
74524 return;
74525 }
74526 if (p->rtort_mbtest == 0)
74527- atomic_inc(&n_rcu_torture_mberror);
74528+ atomic_inc_unchecked(&n_rcu_torture_mberror);
74529 spin_lock(&rand_lock);
74530 cur_ops->read_delay(&rand);
74531 n_rcu_torture_timers++;
74532@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
74533 continue;
74534 }
74535 if (p->rtort_mbtest == 0)
74536- atomic_inc(&n_rcu_torture_mberror);
74537+ atomic_inc_unchecked(&n_rcu_torture_mberror);
74538 cur_ops->read_delay(&rand);
74539 preempt_disable();
74540 pipe_count = p->rtort_pipe_count;
74541@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
74542 rcu_torture_current,
74543 rcu_torture_current_version,
74544 list_empty(&rcu_torture_freelist),
74545- atomic_read(&n_rcu_torture_alloc),
74546- atomic_read(&n_rcu_torture_alloc_fail),
74547- atomic_read(&n_rcu_torture_free),
74548- atomic_read(&n_rcu_torture_mberror),
74549+ atomic_read_unchecked(&n_rcu_torture_alloc),
74550+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
74551+ atomic_read_unchecked(&n_rcu_torture_free),
74552+ atomic_read_unchecked(&n_rcu_torture_mberror),
74553 n_rcu_torture_timers);
74554- if (atomic_read(&n_rcu_torture_mberror) != 0)
74555+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
74556 cnt += sprintf(&page[cnt], " !!!");
74557 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
74558 if (i > 1) {
74559 cnt += sprintf(&page[cnt], "!!! ");
74560- atomic_inc(&n_rcu_torture_error);
74561+ atomic_inc_unchecked(&n_rcu_torture_error);
74562 WARN_ON_ONCE(1);
74563 }
74564 cnt += sprintf(&page[cnt], "Reader Pipe: ");
74565@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
74566 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
74567 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74568 cnt += sprintf(&page[cnt], " %d",
74569- atomic_read(&rcu_torture_wcount[i]));
74570+ atomic_read_unchecked(&rcu_torture_wcount[i]));
74571 }
74572 cnt += sprintf(&page[cnt], "\n");
74573 if (cur_ops->stats)
74574@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
74575
74576 if (cur_ops->cleanup)
74577 cur_ops->cleanup();
74578- if (atomic_read(&n_rcu_torture_error))
74579+ if (atomic_read_unchecked(&n_rcu_torture_error))
74580 rcu_torture_print_module_parms("End of test: FAILURE");
74581 else
74582 rcu_torture_print_module_parms("End of test: SUCCESS");
74583@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
74584
74585 rcu_torture_current = NULL;
74586 rcu_torture_current_version = 0;
74587- atomic_set(&n_rcu_torture_alloc, 0);
74588- atomic_set(&n_rcu_torture_alloc_fail, 0);
74589- atomic_set(&n_rcu_torture_free, 0);
74590- atomic_set(&n_rcu_torture_mberror, 0);
74591- atomic_set(&n_rcu_torture_error, 0);
74592+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
74593+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
74594+ atomic_set_unchecked(&n_rcu_torture_free, 0);
74595+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
74596+ atomic_set_unchecked(&n_rcu_torture_error, 0);
74597 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
74598- atomic_set(&rcu_torture_wcount[i], 0);
74599+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
74600 for_each_possible_cpu(cpu) {
74601 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74602 per_cpu(rcu_torture_count, cpu)[i] = 0;
74603diff --git a/kernel/rcutree.c b/kernel/rcutree.c
74604index 683c4f3..97f54c6 100644
74605--- a/kernel/rcutree.c
74606+++ b/kernel/rcutree.c
74607@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
74608 /*
74609 * Do softirq processing for the current CPU.
74610 */
74611-static void rcu_process_callbacks(struct softirq_action *unused)
74612+static void rcu_process_callbacks(void)
74613 {
74614 /*
74615 * Memory references from any prior RCU read-side critical sections
74616diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
74617index c03edf7..ac1b341 100644
74618--- a/kernel/rcutree_plugin.h
74619+++ b/kernel/rcutree_plugin.h
74620@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
74621 */
74622 void __rcu_read_lock(void)
74623 {
74624- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
74625+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
74626 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
74627 }
74628 EXPORT_SYMBOL_GPL(__rcu_read_lock);
74629@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
74630 struct task_struct *t = current;
74631
74632 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
74633- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
74634+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
74635 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
74636 rcu_read_unlock_special(t);
74637 }
74638diff --git a/kernel/relay.c b/kernel/relay.c
74639index 760c262..a9fd241 100644
74640--- a/kernel/relay.c
74641+++ b/kernel/relay.c
74642@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct file *in,
74643 unsigned int flags,
74644 int *nonpad_ret)
74645 {
74646- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
74647+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
74648 struct rchan_buf *rbuf = in->private_data;
74649 unsigned int subbuf_size = rbuf->chan->subbuf_size;
74650 uint64_t pos = (uint64_t) *ppos;
74651@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct file *in,
74652 .ops = &relay_pipe_buf_ops,
74653 .spd_release = relay_page_release,
74654 };
74655+ ssize_t ret;
74656+
74657+ pax_track_stack();
74658
74659 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
74660 return 0;
74661diff --git a/kernel/resource.c b/kernel/resource.c
74662index fb11a58..4e61ae1 100644
74663--- a/kernel/resource.c
74664+++ b/kernel/resource.c
74665@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
74666
74667 static int __init ioresources_init(void)
74668 {
74669+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74670+#ifdef CONFIG_GRKERNSEC_PROC_USER
74671+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
74672+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
74673+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74674+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
74675+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
74676+#endif
74677+#else
74678 proc_create("ioports", 0, NULL, &proc_ioports_operations);
74679 proc_create("iomem", 0, NULL, &proc_iomem_operations);
74680+#endif
74681 return 0;
74682 }
74683 __initcall(ioresources_init);
74684diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
74685index a56f629..1fc4989 100644
74686--- a/kernel/rtmutex-tester.c
74687+++ b/kernel/rtmutex-tester.c
74688@@ -21,7 +21,7 @@
74689 #define MAX_RT_TEST_MUTEXES 8
74690
74691 static spinlock_t rttest_lock;
74692-static atomic_t rttest_event;
74693+static atomic_unchecked_t rttest_event;
74694
74695 struct test_thread_data {
74696 int opcode;
74697@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74698
74699 case RTTEST_LOCKCONT:
74700 td->mutexes[td->opdata] = 1;
74701- td->event = atomic_add_return(1, &rttest_event);
74702+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74703 return 0;
74704
74705 case RTTEST_RESET:
74706@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74707 return 0;
74708
74709 case RTTEST_RESETEVENT:
74710- atomic_set(&rttest_event, 0);
74711+ atomic_set_unchecked(&rttest_event, 0);
74712 return 0;
74713
74714 default:
74715@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74716 return ret;
74717
74718 td->mutexes[id] = 1;
74719- td->event = atomic_add_return(1, &rttest_event);
74720+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74721 rt_mutex_lock(&mutexes[id]);
74722- td->event = atomic_add_return(1, &rttest_event);
74723+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74724 td->mutexes[id] = 4;
74725 return 0;
74726
74727@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74728 return ret;
74729
74730 td->mutexes[id] = 1;
74731- td->event = atomic_add_return(1, &rttest_event);
74732+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74733 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
74734- td->event = atomic_add_return(1, &rttest_event);
74735+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74736 td->mutexes[id] = ret ? 0 : 4;
74737 return ret ? -EINTR : 0;
74738
74739@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74740 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
74741 return ret;
74742
74743- td->event = atomic_add_return(1, &rttest_event);
74744+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74745 rt_mutex_unlock(&mutexes[id]);
74746- td->event = atomic_add_return(1, &rttest_event);
74747+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74748 td->mutexes[id] = 0;
74749 return 0;
74750
74751@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74752 break;
74753
74754 td->mutexes[dat] = 2;
74755- td->event = atomic_add_return(1, &rttest_event);
74756+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74757 break;
74758
74759 case RTTEST_LOCKBKL:
74760@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74761 return;
74762
74763 td->mutexes[dat] = 3;
74764- td->event = atomic_add_return(1, &rttest_event);
74765+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74766 break;
74767
74768 case RTTEST_LOCKNOWAIT:
74769@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74770 return;
74771
74772 td->mutexes[dat] = 1;
74773- td->event = atomic_add_return(1, &rttest_event);
74774+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74775 return;
74776
74777 case RTTEST_LOCKBKL:
74778diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
74779index 29bd4ba..8c5de90 100644
74780--- a/kernel/rtmutex.c
74781+++ b/kernel/rtmutex.c
74782@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
74783 */
74784 spin_lock_irqsave(&pendowner->pi_lock, flags);
74785
74786- WARN_ON(!pendowner->pi_blocked_on);
74787+ BUG_ON(!pendowner->pi_blocked_on);
74788 WARN_ON(pendowner->pi_blocked_on != waiter);
74789 WARN_ON(pendowner->pi_blocked_on->lock != lock);
74790
74791diff --git a/kernel/sched.c b/kernel/sched.c
74792index 0591df8..e3af3a4 100644
74793--- a/kernel/sched.c
74794+++ b/kernel/sched.c
74795@@ -5043,7 +5043,7 @@ out:
74796 * In CONFIG_NO_HZ case, the idle load balance owner will do the
74797 * rebalancing for all the cpus for whom scheduler ticks are stopped.
74798 */
74799-static void run_rebalance_domains(struct softirq_action *h)
74800+static void run_rebalance_domains(void)
74801 {
74802 int this_cpu = smp_processor_id();
74803 struct rq *this_rq = cpu_rq(this_cpu);
74804@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
74805 }
74806 }
74807
74808+#ifdef CONFIG_GRKERNSEC_SETXID
74809+extern void gr_delayed_cred_worker(void);
74810+static inline void gr_cred_schedule(void)
74811+{
74812+ if (unlikely(current->delayed_cred))
74813+ gr_delayed_cred_worker();
74814+}
74815+#else
74816+static inline void gr_cred_schedule(void)
74817+{
74818+}
74819+#endif
74820+
74821 /*
74822 * schedule() is the main scheduler function.
74823 */
74824@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
74825 struct rq *rq;
74826 int cpu;
74827
74828+ pax_track_stack();
74829+
74830 need_resched:
74831 preempt_disable();
74832 cpu = smp_processor_id();
74833@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
74834
74835 schedule_debug(prev);
74836
74837+ gr_cred_schedule();
74838+
74839 if (sched_feat(HRTICK))
74840 hrtick_clear(rq);
74841
74842@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
74843 * Look out! "owner" is an entirely speculative pointer
74844 * access and not reliable.
74845 */
74846-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74847+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
74848 {
74849 unsigned int cpu;
74850 struct rq *rq;
74851@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74852 * DEBUG_PAGEALLOC could have unmapped it if
74853 * the mutex owner just released it and exited.
74854 */
74855- if (probe_kernel_address(&owner->cpu, cpu))
74856+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
74857 return 0;
74858 #else
74859- cpu = owner->cpu;
74860+ cpu = task_thread_info(owner)->cpu;
74861 #endif
74862
74863 /*
74864@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74865 /*
74866 * Is that owner really running on that cpu?
74867 */
74868- if (task_thread_info(rq->curr) != owner || need_resched())
74869+ if (rq->curr != owner || need_resched())
74870 return 0;
74871
74872 cpu_relax();
74873@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
74874 /* convert nice value [19,-20] to rlimit style value [1,40] */
74875 int nice_rlim = 20 - nice;
74876
74877+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
74878+
74879 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
74880 capable(CAP_SYS_NICE));
74881 }
74882@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
74883 if (nice > 19)
74884 nice = 19;
74885
74886- if (increment < 0 && !can_nice(current, nice))
74887+ if (increment < 0 && (!can_nice(current, nice) ||
74888+ gr_handle_chroot_nice()))
74889 return -EPERM;
74890
74891 retval = security_task_setnice(current, nice);
74892@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
74893 long power;
74894 int weight;
74895
74896- WARN_ON(!sd || !sd->groups);
74897+ BUG_ON(!sd || !sd->groups);
74898
74899 if (cpu != group_first_cpu(sd->groups))
74900 return;
74901diff --git a/kernel/signal.c b/kernel/signal.c
74902index 2494827..cda80a0 100644
74903--- a/kernel/signal.c
74904+++ b/kernel/signal.c
74905@@ -41,12 +41,12 @@
74906
74907 static struct kmem_cache *sigqueue_cachep;
74908
74909-static void __user *sig_handler(struct task_struct *t, int sig)
74910+static __sighandler_t sig_handler(struct task_struct *t, int sig)
74911 {
74912 return t->sighand->action[sig - 1].sa.sa_handler;
74913 }
74914
74915-static int sig_handler_ignored(void __user *handler, int sig)
74916+static int sig_handler_ignored(__sighandler_t handler, int sig)
74917 {
74918 /* Is it explicitly or implicitly ignored? */
74919 return handler == SIG_IGN ||
74920@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
74921 static int sig_task_ignored(struct task_struct *t, int sig,
74922 int from_ancestor_ns)
74923 {
74924- void __user *handler;
74925+ __sighandler_t handler;
74926
74927 handler = sig_handler(t, sig);
74928
74929@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
74930 */
74931 user = get_uid(__task_cred(t)->user);
74932 atomic_inc(&user->sigpending);
74933+
74934+ if (!override_rlimit)
74935+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
74936 if (override_rlimit ||
74937 atomic_read(&user->sigpending) <=
74938 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
74939@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
74940
74941 int unhandled_signal(struct task_struct *tsk, int sig)
74942 {
74943- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
74944+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
74945 if (is_global_init(tsk))
74946 return 1;
74947 if (handler != SIG_IGN && handler != SIG_DFL)
74948@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
74949 }
74950 }
74951
74952+ /* allow glibc communication via tgkill to other threads in our
74953+ thread group */
74954+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
74955+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
74956+ && gr_handle_signal(t, sig))
74957+ return -EPERM;
74958+
74959 return security_task_kill(t, info, sig, 0);
74960 }
74961
74962@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
74963 return send_signal(sig, info, p, 1);
74964 }
74965
74966-static int
74967+int
74968 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74969 {
74970 return send_signal(sig, info, t, 0);
74971@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74972 unsigned long int flags;
74973 int ret, blocked, ignored;
74974 struct k_sigaction *action;
74975+ int is_unhandled = 0;
74976
74977 spin_lock_irqsave(&t->sighand->siglock, flags);
74978 action = &t->sighand->action[sig-1];
74979@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74980 }
74981 if (action->sa.sa_handler == SIG_DFL)
74982 t->signal->flags &= ~SIGNAL_UNKILLABLE;
74983+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
74984+ is_unhandled = 1;
74985 ret = specific_send_sig_info(sig, info, t);
74986 spin_unlock_irqrestore(&t->sighand->siglock, flags);
74987
74988+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
74989+ normal operation */
74990+ if (is_unhandled) {
74991+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
74992+ gr_handle_crash(t, sig);
74993+ }
74994+
74995 return ret;
74996 }
74997
74998@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
74999 {
75000 int ret = check_kill_permission(sig, info, p);
75001
75002- if (!ret && sig)
75003+ if (!ret && sig) {
75004 ret = do_send_sig_info(sig, info, p, true);
75005+ if (!ret)
75006+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
75007+ }
75008
75009 return ret;
75010 }
75011@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
75012 {
75013 siginfo_t info;
75014
75015+ pax_track_stack();
75016+
75017 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
75018
75019 memset(&info, 0, sizeof info);
75020@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
75021 int error = -ESRCH;
75022
75023 rcu_read_lock();
75024- p = find_task_by_vpid(pid);
75025+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
75026+ /* allow glibc communication via tgkill to other threads in our
75027+ thread group */
75028+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
75029+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
75030+ p = find_task_by_vpid_unrestricted(pid);
75031+ else
75032+#endif
75033+ p = find_task_by_vpid(pid);
75034 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
75035 error = check_kill_permission(sig, info, p);
75036 /*
75037diff --git a/kernel/smp.c b/kernel/smp.c
75038index aa9cff3..631a0de 100644
75039--- a/kernel/smp.c
75040+++ b/kernel/smp.c
75041@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
75042 }
75043 EXPORT_SYMBOL(smp_call_function);
75044
75045-void ipi_call_lock(void)
75046+void ipi_call_lock(void) __acquires(call_function.lock)
75047 {
75048 spin_lock(&call_function.lock);
75049 }
75050
75051-void ipi_call_unlock(void)
75052+void ipi_call_unlock(void) __releases(call_function.lock)
75053 {
75054 spin_unlock(&call_function.lock);
75055 }
75056
75057-void ipi_call_lock_irq(void)
75058+void ipi_call_lock_irq(void) __acquires(call_function.lock)
75059 {
75060 spin_lock_irq(&call_function.lock);
75061 }
75062
75063-void ipi_call_unlock_irq(void)
75064+void ipi_call_unlock_irq(void) __releases(call_function.lock)
75065 {
75066 spin_unlock_irq(&call_function.lock);
75067 }
75068diff --git a/kernel/softirq.c b/kernel/softirq.c
75069index 04a0252..580c512 100644
75070--- a/kernel/softirq.c
75071+++ b/kernel/softirq.c
75072@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
75073
75074 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
75075
75076-char *softirq_to_name[NR_SOFTIRQS] = {
75077+const char * const softirq_to_name[NR_SOFTIRQS] = {
75078 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
75079 "TASKLET", "SCHED", "HRTIMER", "RCU"
75080 };
75081@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
75082
75083 asmlinkage void __do_softirq(void)
75084 {
75085- struct softirq_action *h;
75086+ const struct softirq_action *h;
75087 __u32 pending;
75088 int max_restart = MAX_SOFTIRQ_RESTART;
75089 int cpu;
75090@@ -233,7 +233,7 @@ restart:
75091 kstat_incr_softirqs_this_cpu(h - softirq_vec);
75092
75093 trace_softirq_entry(h, softirq_vec);
75094- h->action(h);
75095+ h->action();
75096 trace_softirq_exit(h, softirq_vec);
75097 if (unlikely(prev_count != preempt_count())) {
75098 printk(KERN_ERR "huh, entered softirq %td %s %p"
75099@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
75100 local_irq_restore(flags);
75101 }
75102
75103-void open_softirq(int nr, void (*action)(struct softirq_action *))
75104+void open_softirq(int nr, void (*action)(void))
75105 {
75106- softirq_vec[nr].action = action;
75107+ pax_open_kernel();
75108+ *(void **)&softirq_vec[nr].action = action;
75109+ pax_close_kernel();
75110 }
75111
75112 /*
75113@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75114
75115 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75116
75117-static void tasklet_action(struct softirq_action *a)
75118+static void tasklet_action(void)
75119 {
75120 struct tasklet_struct *list;
75121
75122@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75123 }
75124 }
75125
75126-static void tasklet_hi_action(struct softirq_action *a)
75127+static void tasklet_hi_action(void)
75128 {
75129 struct tasklet_struct *list;
75130
75131diff --git a/kernel/sys.c b/kernel/sys.c
75132index e9512b1..8a10cb3 100644
75133--- a/kernel/sys.c
75134+++ b/kernel/sys.c
75135@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75136 error = -EACCES;
75137 goto out;
75138 }
75139+
75140+ if (gr_handle_chroot_setpriority(p, niceval)) {
75141+ error = -EACCES;
75142+ goto out;
75143+ }
75144+
75145 no_nice = security_task_setnice(p, niceval);
75146 if (no_nice) {
75147 error = no_nice;
75148@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75149 !(user = find_user(who)))
75150 goto out_unlock; /* No processes for this user */
75151
75152- do_each_thread(g, p)
75153+ do_each_thread(g, p) {
75154 if (__task_cred(p)->uid == who)
75155 error = set_one_prio(p, niceval, error);
75156- while_each_thread(g, p);
75157+ } while_each_thread(g, p);
75158 if (who != cred->uid)
75159 free_uid(user); /* For find_user() */
75160 break;
75161@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75162 !(user = find_user(who)))
75163 goto out_unlock; /* No processes for this user */
75164
75165- do_each_thread(g, p)
75166+ do_each_thread(g, p) {
75167 if (__task_cred(p)->uid == who) {
75168 niceval = 20 - task_nice(p);
75169 if (niceval > retval)
75170 retval = niceval;
75171 }
75172- while_each_thread(g, p);
75173+ } while_each_thread(g, p);
75174 if (who != cred->uid)
75175 free_uid(user); /* for find_user() */
75176 break;
75177@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75178 goto error;
75179 }
75180
75181+ if (gr_check_group_change(new->gid, new->egid, -1))
75182+ goto error;
75183+
75184 if (rgid != (gid_t) -1 ||
75185 (egid != (gid_t) -1 && egid != old->gid))
75186 new->sgid = new->egid;
75187@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75188 goto error;
75189
75190 retval = -EPERM;
75191+
75192+ if (gr_check_group_change(gid, gid, gid))
75193+ goto error;
75194+
75195 if (capable(CAP_SETGID))
75196 new->gid = new->egid = new->sgid = new->fsgid = gid;
75197 else if (gid == old->gid || gid == old->sgid)
75198@@ -559,7 +572,7 @@ error:
75199 /*
75200 * change the user struct in a credentials set to match the new UID
75201 */
75202-static int set_user(struct cred *new)
75203+int set_user(struct cred *new)
75204 {
75205 struct user_struct *new_user;
75206
75207@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75208 if (!new_user)
75209 return -EAGAIN;
75210
75211+ /*
75212+ * We don't fail in case of NPROC limit excess here because too many
75213+ * poorly written programs don't check set*uid() return code, assuming
75214+ * it never fails if called by root. We may still enforce NPROC limit
75215+ * for programs doing set*uid()+execve() by harmlessly deferring the
75216+ * failure to the execve() stage.
75217+ */
75218 if (atomic_read(&new_user->processes) >=
75219 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75220- new_user != INIT_USER) {
75221- free_uid(new_user);
75222- return -EAGAIN;
75223- }
75224+ new_user != INIT_USER)
75225+ current->flags |= PF_NPROC_EXCEEDED;
75226+ else
75227+ current->flags &= ~PF_NPROC_EXCEEDED;
75228
75229 free_uid(new->user);
75230 new->user = new_user;
75231@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75232 goto error;
75233 }
75234
75235+ if (gr_check_user_change(new->uid, new->euid, -1))
75236+ goto error;
75237+
75238 if (new->uid != old->uid) {
75239 retval = set_user(new);
75240 if (retval < 0)
75241@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75242 goto error;
75243
75244 retval = -EPERM;
75245+
75246+ if (gr_check_crash_uid(uid))
75247+ goto error;
75248+ if (gr_check_user_change(uid, uid, uid))
75249+ goto error;
75250+
75251 if (capable(CAP_SETUID)) {
75252 new->suid = new->uid = uid;
75253 if (uid != old->uid) {
75254@@ -721,9 +750,18 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75255
75256 retval = -EPERM;
75257 if (!capable(CAP_SETUID)) {
75258- if (ruid != (uid_t) -1 && ruid != old->uid &&
75259- ruid != old->euid && ruid != old->suid)
75260- goto error;
75261+ // if RBAC is enabled, require CAP_SETUID to change
75262+ // uid to euid (from a suid binary, for instance)
75263+ // this is a hardening of normal permissions, not
75264+ // weakening
75265+ if (gr_acl_is_enabled()) {
75266+ if (ruid != (uid_t) -1 && ruid != old->uid)
75267+ goto error;
75268+ } else {
75269+ if (ruid != (uid_t) -1 && ruid != old->uid &&
75270+ ruid != old->euid && ruid != old->suid)
75271+ goto error;
75272+ }
75273 if (euid != (uid_t) -1 && euid != old->uid &&
75274 euid != old->euid && euid != old->suid)
75275 goto error;
75276@@ -732,6 +770,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75277 goto error;
75278 }
75279
75280+ if (gr_check_user_change(ruid, euid, -1))
75281+ goto error;
75282+
75283 if (ruid != (uid_t) -1) {
75284 new->uid = ruid;
75285 if (ruid != old->uid) {
75286@@ -789,9 +830,18 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75287
75288 retval = -EPERM;
75289 if (!capable(CAP_SETGID)) {
75290- if (rgid != (gid_t) -1 && rgid != old->gid &&
75291- rgid != old->egid && rgid != old->sgid)
75292- goto error;
75293+ // if RBAC is enabled, require CAP_SETGID to change
75294+ // gid to egid (from a sgid binary, for instance)
75295+ // this is a hardening of normal permissions, not
75296+ // weakening
75297+ if (gr_acl_is_enabled()) {
75298+ if (rgid != (gid_t) -1 && rgid != old->gid)
75299+ goto error;
75300+ } else {
75301+ if (rgid != (gid_t) -1 && rgid != old->gid &&
75302+ rgid != old->egid && rgid != old->sgid)
75303+ goto error;
75304+ }
75305 if (egid != (gid_t) -1 && egid != old->gid &&
75306 egid != old->egid && egid != old->sgid)
75307 goto error;
75308@@ -800,6 +850,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75309 goto error;
75310 }
75311
75312+ if (gr_check_group_change(rgid, egid, -1))
75313+ goto error;
75314+
75315 if (rgid != (gid_t) -1)
75316 new->gid = rgid;
75317 if (egid != (gid_t) -1)
75318@@ -849,6 +902,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
75319 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
75320 goto error;
75321
75322+ if (gr_check_user_change(-1, -1, uid))
75323+ goto error;
75324+
75325 if (uid == old->uid || uid == old->euid ||
75326 uid == old->suid || uid == old->fsuid ||
75327 capable(CAP_SETUID)) {
75328@@ -889,6 +945,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
75329 if (gid == old->gid || gid == old->egid ||
75330 gid == old->sgid || gid == old->fsgid ||
75331 capable(CAP_SETGID)) {
75332+ if (gr_check_group_change(-1, -1, gid))
75333+ goto error;
75334+
75335 if (gid != old_fsgid) {
75336 new->fsgid = gid;
75337 goto change_okay;
75338@@ -1454,7 +1513,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
75339 error = get_dumpable(me->mm);
75340 break;
75341 case PR_SET_DUMPABLE:
75342- if (arg2 < 0 || arg2 > 1) {
75343+ if (arg2 > 1) {
75344 error = -EINVAL;
75345 break;
75346 }
75347diff --git a/kernel/sysctl.c b/kernel/sysctl.c
75348index b8bd058..ab6a76be 100644
75349--- a/kernel/sysctl.c
75350+++ b/kernel/sysctl.c
75351@@ -63,6 +63,13 @@
75352 static int deprecated_sysctl_warning(struct __sysctl_args *args);
75353
75354 #if defined(CONFIG_SYSCTL)
75355+#include <linux/grsecurity.h>
75356+#include <linux/grinternal.h>
75357+
75358+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
75359+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
75360+ const int op);
75361+extern int gr_handle_chroot_sysctl(const int op);
75362
75363 /* External variables not in a header file. */
75364 extern int C_A_D;
75365@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
75366 static int proc_taint(struct ctl_table *table, int write,
75367 void __user *buffer, size_t *lenp, loff_t *ppos);
75368 #endif
75369+extern ctl_table grsecurity_table[];
75370
75371 static struct ctl_table root_table[];
75372 static struct ctl_table_root sysctl_table_root;
75373@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
75374 int sysctl_legacy_va_layout;
75375 #endif
75376
75377+#ifdef CONFIG_PAX_SOFTMODE
75378+static ctl_table pax_table[] = {
75379+ {
75380+ .ctl_name = CTL_UNNUMBERED,
75381+ .procname = "softmode",
75382+ .data = &pax_softmode,
75383+ .maxlen = sizeof(unsigned int),
75384+ .mode = 0600,
75385+ .proc_handler = &proc_dointvec,
75386+ },
75387+
75388+ { .ctl_name = 0 }
75389+};
75390+#endif
75391+
75392 extern int prove_locking;
75393 extern int lock_stat;
75394
75395@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
75396 #endif
75397
75398 static struct ctl_table kern_table[] = {
75399+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
75400+ {
75401+ .ctl_name = CTL_UNNUMBERED,
75402+ .procname = "grsecurity",
75403+ .mode = 0500,
75404+ .child = grsecurity_table,
75405+ },
75406+#endif
75407+
75408+#ifdef CONFIG_PAX_SOFTMODE
75409+ {
75410+ .ctl_name = CTL_UNNUMBERED,
75411+ .procname = "pax",
75412+ .mode = 0500,
75413+ .child = pax_table,
75414+ },
75415+#endif
75416+
75417 {
75418 .ctl_name = CTL_UNNUMBERED,
75419 .procname = "sched_child_runs_first",
75420@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
75421 .data = &modprobe_path,
75422 .maxlen = KMOD_PATH_LEN,
75423 .mode = 0644,
75424- .proc_handler = &proc_dostring,
75425- .strategy = &sysctl_string,
75426+ .proc_handler = &proc_dostring_modpriv,
75427+ .strategy = &sysctl_string_modpriv,
75428 },
75429 {
75430 .ctl_name = CTL_UNNUMBERED,
75431@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
75432 .mode = 0644,
75433 .proc_handler = &proc_dointvec
75434 },
75435+ {
75436+ .procname = "heap_stack_gap",
75437+ .data = &sysctl_heap_stack_gap,
75438+ .maxlen = sizeof(sysctl_heap_stack_gap),
75439+ .mode = 0644,
75440+ .proc_handler = proc_doulongvec_minmax,
75441+ },
75442 #else
75443 {
75444 .ctl_name = CTL_UNNUMBERED,
75445@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
75446 return 0;
75447 }
75448
75449+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
75450+
75451 static int parse_table(int __user *name, int nlen,
75452 void __user *oldval, size_t __user *oldlenp,
75453 void __user *newval, size_t newlen,
75454@@ -1821,7 +1871,7 @@ repeat:
75455 if (n == table->ctl_name) {
75456 int error;
75457 if (table->child) {
75458- if (sysctl_perm(root, table, MAY_EXEC))
75459+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
75460 return -EPERM;
75461 name++;
75462 nlen--;
75463@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
75464 int error;
75465 int mode;
75466
75467+ if (table->parent != NULL && table->parent->procname != NULL &&
75468+ table->procname != NULL &&
75469+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
75470+ return -EACCES;
75471+ if (gr_handle_chroot_sysctl(op))
75472+ return -EACCES;
75473+ error = gr_handle_sysctl(table, op);
75474+ if (error)
75475+ return error;
75476+
75477+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75478+ if (error)
75479+ return error;
75480+
75481+ if (root->permissions)
75482+ mode = root->permissions(root, current->nsproxy, table);
75483+ else
75484+ mode = table->mode;
75485+
75486+ return test_perm(mode, op);
75487+}
75488+
75489+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
75490+{
75491+ int error;
75492+ int mode;
75493+
75494 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75495 if (error)
75496 return error;
75497@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
75498 buffer, lenp, ppos);
75499 }
75500
75501+int proc_dostring_modpriv(struct ctl_table *table, int write,
75502+ void __user *buffer, size_t *lenp, loff_t *ppos)
75503+{
75504+ if (write && !capable(CAP_SYS_MODULE))
75505+ return -EPERM;
75506+
75507+ return _proc_do_string(table->data, table->maxlen, write,
75508+ buffer, lenp, ppos);
75509+}
75510+
75511
75512 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
75513 int *valp,
75514@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
75515 vleft = table->maxlen / sizeof(unsigned long);
75516 left = *lenp;
75517
75518- for (; left && vleft--; i++, min++, max++, first=0) {
75519+ for (; left && vleft--; i++, first=0) {
75520 if (write) {
75521 while (left) {
75522 char c;
75523@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
75524 return -ENOSYS;
75525 }
75526
75527+int proc_dostring_modpriv(struct ctl_table *table, int write,
75528+ void __user *buffer, size_t *lenp, loff_t *ppos)
75529+{
75530+ return -ENOSYS;
75531+}
75532+
75533 int proc_dointvec(struct ctl_table *table, int write,
75534 void __user *buffer, size_t *lenp, loff_t *ppos)
75535 {
75536@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
75537 return 1;
75538 }
75539
75540+int sysctl_string_modpriv(struct ctl_table *table,
75541+ void __user *oldval, size_t __user *oldlenp,
75542+ void __user *newval, size_t newlen)
75543+{
75544+ if (newval && newlen && !capable(CAP_SYS_MODULE))
75545+ return -EPERM;
75546+
75547+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
75548+}
75549+
75550 /*
75551 * This function makes sure that all of the integers in the vector
75552 * are between the minimum and maximum values given in the arrays
75553@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
75554 return -ENOSYS;
75555 }
75556
75557+int sysctl_string_modpriv(struct ctl_table *table,
75558+ void __user *oldval, size_t __user *oldlenp,
75559+ void __user *newval, size_t newlen)
75560+{
75561+ return -ENOSYS;
75562+}
75563+
75564 int sysctl_intvec(struct ctl_table *table,
75565 void __user *oldval, size_t __user *oldlenp,
75566 void __user *newval, size_t newlen)
75567@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
75568 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
75569 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
75570 EXPORT_SYMBOL(proc_dostring);
75571+EXPORT_SYMBOL(proc_dostring_modpriv);
75572 EXPORT_SYMBOL(proc_doulongvec_minmax);
75573 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
75574 EXPORT_SYMBOL(register_sysctl_table);
75575@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
75576 EXPORT_SYMBOL(sysctl_jiffies);
75577 EXPORT_SYMBOL(sysctl_ms_jiffies);
75578 EXPORT_SYMBOL(sysctl_string);
75579+EXPORT_SYMBOL(sysctl_string_modpriv);
75580 EXPORT_SYMBOL(sysctl_data);
75581 EXPORT_SYMBOL(unregister_sysctl_table);
75582diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
75583index 469193c..ea3ecb2 100644
75584--- a/kernel/sysctl_check.c
75585+++ b/kernel/sysctl_check.c
75586@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
75587 } else {
75588 if ((table->strategy == sysctl_data) ||
75589 (table->strategy == sysctl_string) ||
75590+ (table->strategy == sysctl_string_modpriv) ||
75591 (table->strategy == sysctl_intvec) ||
75592 (table->strategy == sysctl_jiffies) ||
75593 (table->strategy == sysctl_ms_jiffies) ||
75594 (table->proc_handler == proc_dostring) ||
75595+ (table->proc_handler == proc_dostring_modpriv) ||
75596 (table->proc_handler == proc_dointvec) ||
75597 (table->proc_handler == proc_dointvec_minmax) ||
75598 (table->proc_handler == proc_dointvec_jiffies) ||
75599diff --git a/kernel/taskstats.c b/kernel/taskstats.c
75600index a4ef542..798bcd7 100644
75601--- a/kernel/taskstats.c
75602+++ b/kernel/taskstats.c
75603@@ -26,9 +26,12 @@
75604 #include <linux/cgroup.h>
75605 #include <linux/fs.h>
75606 #include <linux/file.h>
75607+#include <linux/grsecurity.h>
75608 #include <net/genetlink.h>
75609 #include <asm/atomic.h>
75610
75611+extern int gr_is_taskstats_denied(int pid);
75612+
75613 /*
75614 * Maximum length of a cpumask that can be specified in
75615 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
75616@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
75617 size_t size;
75618 cpumask_var_t mask;
75619
75620+ if (gr_is_taskstats_denied(current->pid))
75621+ return -EACCES;
75622+
75623 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
75624 return -ENOMEM;
75625
75626diff --git a/kernel/time.c b/kernel/time.c
75627index 33df60e..ca768bd 100644
75628--- a/kernel/time.c
75629+++ b/kernel/time.c
75630@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
75631 return error;
75632
75633 if (tz) {
75634+ /* we log in do_settimeofday called below, so don't log twice
75635+ */
75636+ if (!tv)
75637+ gr_log_timechange();
75638+
75639 /* SMP safe, global irq locking makes it work. */
75640 sys_tz = *tz;
75641 update_vsyscall_tz();
75642@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
75643 * Avoid unnecessary multiplications/divisions in the
75644 * two most common HZ cases:
75645 */
75646-unsigned int inline jiffies_to_msecs(const unsigned long j)
75647+inline unsigned int jiffies_to_msecs(const unsigned long j)
75648 {
75649 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
75650 return (MSEC_PER_SEC / HZ) * j;
75651@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
75652 }
75653 EXPORT_SYMBOL(jiffies_to_msecs);
75654
75655-unsigned int inline jiffies_to_usecs(const unsigned long j)
75656+inline unsigned int jiffies_to_usecs(const unsigned long j)
75657 {
75658 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
75659 return (USEC_PER_SEC / HZ) * j;
75660diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
75661index 57b953f..06f149f 100644
75662--- a/kernel/time/tick-broadcast.c
75663+++ b/kernel/time/tick-broadcast.c
75664@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
75665 * then clear the broadcast bit.
75666 */
75667 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
75668- int cpu = smp_processor_id();
75669+ cpu = smp_processor_id();
75670
75671 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
75672 tick_broadcast_clear_oneshot(cpu);
75673diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
75674index 4a71cff..ffb5548 100644
75675--- a/kernel/time/timekeeping.c
75676+++ b/kernel/time/timekeeping.c
75677@@ -14,6 +14,7 @@
75678 #include <linux/init.h>
75679 #include <linux/mm.h>
75680 #include <linux/sched.h>
75681+#include <linux/grsecurity.h>
75682 #include <linux/sysdev.h>
75683 #include <linux/clocksource.h>
75684 #include <linux/jiffies.h>
75685@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
75686 */
75687 struct timespec ts = xtime;
75688 timespec_add_ns(&ts, nsec);
75689- ACCESS_ONCE(xtime_cache) = ts;
75690+ ACCESS_ONCE_RW(xtime_cache) = ts;
75691 }
75692
75693 /* must hold xtime_lock */
75694@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
75695 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
75696 return -EINVAL;
75697
75698+ gr_log_timechange();
75699+
75700 write_seqlock_irqsave(&xtime_lock, flags);
75701
75702 timekeeping_forward_now();
75703diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
75704index 54c0dda..e9095d9 100644
75705--- a/kernel/time/timer_list.c
75706+++ b/kernel/time/timer_list.c
75707@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
75708
75709 static void print_name_offset(struct seq_file *m, void *sym)
75710 {
75711+#ifdef CONFIG_GRKERNSEC_HIDESYM
75712+ SEQ_printf(m, "<%p>", NULL);
75713+#else
75714 char symname[KSYM_NAME_LEN];
75715
75716 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
75717 SEQ_printf(m, "<%p>", sym);
75718 else
75719 SEQ_printf(m, "%s", symname);
75720+#endif
75721 }
75722
75723 static void
75724@@ -112,7 +116,11 @@ next_one:
75725 static void
75726 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
75727 {
75728+#ifdef CONFIG_GRKERNSEC_HIDESYM
75729+ SEQ_printf(m, " .base: %p\n", NULL);
75730+#else
75731 SEQ_printf(m, " .base: %p\n", base);
75732+#endif
75733 SEQ_printf(m, " .index: %d\n",
75734 base->index);
75735 SEQ_printf(m, " .resolution: %Lu nsecs\n",
75736@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
75737 {
75738 struct proc_dir_entry *pe;
75739
75740+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75741+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
75742+#else
75743 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
75744+#endif
75745 if (!pe)
75746 return -ENOMEM;
75747 return 0;
75748diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
75749index ee5681f..634089b 100644
75750--- a/kernel/time/timer_stats.c
75751+++ b/kernel/time/timer_stats.c
75752@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
75753 static unsigned long nr_entries;
75754 static struct entry entries[MAX_ENTRIES];
75755
75756-static atomic_t overflow_count;
75757+static atomic_unchecked_t overflow_count;
75758
75759 /*
75760 * The entries are in a hash-table, for fast lookup:
75761@@ -140,7 +140,7 @@ static void reset_entries(void)
75762 nr_entries = 0;
75763 memset(entries, 0, sizeof(entries));
75764 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
75765- atomic_set(&overflow_count, 0);
75766+ atomic_set_unchecked(&overflow_count, 0);
75767 }
75768
75769 static struct entry *alloc_entry(void)
75770@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75771 if (likely(entry))
75772 entry->count++;
75773 else
75774- atomic_inc(&overflow_count);
75775+ atomic_inc_unchecked(&overflow_count);
75776
75777 out_unlock:
75778 spin_unlock_irqrestore(lock, flags);
75779@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75780
75781 static void print_name_offset(struct seq_file *m, unsigned long addr)
75782 {
75783+#ifdef CONFIG_GRKERNSEC_HIDESYM
75784+ seq_printf(m, "<%p>", NULL);
75785+#else
75786 char symname[KSYM_NAME_LEN];
75787
75788 if (lookup_symbol_name(addr, symname) < 0)
75789 seq_printf(m, "<%p>", (void *)addr);
75790 else
75791 seq_printf(m, "%s", symname);
75792+#endif
75793 }
75794
75795 static int tstats_show(struct seq_file *m, void *v)
75796@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
75797
75798 seq_puts(m, "Timer Stats Version: v0.2\n");
75799 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
75800- if (atomic_read(&overflow_count))
75801+ if (atomic_read_unchecked(&overflow_count))
75802 seq_printf(m, "Overflow: %d entries\n",
75803- atomic_read(&overflow_count));
75804+ atomic_read_unchecked(&overflow_count));
75805
75806 for (i = 0; i < nr_entries; i++) {
75807 entry = entries + i;
75808@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
75809 {
75810 struct proc_dir_entry *pe;
75811
75812+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75813+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
75814+#else
75815 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
75816+#endif
75817 if (!pe)
75818 return -ENOMEM;
75819 return 0;
75820diff --git a/kernel/timer.c b/kernel/timer.c
75821index cb3c1f1..8bf5526 100644
75822--- a/kernel/timer.c
75823+++ b/kernel/timer.c
75824@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
75825 /*
75826 * This function runs timers and the timer-tq in bottom half context.
75827 */
75828-static void run_timer_softirq(struct softirq_action *h)
75829+static void run_timer_softirq(void)
75830 {
75831 struct tvec_base *base = __get_cpu_var(tvec_bases);
75832
75833diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
75834index d9d6206..f19467e 100644
75835--- a/kernel/trace/blktrace.c
75836+++ b/kernel/trace/blktrace.c
75837@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
75838 struct blk_trace *bt = filp->private_data;
75839 char buf[16];
75840
75841- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
75842+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
75843
75844 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
75845 }
75846@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
75847 return 1;
75848
75849 bt = buf->chan->private_data;
75850- atomic_inc(&bt->dropped);
75851+ atomic_inc_unchecked(&bt->dropped);
75852 return 0;
75853 }
75854
75855@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
75856
75857 bt->dir = dir;
75858 bt->dev = dev;
75859- atomic_set(&bt->dropped, 0);
75860+ atomic_set_unchecked(&bt->dropped, 0);
75861
75862 ret = -EIO;
75863 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
75864diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
75865index 4872937..c794d40 100644
75866--- a/kernel/trace/ftrace.c
75867+++ b/kernel/trace/ftrace.c
75868@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
75869
75870 ip = rec->ip;
75871
75872+ ret = ftrace_arch_code_modify_prepare();
75873+ FTRACE_WARN_ON(ret);
75874+ if (ret)
75875+ return 0;
75876+
75877 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
75878+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
75879 if (ret) {
75880 ftrace_bug(ret, ip);
75881 rec->flags |= FTRACE_FL_FAILED;
75882- return 0;
75883 }
75884- return 1;
75885+ return ret ? 0 : 1;
75886 }
75887
75888 /*
75889diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
75890index e749a05..19c6e94 100644
75891--- a/kernel/trace/ring_buffer.c
75892+++ b/kernel/trace/ring_buffer.c
75893@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
75894 * the reader page). But if the next page is a header page,
75895 * its flags will be non zero.
75896 */
75897-static int inline
75898+static inline int
75899 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
75900 struct buffer_page *page, struct list_head *list)
75901 {
75902diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
75903index a2a2d1f..7f32b09 100644
75904--- a/kernel/trace/trace.c
75905+++ b/kernel/trace/trace.c
75906@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
75907 size_t rem;
75908 unsigned int i;
75909
75910+ pax_track_stack();
75911+
75912 /* copy the tracer to avoid using a global lock all around */
75913 mutex_lock(&trace_types_lock);
75914 if (unlikely(old_tracer != current_trace && current_trace)) {
75915@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
75916 int entries, size, i;
75917 size_t ret;
75918
75919+ pax_track_stack();
75920+
75921 if (*ppos & (PAGE_SIZE - 1)) {
75922 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
75923 return -EINVAL;
75924@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
75925 };
75926 #endif
75927
75928-static struct dentry *d_tracer;
75929-
75930 struct dentry *tracing_init_dentry(void)
75931 {
75932+ static struct dentry *d_tracer;
75933 static int once;
75934
75935 if (d_tracer)
75936@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
75937 return d_tracer;
75938 }
75939
75940-static struct dentry *d_percpu;
75941-
75942 struct dentry *tracing_dentry_percpu(void)
75943 {
75944+ static struct dentry *d_percpu;
75945 static int once;
75946 struct dentry *d_tracer;
75947
75948diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
75949index d128f65..f37b4af 100644
75950--- a/kernel/trace/trace_events.c
75951+++ b/kernel/trace/trace_events.c
75952@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
75953 * Modules must own their file_operations to keep up with
75954 * reference counting.
75955 */
75956+
75957 struct ftrace_module_file_ops {
75958 struct list_head list;
75959 struct module *mod;
75960- struct file_operations id;
75961- struct file_operations enable;
75962- struct file_operations format;
75963- struct file_operations filter;
75964 };
75965
75966 static void remove_subsystem_dir(const char *name)
75967@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
75968
75969 file_ops->mod = mod;
75970
75971- file_ops->id = ftrace_event_id_fops;
75972- file_ops->id.owner = mod;
75973-
75974- file_ops->enable = ftrace_enable_fops;
75975- file_ops->enable.owner = mod;
75976-
75977- file_ops->filter = ftrace_event_filter_fops;
75978- file_ops->filter.owner = mod;
75979-
75980- file_ops->format = ftrace_event_format_fops;
75981- file_ops->format.owner = mod;
75982+ pax_open_kernel();
75983+ *(void **)&mod->trace_id.owner = mod;
75984+ *(void **)&mod->trace_enable.owner = mod;
75985+ *(void **)&mod->trace_filter.owner = mod;
75986+ *(void **)&mod->trace_format.owner = mod;
75987+ pax_close_kernel();
75988
75989 list_add(&file_ops->list, &ftrace_module_file_list);
75990
75991@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
75992 call->mod = mod;
75993 list_add(&call->list, &ftrace_events);
75994 event_create_dir(call, d_events,
75995- &file_ops->id, &file_ops->enable,
75996- &file_ops->filter, &file_ops->format);
75997+ &mod->trace_id, &mod->trace_enable,
75998+ &mod->trace_filter, &mod->trace_format);
75999 }
76000 }
76001
76002diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
76003index 0acd834..b800b56 100644
76004--- a/kernel/trace/trace_mmiotrace.c
76005+++ b/kernel/trace/trace_mmiotrace.c
76006@@ -23,7 +23,7 @@ struct header_iter {
76007 static struct trace_array *mmio_trace_array;
76008 static bool overrun_detected;
76009 static unsigned long prev_overruns;
76010-static atomic_t dropped_count;
76011+static atomic_unchecked_t dropped_count;
76012
76013 static void mmio_reset_data(struct trace_array *tr)
76014 {
76015@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
76016
76017 static unsigned long count_overruns(struct trace_iterator *iter)
76018 {
76019- unsigned long cnt = atomic_xchg(&dropped_count, 0);
76020+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
76021 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
76022
76023 if (over > prev_overruns)
76024@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
76025 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
76026 sizeof(*entry), 0, pc);
76027 if (!event) {
76028- atomic_inc(&dropped_count);
76029+ atomic_inc_unchecked(&dropped_count);
76030 return;
76031 }
76032 entry = ring_buffer_event_data(event);
76033@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
76034 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
76035 sizeof(*entry), 0, pc);
76036 if (!event) {
76037- atomic_inc(&dropped_count);
76038+ atomic_inc_unchecked(&dropped_count);
76039 return;
76040 }
76041 entry = ring_buffer_event_data(event);
76042diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
76043index b6c12c6..41fdc53 100644
76044--- a/kernel/trace/trace_output.c
76045+++ b/kernel/trace/trace_output.c
76046@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
76047 return 0;
76048 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
76049 if (!IS_ERR(p)) {
76050- p = mangle_path(s->buffer + s->len, p, "\n");
76051+ p = mangle_path(s->buffer + s->len, p, "\n\\");
76052 if (p) {
76053 s->len = p - s->buffer;
76054 return 1;
76055diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
76056index 8504ac7..ecf0adb 100644
76057--- a/kernel/trace/trace_stack.c
76058+++ b/kernel/trace/trace_stack.c
76059@@ -50,7 +50,7 @@ static inline void check_stack(void)
76060 return;
76061
76062 /* we do not handle interrupt stacks yet */
76063- if (!object_is_on_stack(&this_size))
76064+ if (!object_starts_on_stack(&this_size))
76065 return;
76066
76067 local_irq_save(flags);
76068diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
76069index 40cafb0..d5ead43 100644
76070--- a/kernel/trace/trace_workqueue.c
76071+++ b/kernel/trace/trace_workqueue.c
76072@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
76073 int cpu;
76074 pid_t pid;
76075 /* Can be inserted from interrupt or user context, need to be atomic */
76076- atomic_t inserted;
76077+ atomic_unchecked_t inserted;
76078 /*
76079 * Don't need to be atomic, works are serialized in a single workqueue thread
76080 * on a single CPU.
76081@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
76082 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
76083 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
76084 if (node->pid == wq_thread->pid) {
76085- atomic_inc(&node->inserted);
76086+ atomic_inc_unchecked(&node->inserted);
76087 goto found;
76088 }
76089 }
76090@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
76091 tsk = get_pid_task(pid, PIDTYPE_PID);
76092 if (tsk) {
76093 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
76094- atomic_read(&cws->inserted), cws->executed,
76095+ atomic_read_unchecked(&cws->inserted), cws->executed,
76096 tsk->comm);
76097 put_task_struct(tsk);
76098 }
76099diff --git a/kernel/user.c b/kernel/user.c
76100index 1b91701..8795237 100644
76101--- a/kernel/user.c
76102+++ b/kernel/user.c
76103@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76104 spin_lock_irq(&uidhash_lock);
76105 up = uid_hash_find(uid, hashent);
76106 if (up) {
76107+ put_user_ns(ns);
76108 key_put(new->uid_keyring);
76109 key_put(new->session_keyring);
76110 kmem_cache_free(uid_cachep, new);
76111diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76112index 234ceb1..ad74049 100644
76113--- a/lib/Kconfig.debug
76114+++ b/lib/Kconfig.debug
76115@@ -905,7 +905,7 @@ config LATENCYTOP
76116 select STACKTRACE
76117 select SCHEDSTATS
76118 select SCHED_DEBUG
76119- depends on HAVE_LATENCYTOP_SUPPORT
76120+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76121 help
76122 Enable this option if you want to use the LatencyTOP tool
76123 to find out which userspace is blocking on what kernel operations.
76124diff --git a/lib/bitmap.c b/lib/bitmap.c
76125index 7025658..8d14cab 100644
76126--- a/lib/bitmap.c
76127+++ b/lib/bitmap.c
76128@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76129 {
76130 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76131 u32 chunk;
76132- const char __user *ubuf = buf;
76133+ const char __user *ubuf = (const char __force_user *)buf;
76134
76135 bitmap_zero(maskp, nmaskbits);
76136
76137@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76138 {
76139 if (!access_ok(VERIFY_READ, ubuf, ulen))
76140 return -EFAULT;
76141- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76142+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76143 }
76144 EXPORT_SYMBOL(bitmap_parse_user);
76145
76146diff --git a/lib/bug.c b/lib/bug.c
76147index 300e41a..2779eb0 100644
76148--- a/lib/bug.c
76149+++ b/lib/bug.c
76150@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76151 return BUG_TRAP_TYPE_NONE;
76152
76153 bug = find_bug(bugaddr);
76154+ if (!bug)
76155+ return BUG_TRAP_TYPE_NONE;
76156
76157 printk(KERN_EMERG "------------[ cut here ]------------\n");
76158
76159diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76160index 2b413db..e21d207 100644
76161--- a/lib/debugobjects.c
76162+++ b/lib/debugobjects.c
76163@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76164 if (limit > 4)
76165 return;
76166
76167- is_on_stack = object_is_on_stack(addr);
76168+ is_on_stack = object_starts_on_stack(addr);
76169 if (is_on_stack == onstack)
76170 return;
76171
76172diff --git a/lib/devres.c b/lib/devres.c
76173index 72c8909..7543868 100644
76174--- a/lib/devres.c
76175+++ b/lib/devres.c
76176@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76177 {
76178 iounmap(addr);
76179 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76180- (void *)addr));
76181+ (void __force *)addr));
76182 }
76183 EXPORT_SYMBOL(devm_iounmap);
76184
76185@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76186 {
76187 ioport_unmap(addr);
76188 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76189- devm_ioport_map_match, (void *)addr));
76190+ devm_ioport_map_match, (void __force *)addr));
76191 }
76192 EXPORT_SYMBOL(devm_ioport_unmap);
76193
76194diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76195index 084e879..0674448 100644
76196--- a/lib/dma-debug.c
76197+++ b/lib/dma-debug.c
76198@@ -861,7 +861,7 @@ out:
76199
76200 static void check_for_stack(struct device *dev, void *addr)
76201 {
76202- if (object_is_on_stack(addr))
76203+ if (object_starts_on_stack(addr))
76204 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76205 "stack [addr=%p]\n", addr);
76206 }
76207diff --git a/lib/idr.c b/lib/idr.c
76208index eda7ba3..915dfae 100644
76209--- a/lib/idr.c
76210+++ b/lib/idr.c
76211@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76212 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76213
76214 /* if already at the top layer, we need to grow */
76215- if (id >= 1 << (idp->layers * IDR_BITS)) {
76216+ if (id >= (1 << (idp->layers * IDR_BITS))) {
76217 *starting_id = id;
76218 return IDR_NEED_TO_GROW;
76219 }
76220diff --git a/lib/inflate.c b/lib/inflate.c
76221index d102559..4215f31 100644
76222--- a/lib/inflate.c
76223+++ b/lib/inflate.c
76224@@ -266,7 +266,7 @@ static void free(void *where)
76225 malloc_ptr = free_mem_ptr;
76226 }
76227 #else
76228-#define malloc(a) kmalloc(a, GFP_KERNEL)
76229+#define malloc(a) kmalloc((a), GFP_KERNEL)
76230 #define free(a) kfree(a)
76231 #endif
76232
76233diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
76234index bd2bea9..6b3c95e 100644
76235--- a/lib/is_single_threaded.c
76236+++ b/lib/is_single_threaded.c
76237@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
76238 struct task_struct *p, *t;
76239 bool ret;
76240
76241+ if (!mm)
76242+ return true;
76243+
76244 if (atomic_read(&task->signal->live) != 1)
76245 return false;
76246
76247diff --git a/lib/kobject.c b/lib/kobject.c
76248index b512b74..8115eb1 100644
76249--- a/lib/kobject.c
76250+++ b/lib/kobject.c
76251@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76252 return ret;
76253 }
76254
76255-struct sysfs_ops kobj_sysfs_ops = {
76256+const struct sysfs_ops kobj_sysfs_ops = {
76257 .show = kobj_attr_show,
76258 .store = kobj_attr_store,
76259 };
76260@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76261 * If the kset was not able to be created, NULL will be returned.
76262 */
76263 static struct kset *kset_create(const char *name,
76264- struct kset_uevent_ops *uevent_ops,
76265+ const struct kset_uevent_ops *uevent_ops,
76266 struct kobject *parent_kobj)
76267 {
76268 struct kset *kset;
76269@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76270 * If the kset was not able to be created, NULL will be returned.
76271 */
76272 struct kset *kset_create_and_add(const char *name,
76273- struct kset_uevent_ops *uevent_ops,
76274+ const struct kset_uevent_ops *uevent_ops,
76275 struct kobject *parent_kobj)
76276 {
76277 struct kset *kset;
76278diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76279index 507b821..0bf8ed0 100644
76280--- a/lib/kobject_uevent.c
76281+++ b/lib/kobject_uevent.c
76282@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76283 const char *subsystem;
76284 struct kobject *top_kobj;
76285 struct kset *kset;
76286- struct kset_uevent_ops *uevent_ops;
76287+ const struct kset_uevent_ops *uevent_ops;
76288 u64 seq;
76289 int i = 0;
76290 int retval = 0;
76291diff --git a/lib/kref.c b/lib/kref.c
76292index 9ecd6e8..12c94c1 100644
76293--- a/lib/kref.c
76294+++ b/lib/kref.c
76295@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
76296 */
76297 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
76298 {
76299- WARN_ON(release == NULL);
76300+ BUG_ON(release == NULL);
76301 WARN_ON(release == (void (*)(struct kref *))kfree);
76302
76303 if (atomic_dec_and_test(&kref->refcount)) {
76304diff --git a/lib/parser.c b/lib/parser.c
76305index b00d020..1b34325 100644
76306--- a/lib/parser.c
76307+++ b/lib/parser.c
76308@@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
76309 char *buf;
76310 int ret;
76311
76312- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
76313+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
76314 if (!buf)
76315 return -ENOMEM;
76316 memcpy(buf, s->from, s->to - s->from);
76317diff --git a/lib/radix-tree.c b/lib/radix-tree.c
76318index 92cdd99..a8149d7 100644
76319--- a/lib/radix-tree.c
76320+++ b/lib/radix-tree.c
76321@@ -81,7 +81,7 @@ struct radix_tree_preload {
76322 int nr;
76323 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
76324 };
76325-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
76326+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
76327
76328 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
76329 {
76330diff --git a/lib/random32.c b/lib/random32.c
76331index 217d5c4..45aba8a 100644
76332--- a/lib/random32.c
76333+++ b/lib/random32.c
76334@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
76335 */
76336 static inline u32 __seed(u32 x, u32 m)
76337 {
76338- return (x < m) ? x + m : x;
76339+ return (x <= m) ? x + m + 1 : x;
76340 }
76341
76342 /**
76343diff --git a/lib/vsprintf.c b/lib/vsprintf.c
76344index 33bed5e..1477e46 100644
76345--- a/lib/vsprintf.c
76346+++ b/lib/vsprintf.c
76347@@ -16,6 +16,9 @@
76348 * - scnprintf and vscnprintf
76349 */
76350
76351+#ifdef CONFIG_GRKERNSEC_HIDESYM
76352+#define __INCLUDED_BY_HIDESYM 1
76353+#endif
76354 #include <stdarg.h>
76355 #include <linux/module.h>
76356 #include <linux/types.h>
76357@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
76358 return buf;
76359 }
76360
76361-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
76362+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
76363 {
76364 int len, i;
76365
76366 if ((unsigned long)s < PAGE_SIZE)
76367- s = "<NULL>";
76368+ s = "(null)";
76369
76370 len = strnlen(s, spec.precision);
76371
76372@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
76373 unsigned long value = (unsigned long) ptr;
76374 #ifdef CONFIG_KALLSYMS
76375 char sym[KSYM_SYMBOL_LEN];
76376- if (ext != 'f' && ext != 's')
76377+ if (ext != 'f' && ext != 's' && ext != 'a')
76378 sprint_symbol(sym, value);
76379 else
76380 kallsyms_lookup(value, NULL, NULL, NULL, sym);
76381@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
76382 * - 'f' For simple symbolic function names without offset
76383 * - 'S' For symbolic direct pointers with offset
76384 * - 's' For symbolic direct pointers without offset
76385+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
76386+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
76387 * - 'R' For a struct resource pointer, it prints the range of
76388 * addresses (not the name nor the flags)
76389 * - 'M' For a 6-byte MAC address, it prints the address in the
76390@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76391 struct printf_spec spec)
76392 {
76393 if (!ptr)
76394- return string(buf, end, "(null)", spec);
76395+ return string(buf, end, "(nil)", spec);
76396
76397 switch (*fmt) {
76398 case 'F':
76399@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76400 case 's':
76401 /* Fallthrough */
76402 case 'S':
76403+#ifdef CONFIG_GRKERNSEC_HIDESYM
76404+ break;
76405+#else
76406+ return symbol_string(buf, end, ptr, spec, *fmt);
76407+#endif
76408+ case 'a':
76409+ /* Fallthrough */
76410+ case 'A':
76411 return symbol_string(buf, end, ptr, spec, *fmt);
76412 case 'R':
76413 return resource_string(buf, end, ptr, spec);
76414@@ -1445,7 +1458,7 @@ do { \
76415 size_t len;
76416 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
76417 || (unsigned long)save_str < PAGE_SIZE)
76418- save_str = "<NULL>";
76419+ save_str = "(null)";
76420 len = strlen(save_str);
76421 if (str + len + 1 < end)
76422 memcpy(str, save_str, len + 1);
76423@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76424 typeof(type) value; \
76425 if (sizeof(type) == 8) { \
76426 args = PTR_ALIGN(args, sizeof(u32)); \
76427- *(u32 *)&value = *(u32 *)args; \
76428- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
76429+ *(u32 *)&value = *(const u32 *)args; \
76430+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
76431 } else { \
76432 args = PTR_ALIGN(args, sizeof(type)); \
76433- value = *(typeof(type) *)args; \
76434+ value = *(const typeof(type) *)args; \
76435 } \
76436 args += sizeof(type); \
76437 value; \
76438@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76439 const char *str_arg = args;
76440 size_t len = strlen(str_arg);
76441 args += len + 1;
76442- str = string(str, end, (char *)str_arg, spec);
76443+ str = string(str, end, str_arg, spec);
76444 break;
76445 }
76446
76447diff --git a/localversion-grsec b/localversion-grsec
76448new file mode 100644
76449index 0000000..7cd6065
76450--- /dev/null
76451+++ b/localversion-grsec
76452@@ -0,0 +1 @@
76453+-grsec
76454diff --git a/mm/Kconfig b/mm/Kconfig
76455index 2c19c0b..f3c3f83 100644
76456--- a/mm/Kconfig
76457+++ b/mm/Kconfig
76458@@ -228,7 +228,7 @@ config KSM
76459 config DEFAULT_MMAP_MIN_ADDR
76460 int "Low address space to protect from user allocation"
76461 depends on MMU
76462- default 4096
76463+ default 65536
76464 help
76465 This is the portion of low virtual memory which should be protected
76466 from userspace allocation. Keeping a user from writing to low pages
76467diff --git a/mm/backing-dev.c b/mm/backing-dev.c
76468index 67a33a5..094dcf1 100644
76469--- a/mm/backing-dev.c
76470+++ b/mm/backing-dev.c
76471@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
76472 list_add_tail_rcu(&wb->list, &bdi->wb_list);
76473 spin_unlock(&bdi->wb_lock);
76474
76475- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
76476+ tsk->flags |= PF_SWAPWRITE;
76477 set_freezable();
76478
76479 /*
76480@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
76481 * Add the default flusher task that gets created for any bdi
76482 * that has dirty data pending writeout
76483 */
76484-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76485+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76486 {
76487 if (!bdi_cap_writeback_dirty(bdi))
76488 return;
76489diff --git a/mm/filemap.c b/mm/filemap.c
76490index a1fe378..e26702f 100644
76491--- a/mm/filemap.c
76492+++ b/mm/filemap.c
76493@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
76494 struct address_space *mapping = file->f_mapping;
76495
76496 if (!mapping->a_ops->readpage)
76497- return -ENOEXEC;
76498+ return -ENODEV;
76499 file_accessed(file);
76500 vma->vm_ops = &generic_file_vm_ops;
76501 vma->vm_flags |= VM_CAN_NONLINEAR;
76502@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
76503 *pos = i_size_read(inode);
76504
76505 if (limit != RLIM_INFINITY) {
76506+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
76507 if (*pos >= limit) {
76508 send_sig(SIGXFSZ, current, 0);
76509 return -EFBIG;
76510diff --git a/mm/fremap.c b/mm/fremap.c
76511index b6ec85a..a24ac22 100644
76512--- a/mm/fremap.c
76513+++ b/mm/fremap.c
76514@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76515 retry:
76516 vma = find_vma(mm, start);
76517
76518+#ifdef CONFIG_PAX_SEGMEXEC
76519+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
76520+ goto out;
76521+#endif
76522+
76523 /*
76524 * Make sure the vma is shared, that it supports prefaulting,
76525 * and that the remapped range is valid and fully within
76526@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76527 /*
76528 * drop PG_Mlocked flag for over-mapped range
76529 */
76530- unsigned int saved_flags = vma->vm_flags;
76531+ unsigned long saved_flags = vma->vm_flags;
76532 munlock_vma_pages_range(vma, start, start + size);
76533 vma->vm_flags = saved_flags;
76534 }
76535diff --git a/mm/highmem.c b/mm/highmem.c
76536index 9c1e627..5ca9447 100644
76537--- a/mm/highmem.c
76538+++ b/mm/highmem.c
76539@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
76540 * So no dangers, even with speculative execution.
76541 */
76542 page = pte_page(pkmap_page_table[i]);
76543+ pax_open_kernel();
76544 pte_clear(&init_mm, (unsigned long)page_address(page),
76545 &pkmap_page_table[i]);
76546-
76547+ pax_close_kernel();
76548 set_page_address(page, NULL);
76549 need_flush = 1;
76550 }
76551@@ -177,9 +178,11 @@ start:
76552 }
76553 }
76554 vaddr = PKMAP_ADDR(last_pkmap_nr);
76555+
76556+ pax_open_kernel();
76557 set_pte_at(&init_mm, vaddr,
76558 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
76559-
76560+ pax_close_kernel();
76561 pkmap_count[last_pkmap_nr] = 1;
76562 set_page_address(page, (void *)vaddr);
76563
76564diff --git a/mm/hugetlb.c b/mm/hugetlb.c
76565index 5e1e508..ac70275 100644
76566--- a/mm/hugetlb.c
76567+++ b/mm/hugetlb.c
76568@@ -869,6 +869,7 @@ free:
76569 list_del(&page->lru);
76570 enqueue_huge_page(h, page);
76571 }
76572+ spin_unlock(&hugetlb_lock);
76573
76574 /* Free unnecessary surplus pages to the buddy allocator */
76575 if (!list_empty(&surplus_list)) {
76576@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
76577 return 1;
76578 }
76579
76580+#ifdef CONFIG_PAX_SEGMEXEC
76581+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
76582+{
76583+ struct mm_struct *mm = vma->vm_mm;
76584+ struct vm_area_struct *vma_m;
76585+ unsigned long address_m;
76586+ pte_t *ptep_m;
76587+
76588+ vma_m = pax_find_mirror_vma(vma);
76589+ if (!vma_m)
76590+ return;
76591+
76592+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76593+ address_m = address + SEGMEXEC_TASK_SIZE;
76594+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
76595+ get_page(page_m);
76596+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
76597+}
76598+#endif
76599+
76600 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
76601 unsigned long address, pte_t *ptep, pte_t pte,
76602 struct page *pagecache_page)
76603@@ -2004,6 +2025,11 @@ retry_avoidcopy:
76604 huge_ptep_clear_flush(vma, address, ptep);
76605 set_huge_pte_at(mm, address, ptep,
76606 make_huge_pte(vma, new_page, 1));
76607+
76608+#ifdef CONFIG_PAX_SEGMEXEC
76609+ pax_mirror_huge_pte(vma, address, new_page);
76610+#endif
76611+
76612 /* Make the old page be freed below */
76613 new_page = old_page;
76614 }
76615@@ -2135,6 +2161,10 @@ retry:
76616 && (vma->vm_flags & VM_SHARED)));
76617 set_huge_pte_at(mm, address, ptep, new_pte);
76618
76619+#ifdef CONFIG_PAX_SEGMEXEC
76620+ pax_mirror_huge_pte(vma, address, page);
76621+#endif
76622+
76623 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
76624 /* Optimization, do the COW without a second fault */
76625 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
76626@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76627 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
76628 struct hstate *h = hstate_vma(vma);
76629
76630+#ifdef CONFIG_PAX_SEGMEXEC
76631+ struct vm_area_struct *vma_m;
76632+
76633+ vma_m = pax_find_mirror_vma(vma);
76634+ if (vma_m) {
76635+ unsigned long address_m;
76636+
76637+ if (vma->vm_start > vma_m->vm_start) {
76638+ address_m = address;
76639+ address -= SEGMEXEC_TASK_SIZE;
76640+ vma = vma_m;
76641+ h = hstate_vma(vma);
76642+ } else
76643+ address_m = address + SEGMEXEC_TASK_SIZE;
76644+
76645+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
76646+ return VM_FAULT_OOM;
76647+ address_m &= HPAGE_MASK;
76648+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
76649+ }
76650+#endif
76651+
76652 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
76653 if (!ptep)
76654 return VM_FAULT_OOM;
76655diff --git a/mm/internal.h b/mm/internal.h
76656index f03e8e2..7354343 100644
76657--- a/mm/internal.h
76658+++ b/mm/internal.h
76659@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
76660 * in mm/page_alloc.c
76661 */
76662 extern void __free_pages_bootmem(struct page *page, unsigned int order);
76663+extern void free_compound_page(struct page *page);
76664 extern void prep_compound_page(struct page *page, unsigned long order);
76665
76666
76667diff --git a/mm/kmemleak.c b/mm/kmemleak.c
76668index c346660..b47382f 100644
76669--- a/mm/kmemleak.c
76670+++ b/mm/kmemleak.c
76671@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
76672
76673 for (i = 0; i < object->trace_len; i++) {
76674 void *ptr = (void *)object->trace[i];
76675- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
76676+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
76677 }
76678 }
76679
76680diff --git a/mm/maccess.c b/mm/maccess.c
76681index 9073695..1127f348 100644
76682--- a/mm/maccess.c
76683+++ b/mm/maccess.c
76684@@ -14,7 +14,7 @@
76685 * Safely read from address @src to the buffer at @dst. If a kernel fault
76686 * happens, handle that and return -EFAULT.
76687 */
76688-long probe_kernel_read(void *dst, void *src, size_t size)
76689+long probe_kernel_read(void *dst, const void *src, size_t size)
76690 {
76691 long ret;
76692 mm_segment_t old_fs = get_fs();
76693@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
76694 set_fs(KERNEL_DS);
76695 pagefault_disable();
76696 ret = __copy_from_user_inatomic(dst,
76697- (__force const void __user *)src, size);
76698+ (const void __force_user *)src, size);
76699 pagefault_enable();
76700 set_fs(old_fs);
76701
76702@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
76703 * Safely write to address @dst from the buffer at @src. If a kernel fault
76704 * happens, handle that and return -EFAULT.
76705 */
76706-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
76707+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
76708 {
76709 long ret;
76710 mm_segment_t old_fs = get_fs();
76711
76712 set_fs(KERNEL_DS);
76713 pagefault_disable();
76714- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
76715+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
76716 pagefault_enable();
76717 set_fs(old_fs);
76718
76719diff --git a/mm/madvise.c b/mm/madvise.c
76720index 35b1479..499f7d4 100644
76721--- a/mm/madvise.c
76722+++ b/mm/madvise.c
76723@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
76724 pgoff_t pgoff;
76725 unsigned long new_flags = vma->vm_flags;
76726
76727+#ifdef CONFIG_PAX_SEGMEXEC
76728+ struct vm_area_struct *vma_m;
76729+#endif
76730+
76731 switch (behavior) {
76732 case MADV_NORMAL:
76733 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
76734@@ -103,6 +107,13 @@ success:
76735 /*
76736 * vm_flags is protected by the mmap_sem held in write mode.
76737 */
76738+
76739+#ifdef CONFIG_PAX_SEGMEXEC
76740+ vma_m = pax_find_mirror_vma(vma);
76741+ if (vma_m)
76742+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
76743+#endif
76744+
76745 vma->vm_flags = new_flags;
76746
76747 out:
76748@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76749 struct vm_area_struct ** prev,
76750 unsigned long start, unsigned long end)
76751 {
76752+
76753+#ifdef CONFIG_PAX_SEGMEXEC
76754+ struct vm_area_struct *vma_m;
76755+#endif
76756+
76757 *prev = vma;
76758 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
76759 return -EINVAL;
76760@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76761 zap_page_range(vma, start, end - start, &details);
76762 } else
76763 zap_page_range(vma, start, end - start, NULL);
76764+
76765+#ifdef CONFIG_PAX_SEGMEXEC
76766+ vma_m = pax_find_mirror_vma(vma);
76767+ if (vma_m) {
76768+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
76769+ struct zap_details details = {
76770+ .nonlinear_vma = vma_m,
76771+ .last_index = ULONG_MAX,
76772+ };
76773+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
76774+ } else
76775+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
76776+ }
76777+#endif
76778+
76779 return 0;
76780 }
76781
76782@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
76783 if (end < start)
76784 goto out;
76785
76786+#ifdef CONFIG_PAX_SEGMEXEC
76787+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
76788+ if (end > SEGMEXEC_TASK_SIZE)
76789+ goto out;
76790+ } else
76791+#endif
76792+
76793+ if (end > TASK_SIZE)
76794+ goto out;
76795+
76796 error = 0;
76797 if (end == start)
76798 goto out;
76799diff --git a/mm/memory-failure.c b/mm/memory-failure.c
76800index 8aeba53..b4a4198 100644
76801--- a/mm/memory-failure.c
76802+++ b/mm/memory-failure.c
76803@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
76804
76805 int sysctl_memory_failure_recovery __read_mostly = 1;
76806
76807-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76808+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76809
76810 /*
76811 * Send all the processes who have the page mapped an ``action optional''
76812@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
76813 si.si_signo = SIGBUS;
76814 si.si_errno = 0;
76815 si.si_code = BUS_MCEERR_AO;
76816- si.si_addr = (void *)addr;
76817+ si.si_addr = (void __user *)addr;
76818 #ifdef __ARCH_SI_TRAPNO
76819 si.si_trapno = trapno;
76820 #endif
76821@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
76822 return 0;
76823 }
76824
76825- atomic_long_add(1, &mce_bad_pages);
76826+ atomic_long_add_unchecked(1, &mce_bad_pages);
76827
76828 /*
76829 * We need/can do nothing about count=0 pages.
76830diff --git a/mm/memory.c b/mm/memory.c
76831index 6c836d3..48f3264 100644
76832--- a/mm/memory.c
76833+++ b/mm/memory.c
76834@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
76835 return;
76836
76837 pmd = pmd_offset(pud, start);
76838+
76839+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
76840 pud_clear(pud);
76841 pmd_free_tlb(tlb, pmd, start);
76842+#endif
76843+
76844 }
76845
76846 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76847@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76848 if (end - 1 > ceiling - 1)
76849 return;
76850
76851+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
76852 pud = pud_offset(pgd, start);
76853 pgd_clear(pgd);
76854 pud_free_tlb(tlb, pud, start);
76855+#endif
76856+
76857 }
76858
76859 /*
76860@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76861 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
76862 i = 0;
76863
76864- do {
76865+ while (nr_pages) {
76866 struct vm_area_struct *vma;
76867
76868- vma = find_extend_vma(mm, start);
76869+ vma = find_vma(mm, start);
76870 if (!vma && in_gate_area(tsk, start)) {
76871 unsigned long pg = start & PAGE_MASK;
76872 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
76873@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76874 continue;
76875 }
76876
76877- if (!vma ||
76878+ if (!vma || start < vma->vm_start ||
76879 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
76880 !(vm_flags & vma->vm_flags))
76881 return i ? : -EFAULT;
76882@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76883 start += PAGE_SIZE;
76884 nr_pages--;
76885 } while (nr_pages && start < vma->vm_end);
76886- } while (nr_pages);
76887+ }
76888 return i;
76889 }
76890
76891@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
76892 page_add_file_rmap(page);
76893 set_pte_at(mm, addr, pte, mk_pte(page, prot));
76894
76895+#ifdef CONFIG_PAX_SEGMEXEC
76896+ pax_mirror_file_pte(vma, addr, page, ptl);
76897+#endif
76898+
76899 retval = 0;
76900 pte_unmap_unlock(pte, ptl);
76901 return retval;
76902@@ -1560,10 +1571,22 @@ out:
76903 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
76904 struct page *page)
76905 {
76906+
76907+#ifdef CONFIG_PAX_SEGMEXEC
76908+ struct vm_area_struct *vma_m;
76909+#endif
76910+
76911 if (addr < vma->vm_start || addr >= vma->vm_end)
76912 return -EFAULT;
76913 if (!page_count(page))
76914 return -EINVAL;
76915+
76916+#ifdef CONFIG_PAX_SEGMEXEC
76917+ vma_m = pax_find_mirror_vma(vma);
76918+ if (vma_m)
76919+ vma_m->vm_flags |= VM_INSERTPAGE;
76920+#endif
76921+
76922 vma->vm_flags |= VM_INSERTPAGE;
76923 return insert_page(vma, addr, page, vma->vm_page_prot);
76924 }
76925@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
76926 unsigned long pfn)
76927 {
76928 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
76929+ BUG_ON(vma->vm_mirror);
76930
76931 if (addr < vma->vm_start || addr >= vma->vm_end)
76932 return -EFAULT;
76933@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
76934 copy_user_highpage(dst, src, va, vma);
76935 }
76936
76937+#ifdef CONFIG_PAX_SEGMEXEC
76938+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
76939+{
76940+ struct mm_struct *mm = vma->vm_mm;
76941+ spinlock_t *ptl;
76942+ pte_t *pte, entry;
76943+
76944+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
76945+ entry = *pte;
76946+ if (!pte_present(entry)) {
76947+ if (!pte_none(entry)) {
76948+ BUG_ON(pte_file(entry));
76949+ free_swap_and_cache(pte_to_swp_entry(entry));
76950+ pte_clear_not_present_full(mm, address, pte, 0);
76951+ }
76952+ } else {
76953+ struct page *page;
76954+
76955+ flush_cache_page(vma, address, pte_pfn(entry));
76956+ entry = ptep_clear_flush(vma, address, pte);
76957+ BUG_ON(pte_dirty(entry));
76958+ page = vm_normal_page(vma, address, entry);
76959+ if (page) {
76960+ update_hiwater_rss(mm);
76961+ if (PageAnon(page))
76962+ dec_mm_counter(mm, anon_rss);
76963+ else
76964+ dec_mm_counter(mm, file_rss);
76965+ page_remove_rmap(page);
76966+ page_cache_release(page);
76967+ }
76968+ }
76969+ pte_unmap_unlock(pte, ptl);
76970+}
76971+
76972+/* PaX: if vma is mirrored, synchronize the mirror's PTE
76973+ *
76974+ * the ptl of the lower mapped page is held on entry and is not released on exit
76975+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
76976+ */
76977+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
76978+{
76979+ struct mm_struct *mm = vma->vm_mm;
76980+ unsigned long address_m;
76981+ spinlock_t *ptl_m;
76982+ struct vm_area_struct *vma_m;
76983+ pmd_t *pmd_m;
76984+ pte_t *pte_m, entry_m;
76985+
76986+ BUG_ON(!page_m || !PageAnon(page_m));
76987+
76988+ vma_m = pax_find_mirror_vma(vma);
76989+ if (!vma_m)
76990+ return;
76991+
76992+ BUG_ON(!PageLocked(page_m));
76993+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76994+ address_m = address + SEGMEXEC_TASK_SIZE;
76995+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76996+ pte_m = pte_offset_map_nested(pmd_m, address_m);
76997+ ptl_m = pte_lockptr(mm, pmd_m);
76998+ if (ptl != ptl_m) {
76999+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77000+ if (!pte_none(*pte_m))
77001+ goto out;
77002+ }
77003+
77004+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77005+ page_cache_get(page_m);
77006+ page_add_anon_rmap(page_m, vma_m, address_m);
77007+ inc_mm_counter(mm, anon_rss);
77008+ set_pte_at(mm, address_m, pte_m, entry_m);
77009+ update_mmu_cache(vma_m, address_m, entry_m);
77010+out:
77011+ if (ptl != ptl_m)
77012+ spin_unlock(ptl_m);
77013+ pte_unmap_nested(pte_m);
77014+ unlock_page(page_m);
77015+}
77016+
77017+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
77018+{
77019+ struct mm_struct *mm = vma->vm_mm;
77020+ unsigned long address_m;
77021+ spinlock_t *ptl_m;
77022+ struct vm_area_struct *vma_m;
77023+ pmd_t *pmd_m;
77024+ pte_t *pte_m, entry_m;
77025+
77026+ BUG_ON(!page_m || PageAnon(page_m));
77027+
77028+ vma_m = pax_find_mirror_vma(vma);
77029+ if (!vma_m)
77030+ return;
77031+
77032+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77033+ address_m = address + SEGMEXEC_TASK_SIZE;
77034+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77035+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77036+ ptl_m = pte_lockptr(mm, pmd_m);
77037+ if (ptl != ptl_m) {
77038+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77039+ if (!pte_none(*pte_m))
77040+ goto out;
77041+ }
77042+
77043+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
77044+ page_cache_get(page_m);
77045+ page_add_file_rmap(page_m);
77046+ inc_mm_counter(mm, file_rss);
77047+ set_pte_at(mm, address_m, pte_m, entry_m);
77048+ update_mmu_cache(vma_m, address_m, entry_m);
77049+out:
77050+ if (ptl != ptl_m)
77051+ spin_unlock(ptl_m);
77052+ pte_unmap_nested(pte_m);
77053+}
77054+
77055+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
77056+{
77057+ struct mm_struct *mm = vma->vm_mm;
77058+ unsigned long address_m;
77059+ spinlock_t *ptl_m;
77060+ struct vm_area_struct *vma_m;
77061+ pmd_t *pmd_m;
77062+ pte_t *pte_m, entry_m;
77063+
77064+ vma_m = pax_find_mirror_vma(vma);
77065+ if (!vma_m)
77066+ return;
77067+
77068+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
77069+ address_m = address + SEGMEXEC_TASK_SIZE;
77070+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
77071+ pte_m = pte_offset_map_nested(pmd_m, address_m);
77072+ ptl_m = pte_lockptr(mm, pmd_m);
77073+ if (ptl != ptl_m) {
77074+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
77075+ if (!pte_none(*pte_m))
77076+ goto out;
77077+ }
77078+
77079+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
77080+ set_pte_at(mm, address_m, pte_m, entry_m);
77081+out:
77082+ if (ptl != ptl_m)
77083+ spin_unlock(ptl_m);
77084+ pte_unmap_nested(pte_m);
77085+}
77086+
77087+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
77088+{
77089+ struct page *page_m;
77090+ pte_t entry;
77091+
77092+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
77093+ goto out;
77094+
77095+ entry = *pte;
77096+ page_m = vm_normal_page(vma, address, entry);
77097+ if (!page_m)
77098+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
77099+ else if (PageAnon(page_m)) {
77100+ if (pax_find_mirror_vma(vma)) {
77101+ pte_unmap_unlock(pte, ptl);
77102+ lock_page(page_m);
77103+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
77104+ if (pte_same(entry, *pte))
77105+ pax_mirror_anon_pte(vma, address, page_m, ptl);
77106+ else
77107+ unlock_page(page_m);
77108+ }
77109+ } else
77110+ pax_mirror_file_pte(vma, address, page_m, ptl);
77111+
77112+out:
77113+ pte_unmap_unlock(pte, ptl);
77114+}
77115+#endif
77116+
77117 /*
77118 * This routine handles present pages, when users try to write
77119 * to a shared page. It is done by copying the page to a new address
77120@@ -2156,6 +2360,12 @@ gotten:
77121 */
77122 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77123 if (likely(pte_same(*page_table, orig_pte))) {
77124+
77125+#ifdef CONFIG_PAX_SEGMEXEC
77126+ if (pax_find_mirror_vma(vma))
77127+ BUG_ON(!trylock_page(new_page));
77128+#endif
77129+
77130 if (old_page) {
77131 if (!PageAnon(old_page)) {
77132 dec_mm_counter(mm, file_rss);
77133@@ -2207,6 +2417,10 @@ gotten:
77134 page_remove_rmap(old_page);
77135 }
77136
77137+#ifdef CONFIG_PAX_SEGMEXEC
77138+ pax_mirror_anon_pte(vma, address, new_page, ptl);
77139+#endif
77140+
77141 /* Free the old page.. */
77142 new_page = old_page;
77143 ret |= VM_FAULT_WRITE;
77144@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77145 swap_free(entry);
77146 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77147 try_to_free_swap(page);
77148+
77149+#ifdef CONFIG_PAX_SEGMEXEC
77150+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77151+#endif
77152+
77153 unlock_page(page);
77154
77155 if (flags & FAULT_FLAG_WRITE) {
77156@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77157
77158 /* No need to invalidate - it was non-present before */
77159 update_mmu_cache(vma, address, pte);
77160+
77161+#ifdef CONFIG_PAX_SEGMEXEC
77162+ pax_mirror_anon_pte(vma, address, page, ptl);
77163+#endif
77164+
77165 unlock:
77166 pte_unmap_unlock(page_table, ptl);
77167 out:
77168@@ -2632,40 +2856,6 @@ out_release:
77169 }
77170
77171 /*
77172- * This is like a special single-page "expand_{down|up}wards()",
77173- * except we must first make sure that 'address{-|+}PAGE_SIZE'
77174- * doesn't hit another vma.
77175- */
77176-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77177-{
77178- address &= PAGE_MASK;
77179- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77180- struct vm_area_struct *prev = vma->vm_prev;
77181-
77182- /*
77183- * Is there a mapping abutting this one below?
77184- *
77185- * That's only ok if it's the same stack mapping
77186- * that has gotten split..
77187- */
77188- if (prev && prev->vm_end == address)
77189- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77190-
77191- expand_stack(vma, address - PAGE_SIZE);
77192- }
77193- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77194- struct vm_area_struct *next = vma->vm_next;
77195-
77196- /* As VM_GROWSDOWN but s/below/above/ */
77197- if (next && next->vm_start == address + PAGE_SIZE)
77198- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77199-
77200- expand_upwards(vma, address + PAGE_SIZE);
77201- }
77202- return 0;
77203-}
77204-
77205-/*
77206 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77207 * but allow concurrent faults), and pte mapped but not yet locked.
77208 * We return with mmap_sem still held, but pte unmapped and unlocked.
77209@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77210 unsigned long address, pte_t *page_table, pmd_t *pmd,
77211 unsigned int flags)
77212 {
77213- struct page *page;
77214+ struct page *page = NULL;
77215 spinlock_t *ptl;
77216 pte_t entry;
77217
77218- pte_unmap(page_table);
77219-
77220- /* Check if we need to add a guard page to the stack */
77221- if (check_stack_guard_page(vma, address) < 0)
77222- return VM_FAULT_SIGBUS;
77223-
77224- /* Use the zero-page for reads */
77225 if (!(flags & FAULT_FLAG_WRITE)) {
77226 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77227 vma->vm_page_prot));
77228- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77229+ ptl = pte_lockptr(mm, pmd);
77230+ spin_lock(ptl);
77231 if (!pte_none(*page_table))
77232 goto unlock;
77233 goto setpte;
77234 }
77235
77236 /* Allocate our own private page. */
77237+ pte_unmap(page_table);
77238+
77239 if (unlikely(anon_vma_prepare(vma)))
77240 goto oom;
77241 page = alloc_zeroed_user_highpage_movable(vma, address);
77242@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77243 if (!pte_none(*page_table))
77244 goto release;
77245
77246+#ifdef CONFIG_PAX_SEGMEXEC
77247+ if (pax_find_mirror_vma(vma))
77248+ BUG_ON(!trylock_page(page));
77249+#endif
77250+
77251 inc_mm_counter(mm, anon_rss);
77252 page_add_new_anon_rmap(page, vma, address);
77253 setpte:
77254@@ -2720,6 +2911,12 @@ setpte:
77255
77256 /* No need to invalidate - it was non-present before */
77257 update_mmu_cache(vma, address, entry);
77258+
77259+#ifdef CONFIG_PAX_SEGMEXEC
77260+ if (page)
77261+ pax_mirror_anon_pte(vma, address, page, ptl);
77262+#endif
77263+
77264 unlock:
77265 pte_unmap_unlock(page_table, ptl);
77266 return 0;
77267@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77268 */
77269 /* Only go through if we didn't race with anybody else... */
77270 if (likely(pte_same(*page_table, orig_pte))) {
77271+
77272+#ifdef CONFIG_PAX_SEGMEXEC
77273+ if (anon && pax_find_mirror_vma(vma))
77274+ BUG_ON(!trylock_page(page));
77275+#endif
77276+
77277 flush_icache_page(vma, page);
77278 entry = mk_pte(page, vma->vm_page_prot);
77279 if (flags & FAULT_FLAG_WRITE)
77280@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77281
77282 /* no need to invalidate: a not-present page won't be cached */
77283 update_mmu_cache(vma, address, entry);
77284+
77285+#ifdef CONFIG_PAX_SEGMEXEC
77286+ if (anon)
77287+ pax_mirror_anon_pte(vma, address, page, ptl);
77288+ else
77289+ pax_mirror_file_pte(vma, address, page, ptl);
77290+#endif
77291+
77292 } else {
77293 if (charged)
77294 mem_cgroup_uncharge_page(page);
77295@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
77296 if (flags & FAULT_FLAG_WRITE)
77297 flush_tlb_page(vma, address);
77298 }
77299+
77300+#ifdef CONFIG_PAX_SEGMEXEC
77301+ pax_mirror_pte(vma, address, pte, pmd, ptl);
77302+ return 0;
77303+#endif
77304+
77305 unlock:
77306 pte_unmap_unlock(pte, ptl);
77307 return 0;
77308@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77309 pmd_t *pmd;
77310 pte_t *pte;
77311
77312+#ifdef CONFIG_PAX_SEGMEXEC
77313+ struct vm_area_struct *vma_m;
77314+#endif
77315+
77316 __set_current_state(TASK_RUNNING);
77317
77318 count_vm_event(PGFAULT);
77319@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77320 if (unlikely(is_vm_hugetlb_page(vma)))
77321 return hugetlb_fault(mm, vma, address, flags);
77322
77323+#ifdef CONFIG_PAX_SEGMEXEC
77324+ vma_m = pax_find_mirror_vma(vma);
77325+ if (vma_m) {
77326+ unsigned long address_m;
77327+ pgd_t *pgd_m;
77328+ pud_t *pud_m;
77329+ pmd_t *pmd_m;
77330+
77331+ if (vma->vm_start > vma_m->vm_start) {
77332+ address_m = address;
77333+ address -= SEGMEXEC_TASK_SIZE;
77334+ vma = vma_m;
77335+ } else
77336+ address_m = address + SEGMEXEC_TASK_SIZE;
77337+
77338+ pgd_m = pgd_offset(mm, address_m);
77339+ pud_m = pud_alloc(mm, pgd_m, address_m);
77340+ if (!pud_m)
77341+ return VM_FAULT_OOM;
77342+ pmd_m = pmd_alloc(mm, pud_m, address_m);
77343+ if (!pmd_m)
77344+ return VM_FAULT_OOM;
77345+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
77346+ return VM_FAULT_OOM;
77347+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
77348+ }
77349+#endif
77350+
77351 pgd = pgd_offset(mm, address);
77352 pud = pud_alloc(mm, pgd, address);
77353 if (!pud)
77354@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
77355 gate_vma.vm_start = FIXADDR_USER_START;
77356 gate_vma.vm_end = FIXADDR_USER_END;
77357 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
77358- gate_vma.vm_page_prot = __P101;
77359+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
77360 /*
77361 * Make sure the vDSO gets into every core dump.
77362 * Dumping its contents makes post-mortem fully interpretable later
77363diff --git a/mm/mempolicy.c b/mm/mempolicy.c
77364index 3c6e3e2..b1ddbb8 100644
77365--- a/mm/mempolicy.c
77366+++ b/mm/mempolicy.c
77367@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77368 struct vm_area_struct *next;
77369 int err;
77370
77371+#ifdef CONFIG_PAX_SEGMEXEC
77372+ struct vm_area_struct *vma_m;
77373+#endif
77374+
77375 err = 0;
77376 for (; vma && vma->vm_start < end; vma = next) {
77377 next = vma->vm_next;
77378@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77379 err = policy_vma(vma, new);
77380 if (err)
77381 break;
77382+
77383+#ifdef CONFIG_PAX_SEGMEXEC
77384+ vma_m = pax_find_mirror_vma(vma);
77385+ if (vma_m) {
77386+ err = policy_vma(vma_m, new);
77387+ if (err)
77388+ break;
77389+ }
77390+#endif
77391+
77392 }
77393 return err;
77394 }
77395@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
77396
77397 if (end < start)
77398 return -EINVAL;
77399+
77400+#ifdef CONFIG_PAX_SEGMEXEC
77401+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77402+ if (end > SEGMEXEC_TASK_SIZE)
77403+ return -EINVAL;
77404+ } else
77405+#endif
77406+
77407+ if (end > TASK_SIZE)
77408+ return -EINVAL;
77409+
77410 if (end == start)
77411 return 0;
77412
77413@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77414 if (!mm)
77415 return -EINVAL;
77416
77417+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77418+ if (mm != current->mm &&
77419+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77420+ err = -EPERM;
77421+ goto out;
77422+ }
77423+#endif
77424+
77425 /*
77426 * Check if this process has the right to modify the specified
77427 * process. The right exists if the process has administrative
77428@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77429 rcu_read_lock();
77430 tcred = __task_cred(task);
77431 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77432- cred->uid != tcred->suid && cred->uid != tcred->uid &&
77433- !capable(CAP_SYS_NICE)) {
77434+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77435 rcu_read_unlock();
77436 err = -EPERM;
77437 goto out;
77438@@ -2367,6 +2399,12 @@ static inline void check_huge_range(struct vm_area_struct *vma,
77439 }
77440 #endif
77441
77442+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77443+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
77444+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
77445+ _mm->pax_flags & MF_PAX_SEGMEXEC))
77446+#endif
77447+
77448 /*
77449 * Display pages allocated per node and memory policy via /proc.
77450 */
77451@@ -2381,6 +2419,13 @@ int show_numa_map(struct seq_file *m, void *v)
77452 int n;
77453 char buffer[50];
77454
77455+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77456+ if (current->exec_id != m->exec_id) {
77457+ gr_log_badprocpid("numa_maps");
77458+ return 0;
77459+ }
77460+#endif
77461+
77462 if (!mm)
77463 return 0;
77464
77465@@ -2392,11 +2437,15 @@ int show_numa_map(struct seq_file *m, void *v)
77466 mpol_to_str(buffer, sizeof(buffer), pol, 0);
77467 mpol_cond_put(pol);
77468
77469+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77470+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
77471+#else
77472 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
77473+#endif
77474
77475 if (file) {
77476 seq_printf(m, " file=");
77477- seq_path(m, &file->f_path, "\n\t= ");
77478+ seq_path(m, &file->f_path, "\n\t\\= ");
77479 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
77480 seq_printf(m, " heap");
77481 } else if (vma->vm_start <= mm->start_stack &&
77482diff --git a/mm/migrate.c b/mm/migrate.c
77483index aaca868..2ebecdc 100644
77484--- a/mm/migrate.c
77485+++ b/mm/migrate.c
77486@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
77487 unsigned long chunk_start;
77488 int err;
77489
77490+ pax_track_stack();
77491+
77492 task_nodes = cpuset_mems_allowed(task);
77493
77494 err = -ENOMEM;
77495@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77496 if (!mm)
77497 return -EINVAL;
77498
77499+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77500+ if (mm != current->mm &&
77501+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77502+ err = -EPERM;
77503+ goto out;
77504+ }
77505+#endif
77506+
77507 /*
77508 * Check if this process has the right to modify the specified
77509 * process. The right exists if the process has administrative
77510@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77511 rcu_read_lock();
77512 tcred = __task_cred(task);
77513 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77514- cred->uid != tcred->suid && cred->uid != tcred->uid &&
77515- !capable(CAP_SYS_NICE)) {
77516+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77517 rcu_read_unlock();
77518 err = -EPERM;
77519 goto out;
77520diff --git a/mm/mlock.c b/mm/mlock.c
77521index 2d846cf..98134d2 100644
77522--- a/mm/mlock.c
77523+++ b/mm/mlock.c
77524@@ -13,6 +13,7 @@
77525 #include <linux/pagemap.h>
77526 #include <linux/mempolicy.h>
77527 #include <linux/syscalls.h>
77528+#include <linux/security.h>
77529 #include <linux/sched.h>
77530 #include <linux/module.h>
77531 #include <linux/rmap.h>
77532@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
77533 }
77534 }
77535
77536-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
77537-{
77538- return (vma->vm_flags & VM_GROWSDOWN) &&
77539- (vma->vm_start == addr) &&
77540- !vma_stack_continue(vma->vm_prev, addr);
77541-}
77542-
77543 /**
77544 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
77545 * @vma: target vma
77546@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
77547 if (vma->vm_flags & VM_WRITE)
77548 gup_flags |= FOLL_WRITE;
77549
77550- /* We don't try to access the guard page of a stack vma */
77551- if (stack_guard_page(vma, start)) {
77552- addr += PAGE_SIZE;
77553- nr_pages--;
77554- }
77555-
77556 while (nr_pages > 0) {
77557 int i;
77558
77559@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
77560 {
77561 unsigned long nstart, end, tmp;
77562 struct vm_area_struct * vma, * prev;
77563- int error;
77564+ int error = -EINVAL;
77565
77566 len = PAGE_ALIGN(len);
77567 end = start + len;
77568@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
77569 return -EINVAL;
77570 if (end == start)
77571 return 0;
77572+ if (end > TASK_SIZE)
77573+ return -EINVAL;
77574+
77575 vma = find_vma_prev(current->mm, start, &prev);
77576 if (!vma || vma->vm_start > start)
77577 return -ENOMEM;
77578@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
77579 for (nstart = start ; ; ) {
77580 unsigned int newflags;
77581
77582+#ifdef CONFIG_PAX_SEGMEXEC
77583+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77584+ break;
77585+#endif
77586+
77587 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
77588
77589 newflags = vma->vm_flags | VM_LOCKED;
77590@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
77591 lock_limit >>= PAGE_SHIFT;
77592
77593 /* check against resource limits */
77594+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
77595 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
77596 error = do_mlock(start, len, 1);
77597 up_write(&current->mm->mmap_sem);
77598@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
77599 static int do_mlockall(int flags)
77600 {
77601 struct vm_area_struct * vma, * prev = NULL;
77602- unsigned int def_flags = 0;
77603
77604 if (flags & MCL_FUTURE)
77605- def_flags = VM_LOCKED;
77606- current->mm->def_flags = def_flags;
77607+ current->mm->def_flags |= VM_LOCKED;
77608+ else
77609+ current->mm->def_flags &= ~VM_LOCKED;
77610 if (flags == MCL_FUTURE)
77611 goto out;
77612
77613 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
77614- unsigned int newflags;
77615+ unsigned long newflags;
77616
77617+#ifdef CONFIG_PAX_SEGMEXEC
77618+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77619+ break;
77620+#endif
77621+
77622+ BUG_ON(vma->vm_end > TASK_SIZE);
77623 newflags = vma->vm_flags | VM_LOCKED;
77624 if (!(flags & MCL_CURRENT))
77625 newflags &= ~VM_LOCKED;
77626@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
77627 lock_limit >>= PAGE_SHIFT;
77628
77629 ret = -ENOMEM;
77630+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
77631 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
77632 capable(CAP_IPC_LOCK))
77633 ret = do_mlockall(flags);
77634diff --git a/mm/mmap.c b/mm/mmap.c
77635index 4b80cbf..c5ce1df 100644
77636--- a/mm/mmap.c
77637+++ b/mm/mmap.c
77638@@ -45,6 +45,16 @@
77639 #define arch_rebalance_pgtables(addr, len) (addr)
77640 #endif
77641
77642+static inline void verify_mm_writelocked(struct mm_struct *mm)
77643+{
77644+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
77645+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77646+ up_read(&mm->mmap_sem);
77647+ BUG();
77648+ }
77649+#endif
77650+}
77651+
77652 static void unmap_region(struct mm_struct *mm,
77653 struct vm_area_struct *vma, struct vm_area_struct *prev,
77654 unsigned long start, unsigned long end);
77655@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
77656 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
77657 *
77658 */
77659-pgprot_t protection_map[16] = {
77660+pgprot_t protection_map[16] __read_only = {
77661 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
77662 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
77663 };
77664
77665 pgprot_t vm_get_page_prot(unsigned long vm_flags)
77666 {
77667- return __pgprot(pgprot_val(protection_map[vm_flags &
77668+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
77669 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
77670 pgprot_val(arch_vm_get_page_prot(vm_flags)));
77671+
77672+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77673+ if (!nx_enabled &&
77674+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
77675+ (vm_flags & (VM_READ | VM_WRITE)))
77676+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
77677+#endif
77678+
77679+ return prot;
77680 }
77681 EXPORT_SYMBOL(vm_get_page_prot);
77682
77683 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
77684 int sysctl_overcommit_ratio = 50; /* default is 50% */
77685 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
77686+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
77687 struct percpu_counter vm_committed_as;
77688
77689 /*
77690@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
77691 struct vm_area_struct *next = vma->vm_next;
77692
77693 might_sleep();
77694+ BUG_ON(vma->vm_mirror);
77695 if (vma->vm_ops && vma->vm_ops->close)
77696 vma->vm_ops->close(vma);
77697 if (vma->vm_file) {
77698@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
77699 * not page aligned -Ram Gupta
77700 */
77701 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
77702+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
77703 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
77704 (mm->end_data - mm->start_data) > rlim)
77705 goto out;
77706@@ -704,6 +726,12 @@ static int
77707 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
77708 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77709 {
77710+
77711+#ifdef CONFIG_PAX_SEGMEXEC
77712+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
77713+ return 0;
77714+#endif
77715+
77716 if (is_mergeable_vma(vma, file, vm_flags) &&
77717 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77718 if (vma->vm_pgoff == vm_pgoff)
77719@@ -723,6 +751,12 @@ static int
77720 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77721 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77722 {
77723+
77724+#ifdef CONFIG_PAX_SEGMEXEC
77725+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
77726+ return 0;
77727+#endif
77728+
77729 if (is_mergeable_vma(vma, file, vm_flags) &&
77730 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77731 pgoff_t vm_pglen;
77732@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77733 struct vm_area_struct *vma_merge(struct mm_struct *mm,
77734 struct vm_area_struct *prev, unsigned long addr,
77735 unsigned long end, unsigned long vm_flags,
77736- struct anon_vma *anon_vma, struct file *file,
77737+ struct anon_vma *anon_vma, struct file *file,
77738 pgoff_t pgoff, struct mempolicy *policy)
77739 {
77740 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
77741 struct vm_area_struct *area, *next;
77742
77743+#ifdef CONFIG_PAX_SEGMEXEC
77744+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
77745+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
77746+
77747+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
77748+#endif
77749+
77750 /*
77751 * We later require that vma->vm_flags == vm_flags,
77752 * so this tests vma->vm_flags & VM_SPECIAL, too.
77753@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77754 if (next && next->vm_end == end) /* cases 6, 7, 8 */
77755 next = next->vm_next;
77756
77757+#ifdef CONFIG_PAX_SEGMEXEC
77758+ if (prev)
77759+ prev_m = pax_find_mirror_vma(prev);
77760+ if (area)
77761+ area_m = pax_find_mirror_vma(area);
77762+ if (next)
77763+ next_m = pax_find_mirror_vma(next);
77764+#endif
77765+
77766 /*
77767 * Can it merge with the predecessor?
77768 */
77769@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77770 /* cases 1, 6 */
77771 vma_adjust(prev, prev->vm_start,
77772 next->vm_end, prev->vm_pgoff, NULL);
77773- } else /* cases 2, 5, 7 */
77774+
77775+#ifdef CONFIG_PAX_SEGMEXEC
77776+ if (prev_m)
77777+ vma_adjust(prev_m, prev_m->vm_start,
77778+ next_m->vm_end, prev_m->vm_pgoff, NULL);
77779+#endif
77780+
77781+ } else { /* cases 2, 5, 7 */
77782 vma_adjust(prev, prev->vm_start,
77783 end, prev->vm_pgoff, NULL);
77784+
77785+#ifdef CONFIG_PAX_SEGMEXEC
77786+ if (prev_m)
77787+ vma_adjust(prev_m, prev_m->vm_start,
77788+ end_m, prev_m->vm_pgoff, NULL);
77789+#endif
77790+
77791+ }
77792 return prev;
77793 }
77794
77795@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77796 mpol_equal(policy, vma_policy(next)) &&
77797 can_vma_merge_before(next, vm_flags,
77798 anon_vma, file, pgoff+pglen)) {
77799- if (prev && addr < prev->vm_end) /* case 4 */
77800+ if (prev && addr < prev->vm_end) { /* case 4 */
77801 vma_adjust(prev, prev->vm_start,
77802 addr, prev->vm_pgoff, NULL);
77803- else /* cases 3, 8 */
77804+
77805+#ifdef CONFIG_PAX_SEGMEXEC
77806+ if (prev_m)
77807+ vma_adjust(prev_m, prev_m->vm_start,
77808+ addr_m, prev_m->vm_pgoff, NULL);
77809+#endif
77810+
77811+ } else { /* cases 3, 8 */
77812 vma_adjust(area, addr, next->vm_end,
77813 next->vm_pgoff - pglen, NULL);
77814+
77815+#ifdef CONFIG_PAX_SEGMEXEC
77816+ if (area_m)
77817+ vma_adjust(area_m, addr_m, next_m->vm_end,
77818+ next_m->vm_pgoff - pglen, NULL);
77819+#endif
77820+
77821+ }
77822 return area;
77823 }
77824
77825@@ -898,14 +978,11 @@ none:
77826 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
77827 struct file *file, long pages)
77828 {
77829- const unsigned long stack_flags
77830- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
77831-
77832 if (file) {
77833 mm->shared_vm += pages;
77834 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
77835 mm->exec_vm += pages;
77836- } else if (flags & stack_flags)
77837+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
77838 mm->stack_vm += pages;
77839 if (flags & (VM_RESERVED|VM_IO))
77840 mm->reserved_vm += pages;
77841@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77842 * (the exception is when the underlying filesystem is noexec
77843 * mounted, in which case we dont add PROT_EXEC.)
77844 */
77845- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77846+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77847 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
77848 prot |= PROT_EXEC;
77849
77850@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77851 /* Obtain the address to map to. we verify (or select) it and ensure
77852 * that it represents a valid section of the address space.
77853 */
77854- addr = get_unmapped_area(file, addr, len, pgoff, flags);
77855+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
77856 if (addr & ~PAGE_MASK)
77857 return addr;
77858
77859@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77860 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
77861 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
77862
77863+#ifdef CONFIG_PAX_MPROTECT
77864+ if (mm->pax_flags & MF_PAX_MPROTECT) {
77865+#ifndef CONFIG_PAX_MPROTECT_COMPAT
77866+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
77867+ gr_log_rwxmmap(file);
77868+
77869+#ifdef CONFIG_PAX_EMUPLT
77870+ vm_flags &= ~VM_EXEC;
77871+#else
77872+ return -EPERM;
77873+#endif
77874+
77875+ }
77876+
77877+ if (!(vm_flags & VM_EXEC))
77878+ vm_flags &= ~VM_MAYEXEC;
77879+#else
77880+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77881+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77882+#endif
77883+ else
77884+ vm_flags &= ~VM_MAYWRITE;
77885+ }
77886+#endif
77887+
77888+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77889+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
77890+ vm_flags &= ~VM_PAGEEXEC;
77891+#endif
77892+
77893 if (flags & MAP_LOCKED)
77894 if (!can_do_mlock())
77895 return -EPERM;
77896@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77897 locked += mm->locked_vm;
77898 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77899 lock_limit >>= PAGE_SHIFT;
77900+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77901 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
77902 return -EAGAIN;
77903 }
77904@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77905 if (error)
77906 return error;
77907
77908+ if (!gr_acl_handle_mmap(file, prot))
77909+ return -EACCES;
77910+
77911 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
77912 }
77913 EXPORT_SYMBOL(do_mmap_pgoff);
77914@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
77915 */
77916 int vma_wants_writenotify(struct vm_area_struct *vma)
77917 {
77918- unsigned int vm_flags = vma->vm_flags;
77919+ unsigned long vm_flags = vma->vm_flags;
77920
77921 /* If it was private or non-writable, the write bit is already clear */
77922- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
77923+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
77924 return 0;
77925
77926 /* The backer wishes to know when pages are first written to? */
77927@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
77928 unsigned long charged = 0;
77929 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
77930
77931+#ifdef CONFIG_PAX_SEGMEXEC
77932+ struct vm_area_struct *vma_m = NULL;
77933+#endif
77934+
77935+ /*
77936+ * mm->mmap_sem is required to protect against another thread
77937+ * changing the mappings in case we sleep.
77938+ */
77939+ verify_mm_writelocked(mm);
77940+
77941 /* Clear old maps */
77942 error = -ENOMEM;
77943-munmap_back:
77944 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77945 if (vma && vma->vm_start < addr + len) {
77946 if (do_munmap(mm, addr, len))
77947 return -ENOMEM;
77948- goto munmap_back;
77949+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77950+ BUG_ON(vma && vma->vm_start < addr + len);
77951 }
77952
77953 /* Check against address space limit. */
77954@@ -1173,6 +1294,16 @@ munmap_back:
77955 goto unacct_error;
77956 }
77957
77958+#ifdef CONFIG_PAX_SEGMEXEC
77959+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
77960+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77961+ if (!vma_m) {
77962+ error = -ENOMEM;
77963+ goto free_vma;
77964+ }
77965+ }
77966+#endif
77967+
77968 vma->vm_mm = mm;
77969 vma->vm_start = addr;
77970 vma->vm_end = addr + len;
77971@@ -1195,6 +1326,19 @@ munmap_back:
77972 error = file->f_op->mmap(file, vma);
77973 if (error)
77974 goto unmap_and_free_vma;
77975+
77976+#ifdef CONFIG_PAX_SEGMEXEC
77977+ if (vma_m && (vm_flags & VM_EXECUTABLE))
77978+ added_exe_file_vma(mm);
77979+#endif
77980+
77981+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77982+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
77983+ vma->vm_flags |= VM_PAGEEXEC;
77984+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77985+ }
77986+#endif
77987+
77988 if (vm_flags & VM_EXECUTABLE)
77989 added_exe_file_vma(mm);
77990
77991@@ -1218,6 +1362,11 @@ munmap_back:
77992 vma_link(mm, vma, prev, rb_link, rb_parent);
77993 file = vma->vm_file;
77994
77995+#ifdef CONFIG_PAX_SEGMEXEC
77996+ if (vma_m)
77997+ pax_mirror_vma(vma_m, vma);
77998+#endif
77999+
78000 /* Once vma denies write, undo our temporary denial count */
78001 if (correct_wcount)
78002 atomic_inc(&inode->i_writecount);
78003@@ -1226,6 +1375,7 @@ out:
78004
78005 mm->total_vm += len >> PAGE_SHIFT;
78006 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
78007+ track_exec_limit(mm, addr, addr + len, vm_flags);
78008 if (vm_flags & VM_LOCKED) {
78009 /*
78010 * makes pages present; downgrades, drops, reacquires mmap_sem
78011@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
78012 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
78013 charged = 0;
78014 free_vma:
78015+
78016+#ifdef CONFIG_PAX_SEGMEXEC
78017+ if (vma_m)
78018+ kmem_cache_free(vm_area_cachep, vma_m);
78019+#endif
78020+
78021 kmem_cache_free(vm_area_cachep, vma);
78022 unacct_error:
78023 if (charged)
78024@@ -1255,6 +1411,44 @@ unacct_error:
78025 return error;
78026 }
78027
78028+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
78029+{
78030+ if (!vma) {
78031+#ifdef CONFIG_STACK_GROWSUP
78032+ if (addr > sysctl_heap_stack_gap)
78033+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
78034+ else
78035+ vma = find_vma(current->mm, 0);
78036+ if (vma && (vma->vm_flags & VM_GROWSUP))
78037+ return false;
78038+#endif
78039+ return true;
78040+ }
78041+
78042+ if (addr + len > vma->vm_start)
78043+ return false;
78044+
78045+ if (vma->vm_flags & VM_GROWSDOWN)
78046+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
78047+#ifdef CONFIG_STACK_GROWSUP
78048+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
78049+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
78050+#endif
78051+
78052+ return true;
78053+}
78054+
78055+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
78056+{
78057+ if (vma->vm_start < len)
78058+ return -ENOMEM;
78059+ if (!(vma->vm_flags & VM_GROWSDOWN))
78060+ return vma->vm_start - len;
78061+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
78062+ return vma->vm_start - len - sysctl_heap_stack_gap;
78063+ return -ENOMEM;
78064+}
78065+
78066 /* Get an address range which is currently unmapped.
78067 * For shmat() with addr=0.
78068 *
78069@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
78070 if (flags & MAP_FIXED)
78071 return addr;
78072
78073+#ifdef CONFIG_PAX_RANDMMAP
78074+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78075+#endif
78076+
78077 if (addr) {
78078 addr = PAGE_ALIGN(addr);
78079- vma = find_vma(mm, addr);
78080- if (TASK_SIZE - len >= addr &&
78081- (!vma || addr + len <= vma->vm_start))
78082- return addr;
78083+ if (TASK_SIZE - len >= addr) {
78084+ vma = find_vma(mm, addr);
78085+ if (check_heap_stack_gap(vma, addr, len))
78086+ return addr;
78087+ }
78088 }
78089 if (len > mm->cached_hole_size) {
78090- start_addr = addr = mm->free_area_cache;
78091+ start_addr = addr = mm->free_area_cache;
78092 } else {
78093- start_addr = addr = TASK_UNMAPPED_BASE;
78094- mm->cached_hole_size = 0;
78095+ start_addr = addr = mm->mmap_base;
78096+ mm->cached_hole_size = 0;
78097 }
78098
78099 full_search:
78100@@ -1303,34 +1502,40 @@ full_search:
78101 * Start a new search - just in case we missed
78102 * some holes.
78103 */
78104- if (start_addr != TASK_UNMAPPED_BASE) {
78105- addr = TASK_UNMAPPED_BASE;
78106- start_addr = addr;
78107+ if (start_addr != mm->mmap_base) {
78108+ start_addr = addr = mm->mmap_base;
78109 mm->cached_hole_size = 0;
78110 goto full_search;
78111 }
78112 return -ENOMEM;
78113 }
78114- if (!vma || addr + len <= vma->vm_start) {
78115- /*
78116- * Remember the place where we stopped the search:
78117- */
78118- mm->free_area_cache = addr + len;
78119- return addr;
78120- }
78121+ if (check_heap_stack_gap(vma, addr, len))
78122+ break;
78123 if (addr + mm->cached_hole_size < vma->vm_start)
78124 mm->cached_hole_size = vma->vm_start - addr;
78125 addr = vma->vm_end;
78126 }
78127+
78128+ /*
78129+ * Remember the place where we stopped the search:
78130+ */
78131+ mm->free_area_cache = addr + len;
78132+ return addr;
78133 }
78134 #endif
78135
78136 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
78137 {
78138+
78139+#ifdef CONFIG_PAX_SEGMEXEC
78140+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78141+ return;
78142+#endif
78143+
78144 /*
78145 * Is this a new hole at the lowest possible address?
78146 */
78147- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
78148+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
78149 mm->free_area_cache = addr;
78150 mm->cached_hole_size = ~0UL;
78151 }
78152@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78153 {
78154 struct vm_area_struct *vma;
78155 struct mm_struct *mm = current->mm;
78156- unsigned long addr = addr0;
78157+ unsigned long base = mm->mmap_base, addr = addr0;
78158
78159 /* requested length too big for entire address space */
78160 if (len > TASK_SIZE)
78161@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78162 if (flags & MAP_FIXED)
78163 return addr;
78164
78165+#ifdef CONFIG_PAX_RANDMMAP
78166+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78167+#endif
78168+
78169 /* requesting a specific address */
78170 if (addr) {
78171 addr = PAGE_ALIGN(addr);
78172- vma = find_vma(mm, addr);
78173- if (TASK_SIZE - len >= addr &&
78174- (!vma || addr + len <= vma->vm_start))
78175- return addr;
78176+ if (TASK_SIZE - len >= addr) {
78177+ vma = find_vma(mm, addr);
78178+ if (check_heap_stack_gap(vma, addr, len))
78179+ return addr;
78180+ }
78181 }
78182
78183 /* check if free_area_cache is useful for us */
78184@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78185 /* make sure it can fit in the remaining address space */
78186 if (addr > len) {
78187 vma = find_vma(mm, addr-len);
78188- if (!vma || addr <= vma->vm_start)
78189+ if (check_heap_stack_gap(vma, addr - len, len))
78190 /* remember the address as a hint for next time */
78191 return (mm->free_area_cache = addr-len);
78192 }
78193@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78194 * return with success:
78195 */
78196 vma = find_vma(mm, addr);
78197- if (!vma || addr+len <= vma->vm_start)
78198+ if (check_heap_stack_gap(vma, addr, len))
78199 /* remember the address as a hint for next time */
78200 return (mm->free_area_cache = addr);
78201
78202@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78203 mm->cached_hole_size = vma->vm_start - addr;
78204
78205 /* try just below the current vma->vm_start */
78206- addr = vma->vm_start-len;
78207- } while (len < vma->vm_start);
78208+ addr = skip_heap_stack_gap(vma, len);
78209+ } while (!IS_ERR_VALUE(addr));
78210
78211 bottomup:
78212 /*
78213@@ -1414,13 +1624,21 @@ bottomup:
78214 * can happen with large stack limits and large mmap()
78215 * allocations.
78216 */
78217+ mm->mmap_base = TASK_UNMAPPED_BASE;
78218+
78219+#ifdef CONFIG_PAX_RANDMMAP
78220+ if (mm->pax_flags & MF_PAX_RANDMMAP)
78221+ mm->mmap_base += mm->delta_mmap;
78222+#endif
78223+
78224+ mm->free_area_cache = mm->mmap_base;
78225 mm->cached_hole_size = ~0UL;
78226- mm->free_area_cache = TASK_UNMAPPED_BASE;
78227 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78228 /*
78229 * Restore the topdown base:
78230 */
78231- mm->free_area_cache = mm->mmap_base;
78232+ mm->mmap_base = base;
78233+ mm->free_area_cache = base;
78234 mm->cached_hole_size = ~0UL;
78235
78236 return addr;
78237@@ -1429,6 +1647,12 @@ bottomup:
78238
78239 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78240 {
78241+
78242+#ifdef CONFIG_PAX_SEGMEXEC
78243+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78244+ return;
78245+#endif
78246+
78247 /*
78248 * Is this a new hole at the highest possible address?
78249 */
78250@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78251 mm->free_area_cache = addr;
78252
78253 /* dont allow allocations above current base */
78254- if (mm->free_area_cache > mm->mmap_base)
78255+ if (mm->free_area_cache > mm->mmap_base) {
78256 mm->free_area_cache = mm->mmap_base;
78257+ mm->cached_hole_size = ~0UL;
78258+ }
78259 }
78260
78261 unsigned long
78262@@ -1545,6 +1771,27 @@ out:
78263 return prev ? prev->vm_next : vma;
78264 }
78265
78266+#ifdef CONFIG_PAX_SEGMEXEC
78267+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
78268+{
78269+ struct vm_area_struct *vma_m;
78270+
78271+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
78272+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
78273+ BUG_ON(vma->vm_mirror);
78274+ return NULL;
78275+ }
78276+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
78277+ vma_m = vma->vm_mirror;
78278+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
78279+ BUG_ON(vma->vm_file != vma_m->vm_file);
78280+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
78281+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
78282+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
78283+ return vma_m;
78284+}
78285+#endif
78286+
78287 /*
78288 * Verify that the stack growth is acceptable and
78289 * update accounting. This is shared with both the
78290@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78291 return -ENOMEM;
78292
78293 /* Stack limit test */
78294+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
78295 if (size > rlim[RLIMIT_STACK].rlim_cur)
78296 return -ENOMEM;
78297
78298@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78299 unsigned long limit;
78300 locked = mm->locked_vm + grow;
78301 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
78302+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78303 if (locked > limit && !capable(CAP_IPC_LOCK))
78304 return -ENOMEM;
78305 }
78306@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78307 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
78308 * vma is the last one with address > vma->vm_end. Have to extend vma.
78309 */
78310+#ifndef CONFIG_IA64
78311+static
78312+#endif
78313 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78314 {
78315 int error;
78316+ bool locknext;
78317
78318 if (!(vma->vm_flags & VM_GROWSUP))
78319 return -EFAULT;
78320
78321+ /* Also guard against wrapping around to address 0. */
78322+ if (address < PAGE_ALIGN(address+1))
78323+ address = PAGE_ALIGN(address+1);
78324+ else
78325+ return -ENOMEM;
78326+
78327 /*
78328 * We must make sure the anon_vma is allocated
78329 * so that the anon_vma locking is not a noop.
78330 */
78331 if (unlikely(anon_vma_prepare(vma)))
78332 return -ENOMEM;
78333+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
78334+ if (locknext && anon_vma_prepare(vma->vm_next))
78335+ return -ENOMEM;
78336 anon_vma_lock(vma);
78337+ if (locknext)
78338+ anon_vma_lock(vma->vm_next);
78339
78340 /*
78341 * vma->vm_start/vm_end cannot change under us because the caller
78342 * is required to hold the mmap_sem in read mode. We need the
78343- * anon_vma lock to serialize against concurrent expand_stacks.
78344- * Also guard against wrapping around to address 0.
78345+ * anon_vma locks to serialize against concurrent expand_stacks
78346+ * and expand_upwards.
78347 */
78348- if (address < PAGE_ALIGN(address+4))
78349- address = PAGE_ALIGN(address+4);
78350- else {
78351- anon_vma_unlock(vma);
78352- return -ENOMEM;
78353- }
78354 error = 0;
78355
78356 /* Somebody else might have raced and expanded it already */
78357- if (address > vma->vm_end) {
78358+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
78359+ error = -ENOMEM;
78360+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
78361 unsigned long size, grow;
78362
78363 size = address - vma->vm_start;
78364@@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78365 vma->vm_end = address;
78366 }
78367 }
78368+ if (locknext)
78369+ anon_vma_unlock(vma->vm_next);
78370 anon_vma_unlock(vma);
78371 return error;
78372 }
78373@@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_area_struct *vma,
78374 unsigned long address)
78375 {
78376 int error;
78377+ bool lockprev = false;
78378+ struct vm_area_struct *prev;
78379
78380 /*
78381 * We must make sure the anon_vma is allocated
78382@@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_area_struct *vma,
78383 if (error)
78384 return error;
78385
78386+ prev = vma->vm_prev;
78387+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
78388+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
78389+#endif
78390+ if (lockprev && anon_vma_prepare(prev))
78391+ return -ENOMEM;
78392+ if (lockprev)
78393+ anon_vma_lock(prev);
78394+
78395 anon_vma_lock(vma);
78396
78397 /*
78398@@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_area_struct *vma,
78399 */
78400
78401 /* Somebody else might have raced and expanded it already */
78402- if (address < vma->vm_start) {
78403+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
78404+ error = -ENOMEM;
78405+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
78406 unsigned long size, grow;
78407
78408+#ifdef CONFIG_PAX_SEGMEXEC
78409+ struct vm_area_struct *vma_m;
78410+
78411+ vma_m = pax_find_mirror_vma(vma);
78412+#endif
78413+
78414 size = vma->vm_end - address;
78415 grow = (vma->vm_start - address) >> PAGE_SHIFT;
78416
78417@@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_area_struct *vma,
78418 if (!error) {
78419 vma->vm_start = address;
78420 vma->vm_pgoff -= grow;
78421+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
78422+
78423+#ifdef CONFIG_PAX_SEGMEXEC
78424+ if (vma_m) {
78425+ vma_m->vm_start -= grow << PAGE_SHIFT;
78426+ vma_m->vm_pgoff -= grow;
78427+ }
78428+#endif
78429+
78430+
78431 }
78432 }
78433 }
78434 anon_vma_unlock(vma);
78435+ if (lockprev)
78436+ anon_vma_unlock(prev);
78437 return error;
78438 }
78439
78440@@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
78441 do {
78442 long nrpages = vma_pages(vma);
78443
78444+#ifdef CONFIG_PAX_SEGMEXEC
78445+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
78446+ vma = remove_vma(vma);
78447+ continue;
78448+ }
78449+#endif
78450+
78451 mm->total_vm -= nrpages;
78452 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
78453 vma = remove_vma(vma);
78454@@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
78455 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
78456 vma->vm_prev = NULL;
78457 do {
78458+
78459+#ifdef CONFIG_PAX_SEGMEXEC
78460+ if (vma->vm_mirror) {
78461+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
78462+ vma->vm_mirror->vm_mirror = NULL;
78463+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
78464+ vma->vm_mirror = NULL;
78465+ }
78466+#endif
78467+
78468 rb_erase(&vma->vm_rb, &mm->mm_rb);
78469 mm->map_count--;
78470 tail_vma = vma;
78471@@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78472 struct mempolicy *pol;
78473 struct vm_area_struct *new;
78474
78475+#ifdef CONFIG_PAX_SEGMEXEC
78476+ struct vm_area_struct *vma_m, *new_m = NULL;
78477+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
78478+#endif
78479+
78480 if (is_vm_hugetlb_page(vma) && (addr &
78481 ~(huge_page_mask(hstate_vma(vma)))))
78482 return -EINVAL;
78483
78484+#ifdef CONFIG_PAX_SEGMEXEC
78485+ vma_m = pax_find_mirror_vma(vma);
78486+
78487+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78488+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
78489+ if (mm->map_count >= sysctl_max_map_count-1)
78490+ return -ENOMEM;
78491+ } else
78492+#endif
78493+
78494 if (mm->map_count >= sysctl_max_map_count)
78495 return -ENOMEM;
78496
78497@@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78498 if (!new)
78499 return -ENOMEM;
78500
78501+#ifdef CONFIG_PAX_SEGMEXEC
78502+ if (vma_m) {
78503+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78504+ if (!new_m) {
78505+ kmem_cache_free(vm_area_cachep, new);
78506+ return -ENOMEM;
78507+ }
78508+ }
78509+#endif
78510+
78511 /* most fields are the same, copy all, and then fixup */
78512 *new = *vma;
78513
78514@@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78515 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
78516 }
78517
78518+#ifdef CONFIG_PAX_SEGMEXEC
78519+ if (vma_m) {
78520+ *new_m = *vma_m;
78521+ new_m->vm_mirror = new;
78522+ new->vm_mirror = new_m;
78523+
78524+ if (new_below)
78525+ new_m->vm_end = addr_m;
78526+ else {
78527+ new_m->vm_start = addr_m;
78528+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
78529+ }
78530+ }
78531+#endif
78532+
78533 pol = mpol_dup(vma_policy(vma));
78534 if (IS_ERR(pol)) {
78535+
78536+#ifdef CONFIG_PAX_SEGMEXEC
78537+ if (new_m)
78538+ kmem_cache_free(vm_area_cachep, new_m);
78539+#endif
78540+
78541 kmem_cache_free(vm_area_cachep, new);
78542 return PTR_ERR(pol);
78543 }
78544@@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78545 else
78546 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
78547
78548+#ifdef CONFIG_PAX_SEGMEXEC
78549+ if (vma_m) {
78550+ mpol_get(pol);
78551+ vma_set_policy(new_m, pol);
78552+
78553+ if (new_m->vm_file) {
78554+ get_file(new_m->vm_file);
78555+ if (vma_m->vm_flags & VM_EXECUTABLE)
78556+ added_exe_file_vma(mm);
78557+ }
78558+
78559+ if (new_m->vm_ops && new_m->vm_ops->open)
78560+ new_m->vm_ops->open(new_m);
78561+
78562+ if (new_below)
78563+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
78564+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
78565+ else
78566+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
78567+ }
78568+#endif
78569+
78570 return 0;
78571 }
78572
78573@@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78574 * work. This now handles partial unmappings.
78575 * Jeremy Fitzhardinge <jeremy@goop.org>
78576 */
78577+#ifdef CONFIG_PAX_SEGMEXEC
78578 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78579 {
78580+ int ret = __do_munmap(mm, start, len);
78581+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
78582+ return ret;
78583+
78584+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
78585+}
78586+
78587+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78588+#else
78589+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78590+#endif
78591+{
78592 unsigned long end;
78593 struct vm_area_struct *vma, *prev, *last;
78594
78595+ /*
78596+ * mm->mmap_sem is required to protect against another thread
78597+ * changing the mappings in case we sleep.
78598+ */
78599+ verify_mm_writelocked(mm);
78600+
78601 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
78602 return -EINVAL;
78603
78604@@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78605 /* Fix up all other VM information */
78606 remove_vma_list(mm, vma);
78607
78608+ track_exec_limit(mm, start, end, 0UL);
78609+
78610 return 0;
78611 }
78612
78613@@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
78614
78615 profile_munmap(addr);
78616
78617+#ifdef CONFIG_PAX_SEGMEXEC
78618+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
78619+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
78620+ return -EINVAL;
78621+#endif
78622+
78623 down_write(&mm->mmap_sem);
78624 ret = do_munmap(mm, addr, len);
78625 up_write(&mm->mmap_sem);
78626 return ret;
78627 }
78628
78629-static inline void verify_mm_writelocked(struct mm_struct *mm)
78630-{
78631-#ifdef CONFIG_DEBUG_VM
78632- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78633- WARN_ON(1);
78634- up_read(&mm->mmap_sem);
78635- }
78636-#endif
78637-}
78638-
78639 /*
78640 * this is really a simplified "do_mmap". it only handles
78641 * anonymous maps. eventually we may be able to do some
78642@@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78643 struct rb_node ** rb_link, * rb_parent;
78644 pgoff_t pgoff = addr >> PAGE_SHIFT;
78645 int error;
78646+ unsigned long charged;
78647
78648 len = PAGE_ALIGN(len);
78649 if (!len)
78650@@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78651
78652 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
78653
78654+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
78655+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
78656+ flags &= ~VM_EXEC;
78657+
78658+#ifdef CONFIG_PAX_MPROTECT
78659+ if (mm->pax_flags & MF_PAX_MPROTECT)
78660+ flags &= ~VM_MAYEXEC;
78661+#endif
78662+
78663+ }
78664+#endif
78665+
78666 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
78667 if (error & ~PAGE_MASK)
78668 return error;
78669
78670+ charged = len >> PAGE_SHIFT;
78671+
78672 /*
78673 * mlock MCL_FUTURE?
78674 */
78675 if (mm->def_flags & VM_LOCKED) {
78676 unsigned long locked, lock_limit;
78677- locked = len >> PAGE_SHIFT;
78678+ locked = charged;
78679 locked += mm->locked_vm;
78680 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78681 lock_limit >>= PAGE_SHIFT;
78682@@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78683 /*
78684 * Clear old maps. this also does some error checking for us
78685 */
78686- munmap_back:
78687 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78688 if (vma && vma->vm_start < addr + len) {
78689 if (do_munmap(mm, addr, len))
78690 return -ENOMEM;
78691- goto munmap_back;
78692+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78693+ BUG_ON(vma && vma->vm_start < addr + len);
78694 }
78695
78696 /* Check against address space limits *after* clearing old maps... */
78697- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
78698+ if (!may_expand_vm(mm, charged))
78699 return -ENOMEM;
78700
78701 if (mm->map_count > sysctl_max_map_count)
78702 return -ENOMEM;
78703
78704- if (security_vm_enough_memory(len >> PAGE_SHIFT))
78705+ if (security_vm_enough_memory(charged))
78706 return -ENOMEM;
78707
78708 /* Can we just expand an old private anonymous mapping? */
78709@@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78710 */
78711 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78712 if (!vma) {
78713- vm_unacct_memory(len >> PAGE_SHIFT);
78714+ vm_unacct_memory(charged);
78715 return -ENOMEM;
78716 }
78717
78718@@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78719 vma->vm_page_prot = vm_get_page_prot(flags);
78720 vma_link(mm, vma, prev, rb_link, rb_parent);
78721 out:
78722- mm->total_vm += len >> PAGE_SHIFT;
78723+ mm->total_vm += charged;
78724 if (flags & VM_LOCKED) {
78725 if (!mlock_vma_pages_range(vma, addr, addr + len))
78726- mm->locked_vm += (len >> PAGE_SHIFT);
78727+ mm->locked_vm += charged;
78728 }
78729+ track_exec_limit(mm, addr, addr + len, flags);
78730 return addr;
78731 }
78732
78733@@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
78734 * Walk the list again, actually closing and freeing it,
78735 * with preemption enabled, without holding any MM locks.
78736 */
78737- while (vma)
78738+ while (vma) {
78739+ vma->vm_mirror = NULL;
78740 vma = remove_vma(vma);
78741+ }
78742
78743 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
78744 }
78745@@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78746 struct vm_area_struct * __vma, * prev;
78747 struct rb_node ** rb_link, * rb_parent;
78748
78749+#ifdef CONFIG_PAX_SEGMEXEC
78750+ struct vm_area_struct *vma_m = NULL;
78751+#endif
78752+
78753 /*
78754 * The vm_pgoff of a purely anonymous vma should be irrelevant
78755 * until its first write fault, when page's anon_vma and index
78756@@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78757 if ((vma->vm_flags & VM_ACCOUNT) &&
78758 security_vm_enough_memory_mm(mm, vma_pages(vma)))
78759 return -ENOMEM;
78760+
78761+#ifdef CONFIG_PAX_SEGMEXEC
78762+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
78763+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78764+ if (!vma_m)
78765+ return -ENOMEM;
78766+ }
78767+#endif
78768+
78769 vma_link(mm, vma, prev, rb_link, rb_parent);
78770+
78771+#ifdef CONFIG_PAX_SEGMEXEC
78772+ if (vma_m)
78773+ pax_mirror_vma(vma_m, vma);
78774+#endif
78775+
78776 return 0;
78777 }
78778
78779@@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78780 struct rb_node **rb_link, *rb_parent;
78781 struct mempolicy *pol;
78782
78783+ BUG_ON(vma->vm_mirror);
78784+
78785 /*
78786 * If anonymous vma has not yet been faulted, update new pgoff
78787 * to match new location, to increase its chance of merging.
78788@@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78789 return new_vma;
78790 }
78791
78792+#ifdef CONFIG_PAX_SEGMEXEC
78793+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
78794+{
78795+ struct vm_area_struct *prev_m;
78796+ struct rb_node **rb_link_m, *rb_parent_m;
78797+ struct mempolicy *pol_m;
78798+
78799+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
78800+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
78801+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
78802+ *vma_m = *vma;
78803+ pol_m = vma_policy(vma_m);
78804+ mpol_get(pol_m);
78805+ vma_set_policy(vma_m, pol_m);
78806+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
78807+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
78808+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
78809+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
78810+ if (vma_m->vm_file)
78811+ get_file(vma_m->vm_file);
78812+ if (vma_m->vm_ops && vma_m->vm_ops->open)
78813+ vma_m->vm_ops->open(vma_m);
78814+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
78815+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
78816+ vma_m->vm_mirror = vma;
78817+ vma->vm_mirror = vma_m;
78818+}
78819+#endif
78820+
78821 /*
78822 * Return true if the calling process may expand its vm space by the passed
78823 * number of pages
78824@@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
78825 unsigned long lim;
78826
78827 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
78828-
78829+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
78830 if (cur + npages > lim)
78831 return 0;
78832 return 1;
78833@@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_struct *mm,
78834 vma->vm_start = addr;
78835 vma->vm_end = addr + len;
78836
78837+#ifdef CONFIG_PAX_MPROTECT
78838+ if (mm->pax_flags & MF_PAX_MPROTECT) {
78839+#ifndef CONFIG_PAX_MPROTECT_COMPAT
78840+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
78841+ return -EPERM;
78842+ if (!(vm_flags & VM_EXEC))
78843+ vm_flags &= ~VM_MAYEXEC;
78844+#else
78845+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78846+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78847+#endif
78848+ else
78849+ vm_flags &= ~VM_MAYWRITE;
78850+ }
78851+#endif
78852+
78853 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
78854 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78855
78856diff --git a/mm/mprotect.c b/mm/mprotect.c
78857index 1737c7e..c7faeb4 100644
78858--- a/mm/mprotect.c
78859+++ b/mm/mprotect.c
78860@@ -24,10 +24,16 @@
78861 #include <linux/mmu_notifier.h>
78862 #include <linux/migrate.h>
78863 #include <linux/perf_event.h>
78864+
78865+#ifdef CONFIG_PAX_MPROTECT
78866+#include <linux/elf.h>
78867+#endif
78868+
78869 #include <asm/uaccess.h>
78870 #include <asm/pgtable.h>
78871 #include <asm/cacheflush.h>
78872 #include <asm/tlbflush.h>
78873+#include <asm/mmu_context.h>
78874
78875 #ifndef pgprot_modify
78876 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
78877@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
78878 flush_tlb_range(vma, start, end);
78879 }
78880
78881+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
78882+/* called while holding the mmap semaphor for writing except stack expansion */
78883+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
78884+{
78885+ unsigned long oldlimit, newlimit = 0UL;
78886+
78887+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
78888+ return;
78889+
78890+ spin_lock(&mm->page_table_lock);
78891+ oldlimit = mm->context.user_cs_limit;
78892+ if ((prot & VM_EXEC) && oldlimit < end)
78893+ /* USER_CS limit moved up */
78894+ newlimit = end;
78895+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
78896+ /* USER_CS limit moved down */
78897+ newlimit = start;
78898+
78899+ if (newlimit) {
78900+ mm->context.user_cs_limit = newlimit;
78901+
78902+#ifdef CONFIG_SMP
78903+ wmb();
78904+ cpus_clear(mm->context.cpu_user_cs_mask);
78905+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
78906+#endif
78907+
78908+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
78909+ }
78910+ spin_unlock(&mm->page_table_lock);
78911+ if (newlimit == end) {
78912+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
78913+
78914+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
78915+ if (is_vm_hugetlb_page(vma))
78916+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
78917+ else
78918+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
78919+ }
78920+}
78921+#endif
78922+
78923 int
78924 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78925 unsigned long start, unsigned long end, unsigned long newflags)
78926@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78927 int error;
78928 int dirty_accountable = 0;
78929
78930+#ifdef CONFIG_PAX_SEGMEXEC
78931+ struct vm_area_struct *vma_m = NULL;
78932+ unsigned long start_m, end_m;
78933+
78934+ start_m = start + SEGMEXEC_TASK_SIZE;
78935+ end_m = end + SEGMEXEC_TASK_SIZE;
78936+#endif
78937+
78938 if (newflags == oldflags) {
78939 *pprev = vma;
78940 return 0;
78941 }
78942
78943+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
78944+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
78945+
78946+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
78947+ return -ENOMEM;
78948+
78949+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
78950+ return -ENOMEM;
78951+ }
78952+
78953 /*
78954 * If we make a private mapping writable we increase our commit;
78955 * but (without finer accounting) cannot reduce our commit if we
78956@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78957 }
78958 }
78959
78960+#ifdef CONFIG_PAX_SEGMEXEC
78961+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
78962+ if (start != vma->vm_start) {
78963+ error = split_vma(mm, vma, start, 1);
78964+ if (error)
78965+ goto fail;
78966+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
78967+ *pprev = (*pprev)->vm_next;
78968+ }
78969+
78970+ if (end != vma->vm_end) {
78971+ error = split_vma(mm, vma, end, 0);
78972+ if (error)
78973+ goto fail;
78974+ }
78975+
78976+ if (pax_find_mirror_vma(vma)) {
78977+ error = __do_munmap(mm, start_m, end_m - start_m);
78978+ if (error)
78979+ goto fail;
78980+ } else {
78981+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78982+ if (!vma_m) {
78983+ error = -ENOMEM;
78984+ goto fail;
78985+ }
78986+ vma->vm_flags = newflags;
78987+ pax_mirror_vma(vma_m, vma);
78988+ }
78989+ }
78990+#endif
78991+
78992 /*
78993 * First try to merge with previous and/or next vma.
78994 */
78995@@ -195,9 +293,21 @@ success:
78996 * vm_flags and vm_page_prot are protected by the mmap_sem
78997 * held in write mode.
78998 */
78999+
79000+#ifdef CONFIG_PAX_SEGMEXEC
79001+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
79002+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
79003+#endif
79004+
79005 vma->vm_flags = newflags;
79006+
79007+#ifdef CONFIG_PAX_MPROTECT
79008+ if (mm->binfmt && mm->binfmt->handle_mprotect)
79009+ mm->binfmt->handle_mprotect(vma, newflags);
79010+#endif
79011+
79012 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
79013- vm_get_page_prot(newflags));
79014+ vm_get_page_prot(vma->vm_flags));
79015
79016 if (vma_wants_writenotify(vma)) {
79017 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
79018@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79019 end = start + len;
79020 if (end <= start)
79021 return -ENOMEM;
79022+
79023+#ifdef CONFIG_PAX_SEGMEXEC
79024+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
79025+ if (end > SEGMEXEC_TASK_SIZE)
79026+ return -EINVAL;
79027+ } else
79028+#endif
79029+
79030+ if (end > TASK_SIZE)
79031+ return -EINVAL;
79032+
79033 if (!arch_validate_prot(prot))
79034 return -EINVAL;
79035
79036@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79037 /*
79038 * Does the application expect PROT_READ to imply PROT_EXEC:
79039 */
79040- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
79041+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
79042 prot |= PROT_EXEC;
79043
79044 vm_flags = calc_vm_prot_bits(prot);
79045@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79046 if (start > vma->vm_start)
79047 prev = vma;
79048
79049+#ifdef CONFIG_PAX_MPROTECT
79050+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
79051+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
79052+#endif
79053+
79054 for (nstart = start ; ; ) {
79055 unsigned long newflags;
79056
79057@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79058
79059 /* newflags >> 4 shift VM_MAY% in place of VM_% */
79060 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
79061+ if (prot & (PROT_WRITE | PROT_EXEC))
79062+ gr_log_rwxmprotect(vma->vm_file);
79063+
79064+ error = -EACCES;
79065+ goto out;
79066+ }
79067+
79068+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
79069 error = -EACCES;
79070 goto out;
79071 }
79072@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
79073 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
79074 if (error)
79075 goto out;
79076+
79077+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
79078+
79079 nstart = tmp;
79080
79081 if (nstart < prev->vm_end)
79082diff --git a/mm/mremap.c b/mm/mremap.c
79083index 3e98d79..1706cec 100644
79084--- a/mm/mremap.c
79085+++ b/mm/mremap.c
79086@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
79087 continue;
79088 pte = ptep_clear_flush(vma, old_addr, old_pte);
79089 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
79090+
79091+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
79092+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
79093+ pte = pte_exprotect(pte);
79094+#endif
79095+
79096 set_pte_at(mm, new_addr, new_pte, pte);
79097 }
79098
79099@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
79100 if (is_vm_hugetlb_page(vma))
79101 goto Einval;
79102
79103+#ifdef CONFIG_PAX_SEGMEXEC
79104+ if (pax_find_mirror_vma(vma))
79105+ goto Einval;
79106+#endif
79107+
79108 /* We can't remap across vm area boundaries */
79109 if (old_len > vma->vm_end - addr)
79110 goto Efault;
79111@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
79112 unsigned long ret = -EINVAL;
79113 unsigned long charged = 0;
79114 unsigned long map_flags;
79115+ unsigned long pax_task_size = TASK_SIZE;
79116
79117 if (new_addr & ~PAGE_MASK)
79118 goto out;
79119
79120- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
79121+#ifdef CONFIG_PAX_SEGMEXEC
79122+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
79123+ pax_task_size = SEGMEXEC_TASK_SIZE;
79124+#endif
79125+
79126+ pax_task_size -= PAGE_SIZE;
79127+
79128+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
79129 goto out;
79130
79131 /* Check if the location we're moving into overlaps the
79132 * old location at all, and fail if it does.
79133 */
79134- if ((new_addr <= addr) && (new_addr+new_len) > addr)
79135- goto out;
79136-
79137- if ((addr <= new_addr) && (addr+old_len) > new_addr)
79138+ if (addr + old_len > new_addr && new_addr + new_len > addr)
79139 goto out;
79140
79141 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79142@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
79143 struct vm_area_struct *vma;
79144 unsigned long ret = -EINVAL;
79145 unsigned long charged = 0;
79146+ unsigned long pax_task_size = TASK_SIZE;
79147
79148 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
79149 goto out;
79150@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79151 if (!new_len)
79152 goto out;
79153
79154+#ifdef CONFIG_PAX_SEGMEXEC
79155+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
79156+ pax_task_size = SEGMEXEC_TASK_SIZE;
79157+#endif
79158+
79159+ pax_task_size -= PAGE_SIZE;
79160+
79161+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79162+ old_len > pax_task_size || addr > pax_task_size-old_len)
79163+ goto out;
79164+
79165 if (flags & MREMAP_FIXED) {
79166 if (flags & MREMAP_MAYMOVE)
79167 ret = mremap_to(addr, old_len, new_addr, new_len);
79168@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79169 addr + new_len);
79170 }
79171 ret = addr;
79172+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79173 goto out;
79174 }
79175 }
79176@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79177 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79178 if (ret)
79179 goto out;
79180+
79181+ map_flags = vma->vm_flags;
79182 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79183+ if (!(ret & ~PAGE_MASK)) {
79184+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79185+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79186+ }
79187 }
79188 out:
79189 if (ret & ~PAGE_MASK)
79190diff --git a/mm/nommu.c b/mm/nommu.c
79191index 406e8d4..53970d3 100644
79192--- a/mm/nommu.c
79193+++ b/mm/nommu.c
79194@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79195 int sysctl_overcommit_ratio = 50; /* default is 50% */
79196 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79197 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79198-int heap_stack_gap = 0;
79199
79200 atomic_long_t mmap_pages_allocated;
79201
79202@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79203 EXPORT_SYMBOL(find_vma);
79204
79205 /*
79206- * find a VMA
79207- * - we don't extend stack VMAs under NOMMU conditions
79208- */
79209-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79210-{
79211- return find_vma(mm, addr);
79212-}
79213-
79214-/*
79215 * expand a stack to a given address
79216 * - not supported under NOMMU conditions
79217 */
79218diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79219index 3ecab7e..594a471 100644
79220--- a/mm/page_alloc.c
79221+++ b/mm/page_alloc.c
79222@@ -289,7 +289,7 @@ out:
79223 * This usage means that zero-order pages may not be compound.
79224 */
79225
79226-static void free_compound_page(struct page *page)
79227+void free_compound_page(struct page *page)
79228 {
79229 __free_pages_ok(page, compound_order(page));
79230 }
79231@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79232 int bad = 0;
79233 int wasMlocked = __TestClearPageMlocked(page);
79234
79235+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79236+ unsigned long index = 1UL << order;
79237+#endif
79238+
79239 kmemcheck_free_shadow(page, order);
79240
79241 for (i = 0 ; i < (1 << order) ; ++i)
79242@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79243 debug_check_no_obj_freed(page_address(page),
79244 PAGE_SIZE << order);
79245 }
79246+
79247+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79248+ for (; index; --index)
79249+ sanitize_highpage(page + index - 1);
79250+#endif
79251+
79252 arch_free_page(page, order);
79253 kernel_map_pages(page, 1 << order, 0);
79254
79255@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
79256 arch_alloc_page(page, order);
79257 kernel_map_pages(page, 1 << order, 1);
79258
79259+#ifndef CONFIG_PAX_MEMORY_SANITIZE
79260 if (gfp_flags & __GFP_ZERO)
79261 prep_zero_page(page, order, gfp_flags);
79262+#endif
79263
79264 if (order && (gfp_flags & __GFP_COMP))
79265 prep_compound_page(page, order);
79266@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
79267 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
79268 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
79269 }
79270+
79271+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79272+ sanitize_highpage(page);
79273+#endif
79274+
79275 arch_free_page(page, 0);
79276 kernel_map_pages(page, 1, 0);
79277
79278@@ -2179,6 +2196,8 @@ void show_free_areas(void)
79279 int cpu;
79280 struct zone *zone;
79281
79282+ pax_track_stack();
79283+
79284 for_each_populated_zone(zone) {
79285 show_node(zone);
79286 printk("%s per-cpu:\n", zone->name);
79287@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
79288 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
79289 }
79290 #else
79291-static void inline setup_usemap(struct pglist_data *pgdat,
79292+static inline void setup_usemap(struct pglist_data *pgdat,
79293 struct zone *zone, unsigned long zonesize) {}
79294 #endif /* CONFIG_SPARSEMEM */
79295
79296diff --git a/mm/percpu.c b/mm/percpu.c
79297index c90614a..5f7b7b8 100644
79298--- a/mm/percpu.c
79299+++ b/mm/percpu.c
79300@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
79301 static unsigned int pcpu_high_unit_cpu __read_mostly;
79302
79303 /* the address of the first chunk which starts with the kernel static area */
79304-void *pcpu_base_addr __read_mostly;
79305+void *pcpu_base_addr __read_only;
79306 EXPORT_SYMBOL_GPL(pcpu_base_addr);
79307
79308 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
79309diff --git a/mm/rmap.c b/mm/rmap.c
79310index dd43373..d848cd7 100644
79311--- a/mm/rmap.c
79312+++ b/mm/rmap.c
79313@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
79314 /* page_table_lock to protect against threads */
79315 spin_lock(&mm->page_table_lock);
79316 if (likely(!vma->anon_vma)) {
79317+
79318+#ifdef CONFIG_PAX_SEGMEXEC
79319+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
79320+
79321+ if (vma_m) {
79322+ BUG_ON(vma_m->anon_vma);
79323+ vma_m->anon_vma = anon_vma;
79324+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
79325+ }
79326+#endif
79327+
79328 vma->anon_vma = anon_vma;
79329 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
79330 allocated = NULL;
79331diff --git a/mm/shmem.c b/mm/shmem.c
79332index 3e0005b..1d659a8 100644
79333--- a/mm/shmem.c
79334+++ b/mm/shmem.c
79335@@ -31,7 +31,7 @@
79336 #include <linux/swap.h>
79337 #include <linux/ima.h>
79338
79339-static struct vfsmount *shm_mnt;
79340+struct vfsmount *shm_mnt;
79341
79342 #ifdef CONFIG_SHMEM
79343 /*
79344@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
79345 goto unlock;
79346 }
79347 entry = shmem_swp_entry(info, index, NULL);
79348+ if (!entry)
79349+ goto unlock;
79350 if (entry->val) {
79351 /*
79352 * The more uptodate page coming down from a stacked
79353@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
79354 struct vm_area_struct pvma;
79355 struct page *page;
79356
79357+ pax_track_stack();
79358+
79359 spol = mpol_cond_copy(&mpol,
79360 mpol_shared_policy_lookup(&info->policy, idx));
79361
79362@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
79363
79364 info = SHMEM_I(inode);
79365 inode->i_size = len-1;
79366- if (len <= (char *)inode - (char *)info) {
79367+ if (len <= (char *)inode - (char *)info && len <= 64) {
79368 /* do it inline */
79369 memcpy(info, symname, len);
79370 inode->i_op = &shmem_symlink_inline_operations;
79371@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
79372 int err = -ENOMEM;
79373
79374 /* Round up to L1_CACHE_BYTES to resist false sharing */
79375- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
79376- L1_CACHE_BYTES), GFP_KERNEL);
79377+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
79378 if (!sbinfo)
79379 return -ENOMEM;
79380
79381diff --git a/mm/slab.c b/mm/slab.c
79382index c8d466a..909e01e 100644
79383--- a/mm/slab.c
79384+++ b/mm/slab.c
79385@@ -174,7 +174,7 @@
79386
79387 /* Legal flag mask for kmem_cache_create(). */
79388 #if DEBUG
79389-# define CREATE_MASK (SLAB_RED_ZONE | \
79390+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
79391 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
79392 SLAB_CACHE_DMA | \
79393 SLAB_STORE_USER | \
79394@@ -182,7 +182,7 @@
79395 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79396 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
79397 #else
79398-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
79399+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
79400 SLAB_CACHE_DMA | \
79401 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
79402 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79403@@ -308,7 +308,7 @@ struct kmem_list3 {
79404 * Need this for bootstrapping a per node allocator.
79405 */
79406 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
79407-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
79408+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
79409 #define CACHE_CACHE 0
79410 #define SIZE_AC MAX_NUMNODES
79411 #define SIZE_L3 (2 * MAX_NUMNODES)
79412@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
79413 if ((x)->max_freeable < i) \
79414 (x)->max_freeable = i; \
79415 } while (0)
79416-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
79417-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
79418-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
79419-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
79420+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
79421+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
79422+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
79423+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
79424 #else
79425 #define STATS_INC_ACTIVE(x) do { } while (0)
79426 #define STATS_DEC_ACTIVE(x) do { } while (0)
79427@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
79428 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
79429 */
79430 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
79431- const struct slab *slab, void *obj)
79432+ const struct slab *slab, const void *obj)
79433 {
79434 u32 offset = (obj - slab->s_mem);
79435 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
79436@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
79437 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
79438 sizes[INDEX_AC].cs_size,
79439 ARCH_KMALLOC_MINALIGN,
79440- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79441+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79442 NULL);
79443
79444 if (INDEX_AC != INDEX_L3) {
79445@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
79446 kmem_cache_create(names[INDEX_L3].name,
79447 sizes[INDEX_L3].cs_size,
79448 ARCH_KMALLOC_MINALIGN,
79449- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79450+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79451 NULL);
79452 }
79453
79454@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
79455 sizes->cs_cachep = kmem_cache_create(names->name,
79456 sizes->cs_size,
79457 ARCH_KMALLOC_MINALIGN,
79458- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79459+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79460 NULL);
79461 }
79462 #ifdef CONFIG_ZONE_DMA
79463@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
79464 }
79465 /* cpu stats */
79466 {
79467- unsigned long allochit = atomic_read(&cachep->allochit);
79468- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
79469- unsigned long freehit = atomic_read(&cachep->freehit);
79470- unsigned long freemiss = atomic_read(&cachep->freemiss);
79471+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
79472+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
79473+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
79474+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
79475
79476 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
79477 allochit, allocmiss, freehit, freemiss);
79478@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
79479
79480 static int __init slab_proc_init(void)
79481 {
79482- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
79483+ mode_t gr_mode = S_IRUGO;
79484+
79485+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79486+ gr_mode = S_IRUSR;
79487+#endif
79488+
79489+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
79490 #ifdef CONFIG_DEBUG_SLAB_LEAK
79491- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
79492+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
79493 #endif
79494 return 0;
79495 }
79496 module_init(slab_proc_init);
79497 #endif
79498
79499+void check_object_size(const void *ptr, unsigned long n, bool to)
79500+{
79501+
79502+#ifdef CONFIG_PAX_USERCOPY
79503+ struct page *page;
79504+ struct kmem_cache *cachep = NULL;
79505+ struct slab *slabp;
79506+ unsigned int objnr;
79507+ unsigned long offset;
79508+ const char *type;
79509+
79510+ if (!n)
79511+ return;
79512+
79513+ type = "<null>";
79514+ if (ZERO_OR_NULL_PTR(ptr))
79515+ goto report;
79516+
79517+ if (!virt_addr_valid(ptr))
79518+ return;
79519+
79520+ page = virt_to_head_page(ptr);
79521+
79522+ type = "<process stack>";
79523+ if (!PageSlab(page)) {
79524+ if (object_is_on_stack(ptr, n) == -1)
79525+ goto report;
79526+ return;
79527+ }
79528+
79529+ cachep = page_get_cache(page);
79530+ type = cachep->name;
79531+ if (!(cachep->flags & SLAB_USERCOPY))
79532+ goto report;
79533+
79534+ slabp = page_get_slab(page);
79535+ objnr = obj_to_index(cachep, slabp, ptr);
79536+ BUG_ON(objnr >= cachep->num);
79537+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
79538+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
79539+ return;
79540+
79541+report:
79542+ pax_report_usercopy(ptr, n, to, type);
79543+#endif
79544+
79545+}
79546+EXPORT_SYMBOL(check_object_size);
79547+
79548 /**
79549 * ksize - get the actual amount of memory allocated for a given object
79550 * @objp: Pointer to the object
79551diff --git a/mm/slob.c b/mm/slob.c
79552index 837ebd6..4712174 100644
79553--- a/mm/slob.c
79554+++ b/mm/slob.c
79555@@ -29,7 +29,7 @@
79556 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
79557 * alloc_pages() directly, allocating compound pages so the page order
79558 * does not have to be separately tracked, and also stores the exact
79559- * allocation size in page->private so that it can be used to accurately
79560+ * allocation size in slob_page->size so that it can be used to accurately
79561 * provide ksize(). These objects are detected in kfree() because slob_page()
79562 * is false for them.
79563 *
79564@@ -58,6 +58,7 @@
79565 */
79566
79567 #include <linux/kernel.h>
79568+#include <linux/sched.h>
79569 #include <linux/slab.h>
79570 #include <linux/mm.h>
79571 #include <linux/swap.h> /* struct reclaim_state */
79572@@ -100,7 +101,8 @@ struct slob_page {
79573 unsigned long flags; /* mandatory */
79574 atomic_t _count; /* mandatory */
79575 slobidx_t units; /* free units left in page */
79576- unsigned long pad[2];
79577+ unsigned long pad[1];
79578+ unsigned long size; /* size when >=PAGE_SIZE */
79579 slob_t *free; /* first free slob_t in page */
79580 struct list_head list; /* linked list of free pages */
79581 };
79582@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
79583 */
79584 static inline int is_slob_page(struct slob_page *sp)
79585 {
79586- return PageSlab((struct page *)sp);
79587+ return PageSlab((struct page *)sp) && !sp->size;
79588 }
79589
79590 static inline void set_slob_page(struct slob_page *sp)
79591@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
79592
79593 static inline struct slob_page *slob_page(const void *addr)
79594 {
79595- return (struct slob_page *)virt_to_page(addr);
79596+ return (struct slob_page *)virt_to_head_page(addr);
79597 }
79598
79599 /*
79600@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
79601 /*
79602 * Return the size of a slob block.
79603 */
79604-static slobidx_t slob_units(slob_t *s)
79605+static slobidx_t slob_units(const slob_t *s)
79606 {
79607 if (s->units > 0)
79608 return s->units;
79609@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
79610 /*
79611 * Return the next free slob block pointer after this one.
79612 */
79613-static slob_t *slob_next(slob_t *s)
79614+static slob_t *slob_next(const slob_t *s)
79615 {
79616 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
79617 slobidx_t next;
79618@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
79619 /*
79620 * Returns true if s is the last free block in its page.
79621 */
79622-static int slob_last(slob_t *s)
79623+static int slob_last(const slob_t *s)
79624 {
79625 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
79626 }
79627@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
79628 if (!page)
79629 return NULL;
79630
79631+ set_slob_page(page);
79632 return page_address(page);
79633 }
79634
79635@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
79636 if (!b)
79637 return NULL;
79638 sp = slob_page(b);
79639- set_slob_page(sp);
79640
79641 spin_lock_irqsave(&slob_lock, flags);
79642 sp->units = SLOB_UNITS(PAGE_SIZE);
79643 sp->free = b;
79644+ sp->size = 0;
79645 INIT_LIST_HEAD(&sp->list);
79646 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
79647 set_slob_page_free(sp, slob_list);
79648@@ -475,10 +478,9 @@ out:
79649 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
79650 #endif
79651
79652-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79653+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
79654 {
79655- unsigned int *m;
79656- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79657+ slob_t *m;
79658 void *ret;
79659
79660 lockdep_trace_alloc(gfp);
79661@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79662
79663 if (!m)
79664 return NULL;
79665- *m = size;
79666+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
79667+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
79668+ m[0].units = size;
79669+ m[1].units = align;
79670 ret = (void *)m + align;
79671
79672 trace_kmalloc_node(_RET_IP_, ret,
79673@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79674
79675 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
79676 if (ret) {
79677- struct page *page;
79678- page = virt_to_page(ret);
79679- page->private = size;
79680+ struct slob_page *sp;
79681+ sp = slob_page(ret);
79682+ sp->size = size;
79683 }
79684
79685 trace_kmalloc_node(_RET_IP_, ret,
79686 size, PAGE_SIZE << order, gfp, node);
79687 }
79688
79689- kmemleak_alloc(ret, size, 1, gfp);
79690+ return ret;
79691+}
79692+
79693+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79694+{
79695+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79696+ void *ret = __kmalloc_node_align(size, gfp, node, align);
79697+
79698+ if (!ZERO_OR_NULL_PTR(ret))
79699+ kmemleak_alloc(ret, size, 1, gfp);
79700 return ret;
79701 }
79702 EXPORT_SYMBOL(__kmalloc_node);
79703@@ -528,13 +542,92 @@ void kfree(const void *block)
79704 sp = slob_page(block);
79705 if (is_slob_page(sp)) {
79706 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79707- unsigned int *m = (unsigned int *)(block - align);
79708- slob_free(m, *m + align);
79709- } else
79710+ slob_t *m = (slob_t *)(block - align);
79711+ slob_free(m, m[0].units + align);
79712+ } else {
79713+ clear_slob_page(sp);
79714+ free_slob_page(sp);
79715+ sp->size = 0;
79716 put_page(&sp->page);
79717+ }
79718 }
79719 EXPORT_SYMBOL(kfree);
79720
79721+void check_object_size(const void *ptr, unsigned long n, bool to)
79722+{
79723+
79724+#ifdef CONFIG_PAX_USERCOPY
79725+ struct slob_page *sp;
79726+ const slob_t *free;
79727+ const void *base;
79728+ unsigned long flags;
79729+ const char *type;
79730+
79731+ if (!n)
79732+ return;
79733+
79734+ type = "<null>";
79735+ if (ZERO_OR_NULL_PTR(ptr))
79736+ goto report;
79737+
79738+ if (!virt_addr_valid(ptr))
79739+ return;
79740+
79741+ type = "<process stack>";
79742+ sp = slob_page(ptr);
79743+ if (!PageSlab((struct page*)sp)) {
79744+ if (object_is_on_stack(ptr, n) == -1)
79745+ goto report;
79746+ return;
79747+ }
79748+
79749+ type = "<slob>";
79750+ if (sp->size) {
79751+ base = page_address(&sp->page);
79752+ if (base <= ptr && n <= sp->size - (ptr - base))
79753+ return;
79754+ goto report;
79755+ }
79756+
79757+ /* some tricky double walking to find the chunk */
79758+ spin_lock_irqsave(&slob_lock, flags);
79759+ base = (void *)((unsigned long)ptr & PAGE_MASK);
79760+ free = sp->free;
79761+
79762+ while (!slob_last(free) && (void *)free <= ptr) {
79763+ base = free + slob_units(free);
79764+ free = slob_next(free);
79765+ }
79766+
79767+ while (base < (void *)free) {
79768+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
79769+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
79770+ int offset;
79771+
79772+ if (ptr < base + align)
79773+ break;
79774+
79775+ offset = ptr - base - align;
79776+ if (offset >= m) {
79777+ base += size;
79778+ continue;
79779+ }
79780+
79781+ if (n > m - offset)
79782+ break;
79783+
79784+ spin_unlock_irqrestore(&slob_lock, flags);
79785+ return;
79786+ }
79787+
79788+ spin_unlock_irqrestore(&slob_lock, flags);
79789+report:
79790+ pax_report_usercopy(ptr, n, to, type);
79791+#endif
79792+
79793+}
79794+EXPORT_SYMBOL(check_object_size);
79795+
79796 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
79797 size_t ksize(const void *block)
79798 {
79799@@ -547,10 +640,10 @@ size_t ksize(const void *block)
79800 sp = slob_page(block);
79801 if (is_slob_page(sp)) {
79802 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79803- unsigned int *m = (unsigned int *)(block - align);
79804- return SLOB_UNITS(*m) * SLOB_UNIT;
79805+ slob_t *m = (slob_t *)(block - align);
79806+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
79807 } else
79808- return sp->page.private;
79809+ return sp->size;
79810 }
79811 EXPORT_SYMBOL(ksize);
79812
79813@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79814 {
79815 struct kmem_cache *c;
79816
79817+#ifdef CONFIG_PAX_USERCOPY
79818+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
79819+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
79820+#else
79821 c = slob_alloc(sizeof(struct kmem_cache),
79822 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
79823+#endif
79824
79825 if (c) {
79826 c->name = name;
79827@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
79828 {
79829 void *b;
79830
79831+#ifdef CONFIG_PAX_USERCOPY
79832+ b = __kmalloc_node_align(c->size, flags, node, c->align);
79833+#else
79834 if (c->size < PAGE_SIZE) {
79835 b = slob_alloc(c->size, flags, c->align, node);
79836 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79837 SLOB_UNITS(c->size) * SLOB_UNIT,
79838 flags, node);
79839 } else {
79840+ struct slob_page *sp;
79841+
79842 b = slob_new_pages(flags, get_order(c->size), node);
79843+ sp = slob_page(b);
79844+ sp->size = c->size;
79845 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79846 PAGE_SIZE << get_order(c->size),
79847 flags, node);
79848 }
79849+#endif
79850
79851 if (c->ctor)
79852 c->ctor(b);
79853@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
79854
79855 static void __kmem_cache_free(void *b, int size)
79856 {
79857- if (size < PAGE_SIZE)
79858+ struct slob_page *sp = slob_page(b);
79859+
79860+ if (is_slob_page(sp))
79861 slob_free(b, size);
79862- else
79863+ else {
79864+ clear_slob_page(sp);
79865+ free_slob_page(sp);
79866+ sp->size = 0;
79867 slob_free_pages(b, get_order(size));
79868+ }
79869 }
79870
79871 static void kmem_rcu_free(struct rcu_head *head)
79872@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
79873
79874 void kmem_cache_free(struct kmem_cache *c, void *b)
79875 {
79876+ int size = c->size;
79877+
79878+#ifdef CONFIG_PAX_USERCOPY
79879+ if (size + c->align < PAGE_SIZE) {
79880+ size += c->align;
79881+ b -= c->align;
79882+ }
79883+#endif
79884+
79885 kmemleak_free_recursive(b, c->flags);
79886 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
79887 struct slob_rcu *slob_rcu;
79888- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
79889+ slob_rcu = b + (size - sizeof(struct slob_rcu));
79890 INIT_RCU_HEAD(&slob_rcu->head);
79891- slob_rcu->size = c->size;
79892+ slob_rcu->size = size;
79893 call_rcu(&slob_rcu->head, kmem_rcu_free);
79894 } else {
79895- __kmem_cache_free(b, c->size);
79896+ __kmem_cache_free(b, size);
79897 }
79898
79899+#ifdef CONFIG_PAX_USERCOPY
79900+ trace_kfree(_RET_IP_, b);
79901+#else
79902 trace_kmem_cache_free(_RET_IP_, b);
79903+#endif
79904+
79905 }
79906 EXPORT_SYMBOL(kmem_cache_free);
79907
79908diff --git a/mm/slub.c b/mm/slub.c
79909index 4996fc7..87e01d0 100644
79910--- a/mm/slub.c
79911+++ b/mm/slub.c
79912@@ -201,7 +201,7 @@ struct track {
79913
79914 enum track_item { TRACK_ALLOC, TRACK_FREE };
79915
79916-#ifdef CONFIG_SLUB_DEBUG
79917+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79918 static int sysfs_slab_add(struct kmem_cache *);
79919 static int sysfs_slab_alias(struct kmem_cache *, const char *);
79920 static void sysfs_slab_remove(struct kmem_cache *);
79921@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
79922 if (!t->addr)
79923 return;
79924
79925- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
79926+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
79927 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
79928 }
79929
79930@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
79931
79932 page = virt_to_head_page(x);
79933
79934+ BUG_ON(!PageSlab(page));
79935+
79936 slab_free(s, page, x, _RET_IP_);
79937
79938 trace_kmem_cache_free(_RET_IP_, x);
79939@@ -1937,7 +1939,7 @@ static int slub_min_objects;
79940 * Merge control. If this is set then no merging of slab caches will occur.
79941 * (Could be removed. This was introduced to pacify the merge skeptics.)
79942 */
79943-static int slub_nomerge;
79944+static int slub_nomerge = 1;
79945
79946 /*
79947 * Calculate the order of allocation given an slab object size.
79948@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
79949 * list to avoid pounding the page allocator excessively.
79950 */
79951 set_min_partial(s, ilog2(s->size));
79952- s->refcount = 1;
79953+ atomic_set(&s->refcount, 1);
79954 #ifdef CONFIG_NUMA
79955 s->remote_node_defrag_ratio = 1000;
79956 #endif
79957@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
79958 void kmem_cache_destroy(struct kmem_cache *s)
79959 {
79960 down_write(&slub_lock);
79961- s->refcount--;
79962- if (!s->refcount) {
79963+ if (atomic_dec_and_test(&s->refcount)) {
79964 list_del(&s->list);
79965 up_write(&slub_lock);
79966 if (kmem_cache_close(s)) {
79967@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
79968 __setup("slub_nomerge", setup_slub_nomerge);
79969
79970 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
79971- const char *name, int size, gfp_t gfp_flags)
79972+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
79973 {
79974- unsigned int flags = 0;
79975-
79976 if (gfp_flags & SLUB_DMA)
79977- flags = SLAB_CACHE_DMA;
79978+ flags |= SLAB_CACHE_DMA;
79979
79980 /*
79981 * This function is called with IRQs disabled during early-boot on
79982@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
79983 EXPORT_SYMBOL(__kmalloc_node);
79984 #endif
79985
79986+void check_object_size(const void *ptr, unsigned long n, bool to)
79987+{
79988+
79989+#ifdef CONFIG_PAX_USERCOPY
79990+ struct page *page;
79991+ struct kmem_cache *s = NULL;
79992+ unsigned long offset;
79993+ const char *type;
79994+
79995+ if (!n)
79996+ return;
79997+
79998+ type = "<null>";
79999+ if (ZERO_OR_NULL_PTR(ptr))
80000+ goto report;
80001+
80002+ if (!virt_addr_valid(ptr))
80003+ return;
80004+
80005+ page = get_object_page(ptr);
80006+
80007+ type = "<process stack>";
80008+ if (!page) {
80009+ if (object_is_on_stack(ptr, n) == -1)
80010+ goto report;
80011+ return;
80012+ }
80013+
80014+ s = page->slab;
80015+ type = s->name;
80016+ if (!(s->flags & SLAB_USERCOPY))
80017+ goto report;
80018+
80019+ offset = (ptr - page_address(page)) % s->size;
80020+ if (offset <= s->objsize && n <= s->objsize - offset)
80021+ return;
80022+
80023+report:
80024+ pax_report_usercopy(ptr, n, to, type);
80025+#endif
80026+
80027+}
80028+EXPORT_SYMBOL(check_object_size);
80029+
80030 size_t ksize(const void *object)
80031 {
80032 struct page *page;
80033@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
80034 * kmem_cache_open for slab_state == DOWN.
80035 */
80036 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
80037- sizeof(struct kmem_cache_node), GFP_NOWAIT);
80038- kmalloc_caches[0].refcount = -1;
80039+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
80040+ atomic_set(&kmalloc_caches[0].refcount, -1);
80041 caches++;
80042
80043 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
80044@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
80045 /* Caches that are not of the two-to-the-power-of size */
80046 if (KMALLOC_MIN_SIZE <= 32) {
80047 create_kmalloc_cache(&kmalloc_caches[1],
80048- "kmalloc-96", 96, GFP_NOWAIT);
80049+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
80050 caches++;
80051 }
80052 if (KMALLOC_MIN_SIZE <= 64) {
80053 create_kmalloc_cache(&kmalloc_caches[2],
80054- "kmalloc-192", 192, GFP_NOWAIT);
80055+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
80056 caches++;
80057 }
80058
80059 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
80060 create_kmalloc_cache(&kmalloc_caches[i],
80061- "kmalloc", 1 << i, GFP_NOWAIT);
80062+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
80063 caches++;
80064 }
80065
80066@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
80067 /*
80068 * We may have set a slab to be unmergeable during bootstrap.
80069 */
80070- if (s->refcount < 0)
80071+ if (atomic_read(&s->refcount) < 0)
80072 return 1;
80073
80074 return 0;
80075@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80076 if (s) {
80077 int cpu;
80078
80079- s->refcount++;
80080+ atomic_inc(&s->refcount);
80081 /*
80082 * Adjust the object sizes so that we clear
80083 * the complete object on kzalloc.
80084@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
80085
80086 if (sysfs_slab_alias(s, name)) {
80087 down_write(&slub_lock);
80088- s->refcount--;
80089+ atomic_dec(&s->refcount);
80090 up_write(&slub_lock);
80091 goto err;
80092 }
80093@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
80094
80095 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
80096 {
80097- return sprintf(buf, "%d\n", s->refcount - 1);
80098+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
80099 }
80100 SLAB_ATTR_RO(aliases);
80101
80102@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
80103 kfree(s);
80104 }
80105
80106-static struct sysfs_ops slab_sysfs_ops = {
80107+static const struct sysfs_ops slab_sysfs_ops = {
80108 .show = slab_attr_show,
80109 .store = slab_attr_store,
80110 };
80111@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
80112 return 0;
80113 }
80114
80115-static struct kset_uevent_ops slab_uevent_ops = {
80116+static const struct kset_uevent_ops slab_uevent_ops = {
80117 .filter = uevent_filter,
80118 };
80119
80120@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
80121 return name;
80122 }
80123
80124+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80125 static int sysfs_slab_add(struct kmem_cache *s)
80126 {
80127 int err;
80128@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
80129 kobject_del(&s->kobj);
80130 kobject_put(&s->kobj);
80131 }
80132+#endif
80133
80134 /*
80135 * Need to buffer aliases during bootup until sysfs becomes
80136@@ -4632,6 +4677,7 @@ struct saved_alias {
80137
80138 static struct saved_alias *alias_list;
80139
80140+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
80141 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80142 {
80143 struct saved_alias *al;
80144@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
80145 alias_list = al;
80146 return 0;
80147 }
80148+#endif
80149
80150 static int __init slab_sysfs_init(void)
80151 {
80152@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80153
80154 static int __init slab_proc_init(void)
80155 {
80156- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80157+ mode_t gr_mode = S_IRUGO;
80158+
80159+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80160+ gr_mode = S_IRUSR;
80161+#endif
80162+
80163+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80164 return 0;
80165 }
80166 module_init(slab_proc_init);
80167diff --git a/mm/swap.c b/mm/swap.c
80168index 308e57d..5de19c0 100644
80169--- a/mm/swap.c
80170+++ b/mm/swap.c
80171@@ -30,6 +30,7 @@
80172 #include <linux/notifier.h>
80173 #include <linux/backing-dev.h>
80174 #include <linux/memcontrol.h>
80175+#include <linux/hugetlb.h>
80176
80177 #include "internal.h"
80178
80179@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80180 compound_page_dtor *dtor;
80181
80182 dtor = get_compound_page_dtor(page);
80183+ if (!PageHuge(page))
80184+ BUG_ON(dtor != free_compound_page);
80185 (*dtor)(page);
80186 }
80187 }
80188diff --git a/mm/util.c b/mm/util.c
80189index e48b493..24a601d 100644
80190--- a/mm/util.c
80191+++ b/mm/util.c
80192@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80193 void arch_pick_mmap_layout(struct mm_struct *mm)
80194 {
80195 mm->mmap_base = TASK_UNMAPPED_BASE;
80196+
80197+#ifdef CONFIG_PAX_RANDMMAP
80198+ if (mm->pax_flags & MF_PAX_RANDMMAP)
80199+ mm->mmap_base += mm->delta_mmap;
80200+#endif
80201+
80202 mm->get_unmapped_area = arch_get_unmapped_area;
80203 mm->unmap_area = arch_unmap_area;
80204 }
80205diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80206index f34ffd0..e60c44f 100644
80207--- a/mm/vmalloc.c
80208+++ b/mm/vmalloc.c
80209@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80210
80211 pte = pte_offset_kernel(pmd, addr);
80212 do {
80213- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80214- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80215+
80216+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80217+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80218+ BUG_ON(!pte_exec(*pte));
80219+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80220+ continue;
80221+ }
80222+#endif
80223+
80224+ {
80225+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80226+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80227+ }
80228 } while (pte++, addr += PAGE_SIZE, addr != end);
80229 }
80230
80231@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80232 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80233 {
80234 pte_t *pte;
80235+ int ret = -ENOMEM;
80236
80237 /*
80238 * nr is a running index into the array which helps higher level
80239@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80240 pte = pte_alloc_kernel(pmd, addr);
80241 if (!pte)
80242 return -ENOMEM;
80243+
80244+ pax_open_kernel();
80245 do {
80246 struct page *page = pages[*nr];
80247
80248- if (WARN_ON(!pte_none(*pte)))
80249- return -EBUSY;
80250- if (WARN_ON(!page))
80251- return -ENOMEM;
80252+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80253+ if (!(pgprot_val(prot) & _PAGE_NX))
80254+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
80255+ else
80256+#endif
80257+
80258+ if (WARN_ON(!pte_none(*pte))) {
80259+ ret = -EBUSY;
80260+ goto out;
80261+ }
80262+ if (WARN_ON(!page)) {
80263+ ret = -ENOMEM;
80264+ goto out;
80265+ }
80266 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
80267 (*nr)++;
80268 } while (pte++, addr += PAGE_SIZE, addr != end);
80269- return 0;
80270+ ret = 0;
80271+out:
80272+ pax_close_kernel();
80273+ return ret;
80274 }
80275
80276 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
80277@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
80278 * and fall back on vmalloc() if that fails. Others
80279 * just put it in the vmalloc space.
80280 */
80281-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
80282+#ifdef CONFIG_MODULES
80283+#ifdef MODULES_VADDR
80284 unsigned long addr = (unsigned long)x;
80285 if (addr >= MODULES_VADDR && addr < MODULES_END)
80286 return 1;
80287 #endif
80288+
80289+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80290+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
80291+ return 1;
80292+#endif
80293+
80294+#endif
80295+
80296 return is_vmalloc_addr(x);
80297 }
80298
80299@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
80300
80301 if (!pgd_none(*pgd)) {
80302 pud_t *pud = pud_offset(pgd, addr);
80303+#ifdef CONFIG_X86
80304+ if (!pud_large(*pud))
80305+#endif
80306 if (!pud_none(*pud)) {
80307 pmd_t *pmd = pmd_offset(pud, addr);
80308+#ifdef CONFIG_X86
80309+ if (!pmd_large(*pmd))
80310+#endif
80311 if (!pmd_none(*pmd)) {
80312 pte_t *ptep, pte;
80313
80314@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
80315 struct rb_node *tmp;
80316
80317 while (*p) {
80318- struct vmap_area *tmp;
80319+ struct vmap_area *varea;
80320
80321 parent = *p;
80322- tmp = rb_entry(parent, struct vmap_area, rb_node);
80323- if (va->va_start < tmp->va_end)
80324+ varea = rb_entry(parent, struct vmap_area, rb_node);
80325+ if (va->va_start < varea->va_end)
80326 p = &(*p)->rb_left;
80327- else if (va->va_end > tmp->va_start)
80328+ else if (va->va_end > varea->va_start)
80329 p = &(*p)->rb_right;
80330 else
80331 BUG();
80332@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
80333 struct vm_struct *area;
80334
80335 BUG_ON(in_interrupt());
80336+
80337+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80338+ if (flags & VM_KERNEXEC) {
80339+ if (start != VMALLOC_START || end != VMALLOC_END)
80340+ return NULL;
80341+ start = (unsigned long)MODULES_EXEC_VADDR;
80342+ end = (unsigned long)MODULES_EXEC_END;
80343+ }
80344+#endif
80345+
80346 if (flags & VM_IOREMAP) {
80347 int bit = fls(size);
80348
80349@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
80350 if (count > totalram_pages)
80351 return NULL;
80352
80353+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80354+ if (!(pgprot_val(prot) & _PAGE_NX))
80355+ flags |= VM_KERNEXEC;
80356+#endif
80357+
80358 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
80359 __builtin_return_address(0));
80360 if (!area)
80361@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80362 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
80363 return NULL;
80364
80365+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80366+ if (!(pgprot_val(prot) & _PAGE_NX))
80367+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
80368+ VMALLOC_START, VMALLOC_END, node,
80369+ gfp_mask, caller);
80370+ else
80371+#endif
80372+
80373 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
80374 VMALLOC_START, VMALLOC_END, node,
80375 gfp_mask, caller);
80376@@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80377 return addr;
80378 }
80379
80380+#undef __vmalloc
80381 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
80382 {
80383 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
80384@@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
80385 * For tight control over page level allocator and protection flags
80386 * use __vmalloc() instead.
80387 */
80388+#undef vmalloc
80389 void *vmalloc(unsigned long size)
80390 {
80391 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80392@@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
80393 * The resulting memory area is zeroed so it can be mapped to userspace
80394 * without leaking data.
80395 */
80396+#undef vmalloc_user
80397 void *vmalloc_user(unsigned long size)
80398 {
80399 struct vm_struct *area;
80400@@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
80401 * For tight control over page level allocator and protection flags
80402 * use __vmalloc() instead.
80403 */
80404+#undef vmalloc_node
80405 void *vmalloc_node(unsigned long size, int node)
80406 {
80407 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80408@@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
80409 * For tight control over page level allocator and protection flags
80410 * use __vmalloc() instead.
80411 */
80412-
80413+#undef vmalloc_exec
80414 void *vmalloc_exec(unsigned long size)
80415 {
80416- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
80417+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
80418 -1, __builtin_return_address(0));
80419 }
80420
80421@@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
80422 * Allocate enough 32bit PA addressable pages to cover @size from the
80423 * page level allocator and map them into contiguous kernel virtual space.
80424 */
80425+#undef vmalloc_32
80426 void *vmalloc_32(unsigned long size)
80427 {
80428 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
80429@@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
80430 * The resulting memory area is 32bit addressable and zeroed so it can be
80431 * mapped to userspace without leaking data.
80432 */
80433+#undef vmalloc_32_user
80434 void *vmalloc_32_user(unsigned long size)
80435 {
80436 struct vm_struct *area;
80437@@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
80438 unsigned long uaddr = vma->vm_start;
80439 unsigned long usize = vma->vm_end - vma->vm_start;
80440
80441+ BUG_ON(vma->vm_mirror);
80442+
80443 if ((PAGE_SIZE-1) & (unsigned long)addr)
80444 return -EINVAL;
80445
80446diff --git a/mm/vmstat.c b/mm/vmstat.c
80447index 42d76c6..5643dc4 100644
80448--- a/mm/vmstat.c
80449+++ b/mm/vmstat.c
80450@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
80451 *
80452 * vm_stat contains the global counters
80453 */
80454-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80455+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80456 EXPORT_SYMBOL(vm_stat);
80457
80458 #ifdef CONFIG_SMP
80459@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
80460 v = p->vm_stat_diff[i];
80461 p->vm_stat_diff[i] = 0;
80462 local_irq_restore(flags);
80463- atomic_long_add(v, &zone->vm_stat[i]);
80464+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
80465 global_diff[i] += v;
80466 #ifdef CONFIG_NUMA
80467 /* 3 seconds idle till flush */
80468@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
80469
80470 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
80471 if (global_diff[i])
80472- atomic_long_add(global_diff[i], &vm_stat[i]);
80473+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
80474 }
80475
80476 #endif
80477@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
80478 start_cpu_timer(cpu);
80479 #endif
80480 #ifdef CONFIG_PROC_FS
80481- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
80482- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
80483- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
80484- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
80485+ {
80486+ mode_t gr_mode = S_IRUGO;
80487+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80488+ gr_mode = S_IRUSR;
80489+#endif
80490+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
80491+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
80492+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
80493+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
80494+#else
80495+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
80496+#endif
80497+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
80498+ }
80499 #endif
80500 return 0;
80501 }
80502diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
80503index a29c5ab..6143f20 100644
80504--- a/net/8021q/vlan.c
80505+++ b/net/8021q/vlan.c
80506@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
80507 err = -EPERM;
80508 if (!capable(CAP_NET_ADMIN))
80509 break;
80510- if ((args.u.name_type >= 0) &&
80511- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
80512+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
80513 struct vlan_net *vn;
80514
80515 vn = net_generic(net, vlan_net_id);
80516diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
80517index a2d2984..f9eb711 100644
80518--- a/net/9p/trans_fd.c
80519+++ b/net/9p/trans_fd.c
80520@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
80521 oldfs = get_fs();
80522 set_fs(get_ds());
80523 /* The cast to a user pointer is valid due to the set_fs() */
80524- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
80525+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
80526 set_fs(oldfs);
80527
80528 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
80529diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
80530index 02cc7e7..4514f1b 100644
80531--- a/net/atm/atm_misc.c
80532+++ b/net/atm/atm_misc.c
80533@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
80534 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
80535 return 1;
80536 atm_return(vcc,truesize);
80537- atomic_inc(&vcc->stats->rx_drop);
80538+ atomic_inc_unchecked(&vcc->stats->rx_drop);
80539 return 0;
80540 }
80541
80542@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
80543 }
80544 }
80545 atm_return(vcc,guess);
80546- atomic_inc(&vcc->stats->rx_drop);
80547+ atomic_inc_unchecked(&vcc->stats->rx_drop);
80548 return NULL;
80549 }
80550
80551@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
80552
80553 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80554 {
80555-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80556+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80557 __SONET_ITEMS
80558 #undef __HANDLE_ITEM
80559 }
80560@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80561
80562 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80563 {
80564-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
80565+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
80566 __SONET_ITEMS
80567 #undef __HANDLE_ITEM
80568 }
80569diff --git a/net/atm/lec.h b/net/atm/lec.h
80570index 9d14d19..5c145f3 100644
80571--- a/net/atm/lec.h
80572+++ b/net/atm/lec.h
80573@@ -48,7 +48,7 @@ struct lane2_ops {
80574 const u8 *tlvs, u32 sizeoftlvs);
80575 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
80576 const u8 *tlvs, u32 sizeoftlvs);
80577-};
80578+} __no_const;
80579
80580 /*
80581 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
80582diff --git a/net/atm/mpc.h b/net/atm/mpc.h
80583index 0919a88..a23d54e 100644
80584--- a/net/atm/mpc.h
80585+++ b/net/atm/mpc.h
80586@@ -33,7 +33,7 @@ struct mpoa_client {
80587 struct mpc_parameters parameters; /* parameters for this client */
80588
80589 const struct net_device_ops *old_ops;
80590- struct net_device_ops new_ops;
80591+ net_device_ops_no_const new_ops;
80592 };
80593
80594
80595diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
80596index 4504a4b..1733f1e 100644
80597--- a/net/atm/mpoa_caches.c
80598+++ b/net/atm/mpoa_caches.c
80599@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
80600 struct timeval now;
80601 struct k_message msg;
80602
80603+ pax_track_stack();
80604+
80605 do_gettimeofday(&now);
80606
80607 write_lock_irq(&client->egress_lock);
80608diff --git a/net/atm/proc.c b/net/atm/proc.c
80609index ab8419a..aa91497 100644
80610--- a/net/atm/proc.c
80611+++ b/net/atm/proc.c
80612@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
80613 const struct k_atm_aal_stats *stats)
80614 {
80615 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
80616- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
80617- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
80618- atomic_read(&stats->rx_drop));
80619+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
80620+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
80621+ atomic_read_unchecked(&stats->rx_drop));
80622 }
80623
80624 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
80625@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
80626 {
80627 struct sock *sk = sk_atm(vcc);
80628
80629+#ifdef CONFIG_GRKERNSEC_HIDESYM
80630+ seq_printf(seq, "%p ", NULL);
80631+#else
80632 seq_printf(seq, "%p ", vcc);
80633+#endif
80634+
80635 if (!vcc->dev)
80636 seq_printf(seq, "Unassigned ");
80637 else
80638@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
80639 {
80640 if (!vcc->dev)
80641 seq_printf(seq, sizeof(void *) == 4 ?
80642+#ifdef CONFIG_GRKERNSEC_HIDESYM
80643+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
80644+#else
80645 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
80646+#endif
80647 else
80648 seq_printf(seq, "%3d %3d %5d ",
80649 vcc->dev->number, vcc->vpi, vcc->vci);
80650diff --git a/net/atm/resources.c b/net/atm/resources.c
80651index 56b7322..c48b84e 100644
80652--- a/net/atm/resources.c
80653+++ b/net/atm/resources.c
80654@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
80655 static void copy_aal_stats(struct k_atm_aal_stats *from,
80656 struct atm_aal_stats *to)
80657 {
80658-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80659+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80660 __AAL_STAT_ITEMS
80661 #undef __HANDLE_ITEM
80662 }
80663@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
80664 static void subtract_aal_stats(struct k_atm_aal_stats *from,
80665 struct atm_aal_stats *to)
80666 {
80667-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
80668+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
80669 __AAL_STAT_ITEMS
80670 #undef __HANDLE_ITEM
80671 }
80672diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
80673index 8567d47..bba2292 100644
80674--- a/net/bridge/br_private.h
80675+++ b/net/bridge/br_private.h
80676@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
80677
80678 #ifdef CONFIG_SYSFS
80679 /* br_sysfs_if.c */
80680-extern struct sysfs_ops brport_sysfs_ops;
80681+extern const struct sysfs_ops brport_sysfs_ops;
80682 extern int br_sysfs_addif(struct net_bridge_port *p);
80683
80684 /* br_sysfs_br.c */
80685diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
80686index 9a52ac5..c97538e 100644
80687--- a/net/bridge/br_stp_if.c
80688+++ b/net/bridge/br_stp_if.c
80689@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
80690 char *envp[] = { NULL };
80691
80692 if (br->stp_enabled == BR_USER_STP) {
80693- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
80694+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
80695 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
80696 br->dev->name, r);
80697
80698diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
80699index 820643a..ce77fb3 100644
80700--- a/net/bridge/br_sysfs_if.c
80701+++ b/net/bridge/br_sysfs_if.c
80702@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
80703 return ret;
80704 }
80705
80706-struct sysfs_ops brport_sysfs_ops = {
80707+const struct sysfs_ops brport_sysfs_ops = {
80708 .show = brport_show,
80709 .store = brport_store,
80710 };
80711diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
80712index d73d47f..72df42a 100644
80713--- a/net/bridge/netfilter/ebtables.c
80714+++ b/net/bridge/netfilter/ebtables.c
80715@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
80716 unsigned int entries_size, nentries;
80717 char *entries;
80718
80719+ pax_track_stack();
80720+
80721 if (cmd == EBT_SO_GET_ENTRIES) {
80722 entries_size = t->private->entries_size;
80723 nentries = t->private->nentries;
80724diff --git a/net/can/bcm.c b/net/can/bcm.c
80725index 2ffd2e0..72a7486 100644
80726--- a/net/can/bcm.c
80727+++ b/net/can/bcm.c
80728@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
80729 struct bcm_sock *bo = bcm_sk(sk);
80730 struct bcm_op *op;
80731
80732+#ifdef CONFIG_GRKERNSEC_HIDESYM
80733+ seq_printf(m, ">>> socket %p", NULL);
80734+ seq_printf(m, " / sk %p", NULL);
80735+ seq_printf(m, " / bo %p", NULL);
80736+#else
80737 seq_printf(m, ">>> socket %p", sk->sk_socket);
80738 seq_printf(m, " / sk %p", sk);
80739 seq_printf(m, " / bo %p", bo);
80740+#endif
80741 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
80742 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
80743 seq_printf(m, " <<<\n");
80744diff --git a/net/compat.c b/net/compat.c
80745index 9559afc..ccd74e1 100644
80746--- a/net/compat.c
80747+++ b/net/compat.c
80748@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
80749 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
80750 __get_user(kmsg->msg_flags, &umsg->msg_flags))
80751 return -EFAULT;
80752- kmsg->msg_name = compat_ptr(tmp1);
80753- kmsg->msg_iov = compat_ptr(tmp2);
80754- kmsg->msg_control = compat_ptr(tmp3);
80755+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
80756+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
80757+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
80758 return 0;
80759 }
80760
80761@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80762 kern_msg->msg_name = NULL;
80763
80764 tot_len = iov_from_user_compat_to_kern(kern_iov,
80765- (struct compat_iovec __user *)kern_msg->msg_iov,
80766+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
80767 kern_msg->msg_iovlen);
80768 if (tot_len >= 0)
80769 kern_msg->msg_iov = kern_iov;
80770@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80771
80772 #define CMSG_COMPAT_FIRSTHDR(msg) \
80773 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
80774- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
80775+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
80776 (struct compat_cmsghdr __user *)NULL)
80777
80778 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
80779 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
80780 (ucmlen) <= (unsigned long) \
80781 ((mhdr)->msg_controllen - \
80782- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
80783+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
80784
80785 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
80786 struct compat_cmsghdr __user *cmsg, int cmsg_len)
80787 {
80788 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
80789- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
80790+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
80791 msg->msg_controllen)
80792 return NULL;
80793 return (struct compat_cmsghdr __user *)ptr;
80794@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80795 {
80796 struct compat_timeval ctv;
80797 struct compat_timespec cts[3];
80798- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80799+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80800 struct compat_cmsghdr cmhdr;
80801 int cmlen;
80802
80803@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80804
80805 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
80806 {
80807- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80808+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80809 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
80810 int fdnum = scm->fp->count;
80811 struct file **fp = scm->fp->fp;
80812@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
80813 len = sizeof(ktime);
80814 old_fs = get_fs();
80815 set_fs(KERNEL_DS);
80816- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
80817+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
80818 set_fs(old_fs);
80819
80820 if (!err) {
80821@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80822 case MCAST_JOIN_GROUP:
80823 case MCAST_LEAVE_GROUP:
80824 {
80825- struct compat_group_req __user *gr32 = (void *)optval;
80826+ struct compat_group_req __user *gr32 = (void __user *)optval;
80827 struct group_req __user *kgr =
80828 compat_alloc_user_space(sizeof(struct group_req));
80829 u32 interface;
80830@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80831 case MCAST_BLOCK_SOURCE:
80832 case MCAST_UNBLOCK_SOURCE:
80833 {
80834- struct compat_group_source_req __user *gsr32 = (void *)optval;
80835+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
80836 struct group_source_req __user *kgsr = compat_alloc_user_space(
80837 sizeof(struct group_source_req));
80838 u32 interface;
80839@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80840 }
80841 case MCAST_MSFILTER:
80842 {
80843- struct compat_group_filter __user *gf32 = (void *)optval;
80844+ struct compat_group_filter __user *gf32 = (void __user *)optval;
80845 struct group_filter __user *kgf;
80846 u32 interface, fmode, numsrc;
80847
80848diff --git a/net/core/dev.c b/net/core/dev.c
80849index 84a0705..575db4c 100644
80850--- a/net/core/dev.c
80851+++ b/net/core/dev.c
80852@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
80853 if (no_module && capable(CAP_NET_ADMIN))
80854 no_module = request_module("netdev-%s", name);
80855 if (no_module && capable(CAP_SYS_MODULE)) {
80856+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80857+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
80858+#else
80859 if (!request_module("%s", name))
80860 pr_err("Loading kernel module for a network device "
80861 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
80862 "instead\n", name);
80863+#endif
80864 }
80865 }
80866 EXPORT_SYMBOL(dev_load);
80867@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
80868
80869 struct dev_gso_cb {
80870 void (*destructor)(struct sk_buff *skb);
80871-};
80872+} __no_const;
80873
80874 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
80875
80876@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
80877 }
80878 EXPORT_SYMBOL(netif_rx_ni);
80879
80880-static void net_tx_action(struct softirq_action *h)
80881+static void net_tx_action(void)
80882 {
80883 struct softnet_data *sd = &__get_cpu_var(softnet_data);
80884
80885@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
80886 EXPORT_SYMBOL(netif_napi_del);
80887
80888
80889-static void net_rx_action(struct softirq_action *h)
80890+static void net_rx_action(void)
80891 {
80892 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
80893 unsigned long time_limit = jiffies + 2;
80894diff --git a/net/core/flow.c b/net/core/flow.c
80895index 9601587..8c4824e 100644
80896--- a/net/core/flow.c
80897+++ b/net/core/flow.c
80898@@ -35,11 +35,11 @@ struct flow_cache_entry {
80899 atomic_t *object_ref;
80900 };
80901
80902-atomic_t flow_cache_genid = ATOMIC_INIT(0);
80903+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
80904
80905 static u32 flow_hash_shift;
80906 #define flow_hash_size (1 << flow_hash_shift)
80907-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
80908+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
80909
80910 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
80911
80912@@ -52,7 +52,7 @@ struct flow_percpu_info {
80913 u32 hash_rnd;
80914 int count;
80915 };
80916-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
80917+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
80918
80919 #define flow_hash_rnd_recalc(cpu) \
80920 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
80921@@ -69,7 +69,7 @@ struct flow_flush_info {
80922 atomic_t cpuleft;
80923 struct completion completion;
80924 };
80925-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
80926+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
80927
80928 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
80929
80930@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
80931 if (fle->family == family &&
80932 fle->dir == dir &&
80933 flow_key_compare(key, &fle->key) == 0) {
80934- if (fle->genid == atomic_read(&flow_cache_genid)) {
80935+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
80936 void *ret = fle->object;
80937
80938 if (ret)
80939@@ -228,7 +228,7 @@ nocache:
80940 err = resolver(net, key, family, dir, &obj, &obj_ref);
80941
80942 if (fle && !err) {
80943- fle->genid = atomic_read(&flow_cache_genid);
80944+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
80945
80946 if (fle->object)
80947 atomic_dec(fle->object_ref);
80948@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
80949
80950 fle = flow_table(cpu)[i];
80951 for (; fle; fle = fle->next) {
80952- unsigned genid = atomic_read(&flow_cache_genid);
80953+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
80954
80955 if (!fle->object || fle->genid == genid)
80956 continue;
80957diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
80958index d4fd895..ac9b1e6 100644
80959--- a/net/core/rtnetlink.c
80960+++ b/net/core/rtnetlink.c
80961@@ -57,7 +57,7 @@ struct rtnl_link
80962 {
80963 rtnl_doit_func doit;
80964 rtnl_dumpit_func dumpit;
80965-};
80966+} __no_const;
80967
80968 static DEFINE_MUTEX(rtnl_mutex);
80969
80970diff --git a/net/core/scm.c b/net/core/scm.c
80971index d98eafc..1a190a9 100644
80972--- a/net/core/scm.c
80973+++ b/net/core/scm.c
80974@@ -191,7 +191,7 @@ error:
80975 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80976 {
80977 struct cmsghdr __user *cm
80978- = (__force struct cmsghdr __user *)msg->msg_control;
80979+ = (struct cmsghdr __force_user *)msg->msg_control;
80980 struct cmsghdr cmhdr;
80981 int cmlen = CMSG_LEN(len);
80982 int err;
80983@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80984 err = -EFAULT;
80985 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
80986 goto out;
80987- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
80988+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
80989 goto out;
80990 cmlen = CMSG_SPACE(len);
80991 if (msg->msg_controllen < cmlen)
80992@@ -229,7 +229,7 @@ out:
80993 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
80994 {
80995 struct cmsghdr __user *cm
80996- = (__force struct cmsghdr __user*)msg->msg_control;
80997+ = (struct cmsghdr __force_user *)msg->msg_control;
80998
80999 int fdmax = 0;
81000 int fdnum = scm->fp->count;
81001@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
81002 if (fdnum < fdmax)
81003 fdmax = fdnum;
81004
81005- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
81006+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
81007 i++, cmfptr++)
81008 {
81009 int new_fd;
81010diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
81011index 45329d7..626aaa6 100644
81012--- a/net/core/secure_seq.c
81013+++ b/net/core/secure_seq.c
81014@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
81015 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
81016
81017 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81018- __be16 dport)
81019+ __be16 dport)
81020 {
81021 u32 secret[MD5_MESSAGE_BYTES / 4];
81022 u32 hash[MD5_DIGEST_WORDS];
81023@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
81024 secret[i] = net_secret[i];
81025
81026 md5_transform(hash, secret);
81027-
81028 return hash[0];
81029 }
81030 #endif
81031diff --git a/net/core/skbuff.c b/net/core/skbuff.c
81032index a807f8c..65f906f 100644
81033--- a/net/core/skbuff.c
81034+++ b/net/core/skbuff.c
81035@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
81036 struct sk_buff *frag_iter;
81037 struct sock *sk = skb->sk;
81038
81039+ pax_track_stack();
81040+
81041 /*
81042 * __skb_splice_bits() only fails if the output has no room left,
81043 * so no point in going over the frag_list for the error case.
81044diff --git a/net/core/sock.c b/net/core/sock.c
81045index 6605e75..3acebda 100644
81046--- a/net/core/sock.c
81047+++ b/net/core/sock.c
81048@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
81049 break;
81050
81051 case SO_PEERCRED:
81052+ {
81053+ struct ucred peercred;
81054 if (len > sizeof(sk->sk_peercred))
81055 len = sizeof(sk->sk_peercred);
81056- if (copy_to_user(optval, &sk->sk_peercred, len))
81057+ peercred = sk->sk_peercred;
81058+ if (copy_to_user(optval, &peercred, len))
81059 return -EFAULT;
81060 goto lenout;
81061+ }
81062
81063 case SO_PEERNAME:
81064 {
81065@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
81066 */
81067 smp_wmb();
81068 atomic_set(&sk->sk_refcnt, 1);
81069- atomic_set(&sk->sk_drops, 0);
81070+ atomic_set_unchecked(&sk->sk_drops, 0);
81071 }
81072 EXPORT_SYMBOL(sock_init_data);
81073
81074diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
81075index 2036568..c55883d 100644
81076--- a/net/decnet/sysctl_net_decnet.c
81077+++ b/net/decnet/sysctl_net_decnet.c
81078@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
81079
81080 if (len > *lenp) len = *lenp;
81081
81082- if (copy_to_user(buffer, addr, len))
81083+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
81084 return -EFAULT;
81085
81086 *lenp = len;
81087@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
81088
81089 if (len > *lenp) len = *lenp;
81090
81091- if (copy_to_user(buffer, devname, len))
81092+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
81093 return -EFAULT;
81094
81095 *lenp = len;
81096diff --git a/net/econet/Kconfig b/net/econet/Kconfig
81097index 39a2d29..f39c0fe 100644
81098--- a/net/econet/Kconfig
81099+++ b/net/econet/Kconfig
81100@@ -4,7 +4,7 @@
81101
81102 config ECONET
81103 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
81104- depends on EXPERIMENTAL && INET
81105+ depends on EXPERIMENTAL && INET && BROKEN
81106 ---help---
81107 Econet is a fairly old and slow networking protocol mainly used by
81108 Acorn computers to access file and print servers. It uses native
81109diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
81110index a413b1b..380849c 100644
81111--- a/net/ieee802154/dgram.c
81112+++ b/net/ieee802154/dgram.c
81113@@ -318,7 +318,7 @@ out:
81114 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
81115 {
81116 if (sock_queue_rcv_skb(sk, skb) < 0) {
81117- atomic_inc(&sk->sk_drops);
81118+ atomic_inc_unchecked(&sk->sk_drops);
81119 kfree_skb(skb);
81120 return NET_RX_DROP;
81121 }
81122diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
81123index 30e74ee..bfc6ee0 100644
81124--- a/net/ieee802154/raw.c
81125+++ b/net/ieee802154/raw.c
81126@@ -206,7 +206,7 @@ out:
81127 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
81128 {
81129 if (sock_queue_rcv_skb(sk, skb) < 0) {
81130- atomic_inc(&sk->sk_drops);
81131+ atomic_inc_unchecked(&sk->sk_drops);
81132 kfree_skb(skb);
81133 return NET_RX_DROP;
81134 }
81135diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
81136index dba56d2..acee5d6 100644
81137--- a/net/ipv4/inet_diag.c
81138+++ b/net/ipv4/inet_diag.c
81139@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
81140 r->idiag_retrans = 0;
81141
81142 r->id.idiag_if = sk->sk_bound_dev_if;
81143+#ifdef CONFIG_GRKERNSEC_HIDESYM
81144+ r->id.idiag_cookie[0] = 0;
81145+ r->id.idiag_cookie[1] = 0;
81146+#else
81147 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
81148 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
81149+#endif
81150
81151 r->id.idiag_sport = inet->sport;
81152 r->id.idiag_dport = inet->dport;
81153@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81154 r->idiag_family = tw->tw_family;
81155 r->idiag_retrans = 0;
81156 r->id.idiag_if = tw->tw_bound_dev_if;
81157+
81158+#ifdef CONFIG_GRKERNSEC_HIDESYM
81159+ r->id.idiag_cookie[0] = 0;
81160+ r->id.idiag_cookie[1] = 0;
81161+#else
81162 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81163 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81164+#endif
81165+
81166 r->id.idiag_sport = tw->tw_sport;
81167 r->id.idiag_dport = tw->tw_dport;
81168 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81169@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81170 if (sk == NULL)
81171 goto unlock;
81172
81173+#ifndef CONFIG_GRKERNSEC_HIDESYM
81174 err = -ESTALE;
81175 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81176 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81177 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81178 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81179 goto out;
81180+#endif
81181
81182 err = -ENOMEM;
81183 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81184@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81185 r->idiag_retrans = req->retrans;
81186
81187 r->id.idiag_if = sk->sk_bound_dev_if;
81188+
81189+#ifdef CONFIG_GRKERNSEC_HIDESYM
81190+ r->id.idiag_cookie[0] = 0;
81191+ r->id.idiag_cookie[1] = 0;
81192+#else
81193 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81194 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81195+#endif
81196
81197 tmo = req->expires - jiffies;
81198 if (tmo < 0)
81199diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81200index d717267..56de7e7 100644
81201--- a/net/ipv4/inet_hashtables.c
81202+++ b/net/ipv4/inet_hashtables.c
81203@@ -18,12 +18,15 @@
81204 #include <linux/sched.h>
81205 #include <linux/slab.h>
81206 #include <linux/wait.h>
81207+#include <linux/security.h>
81208
81209 #include <net/inet_connection_sock.h>
81210 #include <net/inet_hashtables.h>
81211 #include <net/secure_seq.h>
81212 #include <net/ip.h>
81213
81214+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81215+
81216 /*
81217 * Allocate and initialize a new local port bind bucket.
81218 * The bindhash mutex for snum's hash chain must be held here.
81219@@ -491,6 +494,8 @@ ok:
81220 }
81221 spin_unlock(&head->lock);
81222
81223+ gr_update_task_in_ip_table(current, inet_sk(sk));
81224+
81225 if (tw) {
81226 inet_twsk_deschedule(tw, death_row);
81227 inet_twsk_put(tw);
81228diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81229index 13b229f..6956484 100644
81230--- a/net/ipv4/inetpeer.c
81231+++ b/net/ipv4/inetpeer.c
81232@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81233 struct inet_peer *p, *n;
81234 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81235
81236+ pax_track_stack();
81237+
81238 /* Look up for the address quickly. */
81239 read_lock_bh(&peer_pool_lock);
81240 p = lookup(daddr, NULL);
81241@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81242 return NULL;
81243 n->v4daddr = daddr;
81244 atomic_set(&n->refcnt, 1);
81245- atomic_set(&n->rid, 0);
81246+ atomic_set_unchecked(&n->rid, 0);
81247 n->ip_id_count = secure_ip_id(daddr);
81248 n->tcp_ts_stamp = 0;
81249
81250diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81251index d3fe10b..feeafc9 100644
81252--- a/net/ipv4/ip_fragment.c
81253+++ b/net/ipv4/ip_fragment.c
81254@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
81255 return 0;
81256
81257 start = qp->rid;
81258- end = atomic_inc_return(&peer->rid);
81259+ end = atomic_inc_return_unchecked(&peer->rid);
81260 qp->rid = end;
81261
81262 rc = qp->q.fragments && (end - start) > max;
81263diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
81264index e982b5c..f079d75 100644
81265--- a/net/ipv4/ip_sockglue.c
81266+++ b/net/ipv4/ip_sockglue.c
81267@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81268 int val;
81269 int len;
81270
81271+ pax_track_stack();
81272+
81273 if (level != SOL_IP)
81274 return -EOPNOTSUPP;
81275
81276@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81277 if (sk->sk_type != SOCK_STREAM)
81278 return -ENOPROTOOPT;
81279
81280- msg.msg_control = optval;
81281+ msg.msg_control = (void __force_kernel *)optval;
81282 msg.msg_controllen = len;
81283 msg.msg_flags = 0;
81284
81285diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
81286index f8d04c2..c1188f2 100644
81287--- a/net/ipv4/ipconfig.c
81288+++ b/net/ipv4/ipconfig.c
81289@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
81290
81291 mm_segment_t oldfs = get_fs();
81292 set_fs(get_ds());
81293- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81294+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81295 set_fs(oldfs);
81296 return res;
81297 }
81298@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
81299
81300 mm_segment_t oldfs = get_fs();
81301 set_fs(get_ds());
81302- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81303+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81304 set_fs(oldfs);
81305 return res;
81306 }
81307@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
81308
81309 mm_segment_t oldfs = get_fs();
81310 set_fs(get_ds());
81311- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
81312+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
81313 set_fs(oldfs);
81314 return res;
81315 }
81316diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
81317index c8b0cc3..4da5ae2 100644
81318--- a/net/ipv4/netfilter/arp_tables.c
81319+++ b/net/ipv4/netfilter/arp_tables.c
81320@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81321 private = &tmp;
81322 }
81323 #endif
81324+ memset(&info, 0, sizeof(info));
81325 info.valid_hooks = t->valid_hooks;
81326 memcpy(info.hook_entry, private->hook_entry,
81327 sizeof(info.hook_entry));
81328diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
81329index c156db2..e772975 100644
81330--- a/net/ipv4/netfilter/ip_queue.c
81331+++ b/net/ipv4/netfilter/ip_queue.c
81332@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81333
81334 if (v->data_len < sizeof(*user_iph))
81335 return 0;
81336+ if (v->data_len > 65535)
81337+ return -EMSGSIZE;
81338+
81339 diff = v->data_len - e->skb->len;
81340 if (diff < 0) {
81341 if (pskb_trim(e->skb, v->data_len))
81342@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
81343 static inline void
81344 __ipq_rcv_skb(struct sk_buff *skb)
81345 {
81346- int status, type, pid, flags, nlmsglen, skblen;
81347+ int status, type, pid, flags;
81348+ unsigned int nlmsglen, skblen;
81349 struct nlmsghdr *nlh;
81350
81351 skblen = skb->len;
81352diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
81353index 0606db1..02e7e4c 100644
81354--- a/net/ipv4/netfilter/ip_tables.c
81355+++ b/net/ipv4/netfilter/ip_tables.c
81356@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81357 private = &tmp;
81358 }
81359 #endif
81360+ memset(&info, 0, sizeof(info));
81361 info.valid_hooks = t->valid_hooks;
81362 memcpy(info.hook_entry, private->hook_entry,
81363 sizeof(info.hook_entry));
81364diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81365index d9521f6..3c3eb25 100644
81366--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
81367+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81368@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
81369
81370 *len = 0;
81371
81372- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
81373+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
81374 if (*octets == NULL) {
81375 if (net_ratelimit())
81376 printk("OOM in bsalg (%d)\n", __LINE__);
81377diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
81378index ab996f9..3da5f96 100644
81379--- a/net/ipv4/raw.c
81380+++ b/net/ipv4/raw.c
81381@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81382 /* Charge it to the socket. */
81383
81384 if (sock_queue_rcv_skb(sk, skb) < 0) {
81385- atomic_inc(&sk->sk_drops);
81386+ atomic_inc_unchecked(&sk->sk_drops);
81387 kfree_skb(skb);
81388 return NET_RX_DROP;
81389 }
81390@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81391 int raw_rcv(struct sock *sk, struct sk_buff *skb)
81392 {
81393 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
81394- atomic_inc(&sk->sk_drops);
81395+ atomic_inc_unchecked(&sk->sk_drops);
81396 kfree_skb(skb);
81397 return NET_RX_DROP;
81398 }
81399@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
81400
81401 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
81402 {
81403+ struct icmp_filter filter;
81404+
81405+ if (optlen < 0)
81406+ return -EINVAL;
81407 if (optlen > sizeof(struct icmp_filter))
81408 optlen = sizeof(struct icmp_filter);
81409- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
81410+ if (copy_from_user(&filter, optval, optlen))
81411 return -EFAULT;
81412+ raw_sk(sk)->filter = filter;
81413+
81414 return 0;
81415 }
81416
81417 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
81418 {
81419 int len, ret = -EFAULT;
81420+ struct icmp_filter filter;
81421
81422 if (get_user(len, optlen))
81423 goto out;
81424@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
81425 if (len > sizeof(struct icmp_filter))
81426 len = sizeof(struct icmp_filter);
81427 ret = -EFAULT;
81428- if (put_user(len, optlen) ||
81429- copy_to_user(optval, &raw_sk(sk)->filter, len))
81430+ filter = raw_sk(sk)->filter;
81431+ if (put_user(len, optlen) || len > sizeof filter ||
81432+ copy_to_user(optval, &filter, len))
81433 goto out;
81434 ret = 0;
81435 out: return ret;
81436@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81437 sk_wmem_alloc_get(sp),
81438 sk_rmem_alloc_get(sp),
81439 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81440- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81441+ atomic_read(&sp->sk_refcnt),
81442+#ifdef CONFIG_GRKERNSEC_HIDESYM
81443+ NULL,
81444+#else
81445+ sp,
81446+#endif
81447+ atomic_read_unchecked(&sp->sk_drops));
81448 }
81449
81450 static int raw_seq_show(struct seq_file *seq, void *v)
81451diff --git a/net/ipv4/route.c b/net/ipv4/route.c
81452index 58f141b..b759702 100644
81453--- a/net/ipv4/route.c
81454+++ b/net/ipv4/route.c
81455@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
81456
81457 static inline int rt_genid(struct net *net)
81458 {
81459- return atomic_read(&net->ipv4.rt_genid);
81460+ return atomic_read_unchecked(&net->ipv4.rt_genid);
81461 }
81462
81463 #ifdef CONFIG_PROC_FS
81464@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
81465 unsigned char shuffle;
81466
81467 get_random_bytes(&shuffle, sizeof(shuffle));
81468- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
81469+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
81470 }
81471
81472 /*
81473@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
81474
81475 static __net_init int rt_secret_timer_init(struct net *net)
81476 {
81477- atomic_set(&net->ipv4.rt_genid,
81478+ atomic_set_unchecked(&net->ipv4.rt_genid,
81479 (int) ((num_physpages ^ (num_physpages>>8)) ^
81480 (jiffies ^ (jiffies >> 7))));
81481
81482diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
81483index f095659..adc892a 100644
81484--- a/net/ipv4/tcp.c
81485+++ b/net/ipv4/tcp.c
81486@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
81487 int val;
81488 int err = 0;
81489
81490+ pax_track_stack();
81491+
81492 /* This is a string value all the others are int's */
81493 if (optname == TCP_CONGESTION) {
81494 char name[TCP_CA_NAME_MAX];
81495@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
81496 struct tcp_sock *tp = tcp_sk(sk);
81497 int val, len;
81498
81499+ pax_track_stack();
81500+
81501 if (get_user(len, optlen))
81502 return -EFAULT;
81503
81504diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
81505index 6fc7961..33bad4a 100644
81506--- a/net/ipv4/tcp_ipv4.c
81507+++ b/net/ipv4/tcp_ipv4.c
81508@@ -85,6 +85,9 @@
81509 int sysctl_tcp_tw_reuse __read_mostly;
81510 int sysctl_tcp_low_latency __read_mostly;
81511
81512+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81513+extern int grsec_enable_blackhole;
81514+#endif
81515
81516 #ifdef CONFIG_TCP_MD5SIG
81517 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
81518@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
81519 return 0;
81520
81521 reset:
81522+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81523+ if (!grsec_enable_blackhole)
81524+#endif
81525 tcp_v4_send_reset(rsk, skb);
81526 discard:
81527 kfree_skb(skb);
81528@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
81529 TCP_SKB_CB(skb)->sacked = 0;
81530
81531 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81532- if (!sk)
81533+ if (!sk) {
81534+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81535+ ret = 1;
81536+#endif
81537 goto no_tcp_socket;
81538+ }
81539
81540 process:
81541- if (sk->sk_state == TCP_TIME_WAIT)
81542+ if (sk->sk_state == TCP_TIME_WAIT) {
81543+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81544+ ret = 2;
81545+#endif
81546 goto do_time_wait;
81547+ }
81548
81549 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
81550 goto discard_and_relse;
81551@@ -1651,6 +1665,10 @@ no_tcp_socket:
81552 bad_packet:
81553 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81554 } else {
81555+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81556+ if (!grsec_enable_blackhole || (ret == 1 &&
81557+ (skb->dev->flags & IFF_LOOPBACK)))
81558+#endif
81559 tcp_v4_send_reset(NULL, skb);
81560 }
81561
81562@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
81563 0, /* non standard timer */
81564 0, /* open_requests have no inode */
81565 atomic_read(&sk->sk_refcnt),
81566+#ifdef CONFIG_GRKERNSEC_HIDESYM
81567+ NULL,
81568+#else
81569 req,
81570+#endif
81571 len);
81572 }
81573
81574@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
81575 sock_i_uid(sk),
81576 icsk->icsk_probes_out,
81577 sock_i_ino(sk),
81578- atomic_read(&sk->sk_refcnt), sk,
81579+ atomic_read(&sk->sk_refcnt),
81580+#ifdef CONFIG_GRKERNSEC_HIDESYM
81581+ NULL,
81582+#else
81583+ sk,
81584+#endif
81585 jiffies_to_clock_t(icsk->icsk_rto),
81586 jiffies_to_clock_t(icsk->icsk_ack.ato),
81587 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
81588@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
81589 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
81590 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
81591 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
81592- atomic_read(&tw->tw_refcnt), tw, len);
81593+ atomic_read(&tw->tw_refcnt),
81594+#ifdef CONFIG_GRKERNSEC_HIDESYM
81595+ NULL,
81596+#else
81597+ tw,
81598+#endif
81599+ len);
81600 }
81601
81602 #define TMPSZ 150
81603diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
81604index 4c03598..e09a8e8 100644
81605--- a/net/ipv4/tcp_minisocks.c
81606+++ b/net/ipv4/tcp_minisocks.c
81607@@ -26,6 +26,10 @@
81608 #include <net/inet_common.h>
81609 #include <net/xfrm.h>
81610
81611+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81612+extern int grsec_enable_blackhole;
81613+#endif
81614+
81615 #ifdef CONFIG_SYSCTL
81616 #define SYNC_INIT 0 /* let the user enable it */
81617 #else
81618@@ -672,6 +676,10 @@ listen_overflow:
81619
81620 embryonic_reset:
81621 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
81622+
81623+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81624+ if (!grsec_enable_blackhole)
81625+#endif
81626 if (!(flg & TCP_FLAG_RST))
81627 req->rsk_ops->send_reset(sk, skb);
81628
81629diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
81630index af83bdf..ec91cb2 100644
81631--- a/net/ipv4/tcp_output.c
81632+++ b/net/ipv4/tcp_output.c
81633@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
81634 __u8 *md5_hash_location;
81635 int mss;
81636
81637+ pax_track_stack();
81638+
81639 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
81640 if (skb == NULL)
81641 return NULL;
81642diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
81643index 59f5b5e..193860f 100644
81644--- a/net/ipv4/tcp_probe.c
81645+++ b/net/ipv4/tcp_probe.c
81646@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
81647 if (cnt + width >= len)
81648 break;
81649
81650- if (copy_to_user(buf + cnt, tbuf, width))
81651+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
81652 return -EFAULT;
81653 cnt += width;
81654 }
81655diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
81656index 57d5501..a9ed13a 100644
81657--- a/net/ipv4/tcp_timer.c
81658+++ b/net/ipv4/tcp_timer.c
81659@@ -21,6 +21,10 @@
81660 #include <linux/module.h>
81661 #include <net/tcp.h>
81662
81663+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81664+extern int grsec_lastack_retries;
81665+#endif
81666+
81667 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
81668 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
81669 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
81670@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
81671 }
81672 }
81673
81674+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81675+ if ((sk->sk_state == TCP_LAST_ACK) &&
81676+ (grsec_lastack_retries > 0) &&
81677+ (grsec_lastack_retries < retry_until))
81678+ retry_until = grsec_lastack_retries;
81679+#endif
81680+
81681 if (retransmits_timed_out(sk, retry_until)) {
81682 /* Has it gone just too far? */
81683 tcp_write_err(sk);
81684diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
81685index 0ac8833..58d8c43 100644
81686--- a/net/ipv4/udp.c
81687+++ b/net/ipv4/udp.c
81688@@ -86,6 +86,7 @@
81689 #include <linux/types.h>
81690 #include <linux/fcntl.h>
81691 #include <linux/module.h>
81692+#include <linux/security.h>
81693 #include <linux/socket.h>
81694 #include <linux/sockios.h>
81695 #include <linux/igmp.h>
81696@@ -106,6 +107,10 @@
81697 #include <net/xfrm.h>
81698 #include "udp_impl.h"
81699
81700+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81701+extern int grsec_enable_blackhole;
81702+#endif
81703+
81704 struct udp_table udp_table;
81705 EXPORT_SYMBOL(udp_table);
81706
81707@@ -371,6 +376,9 @@ found:
81708 return s;
81709 }
81710
81711+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
81712+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
81713+
81714 /*
81715 * This routine is called by the ICMP module when it gets some
81716 * sort of error condition. If err < 0 then the socket should
81717@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
81718 dport = usin->sin_port;
81719 if (dport == 0)
81720 return -EINVAL;
81721+
81722+ err = gr_search_udp_sendmsg(sk, usin);
81723+ if (err)
81724+ return err;
81725 } else {
81726 if (sk->sk_state != TCP_ESTABLISHED)
81727 return -EDESTADDRREQ;
81728+
81729+ err = gr_search_udp_sendmsg(sk, NULL);
81730+ if (err)
81731+ return err;
81732+
81733 daddr = inet->daddr;
81734 dport = inet->dport;
81735 /* Open fast path for connected socket.
81736@@ -945,6 +962,10 @@ try_again:
81737 if (!skb)
81738 goto out;
81739
81740+ err = gr_search_udp_recvmsg(sk, skb);
81741+ if (err)
81742+ goto out_free;
81743+
81744 ulen = skb->len - sizeof(struct udphdr);
81745 copied = len;
81746 if (copied > ulen)
81747@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
81748 if (rc == -ENOMEM) {
81749 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
81750 is_udplite);
81751- atomic_inc(&sk->sk_drops);
81752+ atomic_inc_unchecked(&sk->sk_drops);
81753 }
81754 goto drop;
81755 }
81756@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81757 goto csum_error;
81758
81759 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
81760+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81761+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81762+#endif
81763 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
81764
81765 /*
81766@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
81767 sk_wmem_alloc_get(sp),
81768 sk_rmem_alloc_get(sp),
81769 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81770- atomic_read(&sp->sk_refcnt), sp,
81771- atomic_read(&sp->sk_drops), len);
81772+ atomic_read(&sp->sk_refcnt),
81773+#ifdef CONFIG_GRKERNSEC_HIDESYM
81774+ NULL,
81775+#else
81776+ sp,
81777+#endif
81778+ atomic_read_unchecked(&sp->sk_drops), len);
81779 }
81780
81781 int udp4_seq_show(struct seq_file *seq, void *v)
81782diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
81783index 8ac3d09..fc58c5f 100644
81784--- a/net/ipv6/addrconf.c
81785+++ b/net/ipv6/addrconf.c
81786@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
81787 p.iph.ihl = 5;
81788 p.iph.protocol = IPPROTO_IPV6;
81789 p.iph.ttl = 64;
81790- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
81791+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
81792
81793 if (ops->ndo_do_ioctl) {
81794 mm_segment_t oldfs = get_fs();
81795diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
81796index cc4797d..7cfdfcc 100644
81797--- a/net/ipv6/inet6_connection_sock.c
81798+++ b/net/ipv6/inet6_connection_sock.c
81799@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
81800 #ifdef CONFIG_XFRM
81801 {
81802 struct rt6_info *rt = (struct rt6_info *)dst;
81803- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
81804+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
81805 }
81806 #endif
81807 }
81808@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
81809 #ifdef CONFIG_XFRM
81810 if (dst) {
81811 struct rt6_info *rt = (struct rt6_info *)dst;
81812- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
81813+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
81814 sk->sk_dst_cache = NULL;
81815 dst_release(dst);
81816 dst = NULL;
81817diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
81818index 093e9b2..f72cddb 100644
81819--- a/net/ipv6/inet6_hashtables.c
81820+++ b/net/ipv6/inet6_hashtables.c
81821@@ -119,7 +119,7 @@ out:
81822 }
81823 EXPORT_SYMBOL(__inet6_lookup_established);
81824
81825-static int inline compute_score(struct sock *sk, struct net *net,
81826+static inline int compute_score(struct sock *sk, struct net *net,
81827 const unsigned short hnum,
81828 const struct in6_addr *daddr,
81829 const int dif)
81830diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
81831index 4f7aaf6..f7acf45 100644
81832--- a/net/ipv6/ipv6_sockglue.c
81833+++ b/net/ipv6/ipv6_sockglue.c
81834@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
81835 int val, valbool;
81836 int retv = -ENOPROTOOPT;
81837
81838+ pax_track_stack();
81839+
81840 if (optval == NULL)
81841 val=0;
81842 else {
81843@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81844 int len;
81845 int val;
81846
81847+ pax_track_stack();
81848+
81849 if (ip6_mroute_opt(optname))
81850 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
81851
81852@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81853 if (sk->sk_type != SOCK_STREAM)
81854 return -ENOPROTOOPT;
81855
81856- msg.msg_control = optval;
81857+ msg.msg_control = (void __force_kernel *)optval;
81858 msg.msg_controllen = len;
81859 msg.msg_flags = 0;
81860
81861diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
81862index 1cf3f0c..1d4376f 100644
81863--- a/net/ipv6/netfilter/ip6_queue.c
81864+++ b/net/ipv6/netfilter/ip6_queue.c
81865@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81866
81867 if (v->data_len < sizeof(*user_iph))
81868 return 0;
81869+ if (v->data_len > 65535)
81870+ return -EMSGSIZE;
81871+
81872 diff = v->data_len - e->skb->len;
81873 if (diff < 0) {
81874 if (pskb_trim(e->skb, v->data_len))
81875@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
81876 static inline void
81877 __ipq_rcv_skb(struct sk_buff *skb)
81878 {
81879- int status, type, pid, flags, nlmsglen, skblen;
81880+ int status, type, pid, flags;
81881+ unsigned int nlmsglen, skblen;
81882 struct nlmsghdr *nlh;
81883
81884 skblen = skb->len;
81885diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
81886index 78b5a36..7f37433 100644
81887--- a/net/ipv6/netfilter/ip6_tables.c
81888+++ b/net/ipv6/netfilter/ip6_tables.c
81889@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81890 private = &tmp;
81891 }
81892 #endif
81893+ memset(&info, 0, sizeof(info));
81894 info.valid_hooks = t->valid_hooks;
81895 memcpy(info.hook_entry, private->hook_entry,
81896 sizeof(info.hook_entry));
81897diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
81898index 4f24570..b813b34 100644
81899--- a/net/ipv6/raw.c
81900+++ b/net/ipv6/raw.c
81901@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
81902 {
81903 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
81904 skb_checksum_complete(skb)) {
81905- atomic_inc(&sk->sk_drops);
81906+ atomic_inc_unchecked(&sk->sk_drops);
81907 kfree_skb(skb);
81908 return NET_RX_DROP;
81909 }
81910
81911 /* Charge it to the socket. */
81912 if (sock_queue_rcv_skb(sk,skb)<0) {
81913- atomic_inc(&sk->sk_drops);
81914+ atomic_inc_unchecked(&sk->sk_drops);
81915 kfree_skb(skb);
81916 return NET_RX_DROP;
81917 }
81918@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81919 struct raw6_sock *rp = raw6_sk(sk);
81920
81921 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
81922- atomic_inc(&sk->sk_drops);
81923+ atomic_inc_unchecked(&sk->sk_drops);
81924 kfree_skb(skb);
81925 return NET_RX_DROP;
81926 }
81927@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81928
81929 if (inet->hdrincl) {
81930 if (skb_checksum_complete(skb)) {
81931- atomic_inc(&sk->sk_drops);
81932+ atomic_inc_unchecked(&sk->sk_drops);
81933 kfree_skb(skb);
81934 return NET_RX_DROP;
81935 }
81936@@ -518,7 +518,7 @@ csum_copy_err:
81937 as some normal condition.
81938 */
81939 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
81940- atomic_inc(&sk->sk_drops);
81941+ atomic_inc_unchecked(&sk->sk_drops);
81942 goto out;
81943 }
81944
81945@@ -600,7 +600,7 @@ out:
81946 return err;
81947 }
81948
81949-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
81950+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
81951 struct flowi *fl, struct rt6_info *rt,
81952 unsigned int flags)
81953 {
81954@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
81955 u16 proto;
81956 int err;
81957
81958+ pax_track_stack();
81959+
81960 /* Rough check on arithmetic overflow,
81961 better check is made in ip6_append_data().
81962 */
81963@@ -916,12 +918,17 @@ do_confirm:
81964 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
81965 char __user *optval, int optlen)
81966 {
81967+ struct icmp6_filter filter;
81968+
81969 switch (optname) {
81970 case ICMPV6_FILTER:
81971+ if (optlen < 0)
81972+ return -EINVAL;
81973 if (optlen > sizeof(struct icmp6_filter))
81974 optlen = sizeof(struct icmp6_filter);
81975- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
81976+ if (copy_from_user(&filter, optval, optlen))
81977 return -EFAULT;
81978+ raw6_sk(sk)->filter = filter;
81979 return 0;
81980 default:
81981 return -ENOPROTOOPT;
81982@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81983 char __user *optval, int __user *optlen)
81984 {
81985 int len;
81986+ struct icmp6_filter filter;
81987
81988 switch (optname) {
81989 case ICMPV6_FILTER:
81990@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81991 len = sizeof(struct icmp6_filter);
81992 if (put_user(len, optlen))
81993 return -EFAULT;
81994- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
81995+ filter = raw6_sk(sk)->filter;
81996+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
81997 return -EFAULT;
81998 return 0;
81999 default:
82000@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
82001 0, 0L, 0,
82002 sock_i_uid(sp), 0,
82003 sock_i_ino(sp),
82004- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
82005+ atomic_read(&sp->sk_refcnt),
82006+#ifdef CONFIG_GRKERNSEC_HIDESYM
82007+ NULL,
82008+#else
82009+ sp,
82010+#endif
82011+ atomic_read_unchecked(&sp->sk_drops));
82012 }
82013
82014 static int raw6_seq_show(struct seq_file *seq, void *v)
82015diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
82016index faae6df..d4430c1 100644
82017--- a/net/ipv6/tcp_ipv6.c
82018+++ b/net/ipv6/tcp_ipv6.c
82019@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
82020 }
82021 #endif
82022
82023+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82024+extern int grsec_enable_blackhole;
82025+#endif
82026+
82027 static void tcp_v6_hash(struct sock *sk)
82028 {
82029 if (sk->sk_state != TCP_CLOSE) {
82030@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
82031 return 0;
82032
82033 reset:
82034+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82035+ if (!grsec_enable_blackhole)
82036+#endif
82037 tcp_v6_send_reset(sk, skb);
82038 discard:
82039 if (opt_skb)
82040@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
82041 TCP_SKB_CB(skb)->sacked = 0;
82042
82043 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
82044- if (!sk)
82045+ if (!sk) {
82046+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82047+ ret = 1;
82048+#endif
82049 goto no_tcp_socket;
82050+ }
82051
82052 process:
82053- if (sk->sk_state == TCP_TIME_WAIT)
82054+ if (sk->sk_state == TCP_TIME_WAIT) {
82055+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82056+ ret = 2;
82057+#endif
82058 goto do_time_wait;
82059+ }
82060
82061 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
82062 goto discard_and_relse;
82063@@ -1701,6 +1716,10 @@ no_tcp_socket:
82064 bad_packet:
82065 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
82066 } else {
82067+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82068+ if (!grsec_enable_blackhole || (ret == 1 &&
82069+ (skb->dev->flags & IFF_LOOPBACK)))
82070+#endif
82071 tcp_v6_send_reset(NULL, skb);
82072 }
82073
82074@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
82075 uid,
82076 0, /* non standard timer */
82077 0, /* open_requests have no inode */
82078- 0, req);
82079+ 0,
82080+#ifdef CONFIG_GRKERNSEC_HIDESYM
82081+ NULL
82082+#else
82083+ req
82084+#endif
82085+ );
82086 }
82087
82088 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82089@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
82090 sock_i_uid(sp),
82091 icsk->icsk_probes_out,
82092 sock_i_ino(sp),
82093- atomic_read(&sp->sk_refcnt), sp,
82094+ atomic_read(&sp->sk_refcnt),
82095+#ifdef CONFIG_GRKERNSEC_HIDESYM
82096+ NULL,
82097+#else
82098+ sp,
82099+#endif
82100 jiffies_to_clock_t(icsk->icsk_rto),
82101 jiffies_to_clock_t(icsk->icsk_ack.ato),
82102 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
82103@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
82104 dest->s6_addr32[2], dest->s6_addr32[3], destp,
82105 tw->tw_substate, 0, 0,
82106 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
82107- atomic_read(&tw->tw_refcnt), tw);
82108+ atomic_read(&tw->tw_refcnt),
82109+#ifdef CONFIG_GRKERNSEC_HIDESYM
82110+ NULL
82111+#else
82112+ tw
82113+#endif
82114+ );
82115 }
82116
82117 static int tcp6_seq_show(struct seq_file *seq, void *v)
82118diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
82119index 9cc6289..052c521 100644
82120--- a/net/ipv6/udp.c
82121+++ b/net/ipv6/udp.c
82122@@ -49,6 +49,10 @@
82123 #include <linux/seq_file.h>
82124 #include "udp_impl.h"
82125
82126+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82127+extern int grsec_enable_blackhole;
82128+#endif
82129+
82130 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
82131 {
82132 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
82133@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
82134 if (rc == -ENOMEM) {
82135 UDP6_INC_STATS_BH(sock_net(sk),
82136 UDP_MIB_RCVBUFERRORS, is_udplite);
82137- atomic_inc(&sk->sk_drops);
82138+ atomic_inc_unchecked(&sk->sk_drops);
82139 }
82140 goto drop;
82141 }
82142@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
82143 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
82144 proto == IPPROTO_UDPLITE);
82145
82146+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
82147+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
82148+#endif
82149 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82150
82151 kfree_skb(skb);
82152@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82153 0, 0L, 0,
82154 sock_i_uid(sp), 0,
82155 sock_i_ino(sp),
82156- atomic_read(&sp->sk_refcnt), sp,
82157- atomic_read(&sp->sk_drops));
82158+ atomic_read(&sp->sk_refcnt),
82159+#ifdef CONFIG_GRKERNSEC_HIDESYM
82160+ NULL,
82161+#else
82162+ sp,
82163+#endif
82164+ atomic_read_unchecked(&sp->sk_drops));
82165 }
82166
82167 int udp6_seq_show(struct seq_file *seq, void *v)
82168diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82169index 811984d..11f59b7 100644
82170--- a/net/irda/ircomm/ircomm_tty.c
82171+++ b/net/irda/ircomm/ircomm_tty.c
82172@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82173 add_wait_queue(&self->open_wait, &wait);
82174
82175 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82176- __FILE__,__LINE__, tty->driver->name, self->open_count );
82177+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82178
82179 /* As far as I can see, we protect open_count - Jean II */
82180 spin_lock_irqsave(&self->spinlock, flags);
82181 if (!tty_hung_up_p(filp)) {
82182 extra_count = 1;
82183- self->open_count--;
82184+ local_dec(&self->open_count);
82185 }
82186 spin_unlock_irqrestore(&self->spinlock, flags);
82187- self->blocked_open++;
82188+ local_inc(&self->blocked_open);
82189
82190 while (1) {
82191 if (tty->termios->c_cflag & CBAUD) {
82192@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82193 }
82194
82195 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82196- __FILE__,__LINE__, tty->driver->name, self->open_count );
82197+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82198
82199 schedule();
82200 }
82201@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82202 if (extra_count) {
82203 /* ++ is not atomic, so this should be protected - Jean II */
82204 spin_lock_irqsave(&self->spinlock, flags);
82205- self->open_count++;
82206+ local_inc(&self->open_count);
82207 spin_unlock_irqrestore(&self->spinlock, flags);
82208 }
82209- self->blocked_open--;
82210+ local_dec(&self->blocked_open);
82211
82212 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82213- __FILE__,__LINE__, tty->driver->name, self->open_count);
82214+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82215
82216 if (!retval)
82217 self->flags |= ASYNC_NORMAL_ACTIVE;
82218@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82219 }
82220 /* ++ is not atomic, so this should be protected - Jean II */
82221 spin_lock_irqsave(&self->spinlock, flags);
82222- self->open_count++;
82223+ local_inc(&self->open_count);
82224
82225 tty->driver_data = self;
82226 self->tty = tty;
82227 spin_unlock_irqrestore(&self->spinlock, flags);
82228
82229 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82230- self->line, self->open_count);
82231+ self->line, local_read(&self->open_count));
82232
82233 /* Not really used by us, but lets do it anyway */
82234 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82235@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82236 return;
82237 }
82238
82239- if ((tty->count == 1) && (self->open_count != 1)) {
82240+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82241 /*
82242 * Uh, oh. tty->count is 1, which means that the tty
82243 * structure will be freed. state->count should always
82244@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82245 */
82246 IRDA_DEBUG(0, "%s(), bad serial port count; "
82247 "tty->count is 1, state->count is %d\n", __func__ ,
82248- self->open_count);
82249- self->open_count = 1;
82250+ local_read(&self->open_count));
82251+ local_set(&self->open_count, 1);
82252 }
82253
82254- if (--self->open_count < 0) {
82255+ if (local_dec_return(&self->open_count) < 0) {
82256 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
82257- __func__, self->line, self->open_count);
82258- self->open_count = 0;
82259+ __func__, self->line, local_read(&self->open_count));
82260+ local_set(&self->open_count, 0);
82261 }
82262- if (self->open_count) {
82263+ if (local_read(&self->open_count)) {
82264 spin_unlock_irqrestore(&self->spinlock, flags);
82265
82266 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
82267@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82268 tty->closing = 0;
82269 self->tty = NULL;
82270
82271- if (self->blocked_open) {
82272+ if (local_read(&self->blocked_open)) {
82273 if (self->close_delay)
82274 schedule_timeout_interruptible(self->close_delay);
82275 wake_up_interruptible(&self->open_wait);
82276@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
82277 spin_lock_irqsave(&self->spinlock, flags);
82278 self->flags &= ~ASYNC_NORMAL_ACTIVE;
82279 self->tty = NULL;
82280- self->open_count = 0;
82281+ local_set(&self->open_count, 0);
82282 spin_unlock_irqrestore(&self->spinlock, flags);
82283
82284 wake_up_interruptible(&self->open_wait);
82285@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
82286 seq_putc(m, '\n');
82287
82288 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
82289- seq_printf(m, "Open count: %d\n", self->open_count);
82290+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
82291 seq_printf(m, "Max data size: %d\n", self->max_data_size);
82292 seq_printf(m, "Max header size: %d\n", self->max_header_size);
82293
82294diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
82295index bada1b9..f325943 100644
82296--- a/net/iucv/af_iucv.c
82297+++ b/net/iucv/af_iucv.c
82298@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
82299
82300 write_lock_bh(&iucv_sk_list.lock);
82301
82302- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
82303+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82304 while (__iucv_get_sock_by_name(name)) {
82305 sprintf(name, "%08x",
82306- atomic_inc_return(&iucv_sk_list.autobind_name));
82307+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82308 }
82309
82310 write_unlock_bh(&iucv_sk_list.lock);
82311diff --git a/net/key/af_key.c b/net/key/af_key.c
82312index 4e98193..439b449 100644
82313--- a/net/key/af_key.c
82314+++ b/net/key/af_key.c
82315@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
82316 struct xfrm_migrate m[XFRM_MAX_DEPTH];
82317 struct xfrm_kmaddress k;
82318
82319+ pax_track_stack();
82320+
82321 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
82322 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
82323 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
82324@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
82325 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
82326 else
82327 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
82328+#ifdef CONFIG_GRKERNSEC_HIDESYM
82329+ NULL,
82330+#else
82331 s,
82332+#endif
82333 atomic_read(&s->sk_refcnt),
82334 sk_rmem_alloc_get(s),
82335 sk_wmem_alloc_get(s),
82336diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
82337index bda96d1..c038b72 100644
82338--- a/net/lapb/lapb_iface.c
82339+++ b/net/lapb/lapb_iface.c
82340@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
82341 goto out;
82342
82343 lapb->dev = dev;
82344- lapb->callbacks = *callbacks;
82345+ lapb->callbacks = callbacks;
82346
82347 __lapb_insert_cb(lapb);
82348
82349@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
82350
82351 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
82352 {
82353- if (lapb->callbacks.connect_confirmation)
82354- lapb->callbacks.connect_confirmation(lapb->dev, reason);
82355+ if (lapb->callbacks->connect_confirmation)
82356+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
82357 }
82358
82359 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
82360 {
82361- if (lapb->callbacks.connect_indication)
82362- lapb->callbacks.connect_indication(lapb->dev, reason);
82363+ if (lapb->callbacks->connect_indication)
82364+ lapb->callbacks->connect_indication(lapb->dev, reason);
82365 }
82366
82367 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
82368 {
82369- if (lapb->callbacks.disconnect_confirmation)
82370- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
82371+ if (lapb->callbacks->disconnect_confirmation)
82372+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
82373 }
82374
82375 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
82376 {
82377- if (lapb->callbacks.disconnect_indication)
82378- lapb->callbacks.disconnect_indication(lapb->dev, reason);
82379+ if (lapb->callbacks->disconnect_indication)
82380+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
82381 }
82382
82383 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
82384 {
82385- if (lapb->callbacks.data_indication)
82386- return lapb->callbacks.data_indication(lapb->dev, skb);
82387+ if (lapb->callbacks->data_indication)
82388+ return lapb->callbacks->data_indication(lapb->dev, skb);
82389
82390 kfree_skb(skb);
82391 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
82392@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
82393 {
82394 int used = 0;
82395
82396- if (lapb->callbacks.data_transmit) {
82397- lapb->callbacks.data_transmit(lapb->dev, skb);
82398+ if (lapb->callbacks->data_transmit) {
82399+ lapb->callbacks->data_transmit(lapb->dev, skb);
82400 used = 1;
82401 }
82402
82403diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
82404index fe2d3f8..e57f683 100644
82405--- a/net/mac80211/cfg.c
82406+++ b/net/mac80211/cfg.c
82407@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
82408 return err;
82409 }
82410
82411-struct cfg80211_ops mac80211_config_ops = {
82412+const struct cfg80211_ops mac80211_config_ops = {
82413 .add_virtual_intf = ieee80211_add_iface,
82414 .del_virtual_intf = ieee80211_del_iface,
82415 .change_virtual_intf = ieee80211_change_iface,
82416diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
82417index 7d7879f..2d51f62 100644
82418--- a/net/mac80211/cfg.h
82419+++ b/net/mac80211/cfg.h
82420@@ -4,6 +4,6 @@
82421 #ifndef __CFG_H
82422 #define __CFG_H
82423
82424-extern struct cfg80211_ops mac80211_config_ops;
82425+extern const struct cfg80211_ops mac80211_config_ops;
82426
82427 #endif /* __CFG_H */
82428diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
82429index 99c7525..9cb4937 100644
82430--- a/net/mac80211/debugfs_key.c
82431+++ b/net/mac80211/debugfs_key.c
82432@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
82433 size_t count, loff_t *ppos)
82434 {
82435 struct ieee80211_key *key = file->private_data;
82436- int i, res, bufsize = 2 * key->conf.keylen + 2;
82437+ int i, bufsize = 2 * key->conf.keylen + 2;
82438 char *buf = kmalloc(bufsize, GFP_KERNEL);
82439 char *p = buf;
82440+ ssize_t res;
82441+
82442+ if (buf == NULL)
82443+ return -ENOMEM;
82444
82445 for (i = 0; i < key->conf.keylen; i++)
82446 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
82447diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
82448index 33a2e89..08650c8 100644
82449--- a/net/mac80211/debugfs_sta.c
82450+++ b/net/mac80211/debugfs_sta.c
82451@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
82452 int i;
82453 struct sta_info *sta = file->private_data;
82454
82455+ pax_track_stack();
82456+
82457 spin_lock_bh(&sta->lock);
82458 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
82459 sta->ampdu_mlme.dialog_token_allocator + 1);
82460diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
82461index ca62bfe..6657a03 100644
82462--- a/net/mac80211/ieee80211_i.h
82463+++ b/net/mac80211/ieee80211_i.h
82464@@ -25,6 +25,7 @@
82465 #include <linux/etherdevice.h>
82466 #include <net/cfg80211.h>
82467 #include <net/mac80211.h>
82468+#include <asm/local.h>
82469 #include "key.h"
82470 #include "sta_info.h"
82471
82472@@ -635,7 +636,7 @@ struct ieee80211_local {
82473 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
82474 spinlock_t queue_stop_reason_lock;
82475
82476- int open_count;
82477+ local_t open_count;
82478 int monitors, cooked_mntrs;
82479 /* number of interfaces with corresponding FIF_ flags */
82480 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
82481diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
82482index 079c500..eb3c6d4 100644
82483--- a/net/mac80211/iface.c
82484+++ b/net/mac80211/iface.c
82485@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
82486 break;
82487 }
82488
82489- if (local->open_count == 0) {
82490+ if (local_read(&local->open_count) == 0) {
82491 res = drv_start(local);
82492 if (res)
82493 goto err_del_bss;
82494@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
82495 * Validate the MAC address for this device.
82496 */
82497 if (!is_valid_ether_addr(dev->dev_addr)) {
82498- if (!local->open_count)
82499+ if (!local_read(&local->open_count))
82500 drv_stop(local);
82501 return -EADDRNOTAVAIL;
82502 }
82503@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
82504
82505 hw_reconf_flags |= __ieee80211_recalc_idle(local);
82506
82507- local->open_count++;
82508+ local_inc(&local->open_count);
82509 if (hw_reconf_flags) {
82510 ieee80211_hw_config(local, hw_reconf_flags);
82511 /*
82512@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
82513 err_del_interface:
82514 drv_remove_interface(local, &conf);
82515 err_stop:
82516- if (!local->open_count)
82517+ if (!local_read(&local->open_count))
82518 drv_stop(local);
82519 err_del_bss:
82520 sdata->bss = NULL;
82521@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
82522 WARN_ON(!list_empty(&sdata->u.ap.vlans));
82523 }
82524
82525- local->open_count--;
82526+ local_dec(&local->open_count);
82527
82528 switch (sdata->vif.type) {
82529 case NL80211_IFTYPE_AP_VLAN:
82530@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
82531
82532 ieee80211_recalc_ps(local, -1);
82533
82534- if (local->open_count == 0) {
82535+ if (local_read(&local->open_count) == 0) {
82536 ieee80211_clear_tx_pending(local);
82537 ieee80211_stop_device(local);
82538
82539diff --git a/net/mac80211/main.c b/net/mac80211/main.c
82540index 2dfe176..74e4388 100644
82541--- a/net/mac80211/main.c
82542+++ b/net/mac80211/main.c
82543@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
82544 local->hw.conf.power_level = power;
82545 }
82546
82547- if (changed && local->open_count) {
82548+ if (changed && local_read(&local->open_count)) {
82549 ret = drv_config(local, changed);
82550 /*
82551 * Goal:
82552diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
82553index e67eea7..fcc227e 100644
82554--- a/net/mac80211/mlme.c
82555+++ b/net/mac80211/mlme.c
82556@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
82557 bool have_higher_than_11mbit = false, newsta = false;
82558 u16 ap_ht_cap_flags;
82559
82560+ pax_track_stack();
82561+
82562 /*
82563 * AssocResp and ReassocResp have identical structure, so process both
82564 * of them in this function.
82565diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
82566index e535f1c..4d733d1 100644
82567--- a/net/mac80211/pm.c
82568+++ b/net/mac80211/pm.c
82569@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
82570 }
82571
82572 /* stop hardware - this must stop RX */
82573- if (local->open_count)
82574+ if (local_read(&local->open_count))
82575 ieee80211_stop_device(local);
82576
82577 local->suspended = true;
82578diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
82579index b33efc4..0a2efb6 100644
82580--- a/net/mac80211/rate.c
82581+++ b/net/mac80211/rate.c
82582@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
82583 struct rate_control_ref *ref, *old;
82584
82585 ASSERT_RTNL();
82586- if (local->open_count)
82587+ if (local_read(&local->open_count))
82588 return -EBUSY;
82589
82590 ref = rate_control_alloc(name, local);
82591diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
82592index b1d7904..57e4da7 100644
82593--- a/net/mac80211/tx.c
82594+++ b/net/mac80211/tx.c
82595@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
82596 return cpu_to_le16(dur);
82597 }
82598
82599-static int inline is_ieee80211_device(struct ieee80211_local *local,
82600+static inline int is_ieee80211_device(struct ieee80211_local *local,
82601 struct net_device *dev)
82602 {
82603 return local == wdev_priv(dev->ieee80211_ptr);
82604diff --git a/net/mac80211/util.c b/net/mac80211/util.c
82605index 31b1085..48fb26d 100644
82606--- a/net/mac80211/util.c
82607+++ b/net/mac80211/util.c
82608@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
82609 local->resuming = true;
82610
82611 /* restart hardware */
82612- if (local->open_count) {
82613+ if (local_read(&local->open_count)) {
82614 /*
82615 * Upon resume hardware can sometimes be goofy due to
82616 * various platform / driver / bus issues, so restarting
82617diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
82618index 634d14a..b35a608 100644
82619--- a/net/netfilter/Kconfig
82620+++ b/net/netfilter/Kconfig
82621@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
82622
82623 To compile it as a module, choose M here. If unsure, say N.
82624
82625+config NETFILTER_XT_MATCH_GRADM
82626+ tristate '"gradm" match support'
82627+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
82628+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
82629+ ---help---
82630+ The gradm match allows to match on grsecurity RBAC being enabled.
82631+ It is useful when iptables rules are applied early on bootup to
82632+ prevent connections to the machine (except from a trusted host)
82633+ while the RBAC system is disabled.
82634+
82635 config NETFILTER_XT_MATCH_HASHLIMIT
82636 tristate '"hashlimit" match support'
82637 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
82638diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
82639index 49f62ee..a17b2c6 100644
82640--- a/net/netfilter/Makefile
82641+++ b/net/netfilter/Makefile
82642@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
82643 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
82644 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
82645 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
82646+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
82647 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
82648 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
82649 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
82650diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
82651index 3c7e427..724043c 100644
82652--- a/net/netfilter/ipvs/ip_vs_app.c
82653+++ b/net/netfilter/ipvs/ip_vs_app.c
82654@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
82655 .open = ip_vs_app_open,
82656 .read = seq_read,
82657 .llseek = seq_lseek,
82658- .release = seq_release,
82659+ .release = seq_release_net,
82660 };
82661 #endif
82662
82663diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
82664index 95682e5..457dbac 100644
82665--- a/net/netfilter/ipvs/ip_vs_conn.c
82666+++ b/net/netfilter/ipvs/ip_vs_conn.c
82667@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
82668 /* if the connection is not template and is created
82669 * by sync, preserve the activity flag.
82670 */
82671- cp->flags |= atomic_read(&dest->conn_flags) &
82672+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
82673 (~IP_VS_CONN_F_INACTIVE);
82674 else
82675- cp->flags |= atomic_read(&dest->conn_flags);
82676+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
82677 cp->dest = dest;
82678
82679 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
82680@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
82681 atomic_set(&cp->refcnt, 1);
82682
82683 atomic_set(&cp->n_control, 0);
82684- atomic_set(&cp->in_pkts, 0);
82685+ atomic_set_unchecked(&cp->in_pkts, 0);
82686
82687 atomic_inc(&ip_vs_conn_count);
82688 if (flags & IP_VS_CONN_F_NO_CPORT)
82689@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
82690 .open = ip_vs_conn_open,
82691 .read = seq_read,
82692 .llseek = seq_lseek,
82693- .release = seq_release,
82694+ .release = seq_release_net,
82695 };
82696
82697 static const char *ip_vs_origin_name(unsigned flags)
82698@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
82699 .open = ip_vs_conn_sync_open,
82700 .read = seq_read,
82701 .llseek = seq_lseek,
82702- .release = seq_release,
82703+ .release = seq_release_net,
82704 };
82705
82706 #endif
82707@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
82708
82709 /* Don't drop the entry if its number of incoming packets is not
82710 located in [0, 8] */
82711- i = atomic_read(&cp->in_pkts);
82712+ i = atomic_read_unchecked(&cp->in_pkts);
82713 if (i > 8 || i < 0) return 0;
82714
82715 if (!todrop_rate[i]) return 0;
82716diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
82717index b95699f..5fee919 100644
82718--- a/net/netfilter/ipvs/ip_vs_core.c
82719+++ b/net/netfilter/ipvs/ip_vs_core.c
82720@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
82721 ret = cp->packet_xmit(skb, cp, pp);
82722 /* do not touch skb anymore */
82723
82724- atomic_inc(&cp->in_pkts);
82725+ atomic_inc_unchecked(&cp->in_pkts);
82726 ip_vs_conn_put(cp);
82727 return ret;
82728 }
82729@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
82730 * Sync connection if it is about to close to
82731 * encorage the standby servers to update the connections timeout
82732 */
82733- pkts = atomic_add_return(1, &cp->in_pkts);
82734+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
82735 if (af == AF_INET &&
82736 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
82737 (((cp->protocol != IPPROTO_TCP ||
82738diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
82739index 02b2610..2d89424 100644
82740--- a/net/netfilter/ipvs/ip_vs_ctl.c
82741+++ b/net/netfilter/ipvs/ip_vs_ctl.c
82742@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
82743 ip_vs_rs_hash(dest);
82744 write_unlock_bh(&__ip_vs_rs_lock);
82745 }
82746- atomic_set(&dest->conn_flags, conn_flags);
82747+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
82748
82749 /* bind the service */
82750 if (!dest->svc) {
82751@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82752 " %-7s %-6d %-10d %-10d\n",
82753 &dest->addr.in6,
82754 ntohs(dest->port),
82755- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82756+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82757 atomic_read(&dest->weight),
82758 atomic_read(&dest->activeconns),
82759 atomic_read(&dest->inactconns));
82760@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82761 "%-7s %-6d %-10d %-10d\n",
82762 ntohl(dest->addr.ip),
82763 ntohs(dest->port),
82764- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82765+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82766 atomic_read(&dest->weight),
82767 atomic_read(&dest->activeconns),
82768 atomic_read(&dest->inactconns));
82769@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
82770 .open = ip_vs_info_open,
82771 .read = seq_read,
82772 .llseek = seq_lseek,
82773- .release = seq_release_private,
82774+ .release = seq_release_net,
82775 };
82776
82777 #endif
82778@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
82779 .open = ip_vs_stats_seq_open,
82780 .read = seq_read,
82781 .llseek = seq_lseek,
82782- .release = single_release,
82783+ .release = single_release_net,
82784 };
82785
82786 #endif
82787@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
82788
82789 entry.addr = dest->addr.ip;
82790 entry.port = dest->port;
82791- entry.conn_flags = atomic_read(&dest->conn_flags);
82792+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
82793 entry.weight = atomic_read(&dest->weight);
82794 entry.u_threshold = dest->u_threshold;
82795 entry.l_threshold = dest->l_threshold;
82796@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
82797 unsigned char arg[128];
82798 int ret = 0;
82799
82800+ pax_track_stack();
82801+
82802 if (!capable(CAP_NET_ADMIN))
82803 return -EPERM;
82804
82805@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
82806 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
82807
82808 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
82809- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82810+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82811 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
82812 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
82813 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
82814diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
82815index e177f0d..55e8581 100644
82816--- a/net/netfilter/ipvs/ip_vs_sync.c
82817+++ b/net/netfilter/ipvs/ip_vs_sync.c
82818@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
82819
82820 if (opt)
82821 memcpy(&cp->in_seq, opt, sizeof(*opt));
82822- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82823+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82824 cp->state = state;
82825 cp->old_state = cp->state;
82826 /*
82827diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
82828index 30b3189..e2e4b55 100644
82829--- a/net/netfilter/ipvs/ip_vs_xmit.c
82830+++ b/net/netfilter/ipvs/ip_vs_xmit.c
82831@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
82832 else
82833 rc = NF_ACCEPT;
82834 /* do not touch skb anymore */
82835- atomic_inc(&cp->in_pkts);
82836+ atomic_inc_unchecked(&cp->in_pkts);
82837 goto out;
82838 }
82839
82840@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
82841 else
82842 rc = NF_ACCEPT;
82843 /* do not touch skb anymore */
82844- atomic_inc(&cp->in_pkts);
82845+ atomic_inc_unchecked(&cp->in_pkts);
82846 goto out;
82847 }
82848
82849diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
82850index d521718..d0fd7a1 100644
82851--- a/net/netfilter/nf_conntrack_netlink.c
82852+++ b/net/netfilter/nf_conntrack_netlink.c
82853@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
82854 static int
82855 ctnetlink_parse_tuple(const struct nlattr * const cda[],
82856 struct nf_conntrack_tuple *tuple,
82857- enum ctattr_tuple type, u_int8_t l3num)
82858+ enum ctattr_type type, u_int8_t l3num)
82859 {
82860 struct nlattr *tb[CTA_TUPLE_MAX+1];
82861 int err;
82862diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
82863index f900dc3..5e45346 100644
82864--- a/net/netfilter/nfnetlink_log.c
82865+++ b/net/netfilter/nfnetlink_log.c
82866@@ -68,7 +68,7 @@ struct nfulnl_instance {
82867 };
82868
82869 static DEFINE_RWLOCK(instances_lock);
82870-static atomic_t global_seq;
82871+static atomic_unchecked_t global_seq;
82872
82873 #define INSTANCE_BUCKETS 16
82874 static struct hlist_head instance_table[INSTANCE_BUCKETS];
82875@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
82876 /* global sequence number */
82877 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
82878 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
82879- htonl(atomic_inc_return(&global_seq)));
82880+ htonl(atomic_inc_return_unchecked(&global_seq)));
82881
82882 if (data_len) {
82883 struct nlattr *nla;
82884diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
82885new file mode 100644
82886index 0000000..b1bac76
82887--- /dev/null
82888+++ b/net/netfilter/xt_gradm.c
82889@@ -0,0 +1,51 @@
82890+/*
82891+ * gradm match for netfilter
82892