]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/2.6.32.56/grsecurity-2.2.2-2.6.32.56-201202051926.patch
Auto commit, grsecurity-3.1-4.9.13-201703052141.patch added.
[thirdparty/grsecurity-scrape.git] / test / 2.6.32.56 / grsecurity-2.2.2-2.6.32.56-201202051926.patch
CommitLineData
dadd4cae
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index e1efc40..47f0daf 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -1,15 +1,19 @@
6 *.a
7 *.aux
8 *.bin
9+*.cis
10 *.cpio
11 *.csp
12+*.dbg
13 *.dsp
14 *.dvi
15 *.elf
16 *.eps
17 *.fw
18+*.gcno
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -38,8 +42,10 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *_MODULES
32+*_reg_safe.h
33 *_vga16.c
34 *~
35 *.9
36@@ -49,11 +55,16 @@
37 53c700_d.h
38 CVS
39 ChangeSet
40+GPATH
41+GRTAGS
42+GSYMS
43+GTAGS
44 Image
45 Kerntypes
46 Module.markers
47 Module.symvers
48 PENDING
49+PERF*
50 SCCS
51 System.map*
52 TAGS
53@@ -76,7 +87,11 @@ btfixupprep
54 build
55 bvmlinux
56 bzImage*
57+capability_names.h
58+capflags.c
59 classlist.h*
60+clut_vga16.c
61+common-cmds.h
62 comp*.log
63 compile.h*
64 conf
65@@ -84,6 +99,8 @@ config
66 config-*
67 config_data.h*
68 config_data.gz*
69+config.c
70+config.tmp
71 conmakehash
72 consolemap_deftbl.c*
73 cpustr.h
74@@ -97,19 +114,23 @@ elfconfig.h*
75 fixdep
76 fore200e_mkfirm
77 fore200e_pca_fw.c*
78+gate.lds
79 gconf
80 gen-devlist
81 gen_crc32table
82 gen_init_cpio
83 genksyms
84 *_gray256.c
85+hash
86+hid-example
87 ihex2fw
88 ikconfig.h*
89 initramfs_data.cpio
90+initramfs_data.cpio.bz2
91 initramfs_data.cpio.gz
92 initramfs_list
93 kallsyms
94-kconfig
95+kern_constants.h
96 keywords.c
97 ksym.c*
98 ksym.h*
99@@ -127,13 +148,16 @@ machtypes.h
100 map
101 maui_boot.h
102 mconf
103+mdp
104 miboot*
105 mk_elfconfig
106 mkboot
107 mkbugboot
108 mkcpustr
109 mkdep
110+mkpiggy
111 mkprep
112+mkregtable
113 mktables
114 mktree
115 modpost
116@@ -149,6 +173,7 @@ patches*
117 pca200e.bin
118 pca200e_ecd.bin2
119 piggy.gz
120+piggy.S
121 piggyback
122 pnmtologo
123 ppc_defs.h*
124@@ -157,12 +182,15 @@ qconf
125 raid6altivec*.c
126 raid6int*.c
127 raid6tables.c
128+regdb.c
129 relocs
130+rlim_names.h
131 series
132 setup
133 setup.bin
134 setup.elf
135 sImage
136+slabinfo
137 sm_tbl*
138 split-include
139 syscalltab.h
140@@ -171,6 +199,7 @@ tftpboot.img
141 timeconst.h
142 times.h*
143 trix_boot.h
144+user_constants.h
145 utsrelease.h*
146 vdso-syms.lds
147 vdso.lds
148@@ -186,14 +215,20 @@ version.h*
149 vmlinux
150 vmlinux-*
151 vmlinux.aout
152+vmlinux.bin.all
153+vmlinux.bin.bz2
154 vmlinux.lds
155+vmlinux.relocs
156+voffset.h
157 vsyscall.lds
158 vsyscall_32.lds
159 wanxlfw.inc
160 uImage
161 unifdef
162+utsrelease.h
163 wakeup.bin
164 wakeup.elf
165 wakeup.lds
166 zImage*
167 zconf.hash.c
168+zoffset.h
169diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
170index c840e7d..f4c451c 100644
171--- a/Documentation/kernel-parameters.txt
172+++ b/Documentation/kernel-parameters.txt
173@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters. It is defined in the file
174 the specified number of seconds. This is to be used if
175 your oopses keep scrolling off the screen.
176
177+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
178+ virtualization environments that don't cope well with the
179+ expand down segment used by UDEREF on X86-32 or the frequent
180+ page table updates on X86-64.
181+
182+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
183+
184 pcbit= [HW,ISDN]
185
186 pcd. [PARIDE]
187diff --git a/Makefile b/Makefile
188index 81ad738..cbdaeb0 100644
189--- a/Makefile
190+++ b/Makefile
191@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
192
193 HOSTCC = gcc
194 HOSTCXX = g++
195-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
196-HOSTCXXFLAGS = -O2
197+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
198+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
199+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
200
201 # Decide whether to build built-in, modular, or both.
202 # Normally, just do built-in.
203@@ -376,8 +377,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn --exc
204 # Rules shared between *config targets and build targets
205
206 # Basic helpers built in scripts/
207-PHONY += scripts_basic
208-scripts_basic:
209+PHONY += scripts_basic gcc-plugins
210+scripts_basic: gcc-plugins
211 $(Q)$(MAKE) $(build)=scripts/basic
212
213 # To avoid any implicit rule to kick in, define an empty command.
214@@ -403,7 +404,7 @@ endif
215 # of make so .config is not included in this case either (for *config).
216
217 no-dot-config-targets := clean mrproper distclean \
218- cscope TAGS tags help %docs check% \
219+ cscope gtags TAGS tags help %docs check% \
220 include/linux/version.h headers_% \
221 kernelrelease kernelversion
222
223@@ -526,6 +527,46 @@ else
224 KBUILD_CFLAGS += -O2
225 endif
226
227+ifndef DISABLE_PAX_PLUGINS
228+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(CC)"), y)
229+ifndef DISABLE_PAX_CONSTIFY_PLUGIN
230+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
231+endif
232+ifdef CONFIG_PAX_MEMORY_STACKLEAK
233+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
234+STACKLEAK_PLUGIN += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
235+endif
236+ifdef CONFIG_KALLOCSTAT_PLUGIN
237+KALLOCSTAT_PLUGIN := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
238+endif
239+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
240+KERNEXEC_PLUGIN := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
241+KERNEXEC_PLUGIN += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD)
242+endif
243+ifdef CONFIG_CHECKER_PLUGIN
244+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
245+CHECKER_PLUGIN := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
246+endif
247+endif
248+GCC_PLUGINS := $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN) $(KALLOCSTAT_PLUGIN) $(KERNEXEC_PLUGIN) $(CHECKER_PLUGIN)
249+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN KERNEXEC_PLUGIN CHECKER_PLUGIN
250+ifeq ($(KBUILD_EXTMOD),)
251+gcc-plugins:
252+ $(Q)$(MAKE) $(build)=tools/gcc
253+else
254+gcc-plugins: ;
255+endif
256+else
257+gcc-plugins:
258+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
259+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
260+else
261+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
262+endif
263+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
264+endif
265+endif
266+
267 include $(srctree)/arch/$(SRCARCH)/Makefile
268
269 ifneq ($(CONFIG_FRAME_WARN),0)
270@@ -647,7 +688,7 @@ export mod_strip_cmd
271
272
273 ifeq ($(KBUILD_EXTMOD),)
274-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
275+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
276
277 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
278 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
279@@ -868,6 +909,7 @@ vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
280
281 # The actual objects are generated when descending,
282 # make sure no implicit rule kicks in
283+$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): KBUILD_CFLAGS += $(GCC_PLUGINS)
284 $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
285
286 # Handle descending into subdirectories listed in $(vmlinux-dirs)
287@@ -877,7 +919,7 @@ $(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ;
288 # Error messages still appears in the original language
289
290 PHONY += $(vmlinux-dirs)
291-$(vmlinux-dirs): prepare scripts
292+$(vmlinux-dirs): gcc-plugins prepare scripts
293 $(Q)$(MAKE) $(build)=$@
294
295 # Build the kernel release string
296@@ -986,6 +1028,7 @@ prepare0: archprepare FORCE
297 $(Q)$(MAKE) $(build)=. missing-syscalls
298
299 # All the preparing..
300+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS),$(KBUILD_CFLAGS))
301 prepare: prepare0
302
303 # The asm symlink changes when $(ARCH) changes.
304@@ -1127,6 +1170,7 @@ all: modules
305 # using awk while concatenating to the final file.
306
307 PHONY += modules
308+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
309 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
310 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
311 @$(kecho) ' Building modules, stage 2.';
312@@ -1136,7 +1180,7 @@ modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux)
313
314 # Target to prepare building external modules
315 PHONY += modules_prepare
316-modules_prepare: prepare scripts
317+modules_prepare: gcc-plugins prepare scripts
318
319 # Target to install modules
320 PHONY += modules_install
321@@ -1201,7 +1245,7 @@ MRPROPER_FILES += .config .config.old include/asm .version .old_version \
322 include/linux/autoconf.h include/linux/version.h \
323 include/linux/utsrelease.h \
324 include/linux/bounds.h include/asm*/asm-offsets.h \
325- Module.symvers Module.markers tags TAGS cscope*
326+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
327
328 # clean - Delete most, but leave enough to build external modules
329 #
330@@ -1245,7 +1289,7 @@ distclean: mrproper
331 @find $(srctree) $(RCS_FIND_IGNORE) \
332 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
333 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
334- -o -name '.*.rej' -o -size 0 \
335+ -o -name '.*.rej' -o -name '*.so' -o -size 0 \
336 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
337 -type f -print | xargs rm -f
338
339@@ -1292,6 +1336,7 @@ help:
340 @echo ' modules_prepare - Set up for building external modules'
341 @echo ' tags/TAGS - Generate tags file for editors'
342 @echo ' cscope - Generate cscope index'
343+ @echo ' gtags - Generate GNU GLOBAL index'
344 @echo ' kernelrelease - Output the release version string'
345 @echo ' kernelversion - Output the version stored in Makefile'
346 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
347@@ -1393,6 +1438,7 @@ PHONY += $(module-dirs) modules
348 $(module-dirs): crmodverdir $(objtree)/Module.symvers
349 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
350
351+modules: KBUILD_CFLAGS += $(GCC_PLUGINS)
352 modules: $(module-dirs)
353 @$(kecho) ' Building modules, stage 2.';
354 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
355@@ -1448,7 +1494,7 @@ endif # KBUILD_EXTMOD
356 quiet_cmd_tags = GEN $@
357 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
358
359-tags TAGS cscope: FORCE
360+tags TAGS cscope gtags: FORCE
361 $(call cmd,tags)
362
363 # Scripts to check various things for consistency
364@@ -1513,17 +1559,19 @@ else
365 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
366 endif
367
368-%.s: %.c prepare scripts FORCE
369+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS)
370+%.s: %.c gcc-plugins prepare scripts FORCE
371 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
372 %.i: %.c prepare scripts FORCE
373 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
374-%.o: %.c prepare scripts FORCE
375+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS)
376+%.o: %.c gcc-plugins prepare scripts FORCE
377 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
378 %.lst: %.c prepare scripts FORCE
379 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
380-%.s: %.S prepare scripts FORCE
381+%.s: %.S gcc-plugins prepare scripts FORCE
382 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
383-%.o: %.S prepare scripts FORCE
384+%.o: %.S gcc-plugins prepare scripts FORCE
385 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
386 %.symtypes: %.c prepare scripts FORCE
387 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
388@@ -1533,11 +1581,13 @@ endif
389 $(cmd_crmodverdir)
390 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
391 $(build)=$(build-dir)
392-%/: prepare scripts FORCE
393+%/: KBUILD_CFLAGS += $(GCC_PLUGINS)
394+%/: gcc-plugins prepare scripts FORCE
395 $(cmd_crmodverdir)
396 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
397 $(build)=$(build-dir)
398-%.ko: prepare scripts FORCE
399+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS)
400+%.ko: gcc-plugins prepare scripts FORCE
401 $(cmd_crmodverdir)
402 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
403 $(build)=$(build-dir) $(@:.ko=.o)
404diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
405index 5c75c1b..c82f878 100644
406--- a/arch/alpha/include/asm/elf.h
407+++ b/arch/alpha/include/asm/elf.h
408@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
409
410 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
411
412+#ifdef CONFIG_PAX_ASLR
413+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
414+
415+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
416+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
417+#endif
418+
419 /* $0 is set by ld.so to a pointer to a function which might be
420 registered using atexit. This provides a mean for the dynamic
421 linker to call DT_FINI functions for shared libraries that have
422diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
423index 3f0c59f..cf1e100 100644
424--- a/arch/alpha/include/asm/pgtable.h
425+++ b/arch/alpha/include/asm/pgtable.h
426@@ -101,6 +101,17 @@ struct vm_area_struct;
427 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
428 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
429 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
430+
431+#ifdef CONFIG_PAX_PAGEEXEC
432+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
433+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
434+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
435+#else
436+# define PAGE_SHARED_NOEXEC PAGE_SHARED
437+# define PAGE_COPY_NOEXEC PAGE_COPY
438+# define PAGE_READONLY_NOEXEC PAGE_READONLY
439+#endif
440+
441 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
442
443 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
444diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
445index ebc3c89..20cfa63 100644
446--- a/arch/alpha/kernel/module.c
447+++ b/arch/alpha/kernel/module.c
448@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
449
450 /* The small sections were sorted to the end of the segment.
451 The following should definitely cover them. */
452- gp = (u64)me->module_core + me->core_size - 0x8000;
453+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
454 got = sechdrs[me->arch.gotsecindex].sh_addr;
455
456 for (i = 0; i < n; i++) {
457diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
458index a94e49c..d71dd44 100644
459--- a/arch/alpha/kernel/osf_sys.c
460+++ b/arch/alpha/kernel/osf_sys.c
461@@ -1172,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
462 /* At this point: (!vma || addr < vma->vm_end). */
463 if (limit - len < addr)
464 return -ENOMEM;
465- if (!vma || addr + len <= vma->vm_start)
466+ if (check_heap_stack_gap(vma, addr, len))
467 return addr;
468 addr = vma->vm_end;
469 vma = vma->vm_next;
470@@ -1208,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
471 merely specific addresses, but regions of memory -- perhaps
472 this feature should be incorporated into all ports? */
473
474+#ifdef CONFIG_PAX_RANDMMAP
475+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
476+#endif
477+
478 if (addr) {
479 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
480 if (addr != (unsigned long) -ENOMEM)
481@@ -1215,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
482 }
483
484 /* Next, try allocating at TASK_UNMAPPED_BASE. */
485- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
486- len, limit);
487+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
488+
489 if (addr != (unsigned long) -ENOMEM)
490 return addr;
491
492diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
493index 00a31de..2ded0f2 100644
494--- a/arch/alpha/mm/fault.c
495+++ b/arch/alpha/mm/fault.c
496@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
497 __reload_thread(pcb);
498 }
499
500+#ifdef CONFIG_PAX_PAGEEXEC
501+/*
502+ * PaX: decide what to do with offenders (regs->pc = fault address)
503+ *
504+ * returns 1 when task should be killed
505+ * 2 when patched PLT trampoline was detected
506+ * 3 when unpatched PLT trampoline was detected
507+ */
508+static int pax_handle_fetch_fault(struct pt_regs *regs)
509+{
510+
511+#ifdef CONFIG_PAX_EMUPLT
512+ int err;
513+
514+ do { /* PaX: patched PLT emulation #1 */
515+ unsigned int ldah, ldq, jmp;
516+
517+ err = get_user(ldah, (unsigned int *)regs->pc);
518+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
519+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
520+
521+ if (err)
522+ break;
523+
524+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
525+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
526+ jmp == 0x6BFB0000U)
527+ {
528+ unsigned long r27, addr;
529+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
530+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
531+
532+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
533+ err = get_user(r27, (unsigned long *)addr);
534+ if (err)
535+ break;
536+
537+ regs->r27 = r27;
538+ regs->pc = r27;
539+ return 2;
540+ }
541+ } while (0);
542+
543+ do { /* PaX: patched PLT emulation #2 */
544+ unsigned int ldah, lda, br;
545+
546+ err = get_user(ldah, (unsigned int *)regs->pc);
547+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
548+ err |= get_user(br, (unsigned int *)(regs->pc+8));
549+
550+ if (err)
551+ break;
552+
553+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
554+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
555+ (br & 0xFFE00000U) == 0xC3E00000U)
556+ {
557+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
558+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
559+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
560+
561+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
562+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
563+ return 2;
564+ }
565+ } while (0);
566+
567+ do { /* PaX: unpatched PLT emulation */
568+ unsigned int br;
569+
570+ err = get_user(br, (unsigned int *)regs->pc);
571+
572+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
573+ unsigned int br2, ldq, nop, jmp;
574+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
575+
576+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
577+ err = get_user(br2, (unsigned int *)addr);
578+ err |= get_user(ldq, (unsigned int *)(addr+4));
579+ err |= get_user(nop, (unsigned int *)(addr+8));
580+ err |= get_user(jmp, (unsigned int *)(addr+12));
581+ err |= get_user(resolver, (unsigned long *)(addr+16));
582+
583+ if (err)
584+ break;
585+
586+ if (br2 == 0xC3600000U &&
587+ ldq == 0xA77B000CU &&
588+ nop == 0x47FF041FU &&
589+ jmp == 0x6B7B0000U)
590+ {
591+ regs->r28 = regs->pc+4;
592+ regs->r27 = addr+16;
593+ regs->pc = resolver;
594+ return 3;
595+ }
596+ }
597+ } while (0);
598+#endif
599+
600+ return 1;
601+}
602+
603+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
604+{
605+ unsigned long i;
606+
607+ printk(KERN_ERR "PAX: bytes at PC: ");
608+ for (i = 0; i < 5; i++) {
609+ unsigned int c;
610+ if (get_user(c, (unsigned int *)pc+i))
611+ printk(KERN_CONT "???????? ");
612+ else
613+ printk(KERN_CONT "%08x ", c);
614+ }
615+ printk("\n");
616+}
617+#endif
618
619 /*
620 * This routine handles page faults. It determines the address,
621@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
622 good_area:
623 si_code = SEGV_ACCERR;
624 if (cause < 0) {
625- if (!(vma->vm_flags & VM_EXEC))
626+ if (!(vma->vm_flags & VM_EXEC)) {
627+
628+#ifdef CONFIG_PAX_PAGEEXEC
629+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
630+ goto bad_area;
631+
632+ up_read(&mm->mmap_sem);
633+ switch (pax_handle_fetch_fault(regs)) {
634+
635+#ifdef CONFIG_PAX_EMUPLT
636+ case 2:
637+ case 3:
638+ return;
639+#endif
640+
641+ }
642+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
643+ do_group_exit(SIGKILL);
644+#else
645 goto bad_area;
646+#endif
647+
648+ }
649 } else if (!cause) {
650 /* Allow reads even for write-only mappings */
651 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
652diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
653index 6aac3f5..265536b 100644
654--- a/arch/arm/include/asm/elf.h
655+++ b/arch/arm/include/asm/elf.h
656@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
657 the loader. We need to make sure that it is out of the way of the program
658 that it will "exec", and that there is sufficient room for the brk. */
659
660-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
661+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
662+
663+#ifdef CONFIG_PAX_ASLR
664+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
665+
666+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
667+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
668+#endif
669
670 /* When the program starts, a1 contains a pointer to a function to be
671 registered with atexit, as per the SVR4 ABI. A value of 0 means we
672diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
673index c019949..388fdd1 100644
674--- a/arch/arm/include/asm/kmap_types.h
675+++ b/arch/arm/include/asm/kmap_types.h
676@@ -19,6 +19,7 @@ enum km_type {
677 KM_SOFTIRQ0,
678 KM_SOFTIRQ1,
679 KM_L2_CACHE,
680+ KM_CLEARPAGE,
681 KM_TYPE_NR
682 };
683
684diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
685index 1d6bd40..fba0cb9 100644
686--- a/arch/arm/include/asm/uaccess.h
687+++ b/arch/arm/include/asm/uaccess.h
688@@ -22,6 +22,8 @@
689 #define VERIFY_READ 0
690 #define VERIFY_WRITE 1
691
692+extern void check_object_size(const void *ptr, unsigned long n, bool to);
693+
694 /*
695 * The exception table consists of pairs of addresses: the first is the
696 * address of an instruction that is allowed to fault, and the second is
697@@ -387,8 +389,23 @@ do { \
698
699
700 #ifdef CONFIG_MMU
701-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
702-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
703+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
704+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
705+
706+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
707+{
708+ if (!__builtin_constant_p(n))
709+ check_object_size(to, n, false);
710+ return ___copy_from_user(to, from, n);
711+}
712+
713+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
714+{
715+ if (!__builtin_constant_p(n))
716+ check_object_size(from, n, true);
717+ return ___copy_to_user(to, from, n);
718+}
719+
720 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
721 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
722 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
723@@ -403,6 +420,9 @@ extern unsigned long __must_check __strnlen_user(const char __user *s, long n);
724
725 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
726 {
727+ if ((long)n < 0)
728+ return n;
729+
730 if (access_ok(VERIFY_READ, from, n))
731 n = __copy_from_user(to, from, n);
732 else /* security hole - plug it */
733@@ -412,6 +432,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
734
735 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
736 {
737+ if ((long)n < 0)
738+ return n;
739+
740 if (access_ok(VERIFY_WRITE, to, n))
741 n = __copy_to_user(to, from, n);
742 return n;
743diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
744index 0e62770..e2c2cd6 100644
745--- a/arch/arm/kernel/armksyms.c
746+++ b/arch/arm/kernel/armksyms.c
747@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
748 #ifdef CONFIG_MMU
749 EXPORT_SYMBOL(copy_page);
750
751-EXPORT_SYMBOL(__copy_from_user);
752-EXPORT_SYMBOL(__copy_to_user);
753+EXPORT_SYMBOL(___copy_from_user);
754+EXPORT_SYMBOL(___copy_to_user);
755 EXPORT_SYMBOL(__clear_user);
756
757 EXPORT_SYMBOL(__get_user_1);
758diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
759index ba8ccfe..2dc34dc 100644
760--- a/arch/arm/kernel/kgdb.c
761+++ b/arch/arm/kernel/kgdb.c
762@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
763 * and we handle the normal undef case within the do_undefinstr
764 * handler.
765 */
766-struct kgdb_arch arch_kgdb_ops = {
767+const struct kgdb_arch arch_kgdb_ops = {
768 #ifndef __ARMEB__
769 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
770 #else /* ! __ARMEB__ */
771diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
772index 3f361a7..6e806e1 100644
773--- a/arch/arm/kernel/traps.c
774+++ b/arch/arm/kernel/traps.c
775@@ -247,6 +247,8 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
776
777 DEFINE_SPINLOCK(die_lock);
778
779+extern void gr_handle_kernel_exploit(void);
780+
781 /*
782 * This function is protected against re-entrancy.
783 */
784@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, struct pt_regs *regs, int err)
785 if (panic_on_oops)
786 panic("Fatal exception");
787
788+ gr_handle_kernel_exploit();
789+
790 do_exit(SIGSEGV);
791 }
792
793diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
794index e4fe124..0fc246b 100644
795--- a/arch/arm/lib/copy_from_user.S
796+++ b/arch/arm/lib/copy_from_user.S
797@@ -16,7 +16,7 @@
798 /*
799 * Prototype:
800 *
801- * size_t __copy_from_user(void *to, const void *from, size_t n)
802+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
803 *
804 * Purpose:
805 *
806@@ -84,11 +84,11 @@
807
808 .text
809
810-ENTRY(__copy_from_user)
811+ENTRY(___copy_from_user)
812
813 #include "copy_template.S"
814
815-ENDPROC(__copy_from_user)
816+ENDPROC(___copy_from_user)
817
818 .section .fixup,"ax"
819 .align 0
820diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
821index 1a71e15..ac7b258 100644
822--- a/arch/arm/lib/copy_to_user.S
823+++ b/arch/arm/lib/copy_to_user.S
824@@ -16,7 +16,7 @@
825 /*
826 * Prototype:
827 *
828- * size_t __copy_to_user(void *to, const void *from, size_t n)
829+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
830 *
831 * Purpose:
832 *
833@@ -88,11 +88,11 @@
834 .text
835
836 ENTRY(__copy_to_user_std)
837-WEAK(__copy_to_user)
838+WEAK(___copy_to_user)
839
840 #include "copy_template.S"
841
842-ENDPROC(__copy_to_user)
843+ENDPROC(___copy_to_user)
844
845 .section .fixup,"ax"
846 .align 0
847diff --git a/arch/arm/lib/uaccess.S b/arch/arm/lib/uaccess.S
848index ffdd274..91017b6 100644
849--- a/arch/arm/lib/uaccess.S
850+++ b/arch/arm/lib/uaccess.S
851@@ -19,7 +19,7 @@
852
853 #define PAGE_SHIFT 12
854
855-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
856+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
857 * Purpose : copy a block to user memory from kernel memory
858 * Params : to - user memory
859 * : from - kernel memory
860@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fault
861 sub r2, r2, ip
862 b .Lc2u_dest_aligned
863
864-ENTRY(__copy_to_user)
865+ENTRY(___copy_to_user)
866 stmfd sp!, {r2, r4 - r7, lr}
867 cmp r2, #4
868 blt .Lc2u_not_enough
869@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fault
870 ldrgtb r3, [r1], #0
871 USER( strgtbt r3, [r0], #1) @ May fault
872 b .Lc2u_finished
873-ENDPROC(__copy_to_user)
874+ENDPROC(___copy_to_user)
875
876 .section .fixup,"ax"
877 .align 0
878 9001: ldmfd sp!, {r0, r4 - r7, pc}
879 .previous
880
881-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
882+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
883 * Purpose : copy a block from user memory to kernel memory
884 * Params : to - kernel memory
885 * : from - user memory
886@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fault
887 sub r2, r2, ip
888 b .Lcfu_dest_aligned
889
890-ENTRY(__copy_from_user)
891+ENTRY(___copy_from_user)
892 stmfd sp!, {r0, r2, r4 - r7, lr}
893 cmp r2, #4
894 blt .Lcfu_not_enough
895@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fault
896 USER( ldrgtbt r3, [r1], #1) @ May fault
897 strgtb r3, [r0], #1
898 b .Lcfu_finished
899-ENDPROC(__copy_from_user)
900+ENDPROC(___copy_from_user)
901
902 .section .fixup,"ax"
903 .align 0
904diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
905index 6b967ff..67d5b2b 100644
906--- a/arch/arm/lib/uaccess_with_memcpy.c
907+++ b/arch/arm/lib/uaccess_with_memcpy.c
908@@ -97,7 +97,7 @@ out:
909 }
910
911 unsigned long
912-__copy_to_user(void __user *to, const void *from, unsigned long n)
913+___copy_to_user(void __user *to, const void *from, unsigned long n)
914 {
915 /*
916 * This test is stubbed out of the main function above to keep
917diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c
918index 4028724..beec230 100644
919--- a/arch/arm/mach-at91/pm.c
920+++ b/arch/arm/mach-at91/pm.c
921@@ -348,7 +348,7 @@ static void at91_pm_end(void)
922 }
923
924
925-static struct platform_suspend_ops at91_pm_ops ={
926+static const struct platform_suspend_ops at91_pm_ops ={
927 .valid = at91_pm_valid_state,
928 .begin = at91_pm_begin,
929 .enter = at91_pm_enter,
930diff --git a/arch/arm/mach-omap1/pm.c b/arch/arm/mach-omap1/pm.c
931index 5218943..0a34552 100644
932--- a/arch/arm/mach-omap1/pm.c
933+++ b/arch/arm/mach-omap1/pm.c
934@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq = {
935
936
937
938-static struct platform_suspend_ops omap_pm_ops ={
939+static const struct platform_suspend_ops omap_pm_ops ={
940 .prepare = omap_pm_prepare,
941 .enter = omap_pm_enter,
942 .finish = omap_pm_finish,
943diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
944index bff5c4e..d4c649b 100644
945--- a/arch/arm/mach-omap2/pm24xx.c
946+++ b/arch/arm/mach-omap2/pm24xx.c
947@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
948 enable_hlt();
949 }
950
951-static struct platform_suspend_ops omap_pm_ops = {
952+static const struct platform_suspend_ops omap_pm_ops = {
953 .prepare = omap2_pm_prepare,
954 .enter = omap2_pm_enter,
955 .finish = omap2_pm_finish,
956diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
957index 8946319..7d3e661 100644
958--- a/arch/arm/mach-omap2/pm34xx.c
959+++ b/arch/arm/mach-omap2/pm34xx.c
960@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
961 return;
962 }
963
964-static struct platform_suspend_ops omap_pm_ops = {
965+static const struct platform_suspend_ops omap_pm_ops = {
966 .begin = omap3_pm_begin,
967 .end = omap3_pm_end,
968 .prepare = omap3_pm_prepare,
969diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
970index b3d8d53..6e68ebc 100644
971--- a/arch/arm/mach-pnx4008/pm.c
972+++ b/arch/arm/mach-pnx4008/pm.c
973@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_state_t state)
974 (state == PM_SUSPEND_MEM);
975 }
976
977-static struct platform_suspend_ops pnx4008_pm_ops = {
978+static const struct platform_suspend_ops pnx4008_pm_ops = {
979 .enter = pnx4008_pm_enter,
980 .valid = pnx4008_pm_valid,
981 };
982diff --git a/arch/arm/mach-pxa/pm.c b/arch/arm/mach-pxa/pm.c
983index 7693355..9beb00a 100644
984--- a/arch/arm/mach-pxa/pm.c
985+++ b/arch/arm/mach-pxa/pm.c
986@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
987 pxa_cpu_pm_fns->finish();
988 }
989
990-static struct platform_suspend_ops pxa_pm_ops = {
991+static const struct platform_suspend_ops pxa_pm_ops = {
992 .valid = pxa_pm_valid,
993 .enter = pxa_pm_enter,
994 .prepare = pxa_pm_prepare,
995diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
996index 629e05d..06be589 100644
997--- a/arch/arm/mach-pxa/sharpsl_pm.c
998+++ b/arch/arm/mach-pxa/sharpsl_pm.c
999@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status(struct apm_power_info *info)
1000 }
1001
1002 #ifdef CONFIG_PM
1003-static struct platform_suspend_ops sharpsl_pm_ops = {
1004+static const struct platform_suspend_ops sharpsl_pm_ops = {
1005 .prepare = pxa_pm_prepare,
1006 .finish = pxa_pm_finish,
1007 .enter = corgi_pxa_pm_enter,
1008diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
1009index c83fdc8..ab9fc44 100644
1010--- a/arch/arm/mach-sa1100/pm.c
1011+++ b/arch/arm/mach-sa1100/pm.c
1012@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
1013 return virt_to_phys(sp);
1014 }
1015
1016-static struct platform_suspend_ops sa11x0_pm_ops = {
1017+static const struct platform_suspend_ops sa11x0_pm_ops = {
1018 .enter = sa11x0_pm_enter,
1019 .valid = suspend_valid_only_mem,
1020 };
1021diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
1022index 3191cd6..c0739db 100644
1023--- a/arch/arm/mm/fault.c
1024+++ b/arch/arm/mm/fault.c
1025@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
1026 }
1027 #endif
1028
1029+#ifdef CONFIG_PAX_PAGEEXEC
1030+ if (fsr & FSR_LNX_PF) {
1031+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
1032+ do_group_exit(SIGKILL);
1033+ }
1034+#endif
1035+
1036 tsk->thread.address = addr;
1037 tsk->thread.error_code = fsr;
1038 tsk->thread.trap_no = 14;
1039@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
1040 }
1041 #endif /* CONFIG_MMU */
1042
1043+#ifdef CONFIG_PAX_PAGEEXEC
1044+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1045+{
1046+ long i;
1047+
1048+ printk(KERN_ERR "PAX: bytes at PC: ");
1049+ for (i = 0; i < 20; i++) {
1050+ unsigned char c;
1051+ if (get_user(c, (__force unsigned char __user *)pc+i))
1052+ printk(KERN_CONT "?? ");
1053+ else
1054+ printk(KERN_CONT "%02x ", c);
1055+ }
1056+ printk("\n");
1057+
1058+ printk(KERN_ERR "PAX: bytes at SP-4: ");
1059+ for (i = -1; i < 20; i++) {
1060+ unsigned long c;
1061+ if (get_user(c, (__force unsigned long __user *)sp+i))
1062+ printk(KERN_CONT "???????? ");
1063+ else
1064+ printk(KERN_CONT "%08lx ", c);
1065+ }
1066+ printk("\n");
1067+}
1068+#endif
1069+
1070 /*
1071 * First Level Translation Fault Handler
1072 *
1073diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
1074index f5abc51..7ec524c 100644
1075--- a/arch/arm/mm/mmap.c
1076+++ b/arch/arm/mm/mmap.c
1077@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1078 if (len > TASK_SIZE)
1079 return -ENOMEM;
1080
1081+#ifdef CONFIG_PAX_RANDMMAP
1082+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
1083+#endif
1084+
1085 if (addr) {
1086 if (do_align)
1087 addr = COLOUR_ALIGN(addr, pgoff);
1088@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
1089 addr = PAGE_ALIGN(addr);
1090
1091 vma = find_vma(mm, addr);
1092- if (TASK_SIZE - len >= addr &&
1093- (!vma || addr + len <= vma->vm_start))
1094+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1095 return addr;
1096 }
1097 if (len > mm->cached_hole_size) {
1098- start_addr = addr = mm->free_area_cache;
1099+ start_addr = addr = mm->free_area_cache;
1100 } else {
1101- start_addr = addr = TASK_UNMAPPED_BASE;
1102- mm->cached_hole_size = 0;
1103+ start_addr = addr = mm->mmap_base;
1104+ mm->cached_hole_size = 0;
1105 }
1106
1107 full_search:
1108@@ -94,14 +97,14 @@ full_search:
1109 * Start a new search - just in case we missed
1110 * some holes.
1111 */
1112- if (start_addr != TASK_UNMAPPED_BASE) {
1113- start_addr = addr = TASK_UNMAPPED_BASE;
1114+ if (start_addr != mm->mmap_base) {
1115+ start_addr = addr = mm->mmap_base;
1116 mm->cached_hole_size = 0;
1117 goto full_search;
1118 }
1119 return -ENOMEM;
1120 }
1121- if (!vma || addr + len <= vma->vm_start) {
1122+ if (check_heap_stack_gap(vma, addr, len)) {
1123 /*
1124 * Remember the place where we stopped the search:
1125 */
1126diff --git a/arch/arm/plat-s3c/pm.c b/arch/arm/plat-s3c/pm.c
1127index 8d97db2..b66cfa5 100644
1128--- a/arch/arm/plat-s3c/pm.c
1129+++ b/arch/arm/plat-s3c/pm.c
1130@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
1131 s3c_pm_check_cleanup();
1132 }
1133
1134-static struct platform_suspend_ops s3c_pm_ops = {
1135+static const struct platform_suspend_ops s3c_pm_ops = {
1136 .enter = s3c_pm_enter,
1137 .prepare = s3c_pm_prepare,
1138 .finish = s3c_pm_finish,
1139diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
1140index d5d1d41..856e2ed 100644
1141--- a/arch/avr32/include/asm/elf.h
1142+++ b/arch/avr32/include/asm/elf.h
1143@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
1144 the loader. We need to make sure that it is out of the way of the program
1145 that it will "exec", and that there is sufficient room for the brk. */
1146
1147-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1148+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1149
1150+#ifdef CONFIG_PAX_ASLR
1151+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
1152+
1153+#define PAX_DELTA_MMAP_LEN 15
1154+#define PAX_DELTA_STACK_LEN 15
1155+#endif
1156
1157 /* This yields a mask that user programs can use to figure out what
1158 instruction set this CPU supports. This could be done in user space,
1159diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
1160index b7f5c68..556135c 100644
1161--- a/arch/avr32/include/asm/kmap_types.h
1162+++ b/arch/avr32/include/asm/kmap_types.h
1163@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
1164 D(11) KM_IRQ1,
1165 D(12) KM_SOFTIRQ0,
1166 D(13) KM_SOFTIRQ1,
1167-D(14) KM_TYPE_NR
1168+D(14) KM_CLEARPAGE,
1169+D(15) KM_TYPE_NR
1170 };
1171
1172 #undef D
1173diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
1174index f021edf..32d680e 100644
1175--- a/arch/avr32/mach-at32ap/pm.c
1176+++ b/arch/avr32/mach-at32ap/pm.c
1177@@ -176,7 +176,7 @@ out:
1178 return 0;
1179 }
1180
1181-static struct platform_suspend_ops avr32_pm_ops = {
1182+static const struct platform_suspend_ops avr32_pm_ops = {
1183 .valid = avr32_pm_valid_state,
1184 .enter = avr32_pm_enter,
1185 };
1186diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
1187index b61d86d..e292c7f 100644
1188--- a/arch/avr32/mm/fault.c
1189+++ b/arch/avr32/mm/fault.c
1190@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
1191
1192 int exception_trace = 1;
1193
1194+#ifdef CONFIG_PAX_PAGEEXEC
1195+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1196+{
1197+ unsigned long i;
1198+
1199+ printk(KERN_ERR "PAX: bytes at PC: ");
1200+ for (i = 0; i < 20; i++) {
1201+ unsigned char c;
1202+ if (get_user(c, (unsigned char *)pc+i))
1203+ printk(KERN_CONT "???????? ");
1204+ else
1205+ printk(KERN_CONT "%02x ", c);
1206+ }
1207+ printk("\n");
1208+}
1209+#endif
1210+
1211 /*
1212 * This routine handles page faults. It determines the address and the
1213 * problem, and then passes it off to one of the appropriate routines.
1214@@ -157,6 +174,16 @@ bad_area:
1215 up_read(&mm->mmap_sem);
1216
1217 if (user_mode(regs)) {
1218+
1219+#ifdef CONFIG_PAX_PAGEEXEC
1220+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
1221+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
1222+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
1223+ do_group_exit(SIGKILL);
1224+ }
1225+ }
1226+#endif
1227+
1228 if (exception_trace && printk_ratelimit())
1229 printk("%s%s[%d]: segfault at %08lx pc %08lx "
1230 "sp %08lx ecr %lu\n",
1231diff --git a/arch/blackfin/kernel/kgdb.c b/arch/blackfin/kernel/kgdb.c
1232index cce79d0..c406c85 100644
1233--- a/arch/blackfin/kernel/kgdb.c
1234+++ b/arch/blackfin/kernel/kgdb.c
1235@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vector, int signo,
1236 return -1; /* this means that we do not want to exit from the handler */
1237 }
1238
1239-struct kgdb_arch arch_kgdb_ops = {
1240+const struct kgdb_arch arch_kgdb_ops = {
1241 .gdb_bpt_instr = {0xa1},
1242 #ifdef CONFIG_SMP
1243 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
1244diff --git a/arch/blackfin/mach-common/pm.c b/arch/blackfin/mach-common/pm.c
1245index 8837be4..b2fb413 100644
1246--- a/arch/blackfin/mach-common/pm.c
1247+++ b/arch/blackfin/mach-common/pm.c
1248@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t state)
1249 return 0;
1250 }
1251
1252-struct platform_suspend_ops bfin_pm_ops = {
1253+const struct platform_suspend_ops bfin_pm_ops = {
1254 .enter = bfin_pm_enter,
1255 .valid = bfin_pm_valid,
1256 };
1257diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
1258index f8e16b2..c73ff79 100644
1259--- a/arch/frv/include/asm/kmap_types.h
1260+++ b/arch/frv/include/asm/kmap_types.h
1261@@ -23,6 +23,7 @@ enum km_type {
1262 KM_IRQ1,
1263 KM_SOFTIRQ0,
1264 KM_SOFTIRQ1,
1265+ KM_CLEARPAGE,
1266 KM_TYPE_NR
1267 };
1268
1269diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
1270index 385fd30..6c3d97e 100644
1271--- a/arch/frv/mm/elf-fdpic.c
1272+++ b/arch/frv/mm/elf-fdpic.c
1273@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1274 if (addr) {
1275 addr = PAGE_ALIGN(addr);
1276 vma = find_vma(current->mm, addr);
1277- if (TASK_SIZE - len >= addr &&
1278- (!vma || addr + len <= vma->vm_start))
1279+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
1280 goto success;
1281 }
1282
1283@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1284 for (; vma; vma = vma->vm_next) {
1285 if (addr > limit)
1286 break;
1287- if (addr + len <= vma->vm_start)
1288+ if (check_heap_stack_gap(vma, addr, len))
1289 goto success;
1290 addr = vma->vm_end;
1291 }
1292@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
1293 for (; vma; vma = vma->vm_next) {
1294 if (addr > limit)
1295 break;
1296- if (addr + len <= vma->vm_start)
1297+ if (check_heap_stack_gap(vma, addr, len))
1298 goto success;
1299 addr = vma->vm_end;
1300 }
1301diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
1302index e4a80d8..11a7ea1 100644
1303--- a/arch/ia64/hp/common/hwsw_iommu.c
1304+++ b/arch/ia64/hp/common/hwsw_iommu.c
1305@@ -17,7 +17,7 @@
1306 #include <linux/swiotlb.h>
1307 #include <asm/machvec.h>
1308
1309-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1310+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
1311
1312 /* swiotlb declarations & definitions: */
1313 extern int swiotlb_late_init_with_default_size (size_t size);
1314@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct device *dev)
1315 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
1316 }
1317
1318-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1319+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
1320 {
1321 if (use_swiotlb(dev))
1322 return &swiotlb_dma_ops;
1323diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
1324index 01ae69b..35752fd 100644
1325--- a/arch/ia64/hp/common/sba_iommu.c
1326+++ b/arch/ia64/hp/common/sba_iommu.c
1327@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_driver = {
1328 },
1329 };
1330
1331-extern struct dma_map_ops swiotlb_dma_ops;
1332+extern const struct dma_map_ops swiotlb_dma_ops;
1333
1334 static int __init
1335 sba_init(void)
1336@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
1337
1338 __setup("sbapagesize=",sba_page_override);
1339
1340-struct dma_map_ops sba_dma_ops = {
1341+const struct dma_map_ops sba_dma_ops = {
1342 .alloc_coherent = sba_alloc_coherent,
1343 .free_coherent = sba_free_coherent,
1344 .map_page = sba_map_page,
1345diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
1346index c69552b..c7122f4 100644
1347--- a/arch/ia64/ia32/binfmt_elf32.c
1348+++ b/arch/ia64/ia32/binfmt_elf32.c
1349@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_top);
1350
1351 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
1352
1353+#ifdef CONFIG_PAX_ASLR
1354+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1355+
1356+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1357+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1358+#endif
1359+
1360 /* Ugly but avoids duplication */
1361 #include "../../../fs/binfmt_elf.c"
1362
1363diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
1364index 0f15349..26b3429 100644
1365--- a/arch/ia64/ia32/ia32priv.h
1366+++ b/arch/ia64/ia32/ia32priv.h
1367@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
1368 #define ELF_DATA ELFDATA2LSB
1369 #define ELF_ARCH EM_386
1370
1371-#define IA32_STACK_TOP IA32_PAGE_OFFSET
1372+#ifdef CONFIG_PAX_RANDUSTACK
1373+#define __IA32_DELTA_STACK (current->mm->delta_stack)
1374+#else
1375+#define __IA32_DELTA_STACK 0UL
1376+#endif
1377+
1378+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
1379+
1380 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
1381 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
1382
1383diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
1384index 8d3c79c..71b3af6 100644
1385--- a/arch/ia64/include/asm/dma-mapping.h
1386+++ b/arch/ia64/include/asm/dma-mapping.h
1387@@ -12,7 +12,7 @@
1388
1389 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1390
1391-extern struct dma_map_ops *dma_ops;
1392+extern const struct dma_map_ops *dma_ops;
1393 extern struct ia64_machine_vector ia64_mv;
1394 extern void set_iommu_machvec(void);
1395
1396@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct device *, struct scatterlist *, int,
1397 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1398 dma_addr_t *daddr, gfp_t gfp)
1399 {
1400- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1401+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1402 void *caddr;
1403
1404 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1405@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1406 static inline void dma_free_coherent(struct device *dev, size_t size,
1407 void *caddr, dma_addr_t daddr)
1408 {
1409- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1410+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1411 debug_dma_free_coherent(dev, size, caddr, daddr);
1412 ops->free_coherent(dev, size, caddr, daddr);
1413 }
1414@@ -49,13 +49,13 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
1415
1416 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1417 {
1418- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1419+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1420 return ops->mapping_error(dev, daddr);
1421 }
1422
1423 static inline int dma_supported(struct device *dev, u64 mask)
1424 {
1425- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1426+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1427 return ops->dma_supported(dev, mask);
1428 }
1429
1430diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
1431index 86eddee..b116bb4 100644
1432--- a/arch/ia64/include/asm/elf.h
1433+++ b/arch/ia64/include/asm/elf.h
1434@@ -43,6 +43,13 @@
1435 */
1436 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1437
1438+#ifdef CONFIG_PAX_ASLR
1439+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1440+
1441+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1442+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1443+#endif
1444+
1445 #define PT_IA_64_UNWIND 0x70000001
1446
1447 /* IA-64 relocations: */
1448diff --git a/arch/ia64/include/asm/machvec.h b/arch/ia64/include/asm/machvec.h
1449index 367d299..9ad4279 100644
1450--- a/arch/ia64/include/asm/machvec.h
1451+++ b/arch/ia64/include/asm/machvec.h
1452@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event_t(void);
1453 /* DMA-mapping interface: */
1454 typedef void ia64_mv_dma_init (void);
1455 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1456-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1457+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1458
1459 /*
1460 * WARNING: The legacy I/O space is _architected_. Platforms are
1461@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(const char *cmdline);
1462 # endif /* CONFIG_IA64_GENERIC */
1463
1464 extern void swiotlb_dma_init(void);
1465-extern struct dma_map_ops *dma_get_ops(struct device *);
1466+extern const struct dma_map_ops *dma_get_ops(struct device *);
1467
1468 /*
1469 * Define default versions so we can extend machvec for new platforms without having
1470diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
1471index 8840a69..cdb63d9 100644
1472--- a/arch/ia64/include/asm/pgtable.h
1473+++ b/arch/ia64/include/asm/pgtable.h
1474@@ -12,7 +12,7 @@
1475 * David Mosberger-Tang <davidm@hpl.hp.com>
1476 */
1477
1478-
1479+#include <linux/const.h>
1480 #include <asm/mman.h>
1481 #include <asm/page.h>
1482 #include <asm/processor.h>
1483@@ -143,6 +143,17 @@
1484 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1485 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1486 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1487+
1488+#ifdef CONFIG_PAX_PAGEEXEC
1489+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1490+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1491+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1492+#else
1493+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1494+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1495+# define PAGE_COPY_NOEXEC PAGE_COPY
1496+#endif
1497+
1498 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1499 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1500 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1501diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
1502index 239ecdc..f94170e 100644
1503--- a/arch/ia64/include/asm/spinlock.h
1504+++ b/arch/ia64/include/asm/spinlock.h
1505@@ -72,7 +72,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
1506 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1507
1508 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1509- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1510+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1511 }
1512
1513 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1514diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
1515index 449c8c0..432a3d2 100644
1516--- a/arch/ia64/include/asm/uaccess.h
1517+++ b/arch/ia64/include/asm/uaccess.h
1518@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1519 const void *__cu_from = (from); \
1520 long __cu_len = (n); \
1521 \
1522- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1523+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1524 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1525 __cu_len; \
1526 })
1527@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
1528 long __cu_len = (n); \
1529 \
1530 __chk_user_ptr(__cu_from); \
1531- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1532+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1533 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1534 __cu_len; \
1535 })
1536diff --git a/arch/ia64/kernel/dma-mapping.c b/arch/ia64/kernel/dma-mapping.c
1537index f2c1600..969398a 100644
1538--- a/arch/ia64/kernel/dma-mapping.c
1539+++ b/arch/ia64/kernel/dma-mapping.c
1540@@ -3,7 +3,7 @@
1541 /* Set this to 1 if there is a HW IOMMU in the system */
1542 int iommu_detected __read_mostly;
1543
1544-struct dma_map_ops *dma_ops;
1545+const struct dma_map_ops *dma_ops;
1546 EXPORT_SYMBOL(dma_ops);
1547
1548 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1549@@ -16,7 +16,7 @@ static int __init dma_init(void)
1550 }
1551 fs_initcall(dma_init);
1552
1553-struct dma_map_ops *dma_get_ops(struct device *dev)
1554+const struct dma_map_ops *dma_get_ops(struct device *dev)
1555 {
1556 return dma_ops;
1557 }
1558diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
1559index 1481b0a..e7d38ff 100644
1560--- a/arch/ia64/kernel/module.c
1561+++ b/arch/ia64/kernel/module.c
1562@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1563 void
1564 module_free (struct module *mod, void *module_region)
1565 {
1566- if (mod && mod->arch.init_unw_table &&
1567- module_region == mod->module_init) {
1568+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1569 unw_remove_unwind_table(mod->arch.init_unw_table);
1570 mod->arch.init_unw_table = NULL;
1571 }
1572@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
1573 }
1574
1575 static inline int
1576+in_init_rx (const struct module *mod, uint64_t addr)
1577+{
1578+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1579+}
1580+
1581+static inline int
1582+in_init_rw (const struct module *mod, uint64_t addr)
1583+{
1584+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1585+}
1586+
1587+static inline int
1588 in_init (const struct module *mod, uint64_t addr)
1589 {
1590- return addr - (uint64_t) mod->module_init < mod->init_size;
1591+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1592+}
1593+
1594+static inline int
1595+in_core_rx (const struct module *mod, uint64_t addr)
1596+{
1597+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1598+}
1599+
1600+static inline int
1601+in_core_rw (const struct module *mod, uint64_t addr)
1602+{
1603+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1604 }
1605
1606 static inline int
1607 in_core (const struct module *mod, uint64_t addr)
1608 {
1609- return addr - (uint64_t) mod->module_core < mod->core_size;
1610+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1611 }
1612
1613 static inline int
1614@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
1615 break;
1616
1617 case RV_BDREL:
1618- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1619+ if (in_init_rx(mod, val))
1620+ val -= (uint64_t) mod->module_init_rx;
1621+ else if (in_init_rw(mod, val))
1622+ val -= (uint64_t) mod->module_init_rw;
1623+ else if (in_core_rx(mod, val))
1624+ val -= (uint64_t) mod->module_core_rx;
1625+ else if (in_core_rw(mod, val))
1626+ val -= (uint64_t) mod->module_core_rw;
1627 break;
1628
1629 case RV_LTV:
1630@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
1631 * addresses have been selected...
1632 */
1633 uint64_t gp;
1634- if (mod->core_size > MAX_LTOFF)
1635+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1636 /*
1637 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1638 * at the end of the module.
1639 */
1640- gp = mod->core_size - MAX_LTOFF / 2;
1641+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1642 else
1643- gp = mod->core_size / 2;
1644- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1645+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1646+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1647 mod->arch.gp = gp;
1648 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1649 }
1650diff --git a/arch/ia64/kernel/pci-dma.c b/arch/ia64/kernel/pci-dma.c
1651index f6b1ff0..de773fb 100644
1652--- a/arch/ia64/kernel/pci-dma.c
1653+++ b/arch/ia64/kernel/pci-dma.c
1654@@ -43,7 +43,7 @@ struct device fallback_dev = {
1655 .dma_mask = &fallback_dev.coherent_dma_mask,
1656 };
1657
1658-extern struct dma_map_ops intel_dma_ops;
1659+extern const struct dma_map_ops intel_dma_ops;
1660
1661 static int __init pci_iommu_init(void)
1662 {
1663@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *dev, u64 mask)
1664 }
1665 EXPORT_SYMBOL(iommu_dma_supported);
1666
1667+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1668+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1669+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1670+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1671+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1672+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1673+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1674+
1675+static const struct dma_map_ops intel_iommu_dma_ops = {
1676+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1677+ .alloc_coherent = intel_alloc_coherent,
1678+ .free_coherent = intel_free_coherent,
1679+ .map_sg = intel_map_sg,
1680+ .unmap_sg = intel_unmap_sg,
1681+ .map_page = intel_map_page,
1682+ .unmap_page = intel_unmap_page,
1683+ .mapping_error = intel_mapping_error,
1684+
1685+ .sync_single_for_cpu = machvec_dma_sync_single,
1686+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1687+ .sync_single_for_device = machvec_dma_sync_single,
1688+ .sync_sg_for_device = machvec_dma_sync_sg,
1689+ .dma_supported = iommu_dma_supported,
1690+};
1691+
1692 void __init pci_iommu_alloc(void)
1693 {
1694- dma_ops = &intel_dma_ops;
1695-
1696- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1697- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1698- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1699- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1700- dma_ops->dma_supported = iommu_dma_supported;
1701+ dma_ops = &intel_iommu_dma_ops;
1702
1703 /*
1704 * The order of these functions is important for
1705diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
1706index 285aae8..61dbab6 100644
1707--- a/arch/ia64/kernel/pci-swiotlb.c
1708+++ b/arch/ia64/kernel/pci-swiotlb.c
1709@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent(struct device *dev, size_t size,
1710 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1711 }
1712
1713-struct dma_map_ops swiotlb_dma_ops = {
1714+const struct dma_map_ops swiotlb_dma_ops = {
1715 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1716 .free_coherent = swiotlb_free_coherent,
1717 .map_page = swiotlb_map_page,
1718diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
1719index 609d500..7dde2a8 100644
1720--- a/arch/ia64/kernel/sys_ia64.c
1721+++ b/arch/ia64/kernel/sys_ia64.c
1722@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1723 if (REGION_NUMBER(addr) == RGN_HPAGE)
1724 addr = 0;
1725 #endif
1726+
1727+#ifdef CONFIG_PAX_RANDMMAP
1728+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1729+ addr = mm->free_area_cache;
1730+ else
1731+#endif
1732+
1733 if (!addr)
1734 addr = mm->free_area_cache;
1735
1736@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
1737 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1738 /* At this point: (!vma || addr < vma->vm_end). */
1739 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1740- if (start_addr != TASK_UNMAPPED_BASE) {
1741+ if (start_addr != mm->mmap_base) {
1742 /* Start a new search --- just in case we missed some holes. */
1743- addr = TASK_UNMAPPED_BASE;
1744+ addr = mm->mmap_base;
1745 goto full_search;
1746 }
1747 return -ENOMEM;
1748 }
1749- if (!vma || addr + len <= vma->vm_start) {
1750+ if (check_heap_stack_gap(vma, addr, len)) {
1751 /* Remember the address where we stopped this search: */
1752 mm->free_area_cache = addr + len;
1753 return addr;
1754diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
1755index 8f06035..b3a5818 100644
1756--- a/arch/ia64/kernel/topology.c
1757+++ b/arch/ia64/kernel/topology.c
1758@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
1759 return ret;
1760 }
1761
1762-static struct sysfs_ops cache_sysfs_ops = {
1763+static const struct sysfs_ops cache_sysfs_ops = {
1764 .show = cache_show
1765 };
1766
1767diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
1768index 0a0c77b..8e55a81 100644
1769--- a/arch/ia64/kernel/vmlinux.lds.S
1770+++ b/arch/ia64/kernel/vmlinux.lds.S
1771@@ -190,7 +190,7 @@ SECTIONS
1772 /* Per-cpu data: */
1773 . = ALIGN(PERCPU_PAGE_SIZE);
1774 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1775- __phys_per_cpu_start = __per_cpu_load;
1776+ __phys_per_cpu_start = per_cpu_load;
1777 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1778 * into percpu page size
1779 */
1780diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
1781index 19261a9..1611b7a 100644
1782--- a/arch/ia64/mm/fault.c
1783+++ b/arch/ia64/mm/fault.c
1784@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
1785 return pte_present(pte);
1786 }
1787
1788+#ifdef CONFIG_PAX_PAGEEXEC
1789+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
1790+{
1791+ unsigned long i;
1792+
1793+ printk(KERN_ERR "PAX: bytes at PC: ");
1794+ for (i = 0; i < 8; i++) {
1795+ unsigned int c;
1796+ if (get_user(c, (unsigned int *)pc+i))
1797+ printk(KERN_CONT "???????? ");
1798+ else
1799+ printk(KERN_CONT "%08x ", c);
1800+ }
1801+ printk("\n");
1802+}
1803+#endif
1804+
1805 void __kprobes
1806 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1807 {
1808@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
1809 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1810 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1811
1812- if ((vma->vm_flags & mask) != mask)
1813+ if ((vma->vm_flags & mask) != mask) {
1814+
1815+#ifdef CONFIG_PAX_PAGEEXEC
1816+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1817+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1818+ goto bad_area;
1819+
1820+ up_read(&mm->mmap_sem);
1821+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1822+ do_group_exit(SIGKILL);
1823+ }
1824+#endif
1825+
1826 goto bad_area;
1827
1828+ }
1829+
1830 survive:
1831 /*
1832 * If for any reason at all we couldn't handle the fault, make
1833diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
1834index b0f6157..a082bbc 100644
1835--- a/arch/ia64/mm/hugetlbpage.c
1836+++ b/arch/ia64/mm/hugetlbpage.c
1837@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
1838 /* At this point: (!vmm || addr < vmm->vm_end). */
1839 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1840 return -ENOMEM;
1841- if (!vmm || (addr + len) <= vmm->vm_start)
1842+ if (check_heap_stack_gap(vmm, addr, len))
1843 return addr;
1844 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1845 }
1846diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
1847index 1857766..05cc6a3 100644
1848--- a/arch/ia64/mm/init.c
1849+++ b/arch/ia64/mm/init.c
1850@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1851 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1852 vma->vm_end = vma->vm_start + PAGE_SIZE;
1853 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1854+
1855+#ifdef CONFIG_PAX_PAGEEXEC
1856+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1857+ vma->vm_flags &= ~VM_EXEC;
1858+
1859+#ifdef CONFIG_PAX_MPROTECT
1860+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1861+ vma->vm_flags &= ~VM_MAYEXEC;
1862+#endif
1863+
1864+ }
1865+#endif
1866+
1867 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1868 down_write(&current->mm->mmap_sem);
1869 if (insert_vm_struct(current->mm, vma)) {
1870diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
1871index 98b6849..8046766 100644
1872--- a/arch/ia64/sn/pci/pci_dma.c
1873+++ b/arch/ia64/sn/pci/pci_dma.c
1874@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
1875 return ret;
1876 }
1877
1878-static struct dma_map_ops sn_dma_ops = {
1879+static const struct dma_map_ops sn_dma_ops = {
1880 .alloc_coherent = sn_dma_alloc_coherent,
1881 .free_coherent = sn_dma_free_coherent,
1882 .map_page = sn_dma_map_page,
1883diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
1884index 82abd15..d95ae5d 100644
1885--- a/arch/m32r/lib/usercopy.c
1886+++ b/arch/m32r/lib/usercopy.c
1887@@ -14,6 +14,9 @@
1888 unsigned long
1889 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1890 {
1891+ if ((long)n < 0)
1892+ return n;
1893+
1894 prefetch(from);
1895 if (access_ok(VERIFY_WRITE, to, n))
1896 __copy_user(to,from,n);
1897@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1898 unsigned long
1899 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1900 {
1901+ if ((long)n < 0)
1902+ return n;
1903+
1904 prefetchw(to);
1905 if (access_ok(VERIFY_READ, from, n))
1906 __copy_user_zeroing(to,from,n);
1907diff --git a/arch/mips/Makefile b/arch/mips/Makefile
1908index 77f5021..2b1db8a 100644
1909--- a/arch/mips/Makefile
1910+++ b/arch/mips/Makefile
1911@@ -51,6 +51,8 @@ endif
1912 cflags-y := -ffunction-sections
1913 cflags-y += $(call cc-option, -mno-check-zero-division)
1914
1915+cflags-y += -Wno-sign-compare -Wno-extra
1916+
1917 ifdef CONFIG_32BIT
1918 ld-emul = $(32bit-emul)
1919 vmlinux-32 = vmlinux
1920diff --git a/arch/mips/alchemy/devboards/pm.c b/arch/mips/alchemy/devboards/pm.c
1921index 632f986..fd0378d 100644
1922--- a/arch/mips/alchemy/devboards/pm.c
1923+++ b/arch/mips/alchemy/devboards/pm.c
1924@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1925
1926 }
1927
1928-static struct platform_suspend_ops db1x_pm_ops = {
1929+static const struct platform_suspend_ops db1x_pm_ops = {
1930 .valid = suspend_valid_only_mem,
1931 .begin = db1x_pm_begin,
1932 .enter = db1x_pm_enter,
1933diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
1934index 7990694..4e93acf 100644
1935--- a/arch/mips/include/asm/elf.h
1936+++ b/arch/mips/include/asm/elf.h
1937@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_struct *, elf_fpregset_t *);
1938 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1939 #endif
1940
1941+#ifdef CONFIG_PAX_ASLR
1942+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1943+
1944+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1945+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1946+#endif
1947+
1948 #endif /* _ASM_ELF_H */
1949diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
1950index f266295..627cfff 100644
1951--- a/arch/mips/include/asm/page.h
1952+++ b/arch/mips/include/asm/page.h
1953@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
1954 #ifdef CONFIG_CPU_MIPS32
1955 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1956 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1957- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1958+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1959 #else
1960 typedef struct { unsigned long long pte; } pte_t;
1961 #define pte_val(x) ((x).pte)
1962diff --git a/arch/mips/include/asm/reboot.h b/arch/mips/include/asm/reboot.h
1963index e48c0bf..f3acf65 100644
1964--- a/arch/mips/include/asm/reboot.h
1965+++ b/arch/mips/include/asm/reboot.h
1966@@ -9,7 +9,7 @@
1967 #ifndef _ASM_REBOOT_H
1968 #define _ASM_REBOOT_H
1969
1970-extern void (*_machine_restart)(char *command);
1971-extern void (*_machine_halt)(void);
1972+extern void (*__noreturn _machine_restart)(char *command);
1973+extern void (*__noreturn _machine_halt)(void);
1974
1975 #endif /* _ASM_REBOOT_H */
1976diff --git a/arch/mips/include/asm/system.h b/arch/mips/include/asm/system.h
1977index 83b5509..9fa24a23 100644
1978--- a/arch/mips/include/asm/system.h
1979+++ b/arch/mips/include/asm/system.h
1980@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1981 */
1982 #define __ARCH_WANT_UNLOCKED_CTXSW
1983
1984-extern unsigned long arch_align_stack(unsigned long sp);
1985+#define arch_align_stack(x) ((x) & ~0xfUL)
1986
1987 #endif /* _ASM_SYSTEM_H */
1988diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
1989index 9fdd8bc..fcf9d68 100644
1990--- a/arch/mips/kernel/binfmt_elfn32.c
1991+++ b/arch/mips/kernel/binfmt_elfn32.c
1992@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
1993 #undef ELF_ET_DYN_BASE
1994 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1995
1996+#ifdef CONFIG_PAX_ASLR
1997+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1998+
1999+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2000+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2001+#endif
2002+
2003 #include <asm/processor.h>
2004 #include <linux/module.h>
2005 #include <linux/elfcore.h>
2006diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
2007index ff44823..cf0b48a 100644
2008--- a/arch/mips/kernel/binfmt_elfo32.c
2009+++ b/arch/mips/kernel/binfmt_elfo32.c
2010@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
2011 #undef ELF_ET_DYN_BASE
2012 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
2013
2014+#ifdef CONFIG_PAX_ASLR
2015+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
2016+
2017+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2018+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
2019+#endif
2020+
2021 #include <asm/processor.h>
2022
2023 /*
2024diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
2025index 50c9bb8..efdd5f8 100644
2026--- a/arch/mips/kernel/kgdb.c
2027+++ b/arch/mips/kernel/kgdb.c
2028@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
2029 return -1;
2030 }
2031
2032+/* cannot be const */
2033 struct kgdb_arch arch_kgdb_ops;
2034
2035 /*
2036diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
2037index f3d73e1..bb3f57a 100644
2038--- a/arch/mips/kernel/process.c
2039+++ b/arch/mips/kernel/process.c
2040@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_struct *task)
2041 out:
2042 return pc;
2043 }
2044-
2045-/*
2046- * Don't forget that the stack pointer must be aligned on a 8 bytes
2047- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
2048- */
2049-unsigned long arch_align_stack(unsigned long sp)
2050-{
2051- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2052- sp -= get_random_int() & ~PAGE_MASK;
2053-
2054- return sp & ALMASK;
2055-}
2056diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
2057index 060563a..7fbf310 100644
2058--- a/arch/mips/kernel/reset.c
2059+++ b/arch/mips/kernel/reset.c
2060@@ -19,8 +19,8 @@
2061 * So handle all using function pointers to machine specific
2062 * functions.
2063 */
2064-void (*_machine_restart)(char *command);
2065-void (*_machine_halt)(void);
2066+void (*__noreturn _machine_restart)(char *command);
2067+void (*__noreturn _machine_halt)(void);
2068 void (*pm_power_off)(void);
2069
2070 EXPORT_SYMBOL(pm_power_off);
2071@@ -29,16 +29,19 @@ void machine_restart(char *command)
2072 {
2073 if (_machine_restart)
2074 _machine_restart(command);
2075+ BUG();
2076 }
2077
2078 void machine_halt(void)
2079 {
2080 if (_machine_halt)
2081 _machine_halt();
2082+ BUG();
2083 }
2084
2085 void machine_power_off(void)
2086 {
2087 if (pm_power_off)
2088 pm_power_off();
2089+ BUG();
2090 }
2091diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
2092index 3f7f466..3abe0b5 100644
2093--- a/arch/mips/kernel/syscall.c
2094+++ b/arch/mips/kernel/syscall.c
2095@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2096 do_color_align = 0;
2097 if (filp || (flags & MAP_SHARED))
2098 do_color_align = 1;
2099+
2100+#ifdef CONFIG_PAX_RANDMMAP
2101+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
2102+#endif
2103+
2104 if (addr) {
2105 if (do_color_align)
2106 addr = COLOUR_ALIGN(addr, pgoff);
2107 else
2108 addr = PAGE_ALIGN(addr);
2109 vmm = find_vma(current->mm, addr);
2110- if (task_size - len >= addr &&
2111- (!vmm || addr + len <= vmm->vm_start))
2112+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
2113 return addr;
2114 }
2115- addr = TASK_UNMAPPED_BASE;
2116+ addr = current->mm->mmap_base;
2117 if (do_color_align)
2118 addr = COLOUR_ALIGN(addr, pgoff);
2119 else
2120@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2121 /* At this point: (!vmm || addr < vmm->vm_end). */
2122 if (task_size - len < addr)
2123 return -ENOMEM;
2124- if (!vmm || addr + len <= vmm->vm_start)
2125+ if (check_heap_stack_gap(vmm, addr, len))
2126 return addr;
2127 addr = vmm->vm_end;
2128 if (do_color_align)
2129diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
2130index e97a7a2..f18f5b0 100644
2131--- a/arch/mips/mm/fault.c
2132+++ b/arch/mips/mm/fault.c
2133@@ -26,6 +26,23 @@
2134 #include <asm/ptrace.h>
2135 #include <asm/highmem.h> /* For VMALLOC_END */
2136
2137+#ifdef CONFIG_PAX_PAGEEXEC
2138+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2139+{
2140+ unsigned long i;
2141+
2142+ printk(KERN_ERR "PAX: bytes at PC: ");
2143+ for (i = 0; i < 5; i++) {
2144+ unsigned int c;
2145+ if (get_user(c, (unsigned int *)pc+i))
2146+ printk(KERN_CONT "???????? ");
2147+ else
2148+ printk(KERN_CONT "%08x ", c);
2149+ }
2150+ printk("\n");
2151+}
2152+#endif
2153+
2154 /*
2155 * This routine handles page faults. It determines the address,
2156 * and the problem, and then passes it off to one of the appropriate
2157diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
2158index 9c802eb..0592e41 100644
2159--- a/arch/parisc/include/asm/elf.h
2160+++ b/arch/parisc/include/asm/elf.h
2161@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration... */
2162
2163 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
2164
2165+#ifdef CONFIG_PAX_ASLR
2166+#define PAX_ELF_ET_DYN_BASE 0x10000UL
2167+
2168+#define PAX_DELTA_MMAP_LEN 16
2169+#define PAX_DELTA_STACK_LEN 16
2170+#endif
2171+
2172 /* This yields a mask that user programs can use to figure out what
2173 instruction set this CPU supports. This could be done in user space,
2174 but it's not easy, and we've already done it here. */
2175diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
2176index a27d2e2..18fd845 100644
2177--- a/arch/parisc/include/asm/pgtable.h
2178+++ b/arch/parisc/include/asm/pgtable.h
2179@@ -207,6 +207,17 @@
2180 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
2181 #define PAGE_COPY PAGE_EXECREAD
2182 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
2183+
2184+#ifdef CONFIG_PAX_PAGEEXEC
2185+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
2186+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2187+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
2188+#else
2189+# define PAGE_SHARED_NOEXEC PAGE_SHARED
2190+# define PAGE_COPY_NOEXEC PAGE_COPY
2191+# define PAGE_READONLY_NOEXEC PAGE_READONLY
2192+#endif
2193+
2194 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
2195 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
2196 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
2197diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
2198index 2120746..8d70a5e 100644
2199--- a/arch/parisc/kernel/module.c
2200+++ b/arch/parisc/kernel/module.c
2201@@ -95,16 +95,38 @@
2202
2203 /* three functions to determine where in the module core
2204 * or init pieces the location is */
2205+static inline int in_init_rx(struct module *me, void *loc)
2206+{
2207+ return (loc >= me->module_init_rx &&
2208+ loc < (me->module_init_rx + me->init_size_rx));
2209+}
2210+
2211+static inline int in_init_rw(struct module *me, void *loc)
2212+{
2213+ return (loc >= me->module_init_rw &&
2214+ loc < (me->module_init_rw + me->init_size_rw));
2215+}
2216+
2217 static inline int in_init(struct module *me, void *loc)
2218 {
2219- return (loc >= me->module_init &&
2220- loc <= (me->module_init + me->init_size));
2221+ return in_init_rx(me, loc) || in_init_rw(me, loc);
2222+}
2223+
2224+static inline int in_core_rx(struct module *me, void *loc)
2225+{
2226+ return (loc >= me->module_core_rx &&
2227+ loc < (me->module_core_rx + me->core_size_rx));
2228+}
2229+
2230+static inline int in_core_rw(struct module *me, void *loc)
2231+{
2232+ return (loc >= me->module_core_rw &&
2233+ loc < (me->module_core_rw + me->core_size_rw));
2234 }
2235
2236 static inline int in_core(struct module *me, void *loc)
2237 {
2238- return (loc >= me->module_core &&
2239- loc <= (me->module_core + me->core_size));
2240+ return in_core_rx(me, loc) || in_core_rw(me, loc);
2241 }
2242
2243 static inline int in_local(struct module *me, void *loc)
2244@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
2245 }
2246
2247 /* align things a bit */
2248- me->core_size = ALIGN(me->core_size, 16);
2249- me->arch.got_offset = me->core_size;
2250- me->core_size += gots * sizeof(struct got_entry);
2251+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2252+ me->arch.got_offset = me->core_size_rw;
2253+ me->core_size_rw += gots * sizeof(struct got_entry);
2254
2255- me->core_size = ALIGN(me->core_size, 16);
2256- me->arch.fdesc_offset = me->core_size;
2257- me->core_size += fdescs * sizeof(Elf_Fdesc);
2258+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
2259+ me->arch.fdesc_offset = me->core_size_rw;
2260+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
2261
2262 me->arch.got_max = gots;
2263 me->arch.fdesc_max = fdescs;
2264@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2265
2266 BUG_ON(value == 0);
2267
2268- got = me->module_core + me->arch.got_offset;
2269+ got = me->module_core_rw + me->arch.got_offset;
2270 for (i = 0; got[i].addr; i++)
2271 if (got[i].addr == value)
2272 goto out;
2273@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
2274 #ifdef CONFIG_64BIT
2275 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2276 {
2277- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
2278+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
2279
2280 if (!value) {
2281 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
2282@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
2283
2284 /* Create new one */
2285 fdesc->addr = value;
2286- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2287+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2288 return (Elf_Addr)fdesc;
2289 }
2290 #endif /* CONFIG_64BIT */
2291@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
2292
2293 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
2294 end = table + sechdrs[me->arch.unwind_section].sh_size;
2295- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
2296+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
2297
2298 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
2299 me->arch.unwind_section, table, end, gp);
2300diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
2301index 9147391..f3d949a 100644
2302--- a/arch/parisc/kernel/sys_parisc.c
2303+++ b/arch/parisc/kernel/sys_parisc.c
2304@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
2305 /* At this point: (!vma || addr < vma->vm_end). */
2306 if (TASK_SIZE - len < addr)
2307 return -ENOMEM;
2308- if (!vma || addr + len <= vma->vm_start)
2309+ if (check_heap_stack_gap(vma, addr, len))
2310 return addr;
2311 addr = vma->vm_end;
2312 }
2313@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
2314 /* At this point: (!vma || addr < vma->vm_end). */
2315 if (TASK_SIZE - len < addr)
2316 return -ENOMEM;
2317- if (!vma || addr + len <= vma->vm_start)
2318+ if (check_heap_stack_gap(vma, addr, len))
2319 return addr;
2320 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
2321 if (addr < vma->vm_end) /* handle wraparound */
2322@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
2323 if (flags & MAP_FIXED)
2324 return addr;
2325 if (!addr)
2326- addr = TASK_UNMAPPED_BASE;
2327+ addr = current->mm->mmap_base;
2328
2329 if (filp) {
2330 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
2331diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
2332index 8b58bf0..7afff03 100644
2333--- a/arch/parisc/kernel/traps.c
2334+++ b/arch/parisc/kernel/traps.c
2335@@ -733,9 +733,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
2336
2337 down_read(&current->mm->mmap_sem);
2338 vma = find_vma(current->mm,regs->iaoq[0]);
2339- if (vma && (regs->iaoq[0] >= vma->vm_start)
2340- && (vma->vm_flags & VM_EXEC)) {
2341-
2342+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
2343 fault_address = regs->iaoq[0];
2344 fault_space = regs->iasq[0];
2345
2346diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
2347index c6afbfc..c5839f6 100644
2348--- a/arch/parisc/mm/fault.c
2349+++ b/arch/parisc/mm/fault.c
2350@@ -15,6 +15,7 @@
2351 #include <linux/sched.h>
2352 #include <linux/interrupt.h>
2353 #include <linux/module.h>
2354+#include <linux/unistd.h>
2355
2356 #include <asm/uaccess.h>
2357 #include <asm/traps.h>
2358@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
2359 static unsigned long
2360 parisc_acctyp(unsigned long code, unsigned int inst)
2361 {
2362- if (code == 6 || code == 16)
2363+ if (code == 6 || code == 7 || code == 16)
2364 return VM_EXEC;
2365
2366 switch (inst & 0xf0000000) {
2367@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
2368 }
2369 #endif
2370
2371+#ifdef CONFIG_PAX_PAGEEXEC
2372+/*
2373+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
2374+ *
2375+ * returns 1 when task should be killed
2376+ * 2 when rt_sigreturn trampoline was detected
2377+ * 3 when unpatched PLT trampoline was detected
2378+ */
2379+static int pax_handle_fetch_fault(struct pt_regs *regs)
2380+{
2381+
2382+#ifdef CONFIG_PAX_EMUPLT
2383+ int err;
2384+
2385+ do { /* PaX: unpatched PLT emulation */
2386+ unsigned int bl, depwi;
2387+
2388+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
2389+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
2390+
2391+ if (err)
2392+ break;
2393+
2394+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
2395+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
2396+
2397+ err = get_user(ldw, (unsigned int *)addr);
2398+ err |= get_user(bv, (unsigned int *)(addr+4));
2399+ err |= get_user(ldw2, (unsigned int *)(addr+8));
2400+
2401+ if (err)
2402+ break;
2403+
2404+ if (ldw == 0x0E801096U &&
2405+ bv == 0xEAC0C000U &&
2406+ ldw2 == 0x0E881095U)
2407+ {
2408+ unsigned int resolver, map;
2409+
2410+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
2411+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
2412+ if (err)
2413+ break;
2414+
2415+ regs->gr[20] = instruction_pointer(regs)+8;
2416+ regs->gr[21] = map;
2417+ regs->gr[22] = resolver;
2418+ regs->iaoq[0] = resolver | 3UL;
2419+ regs->iaoq[1] = regs->iaoq[0] + 4;
2420+ return 3;
2421+ }
2422+ }
2423+ } while (0);
2424+#endif
2425+
2426+#ifdef CONFIG_PAX_EMUTRAMP
2427+
2428+#ifndef CONFIG_PAX_EMUSIGRT
2429+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
2430+ return 1;
2431+#endif
2432+
2433+ do { /* PaX: rt_sigreturn emulation */
2434+ unsigned int ldi1, ldi2, bel, nop;
2435+
2436+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
2437+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
2438+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
2439+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
2440+
2441+ if (err)
2442+ break;
2443+
2444+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
2445+ ldi2 == 0x3414015AU &&
2446+ bel == 0xE4008200U &&
2447+ nop == 0x08000240U)
2448+ {
2449+ regs->gr[25] = (ldi1 & 2) >> 1;
2450+ regs->gr[20] = __NR_rt_sigreturn;
2451+ regs->gr[31] = regs->iaoq[1] + 16;
2452+ regs->sr[0] = regs->iasq[1];
2453+ regs->iaoq[0] = 0x100UL;
2454+ regs->iaoq[1] = regs->iaoq[0] + 4;
2455+ regs->iasq[0] = regs->sr[2];
2456+ regs->iasq[1] = regs->sr[2];
2457+ return 2;
2458+ }
2459+ } while (0);
2460+#endif
2461+
2462+ return 1;
2463+}
2464+
2465+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
2466+{
2467+ unsigned long i;
2468+
2469+ printk(KERN_ERR "PAX: bytes at PC: ");
2470+ for (i = 0; i < 5; i++) {
2471+ unsigned int c;
2472+ if (get_user(c, (unsigned int *)pc+i))
2473+ printk(KERN_CONT "???????? ");
2474+ else
2475+ printk(KERN_CONT "%08x ", c);
2476+ }
2477+ printk("\n");
2478+}
2479+#endif
2480+
2481 int fixup_exception(struct pt_regs *regs)
2482 {
2483 const struct exception_table_entry *fix;
2484@@ -192,8 +303,33 @@ good_area:
2485
2486 acc_type = parisc_acctyp(code,regs->iir);
2487
2488- if ((vma->vm_flags & acc_type) != acc_type)
2489+ if ((vma->vm_flags & acc_type) != acc_type) {
2490+
2491+#ifdef CONFIG_PAX_PAGEEXEC
2492+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2493+ (address & ~3UL) == instruction_pointer(regs))
2494+ {
2495+ up_read(&mm->mmap_sem);
2496+ switch (pax_handle_fetch_fault(regs)) {
2497+
2498+#ifdef CONFIG_PAX_EMUPLT
2499+ case 3:
2500+ return;
2501+#endif
2502+
2503+#ifdef CONFIG_PAX_EMUTRAMP
2504+ case 2:
2505+ return;
2506+#endif
2507+
2508+ }
2509+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2510+ do_group_exit(SIGKILL);
2511+ }
2512+#endif
2513+
2514 goto bad_area;
2515+ }
2516
2517 /*
2518 * If for any reason at all we couldn't handle the fault, make
2519diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
2520index c107b74..409dc0f 100644
2521--- a/arch/powerpc/Makefile
2522+++ b/arch/powerpc/Makefile
2523@@ -74,6 +74,8 @@ KBUILD_AFLAGS += -Iarch/$(ARCH)
2524 KBUILD_CFLAGS += -msoft-float -pipe -Iarch/$(ARCH) $(CFLAGS-y)
2525 CPP = $(CC) -E $(KBUILD_CFLAGS)
2526
2527+cflags-y += -Wno-sign-compare -Wno-extra
2528+
2529 CHECKFLAGS += -m$(CONFIG_WORD_SIZE) -D__powerpc__ -D__powerpc$(CONFIG_WORD_SIZE)__
2530
2531 ifeq ($(CONFIG_PPC64),y)
2532diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
2533index 6d94d27..50d4cad 100644
2534--- a/arch/powerpc/include/asm/device.h
2535+++ b/arch/powerpc/include/asm/device.h
2536@@ -14,7 +14,7 @@ struct dev_archdata {
2537 struct device_node *of_node;
2538
2539 /* DMA operations on that device */
2540- struct dma_map_ops *dma_ops;
2541+ const struct dma_map_ops *dma_ops;
2542
2543 /*
2544 * When an iommu is in use, dma_data is used as a ptr to the base of the
2545diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
2546index e281dae..2b8a784 100644
2547--- a/arch/powerpc/include/asm/dma-mapping.h
2548+++ b/arch/powerpc/include/asm/dma-mapping.h
2549@@ -69,9 +69,9 @@ static inline unsigned long device_to_mask(struct device *dev)
2550 #ifdef CONFIG_PPC64
2551 extern struct dma_map_ops dma_iommu_ops;
2552 #endif
2553-extern struct dma_map_ops dma_direct_ops;
2554+extern const struct dma_map_ops dma_direct_ops;
2555
2556-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2557+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2558 {
2559 /* We don't handle the NULL dev case for ISA for now. We could
2560 * do it via an out of line call but it is not needed for now. The
2561@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2562 return dev->archdata.dma_ops;
2563 }
2564
2565-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2566+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2567 {
2568 dev->archdata.dma_ops = ops;
2569 }
2570@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct device *dev, dma_addr_t off)
2571
2572 static inline int dma_supported(struct device *dev, u64 mask)
2573 {
2574- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2575+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2576
2577 if (unlikely(dma_ops == NULL))
2578 return 0;
2579@@ -132,7 +132,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
2580
2581 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2582 {
2583- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2584+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2585
2586 if (unlikely(dma_ops == NULL))
2587 return -EIO;
2588@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2589 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2590 dma_addr_t *dma_handle, gfp_t flag)
2591 {
2592- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2593+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2594 void *cpu_addr;
2595
2596 BUG_ON(!dma_ops);
2597@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2598 static inline void dma_free_coherent(struct device *dev, size_t size,
2599 void *cpu_addr, dma_addr_t dma_handle)
2600 {
2601- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2602+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2603
2604 BUG_ON(!dma_ops);
2605
2606@@ -173,7 +173,7 @@ static inline void dma_free_coherent(struct device *dev, size_t size,
2607
2608 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2609 {
2610- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2611+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2612
2613 if (dma_ops->mapping_error)
2614 return dma_ops->mapping_error(dev, dma_addr);
2615diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
2616index 5698502..5db093c 100644
2617--- a/arch/powerpc/include/asm/elf.h
2618+++ b/arch/powerpc/include/asm/elf.h
2619@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[ELF_NVSRHALFREG];
2620 the loader. We need to make sure that it is out of the way of the program
2621 that it will "exec", and that there is sufficient room for the brk. */
2622
2623-extern unsigned long randomize_et_dyn(unsigned long base);
2624-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2625+#define ELF_ET_DYN_BASE (0x20000000)
2626+
2627+#ifdef CONFIG_PAX_ASLR
2628+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2629+
2630+#ifdef __powerpc64__
2631+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2632+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2633+#else
2634+#define PAX_DELTA_MMAP_LEN 15
2635+#define PAX_DELTA_STACK_LEN 15
2636+#endif
2637+#endif
2638
2639 /*
2640 * Our registers are always unsigned longs, whether we're a 32 bit
2641@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
2642 (0x7ff >> (PAGE_SHIFT - 12)) : \
2643 (0x3ffff >> (PAGE_SHIFT - 12)))
2644
2645-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2646-#define arch_randomize_brk arch_randomize_brk
2647-
2648 #endif /* __KERNEL__ */
2649
2650 /*
2651diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
2652index edfc980..1766f59 100644
2653--- a/arch/powerpc/include/asm/iommu.h
2654+++ b/arch/powerpc/include/asm/iommu.h
2655@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(void);
2656 extern void iommu_init_early_dart(void);
2657 extern void iommu_init_early_pasemi(void);
2658
2659+/* dma-iommu.c */
2660+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2661+
2662 #ifdef CONFIG_PCI
2663 extern void pci_iommu_init(void);
2664 extern void pci_direct_iommu_init(void);
2665diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
2666index 9163695..5a00112 100644
2667--- a/arch/powerpc/include/asm/kmap_types.h
2668+++ b/arch/powerpc/include/asm/kmap_types.h
2669@@ -26,6 +26,7 @@ enum km_type {
2670 KM_SOFTIRQ1,
2671 KM_PPC_SYNC_PAGE,
2672 KM_PPC_SYNC_ICACHE,
2673+ KM_CLEARPAGE,
2674 KM_TYPE_NR
2675 };
2676
2677diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
2678index ff24254..fe45b21 100644
2679--- a/arch/powerpc/include/asm/page.h
2680+++ b/arch/powerpc/include/asm/page.h
2681@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2682 * and needs to be executable. This means the whole heap ends
2683 * up being executable.
2684 */
2685-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2686- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2687+#define VM_DATA_DEFAULT_FLAGS32 \
2688+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2689+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2690
2691 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2692 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2693@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2694 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2695 #endif
2696
2697+#define ktla_ktva(addr) (addr)
2698+#define ktva_ktla(addr) (addr)
2699+
2700 #ifndef __ASSEMBLY__
2701
2702 #undef STRICT_MM_TYPECHECKS
2703diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
2704index 3f17b83..1f9e766 100644
2705--- a/arch/powerpc/include/asm/page_64.h
2706+++ b/arch/powerpc/include/asm/page_64.h
2707@@ -180,15 +180,18 @@ do { \
2708 * stack by default, so in the absense of a PT_GNU_STACK program header
2709 * we turn execute permission off.
2710 */
2711-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2712- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2713+#define VM_STACK_DEFAULT_FLAGS32 \
2714+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2715+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2716
2717 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2718 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2719
2720+#ifndef CONFIG_PAX_PAGEEXEC
2721 #define VM_STACK_DEFAULT_FLAGS \
2722 (test_thread_flag(TIF_32BIT) ? \
2723 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2724+#endif
2725
2726 #include <asm-generic/getorder.h>
2727
2728diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
2729index b5ea626..4030822 100644
2730--- a/arch/powerpc/include/asm/pci.h
2731+++ b/arch/powerpc/include/asm/pci.h
2732@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
2733 }
2734
2735 #ifdef CONFIG_PCI
2736-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2737-extern struct dma_map_ops *get_pci_dma_ops(void);
2738+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2739+extern const struct dma_map_ops *get_pci_dma_ops(void);
2740 #else /* CONFIG_PCI */
2741 #define set_pci_dma_ops(d)
2742 #define get_pci_dma_ops() NULL
2743diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
2744index 2a5da06..d65bea2 100644
2745--- a/arch/powerpc/include/asm/pgtable.h
2746+++ b/arch/powerpc/include/asm/pgtable.h
2747@@ -2,6 +2,7 @@
2748 #define _ASM_POWERPC_PGTABLE_H
2749 #ifdef __KERNEL__
2750
2751+#include <linux/const.h>
2752 #ifndef __ASSEMBLY__
2753 #include <asm/processor.h> /* For TASK_SIZE */
2754 #include <asm/mmu.h>
2755diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
2756index 4aad413..85d86bf 100644
2757--- a/arch/powerpc/include/asm/pte-hash32.h
2758+++ b/arch/powerpc/include/asm/pte-hash32.h
2759@@ -21,6 +21,7 @@
2760 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2761 #define _PAGE_USER 0x004 /* usermode access allowed */
2762 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2763+#define _PAGE_EXEC _PAGE_GUARDED
2764 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2765 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2766 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2767diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
2768index 8c34149..78f425a 100644
2769--- a/arch/powerpc/include/asm/ptrace.h
2770+++ b/arch/powerpc/include/asm/ptrace.h
2771@@ -103,7 +103,7 @@ extern unsigned long profile_pc(struct pt_regs *regs);
2772 } while(0)
2773
2774 struct task_struct;
2775-extern unsigned long ptrace_get_reg(struct task_struct *task, int regno);
2776+extern unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno);
2777 extern int ptrace_put_reg(struct task_struct *task, int regno,
2778 unsigned long data);
2779
2780diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
2781index 32a7c30..be3a8bb 100644
2782--- a/arch/powerpc/include/asm/reg.h
2783+++ b/arch/powerpc/include/asm/reg.h
2784@@ -191,6 +191,7 @@
2785 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2786 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2787 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2788+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2789 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2790 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2791 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2792diff --git a/arch/powerpc/include/asm/swiotlb.h b/arch/powerpc/include/asm/swiotlb.h
2793index 8979d4c..d2fd0d3 100644
2794--- a/arch/powerpc/include/asm/swiotlb.h
2795+++ b/arch/powerpc/include/asm/swiotlb.h
2796@@ -13,7 +13,7 @@
2797
2798 #include <linux/swiotlb.h>
2799
2800-extern struct dma_map_ops swiotlb_dma_ops;
2801+extern const struct dma_map_ops swiotlb_dma_ops;
2802
2803 static inline void dma_mark_clean(void *addr, size_t size) {}
2804
2805diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
2806index 094a12a..877a60a 100644
2807--- a/arch/powerpc/include/asm/system.h
2808+++ b/arch/powerpc/include/asm/system.h
2809@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsigned long old, unsigned long new,
2810 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2811 #endif
2812
2813-extern unsigned long arch_align_stack(unsigned long sp);
2814+#define arch_align_stack(x) ((x) & ~0xfUL)
2815
2816 /* Used in very early kernel initialization. */
2817 extern unsigned long reloc_offset(void);
2818diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
2819index bd0fb84..a42a14b 100644
2820--- a/arch/powerpc/include/asm/uaccess.h
2821+++ b/arch/powerpc/include/asm/uaccess.h
2822@@ -13,6 +13,8 @@
2823 #define VERIFY_READ 0
2824 #define VERIFY_WRITE 1
2825
2826+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2827+
2828 /*
2829 * The fs value determines whether argument validity checking should be
2830 * performed or not. If get_fs() == USER_DS, checking is performed, with
2831@@ -327,52 +329,6 @@ do { \
2832 extern unsigned long __copy_tofrom_user(void __user *to,
2833 const void __user *from, unsigned long size);
2834
2835-#ifndef __powerpc64__
2836-
2837-static inline unsigned long copy_from_user(void *to,
2838- const void __user *from, unsigned long n)
2839-{
2840- unsigned long over;
2841-
2842- if (access_ok(VERIFY_READ, from, n))
2843- return __copy_tofrom_user((__force void __user *)to, from, n);
2844- if ((unsigned long)from < TASK_SIZE) {
2845- over = (unsigned long)from + n - TASK_SIZE;
2846- return __copy_tofrom_user((__force void __user *)to, from,
2847- n - over) + over;
2848- }
2849- return n;
2850-}
2851-
2852-static inline unsigned long copy_to_user(void __user *to,
2853- const void *from, unsigned long n)
2854-{
2855- unsigned long over;
2856-
2857- if (access_ok(VERIFY_WRITE, to, n))
2858- return __copy_tofrom_user(to, (__force void __user *)from, n);
2859- if ((unsigned long)to < TASK_SIZE) {
2860- over = (unsigned long)to + n - TASK_SIZE;
2861- return __copy_tofrom_user(to, (__force void __user *)from,
2862- n - over) + over;
2863- }
2864- return n;
2865-}
2866-
2867-#else /* __powerpc64__ */
2868-
2869-#define __copy_in_user(to, from, size) \
2870- __copy_tofrom_user((to), (from), (size))
2871-
2872-extern unsigned long copy_from_user(void *to, const void __user *from,
2873- unsigned long n);
2874-extern unsigned long copy_to_user(void __user *to, const void *from,
2875- unsigned long n);
2876-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2877- unsigned long n);
2878-
2879-#endif /* __powerpc64__ */
2880-
2881 static inline unsigned long __copy_from_user_inatomic(void *to,
2882 const void __user *from, unsigned long n)
2883 {
2884@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
2885 if (ret == 0)
2886 return 0;
2887 }
2888+
2889+ if (!__builtin_constant_p(n))
2890+ check_object_size(to, n, false);
2891+
2892 return __copy_tofrom_user((__force void __user *)to, from, n);
2893 }
2894
2895@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
2896 if (ret == 0)
2897 return 0;
2898 }
2899+
2900+ if (!__builtin_constant_p(n))
2901+ check_object_size(from, n, true);
2902+
2903 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2904 }
2905
2906@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_user(void __user *to,
2907 return __copy_to_user_inatomic(to, from, size);
2908 }
2909
2910+#ifndef __powerpc64__
2911+
2912+static inline unsigned long __must_check copy_from_user(void *to,
2913+ const void __user *from, unsigned long n)
2914+{
2915+ unsigned long over;
2916+
2917+ if ((long)n < 0)
2918+ return n;
2919+
2920+ if (access_ok(VERIFY_READ, from, n)) {
2921+ if (!__builtin_constant_p(n))
2922+ check_object_size(to, n, false);
2923+ return __copy_tofrom_user((__force void __user *)to, from, n);
2924+ }
2925+ if ((unsigned long)from < TASK_SIZE) {
2926+ over = (unsigned long)from + n - TASK_SIZE;
2927+ if (!__builtin_constant_p(n - over))
2928+ check_object_size(to, n - over, false);
2929+ return __copy_tofrom_user((__force void __user *)to, from,
2930+ n - over) + over;
2931+ }
2932+ return n;
2933+}
2934+
2935+static inline unsigned long __must_check copy_to_user(void __user *to,
2936+ const void *from, unsigned long n)
2937+{
2938+ unsigned long over;
2939+
2940+ if ((long)n < 0)
2941+ return n;
2942+
2943+ if (access_ok(VERIFY_WRITE, to, n)) {
2944+ if (!__builtin_constant_p(n))
2945+ check_object_size(from, n, true);
2946+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2947+ }
2948+ if ((unsigned long)to < TASK_SIZE) {
2949+ over = (unsigned long)to + n - TASK_SIZE;
2950+ if (!__builtin_constant_p(n))
2951+ check_object_size(from, n - over, true);
2952+ return __copy_tofrom_user(to, (__force void __user *)from,
2953+ n - over) + over;
2954+ }
2955+ return n;
2956+}
2957+
2958+#else /* __powerpc64__ */
2959+
2960+#define __copy_in_user(to, from, size) \
2961+ __copy_tofrom_user((to), (from), (size))
2962+
2963+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2964+{
2965+ if ((long)n < 0 || n > INT_MAX)
2966+ return n;
2967+
2968+ if (!__builtin_constant_p(n))
2969+ check_object_size(to, n, false);
2970+
2971+ if (likely(access_ok(VERIFY_READ, from, n)))
2972+ n = __copy_from_user(to, from, n);
2973+ else
2974+ memset(to, 0, n);
2975+ return n;
2976+}
2977+
2978+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2979+{
2980+ if ((long)n < 0 || n > INT_MAX)
2981+ return n;
2982+
2983+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2984+ if (!__builtin_constant_p(n))
2985+ check_object_size(from, n, true);
2986+ n = __copy_to_user(to, from, n);
2987+ }
2988+ return n;
2989+}
2990+
2991+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2992+ unsigned long n);
2993+
2994+#endif /* __powerpc64__ */
2995+
2996 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2997
2998 static inline unsigned long clear_user(void __user *addr, unsigned long size)
2999diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
3000index bb37b1d..01fe9ce 100644
3001--- a/arch/powerpc/kernel/cacheinfo.c
3002+++ b/arch/powerpc/kernel/cacheinfo.c
3003@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_index_opt_attrs[] = {
3004 &cache_assoc_attr,
3005 };
3006
3007-static struct sysfs_ops cache_index_ops = {
3008+static const struct sysfs_ops cache_index_ops = {
3009 .show = cache_index_show,
3010 };
3011
3012diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c
3013index 37771a5..648530c 100644
3014--- a/arch/powerpc/kernel/dma-iommu.c
3015+++ b/arch/powerpc/kernel/dma-iommu.c
3016@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct device *dev, struct scatterlist *sglist,
3017 }
3018
3019 /* We support DMA to/from any memory page via the iommu */
3020-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
3021+int dma_iommu_dma_supported(struct device *dev, u64 mask)
3022 {
3023 struct iommu_table *tbl = get_iommu_table_base(dev);
3024
3025diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
3026index e96cbbd..bdd6d41 100644
3027--- a/arch/powerpc/kernel/dma-swiotlb.c
3028+++ b/arch/powerpc/kernel/dma-swiotlb.c
3029@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
3030 * map_page, and unmap_page on highmem, use normal dma_ops
3031 * for everything else.
3032 */
3033-struct dma_map_ops swiotlb_dma_ops = {
3034+const struct dma_map_ops swiotlb_dma_ops = {
3035 .alloc_coherent = dma_direct_alloc_coherent,
3036 .free_coherent = dma_direct_free_coherent,
3037 .map_sg = swiotlb_map_sg_attrs,
3038diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
3039index 6215062..ebea59c 100644
3040--- a/arch/powerpc/kernel/dma.c
3041+++ b/arch/powerpc/kernel/dma.c
3042@@ -134,7 +134,7 @@ static inline void dma_direct_sync_single_range(struct device *dev,
3043 }
3044 #endif
3045
3046-struct dma_map_ops dma_direct_ops = {
3047+const struct dma_map_ops dma_direct_ops = {
3048 .alloc_coherent = dma_direct_alloc_coherent,
3049 .free_coherent = dma_direct_free_coherent,
3050 .map_sg = dma_direct_map_sg,
3051diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
3052index 24dcc0e..a300455 100644
3053--- a/arch/powerpc/kernel/exceptions-64e.S
3054+++ b/arch/powerpc/kernel/exceptions-64e.S
3055@@ -455,6 +455,7 @@ storage_fault_common:
3056 std r14,_DAR(r1)
3057 std r15,_DSISR(r1)
3058 addi r3,r1,STACK_FRAME_OVERHEAD
3059+ bl .save_nvgprs
3060 mr r4,r14
3061 mr r5,r15
3062 ld r14,PACA_EXGEN+EX_R14(r13)
3063@@ -464,8 +465,7 @@ storage_fault_common:
3064 cmpdi r3,0
3065 bne- 1f
3066 b .ret_from_except_lite
3067-1: bl .save_nvgprs
3068- mr r5,r3
3069+1: mr r5,r3
3070 addi r3,r1,STACK_FRAME_OVERHEAD
3071 ld r4,_DAR(r1)
3072 bl .bad_page_fault
3073diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
3074index 1808876..9fd206a 100644
3075--- a/arch/powerpc/kernel/exceptions-64s.S
3076+++ b/arch/powerpc/kernel/exceptions-64s.S
3077@@ -818,10 +818,10 @@ handle_page_fault:
3078 11: ld r4,_DAR(r1)
3079 ld r5,_DSISR(r1)
3080 addi r3,r1,STACK_FRAME_OVERHEAD
3081+ bl .save_nvgprs
3082 bl .do_page_fault
3083 cmpdi r3,0
3084 beq+ 13f
3085- bl .save_nvgprs
3086 mr r5,r3
3087 addi r3,r1,STACK_FRAME_OVERHEAD
3088 lwz r4,_DAR(r1)
3089diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
3090index a4c8b38..1b09ad9 100644
3091--- a/arch/powerpc/kernel/ibmebus.c
3092+++ b/arch/powerpc/kernel/ibmebus.c
3093@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct device *dev, u64 mask)
3094 return 1;
3095 }
3096
3097-static struct dma_map_ops ibmebus_dma_ops = {
3098+static const struct dma_map_ops ibmebus_dma_ops = {
3099 .alloc_coherent = ibmebus_alloc_coherent,
3100 .free_coherent = ibmebus_free_coherent,
3101 .map_sg = ibmebus_map_sg,
3102diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
3103index 641c74b..8339ad7 100644
3104--- a/arch/powerpc/kernel/kgdb.c
3105+++ b/arch/powerpc/kernel/kgdb.c
3106@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct pt_regs *regs)
3107 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
3108 return 0;
3109
3110- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3111+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
3112 regs->nip += 4;
3113
3114 return 1;
3115@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
3116 /*
3117 * Global data
3118 */
3119-struct kgdb_arch arch_kgdb_ops = {
3120+const struct kgdb_arch arch_kgdb_ops = {
3121 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
3122 };
3123
3124diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
3125index 477c663..4f50234 100644
3126--- a/arch/powerpc/kernel/module.c
3127+++ b/arch/powerpc/kernel/module.c
3128@@ -31,11 +31,24 @@
3129
3130 LIST_HEAD(module_bug_list);
3131
3132+#ifdef CONFIG_PAX_KERNEXEC
3133 void *module_alloc(unsigned long size)
3134 {
3135 if (size == 0)
3136 return NULL;
3137
3138+ return vmalloc(size);
3139+}
3140+
3141+void *module_alloc_exec(unsigned long size)
3142+#else
3143+void *module_alloc(unsigned long size)
3144+#endif
3145+
3146+{
3147+ if (size == 0)
3148+ return NULL;
3149+
3150 return vmalloc_exec(size);
3151 }
3152
3153@@ -45,6 +58,13 @@ void module_free(struct module *mod, void *module_region)
3154 vfree(module_region);
3155 }
3156
3157+#ifdef CONFIG_PAX_KERNEXEC
3158+void module_free_exec(struct module *mod, void *module_region)
3159+{
3160+ module_free(mod, module_region);
3161+}
3162+#endif
3163+
3164 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
3165 const Elf_Shdr *sechdrs,
3166 const char *name)
3167diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
3168index f832773..0507238 100644
3169--- a/arch/powerpc/kernel/module_32.c
3170+++ b/arch/powerpc/kernel/module_32.c
3171@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
3172 me->arch.core_plt_section = i;
3173 }
3174 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
3175- printk("Module doesn't contain .plt or .init.plt sections.\n");
3176+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
3177 return -ENOEXEC;
3178 }
3179
3180@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *location,
3181
3182 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
3183 /* Init, or core PLT? */
3184- if (location >= mod->module_core
3185- && location < mod->module_core + mod->core_size)
3186+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
3187+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
3188 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
3189- else
3190+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
3191+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
3192 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
3193+ else {
3194+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
3195+ return ~0UL;
3196+ }
3197
3198 /* Find this entry, or if that fails, the next avail. entry */
3199 while (entry->jump[0]) {
3200diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
3201index cadbed6..b9bbb00 100644
3202--- a/arch/powerpc/kernel/pci-common.c
3203+++ b/arch/powerpc/kernel/pci-common.c
3204@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
3205 unsigned int ppc_pci_flags = 0;
3206
3207
3208-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3209+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
3210
3211-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
3212+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
3213 {
3214 pci_dma_ops = dma_ops;
3215 }
3216
3217-struct dma_map_ops *get_pci_dma_ops(void)
3218+const struct dma_map_ops *get_pci_dma_ops(void)
3219 {
3220 return pci_dma_ops;
3221 }
3222diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
3223index 7b816da..8d5c277 100644
3224--- a/arch/powerpc/kernel/process.c
3225+++ b/arch/powerpc/kernel/process.c
3226@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
3227 * Lookup NIP late so we have the best change of getting the
3228 * above info out without failing
3229 */
3230- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
3231- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
3232+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
3233+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
3234 #endif
3235 show_stack(current, (unsigned long *) regs->gpr[1]);
3236 if (!user_mode(regs))
3237@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3238 newsp = stack[0];
3239 ip = stack[STACK_FRAME_LR_SAVE];
3240 if (!firstframe || ip != lr) {
3241- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
3242+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
3243 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3244 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
3245- printk(" (%pS)",
3246+ printk(" (%pA)",
3247 (void *)current->ret_stack[curr_frame].ret);
3248 curr_frame--;
3249 }
3250@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
3251 struct pt_regs *regs = (struct pt_regs *)
3252 (sp + STACK_FRAME_OVERHEAD);
3253 lr = regs->link;
3254- printk("--- Exception: %lx at %pS\n LR = %pS\n",
3255+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
3256 regs->trap, (void *)regs->nip, (void *)lr);
3257 firstframe = 1;
3258 }
3259@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
3260 }
3261
3262 #endif /* THREAD_SHIFT < PAGE_SHIFT */
3263-
3264-unsigned long arch_align_stack(unsigned long sp)
3265-{
3266- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
3267- sp -= get_random_int() & ~PAGE_MASK;
3268- return sp & ~0xf;
3269-}
3270-
3271-static inline unsigned long brk_rnd(void)
3272-{
3273- unsigned long rnd = 0;
3274-
3275- /* 8MB for 32bit, 1GB for 64bit */
3276- if (is_32bit_task())
3277- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
3278- else
3279- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
3280-
3281- return rnd << PAGE_SHIFT;
3282-}
3283-
3284-unsigned long arch_randomize_brk(struct mm_struct *mm)
3285-{
3286- unsigned long base = mm->brk;
3287- unsigned long ret;
3288-
3289-#ifdef CONFIG_PPC_STD_MMU_64
3290- /*
3291- * If we are using 1TB segments and we are allowed to randomise
3292- * the heap, we can put it above 1TB so it is backed by a 1TB
3293- * segment. Otherwise the heap will be in the bottom 1TB
3294- * which always uses 256MB segments and this may result in a
3295- * performance penalty.
3296- */
3297- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
3298- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
3299-#endif
3300-
3301- ret = PAGE_ALIGN(base + brk_rnd());
3302-
3303- if (ret < mm->brk)
3304- return mm->brk;
3305-
3306- return ret;
3307-}
3308-
3309-unsigned long randomize_et_dyn(unsigned long base)
3310-{
3311- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
3312-
3313- if (ret < base)
3314- return base;
3315-
3316- return ret;
3317-}
3318diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
3319index ef14988..856c4bc 100644
3320--- a/arch/powerpc/kernel/ptrace.c
3321+++ b/arch/powerpc/kernel/ptrace.c
3322@@ -86,7 +86,7 @@ static int set_user_trap(struct task_struct *task, unsigned long trap)
3323 /*
3324 * Get contents of register REGNO in task TASK.
3325 */
3326-unsigned long ptrace_get_reg(struct task_struct *task, int regno)
3327+unsigned long ptrace_get_reg(struct task_struct *task, unsigned int regno)
3328 {
3329 if (task->thread.regs == NULL)
3330 return -EIO;
3331@@ -894,7 +894,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
3332
3333 CHECK_FULL_REGS(child->thread.regs);
3334 if (index < PT_FPR0) {
3335- tmp = ptrace_get_reg(child, (int) index);
3336+ tmp = ptrace_get_reg(child, index);
3337 } else {
3338 flush_fp_to_thread(child);
3339 tmp = ((unsigned long *)child->thread.fpr)
3340diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
3341index d670429..2bc59b2 100644
3342--- a/arch/powerpc/kernel/signal_32.c
3343+++ b/arch/powerpc/kernel/signal_32.c
3344@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
3345 /* Save user registers on the stack */
3346 frame = &rt_sf->uc.uc_mcontext;
3347 addr = frame;
3348- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
3349+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3350 if (save_user_regs(regs, frame, 0, 1))
3351 goto badframe;
3352 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
3353diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
3354index 2fe6fc6..ada0d96 100644
3355--- a/arch/powerpc/kernel/signal_64.c
3356+++ b/arch/powerpc/kernel/signal_64.c
3357@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
3358 current->thread.fpscr.val = 0;
3359
3360 /* Set up to return from userspace. */
3361- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
3362+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
3363 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
3364 } else {
3365 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
3366diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
3367index b97c2d6..dd01a6a 100644
3368--- a/arch/powerpc/kernel/sys_ppc32.c
3369+++ b/arch/powerpc/kernel/sys_ppc32.c
3370@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct __sysctl_args32 __user *args)
3371 if (oldlenp) {
3372 if (!error) {
3373 if (get_user(oldlen, oldlenp) ||
3374- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
3375+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
3376+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
3377 error = -EFAULT;
3378 }
3379- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
3380 }
3381 return error;
3382 }
3383diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
3384index 6f0ae1a..e4b6a56 100644
3385--- a/arch/powerpc/kernel/traps.c
3386+++ b/arch/powerpc/kernel/traps.c
3387@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
3388 static inline void pmac_backlight_unblank(void) { }
3389 #endif
3390
3391+extern void gr_handle_kernel_exploit(void);
3392+
3393 int die(const char *str, struct pt_regs *regs, long err)
3394 {
3395 static struct {
3396@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs *regs, long err)
3397 if (panic_on_oops)
3398 panic("Fatal exception");
3399
3400+ gr_handle_kernel_exploit();
3401+
3402 oops_exit();
3403 do_exit(err);
3404
3405diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
3406index 137dc22..fe57a79 100644
3407--- a/arch/powerpc/kernel/vdso.c
3408+++ b/arch/powerpc/kernel/vdso.c
3409@@ -36,6 +36,7 @@
3410 #include <asm/firmware.h>
3411 #include <asm/vdso.h>
3412 #include <asm/vdso_datapage.h>
3413+#include <asm/mman.h>
3414
3415 #include "setup.h"
3416
3417@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3418 vdso_base = VDSO32_MBASE;
3419 #endif
3420
3421- current->mm->context.vdso_base = 0;
3422+ current->mm->context.vdso_base = ~0UL;
3423
3424 /* vDSO has a problem and was disabled, just don't "enable" it for the
3425 * process
3426@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
3427 vdso_base = get_unmapped_area(NULL, vdso_base,
3428 (vdso_pages << PAGE_SHIFT) +
3429 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
3430- 0, 0);
3431+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
3432 if (IS_ERR_VALUE(vdso_base)) {
3433 rc = vdso_base;
3434 goto fail_mmapsem;
3435diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
3436index 77f6421..829564a 100644
3437--- a/arch/powerpc/kernel/vio.c
3438+++ b/arch/powerpc/kernel/vio.c
3439@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struct device *dev,
3440 vio_cmo_dealloc(viodev, alloc_size);
3441 }
3442
3443-struct dma_map_ops vio_dma_mapping_ops = {
3444+static const struct dma_map_ops vio_dma_mapping_ops = {
3445 .alloc_coherent = vio_dma_iommu_alloc_coherent,
3446 .free_coherent = vio_dma_iommu_free_coherent,
3447 .map_sg = vio_dma_iommu_map_sg,
3448 .unmap_sg = vio_dma_iommu_unmap_sg,
3449+ .dma_supported = dma_iommu_dma_supported,
3450 .map_page = vio_dma_iommu_map_page,
3451 .unmap_page = vio_dma_iommu_unmap_page,
3452
3453@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev)
3454
3455 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
3456 {
3457- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
3458 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
3459 }
3460
3461diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
3462index 5eea6f3..5d10396 100644
3463--- a/arch/powerpc/lib/usercopy_64.c
3464+++ b/arch/powerpc/lib/usercopy_64.c
3465@@ -9,22 +9,6 @@
3466 #include <linux/module.h>
3467 #include <asm/uaccess.h>
3468
3469-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
3470-{
3471- if (likely(access_ok(VERIFY_READ, from, n)))
3472- n = __copy_from_user(to, from, n);
3473- else
3474- memset(to, 0, n);
3475- return n;
3476-}
3477-
3478-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
3479-{
3480- if (likely(access_ok(VERIFY_WRITE, to, n)))
3481- n = __copy_to_user(to, from, n);
3482- return n;
3483-}
3484-
3485 unsigned long copy_in_user(void __user *to, const void __user *from,
3486 unsigned long n)
3487 {
3488@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
3489 return n;
3490 }
3491
3492-EXPORT_SYMBOL(copy_from_user);
3493-EXPORT_SYMBOL(copy_to_user);
3494 EXPORT_SYMBOL(copy_in_user);
3495
3496diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
3497index e7dae82..877ce0d 100644
3498--- a/arch/powerpc/mm/fault.c
3499+++ b/arch/powerpc/mm/fault.c
3500@@ -30,6 +30,10 @@
3501 #include <linux/kprobes.h>
3502 #include <linux/kdebug.h>
3503 #include <linux/perf_event.h>
3504+#include <linux/slab.h>
3505+#include <linux/pagemap.h>
3506+#include <linux/compiler.h>
3507+#include <linux/unistd.h>
3508
3509 #include <asm/firmware.h>
3510 #include <asm/page.h>
3511@@ -40,6 +44,7 @@
3512 #include <asm/uaccess.h>
3513 #include <asm/tlbflush.h>
3514 #include <asm/siginfo.h>
3515+#include <asm/ptrace.h>
3516
3517
3518 #ifdef CONFIG_KPROBES
3519@@ -64,6 +69,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
3520 }
3521 #endif
3522
3523+#ifdef CONFIG_PAX_PAGEEXEC
3524+/*
3525+ * PaX: decide what to do with offenders (regs->nip = fault address)
3526+ *
3527+ * returns 1 when task should be killed
3528+ */
3529+static int pax_handle_fetch_fault(struct pt_regs *regs)
3530+{
3531+ return 1;
3532+}
3533+
3534+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3535+{
3536+ unsigned long i;
3537+
3538+ printk(KERN_ERR "PAX: bytes at PC: ");
3539+ for (i = 0; i < 5; i++) {
3540+ unsigned int c;
3541+ if (get_user(c, (unsigned int __user *)pc+i))
3542+ printk(KERN_CONT "???????? ");
3543+ else
3544+ printk(KERN_CONT "%08x ", c);
3545+ }
3546+ printk("\n");
3547+}
3548+#endif
3549+
3550 /*
3551 * Check whether the instruction at regs->nip is a store using
3552 * an update addressing form which will update r1.
3553@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
3554 * indicate errors in DSISR but can validly be set in SRR1.
3555 */
3556 if (trap == 0x400)
3557- error_code &= 0x48200000;
3558+ error_code &= 0x58200000;
3559 else
3560 is_write = error_code & DSISR_ISSTORE;
3561 #else
3562@@ -250,7 +282,7 @@ good_area:
3563 * "undefined". Of those that can be set, this is the only
3564 * one which seems bad.
3565 */
3566- if (error_code & 0x10000000)
3567+ if (error_code & DSISR_GUARDED)
3568 /* Guarded storage error. */
3569 goto bad_area;
3570 #endif /* CONFIG_8xx */
3571@@ -265,7 +297,7 @@ good_area:
3572 * processors use the same I/D cache coherency mechanism
3573 * as embedded.
3574 */
3575- if (error_code & DSISR_PROTFAULT)
3576+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3577 goto bad_area;
3578 #endif /* CONFIG_PPC_STD_MMU */
3579
3580@@ -335,6 +367,23 @@ bad_area:
3581 bad_area_nosemaphore:
3582 /* User mode accesses cause a SIGSEGV */
3583 if (user_mode(regs)) {
3584+
3585+#ifdef CONFIG_PAX_PAGEEXEC
3586+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3587+#ifdef CONFIG_PPC_STD_MMU
3588+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3589+#else
3590+ if (is_exec && regs->nip == address) {
3591+#endif
3592+ switch (pax_handle_fetch_fault(regs)) {
3593+ }
3594+
3595+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3596+ do_group_exit(SIGKILL);
3597+ }
3598+ }
3599+#endif
3600+
3601 _exception(SIGSEGV, regs, code, address);
3602 return 0;
3603 }
3604diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
3605index 5973631..ad617af 100644
3606--- a/arch/powerpc/mm/mem.c
3607+++ b/arch/powerpc/mm/mem.c
3608@@ -250,7 +250,7 @@ static int __init mark_nonram_nosave(void)
3609 {
3610 unsigned long lmb_next_region_start_pfn,
3611 lmb_region_max_pfn;
3612- int i;
3613+ unsigned int i;
3614
3615 for (i = 0; i < lmb.memory.cnt - 1; i++) {
3616 lmb_region_max_pfn =
3617diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
3618index 0d957a4..26d968f 100644
3619--- a/arch/powerpc/mm/mmap_64.c
3620+++ b/arch/powerpc/mm/mmap_64.c
3621@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3622 */
3623 if (mmap_is_legacy()) {
3624 mm->mmap_base = TASK_UNMAPPED_BASE;
3625+
3626+#ifdef CONFIG_PAX_RANDMMAP
3627+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3628+ mm->mmap_base += mm->delta_mmap;
3629+#endif
3630+
3631 mm->get_unmapped_area = arch_get_unmapped_area;
3632 mm->unmap_area = arch_unmap_area;
3633 } else {
3634 mm->mmap_base = mmap_base();
3635+
3636+#ifdef CONFIG_PAX_RANDMMAP
3637+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3638+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3639+#endif
3640+
3641 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3642 mm->unmap_area = arch_unmap_area_topdown;
3643 }
3644diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
3645index ba51948..23009d9 100644
3646--- a/arch/powerpc/mm/slice.c
3647+++ b/arch/powerpc/mm/slice.c
3648@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
3649 if ((mm->task_size - len) < addr)
3650 return 0;
3651 vma = find_vma(mm, addr);
3652- return (!vma || (addr + len) <= vma->vm_start);
3653+ return check_heap_stack_gap(vma, addr, len);
3654 }
3655
3656 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3657@@ -256,7 +256,7 @@ full_search:
3658 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3659 continue;
3660 }
3661- if (!vma || addr + len <= vma->vm_start) {
3662+ if (check_heap_stack_gap(vma, addr, len)) {
3663 /*
3664 * Remember the place where we stopped the search:
3665 */
3666@@ -313,10 +313,14 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3667 }
3668 }
3669
3670- addr = mm->mmap_base;
3671- while (addr > len) {
3672+ if (mm->mmap_base < len)
3673+ addr = -ENOMEM;
3674+ else
3675+ addr = mm->mmap_base - len;
3676+
3677+ while (!IS_ERR_VALUE(addr)) {
3678 /* Go down by chunk size */
3679- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3680+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3681
3682 /* Check for hit with different page size */
3683 mask = slice_range_to_mask(addr, len);
3684@@ -336,7 +340,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3685 * return with success:
3686 */
3687 vma = find_vma(mm, addr);
3688- if (!vma || (addr + len) <= vma->vm_start) {
3689+ if (check_heap_stack_gap(vma, addr, len)) {
3690 /* remember the address as a hint for next time */
3691 if (use_cache)
3692 mm->free_area_cache = addr;
3693@@ -348,7 +352,7 @@ static unsigned long slice_find_area_topdown(struct mm_struct *mm,
3694 mm->cached_hole_size = vma->vm_start - addr;
3695
3696 /* try just below the current vma->vm_start */
3697- addr = vma->vm_start;
3698+ addr = skip_heap_stack_gap(vma, len);
3699 }
3700
3701 /*
3702@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
3703 if (fixed && addr > (mm->task_size - len))
3704 return -EINVAL;
3705
3706+#ifdef CONFIG_PAX_RANDMMAP
3707+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3708+ addr = 0;
3709+#endif
3710+
3711 /* If hint, make sure it matches our alignment restrictions */
3712 if (!fixed && addr) {
3713 addr = _ALIGN_UP(addr, 1ul << pshift);
3714diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c
3715index b5c753d..8f01abe 100644
3716--- a/arch/powerpc/platforms/52xx/lite5200_pm.c
3717+++ b/arch/powerpc/platforms/52xx/lite5200_pm.c
3718@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3719 lite5200_pm_target_state = PM_SUSPEND_ON;
3720 }
3721
3722-static struct platform_suspend_ops lite5200_pm_ops = {
3723+static const struct platform_suspend_ops lite5200_pm_ops = {
3724 .valid = lite5200_pm_valid,
3725 .begin = lite5200_pm_begin,
3726 .prepare = lite5200_pm_prepare,
3727diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3728index a55b0b6..478c18e 100644
3729--- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3730+++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3731@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3732 iounmap(mbar);
3733 }
3734
3735-static struct platform_suspend_ops mpc52xx_pm_ops = {
3736+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3737 .valid = mpc52xx_pm_valid,
3738 .prepare = mpc52xx_pm_prepare,
3739 .enter = mpc52xx_pm_enter,
3740diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c
3741index 08e65fc..643d3ac 100644
3742--- a/arch/powerpc/platforms/83xx/suspend.c
3743+++ b/arch/powerpc/platforms/83xx/suspend.c
3744@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3745 return ret;
3746 }
3747
3748-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3749+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3750 .valid = mpc83xx_suspend_valid,
3751 .begin = mpc83xx_suspend_begin,
3752 .enter = mpc83xx_suspend_enter,
3753diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
3754index ca5bfdf..1602e09 100644
3755--- a/arch/powerpc/platforms/cell/iommu.c
3756+++ b/arch/powerpc/platforms/cell/iommu.c
3757@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struct device *dev, u64 mask)
3758
3759 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3760
3761-struct dma_map_ops dma_iommu_fixed_ops = {
3762+const struct dma_map_ops dma_iommu_fixed_ops = {
3763 .alloc_coherent = dma_fixed_alloc_coherent,
3764 .free_coherent = dma_fixed_free_coherent,
3765 .map_sg = dma_fixed_map_sg,
3766diff --git a/arch/powerpc/platforms/ps3/system-bus.c b/arch/powerpc/platforms/ps3/system-bus.c
3767index e34b305..20e48ec 100644
3768--- a/arch/powerpc/platforms/ps3/system-bus.c
3769+++ b/arch/powerpc/platforms/ps3/system-bus.c
3770@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct device *_dev, u64 mask)
3771 return mask >= DMA_BIT_MASK(32);
3772 }
3773
3774-static struct dma_map_ops ps3_sb_dma_ops = {
3775+static const struct dma_map_ops ps3_sb_dma_ops = {
3776 .alloc_coherent = ps3_alloc_coherent,
3777 .free_coherent = ps3_free_coherent,
3778 .map_sg = ps3_sb_map_sg,
3779@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops = {
3780 .unmap_page = ps3_unmap_page,
3781 };
3782
3783-static struct dma_map_ops ps3_ioc0_dma_ops = {
3784+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3785 .alloc_coherent = ps3_alloc_coherent,
3786 .free_coherent = ps3_free_coherent,
3787 .map_sg = ps3_ioc0_map_sg,
3788diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
3789index f0e6f28..60d53ed 100644
3790--- a/arch/powerpc/platforms/pseries/Kconfig
3791+++ b/arch/powerpc/platforms/pseries/Kconfig
3792@@ -2,6 +2,8 @@ config PPC_PSERIES
3793 depends on PPC64 && PPC_BOOK3S
3794 bool "IBM pSeries & new (POWER5-based) iSeries"
3795 select MPIC
3796+ select PCI_MSI
3797+ select XICS
3798 select PPC_I8259
3799 select PPC_RTAS
3800 select RTAS_ERROR_LOGGING
3801diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
3802index 43c0aca..42c045b 100644
3803--- a/arch/s390/Kconfig
3804+++ b/arch/s390/Kconfig
3805@@ -194,28 +194,26 @@ config AUDIT_ARCH
3806
3807 config S390_SWITCH_AMODE
3808 bool "Switch kernel/user addressing modes"
3809+ default y
3810 help
3811 This option allows to switch the addressing modes of kernel and user
3812- space. The kernel parameter switch_amode=on will enable this feature,
3813- default is disabled. Enabling this (via kernel parameter) on machines
3814- earlier than IBM System z9-109 EC/BC will reduce system performance.
3815+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3816+ will reduce system performance.
3817
3818 Note that this option will also be selected by selecting the execute
3819- protection option below. Enabling the execute protection via the
3820- noexec kernel parameter will also switch the addressing modes,
3821- independent of the switch_amode kernel parameter.
3822+ protection option below. Enabling the execute protection will also
3823+ switch the addressing modes, independent of this option.
3824
3825
3826 config S390_EXEC_PROTECT
3827 bool "Data execute protection"
3828+ default y
3829 select S390_SWITCH_AMODE
3830 help
3831 This option allows to enable a buffer overflow protection for user
3832 space programs and it also selects the addressing mode option above.
3833- The kernel parameter noexec=on will enable this feature and also
3834- switch the addressing modes, default is disabled. Enabling this (via
3835- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3836- will reduce system performance.
3837+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3838+ reduce system performance.
3839
3840 comment "Code generation options"
3841
3842diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
3843index e885442..5e6c303 100644
3844--- a/arch/s390/include/asm/elf.h
3845+++ b/arch/s390/include/asm/elf.h
3846@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3847 that it will "exec", and that there is sufficient room for the brk. */
3848 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3849
3850+#ifdef CONFIG_PAX_ASLR
3851+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3852+
3853+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3854+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3855+#endif
3856+
3857 /* This yields a mask that user programs can use to figure out what
3858 instruction set this CPU supports. */
3859
3860diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
3861index e37478e..9ce0e9f 100644
3862--- a/arch/s390/include/asm/setup.h
3863+++ b/arch/s390/include/asm/setup.h
3864@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3865 void detect_memory_layout(struct mem_chunk chunk[]);
3866
3867 #ifdef CONFIG_S390_SWITCH_AMODE
3868-extern unsigned int switch_amode;
3869+#define switch_amode (1)
3870 #else
3871 #define switch_amode (0)
3872 #endif
3873
3874 #ifdef CONFIG_S390_EXEC_PROTECT
3875-extern unsigned int s390_noexec;
3876+#define s390_noexec (1)
3877 #else
3878 #define s390_noexec (0)
3879 #endif
3880diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
3881index 8377e91..e28e6f1 100644
3882--- a/arch/s390/include/asm/uaccess.h
3883+++ b/arch/s390/include/asm/uaccess.h
3884@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3885 copy_to_user(void __user *to, const void *from, unsigned long n)
3886 {
3887 might_fault();
3888+
3889+ if ((long)n < 0)
3890+ return n;
3891+
3892 if (access_ok(VERIFY_WRITE, to, n))
3893 n = __copy_to_user(to, from, n);
3894 return n;
3895@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
3896 static inline unsigned long __must_check
3897 __copy_from_user(void *to, const void __user *from, unsigned long n)
3898 {
3899+ if ((long)n < 0)
3900+ return n;
3901+
3902 if (__builtin_constant_p(n) && (n <= 256))
3903 return uaccess.copy_from_user_small(n, from, to);
3904 else
3905@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3906 copy_from_user(void *to, const void __user *from, unsigned long n)
3907 {
3908 might_fault();
3909+
3910+ if ((long)n < 0)
3911+ return n;
3912+
3913 if (access_ok(VERIFY_READ, from, n))
3914 n = __copy_from_user(to, from, n);
3915 else
3916diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
3917index 639380a..72e3c02 100644
3918--- a/arch/s390/kernel/module.c
3919+++ b/arch/s390/kernel/module.c
3920@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
3921
3922 /* Increase core size by size of got & plt and set start
3923 offsets for got and plt. */
3924- me->core_size = ALIGN(me->core_size, 4);
3925- me->arch.got_offset = me->core_size;
3926- me->core_size += me->arch.got_size;
3927- me->arch.plt_offset = me->core_size;
3928- me->core_size += me->arch.plt_size;
3929+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3930+ me->arch.got_offset = me->core_size_rw;
3931+ me->core_size_rw += me->arch.got_size;
3932+ me->arch.plt_offset = me->core_size_rx;
3933+ me->core_size_rx += me->arch.plt_size;
3934 return 0;
3935 }
3936
3937@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3938 if (info->got_initialized == 0) {
3939 Elf_Addr *gotent;
3940
3941- gotent = me->module_core + me->arch.got_offset +
3942+ gotent = me->module_core_rw + me->arch.got_offset +
3943 info->got_offset;
3944 *gotent = val;
3945 info->got_initialized = 1;
3946@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3947 else if (r_type == R_390_GOTENT ||
3948 r_type == R_390_GOTPLTENT)
3949 *(unsigned int *) loc =
3950- (val + (Elf_Addr) me->module_core - loc) >> 1;
3951+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3952 else if (r_type == R_390_GOT64 ||
3953 r_type == R_390_GOTPLT64)
3954 *(unsigned long *) loc = val;
3955@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3956 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3957 if (info->plt_initialized == 0) {
3958 unsigned int *ip;
3959- ip = me->module_core + me->arch.plt_offset +
3960+ ip = me->module_core_rx + me->arch.plt_offset +
3961 info->plt_offset;
3962 #ifndef CONFIG_64BIT
3963 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3964@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3965 val - loc + 0xffffUL < 0x1ffffeUL) ||
3966 (r_type == R_390_PLT32DBL &&
3967 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3968- val = (Elf_Addr) me->module_core +
3969+ val = (Elf_Addr) me->module_core_rx +
3970 me->arch.plt_offset +
3971 info->plt_offset;
3972 val += rela->r_addend - loc;
3973@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3974 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3975 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3976 val = val + rela->r_addend -
3977- ((Elf_Addr) me->module_core + me->arch.got_offset);
3978+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3979 if (r_type == R_390_GOTOFF16)
3980 *(unsigned short *) loc = val;
3981 else if (r_type == R_390_GOTOFF32)
3982@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
3983 break;
3984 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3985 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3986- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3987+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3988 rela->r_addend - loc;
3989 if (r_type == R_390_GOTPC)
3990 *(unsigned int *) loc = val;
3991diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
3992index 061479f..dbfb08c 100644
3993--- a/arch/s390/kernel/setup.c
3994+++ b/arch/s390/kernel/setup.c
3995@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *p)
3996 early_param("mem", early_parse_mem);
3997
3998 #ifdef CONFIG_S390_SWITCH_AMODE
3999-unsigned int switch_amode = 0;
4000-EXPORT_SYMBOL_GPL(switch_amode);
4001-
4002 static int set_amode_and_uaccess(unsigned long user_amode,
4003 unsigned long user32_amode)
4004 {
4005@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigned long user_amode,
4006 return 0;
4007 }
4008 }
4009-
4010-/*
4011- * Switch kernel/user addressing modes?
4012- */
4013-static int __init early_parse_switch_amode(char *p)
4014-{
4015- switch_amode = 1;
4016- return 0;
4017-}
4018-early_param("switch_amode", early_parse_switch_amode);
4019-
4020 #else /* CONFIG_S390_SWITCH_AMODE */
4021 static inline int set_amode_and_uaccess(unsigned long user_amode,
4022 unsigned long user32_amode)
4023@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(unsigned long user_amode,
4024 }
4025 #endif /* CONFIG_S390_SWITCH_AMODE */
4026
4027-#ifdef CONFIG_S390_EXEC_PROTECT
4028-unsigned int s390_noexec = 0;
4029-EXPORT_SYMBOL_GPL(s390_noexec);
4030-
4031-/*
4032- * Enable execute protection?
4033- */
4034-static int __init early_parse_noexec(char *p)
4035-{
4036- if (!strncmp(p, "off", 3))
4037- return 0;
4038- switch_amode = 1;
4039- s390_noexec = 1;
4040- return 0;
4041-}
4042-early_param("noexec", early_parse_noexec);
4043-#endif /* CONFIG_S390_EXEC_PROTECT */
4044-
4045 static void setup_addressing_mode(void)
4046 {
4047 if (s390_noexec) {
4048diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
4049index f4558cc..e461f37 100644
4050--- a/arch/s390/mm/mmap.c
4051+++ b/arch/s390/mm/mmap.c
4052@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4053 */
4054 if (mmap_is_legacy()) {
4055 mm->mmap_base = TASK_UNMAPPED_BASE;
4056+
4057+#ifdef CONFIG_PAX_RANDMMAP
4058+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4059+ mm->mmap_base += mm->delta_mmap;
4060+#endif
4061+
4062 mm->get_unmapped_area = arch_get_unmapped_area;
4063 mm->unmap_area = arch_unmap_area;
4064 } else {
4065 mm->mmap_base = mmap_base();
4066+
4067+#ifdef CONFIG_PAX_RANDMMAP
4068+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4069+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4070+#endif
4071+
4072 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4073 mm->unmap_area = arch_unmap_area_topdown;
4074 }
4075@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
4076 */
4077 if (mmap_is_legacy()) {
4078 mm->mmap_base = TASK_UNMAPPED_BASE;
4079+
4080+#ifdef CONFIG_PAX_RANDMMAP
4081+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4082+ mm->mmap_base += mm->delta_mmap;
4083+#endif
4084+
4085 mm->get_unmapped_area = s390_get_unmapped_area;
4086 mm->unmap_area = arch_unmap_area;
4087 } else {
4088 mm->mmap_base = mmap_base();
4089+
4090+#ifdef CONFIG_PAX_RANDMMAP
4091+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4092+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4093+#endif
4094+
4095 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
4096 mm->unmap_area = arch_unmap_area_topdown;
4097 }
4098diff --git a/arch/score/include/asm/system.h b/arch/score/include/asm/system.h
4099index 589d5c7..669e274 100644
4100--- a/arch/score/include/asm/system.h
4101+++ b/arch/score/include/asm/system.h
4102@@ -17,7 +17,7 @@ do { \
4103 #define finish_arch_switch(prev) do {} while (0)
4104
4105 typedef void (*vi_handler_t)(void);
4106-extern unsigned long arch_align_stack(unsigned long sp);
4107+#define arch_align_stack(x) (x)
4108
4109 #define mb() barrier()
4110 #define rmb() barrier()
4111diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
4112index 25d0803..d6c8e36 100644
4113--- a/arch/score/kernel/process.c
4114+++ b/arch/score/kernel/process.c
4115@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_struct *task)
4116
4117 return task_pt_regs(task)->cp0_epc;
4118 }
4119-
4120-unsigned long arch_align_stack(unsigned long sp)
4121-{
4122- return sp;
4123-}
4124diff --git a/arch/sh/boards/mach-hp6xx/pm.c b/arch/sh/boards/mach-hp6xx/pm.c
4125index d936c1a..304a252 100644
4126--- a/arch/sh/boards/mach-hp6xx/pm.c
4127+++ b/arch/sh/boards/mach-hp6xx/pm.c
4128@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_t state)
4129 return 0;
4130 }
4131
4132-static struct platform_suspend_ops hp6x0_pm_ops = {
4133+static const struct platform_suspend_ops hp6x0_pm_ops = {
4134 .enter = hp6x0_pm_enter,
4135 .valid = suspend_valid_only_mem,
4136 };
4137diff --git a/arch/sh/kernel/cpu/sh4/sq.c b/arch/sh/kernel/cpu/sh4/sq.c
4138index 8a8a993..7b3079b 100644
4139--- a/arch/sh/kernel/cpu/sh4/sq.c
4140+++ b/arch/sh/kernel/cpu/sh4/sq.c
4141@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[] = {
4142 NULL,
4143 };
4144
4145-static struct sysfs_ops sq_sysfs_ops = {
4146+static const struct sysfs_ops sq_sysfs_ops = {
4147 .show = sq_sysfs_show,
4148 .store = sq_sysfs_store,
4149 };
4150diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c
4151index ee3c2aa..c49cee6 100644
4152--- a/arch/sh/kernel/cpu/shmobile/pm.c
4153+++ b/arch/sh/kernel/cpu/shmobile/pm.c
4154@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t state)
4155 return 0;
4156 }
4157
4158-static struct platform_suspend_ops sh_pm_ops = {
4159+static const struct platform_suspend_ops sh_pm_ops = {
4160 .enter = sh_pm_enter,
4161 .valid = suspend_valid_only_mem,
4162 };
4163diff --git a/arch/sh/kernel/kgdb.c b/arch/sh/kernel/kgdb.c
4164index 3e532d0..9faa306 100644
4165--- a/arch/sh/kernel/kgdb.c
4166+++ b/arch/sh/kernel/kgdb.c
4167@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
4168 {
4169 }
4170
4171-struct kgdb_arch arch_kgdb_ops = {
4172+const struct kgdb_arch arch_kgdb_ops = {
4173 /* Breakpoint instruction: trapa #0x3c */
4174 #ifdef CONFIG_CPU_LITTLE_ENDIAN
4175 .gdb_bpt_instr = { 0x3c, 0xc3 },
4176diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
4177index afeb710..d1d1289 100644
4178--- a/arch/sh/mm/mmap.c
4179+++ b/arch/sh/mm/mmap.c
4180@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
4181 addr = PAGE_ALIGN(addr);
4182
4183 vma = find_vma(mm, addr);
4184- if (TASK_SIZE - len >= addr &&
4185- (!vma || addr + len <= vma->vm_start))
4186+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4187 return addr;
4188 }
4189
4190@@ -106,7 +105,7 @@ full_search:
4191 }
4192 return -ENOMEM;
4193 }
4194- if (likely(!vma || addr + len <= vma->vm_start)) {
4195+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4196 /*
4197 * Remember the place where we stopped the search:
4198 */
4199@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4200 addr = PAGE_ALIGN(addr);
4201
4202 vma = find_vma(mm, addr);
4203- if (TASK_SIZE - len >= addr &&
4204- (!vma || addr + len <= vma->vm_start))
4205+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
4206 return addr;
4207 }
4208
4209@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4210 /* make sure it can fit in the remaining address space */
4211 if (likely(addr > len)) {
4212 vma = find_vma(mm, addr-len);
4213- if (!vma || addr <= vma->vm_start) {
4214+ if (check_heap_stack_gap(vma, addr - len, len)) {
4215 /* remember the address as a hint for next time */
4216 return (mm->free_area_cache = addr-len);
4217 }
4218@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4219 if (unlikely(mm->mmap_base < len))
4220 goto bottomup;
4221
4222- addr = mm->mmap_base-len;
4223- if (do_colour_align)
4224- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4225+ addr = mm->mmap_base - len;
4226
4227 do {
4228+ if (do_colour_align)
4229+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4230 /*
4231 * Lookup failure means no vma is above this address,
4232 * else if new region fits below vma->vm_start,
4233 * return with success:
4234 */
4235 vma = find_vma(mm, addr);
4236- if (likely(!vma || addr+len <= vma->vm_start)) {
4237+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4238 /* remember the address as a hint for next time */
4239 return (mm->free_area_cache = addr);
4240 }
4241@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
4242 mm->cached_hole_size = vma->vm_start - addr;
4243
4244 /* try just below the current vma->vm_start */
4245- addr = vma->vm_start-len;
4246- if (do_colour_align)
4247- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4248- } while (likely(len < vma->vm_start));
4249+ addr = skip_heap_stack_gap(vma, len);
4250+ } while (!IS_ERR_VALUE(addr));
4251
4252 bottomup:
4253 /*
4254diff --git a/arch/sparc/Makefile b/arch/sparc/Makefile
4255index 113225b..7fd04e7 100644
4256--- a/arch/sparc/Makefile
4257+++ b/arch/sparc/Makefile
4258@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc/oprofile/
4259 # Export what is needed by arch/sparc/boot/Makefile
4260 export VMLINUX_INIT VMLINUX_MAIN
4261 VMLINUX_INIT := $(head-y) $(init-y)
4262-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
4263+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
4264 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
4265 VMLINUX_MAIN += $(drivers-y) $(net-y)
4266
4267diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
4268index f5cc06f..f858d47 100644
4269--- a/arch/sparc/include/asm/atomic_64.h
4270+++ b/arch/sparc/include/asm/atomic_64.h
4271@@ -14,18 +14,40 @@
4272 #define ATOMIC64_INIT(i) { (i) }
4273
4274 #define atomic_read(v) ((v)->counter)
4275+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
4276+{
4277+ return v->counter;
4278+}
4279 #define atomic64_read(v) ((v)->counter)
4280+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
4281+{
4282+ return v->counter;
4283+}
4284
4285 #define atomic_set(v, i) (((v)->counter) = i)
4286+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
4287+{
4288+ v->counter = i;
4289+}
4290 #define atomic64_set(v, i) (((v)->counter) = i)
4291+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
4292+{
4293+ v->counter = i;
4294+}
4295
4296 extern void atomic_add(int, atomic_t *);
4297+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
4298 extern void atomic64_add(long, atomic64_t *);
4299+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
4300 extern void atomic_sub(int, atomic_t *);
4301+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
4302 extern void atomic64_sub(long, atomic64_t *);
4303+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
4304
4305 extern int atomic_add_ret(int, atomic_t *);
4306+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
4307 extern long atomic64_add_ret(long, atomic64_t *);
4308+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
4309 extern int atomic_sub_ret(int, atomic_t *);
4310 extern long atomic64_sub_ret(long, atomic64_t *);
4311
4312@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4313 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
4314
4315 #define atomic_inc_return(v) atomic_add_ret(1, v)
4316+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
4317+{
4318+ return atomic_add_ret_unchecked(1, v);
4319+}
4320 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
4321+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
4322+{
4323+ return atomic64_add_ret_unchecked(1, v);
4324+}
4325
4326 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
4327 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
4328
4329 #define atomic_add_return(i, v) atomic_add_ret(i, v)
4330+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
4331+{
4332+ return atomic_add_ret_unchecked(i, v);
4333+}
4334 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
4335+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
4336+{
4337+ return atomic64_add_ret_unchecked(i, v);
4338+}
4339
4340 /*
4341 * atomic_inc_and_test - increment and test
4342@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4343 * other cases.
4344 */
4345 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
4346+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
4347+{
4348+ return atomic_inc_return_unchecked(v) == 0;
4349+}
4350 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
4351
4352 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
4353@@ -59,30 +101,65 @@ extern long atomic64_sub_ret(long, atomic64_t *);
4354 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
4355
4356 #define atomic_inc(v) atomic_add(1, v)
4357+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
4358+{
4359+ atomic_add_unchecked(1, v);
4360+}
4361 #define atomic64_inc(v) atomic64_add(1, v)
4362+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
4363+{
4364+ atomic64_add_unchecked(1, v);
4365+}
4366
4367 #define atomic_dec(v) atomic_sub(1, v)
4368+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
4369+{
4370+ atomic_sub_unchecked(1, v);
4371+}
4372 #define atomic64_dec(v) atomic64_sub(1, v)
4373+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
4374+{
4375+ atomic64_sub_unchecked(1, v);
4376+}
4377
4378 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
4379 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
4380
4381 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
4382+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
4383+{
4384+ return cmpxchg(&v->counter, old, new);
4385+}
4386 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
4387+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
4388+{
4389+ return xchg(&v->counter, new);
4390+}
4391
4392 static inline int atomic_add_unless(atomic_t *v, int a, int u)
4393 {
4394- int c, old;
4395+ int c, old, new;
4396 c = atomic_read(v);
4397 for (;;) {
4398- if (unlikely(c == (u)))
4399+ if (unlikely(c == u))
4400 break;
4401- old = atomic_cmpxchg((v), c, c + (a));
4402+
4403+ asm volatile("addcc %2, %0, %0\n"
4404+
4405+#ifdef CONFIG_PAX_REFCOUNT
4406+ "tvs %%icc, 6\n"
4407+#endif
4408+
4409+ : "=r" (new)
4410+ : "0" (c), "ir" (a)
4411+ : "cc");
4412+
4413+ old = atomic_cmpxchg(v, c, new);
4414 if (likely(old == c))
4415 break;
4416 c = old;
4417 }
4418- return c != (u);
4419+ return c != u;
4420 }
4421
4422 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
4423@@ -90,20 +167,35 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
4424 #define atomic64_cmpxchg(v, o, n) \
4425 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
4426 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
4427+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
4428+{
4429+ return xchg(&v->counter, new);
4430+}
4431
4432 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
4433 {
4434- long c, old;
4435+ long c, old, new;
4436 c = atomic64_read(v);
4437 for (;;) {
4438- if (unlikely(c == (u)))
4439+ if (unlikely(c == u))
4440 break;
4441- old = atomic64_cmpxchg((v), c, c + (a));
4442+
4443+ asm volatile("addcc %2, %0, %0\n"
4444+
4445+#ifdef CONFIG_PAX_REFCOUNT
4446+ "tvs %%xcc, 6\n"
4447+#endif
4448+
4449+ : "=r" (new)
4450+ : "0" (c), "ir" (a)
4451+ : "cc");
4452+
4453+ old = atomic64_cmpxchg(v, c, new);
4454 if (likely(old == c))
4455 break;
4456 c = old;
4457 }
4458- return c != (u);
4459+ return c != u;
4460 }
4461
4462 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
4463diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
4464index 41f85ae..fb54d5e 100644
4465--- a/arch/sparc/include/asm/cache.h
4466+++ b/arch/sparc/include/asm/cache.h
4467@@ -8,7 +8,7 @@
4468 #define _SPARC_CACHE_H
4469
4470 #define L1_CACHE_SHIFT 5
4471-#define L1_CACHE_BYTES 32
4472+#define L1_CACHE_BYTES 32UL
4473 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
4474
4475 #ifdef CONFIG_SPARC32
4476diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
4477index 5a8c308..38def92 100644
4478--- a/arch/sparc/include/asm/dma-mapping.h
4479+++ b/arch/sparc/include/asm/dma-mapping.h
4480@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *dev, u64 dma_mask);
4481 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
4482 #define dma_is_consistent(d, h) (1)
4483
4484-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
4485+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
4486 extern struct bus_type pci_bus_type;
4487
4488-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4489+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
4490 {
4491 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
4492 if (dev->bus == &pci_bus_type)
4493@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
4494 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4495 dma_addr_t *dma_handle, gfp_t flag)
4496 {
4497- struct dma_map_ops *ops = get_dma_ops(dev);
4498+ const struct dma_map_ops *ops = get_dma_ops(dev);
4499 void *cpu_addr;
4500
4501 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
4502@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(struct device *dev, size_t size,
4503 static inline void dma_free_coherent(struct device *dev, size_t size,
4504 void *cpu_addr, dma_addr_t dma_handle)
4505 {
4506- struct dma_map_ops *ops = get_dma_ops(dev);
4507+ const struct dma_map_ops *ops = get_dma_ops(dev);
4508
4509 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
4510 ops->free_coherent(dev, size, cpu_addr, dma_handle);
4511diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
4512index 381a1b5..b97e3ff 100644
4513--- a/arch/sparc/include/asm/elf_32.h
4514+++ b/arch/sparc/include/asm/elf_32.h
4515@@ -116,6 +116,13 @@ typedef struct {
4516
4517 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
4518
4519+#ifdef CONFIG_PAX_ASLR
4520+#define PAX_ELF_ET_DYN_BASE 0x10000UL
4521+
4522+#define PAX_DELTA_MMAP_LEN 16
4523+#define PAX_DELTA_STACK_LEN 16
4524+#endif
4525+
4526 /* This yields a mask that user programs can use to figure out what
4527 instruction set this cpu supports. This can NOT be done in userspace
4528 on Sparc. */
4529diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
4530index 9968085..c2106ef 100644
4531--- a/arch/sparc/include/asm/elf_64.h
4532+++ b/arch/sparc/include/asm/elf_64.h
4533@@ -163,6 +163,12 @@ typedef struct {
4534 #define ELF_ET_DYN_BASE 0x0000010000000000UL
4535 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
4536
4537+#ifdef CONFIG_PAX_ASLR
4538+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
4539+
4540+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
4541+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
4542+#endif
4543
4544 /* This yields a mask that user programs can use to figure out what
4545 instruction set this cpu supports. */
4546diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
4547index e0cabe7..efd60f1 100644
4548--- a/arch/sparc/include/asm/pgtable_32.h
4549+++ b/arch/sparc/include/asm/pgtable_32.h
4550@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
4551 BTFIXUPDEF_INT(page_none)
4552 BTFIXUPDEF_INT(page_copy)
4553 BTFIXUPDEF_INT(page_readonly)
4554+
4555+#ifdef CONFIG_PAX_PAGEEXEC
4556+BTFIXUPDEF_INT(page_shared_noexec)
4557+BTFIXUPDEF_INT(page_copy_noexec)
4558+BTFIXUPDEF_INT(page_readonly_noexec)
4559+#endif
4560+
4561 BTFIXUPDEF_INT(page_kernel)
4562
4563 #define PMD_SHIFT SUN4C_PMD_SHIFT
4564@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
4565 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
4566 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
4567
4568+#ifdef CONFIG_PAX_PAGEEXEC
4569+extern pgprot_t PAGE_SHARED_NOEXEC;
4570+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
4571+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
4572+#else
4573+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4574+# define PAGE_COPY_NOEXEC PAGE_COPY
4575+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4576+#endif
4577+
4578 extern unsigned long page_kernel;
4579
4580 #ifdef MODULE
4581diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
4582index 1407c07..7e10231 100644
4583--- a/arch/sparc/include/asm/pgtsrmmu.h
4584+++ b/arch/sparc/include/asm/pgtsrmmu.h
4585@@ -115,6 +115,13 @@
4586 SRMMU_EXEC | SRMMU_REF)
4587 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
4588 SRMMU_EXEC | SRMMU_REF)
4589+
4590+#ifdef CONFIG_PAX_PAGEEXEC
4591+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
4592+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4593+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
4594+#endif
4595+
4596 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
4597 SRMMU_DIRTY | SRMMU_REF)
4598
4599diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
4600index 43e5147..47622a1 100644
4601--- a/arch/sparc/include/asm/spinlock_64.h
4602+++ b/arch/sparc/include/asm/spinlock_64.h
4603@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla
4604
4605 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
4606
4607-static void inline arch_read_lock(raw_rwlock_t *lock)
4608+static inline void arch_read_lock(raw_rwlock_t *lock)
4609 {
4610 unsigned long tmp1, tmp2;
4611
4612 __asm__ __volatile__ (
4613 "1: ldsw [%2], %0\n"
4614 " brlz,pn %0, 2f\n"
4615-"4: add %0, 1, %1\n"
4616+"4: addcc %0, 1, %1\n"
4617+
4618+#ifdef CONFIG_PAX_REFCOUNT
4619+" tvs %%icc, 6\n"
4620+#endif
4621+
4622 " cas [%2], %0, %1\n"
4623 " cmp %0, %1\n"
4624 " bne,pn %%icc, 1b\n"
4625@@ -112,10 +117,10 @@ static void inline arch_read_lock(raw_rwlock_t *lock)
4626 " .previous"
4627 : "=&r" (tmp1), "=&r" (tmp2)
4628 : "r" (lock)
4629- : "memory");
4630+ : "memory", "cc");
4631 }
4632
4633-static int inline arch_read_trylock(raw_rwlock_t *lock)
4634+static inline int arch_read_trylock(raw_rwlock_t *lock)
4635 {
4636 int tmp1, tmp2;
4637
4638@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4639 "1: ldsw [%2], %0\n"
4640 " brlz,a,pn %0, 2f\n"
4641 " mov 0, %0\n"
4642-" add %0, 1, %1\n"
4643+" addcc %0, 1, %1\n"
4644+
4645+#ifdef CONFIG_PAX_REFCOUNT
4646+" tvs %%icc, 6\n"
4647+#endif
4648+
4649 " cas [%2], %0, %1\n"
4650 " cmp %0, %1\n"
4651 " bne,pn %%icc, 1b\n"
4652@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_rwlock_t *lock)
4653 return tmp1;
4654 }
4655
4656-static void inline arch_read_unlock(raw_rwlock_t *lock)
4657+static inline void arch_read_unlock(raw_rwlock_t *lock)
4658 {
4659 unsigned long tmp1, tmp2;
4660
4661 __asm__ __volatile__(
4662 "1: lduw [%2], %0\n"
4663-" sub %0, 1, %1\n"
4664+" subcc %0, 1, %1\n"
4665+
4666+#ifdef CONFIG_PAX_REFCOUNT
4667+" tvs %%icc, 6\n"
4668+#endif
4669+
4670 " cas [%2], %0, %1\n"
4671 " cmp %0, %1\n"
4672 " bne,pn %%xcc, 1b\n"
4673@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock)
4674 : "memory");
4675 }
4676
4677-static void inline arch_write_lock(raw_rwlock_t *lock)
4678+static inline void arch_write_lock(raw_rwlock_t *lock)
4679 {
4680 unsigned long mask, tmp1, tmp2;
4681
4682@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock)
4683 : "memory");
4684 }
4685
4686-static void inline arch_write_unlock(raw_rwlock_t *lock)
4687+static inline void arch_write_unlock(raw_rwlock_t *lock)
4688 {
4689 __asm__ __volatile__(
4690 " stw %%g0, [%0]"
4691@@ -186,7 +201,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock)
4692 : "memory");
4693 }
4694
4695-static int inline arch_write_trylock(raw_rwlock_t *lock)
4696+static inline int arch_write_trylock(raw_rwlock_t *lock)
4697 {
4698 unsigned long mask, tmp1, tmp2, result;
4699
4700diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
4701index 844d73a..f787fb9 100644
4702--- a/arch/sparc/include/asm/thread_info_32.h
4703+++ b/arch/sparc/include/asm/thread_info_32.h
4704@@ -50,6 +50,8 @@ struct thread_info {
4705 unsigned long w_saved;
4706
4707 struct restart_block restart_block;
4708+
4709+ unsigned long lowest_stack;
4710 };
4711
4712 /*
4713diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
4714index f78ad9a..9f55fc7 100644
4715--- a/arch/sparc/include/asm/thread_info_64.h
4716+++ b/arch/sparc/include/asm/thread_info_64.h
4717@@ -68,6 +68,8 @@ struct thread_info {
4718 struct pt_regs *kern_una_regs;
4719 unsigned int kern_una_insn;
4720
4721+ unsigned long lowest_stack;
4722+
4723 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4724 };
4725
4726diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
4727index e88fbe5..96b0ce5 100644
4728--- a/arch/sparc/include/asm/uaccess.h
4729+++ b/arch/sparc/include/asm/uaccess.h
4730@@ -1,5 +1,13 @@
4731 #ifndef ___ASM_SPARC_UACCESS_H
4732 #define ___ASM_SPARC_UACCESS_H
4733+
4734+#ifdef __KERNEL__
4735+#ifndef __ASSEMBLY__
4736+#include <linux/types.h>
4737+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4738+#endif
4739+#endif
4740+
4741 #if defined(__sparc__) && defined(__arch64__)
4742 #include <asm/uaccess_64.h>
4743 #else
4744diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
4745index 8303ac4..07f333d 100644
4746--- a/arch/sparc/include/asm/uaccess_32.h
4747+++ b/arch/sparc/include/asm/uaccess_32.h
4748@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
4749
4750 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4751 {
4752- if (n && __access_ok((unsigned long) to, n))
4753+ if ((long)n < 0)
4754+ return n;
4755+
4756+ if (n && __access_ok((unsigned long) to, n)) {
4757+ if (!__builtin_constant_p(n))
4758+ check_object_size(from, n, true);
4759 return __copy_user(to, (__force void __user *) from, n);
4760- else
4761+ } else
4762 return n;
4763 }
4764
4765 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4766 {
4767+ if ((long)n < 0)
4768+ return n;
4769+
4770+ if (!__builtin_constant_p(n))
4771+ check_object_size(from, n, true);
4772+
4773 return __copy_user(to, (__force void __user *) from, n);
4774 }
4775
4776 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4777 {
4778- if (n && __access_ok((unsigned long) from, n))
4779+ if ((long)n < 0)
4780+ return n;
4781+
4782+ if (n && __access_ok((unsigned long) from, n)) {
4783+ if (!__builtin_constant_p(n))
4784+ check_object_size(to, n, false);
4785 return __copy_user((__force void __user *) to, from, n);
4786- else
4787+ } else
4788 return n;
4789 }
4790
4791 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4792 {
4793+ if ((long)n < 0)
4794+ return n;
4795+
4796 return __copy_user((__force void __user *) to, from, n);
4797 }
4798
4799diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
4800index 9ea271e..7b8a271 100644
4801--- a/arch/sparc/include/asm/uaccess_64.h
4802+++ b/arch/sparc/include/asm/uaccess_64.h
4803@@ -9,6 +9,7 @@
4804 #include <linux/compiler.h>
4805 #include <linux/string.h>
4806 #include <linux/thread_info.h>
4807+#include <linux/kernel.h>
4808 #include <asm/asi.h>
4809 #include <asm/system.h>
4810 #include <asm/spitfire.h>
4811@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
4812 static inline unsigned long __must_check
4813 copy_from_user(void *to, const void __user *from, unsigned long size)
4814 {
4815- unsigned long ret = ___copy_from_user(to, from, size);
4816+ unsigned long ret;
4817
4818+ if ((long)size < 0 || size > INT_MAX)
4819+ return size;
4820+
4821+ if (!__builtin_constant_p(size))
4822+ check_object_size(to, size, false);
4823+
4824+ ret = ___copy_from_user(to, from, size);
4825 if (unlikely(ret))
4826 ret = copy_from_user_fixup(to, from, size);
4827 return ret;
4828@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
4829 static inline unsigned long __must_check
4830 copy_to_user(void __user *to, const void *from, unsigned long size)
4831 {
4832- unsigned long ret = ___copy_to_user(to, from, size);
4833+ unsigned long ret;
4834
4835+ if ((long)size < 0 || size > INT_MAX)
4836+ return size;
4837+
4838+ if (!__builtin_constant_p(size))
4839+ check_object_size(from, size, true);
4840+
4841+ ret = ___copy_to_user(to, from, size);
4842 if (unlikely(ret))
4843 ret = copy_to_user_fixup(to, from, size);
4844 return ret;
4845diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
4846index 2782681..77ded84 100644
4847--- a/arch/sparc/kernel/Makefile
4848+++ b/arch/sparc/kernel/Makefile
4849@@ -3,7 +3,7 @@
4850 #
4851
4852 asflags-y := -ansi
4853-ccflags-y := -Werror
4854+#ccflags-y := -Werror
4855
4856 extra-y := head_$(BITS).o
4857 extra-y += init_task.o
4858diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
4859index 7690cc2..ece64c9 100644
4860--- a/arch/sparc/kernel/iommu.c
4861+++ b/arch/sparc/kernel/iommu.c
4862@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
4863 spin_unlock_irqrestore(&iommu->lock, flags);
4864 }
4865
4866-static struct dma_map_ops sun4u_dma_ops = {
4867+static const struct dma_map_ops sun4u_dma_ops = {
4868 .alloc_coherent = dma_4u_alloc_coherent,
4869 .free_coherent = dma_4u_free_coherent,
4870 .map_page = dma_4u_map_page,
4871@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops = {
4872 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4873 };
4874
4875-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4876+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4877 EXPORT_SYMBOL(dma_ops);
4878
4879 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4880diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
4881index 9f61fd8..bd048db 100644
4882--- a/arch/sparc/kernel/ioport.c
4883+++ b/arch/sparc/kernel/ioport.c
4884@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
4885 BUG();
4886 }
4887
4888-struct dma_map_ops sbus_dma_ops = {
4889+const struct dma_map_ops sbus_dma_ops = {
4890 .alloc_coherent = sbus_alloc_coherent,
4891 .free_coherent = sbus_free_coherent,
4892 .map_page = sbus_map_page,
4893@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4894 .sync_sg_for_device = sbus_sync_sg_for_device,
4895 };
4896
4897-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4898+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4899 EXPORT_SYMBOL(dma_ops);
4900
4901 static int __init sparc_register_ioport(void)
4902@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist *
4903 }
4904 }
4905
4906-struct dma_map_ops pci32_dma_ops = {
4907+const struct dma_map_ops pci32_dma_ops = {
4908 .alloc_coherent = pci32_alloc_coherent,
4909 .free_coherent = pci32_free_coherent,
4910 .map_page = pci32_map_page,
4911diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c
4912index 04df4ed..55c4b6e 100644
4913--- a/arch/sparc/kernel/kgdb_32.c
4914+++ b/arch/sparc/kernel/kgdb_32.c
4915@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4916 {
4917 }
4918
4919-struct kgdb_arch arch_kgdb_ops = {
4920+const struct kgdb_arch arch_kgdb_ops = {
4921 /* Breakpoint instruction: ta 0x7d */
4922 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4923 };
4924diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
4925index f5a0fd4..d886f71 100644
4926--- a/arch/sparc/kernel/kgdb_64.c
4927+++ b/arch/sparc/kernel/kgdb_64.c
4928@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4929 {
4930 }
4931
4932-struct kgdb_arch arch_kgdb_ops = {
4933+const struct kgdb_arch arch_kgdb_ops = {
4934 /* Breakpoint instruction: ta 0x72 */
4935 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4936 };
4937diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c
4938index 23c33ff..d137fbd 100644
4939--- a/arch/sparc/kernel/pci_sun4v.c
4940+++ b/arch/sparc/kernel/pci_sun4v.c
4941@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
4942 spin_unlock_irqrestore(&iommu->lock, flags);
4943 }
4944
4945-static struct dma_map_ops sun4v_dma_ops = {
4946+static const struct dma_map_ops sun4v_dma_ops = {
4947 .alloc_coherent = dma_4v_alloc_coherent,
4948 .free_coherent = dma_4v_free_coherent,
4949 .map_page = dma_4v_map_page,
4950diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
4951index c49865b..b41a81b 100644
4952--- a/arch/sparc/kernel/process_32.c
4953+++ b/arch/sparc/kernel/process_32.c
4954@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4955 rw->ins[4], rw->ins[5],
4956 rw->ins[6],
4957 rw->ins[7]);
4958- printk("%pS\n", (void *) rw->ins[7]);
4959+ printk("%pA\n", (void *) rw->ins[7]);
4960 rw = (struct reg_window32 *) rw->ins[6];
4961 }
4962 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4963@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4964
4965 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4966 r->psr, r->pc, r->npc, r->y, print_tainted());
4967- printk("PC: <%pS>\n", (void *) r->pc);
4968+ printk("PC: <%pA>\n", (void *) r->pc);
4969 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4970 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4971 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4972 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4973 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4974 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4975- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4976+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4977
4978 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4979 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4980@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
4981 rw = (struct reg_window32 *) fp;
4982 pc = rw->ins[7];
4983 printk("[%08lx : ", pc);
4984- printk("%pS ] ", (void *) pc);
4985+ printk("%pA ] ", (void *) pc);
4986 fp = rw->ins[6];
4987 } while (++count < 16);
4988 printk("\n");
4989diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
4990index cb70476..3d0c191 100644
4991--- a/arch/sparc/kernel/process_64.c
4992+++ b/arch/sparc/kernel/process_64.c
4993@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_regs *regs)
4994 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4995 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4996 if (regs->tstate & TSTATE_PRIV)
4997- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4998+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4999 }
5000
5001 void show_regs(struct pt_regs *regs)
5002 {
5003 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
5004 regs->tpc, regs->tnpc, regs->y, print_tainted());
5005- printk("TPC: <%pS>\n", (void *) regs->tpc);
5006+ printk("TPC: <%pA>\n", (void *) regs->tpc);
5007 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
5008 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
5009 regs->u_regs[3]);
5010@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
5011 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
5012 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
5013 regs->u_regs[15]);
5014- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
5015+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
5016 show_regwindow(regs);
5017 }
5018
5019@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void)
5020 ((tp && tp->task) ? tp->task->pid : -1));
5021
5022 if (gp->tstate & TSTATE_PRIV) {
5023- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
5024+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
5025 (void *) gp->tpc,
5026 (void *) gp->o7,
5027 (void *) gp->i7,
5028diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
5029index 6edc4e5..06a69b4 100644
5030--- a/arch/sparc/kernel/sigutil_64.c
5031+++ b/arch/sparc/kernel/sigutil_64.c
5032@@ -2,6 +2,7 @@
5033 #include <linux/types.h>
5034 #include <linux/thread_info.h>
5035 #include <linux/uaccess.h>
5036+#include <linux/errno.h>
5037
5038 #include <asm/sigcontext.h>
5039 #include <asm/fpumacro.h>
5040diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
5041index 3a82e65..ce0a53a 100644
5042--- a/arch/sparc/kernel/sys_sparc_32.c
5043+++ b/arch/sparc/kernel/sys_sparc_32.c
5044@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5045 if (ARCH_SUN4C && len > 0x20000000)
5046 return -ENOMEM;
5047 if (!addr)
5048- addr = TASK_UNMAPPED_BASE;
5049+ addr = current->mm->mmap_base;
5050
5051 if (flags & MAP_SHARED)
5052 addr = COLOUR_ALIGN(addr);
5053@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5054 }
5055 if (TASK_SIZE - PAGE_SIZE - len < addr)
5056 return -ENOMEM;
5057- if (!vmm || addr + len <= vmm->vm_start)
5058+ if (check_heap_stack_gap(vmm, addr, len))
5059 return addr;
5060 addr = vmm->vm_end;
5061 if (flags & MAP_SHARED)
5062diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
5063index cfa0e19..98972ac 100644
5064--- a/arch/sparc/kernel/sys_sparc_64.c
5065+++ b/arch/sparc/kernel/sys_sparc_64.c
5066@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5067 /* We do not accept a shared mapping if it would violate
5068 * cache aliasing constraints.
5069 */
5070- if ((flags & MAP_SHARED) &&
5071+ if ((filp || (flags & MAP_SHARED)) &&
5072 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5073 return -EINVAL;
5074 return addr;
5075@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5076 if (filp || (flags & MAP_SHARED))
5077 do_color_align = 1;
5078
5079+#ifdef CONFIG_PAX_RANDMMAP
5080+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5081+#endif
5082+
5083 if (addr) {
5084 if (do_color_align)
5085 addr = COLOUR_ALIGN(addr, pgoff);
5086@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
5087 addr = PAGE_ALIGN(addr);
5088
5089 vma = find_vma(mm, addr);
5090- if (task_size - len >= addr &&
5091- (!vma || addr + len <= vma->vm_start))
5092+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5093 return addr;
5094 }
5095
5096 if (len > mm->cached_hole_size) {
5097- start_addr = addr = mm->free_area_cache;
5098+ start_addr = addr = mm->free_area_cache;
5099 } else {
5100- start_addr = addr = TASK_UNMAPPED_BASE;
5101+ start_addr = addr = mm->mmap_base;
5102 mm->cached_hole_size = 0;
5103 }
5104
5105@@ -175,14 +178,14 @@ full_search:
5106 vma = find_vma(mm, VA_EXCLUDE_END);
5107 }
5108 if (unlikely(task_size < addr)) {
5109- if (start_addr != TASK_UNMAPPED_BASE) {
5110- start_addr = addr = TASK_UNMAPPED_BASE;
5111+ if (start_addr != mm->mmap_base) {
5112+ start_addr = addr = mm->mmap_base;
5113 mm->cached_hole_size = 0;
5114 goto full_search;
5115 }
5116 return -ENOMEM;
5117 }
5118- if (likely(!vma || addr + len <= vma->vm_start)) {
5119+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5120 /*
5121 * Remember the place where we stopped the search:
5122 */
5123@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5124 /* We do not accept a shared mapping if it would violate
5125 * cache aliasing constraints.
5126 */
5127- if ((flags & MAP_SHARED) &&
5128+ if ((filp || (flags & MAP_SHARED)) &&
5129 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
5130 return -EINVAL;
5131 return addr;
5132@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5133 addr = PAGE_ALIGN(addr);
5134
5135 vma = find_vma(mm, addr);
5136- if (task_size - len >= addr &&
5137- (!vma || addr + len <= vma->vm_start))
5138+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5139 return addr;
5140 }
5141
5142@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5143 /* make sure it can fit in the remaining address space */
5144 if (likely(addr > len)) {
5145 vma = find_vma(mm, addr-len);
5146- if (!vma || addr <= vma->vm_start) {
5147+ if (check_heap_stack_gap(vma, addr - len, len)) {
5148 /* remember the address as a hint for next time */
5149 return (mm->free_area_cache = addr-len);
5150 }
5151@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5152 if (unlikely(mm->mmap_base < len))
5153 goto bottomup;
5154
5155- addr = mm->mmap_base-len;
5156- if (do_color_align)
5157- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5158+ addr = mm->mmap_base - len;
5159
5160 do {
5161+ if (do_color_align)
5162+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5163 /*
5164 * Lookup failure means no vma is above this address,
5165 * else if new region fits below vma->vm_start,
5166 * return with success:
5167 */
5168 vma = find_vma(mm, addr);
5169- if (likely(!vma || addr+len <= vma->vm_start)) {
5170+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5171 /* remember the address as a hint for next time */
5172 return (mm->free_area_cache = addr);
5173 }
5174@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
5175 mm->cached_hole_size = vma->vm_start - addr;
5176
5177 /* try just below the current vma->vm_start */
5178- addr = vma->vm_start-len;
5179- if (do_color_align)
5180- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
5181- } while (likely(len < vma->vm_start));
5182+ addr = skip_heap_stack_gap(vma, len);
5183+ } while (!IS_ERR_VALUE(addr));
5184
5185 bottomup:
5186 /*
5187@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5188 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
5189 sysctl_legacy_va_layout) {
5190 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5191+
5192+#ifdef CONFIG_PAX_RANDMMAP
5193+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5194+ mm->mmap_base += mm->delta_mmap;
5195+#endif
5196+
5197 mm->get_unmapped_area = arch_get_unmapped_area;
5198 mm->unmap_area = arch_unmap_area;
5199 } else {
5200@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5201 gap = (task_size / 6 * 5);
5202
5203 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
5204+
5205+#ifdef CONFIG_PAX_RANDMMAP
5206+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5207+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5208+#endif
5209+
5210 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5211 mm->unmap_area = arch_unmap_area_topdown;
5212 }
5213diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
5214index c0490c7..84959d1 100644
5215--- a/arch/sparc/kernel/traps_32.c
5216+++ b/arch/sparc/kernel/traps_32.c
5217@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
5218 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
5219 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
5220
5221+extern void gr_handle_kernel_exploit(void);
5222+
5223 void die_if_kernel(char *str, struct pt_regs *regs)
5224 {
5225 static int die_counter;
5226@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5227 count++ < 30 &&
5228 (((unsigned long) rw) >= PAGE_OFFSET) &&
5229 !(((unsigned long) rw) & 0x7)) {
5230- printk("Caller[%08lx]: %pS\n", rw->ins[7],
5231+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
5232 (void *) rw->ins[7]);
5233 rw = (struct reg_window32 *)rw->ins[6];
5234 }
5235 }
5236 printk("Instruction DUMP:");
5237 instruction_dump ((unsigned long *) regs->pc);
5238- if(regs->psr & PSR_PS)
5239+ if(regs->psr & PSR_PS) {
5240+ gr_handle_kernel_exploit();
5241 do_exit(SIGKILL);
5242+ }
5243 do_exit(SIGSEGV);
5244 }
5245
5246diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
5247index 10f7bb9..cdb6793 100644
5248--- a/arch/sparc/kernel/traps_64.c
5249+++ b/arch/sparc/kernel/traps_64.c
5250@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
5251 i + 1,
5252 p->trapstack[i].tstate, p->trapstack[i].tpc,
5253 p->trapstack[i].tnpc, p->trapstack[i].tt);
5254- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
5255+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
5256 }
5257 }
5258
5259@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
5260
5261 lvl -= 0x100;
5262 if (regs->tstate & TSTATE_PRIV) {
5263+
5264+#ifdef CONFIG_PAX_REFCOUNT
5265+ if (lvl == 6)
5266+ pax_report_refcount_overflow(regs);
5267+#endif
5268+
5269 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
5270 die_if_kernel(buffer, regs);
5271 }
5272@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
5273 void bad_trap_tl1(struct pt_regs *regs, long lvl)
5274 {
5275 char buffer[32];
5276-
5277+
5278 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
5279 0, lvl, SIGTRAP) == NOTIFY_STOP)
5280 return;
5281
5282+#ifdef CONFIG_PAX_REFCOUNT
5283+ if (lvl == 6)
5284+ pax_report_refcount_overflow(regs);
5285+#endif
5286+
5287 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
5288
5289 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
5290@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
5291 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
5292 printk("%s" "ERROR(%d): ",
5293 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
5294- printk("TPC<%pS>\n", (void *) regs->tpc);
5295+ printk("TPC<%pA>\n", (void *) regs->tpc);
5296 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
5297 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
5298 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
5299@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5300 smp_processor_id(),
5301 (type & 0x1) ? 'I' : 'D',
5302 regs->tpc);
5303- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
5304+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
5305 panic("Irrecoverable Cheetah+ parity error.");
5306 }
5307
5308@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
5309 smp_processor_id(),
5310 (type & 0x1) ? 'I' : 'D',
5311 regs->tpc);
5312- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
5313+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
5314 }
5315
5316 struct sun4v_error_entry {
5317@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
5318
5319 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
5320 regs->tpc, tl);
5321- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
5322+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
5323 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5324- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
5325+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
5326 (void *) regs->u_regs[UREG_I7]);
5327 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
5328 "pte[%lx] error[%lx]\n",
5329@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
5330
5331 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
5332 regs->tpc, tl);
5333- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
5334+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
5335 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
5336- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
5337+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
5338 (void *) regs->u_regs[UREG_I7]);
5339 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
5340 "pte[%lx] error[%lx]\n",
5341@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
5342 fp = (unsigned long)sf->fp + STACK_BIAS;
5343 }
5344
5345- printk(" [%016lx] %pS\n", pc, (void *) pc);
5346+ printk(" [%016lx] %pA\n", pc, (void *) pc);
5347 } while (++count < 16);
5348 }
5349
5350@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
5351 return (struct reg_window *) (fp + STACK_BIAS);
5352 }
5353
5354+extern void gr_handle_kernel_exploit(void);
5355+
5356 void die_if_kernel(char *str, struct pt_regs *regs)
5357 {
5358 static int die_counter;
5359@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5360 while (rw &&
5361 count++ < 30&&
5362 is_kernel_stack(current, rw)) {
5363- printk("Caller[%016lx]: %pS\n", rw->ins[7],
5364+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
5365 (void *) rw->ins[7]);
5366
5367 rw = kernel_stack_up(rw);
5368@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_regs *regs)
5369 }
5370 user_instruction_dump ((unsigned int __user *) regs->tpc);
5371 }
5372- if (regs->tstate & TSTATE_PRIV)
5373+ if (regs->tstate & TSTATE_PRIV) {
5374+ gr_handle_kernel_exploit();
5375 do_exit(SIGKILL);
5376+ }
5377+
5378 do_exit(SIGSEGV);
5379 }
5380 EXPORT_SYMBOL(die_if_kernel);
5381diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S
5382index be183fe..1c8d332 100644
5383--- a/arch/sparc/kernel/una_asm_64.S
5384+++ b/arch/sparc/kernel/una_asm_64.S
5385@@ -127,7 +127,7 @@ do_int_load:
5386 wr %o5, 0x0, %asi
5387 retl
5388 mov 0, %o0
5389- .size __do_int_load, .-__do_int_load
5390+ .size do_int_load, .-do_int_load
5391
5392 .section __ex_table,"a"
5393 .word 4b, __retl_efault
5394diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
5395index 3792099..2af17d8 100644
5396--- a/arch/sparc/kernel/unaligned_64.c
5397+++ b/arch/sparc/kernel/unaligned_64.c
5398@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs *regs)
5399 if (count < 5) {
5400 last_time = jiffies;
5401 count++;
5402- printk("Kernel unaligned access at TPC[%lx] %pS\n",
5403+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
5404 regs->tpc, (void *) regs->tpc);
5405 }
5406 }
5407diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
5408index e75faf0..24f12f9 100644
5409--- a/arch/sparc/lib/Makefile
5410+++ b/arch/sparc/lib/Makefile
5411@@ -2,7 +2,7 @@
5412 #
5413
5414 asflags-y := -ansi -DST_DIV0=0x02
5415-ccflags-y := -Werror
5416+#ccflags-y := -Werror
5417
5418 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
5419 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
5420diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
5421index 0268210..f0291ca 100644
5422--- a/arch/sparc/lib/atomic_64.S
5423+++ b/arch/sparc/lib/atomic_64.S
5424@@ -18,7 +18,12 @@
5425 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5426 BACKOFF_SETUP(%o2)
5427 1: lduw [%o1], %g1
5428- add %g1, %o0, %g7
5429+ addcc %g1, %o0, %g7
5430+
5431+#ifdef CONFIG_PAX_REFCOUNT
5432+ tvs %icc, 6
5433+#endif
5434+
5435 cas [%o1], %g1, %g7
5436 cmp %g1, %g7
5437 bne,pn %icc, 2f
5438@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
5439 2: BACKOFF_SPIN(%o2, %o3, 1b)
5440 .size atomic_add, .-atomic_add
5441
5442+ .globl atomic_add_unchecked
5443+ .type atomic_add_unchecked,#function
5444+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5445+ BACKOFF_SETUP(%o2)
5446+1: lduw [%o1], %g1
5447+ add %g1, %o0, %g7
5448+ cas [%o1], %g1, %g7
5449+ cmp %g1, %g7
5450+ bne,pn %icc, 2f
5451+ nop
5452+ retl
5453+ nop
5454+2: BACKOFF_SPIN(%o2, %o3, 1b)
5455+ .size atomic_add_unchecked, .-atomic_add_unchecked
5456+
5457 .globl atomic_sub
5458 .type atomic_sub,#function
5459 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5460 BACKOFF_SETUP(%o2)
5461 1: lduw [%o1], %g1
5462- sub %g1, %o0, %g7
5463+ subcc %g1, %o0, %g7
5464+
5465+#ifdef CONFIG_PAX_REFCOUNT
5466+ tvs %icc, 6
5467+#endif
5468+
5469 cas [%o1], %g1, %g7
5470 cmp %g1, %g7
5471 bne,pn %icc, 2f
5472@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5473 2: BACKOFF_SPIN(%o2, %o3, 1b)
5474 .size atomic_sub, .-atomic_sub
5475
5476+ .globl atomic_sub_unchecked
5477+ .type atomic_sub_unchecked,#function
5478+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5479+ BACKOFF_SETUP(%o2)
5480+1: lduw [%o1], %g1
5481+ sub %g1, %o0, %g7
5482+ cas [%o1], %g1, %g7
5483+ cmp %g1, %g7
5484+ bne,pn %icc, 2f
5485+ nop
5486+ retl
5487+ nop
5488+2: BACKOFF_SPIN(%o2, %o3, 1b)
5489+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
5490+
5491 .globl atomic_add_ret
5492 .type atomic_add_ret,#function
5493 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5494 BACKOFF_SETUP(%o2)
5495 1: lduw [%o1], %g1
5496- add %g1, %o0, %g7
5497+ addcc %g1, %o0, %g7
5498+
5499+#ifdef CONFIG_PAX_REFCOUNT
5500+ tvs %icc, 6
5501+#endif
5502+
5503 cas [%o1], %g1, %g7
5504 cmp %g1, %g7
5505 bne,pn %icc, 2f
5506@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5507 2: BACKOFF_SPIN(%o2, %o3, 1b)
5508 .size atomic_add_ret, .-atomic_add_ret
5509
5510+ .globl atomic_add_ret_unchecked
5511+ .type atomic_add_ret_unchecked,#function
5512+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5513+ BACKOFF_SETUP(%o2)
5514+1: lduw [%o1], %g1
5515+ addcc %g1, %o0, %g7
5516+ cas [%o1], %g1, %g7
5517+ cmp %g1, %g7
5518+ bne,pn %icc, 2f
5519+ add %g7, %o0, %g7
5520+ sra %g7, 0, %o0
5521+ retl
5522+ nop
5523+2: BACKOFF_SPIN(%o2, %o3, 1b)
5524+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
5525+
5526 .globl atomic_sub_ret
5527 .type atomic_sub_ret,#function
5528 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5529 BACKOFF_SETUP(%o2)
5530 1: lduw [%o1], %g1
5531- sub %g1, %o0, %g7
5532+ subcc %g1, %o0, %g7
5533+
5534+#ifdef CONFIG_PAX_REFCOUNT
5535+ tvs %icc, 6
5536+#endif
5537+
5538 cas [%o1], %g1, %g7
5539 cmp %g1, %g7
5540 bne,pn %icc, 2f
5541@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5542 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5543 BACKOFF_SETUP(%o2)
5544 1: ldx [%o1], %g1
5545- add %g1, %o0, %g7
5546+ addcc %g1, %o0, %g7
5547+
5548+#ifdef CONFIG_PAX_REFCOUNT
5549+ tvs %xcc, 6
5550+#endif
5551+
5552 casx [%o1], %g1, %g7
5553 cmp %g1, %g7
5554 bne,pn %xcc, 2f
5555@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
5556 2: BACKOFF_SPIN(%o2, %o3, 1b)
5557 .size atomic64_add, .-atomic64_add
5558
5559+ .globl atomic64_add_unchecked
5560+ .type atomic64_add_unchecked,#function
5561+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5562+ BACKOFF_SETUP(%o2)
5563+1: ldx [%o1], %g1
5564+ addcc %g1, %o0, %g7
5565+ casx [%o1], %g1, %g7
5566+ cmp %g1, %g7
5567+ bne,pn %xcc, 2f
5568+ nop
5569+ retl
5570+ nop
5571+2: BACKOFF_SPIN(%o2, %o3, 1b)
5572+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
5573+
5574 .globl atomic64_sub
5575 .type atomic64_sub,#function
5576 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5577 BACKOFF_SETUP(%o2)
5578 1: ldx [%o1], %g1
5579- sub %g1, %o0, %g7
5580+ subcc %g1, %o0, %g7
5581+
5582+#ifdef CONFIG_PAX_REFCOUNT
5583+ tvs %xcc, 6
5584+#endif
5585+
5586 casx [%o1], %g1, %g7
5587 cmp %g1, %g7
5588 bne,pn %xcc, 2f
5589@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
5590 2: BACKOFF_SPIN(%o2, %o3, 1b)
5591 .size atomic64_sub, .-atomic64_sub
5592
5593+ .globl atomic64_sub_unchecked
5594+ .type atomic64_sub_unchecked,#function
5595+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
5596+ BACKOFF_SETUP(%o2)
5597+1: ldx [%o1], %g1
5598+ subcc %g1, %o0, %g7
5599+ casx [%o1], %g1, %g7
5600+ cmp %g1, %g7
5601+ bne,pn %xcc, 2f
5602+ nop
5603+ retl
5604+ nop
5605+2: BACKOFF_SPIN(%o2, %o3, 1b)
5606+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
5607+
5608 .globl atomic64_add_ret
5609 .type atomic64_add_ret,#function
5610 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5611 BACKOFF_SETUP(%o2)
5612 1: ldx [%o1], %g1
5613- add %g1, %o0, %g7
5614+ addcc %g1, %o0, %g7
5615+
5616+#ifdef CONFIG_PAX_REFCOUNT
5617+ tvs %xcc, 6
5618+#endif
5619+
5620 casx [%o1], %g1, %g7
5621 cmp %g1, %g7
5622 bne,pn %xcc, 2f
5623@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
5624 2: BACKOFF_SPIN(%o2, %o3, 1b)
5625 .size atomic64_add_ret, .-atomic64_add_ret
5626
5627+ .globl atomic64_add_ret_unchecked
5628+ .type atomic64_add_ret_unchecked,#function
5629+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
5630+ BACKOFF_SETUP(%o2)
5631+1: ldx [%o1], %g1
5632+ addcc %g1, %o0, %g7
5633+ casx [%o1], %g1, %g7
5634+ cmp %g1, %g7
5635+ bne,pn %xcc, 2f
5636+ add %g7, %o0, %g7
5637+ mov %g7, %o0
5638+ retl
5639+ nop
5640+2: BACKOFF_SPIN(%o2, %o3, 1b)
5641+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
5642+
5643 .globl atomic64_sub_ret
5644 .type atomic64_sub_ret,#function
5645 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
5646 BACKOFF_SETUP(%o2)
5647 1: ldx [%o1], %g1
5648- sub %g1, %o0, %g7
5649+ subcc %g1, %o0, %g7
5650+
5651+#ifdef CONFIG_PAX_REFCOUNT
5652+ tvs %xcc, 6
5653+#endif
5654+
5655 casx [%o1], %g1, %g7
5656 cmp %g1, %g7
5657 bne,pn %xcc, 2f
5658diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
5659index 704b126..2e79d76 100644
5660--- a/arch/sparc/lib/ksyms.c
5661+++ b/arch/sparc/lib/ksyms.c
5662@@ -144,12 +144,18 @@ EXPORT_SYMBOL(__downgrade_write);
5663
5664 /* Atomic counter implementation. */
5665 EXPORT_SYMBOL(atomic_add);
5666+EXPORT_SYMBOL(atomic_add_unchecked);
5667 EXPORT_SYMBOL(atomic_add_ret);
5668+EXPORT_SYMBOL(atomic_add_ret_unchecked);
5669 EXPORT_SYMBOL(atomic_sub);
5670+EXPORT_SYMBOL(atomic_sub_unchecked);
5671 EXPORT_SYMBOL(atomic_sub_ret);
5672 EXPORT_SYMBOL(atomic64_add);
5673+EXPORT_SYMBOL(atomic64_add_unchecked);
5674 EXPORT_SYMBOL(atomic64_add_ret);
5675+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
5676 EXPORT_SYMBOL(atomic64_sub);
5677+EXPORT_SYMBOL(atomic64_sub_unchecked);
5678 EXPORT_SYMBOL(atomic64_sub_ret);
5679
5680 /* Atomic bit operations. */
5681diff --git a/arch/sparc/lib/rwsem_64.S b/arch/sparc/lib/rwsem_64.S
5682index 91a7d29..ce75c29 100644
5683--- a/arch/sparc/lib/rwsem_64.S
5684+++ b/arch/sparc/lib/rwsem_64.S
5685@@ -11,7 +11,12 @@
5686 .globl __down_read
5687 __down_read:
5688 1: lduw [%o0], %g1
5689- add %g1, 1, %g7
5690+ addcc %g1, 1, %g7
5691+
5692+#ifdef CONFIG_PAX_REFCOUNT
5693+ tvs %icc, 6
5694+#endif
5695+
5696 cas [%o0], %g1, %g7
5697 cmp %g1, %g7
5698 bne,pn %icc, 1b
5699@@ -33,7 +38,12 @@ __down_read:
5700 .globl __down_read_trylock
5701 __down_read_trylock:
5702 1: lduw [%o0], %g1
5703- add %g1, 1, %g7
5704+ addcc %g1, 1, %g7
5705+
5706+#ifdef CONFIG_PAX_REFCOUNT
5707+ tvs %icc, 6
5708+#endif
5709+
5710 cmp %g7, 0
5711 bl,pn %icc, 2f
5712 mov 0, %o1
5713@@ -51,7 +61,12 @@ __down_write:
5714 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5715 1:
5716 lduw [%o0], %g3
5717- add %g3, %g1, %g7
5718+ addcc %g3, %g1, %g7
5719+
5720+#ifdef CONFIG_PAX_REFCOUNT
5721+ tvs %icc, 6
5722+#endif
5723+
5724 cas [%o0], %g3, %g7
5725 cmp %g3, %g7
5726 bne,pn %icc, 1b
5727@@ -77,7 +92,12 @@ __down_write_trylock:
5728 cmp %g3, 0
5729 bne,pn %icc, 2f
5730 mov 0, %o1
5731- add %g3, %g1, %g7
5732+ addcc %g3, %g1, %g7
5733+
5734+#ifdef CONFIG_PAX_REFCOUNT
5735+ tvs %icc, 6
5736+#endif
5737+
5738 cas [%o0], %g3, %g7
5739 cmp %g3, %g7
5740 bne,pn %icc, 1b
5741@@ -90,7 +110,12 @@ __down_write_trylock:
5742 __up_read:
5743 1:
5744 lduw [%o0], %g1
5745- sub %g1, 1, %g7
5746+ subcc %g1, 1, %g7
5747+
5748+#ifdef CONFIG_PAX_REFCOUNT
5749+ tvs %icc, 6
5750+#endif
5751+
5752 cas [%o0], %g1, %g7
5753 cmp %g1, %g7
5754 bne,pn %icc, 1b
5755@@ -118,7 +143,12 @@ __up_write:
5756 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5757 1:
5758 lduw [%o0], %g3
5759- sub %g3, %g1, %g7
5760+ subcc %g3, %g1, %g7
5761+
5762+#ifdef CONFIG_PAX_REFCOUNT
5763+ tvs %icc, 6
5764+#endif
5765+
5766 cas [%o0], %g3, %g7
5767 cmp %g3, %g7
5768 bne,pn %icc, 1b
5769@@ -143,7 +173,12 @@ __downgrade_write:
5770 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5771 1:
5772 lduw [%o0], %g3
5773- sub %g3, %g1, %g7
5774+ subcc %g3, %g1, %g7
5775+
5776+#ifdef CONFIG_PAX_REFCOUNT
5777+ tvs %icc, 6
5778+#endif
5779+
5780 cas [%o0], %g3, %g7
5781 cmp %g3, %g7
5782 bne,pn %icc, 1b
5783diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
5784index 79836a7..62f47a2 100644
5785--- a/arch/sparc/mm/Makefile
5786+++ b/arch/sparc/mm/Makefile
5787@@ -2,7 +2,7 @@
5788 #
5789
5790 asflags-y := -ansi
5791-ccflags-y := -Werror
5792+#ccflags-y := -Werror
5793
5794 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
5795 obj-y += fault_$(BITS).o
5796diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
5797index b99f81c..3453e93 100644
5798--- a/arch/sparc/mm/fault_32.c
5799+++ b/arch/sparc/mm/fault_32.c
5800@@ -21,6 +21,9 @@
5801 #include <linux/interrupt.h>
5802 #include <linux/module.h>
5803 #include <linux/kdebug.h>
5804+#include <linux/slab.h>
5805+#include <linux/pagemap.h>
5806+#include <linux/compiler.h>
5807
5808 #include <asm/system.h>
5809 #include <asm/page.h>
5810@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
5811 return safe_compute_effective_address(regs, insn);
5812 }
5813
5814+#ifdef CONFIG_PAX_PAGEEXEC
5815+#ifdef CONFIG_PAX_DLRESOLVE
5816+static void pax_emuplt_close(struct vm_area_struct *vma)
5817+{
5818+ vma->vm_mm->call_dl_resolve = 0UL;
5819+}
5820+
5821+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5822+{
5823+ unsigned int *kaddr;
5824+
5825+ vmf->page = alloc_page(GFP_HIGHUSER);
5826+ if (!vmf->page)
5827+ return VM_FAULT_OOM;
5828+
5829+ kaddr = kmap(vmf->page);
5830+ memset(kaddr, 0, PAGE_SIZE);
5831+ kaddr[0] = 0x9DE3BFA8U; /* save */
5832+ flush_dcache_page(vmf->page);
5833+ kunmap(vmf->page);
5834+ return VM_FAULT_MAJOR;
5835+}
5836+
5837+static const struct vm_operations_struct pax_vm_ops = {
5838+ .close = pax_emuplt_close,
5839+ .fault = pax_emuplt_fault
5840+};
5841+
5842+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5843+{
5844+ int ret;
5845+
5846+ vma->vm_mm = current->mm;
5847+ vma->vm_start = addr;
5848+ vma->vm_end = addr + PAGE_SIZE;
5849+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5850+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5851+ vma->vm_ops = &pax_vm_ops;
5852+
5853+ ret = insert_vm_struct(current->mm, vma);
5854+ if (ret)
5855+ return ret;
5856+
5857+ ++current->mm->total_vm;
5858+ return 0;
5859+}
5860+#endif
5861+
5862+/*
5863+ * PaX: decide what to do with offenders (regs->pc = fault address)
5864+ *
5865+ * returns 1 when task should be killed
5866+ * 2 when patched PLT trampoline was detected
5867+ * 3 when unpatched PLT trampoline was detected
5868+ */
5869+static int pax_handle_fetch_fault(struct pt_regs *regs)
5870+{
5871+
5872+#ifdef CONFIG_PAX_EMUPLT
5873+ int err;
5874+
5875+ do { /* PaX: patched PLT emulation #1 */
5876+ unsigned int sethi1, sethi2, jmpl;
5877+
5878+ err = get_user(sethi1, (unsigned int *)regs->pc);
5879+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5880+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5881+
5882+ if (err)
5883+ break;
5884+
5885+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5886+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5887+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5888+ {
5889+ unsigned int addr;
5890+
5891+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5892+ addr = regs->u_regs[UREG_G1];
5893+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5894+ regs->pc = addr;
5895+ regs->npc = addr+4;
5896+ return 2;
5897+ }
5898+ } while (0);
5899+
5900+ { /* PaX: patched PLT emulation #2 */
5901+ unsigned int ba;
5902+
5903+ err = get_user(ba, (unsigned int *)regs->pc);
5904+
5905+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5906+ unsigned int addr;
5907+
5908+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5909+ regs->pc = addr;
5910+ regs->npc = addr+4;
5911+ return 2;
5912+ }
5913+ }
5914+
5915+ do { /* PaX: patched PLT emulation #3 */
5916+ unsigned int sethi, jmpl, nop;
5917+
5918+ err = get_user(sethi, (unsigned int *)regs->pc);
5919+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5920+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5921+
5922+ if (err)
5923+ break;
5924+
5925+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5926+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5927+ nop == 0x01000000U)
5928+ {
5929+ unsigned int addr;
5930+
5931+ addr = (sethi & 0x003FFFFFU) << 10;
5932+ regs->u_regs[UREG_G1] = addr;
5933+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5934+ regs->pc = addr;
5935+ regs->npc = addr+4;
5936+ return 2;
5937+ }
5938+ } while (0);
5939+
5940+ do { /* PaX: unpatched PLT emulation step 1 */
5941+ unsigned int sethi, ba, nop;
5942+
5943+ err = get_user(sethi, (unsigned int *)regs->pc);
5944+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5945+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5946+
5947+ if (err)
5948+ break;
5949+
5950+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5951+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5952+ nop == 0x01000000U)
5953+ {
5954+ unsigned int addr, save, call;
5955+
5956+ if ((ba & 0xFFC00000U) == 0x30800000U)
5957+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5958+ else
5959+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5960+
5961+ err = get_user(save, (unsigned int *)addr);
5962+ err |= get_user(call, (unsigned int *)(addr+4));
5963+ err |= get_user(nop, (unsigned int *)(addr+8));
5964+ if (err)
5965+ break;
5966+
5967+#ifdef CONFIG_PAX_DLRESOLVE
5968+ if (save == 0x9DE3BFA8U &&
5969+ (call & 0xC0000000U) == 0x40000000U &&
5970+ nop == 0x01000000U)
5971+ {
5972+ struct vm_area_struct *vma;
5973+ unsigned long call_dl_resolve;
5974+
5975+ down_read(&current->mm->mmap_sem);
5976+ call_dl_resolve = current->mm->call_dl_resolve;
5977+ up_read(&current->mm->mmap_sem);
5978+ if (likely(call_dl_resolve))
5979+ goto emulate;
5980+
5981+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5982+
5983+ down_write(&current->mm->mmap_sem);
5984+ if (current->mm->call_dl_resolve) {
5985+ call_dl_resolve = current->mm->call_dl_resolve;
5986+ up_write(&current->mm->mmap_sem);
5987+ if (vma)
5988+ kmem_cache_free(vm_area_cachep, vma);
5989+ goto emulate;
5990+ }
5991+
5992+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5993+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5994+ up_write(&current->mm->mmap_sem);
5995+ if (vma)
5996+ kmem_cache_free(vm_area_cachep, vma);
5997+ return 1;
5998+ }
5999+
6000+ if (pax_insert_vma(vma, call_dl_resolve)) {
6001+ up_write(&current->mm->mmap_sem);
6002+ kmem_cache_free(vm_area_cachep, vma);
6003+ return 1;
6004+ }
6005+
6006+ current->mm->call_dl_resolve = call_dl_resolve;
6007+ up_write(&current->mm->mmap_sem);
6008+
6009+emulate:
6010+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6011+ regs->pc = call_dl_resolve;
6012+ regs->npc = addr+4;
6013+ return 3;
6014+ }
6015+#endif
6016+
6017+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6018+ if ((save & 0xFFC00000U) == 0x05000000U &&
6019+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6020+ nop == 0x01000000U)
6021+ {
6022+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6023+ regs->u_regs[UREG_G2] = addr + 4;
6024+ addr = (save & 0x003FFFFFU) << 10;
6025+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
6026+ regs->pc = addr;
6027+ regs->npc = addr+4;
6028+ return 3;
6029+ }
6030+ }
6031+ } while (0);
6032+
6033+ do { /* PaX: unpatched PLT emulation step 2 */
6034+ unsigned int save, call, nop;
6035+
6036+ err = get_user(save, (unsigned int *)(regs->pc-4));
6037+ err |= get_user(call, (unsigned int *)regs->pc);
6038+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
6039+ if (err)
6040+ break;
6041+
6042+ if (save == 0x9DE3BFA8U &&
6043+ (call & 0xC0000000U) == 0x40000000U &&
6044+ nop == 0x01000000U)
6045+ {
6046+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
6047+
6048+ regs->u_regs[UREG_RETPC] = regs->pc;
6049+ regs->pc = dl_resolve;
6050+ regs->npc = dl_resolve+4;
6051+ return 3;
6052+ }
6053+ } while (0);
6054+#endif
6055+
6056+ return 1;
6057+}
6058+
6059+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6060+{
6061+ unsigned long i;
6062+
6063+ printk(KERN_ERR "PAX: bytes at PC: ");
6064+ for (i = 0; i < 8; i++) {
6065+ unsigned int c;
6066+ if (get_user(c, (unsigned int *)pc+i))
6067+ printk(KERN_CONT "???????? ");
6068+ else
6069+ printk(KERN_CONT "%08x ", c);
6070+ }
6071+ printk("\n");
6072+}
6073+#endif
6074+
6075 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
6076 unsigned long address)
6077 {
6078@@ -231,6 +495,24 @@ good_area:
6079 if(!(vma->vm_flags & VM_WRITE))
6080 goto bad_area;
6081 } else {
6082+
6083+#ifdef CONFIG_PAX_PAGEEXEC
6084+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
6085+ up_read(&mm->mmap_sem);
6086+ switch (pax_handle_fetch_fault(regs)) {
6087+
6088+#ifdef CONFIG_PAX_EMUPLT
6089+ case 2:
6090+ case 3:
6091+ return;
6092+#endif
6093+
6094+ }
6095+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
6096+ do_group_exit(SIGKILL);
6097+ }
6098+#endif
6099+
6100 /* Allow reads even for write-only mappings */
6101 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
6102 goto bad_area;
6103diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
6104index 43b0da9..a0b78f9 100644
6105--- a/arch/sparc/mm/fault_64.c
6106+++ b/arch/sparc/mm/fault_64.c
6107@@ -20,6 +20,9 @@
6108 #include <linux/kprobes.h>
6109 #include <linux/kdebug.h>
6110 #include <linux/percpu.h>
6111+#include <linux/slab.h>
6112+#include <linux/pagemap.h>
6113+#include <linux/compiler.h>
6114
6115 #include <asm/page.h>
6116 #include <asm/pgtable.h>
6117@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
6118 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
6119 regs->tpc);
6120 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
6121- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
6122+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
6123 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
6124 dump_stack();
6125 unhandled_fault(regs->tpc, current, regs);
6126@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_address(struct pt_regs *regs,
6127 show_regs(regs);
6128 }
6129
6130+#ifdef CONFIG_PAX_PAGEEXEC
6131+#ifdef CONFIG_PAX_DLRESOLVE
6132+static void pax_emuplt_close(struct vm_area_struct *vma)
6133+{
6134+ vma->vm_mm->call_dl_resolve = 0UL;
6135+}
6136+
6137+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
6138+{
6139+ unsigned int *kaddr;
6140+
6141+ vmf->page = alloc_page(GFP_HIGHUSER);
6142+ if (!vmf->page)
6143+ return VM_FAULT_OOM;
6144+
6145+ kaddr = kmap(vmf->page);
6146+ memset(kaddr, 0, PAGE_SIZE);
6147+ kaddr[0] = 0x9DE3BFA8U; /* save */
6148+ flush_dcache_page(vmf->page);
6149+ kunmap(vmf->page);
6150+ return VM_FAULT_MAJOR;
6151+}
6152+
6153+static const struct vm_operations_struct pax_vm_ops = {
6154+ .close = pax_emuplt_close,
6155+ .fault = pax_emuplt_fault
6156+};
6157+
6158+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
6159+{
6160+ int ret;
6161+
6162+ vma->vm_mm = current->mm;
6163+ vma->vm_start = addr;
6164+ vma->vm_end = addr + PAGE_SIZE;
6165+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
6166+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
6167+ vma->vm_ops = &pax_vm_ops;
6168+
6169+ ret = insert_vm_struct(current->mm, vma);
6170+ if (ret)
6171+ return ret;
6172+
6173+ ++current->mm->total_vm;
6174+ return 0;
6175+}
6176+#endif
6177+
6178+/*
6179+ * PaX: decide what to do with offenders (regs->tpc = fault address)
6180+ *
6181+ * returns 1 when task should be killed
6182+ * 2 when patched PLT trampoline was detected
6183+ * 3 when unpatched PLT trampoline was detected
6184+ */
6185+static int pax_handle_fetch_fault(struct pt_regs *regs)
6186+{
6187+
6188+#ifdef CONFIG_PAX_EMUPLT
6189+ int err;
6190+
6191+ do { /* PaX: patched PLT emulation #1 */
6192+ unsigned int sethi1, sethi2, jmpl;
6193+
6194+ err = get_user(sethi1, (unsigned int *)regs->tpc);
6195+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
6196+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
6197+
6198+ if (err)
6199+ break;
6200+
6201+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
6202+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
6203+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
6204+ {
6205+ unsigned long addr;
6206+
6207+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
6208+ addr = regs->u_regs[UREG_G1];
6209+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6210+
6211+ if (test_thread_flag(TIF_32BIT))
6212+ addr &= 0xFFFFFFFFUL;
6213+
6214+ regs->tpc = addr;
6215+ regs->tnpc = addr+4;
6216+ return 2;
6217+ }
6218+ } while (0);
6219+
6220+ { /* PaX: patched PLT emulation #2 */
6221+ unsigned int ba;
6222+
6223+ err = get_user(ba, (unsigned int *)regs->tpc);
6224+
6225+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
6226+ unsigned long addr;
6227+
6228+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6229+
6230+ if (test_thread_flag(TIF_32BIT))
6231+ addr &= 0xFFFFFFFFUL;
6232+
6233+ regs->tpc = addr;
6234+ regs->tnpc = addr+4;
6235+ return 2;
6236+ }
6237+ }
6238+
6239+ do { /* PaX: patched PLT emulation #3 */
6240+ unsigned int sethi, jmpl, nop;
6241+
6242+ err = get_user(sethi, (unsigned int *)regs->tpc);
6243+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
6244+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6245+
6246+ if (err)
6247+ break;
6248+
6249+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6250+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
6251+ nop == 0x01000000U)
6252+ {
6253+ unsigned long addr;
6254+
6255+ addr = (sethi & 0x003FFFFFU) << 10;
6256+ regs->u_regs[UREG_G1] = addr;
6257+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6258+
6259+ if (test_thread_flag(TIF_32BIT))
6260+ addr &= 0xFFFFFFFFUL;
6261+
6262+ regs->tpc = addr;
6263+ regs->tnpc = addr+4;
6264+ return 2;
6265+ }
6266+ } while (0);
6267+
6268+ do { /* PaX: patched PLT emulation #4 */
6269+ unsigned int sethi, mov1, call, mov2;
6270+
6271+ err = get_user(sethi, (unsigned int *)regs->tpc);
6272+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
6273+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
6274+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
6275+
6276+ if (err)
6277+ break;
6278+
6279+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6280+ mov1 == 0x8210000FU &&
6281+ (call & 0xC0000000U) == 0x40000000U &&
6282+ mov2 == 0x9E100001U)
6283+ {
6284+ unsigned long addr;
6285+
6286+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
6287+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6288+
6289+ if (test_thread_flag(TIF_32BIT))
6290+ addr &= 0xFFFFFFFFUL;
6291+
6292+ regs->tpc = addr;
6293+ regs->tnpc = addr+4;
6294+ return 2;
6295+ }
6296+ } while (0);
6297+
6298+ do { /* PaX: patched PLT emulation #5 */
6299+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
6300+
6301+ err = get_user(sethi, (unsigned int *)regs->tpc);
6302+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6303+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6304+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
6305+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
6306+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
6307+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
6308+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
6309+
6310+ if (err)
6311+ break;
6312+
6313+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6314+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6315+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6316+ (or1 & 0xFFFFE000U) == 0x82106000U &&
6317+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6318+ sllx == 0x83287020U &&
6319+ jmpl == 0x81C04005U &&
6320+ nop == 0x01000000U)
6321+ {
6322+ unsigned long addr;
6323+
6324+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6325+ regs->u_regs[UREG_G1] <<= 32;
6326+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6327+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6328+ regs->tpc = addr;
6329+ regs->tnpc = addr+4;
6330+ return 2;
6331+ }
6332+ } while (0);
6333+
6334+ do { /* PaX: patched PLT emulation #6 */
6335+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
6336+
6337+ err = get_user(sethi, (unsigned int *)regs->tpc);
6338+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
6339+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
6340+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
6341+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
6342+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
6343+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
6344+
6345+ if (err)
6346+ break;
6347+
6348+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6349+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
6350+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6351+ sllx == 0x83287020U &&
6352+ (or & 0xFFFFE000U) == 0x8A116000U &&
6353+ jmpl == 0x81C04005U &&
6354+ nop == 0x01000000U)
6355+ {
6356+ unsigned long addr;
6357+
6358+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
6359+ regs->u_regs[UREG_G1] <<= 32;
6360+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
6361+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
6362+ regs->tpc = addr;
6363+ regs->tnpc = addr+4;
6364+ return 2;
6365+ }
6366+ } while (0);
6367+
6368+ do { /* PaX: unpatched PLT emulation step 1 */
6369+ unsigned int sethi, ba, nop;
6370+
6371+ err = get_user(sethi, (unsigned int *)regs->tpc);
6372+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6373+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6374+
6375+ if (err)
6376+ break;
6377+
6378+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6379+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
6380+ nop == 0x01000000U)
6381+ {
6382+ unsigned long addr;
6383+ unsigned int save, call;
6384+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
6385+
6386+ if ((ba & 0xFFC00000U) == 0x30800000U)
6387+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
6388+ else
6389+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6390+
6391+ if (test_thread_flag(TIF_32BIT))
6392+ addr &= 0xFFFFFFFFUL;
6393+
6394+ err = get_user(save, (unsigned int *)addr);
6395+ err |= get_user(call, (unsigned int *)(addr+4));
6396+ err |= get_user(nop, (unsigned int *)(addr+8));
6397+ if (err)
6398+ break;
6399+
6400+#ifdef CONFIG_PAX_DLRESOLVE
6401+ if (save == 0x9DE3BFA8U &&
6402+ (call & 0xC0000000U) == 0x40000000U &&
6403+ nop == 0x01000000U)
6404+ {
6405+ struct vm_area_struct *vma;
6406+ unsigned long call_dl_resolve;
6407+
6408+ down_read(&current->mm->mmap_sem);
6409+ call_dl_resolve = current->mm->call_dl_resolve;
6410+ up_read(&current->mm->mmap_sem);
6411+ if (likely(call_dl_resolve))
6412+ goto emulate;
6413+
6414+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
6415+
6416+ down_write(&current->mm->mmap_sem);
6417+ if (current->mm->call_dl_resolve) {
6418+ call_dl_resolve = current->mm->call_dl_resolve;
6419+ up_write(&current->mm->mmap_sem);
6420+ if (vma)
6421+ kmem_cache_free(vm_area_cachep, vma);
6422+ goto emulate;
6423+ }
6424+
6425+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
6426+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
6427+ up_write(&current->mm->mmap_sem);
6428+ if (vma)
6429+ kmem_cache_free(vm_area_cachep, vma);
6430+ return 1;
6431+ }
6432+
6433+ if (pax_insert_vma(vma, call_dl_resolve)) {
6434+ up_write(&current->mm->mmap_sem);
6435+ kmem_cache_free(vm_area_cachep, vma);
6436+ return 1;
6437+ }
6438+
6439+ current->mm->call_dl_resolve = call_dl_resolve;
6440+ up_write(&current->mm->mmap_sem);
6441+
6442+emulate:
6443+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6444+ regs->tpc = call_dl_resolve;
6445+ regs->tnpc = addr+4;
6446+ return 3;
6447+ }
6448+#endif
6449+
6450+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
6451+ if ((save & 0xFFC00000U) == 0x05000000U &&
6452+ (call & 0xFFFFE000U) == 0x85C0A000U &&
6453+ nop == 0x01000000U)
6454+ {
6455+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6456+ regs->u_regs[UREG_G2] = addr + 4;
6457+ addr = (save & 0x003FFFFFU) << 10;
6458+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
6459+
6460+ if (test_thread_flag(TIF_32BIT))
6461+ addr &= 0xFFFFFFFFUL;
6462+
6463+ regs->tpc = addr;
6464+ regs->tnpc = addr+4;
6465+ return 3;
6466+ }
6467+
6468+ /* PaX: 64-bit PLT stub */
6469+ err = get_user(sethi1, (unsigned int *)addr);
6470+ err |= get_user(sethi2, (unsigned int *)(addr+4));
6471+ err |= get_user(or1, (unsigned int *)(addr+8));
6472+ err |= get_user(or2, (unsigned int *)(addr+12));
6473+ err |= get_user(sllx, (unsigned int *)(addr+16));
6474+ err |= get_user(add, (unsigned int *)(addr+20));
6475+ err |= get_user(jmpl, (unsigned int *)(addr+24));
6476+ err |= get_user(nop, (unsigned int *)(addr+28));
6477+ if (err)
6478+ break;
6479+
6480+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
6481+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
6482+ (or1 & 0xFFFFE000U) == 0x88112000U &&
6483+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
6484+ sllx == 0x89293020U &&
6485+ add == 0x8A010005U &&
6486+ jmpl == 0x89C14000U &&
6487+ nop == 0x01000000U)
6488+ {
6489+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
6490+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
6491+ regs->u_regs[UREG_G4] <<= 32;
6492+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
6493+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
6494+ regs->u_regs[UREG_G4] = addr + 24;
6495+ addr = regs->u_regs[UREG_G5];
6496+ regs->tpc = addr;
6497+ regs->tnpc = addr+4;
6498+ return 3;
6499+ }
6500+ }
6501+ } while (0);
6502+
6503+#ifdef CONFIG_PAX_DLRESOLVE
6504+ do { /* PaX: unpatched PLT emulation step 2 */
6505+ unsigned int save, call, nop;
6506+
6507+ err = get_user(save, (unsigned int *)(regs->tpc-4));
6508+ err |= get_user(call, (unsigned int *)regs->tpc);
6509+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
6510+ if (err)
6511+ break;
6512+
6513+ if (save == 0x9DE3BFA8U &&
6514+ (call & 0xC0000000U) == 0x40000000U &&
6515+ nop == 0x01000000U)
6516+ {
6517+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
6518+
6519+ if (test_thread_flag(TIF_32BIT))
6520+ dl_resolve &= 0xFFFFFFFFUL;
6521+
6522+ regs->u_regs[UREG_RETPC] = regs->tpc;
6523+ regs->tpc = dl_resolve;
6524+ regs->tnpc = dl_resolve+4;
6525+ return 3;
6526+ }
6527+ } while (0);
6528+#endif
6529+
6530+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
6531+ unsigned int sethi, ba, nop;
6532+
6533+ err = get_user(sethi, (unsigned int *)regs->tpc);
6534+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
6535+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
6536+
6537+ if (err)
6538+ break;
6539+
6540+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
6541+ (ba & 0xFFF00000U) == 0x30600000U &&
6542+ nop == 0x01000000U)
6543+ {
6544+ unsigned long addr;
6545+
6546+ addr = (sethi & 0x003FFFFFU) << 10;
6547+ regs->u_regs[UREG_G1] = addr;
6548+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
6549+
6550+ if (test_thread_flag(TIF_32BIT))
6551+ addr &= 0xFFFFFFFFUL;
6552+
6553+ regs->tpc = addr;
6554+ regs->tnpc = addr+4;
6555+ return 2;
6556+ }
6557+ } while (0);
6558+
6559+#endif
6560+
6561+ return 1;
6562+}
6563+
6564+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6565+{
6566+ unsigned long i;
6567+
6568+ printk(KERN_ERR "PAX: bytes at PC: ");
6569+ for (i = 0; i < 8; i++) {
6570+ unsigned int c;
6571+ if (get_user(c, (unsigned int *)pc+i))
6572+ printk(KERN_CONT "???????? ");
6573+ else
6574+ printk(KERN_CONT "%08x ", c);
6575+ }
6576+ printk("\n");
6577+}
6578+#endif
6579+
6580 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6581 {
6582 struct mm_struct *mm = current->mm;
6583@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
6584 if (!vma)
6585 goto bad_area;
6586
6587+#ifdef CONFIG_PAX_PAGEEXEC
6588+ /* PaX: detect ITLB misses on non-exec pages */
6589+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
6590+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
6591+ {
6592+ if (address != regs->tpc)
6593+ goto good_area;
6594+
6595+ up_read(&mm->mmap_sem);
6596+ switch (pax_handle_fetch_fault(regs)) {
6597+
6598+#ifdef CONFIG_PAX_EMUPLT
6599+ case 2:
6600+ case 3:
6601+ return;
6602+#endif
6603+
6604+ }
6605+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
6606+ do_group_exit(SIGKILL);
6607+ }
6608+#endif
6609+
6610 /* Pure DTLB misses do not tell us whether the fault causing
6611 * load/store/atomic was a write or not, it only says that there
6612 * was no match. So in such a case we (carefully) read the
6613diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
6614index f27d103..1b06377 100644
6615--- a/arch/sparc/mm/hugetlbpage.c
6616+++ b/arch/sparc/mm/hugetlbpage.c
6617@@ -69,7 +69,7 @@ full_search:
6618 }
6619 return -ENOMEM;
6620 }
6621- if (likely(!vma || addr + len <= vma->vm_start)) {
6622+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6623 /*
6624 * Remember the place where we stopped the search:
6625 */
6626@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6627 /* make sure it can fit in the remaining address space */
6628 if (likely(addr > len)) {
6629 vma = find_vma(mm, addr-len);
6630- if (!vma || addr <= vma->vm_start) {
6631+ if (check_heap_stack_gap(vma, addr - len, len)) {
6632 /* remember the address as a hint for next time */
6633 return (mm->free_area_cache = addr-len);
6634 }
6635@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6636 if (unlikely(mm->mmap_base < len))
6637 goto bottomup;
6638
6639- addr = (mm->mmap_base-len) & HPAGE_MASK;
6640+ addr = mm->mmap_base - len;
6641
6642 do {
6643+ addr &= HPAGE_MASK;
6644 /*
6645 * Lookup failure means no vma is above this address,
6646 * else if new region fits below vma->vm_start,
6647 * return with success:
6648 */
6649 vma = find_vma(mm, addr);
6650- if (likely(!vma || addr+len <= vma->vm_start)) {
6651+ if (likely(check_heap_stack_gap(vma, addr, len))) {
6652 /* remember the address as a hint for next time */
6653 return (mm->free_area_cache = addr);
6654 }
6655@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
6656 mm->cached_hole_size = vma->vm_start - addr;
6657
6658 /* try just below the current vma->vm_start */
6659- addr = (vma->vm_start-len) & HPAGE_MASK;
6660- } while (likely(len < vma->vm_start));
6661+ addr = skip_heap_stack_gap(vma, len);
6662+ } while (!IS_ERR_VALUE(addr));
6663
6664 bottomup:
6665 /*
6666@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
6667 if (addr) {
6668 addr = ALIGN(addr, HPAGE_SIZE);
6669 vma = find_vma(mm, addr);
6670- if (task_size - len >= addr &&
6671- (!vma || addr + len <= vma->vm_start))
6672+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
6673 return addr;
6674 }
6675 if (mm->get_unmapped_area == arch_get_unmapped_area)
6676diff --git a/arch/sparc/mm/init_32.c b/arch/sparc/mm/init_32.c
6677index dc7c3b1..34c0070 100644
6678--- a/arch/sparc/mm/init_32.c
6679+++ b/arch/sparc/mm/init_32.c
6680@@ -317,6 +317,9 @@ extern void device_scan(void);
6681 pgprot_t PAGE_SHARED __read_mostly;
6682 EXPORT_SYMBOL(PAGE_SHARED);
6683
6684+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
6685+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
6686+
6687 void __init paging_init(void)
6688 {
6689 switch(sparc_cpu_model) {
6690@@ -345,17 +348,17 @@ void __init paging_init(void)
6691
6692 /* Initialize the protection map with non-constant, MMU dependent values. */
6693 protection_map[0] = PAGE_NONE;
6694- protection_map[1] = PAGE_READONLY;
6695- protection_map[2] = PAGE_COPY;
6696- protection_map[3] = PAGE_COPY;
6697+ protection_map[1] = PAGE_READONLY_NOEXEC;
6698+ protection_map[2] = PAGE_COPY_NOEXEC;
6699+ protection_map[3] = PAGE_COPY_NOEXEC;
6700 protection_map[4] = PAGE_READONLY;
6701 protection_map[5] = PAGE_READONLY;
6702 protection_map[6] = PAGE_COPY;
6703 protection_map[7] = PAGE_COPY;
6704 protection_map[8] = PAGE_NONE;
6705- protection_map[9] = PAGE_READONLY;
6706- protection_map[10] = PAGE_SHARED;
6707- protection_map[11] = PAGE_SHARED;
6708+ protection_map[9] = PAGE_READONLY_NOEXEC;
6709+ protection_map[10] = PAGE_SHARED_NOEXEC;
6710+ protection_map[11] = PAGE_SHARED_NOEXEC;
6711 protection_map[12] = PAGE_READONLY;
6712 protection_map[13] = PAGE_READONLY;
6713 protection_map[14] = PAGE_SHARED;
6714diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
6715index 509b1ff..bfd7118 100644
6716--- a/arch/sparc/mm/srmmu.c
6717+++ b/arch/sparc/mm/srmmu.c
6718@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6719 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6720 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6721 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6722+
6723+#ifdef CONFIG_PAX_PAGEEXEC
6724+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6725+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6726+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6727+#endif
6728+
6729 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6730 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6731
6732diff --git a/arch/um/Makefile b/arch/um/Makefile
6733index fc633db..5e1a1c2 100644
6734--- a/arch/um/Makefile
6735+++ b/arch/um/Makefile
6736@@ -49,6 +49,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
6737 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
6738 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64
6739
6740+ifdef CONSTIFY_PLUGIN
6741+USER_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6742+endif
6743+
6744 include $(srctree)/$(ARCH_DIR)/Makefile-$(SUBARCH)
6745
6746 #This will adjust *FLAGS accordingly to the platform.
6747diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
6748index 6c03acd..a5e0215 100644
6749--- a/arch/um/include/asm/kmap_types.h
6750+++ b/arch/um/include/asm/kmap_types.h
6751@@ -23,6 +23,7 @@ enum km_type {
6752 KM_IRQ1,
6753 KM_SOFTIRQ0,
6754 KM_SOFTIRQ1,
6755+ KM_CLEARPAGE,
6756 KM_TYPE_NR
6757 };
6758
6759diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
6760index 4cc9b6c..02e5029 100644
6761--- a/arch/um/include/asm/page.h
6762+++ b/arch/um/include/asm/page.h
6763@@ -14,6 +14,9 @@
6764 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6765 #define PAGE_MASK (~(PAGE_SIZE-1))
6766
6767+#define ktla_ktva(addr) (addr)
6768+#define ktva_ktla(addr) (addr)
6769+
6770 #ifndef __ASSEMBLY__
6771
6772 struct page;
6773diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
6774index 4a28a15..654dc2a 100644
6775--- a/arch/um/kernel/process.c
6776+++ b/arch/um/kernel/process.c
6777@@ -393,22 +393,6 @@ int singlestepping(void * t)
6778 return 2;
6779 }
6780
6781-/*
6782- * Only x86 and x86_64 have an arch_align_stack().
6783- * All other arches have "#define arch_align_stack(x) (x)"
6784- * in their asm/system.h
6785- * As this is included in UML from asm-um/system-generic.h,
6786- * we can use it to behave as the subarch does.
6787- */
6788-#ifndef arch_align_stack
6789-unsigned long arch_align_stack(unsigned long sp)
6790-{
6791- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6792- sp -= get_random_int() % 8192;
6793- return sp & ~0xf;
6794-}
6795-#endif
6796-
6797 unsigned long get_wchan(struct task_struct *p)
6798 {
6799 unsigned long stack_page, sp, ip;
6800diff --git a/arch/um/sys-i386/shared/sysdep/system.h b/arch/um/sys-i386/shared/sysdep/system.h
6801index d1b93c4..ae1b7fd 100644
6802--- a/arch/um/sys-i386/shared/sysdep/system.h
6803+++ b/arch/um/sys-i386/shared/sysdep/system.h
6804@@ -17,7 +17,7 @@
6805 # define AT_VECTOR_SIZE_ARCH 1
6806 #endif
6807
6808-extern unsigned long arch_align_stack(unsigned long sp);
6809+#define arch_align_stack(x) ((x) & ~0xfUL)
6810
6811 void default_idle(void);
6812
6813diff --git a/arch/um/sys-i386/syscalls.c b/arch/um/sys-i386/syscalls.c
6814index 857ca0b..9a2669d 100644
6815--- a/arch/um/sys-i386/syscalls.c
6816+++ b/arch/um/sys-i386/syscalls.c
6817@@ -11,6 +11,21 @@
6818 #include "asm/uaccess.h"
6819 #include "asm/unistd.h"
6820
6821+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6822+{
6823+ unsigned long pax_task_size = TASK_SIZE;
6824+
6825+#ifdef CONFIG_PAX_SEGMEXEC
6826+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6827+ pax_task_size = SEGMEXEC_TASK_SIZE;
6828+#endif
6829+
6830+ if (len > pax_task_size || addr > pax_task_size - len)
6831+ return -EINVAL;
6832+
6833+ return 0;
6834+}
6835+
6836 /*
6837 * Perform the select(nd, in, out, ex, tv) and mmap() system
6838 * calls. Linux/i386 didn't use to be able to handle more than
6839diff --git a/arch/um/sys-x86_64/shared/sysdep/system.h b/arch/um/sys-x86_64/shared/sysdep/system.h
6840index d1b93c4..ae1b7fd 100644
6841--- a/arch/um/sys-x86_64/shared/sysdep/system.h
6842+++ b/arch/um/sys-x86_64/shared/sysdep/system.h
6843@@ -17,7 +17,7 @@
6844 # define AT_VECTOR_SIZE_ARCH 1
6845 #endif
6846
6847-extern unsigned long arch_align_stack(unsigned long sp);
6848+#define arch_align_stack(x) ((x) & ~0xfUL)
6849
6850 void default_idle(void);
6851
6852diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
6853index 73ae02a..f932de5 100644
6854--- a/arch/x86/Kconfig
6855+++ b/arch/x86/Kconfig
6856@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
6857
6858 config X86_32_LAZY_GS
6859 def_bool y
6860- depends on X86_32 && !CC_STACKPROTECTOR
6861+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
6862
6863 config KTIME_SCALAR
6864 def_bool X86_32
6865@@ -1008,7 +1008,7 @@ choice
6866
6867 config NOHIGHMEM
6868 bool "off"
6869- depends on !X86_NUMAQ
6870+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6871 ---help---
6872 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
6873 However, the address space of 32-bit x86 processors is only 4
6874@@ -1045,7 +1045,7 @@ config NOHIGHMEM
6875
6876 config HIGHMEM4G
6877 bool "4GB"
6878- depends on !X86_NUMAQ
6879+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
6880 ---help---
6881 Select this if you have a 32-bit processor and between 1 and 4
6882 gigabytes of physical RAM.
6883@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
6884 hex
6885 default 0xB0000000 if VMSPLIT_3G_OPT
6886 default 0x80000000 if VMSPLIT_2G
6887- default 0x78000000 if VMSPLIT_2G_OPT
6888+ default 0x70000000 if VMSPLIT_2G_OPT
6889 default 0x40000000 if VMSPLIT_1G
6890 default 0xC0000000
6891 depends on X86_32
6892@@ -1460,6 +1460,7 @@ config SECCOMP
6893
6894 config CC_STACKPROTECTOR
6895 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
6896+ depends on X86_64 || !PAX_MEMORY_UDEREF
6897 ---help---
6898 This option turns on the -fstack-protector GCC feature. This
6899 feature puts, at the beginning of functions, a canary value on
6900@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
6901 config PHYSICAL_START
6902 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
6903 default "0x1000000"
6904+ range 0x400000 0x40000000
6905 ---help---
6906 This gives the physical address where the kernel is loaded.
6907
6908@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
6909 hex
6910 prompt "Alignment value to which kernel should be aligned" if X86_32
6911 default "0x1000000"
6912+ range 0x400000 0x1000000 if PAX_KERNEXEC
6913 range 0x2000 0x1000000
6914 ---help---
6915 This value puts the alignment restrictions on physical address
6916@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
6917 Say N if you want to disable CPU hotplug.
6918
6919 config COMPAT_VDSO
6920- def_bool y
6921+ def_bool n
6922 prompt "Compat VDSO support"
6923 depends on X86_32 || IA32_EMULATION
6924+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
6925 ---help---
6926 Map the 32-bit VDSO to the predictable old-style address too.
6927 ---help---
6928diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
6929index 0e566103..1a6b57e 100644
6930--- a/arch/x86/Kconfig.cpu
6931+++ b/arch/x86/Kconfig.cpu
6932@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
6933
6934 config X86_F00F_BUG
6935 def_bool y
6936- depends on M586MMX || M586TSC || M586 || M486 || M386
6937+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
6938
6939 config X86_WP_WORKS_OK
6940 def_bool y
6941@@ -360,7 +360,7 @@ config X86_POPAD_OK
6942
6943 config X86_ALIGNMENT_16
6944 def_bool y
6945- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6946+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
6947
6948 config X86_INTEL_USERCOPY
6949 def_bool y
6950@@ -406,7 +406,7 @@ config X86_CMPXCHG64
6951 # generates cmov.
6952 config X86_CMOV
6953 def_bool y
6954- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6955+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
6956
6957 config X86_MINIMUM_CPU_FAMILY
6958 int
6959diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
6960index d105f29..c928727 100644
6961--- a/arch/x86/Kconfig.debug
6962+++ b/arch/x86/Kconfig.debug
6963@@ -99,7 +99,7 @@ config X86_PTDUMP
6964 config DEBUG_RODATA
6965 bool "Write protect kernel read-only data structures"
6966 default y
6967- depends on DEBUG_KERNEL
6968+ depends on DEBUG_KERNEL && BROKEN
6969 ---help---
6970 Mark the kernel read-only data as write-protected in the pagetables,
6971 in order to catch accidental (and incorrect) writes to such const
6972diff --git a/arch/x86/Makefile b/arch/x86/Makefile
6973index d2d24c9..0f21f8d 100644
6974--- a/arch/x86/Makefile
6975+++ b/arch/x86/Makefile
6976@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
6977 else
6978 BITS := 64
6979 UTS_MACHINE := x86_64
6980+ biarch := $(call cc-option,-m64)
6981 CHECKFLAGS += -D__x86_64__ -m64
6982
6983 KBUILD_AFLAGS += -m64
6984@@ -189,3 +190,12 @@ define archhelp
6985 echo ' FDARGS="..." arguments for the booted kernel'
6986 echo ' FDINITRD=file initrd for the booted kernel'
6987 endef
6988+
6989+define OLD_LD
6990+
6991+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
6992+*** Please upgrade your binutils to 2.18 or newer
6993+endef
6994+
6995+archprepare:
6996+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
6997diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
6998index ec749c2..bbb5319 100644
6999--- a/arch/x86/boot/Makefile
7000+++ b/arch/x86/boot/Makefile
7001@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
7002 $(call cc-option, -fno-stack-protector) \
7003 $(call cc-option, -mpreferred-stack-boundary=2)
7004 KBUILD_CFLAGS += $(call cc-option, -m32)
7005+ifdef CONSTIFY_PLUGIN
7006+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7007+endif
7008 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7009 GCOV_PROFILE := n
7010
7011diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
7012index 878e4b9..20537ab 100644
7013--- a/arch/x86/boot/bitops.h
7014+++ b/arch/x86/boot/bitops.h
7015@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7016 u8 v;
7017 const u32 *p = (const u32 *)addr;
7018
7019- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7020+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
7021 return v;
7022 }
7023
7024@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
7025
7026 static inline void set_bit(int nr, void *addr)
7027 {
7028- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7029+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
7030 }
7031
7032 #endif /* BOOT_BITOPS_H */
7033diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
7034index 98239d2..f40214c 100644
7035--- a/arch/x86/boot/boot.h
7036+++ b/arch/x86/boot/boot.h
7037@@ -82,7 +82,7 @@ static inline void io_delay(void)
7038 static inline u16 ds(void)
7039 {
7040 u16 seg;
7041- asm("movw %%ds,%0" : "=rm" (seg));
7042+ asm volatile("movw %%ds,%0" : "=rm" (seg));
7043 return seg;
7044 }
7045
7046@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t addr)
7047 static inline int memcmp(const void *s1, const void *s2, size_t len)
7048 {
7049 u8 diff;
7050- asm("repe; cmpsb; setnz %0"
7051+ asm volatile("repe; cmpsb; setnz %0"
7052 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
7053 return diff;
7054 }
7055diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
7056index f8ed065..5bf5ff3 100644
7057--- a/arch/x86/boot/compressed/Makefile
7058+++ b/arch/x86/boot/compressed/Makefile
7059@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
7060 KBUILD_CFLAGS += $(cflags-y)
7061 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
7062 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
7063+ifdef CONSTIFY_PLUGIN
7064+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
7065+endif
7066
7067 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
7068 GCOV_PROFILE := n
7069diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
7070index f543b70..b60fba8 100644
7071--- a/arch/x86/boot/compressed/head_32.S
7072+++ b/arch/x86/boot/compressed/head_32.S
7073@@ -76,7 +76,7 @@ ENTRY(startup_32)
7074 notl %eax
7075 andl %eax, %ebx
7076 #else
7077- movl $LOAD_PHYSICAL_ADDR, %ebx
7078+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7079 #endif
7080
7081 /* Target address to relocate to for decompression */
7082@@ -149,7 +149,7 @@ relocated:
7083 * and where it was actually loaded.
7084 */
7085 movl %ebp, %ebx
7086- subl $LOAD_PHYSICAL_ADDR, %ebx
7087+ subl $____LOAD_PHYSICAL_ADDR, %ebx
7088 jz 2f /* Nothing to be done if loaded at compiled addr. */
7089 /*
7090 * Process relocations.
7091@@ -157,8 +157,7 @@ relocated:
7092
7093 1: subl $4, %edi
7094 movl (%edi), %ecx
7095- testl %ecx, %ecx
7096- jz 2f
7097+ jecxz 2f
7098 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
7099 jmp 1b
7100 2:
7101diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
7102index 077e1b6..2c6b13b 100644
7103--- a/arch/x86/boot/compressed/head_64.S
7104+++ b/arch/x86/boot/compressed/head_64.S
7105@@ -91,7 +91,7 @@ ENTRY(startup_32)
7106 notl %eax
7107 andl %eax, %ebx
7108 #else
7109- movl $LOAD_PHYSICAL_ADDR, %ebx
7110+ movl $____LOAD_PHYSICAL_ADDR, %ebx
7111 #endif
7112
7113 /* Target address to relocate to for decompression */
7114@@ -183,7 +183,7 @@ no_longmode:
7115 hlt
7116 jmp 1b
7117
7118-#include "../../kernel/verify_cpu_64.S"
7119+#include "../../kernel/verify_cpu.S"
7120
7121 /*
7122 * Be careful here startup_64 needs to be at a predictable
7123@@ -234,7 +234,7 @@ ENTRY(startup_64)
7124 notq %rax
7125 andq %rax, %rbp
7126 #else
7127- movq $LOAD_PHYSICAL_ADDR, %rbp
7128+ movq $____LOAD_PHYSICAL_ADDR, %rbp
7129 #endif
7130
7131 /* Target address to relocate to for decompression */
7132diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
7133index 842b2a3..f00178b 100644
7134--- a/arch/x86/boot/compressed/misc.c
7135+++ b/arch/x86/boot/compressed/misc.c
7136@@ -288,7 +288,7 @@ static void parse_elf(void *output)
7137 case PT_LOAD:
7138 #ifdef CONFIG_RELOCATABLE
7139 dest = output;
7140- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
7141+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
7142 #else
7143 dest = (void *)(phdr->p_paddr);
7144 #endif
7145@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
7146 error("Destination address too large");
7147 #endif
7148 #ifndef CONFIG_RELOCATABLE
7149- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
7150+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
7151 error("Wrong destination address");
7152 #endif
7153
7154diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
7155index bcbd36c..b1754af 100644
7156--- a/arch/x86/boot/compressed/mkpiggy.c
7157+++ b/arch/x86/boot/compressed/mkpiggy.c
7158@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
7159
7160 offs = (olen > ilen) ? olen - ilen : 0;
7161 offs += olen >> 12; /* Add 8 bytes for each 32K block */
7162- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
7163+ offs += 64*1024; /* Add 64K bytes slack */
7164 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
7165
7166 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
7167diff --git a/arch/x86/boot/compressed/relocs.c b/arch/x86/boot/compressed/relocs.c
7168index bbeb0c3..f5167ab 100644
7169--- a/arch/x86/boot/compressed/relocs.c
7170+++ b/arch/x86/boot/compressed/relocs.c
7171@@ -10,8 +10,11 @@
7172 #define USE_BSD
7173 #include <endian.h>
7174
7175+#include "../../../../include/linux/autoconf.h"
7176+
7177 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
7178 static Elf32_Ehdr ehdr;
7179+static Elf32_Phdr *phdr;
7180 static unsigned long reloc_count, reloc_idx;
7181 static unsigned long *relocs;
7182
7183@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
7184
7185 static int is_safe_abs_reloc(const char* sym_name)
7186 {
7187- int i;
7188+ unsigned int i;
7189
7190 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
7191 if (!strcmp(sym_name, safe_abs_relocs[i]))
7192@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
7193 }
7194 }
7195
7196+static void read_phdrs(FILE *fp)
7197+{
7198+ unsigned int i;
7199+
7200+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
7201+ if (!phdr) {
7202+ die("Unable to allocate %d program headers\n",
7203+ ehdr.e_phnum);
7204+ }
7205+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
7206+ die("Seek to %d failed: %s\n",
7207+ ehdr.e_phoff, strerror(errno));
7208+ }
7209+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
7210+ die("Cannot read ELF program headers: %s\n",
7211+ strerror(errno));
7212+ }
7213+ for(i = 0; i < ehdr.e_phnum; i++) {
7214+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
7215+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
7216+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
7217+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
7218+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
7219+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
7220+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
7221+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
7222+ }
7223+
7224+}
7225+
7226 static void read_shdrs(FILE *fp)
7227 {
7228- int i;
7229+ unsigned int i;
7230 Elf32_Shdr shdr;
7231
7232 secs = calloc(ehdr.e_shnum, sizeof(struct section));
7233@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
7234
7235 static void read_strtabs(FILE *fp)
7236 {
7237- int i;
7238+ unsigned int i;
7239 for (i = 0; i < ehdr.e_shnum; i++) {
7240 struct section *sec = &secs[i];
7241 if (sec->shdr.sh_type != SHT_STRTAB) {
7242@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
7243
7244 static void read_symtabs(FILE *fp)
7245 {
7246- int i,j;
7247+ unsigned int i,j;
7248 for (i = 0; i < ehdr.e_shnum; i++) {
7249 struct section *sec = &secs[i];
7250 if (sec->shdr.sh_type != SHT_SYMTAB) {
7251@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
7252
7253 static void read_relocs(FILE *fp)
7254 {
7255- int i,j;
7256+ unsigned int i,j;
7257+ uint32_t base;
7258+
7259 for (i = 0; i < ehdr.e_shnum; i++) {
7260 struct section *sec = &secs[i];
7261 if (sec->shdr.sh_type != SHT_REL) {
7262@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
7263 die("Cannot read symbol table: %s\n",
7264 strerror(errno));
7265 }
7266+ base = 0;
7267+ for (j = 0; j < ehdr.e_phnum; j++) {
7268+ if (phdr[j].p_type != PT_LOAD )
7269+ continue;
7270+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
7271+ continue;
7272+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
7273+ break;
7274+ }
7275 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
7276 Elf32_Rel *rel = &sec->reltab[j];
7277- rel->r_offset = elf32_to_cpu(rel->r_offset);
7278+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
7279 rel->r_info = elf32_to_cpu(rel->r_info);
7280 }
7281 }
7282@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
7283
7284 static void print_absolute_symbols(void)
7285 {
7286- int i;
7287+ unsigned int i;
7288 printf("Absolute symbols\n");
7289 printf(" Num: Value Size Type Bind Visibility Name\n");
7290 for (i = 0; i < ehdr.e_shnum; i++) {
7291 struct section *sec = &secs[i];
7292 char *sym_strtab;
7293 Elf32_Sym *sh_symtab;
7294- int j;
7295+ unsigned int j;
7296
7297 if (sec->shdr.sh_type != SHT_SYMTAB) {
7298 continue;
7299@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
7300
7301 static void print_absolute_relocs(void)
7302 {
7303- int i, printed = 0;
7304+ unsigned int i, printed = 0;
7305
7306 for (i = 0; i < ehdr.e_shnum; i++) {
7307 struct section *sec = &secs[i];
7308 struct section *sec_applies, *sec_symtab;
7309 char *sym_strtab;
7310 Elf32_Sym *sh_symtab;
7311- int j;
7312+ unsigned int j;
7313 if (sec->shdr.sh_type != SHT_REL) {
7314 continue;
7315 }
7316@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
7317
7318 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7319 {
7320- int i;
7321+ unsigned int i;
7322 /* Walk through the relocations */
7323 for (i = 0; i < ehdr.e_shnum; i++) {
7324 char *sym_strtab;
7325 Elf32_Sym *sh_symtab;
7326 struct section *sec_applies, *sec_symtab;
7327- int j;
7328+ unsigned int j;
7329 struct section *sec = &secs[i];
7330
7331 if (sec->shdr.sh_type != SHT_REL) {
7332@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
7333 if (sym->st_shndx == SHN_ABS) {
7334 continue;
7335 }
7336+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
7337+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
7338+ continue;
7339+
7340+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
7341+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
7342+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
7343+ continue;
7344+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
7345+ continue;
7346+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
7347+ continue;
7348+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
7349+ continue;
7350+#endif
7351 if (r_type == R_386_NONE || r_type == R_386_PC32) {
7352 /*
7353 * NONE can be ignored and and PC relative
7354@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, const void *vb)
7355
7356 static void emit_relocs(int as_text)
7357 {
7358- int i;
7359+ unsigned int i;
7360 /* Count how many relocations I have and allocate space for them. */
7361 reloc_count = 0;
7362 walk_relocs(count_reloc);
7363@@ -634,6 +693,7 @@ int main(int argc, char **argv)
7364 fname, strerror(errno));
7365 }
7366 read_ehdr(fp);
7367+ read_phdrs(fp);
7368 read_shdrs(fp);
7369 read_strtabs(fp);
7370 read_symtabs(fp);
7371diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
7372index 4d3ff03..e4972ff 100644
7373--- a/arch/x86/boot/cpucheck.c
7374+++ b/arch/x86/boot/cpucheck.c
7375@@ -74,7 +74,7 @@ static int has_fpu(void)
7376 u16 fcw = -1, fsw = -1;
7377 u32 cr0;
7378
7379- asm("movl %%cr0,%0" : "=r" (cr0));
7380+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
7381 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
7382 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
7383 asm volatile("movl %0,%%cr0" : : "r" (cr0));
7384@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
7385 {
7386 u32 f0, f1;
7387
7388- asm("pushfl ; "
7389+ asm volatile("pushfl ; "
7390 "pushfl ; "
7391 "popl %0 ; "
7392 "movl %0,%1 ; "
7393@@ -115,7 +115,7 @@ static void get_flags(void)
7394 set_bit(X86_FEATURE_FPU, cpu.flags);
7395
7396 if (has_eflag(X86_EFLAGS_ID)) {
7397- asm("cpuid"
7398+ asm volatile("cpuid"
7399 : "=a" (max_intel_level),
7400 "=b" (cpu_vendor[0]),
7401 "=d" (cpu_vendor[1]),
7402@@ -124,7 +124,7 @@ static void get_flags(void)
7403
7404 if (max_intel_level >= 0x00000001 &&
7405 max_intel_level <= 0x0000ffff) {
7406- asm("cpuid"
7407+ asm volatile("cpuid"
7408 : "=a" (tfms),
7409 "=c" (cpu.flags[4]),
7410 "=d" (cpu.flags[0])
7411@@ -136,7 +136,7 @@ static void get_flags(void)
7412 cpu.model += ((tfms >> 16) & 0xf) << 4;
7413 }
7414
7415- asm("cpuid"
7416+ asm volatile("cpuid"
7417 : "=a" (max_amd_level)
7418 : "a" (0x80000000)
7419 : "ebx", "ecx", "edx");
7420@@ -144,7 +144,7 @@ static void get_flags(void)
7421 if (max_amd_level >= 0x80000001 &&
7422 max_amd_level <= 0x8000ffff) {
7423 u32 eax = 0x80000001;
7424- asm("cpuid"
7425+ asm volatile("cpuid"
7426 : "+a" (eax),
7427 "=c" (cpu.flags[6]),
7428 "=d" (cpu.flags[1])
7429@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7430 u32 ecx = MSR_K7_HWCR;
7431 u32 eax, edx;
7432
7433- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7434+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7435 eax &= ~(1 << 15);
7436- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7437+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7438
7439 get_flags(); /* Make sure it really did something */
7440 err = check_flags();
7441@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7442 u32 ecx = MSR_VIA_FCR;
7443 u32 eax, edx;
7444
7445- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7446+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7447 eax |= (1<<1)|(1<<7);
7448- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7449+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7450
7451 set_bit(X86_FEATURE_CX8, cpu.flags);
7452 err = check_flags();
7453@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
7454 u32 eax, edx;
7455 u32 level = 1;
7456
7457- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7458- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7459- asm("cpuid"
7460+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
7461+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
7462+ asm volatile("cpuid"
7463 : "+a" (level), "=d" (cpu.flags[0])
7464 : : "ecx", "ebx");
7465- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7466+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
7467
7468 err = check_flags();
7469 }
7470diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
7471index b31cc54..8d69237 100644
7472--- a/arch/x86/boot/header.S
7473+++ b/arch/x86/boot/header.S
7474@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical pointer to
7475 # single linked list of
7476 # struct setup_data
7477
7478-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
7479+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
7480
7481 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
7482 #define VO_INIT_SIZE (VO__end - VO__text)
7483diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
7484index cae3feb..ff8ff2a 100644
7485--- a/arch/x86/boot/memory.c
7486+++ b/arch/x86/boot/memory.c
7487@@ -19,7 +19,7 @@
7488
7489 static int detect_memory_e820(void)
7490 {
7491- int count = 0;
7492+ unsigned int count = 0;
7493 struct biosregs ireg, oreg;
7494 struct e820entry *desc = boot_params.e820_map;
7495 static struct e820entry buf; /* static so it is zeroed */
7496diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
7497index 11e8c6e..fdbb1ed 100644
7498--- a/arch/x86/boot/video-vesa.c
7499+++ b/arch/x86/boot/video-vesa.c
7500@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
7501
7502 boot_params.screen_info.vesapm_seg = oreg.es;
7503 boot_params.screen_info.vesapm_off = oreg.di;
7504+ boot_params.screen_info.vesapm_size = oreg.cx;
7505 }
7506
7507 /*
7508diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
7509index d42da38..787cdf3 100644
7510--- a/arch/x86/boot/video.c
7511+++ b/arch/x86/boot/video.c
7512@@ -90,7 +90,7 @@ static void store_mode_params(void)
7513 static unsigned int get_entry(void)
7514 {
7515 char entry_buf[4];
7516- int i, len = 0;
7517+ unsigned int i, len = 0;
7518 int key;
7519 unsigned int v;
7520
7521diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
7522index 5b577d5..3c1fed4 100644
7523--- a/arch/x86/crypto/aes-x86_64-asm_64.S
7524+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
7525@@ -8,6 +8,8 @@
7526 * including this sentence is retained in full.
7527 */
7528
7529+#include <asm/alternative-asm.h>
7530+
7531 .extern crypto_ft_tab
7532 .extern crypto_it_tab
7533 .extern crypto_fl_tab
7534@@ -71,6 +73,8 @@ FUNC: movq r1,r2; \
7535 je B192; \
7536 leaq 32(r9),r9;
7537
7538+#define ret pax_force_retaddr 0, 1; ret
7539+
7540 #define epilogue(r1,r2,r3,r4,r5,r6,r7,r8,r9) \
7541 movq r1,r2; \
7542 movq r3,r4; \
7543diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
7544index eb0566e..e3ebad8 100644
7545--- a/arch/x86/crypto/aesni-intel_asm.S
7546+++ b/arch/x86/crypto/aesni-intel_asm.S
7547@@ -16,6 +16,7 @@
7548 */
7549
7550 #include <linux/linkage.h>
7551+#include <asm/alternative-asm.h>
7552
7553 .text
7554
7555@@ -52,6 +53,7 @@ _key_expansion_256a:
7556 pxor %xmm1, %xmm0
7557 movaps %xmm0, (%rcx)
7558 add $0x10, %rcx
7559+ pax_force_retaddr_bts
7560 ret
7561
7562 _key_expansion_192a:
7563@@ -75,6 +77,7 @@ _key_expansion_192a:
7564 shufps $0b01001110, %xmm2, %xmm1
7565 movaps %xmm1, 16(%rcx)
7566 add $0x20, %rcx
7567+ pax_force_retaddr_bts
7568 ret
7569
7570 _key_expansion_192b:
7571@@ -93,6 +96,7 @@ _key_expansion_192b:
7572
7573 movaps %xmm0, (%rcx)
7574 add $0x10, %rcx
7575+ pax_force_retaddr_bts
7576 ret
7577
7578 _key_expansion_256b:
7579@@ -104,6 +108,7 @@ _key_expansion_256b:
7580 pxor %xmm1, %xmm2
7581 movaps %xmm2, (%rcx)
7582 add $0x10, %rcx
7583+ pax_force_retaddr_bts
7584 ret
7585
7586 /*
7587@@ -239,7 +244,9 @@ ENTRY(aesni_set_key)
7588 cmp %rcx, %rdi
7589 jb .Ldec_key_loop
7590 xor %rax, %rax
7591+ pax_force_retaddr 0, 1
7592 ret
7593+ENDPROC(aesni_set_key)
7594
7595 /*
7596 * void aesni_enc(struct crypto_aes_ctx *ctx, u8 *dst, const u8 *src)
7597@@ -249,7 +256,9 @@ ENTRY(aesni_enc)
7598 movups (INP), STATE # input
7599 call _aesni_enc1
7600 movups STATE, (OUTP) # output
7601+ pax_force_retaddr 0, 1
7602 ret
7603+ENDPROC(aesni_enc)
7604
7605 /*
7606 * _aesni_enc1: internal ABI
7607@@ -319,6 +328,7 @@ _aesni_enc1:
7608 movaps 0x70(TKEYP), KEY
7609 # aesenclast KEY, STATE # last round
7610 .byte 0x66, 0x0f, 0x38, 0xdd, 0xc2
7611+ pax_force_retaddr_bts
7612 ret
7613
7614 /*
7615@@ -482,6 +492,7 @@ _aesni_enc4:
7616 .byte 0x66, 0x0f, 0x38, 0xdd, 0xea
7617 # aesenclast KEY, STATE4
7618 .byte 0x66, 0x0f, 0x38, 0xdd, 0xf2
7619+ pax_force_retaddr_bts
7620 ret
7621
7622 /*
7623@@ -493,7 +504,9 @@ ENTRY(aesni_dec)
7624 movups (INP), STATE # input
7625 call _aesni_dec1
7626 movups STATE, (OUTP) #output
7627+ pax_force_retaddr 0, 1
7628 ret
7629+ENDPROC(aesni_dec)
7630
7631 /*
7632 * _aesni_dec1: internal ABI
7633@@ -563,6 +576,7 @@ _aesni_dec1:
7634 movaps 0x70(TKEYP), KEY
7635 # aesdeclast KEY, STATE # last round
7636 .byte 0x66, 0x0f, 0x38, 0xdf, 0xc2
7637+ pax_force_retaddr_bts
7638 ret
7639
7640 /*
7641@@ -726,6 +740,7 @@ _aesni_dec4:
7642 .byte 0x66, 0x0f, 0x38, 0xdf, 0xea
7643 # aesdeclast KEY, STATE4
7644 .byte 0x66, 0x0f, 0x38, 0xdf, 0xf2
7645+ pax_force_retaddr_bts
7646 ret
7647
7648 /*
7649@@ -769,7 +784,9 @@ ENTRY(aesni_ecb_enc)
7650 cmp $16, LEN
7651 jge .Lecb_enc_loop1
7652 .Lecb_enc_ret:
7653+ pax_force_retaddr 0, 1
7654 ret
7655+ENDPROC(aesni_ecb_enc)
7656
7657 /*
7658 * void aesni_ecb_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7659@@ -813,7 +830,9 @@ ENTRY(aesni_ecb_dec)
7660 cmp $16, LEN
7661 jge .Lecb_dec_loop1
7662 .Lecb_dec_ret:
7663+ pax_force_retaddr 0, 1
7664 ret
7665+ENDPROC(aesni_ecb_dec)
7666
7667 /*
7668 * void aesni_cbc_enc(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7669@@ -837,7 +856,9 @@ ENTRY(aesni_cbc_enc)
7670 jge .Lcbc_enc_loop
7671 movups STATE, (IVP)
7672 .Lcbc_enc_ret:
7673+ pax_force_retaddr 0, 1
7674 ret
7675+ENDPROC(aesni_cbc_enc)
7676
7677 /*
7678 * void aesni_cbc_dec(struct crypto_aes_ctx *ctx, const u8 *dst, u8 *src,
7679@@ -894,4 +915,6 @@ ENTRY(aesni_cbc_dec)
7680 .Lcbc_dec_ret:
7681 movups IV, (IVP)
7682 .Lcbc_dec_just_ret:
7683+ pax_force_retaddr 0, 1
7684 ret
7685+ENDPROC(aesni_cbc_dec)
7686diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7687index 6214a9b..1f4fc9a 100644
7688--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
7689+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
7690@@ -1,3 +1,5 @@
7691+#include <asm/alternative-asm.h>
7692+
7693 # enter ECRYPT_encrypt_bytes
7694 .text
7695 .p2align 5
7696@@ -790,6 +792,7 @@ ECRYPT_encrypt_bytes:
7697 add %r11,%rsp
7698 mov %rdi,%rax
7699 mov %rsi,%rdx
7700+ pax_force_retaddr 0, 1
7701 ret
7702 # bytesatleast65:
7703 ._bytesatleast65:
7704@@ -891,6 +894,7 @@ ECRYPT_keysetup:
7705 add %r11,%rsp
7706 mov %rdi,%rax
7707 mov %rsi,%rdx
7708+ pax_force_retaddr
7709 ret
7710 # enter ECRYPT_ivsetup
7711 .text
7712@@ -917,4 +921,5 @@ ECRYPT_ivsetup:
7713 add %r11,%rsp
7714 mov %rdi,%rax
7715 mov %rsi,%rdx
7716+ pax_force_retaddr
7717 ret
7718diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
7719index 35974a5..5662ae2 100644
7720--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
7721+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
7722@@ -21,6 +21,7 @@
7723 .text
7724
7725 #include <asm/asm-offsets.h>
7726+#include <asm/alternative-asm.h>
7727
7728 #define a_offset 0
7729 #define b_offset 4
7730@@ -269,6 +270,7 @@ twofish_enc_blk:
7731
7732 popq R1
7733 movq $1,%rax
7734+ pax_force_retaddr 0, 1
7735 ret
7736
7737 twofish_dec_blk:
7738@@ -321,4 +323,5 @@ twofish_dec_blk:
7739
7740 popq R1
7741 movq $1,%rax
7742+ pax_force_retaddr 0, 1
7743 ret
7744diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
7745index 14531ab..a89a0c0 100644
7746--- a/arch/x86/ia32/ia32_aout.c
7747+++ b/arch/x86/ia32/ia32_aout.c
7748@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7749 unsigned long dump_start, dump_size;
7750 struct user32 dump;
7751
7752+ memset(&dump, 0, sizeof(dump));
7753+
7754 fs = get_fs();
7755 set_fs(KERNEL_DS);
7756 has_dumped = 1;
7757@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
7758 dump_size = dump.u_ssize << PAGE_SHIFT;
7759 DUMP_WRITE(dump_start, dump_size);
7760 }
7761- /*
7762- * Finally dump the task struct. Not be used by gdb, but
7763- * could be useful
7764- */
7765- set_fs(KERNEL_DS);
7766- DUMP_WRITE(current, sizeof(*current));
7767 end_coredump:
7768 set_fs(fs);
7769 return has_dumped;
7770diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
7771index 588a7aa..a3468b0 100644
7772--- a/arch/x86/ia32/ia32_signal.c
7773+++ b/arch/x86/ia32/ia32_signal.c
7774@@ -167,7 +167,7 @@ asmlinkage long sys32_sigaltstack(const stack_ia32_t __user *uss_ptr,
7775 }
7776 seg = get_fs();
7777 set_fs(KERNEL_DS);
7778- ret = do_sigaltstack(uss_ptr ? &uss : NULL, &uoss, regs->sp);
7779+ ret = do_sigaltstack(uss_ptr ? (const stack_t __force_user *)&uss : NULL, (stack_t __force_user *)&uoss, regs->sp);
7780 set_fs(seg);
7781 if (ret >= 0 && uoss_ptr) {
7782 if (!access_ok(VERIFY_WRITE, uoss_ptr, sizeof(stack_ia32_t)))
7783@@ -374,7 +374,7 @@ static int ia32_setup_sigcontext(struct sigcontext_ia32 __user *sc,
7784 */
7785 static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7786 size_t frame_size,
7787- void **fpstate)
7788+ void __user **fpstate)
7789 {
7790 unsigned long sp;
7791
7792@@ -395,7 +395,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7793
7794 if (used_math()) {
7795 sp = sp - sig_xstate_ia32_size;
7796- *fpstate = (struct _fpstate_ia32 *) sp;
7797+ *fpstate = (struct _fpstate_ia32 __user *) sp;
7798 if (save_i387_xstate_ia32(*fpstate) < 0)
7799 return (void __user *) -1L;
7800 }
7801@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
7802 sp -= frame_size;
7803 /* Align the stack pointer according to the i386 ABI,
7804 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
7805- sp = ((sp + 4) & -16ul) - 4;
7806+ sp = ((sp - 12) & -16ul) - 4;
7807 return (void __user *) sp;
7808 }
7809
7810@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_sigaction *ka,
7811 * These are actually not used anymore, but left because some
7812 * gdb versions depend on them as a marker.
7813 */
7814- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7815+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7816 } put_user_catch(err);
7817
7818 if (err)
7819@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7820 0xb8,
7821 __NR_ia32_rt_sigreturn,
7822 0x80cd,
7823- 0,
7824+ 0
7825 };
7826
7827 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
7828@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
7829
7830 if (ka->sa.sa_flags & SA_RESTORER)
7831 restorer = ka->sa.sa_restorer;
7832+ else if (current->mm->context.vdso)
7833+ /* Return stub is in 32bit vsyscall page */
7834+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
7835 else
7836- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
7837- rt_sigreturn);
7838+ restorer = &frame->retcode;
7839 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
7840
7841 /*
7842 * Not actually used anymore, but left because some gdb
7843 * versions need it.
7844 */
7845- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
7846+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
7847 } put_user_catch(err);
7848
7849 if (err)
7850diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
7851index 4edd8eb..29124b4 100644
7852--- a/arch/x86/ia32/ia32entry.S
7853+++ b/arch/x86/ia32/ia32entry.S
7854@@ -13,7 +13,9 @@
7855 #include <asm/thread_info.h>
7856 #include <asm/segment.h>
7857 #include <asm/irqflags.h>
7858+#include <asm/pgtable.h>
7859 #include <linux/linkage.h>
7860+#include <asm/alternative-asm.h>
7861
7862 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
7863 #include <linux/elf-em.h>
7864@@ -93,6 +95,32 @@ ENTRY(native_irq_enable_sysexit)
7865 ENDPROC(native_irq_enable_sysexit)
7866 #endif
7867
7868+ .macro pax_enter_kernel_user
7869+ pax_set_fptr_mask
7870+#ifdef CONFIG_PAX_MEMORY_UDEREF
7871+ call pax_enter_kernel_user
7872+#endif
7873+ .endm
7874+
7875+ .macro pax_exit_kernel_user
7876+#ifdef CONFIG_PAX_MEMORY_UDEREF
7877+ call pax_exit_kernel_user
7878+#endif
7879+#ifdef CONFIG_PAX_RANDKSTACK
7880+ pushq %rax
7881+ pushq %r11
7882+ call pax_randomize_kstack
7883+ popq %r11
7884+ popq %rax
7885+#endif
7886+ .endm
7887+
7888+.macro pax_erase_kstack
7889+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
7890+ call pax_erase_kstack
7891+#endif
7892+.endm
7893+
7894 /*
7895 * 32bit SYSENTER instruction entry.
7896 *
7897@@ -119,12 +147,6 @@ ENTRY(ia32_sysenter_target)
7898 CFI_REGISTER rsp,rbp
7899 SWAPGS_UNSAFE_STACK
7900 movq PER_CPU_VAR(kernel_stack), %rsp
7901- addq $(KERNEL_STACK_OFFSET),%rsp
7902- /*
7903- * No need to follow this irqs on/off section: the syscall
7904- * disabled irqs, here we enable it straight after entry:
7905- */
7906- ENABLE_INTERRUPTS(CLBR_NONE)
7907 movl %ebp,%ebp /* zero extension */
7908 pushq $__USER32_DS
7909 CFI_ADJUST_CFA_OFFSET 8
7910@@ -135,28 +157,42 @@ ENTRY(ia32_sysenter_target)
7911 pushfq
7912 CFI_ADJUST_CFA_OFFSET 8
7913 /*CFI_REL_OFFSET rflags,0*/
7914- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
7915- CFI_REGISTER rip,r10
7916+ orl $X86_EFLAGS_IF,(%rsp)
7917+ GET_THREAD_INFO(%r11)
7918+ movl TI_sysenter_return(%r11), %r11d
7919+ CFI_REGISTER rip,r11
7920 pushq $__USER32_CS
7921 CFI_ADJUST_CFA_OFFSET 8
7922 /*CFI_REL_OFFSET cs,0*/
7923 movl %eax, %eax
7924- pushq %r10
7925+ pushq %r11
7926 CFI_ADJUST_CFA_OFFSET 8
7927 CFI_REL_OFFSET rip,0
7928 pushq %rax
7929 CFI_ADJUST_CFA_OFFSET 8
7930 cld
7931 SAVE_ARGS 0,0,1
7932+ pax_enter_kernel_user
7933+ /*
7934+ * No need to follow this irqs on/off section: the syscall
7935+ * disabled irqs, here we enable it straight after entry:
7936+ */
7937+ ENABLE_INTERRUPTS(CLBR_NONE)
7938 /* no need to do an access_ok check here because rbp has been
7939 32bit zero extended */
7940+
7941+#ifdef CONFIG_PAX_MEMORY_UDEREF
7942+ mov $PAX_USER_SHADOW_BASE,%r11
7943+ add %r11,%rbp
7944+#endif
7945+
7946 1: movl (%rbp),%ebp
7947 .section __ex_table,"a"
7948 .quad 1b,ia32_badarg
7949 .previous
7950- GET_THREAD_INFO(%r10)
7951- orl $TS_COMPAT,TI_status(%r10)
7952- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
7953+ GET_THREAD_INFO(%r11)
7954+ orl $TS_COMPAT,TI_status(%r11)
7955+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
7956 CFI_REMEMBER_STATE
7957 jnz sysenter_tracesys
7958 cmpq $(IA32_NR_syscalls-1),%rax
7959@@ -166,13 +202,15 @@ sysenter_do_call:
7960 sysenter_dispatch:
7961 call *ia32_sys_call_table(,%rax,8)
7962 movq %rax,RAX-ARGOFFSET(%rsp)
7963- GET_THREAD_INFO(%r10)
7964+ GET_THREAD_INFO(%r11)
7965 DISABLE_INTERRUPTS(CLBR_NONE)
7966 TRACE_IRQS_OFF
7967- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
7968+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
7969 jnz sysexit_audit
7970 sysexit_from_sys_call:
7971- andl $~TS_COMPAT,TI_status(%r10)
7972+ pax_exit_kernel_user
7973+ pax_erase_kstack
7974+ andl $~TS_COMPAT,TI_status(%r11)
7975 /* clear IF, that popfq doesn't enable interrupts early */
7976 andl $~0x200,EFLAGS-R11(%rsp)
7977 movl RIP-R11(%rsp),%edx /* User %eip */
7978@@ -200,6 +238,9 @@ sysexit_from_sys_call:
7979 movl %eax,%esi /* 2nd arg: syscall number */
7980 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
7981 call audit_syscall_entry
7982+
7983+ pax_erase_kstack
7984+
7985 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
7986 cmpq $(IA32_NR_syscalls-1),%rax
7987 ja ia32_badsys
7988@@ -211,7 +252,7 @@ sysexit_from_sys_call:
7989 .endm
7990
7991 .macro auditsys_exit exit
7992- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
7993+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
7994 jnz ia32_ret_from_sys_call
7995 TRACE_IRQS_ON
7996 sti
7997@@ -221,12 +262,12 @@ sysexit_from_sys_call:
7998 movzbl %al,%edi /* zero-extend that into %edi */
7999 inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
8000 call audit_syscall_exit
8001- GET_THREAD_INFO(%r10)
8002+ GET_THREAD_INFO(%r11)
8003 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall return value */
8004 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
8005 cli
8006 TRACE_IRQS_OFF
8007- testl %edi,TI_flags(%r10)
8008+ testl %edi,TI_flags(%r11)
8009 jz \exit
8010 CLEAR_RREGS -ARGOFFSET
8011 jmp int_with_check
8012@@ -244,7 +285,7 @@ sysexit_audit:
8013
8014 sysenter_tracesys:
8015 #ifdef CONFIG_AUDITSYSCALL
8016- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8017+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8018 jz sysenter_auditsys
8019 #endif
8020 SAVE_REST
8021@@ -252,6 +293,9 @@ sysenter_tracesys:
8022 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
8023 movq %rsp,%rdi /* &pt_regs -> arg1 */
8024 call syscall_trace_enter
8025+
8026+ pax_erase_kstack
8027+
8028 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8029 RESTORE_REST
8030 cmpq $(IA32_NR_syscalls-1),%rax
8031@@ -283,19 +327,20 @@ ENDPROC(ia32_sysenter_target)
8032 ENTRY(ia32_cstar_target)
8033 CFI_STARTPROC32 simple
8034 CFI_SIGNAL_FRAME
8035- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
8036+ CFI_DEF_CFA rsp,0
8037 CFI_REGISTER rip,rcx
8038 /*CFI_REGISTER rflags,r11*/
8039 SWAPGS_UNSAFE_STACK
8040 movl %esp,%r8d
8041 CFI_REGISTER rsp,r8
8042 movq PER_CPU_VAR(kernel_stack),%rsp
8043+ SAVE_ARGS 8*6,1,1
8044+ pax_enter_kernel_user
8045 /*
8046 * No need to follow this irqs on/off section: the syscall
8047 * disabled irqs and here we enable it straight after entry:
8048 */
8049 ENABLE_INTERRUPTS(CLBR_NONE)
8050- SAVE_ARGS 8,1,1
8051 movl %eax,%eax /* zero extension */
8052 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
8053 movq %rcx,RIP-ARGOFFSET(%rsp)
8054@@ -311,13 +356,19 @@ ENTRY(ia32_cstar_target)
8055 /* no need to do an access_ok check here because r8 has been
8056 32bit zero extended */
8057 /* hardware stack frame is complete now */
8058+
8059+#ifdef CONFIG_PAX_MEMORY_UDEREF
8060+ mov $PAX_USER_SHADOW_BASE,%r11
8061+ add %r11,%r8
8062+#endif
8063+
8064 1: movl (%r8),%r9d
8065 .section __ex_table,"a"
8066 .quad 1b,ia32_badarg
8067 .previous
8068- GET_THREAD_INFO(%r10)
8069- orl $TS_COMPAT,TI_status(%r10)
8070- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8071+ GET_THREAD_INFO(%r11)
8072+ orl $TS_COMPAT,TI_status(%r11)
8073+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8074 CFI_REMEMBER_STATE
8075 jnz cstar_tracesys
8076 cmpq $IA32_NR_syscalls-1,%rax
8077@@ -327,13 +378,15 @@ cstar_do_call:
8078 cstar_dispatch:
8079 call *ia32_sys_call_table(,%rax,8)
8080 movq %rax,RAX-ARGOFFSET(%rsp)
8081- GET_THREAD_INFO(%r10)
8082+ GET_THREAD_INFO(%r11)
8083 DISABLE_INTERRUPTS(CLBR_NONE)
8084 TRACE_IRQS_OFF
8085- testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
8086+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
8087 jnz sysretl_audit
8088 sysretl_from_sys_call:
8089- andl $~TS_COMPAT,TI_status(%r10)
8090+ pax_exit_kernel_user
8091+ pax_erase_kstack
8092+ andl $~TS_COMPAT,TI_status(%r11)
8093 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
8094 movl RIP-ARGOFFSET(%rsp),%ecx
8095 CFI_REGISTER rip,rcx
8096@@ -361,7 +414,7 @@ sysretl_audit:
8097
8098 cstar_tracesys:
8099 #ifdef CONFIG_AUDITSYSCALL
8100- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r10)
8101+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
8102 jz cstar_auditsys
8103 #endif
8104 xchgl %r9d,%ebp
8105@@ -370,6 +423,9 @@ cstar_tracesys:
8106 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8107 movq %rsp,%rdi /* &pt_regs -> arg1 */
8108 call syscall_trace_enter
8109+
8110+ pax_erase_kstack
8111+
8112 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
8113 RESTORE_REST
8114 xchgl %ebp,%r9d
8115@@ -415,11 +471,6 @@ ENTRY(ia32_syscall)
8116 CFI_REL_OFFSET rip,RIP-RIP
8117 PARAVIRT_ADJUST_EXCEPTION_FRAME
8118 SWAPGS
8119- /*
8120- * No need to follow this irqs on/off section: the syscall
8121- * disabled irqs and here we enable it straight after entry:
8122- */
8123- ENABLE_INTERRUPTS(CLBR_NONE)
8124 movl %eax,%eax
8125 pushq %rax
8126 CFI_ADJUST_CFA_OFFSET 8
8127@@ -427,9 +478,15 @@ ENTRY(ia32_syscall)
8128 /* note the registers are not zero extended to the sf.
8129 this could be a problem. */
8130 SAVE_ARGS 0,0,1
8131- GET_THREAD_INFO(%r10)
8132- orl $TS_COMPAT,TI_status(%r10)
8133- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10)
8134+ pax_enter_kernel_user
8135+ /*
8136+ * No need to follow this irqs on/off section: the syscall
8137+ * disabled irqs and here we enable it straight after entry:
8138+ */
8139+ ENABLE_INTERRUPTS(CLBR_NONE)
8140+ GET_THREAD_INFO(%r11)
8141+ orl $TS_COMPAT,TI_status(%r11)
8142+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
8143 jnz ia32_tracesys
8144 cmpq $(IA32_NR_syscalls-1),%rax
8145 ja ia32_badsys
8146@@ -448,6 +505,9 @@ ia32_tracesys:
8147 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
8148 movq %rsp,%rdi /* &pt_regs -> arg1 */
8149 call syscall_trace_enter
8150+
8151+ pax_erase_kstack
8152+
8153 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
8154 RESTORE_REST
8155 cmpq $(IA32_NR_syscalls-1),%rax
8156@@ -462,6 +522,7 @@ ia32_badsys:
8157
8158 quiet_ni_syscall:
8159 movq $-ENOSYS,%rax
8160+ pax_force_retaddr
8161 ret
8162 CFI_ENDPROC
8163
8164diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
8165index 016218c..47ccbdd 100644
8166--- a/arch/x86/ia32/sys_ia32.c
8167+++ b/arch/x86/ia32/sys_ia32.c
8168@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
8169 */
8170 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
8171 {
8172- typeof(ubuf->st_uid) uid = 0;
8173- typeof(ubuf->st_gid) gid = 0;
8174+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
8175+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
8176 SET_UID(uid, stat->uid);
8177 SET_GID(gid, stat->gid);
8178 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
8179@@ -308,8 +308,8 @@ asmlinkage long sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
8180 }
8181 set_fs(KERNEL_DS);
8182 ret = sys_rt_sigprocmask(how,
8183- set ? (sigset_t __user *)&s : NULL,
8184- oset ? (sigset_t __user *)&s : NULL,
8185+ set ? (sigset_t __force_user *)&s : NULL,
8186+ oset ? (sigset_t __force_user *)&s : NULL,
8187 sigsetsize);
8188 set_fs(old_fs);
8189 if (ret)
8190@@ -371,7 +371,7 @@ asmlinkage long sys32_sched_rr_get_interval(compat_pid_t pid,
8191 mm_segment_t old_fs = get_fs();
8192
8193 set_fs(KERNEL_DS);
8194- ret = sys_sched_rr_get_interval(pid, (struct timespec __user *)&t);
8195+ ret = sys_sched_rr_get_interval(pid, (struct timespec __force_user *)&t);
8196 set_fs(old_fs);
8197 if (put_compat_timespec(&t, interval))
8198 return -EFAULT;
8199@@ -387,7 +387,7 @@ asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set,
8200 mm_segment_t old_fs = get_fs();
8201
8202 set_fs(KERNEL_DS);
8203- ret = sys_rt_sigpending((sigset_t __user *)&s, sigsetsize);
8204+ ret = sys_rt_sigpending((sigset_t __force_user *)&s, sigsetsize);
8205 set_fs(old_fs);
8206 if (!ret) {
8207 switch (_NSIG_WORDS) {
8208@@ -412,7 +412,7 @@ asmlinkage long sys32_rt_sigqueueinfo(int pid, int sig,
8209 if (copy_siginfo_from_user32(&info, uinfo))
8210 return -EFAULT;
8211 set_fs(KERNEL_DS);
8212- ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *)&info);
8213+ ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __force_user *)&info);
8214 set_fs(old_fs);
8215 return ret;
8216 }
8217@@ -513,7 +513,7 @@ asmlinkage long sys32_sendfile(int out_fd, int in_fd,
8218 return -EFAULT;
8219
8220 set_fs(KERNEL_DS);
8221- ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *)&of : NULL,
8222+ ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __force_user *)&of : NULL,
8223 count);
8224 set_fs(old_fs);
8225
8226diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
8227index e2077d3..b7a8919 100644
8228--- a/arch/x86/include/asm/alternative-asm.h
8229+++ b/arch/x86/include/asm/alternative-asm.h
8230@@ -8,10 +8,10 @@
8231
8232 #ifdef CONFIG_SMP
8233 .macro LOCK_PREFIX
8234-1: lock
8235+672: lock
8236 .section .smp_locks,"a"
8237 .align 4
8238- X86_ALIGN 1b
8239+ X86_ALIGN 672b
8240 .previous
8241 .endm
8242 #else
8243@@ -19,4 +19,43 @@
8244 .endm
8245 #endif
8246
8247+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
8248+ .macro pax_force_retaddr_bts rip=0
8249+ btsq $63,\rip(%rsp)
8250+ .endm
8251+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
8252+ .macro pax_force_retaddr rip=0, reload=0
8253+ btsq $63,\rip(%rsp)
8254+ .endm
8255+ .macro pax_force_fptr ptr
8256+ btsq $63,\ptr
8257+ .endm
8258+ .macro pax_set_fptr_mask
8259+ .endm
8260+#endif
8261+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
8262+ .macro pax_force_retaddr rip=0, reload=0
8263+ .if \reload
8264+ pax_set_fptr_mask
8265+ .endif
8266+ orq %r10,\rip(%rsp)
8267+ .endm
8268+ .macro pax_force_fptr ptr
8269+ orq %r10,\ptr
8270+ .endm
8271+ .macro pax_set_fptr_mask
8272+ movabs $0x8000000000000000,%r10
8273+ .endm
8274+#endif
8275+#else
8276+ .macro pax_force_retaddr rip=0, reload=0
8277+ .endm
8278+ .macro pax_force_fptr ptr
8279+ .endm
8280+ .macro pax_force_retaddr_bts rip=0
8281+ .endm
8282+ .macro pax_set_fptr_mask
8283+ .endm
8284+#endif
8285+
8286 #endif /* __ASSEMBLY__ */
8287diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
8288index c240efc..fdfadf3 100644
8289--- a/arch/x86/include/asm/alternative.h
8290+++ b/arch/x86/include/asm/alternative.h
8291@@ -85,7 +85,7 @@ static inline void alternatives_smp_switch(int smp) {}
8292 " .byte 662b-661b\n" /* sourcelen */ \
8293 " .byte 664f-663f\n" /* replacementlen */ \
8294 ".previous\n" \
8295- ".section .altinstr_replacement, \"ax\"\n" \
8296+ ".section .altinstr_replacement, \"a\"\n" \
8297 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
8298 ".previous"
8299
8300diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
8301index 474d80d..1f97d58 100644
8302--- a/arch/x86/include/asm/apic.h
8303+++ b/arch/x86/include/asm/apic.h
8304@@ -46,7 +46,7 @@ static inline void generic_apic_probe(void)
8305
8306 #ifdef CONFIG_X86_LOCAL_APIC
8307
8308-extern unsigned int apic_verbosity;
8309+extern int apic_verbosity;
8310 extern int local_apic_timer_c2_ok;
8311
8312 extern int disable_apic;
8313diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
8314index 20370c6..a2eb9b0 100644
8315--- a/arch/x86/include/asm/apm.h
8316+++ b/arch/x86/include/asm/apm.h
8317@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
8318 __asm__ __volatile__(APM_DO_ZERO_SEGS
8319 "pushl %%edi\n\t"
8320 "pushl %%ebp\n\t"
8321- "lcall *%%cs:apm_bios_entry\n\t"
8322+ "lcall *%%ss:apm_bios_entry\n\t"
8323 "setc %%al\n\t"
8324 "popl %%ebp\n\t"
8325 "popl %%edi\n\t"
8326@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
8327 __asm__ __volatile__(APM_DO_ZERO_SEGS
8328 "pushl %%edi\n\t"
8329 "pushl %%ebp\n\t"
8330- "lcall *%%cs:apm_bios_entry\n\t"
8331+ "lcall *%%ss:apm_bios_entry\n\t"
8332 "setc %%bl\n\t"
8333 "popl %%ebp\n\t"
8334 "popl %%edi\n\t"
8335diff --git a/arch/x86/include/asm/atomic_32.h b/arch/x86/include/asm/atomic_32.h
8336index dc5a667..939040c 100644
8337--- a/arch/x86/include/asm/atomic_32.h
8338+++ b/arch/x86/include/asm/atomic_32.h
8339@@ -25,6 +25,17 @@ static inline int atomic_read(const atomic_t *v)
8340 }
8341
8342 /**
8343+ * atomic_read_unchecked - read atomic variable
8344+ * @v: pointer of type atomic_unchecked_t
8345+ *
8346+ * Atomically reads the value of @v.
8347+ */
8348+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8349+{
8350+ return v->counter;
8351+}
8352+
8353+/**
8354 * atomic_set - set atomic variable
8355 * @v: pointer of type atomic_t
8356 * @i: required value
8357@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *v, int i)
8358 }
8359
8360 /**
8361+ * atomic_set_unchecked - set atomic variable
8362+ * @v: pointer of type atomic_unchecked_t
8363+ * @i: required value
8364+ *
8365+ * Atomically sets the value of @v to @i.
8366+ */
8367+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8368+{
8369+ v->counter = i;
8370+}
8371+
8372+/**
8373 * atomic_add - add integer to atomic variable
8374 * @i: integer value to add
8375 * @v: pointer of type atomic_t
8376@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *v, int i)
8377 */
8378 static inline void atomic_add(int i, atomic_t *v)
8379 {
8380- asm volatile(LOCK_PREFIX "addl %1,%0"
8381+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8382+
8383+#ifdef CONFIG_PAX_REFCOUNT
8384+ "jno 0f\n"
8385+ LOCK_PREFIX "subl %1,%0\n"
8386+ "int $4\n0:\n"
8387+ _ASM_EXTABLE(0b, 0b)
8388+#endif
8389+
8390+ : "+m" (v->counter)
8391+ : "ir" (i));
8392+}
8393+
8394+/**
8395+ * atomic_add_unchecked - add integer to atomic variable
8396+ * @i: integer value to add
8397+ * @v: pointer of type atomic_unchecked_t
8398+ *
8399+ * Atomically adds @i to @v.
8400+ */
8401+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8402+{
8403+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8404 : "+m" (v->counter)
8405 : "ir" (i));
8406 }
8407@@ -59,7 +104,29 @@ static inline void atomic_add(int i, atomic_t *v)
8408 */
8409 static inline void atomic_sub(int i, atomic_t *v)
8410 {
8411- asm volatile(LOCK_PREFIX "subl %1,%0"
8412+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8413+
8414+#ifdef CONFIG_PAX_REFCOUNT
8415+ "jno 0f\n"
8416+ LOCK_PREFIX "addl %1,%0\n"
8417+ "int $4\n0:\n"
8418+ _ASM_EXTABLE(0b, 0b)
8419+#endif
8420+
8421+ : "+m" (v->counter)
8422+ : "ir" (i));
8423+}
8424+
8425+/**
8426+ * atomic_sub_unchecked - subtract integer from atomic variable
8427+ * @i: integer value to subtract
8428+ * @v: pointer of type atomic_unchecked_t
8429+ *
8430+ * Atomically subtracts @i from @v.
8431+ */
8432+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8433+{
8434+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8435 : "+m" (v->counter)
8436 : "ir" (i));
8437 }
8438@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8439 {
8440 unsigned char c;
8441
8442- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8443+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8444+
8445+#ifdef CONFIG_PAX_REFCOUNT
8446+ "jno 0f\n"
8447+ LOCK_PREFIX "addl %2,%0\n"
8448+ "int $4\n0:\n"
8449+ _ASM_EXTABLE(0b, 0b)
8450+#endif
8451+
8452+ "sete %1\n"
8453 : "+m" (v->counter), "=qm" (c)
8454 : "ir" (i) : "memory");
8455 return c;
8456@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8457 */
8458 static inline void atomic_inc(atomic_t *v)
8459 {
8460- asm volatile(LOCK_PREFIX "incl %0"
8461+ asm volatile(LOCK_PREFIX "incl %0\n"
8462+
8463+#ifdef CONFIG_PAX_REFCOUNT
8464+ "jno 0f\n"
8465+ LOCK_PREFIX "decl %0\n"
8466+ "int $4\n0:\n"
8467+ _ASM_EXTABLE(0b, 0b)
8468+#endif
8469+
8470+ : "+m" (v->counter));
8471+}
8472+
8473+/**
8474+ * atomic_inc_unchecked - increment atomic variable
8475+ * @v: pointer of type atomic_unchecked_t
8476+ *
8477+ * Atomically increments @v by 1.
8478+ */
8479+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
8480+{
8481+ asm volatile(LOCK_PREFIX "incl %0\n"
8482 : "+m" (v->counter));
8483 }
8484
8485@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *v)
8486 */
8487 static inline void atomic_dec(atomic_t *v)
8488 {
8489- asm volatile(LOCK_PREFIX "decl %0"
8490+ asm volatile(LOCK_PREFIX "decl %0\n"
8491+
8492+#ifdef CONFIG_PAX_REFCOUNT
8493+ "jno 0f\n"
8494+ LOCK_PREFIX "incl %0\n"
8495+ "int $4\n0:\n"
8496+ _ASM_EXTABLE(0b, 0b)
8497+#endif
8498+
8499+ : "+m" (v->counter));
8500+}
8501+
8502+/**
8503+ * atomic_dec_unchecked - decrement atomic variable
8504+ * @v: pointer of type atomic_unchecked_t
8505+ *
8506+ * Atomically decrements @v by 1.
8507+ */
8508+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
8509+{
8510+ asm volatile(LOCK_PREFIX "decl %0\n"
8511 : "+m" (v->counter));
8512 }
8513
8514@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
8515 {
8516 unsigned char c;
8517
8518- asm volatile(LOCK_PREFIX "decl %0; sete %1"
8519+ asm volatile(LOCK_PREFIX "decl %0\n"
8520+
8521+#ifdef CONFIG_PAX_REFCOUNT
8522+ "jno 0f\n"
8523+ LOCK_PREFIX "incl %0\n"
8524+ "int $4\n0:\n"
8525+ _ASM_EXTABLE(0b, 0b)
8526+#endif
8527+
8528+ "sete %1\n"
8529 : "+m" (v->counter), "=qm" (c)
8530 : : "memory");
8531 return c != 0;
8532@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
8533 {
8534 unsigned char c;
8535
8536- asm volatile(LOCK_PREFIX "incl %0; sete %1"
8537+ asm volatile(LOCK_PREFIX "incl %0\n"
8538+
8539+#ifdef CONFIG_PAX_REFCOUNT
8540+ "jno 0f\n"
8541+ LOCK_PREFIX "decl %0\n"
8542+ "into\n0:\n"
8543+ _ASM_EXTABLE(0b, 0b)
8544+#endif
8545+
8546+ "sete %1\n"
8547+ : "+m" (v->counter), "=qm" (c)
8548+ : : "memory");
8549+ return c != 0;
8550+}
8551+
8552+/**
8553+ * atomic_inc_and_test_unchecked - increment and test
8554+ * @v: pointer of type atomic_unchecked_t
8555+ *
8556+ * Atomically increments @v by 1
8557+ * and returns true if the result is zero, or false for all
8558+ * other cases.
8559+ */
8560+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
8561+{
8562+ unsigned char c;
8563+
8564+ asm volatile(LOCK_PREFIX "incl %0\n"
8565+ "sete %1\n"
8566 : "+m" (v->counter), "=qm" (c)
8567 : : "memory");
8568 return c != 0;
8569@@ -156,7 +309,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
8570 {
8571 unsigned char c;
8572
8573- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
8574+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
8575+
8576+#ifdef CONFIG_PAX_REFCOUNT
8577+ "jno 0f\n"
8578+ LOCK_PREFIX "subl %2,%0\n"
8579+ "int $4\n0:\n"
8580+ _ASM_EXTABLE(0b, 0b)
8581+#endif
8582+
8583+ "sets %1\n"
8584 : "+m" (v->counter), "=qm" (c)
8585 : "ir" (i) : "memory");
8586 return c;
8587@@ -179,7 +341,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
8588 #endif
8589 /* Modern 486+ processor */
8590 __i = i;
8591- asm volatile(LOCK_PREFIX "xaddl %0, %1"
8592+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
8593+
8594+#ifdef CONFIG_PAX_REFCOUNT
8595+ "jno 0f\n"
8596+ "movl %0, %1\n"
8597+ "int $4\n0:\n"
8598+ _ASM_EXTABLE(0b, 0b)
8599+#endif
8600+
8601 : "+r" (i), "+m" (v->counter)
8602 : : "memory");
8603 return i + __i;
8604@@ -195,6 +365,38 @@ no_xadd: /* Legacy 386 processor */
8605 }
8606
8607 /**
8608+ * atomic_add_return_unchecked - add integer and return
8609+ * @v: pointer of type atomic_unchecked_t
8610+ * @i: integer value to add
8611+ *
8612+ * Atomically adds @i to @v and returns @i + @v
8613+ */
8614+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
8615+{
8616+ int __i;
8617+#ifdef CONFIG_M386
8618+ unsigned long flags;
8619+ if (unlikely(boot_cpu_data.x86 <= 3))
8620+ goto no_xadd;
8621+#endif
8622+ /* Modern 486+ processor */
8623+ __i = i;
8624+ asm volatile(LOCK_PREFIX "xaddl %0, %1"
8625+ : "+r" (i), "+m" (v->counter)
8626+ : : "memory");
8627+ return i + __i;
8628+
8629+#ifdef CONFIG_M386
8630+no_xadd: /* Legacy 386 processor */
8631+ local_irq_save(flags);
8632+ __i = atomic_read_unchecked(v);
8633+ atomic_set_unchecked(v, i + __i);
8634+ local_irq_restore(flags);
8635+ return i + __i;
8636+#endif
8637+}
8638+
8639+/**
8640 * atomic_sub_return - subtract integer and return
8641 * @v: pointer of type atomic_t
8642 * @i: integer value to subtract
8643@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
8644 return cmpxchg(&v->counter, old, new);
8645 }
8646
8647+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8648+{
8649+ return cmpxchg(&v->counter, old, new);
8650+}
8651+
8652 static inline int atomic_xchg(atomic_t *v, int new)
8653 {
8654 return xchg(&v->counter, new);
8655 }
8656
8657+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8658+{
8659+ return xchg(&v->counter, new);
8660+}
8661+
8662 /**
8663 * atomic_add_unless - add unless the number is already a given value
8664 * @v: pointer of type atomic_t
8665@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *v, int new)
8666 */
8667 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8668 {
8669- int c, old;
8670+ int c, old, new;
8671 c = atomic_read(v);
8672 for (;;) {
8673- if (unlikely(c == (u)))
8674+ if (unlikely(c == u))
8675 break;
8676- old = atomic_cmpxchg((v), c, c + (a));
8677+
8678+ asm volatile("addl %2,%0\n"
8679+
8680+#ifdef CONFIG_PAX_REFCOUNT
8681+ "jno 0f\n"
8682+ "subl %2,%0\n"
8683+ "int $4\n0:\n"
8684+ _ASM_EXTABLE(0b, 0b)
8685+#endif
8686+
8687+ : "=r" (new)
8688+ : "0" (c), "ir" (a));
8689+
8690+ old = atomic_cmpxchg(v, c, new);
8691 if (likely(old == c))
8692 break;
8693 c = old;
8694 }
8695- return c != (u);
8696+ return c != u;
8697 }
8698
8699 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8700
8701 #define atomic_inc_return(v) (atomic_add_return(1, v))
8702+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
8703+{
8704+ return atomic_add_return_unchecked(1, v);
8705+}
8706 #define atomic_dec_return(v) (atomic_sub_return(1, v))
8707
8708 /* These are x86-specific, used by some header files */
8709@@ -266,9 +495,18 @@ typedef struct {
8710 u64 __aligned(8) counter;
8711 } atomic64_t;
8712
8713+#ifdef CONFIG_PAX_REFCOUNT
8714+typedef struct {
8715+ u64 __aligned(8) counter;
8716+} atomic64_unchecked_t;
8717+#else
8718+typedef atomic64_t atomic64_unchecked_t;
8719+#endif
8720+
8721 #define ATOMIC64_INIT(val) { (val) }
8722
8723 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8724+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
8725
8726 /**
8727 * atomic64_xchg - xchg atomic64 variable
8728@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
8729 * the old value.
8730 */
8731 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8732+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8733
8734 /**
8735 * atomic64_set - set atomic64 variable
8736@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
8737 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
8738
8739 /**
8740+ * atomic64_unchecked_set - set atomic64 variable
8741+ * @ptr: pointer to type atomic64_unchecked_t
8742+ * @new_val: value to assign
8743+ *
8744+ * Atomically sets the value of @ptr to @new_val.
8745+ */
8746+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
8747+
8748+/**
8749 * atomic64_read - read atomic64 variable
8750 * @ptr: pointer to type atomic64_t
8751 *
8752@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64_t *ptr)
8753 return res;
8754 }
8755
8756-extern u64 atomic64_read(atomic64_t *ptr);
8757+/**
8758+ * atomic64_read_unchecked - read atomic64 variable
8759+ * @ptr: pointer to type atomic64_unchecked_t
8760+ *
8761+ * Atomically reads the value of @ptr and returns it.
8762+ */
8763+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
8764+{
8765+ u64 res;
8766+
8767+ /*
8768+ * Note, we inline this atomic64_unchecked_t primitive because
8769+ * it only clobbers EAX/EDX and leaves the others
8770+ * untouched. We also (somewhat subtly) rely on the
8771+ * fact that cmpxchg8b returns the current 64-bit value
8772+ * of the memory location we are touching:
8773+ */
8774+ asm volatile(
8775+ "mov %%ebx, %%eax\n\t"
8776+ "mov %%ecx, %%edx\n\t"
8777+ LOCK_PREFIX "cmpxchg8b %1\n"
8778+ : "=&A" (res)
8779+ : "m" (*ptr)
8780+ );
8781+
8782+ return res;
8783+}
8784
8785 /**
8786 * atomic64_add_return - add and return
8787@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta, atomic64_t *ptr);
8788 * Other variants with different arithmetic operators:
8789 */
8790 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
8791+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8792 extern u64 atomic64_inc_return(atomic64_t *ptr);
8793+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
8794 extern u64 atomic64_dec_return(atomic64_t *ptr);
8795+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
8796
8797 /**
8798 * atomic64_add - add integer to atomic64 variable
8799@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_t *ptr);
8800 extern void atomic64_add(u64 delta, atomic64_t *ptr);
8801
8802 /**
8803+ * atomic64_add_unchecked - add integer to atomic64 variable
8804+ * @delta: integer value to add
8805+ * @ptr: pointer to type atomic64_unchecked_t
8806+ *
8807+ * Atomically adds @delta to @ptr.
8808+ */
8809+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8810+
8811+/**
8812 * atomic64_sub - subtract the atomic64 variable
8813 * @delta: integer value to subtract
8814 * @ptr: pointer to type atomic64_t
8815@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atomic64_t *ptr);
8816 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
8817
8818 /**
8819+ * atomic64_sub_unchecked - subtract the atomic64 variable
8820+ * @delta: integer value to subtract
8821+ * @ptr: pointer to type atomic64_unchecked_t
8822+ *
8823+ * Atomically subtracts @delta from @ptr.
8824+ */
8825+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
8826+
8827+/**
8828 * atomic64_sub_and_test - subtract value from variable and test result
8829 * @delta: integer value to subtract
8830 * @ptr: pointer to type atomic64_t
8831@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 delta, atomic64_t *ptr);
8832 extern void atomic64_inc(atomic64_t *ptr);
8833
8834 /**
8835+ * atomic64_inc_unchecked - increment atomic64 variable
8836+ * @ptr: pointer to type atomic64_unchecked_t
8837+ *
8838+ * Atomically increments @ptr by 1.
8839+ */
8840+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
8841+
8842+/**
8843 * atomic64_dec - decrement atomic64 variable
8844 * @ptr: pointer to type atomic64_t
8845 *
8846@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr);
8847 extern void atomic64_dec(atomic64_t *ptr);
8848
8849 /**
8850+ * atomic64_dec_unchecked - decrement atomic64 variable
8851+ * @ptr: pointer to type atomic64_unchecked_t
8852+ *
8853+ * Atomically decrements @ptr by 1.
8854+ */
8855+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
8856+
8857+/**
8858 * atomic64_dec_and_test - decrement and test
8859 * @ptr: pointer to type atomic64_t
8860 *
8861diff --git a/arch/x86/include/asm/atomic_64.h b/arch/x86/include/asm/atomic_64.h
8862index d605dc2..fafd7bd 100644
8863--- a/arch/x86/include/asm/atomic_64.h
8864+++ b/arch/x86/include/asm/atomic_64.h
8865@@ -24,6 +24,17 @@ static inline int atomic_read(const atomic_t *v)
8866 }
8867
8868 /**
8869+ * atomic_read_unchecked - read atomic variable
8870+ * @v: pointer of type atomic_unchecked_t
8871+ *
8872+ * Atomically reads the value of @v.
8873+ */
8874+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
8875+{
8876+ return v->counter;
8877+}
8878+
8879+/**
8880 * atomic_set - set atomic variable
8881 * @v: pointer of type atomic_t
8882 * @i: required value
8883@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *v, int i)
8884 }
8885
8886 /**
8887+ * atomic_set_unchecked - set atomic variable
8888+ * @v: pointer of type atomic_unchecked_t
8889+ * @i: required value
8890+ *
8891+ * Atomically sets the value of @v to @i.
8892+ */
8893+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
8894+{
8895+ v->counter = i;
8896+}
8897+
8898+/**
8899 * atomic_add - add integer to atomic variable
8900 * @i: integer value to add
8901 * @v: pointer of type atomic_t
8902@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *v, int i)
8903 */
8904 static inline void atomic_add(int i, atomic_t *v)
8905 {
8906- asm volatile(LOCK_PREFIX "addl %1,%0"
8907+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8908+
8909+#ifdef CONFIG_PAX_REFCOUNT
8910+ "jno 0f\n"
8911+ LOCK_PREFIX "subl %1,%0\n"
8912+ "int $4\n0:\n"
8913+ _ASM_EXTABLE(0b, 0b)
8914+#endif
8915+
8916+ : "=m" (v->counter)
8917+ : "ir" (i), "m" (v->counter));
8918+}
8919+
8920+/**
8921+ * atomic_add_unchecked - add integer to atomic variable
8922+ * @i: integer value to add
8923+ * @v: pointer of type atomic_unchecked_t
8924+ *
8925+ * Atomically adds @i to @v.
8926+ */
8927+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
8928+{
8929+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
8930 : "=m" (v->counter)
8931 : "ir" (i), "m" (v->counter));
8932 }
8933@@ -58,7 +103,29 @@ static inline void atomic_add(int i, atomic_t *v)
8934 */
8935 static inline void atomic_sub(int i, atomic_t *v)
8936 {
8937- asm volatile(LOCK_PREFIX "subl %1,%0"
8938+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8939+
8940+#ifdef CONFIG_PAX_REFCOUNT
8941+ "jno 0f\n"
8942+ LOCK_PREFIX "addl %1,%0\n"
8943+ "int $4\n0:\n"
8944+ _ASM_EXTABLE(0b, 0b)
8945+#endif
8946+
8947+ : "=m" (v->counter)
8948+ : "ir" (i), "m" (v->counter));
8949+}
8950+
8951+/**
8952+ * atomic_sub_unchecked - subtract the atomic variable
8953+ * @i: integer value to subtract
8954+ * @v: pointer of type atomic_unchecked_t
8955+ *
8956+ * Atomically subtracts @i from @v.
8957+ */
8958+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
8959+{
8960+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
8961 : "=m" (v->counter)
8962 : "ir" (i), "m" (v->counter));
8963 }
8964@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8965 {
8966 unsigned char c;
8967
8968- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
8969+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
8970+
8971+#ifdef CONFIG_PAX_REFCOUNT
8972+ "jno 0f\n"
8973+ LOCK_PREFIX "addl %2,%0\n"
8974+ "int $4\n0:\n"
8975+ _ASM_EXTABLE(0b, 0b)
8976+#endif
8977+
8978+ "sete %1\n"
8979 : "=m" (v->counter), "=qm" (c)
8980 : "ir" (i), "m" (v->counter) : "memory");
8981 return c;
8982@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
8983 */
8984 static inline void atomic_inc(atomic_t *v)
8985 {
8986- asm volatile(LOCK_PREFIX "incl %0"
8987+ asm volatile(LOCK_PREFIX "incl %0\n"
8988+
8989+#ifdef CONFIG_PAX_REFCOUNT
8990+ "jno 0f\n"
8991+ LOCK_PREFIX "decl %0\n"
8992+ "int $4\n0:\n"
8993+ _ASM_EXTABLE(0b, 0b)
8994+#endif
8995+
8996+ : "=m" (v->counter)
8997+ : "m" (v->counter));
8998+}
8999+
9000+/**
9001+ * atomic_inc_unchecked - increment atomic variable
9002+ * @v: pointer of type atomic_unchecked_t
9003+ *
9004+ * Atomically increments @v by 1.
9005+ */
9006+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
9007+{
9008+ asm volatile(LOCK_PREFIX "incl %0\n"
9009 : "=m" (v->counter)
9010 : "m" (v->counter));
9011 }
9012@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *v)
9013 */
9014 static inline void atomic_dec(atomic_t *v)
9015 {
9016- asm volatile(LOCK_PREFIX "decl %0"
9017+ asm volatile(LOCK_PREFIX "decl %0\n"
9018+
9019+#ifdef CONFIG_PAX_REFCOUNT
9020+ "jno 0f\n"
9021+ LOCK_PREFIX "incl %0\n"
9022+ "int $4\n0:\n"
9023+ _ASM_EXTABLE(0b, 0b)
9024+#endif
9025+
9026+ : "=m" (v->counter)
9027+ : "m" (v->counter));
9028+}
9029+
9030+/**
9031+ * atomic_dec_unchecked - decrement atomic variable
9032+ * @v: pointer of type atomic_unchecked_t
9033+ *
9034+ * Atomically decrements @v by 1.
9035+ */
9036+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
9037+{
9038+ asm volatile(LOCK_PREFIX "decl %0\n"
9039 : "=m" (v->counter)
9040 : "m" (v->counter));
9041 }
9042@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
9043 {
9044 unsigned char c;
9045
9046- asm volatile(LOCK_PREFIX "decl %0; sete %1"
9047+ asm volatile(LOCK_PREFIX "decl %0\n"
9048+
9049+#ifdef CONFIG_PAX_REFCOUNT
9050+ "jno 0f\n"
9051+ LOCK_PREFIX "incl %0\n"
9052+ "int $4\n0:\n"
9053+ _ASM_EXTABLE(0b, 0b)
9054+#endif
9055+
9056+ "sete %1\n"
9057 : "=m" (v->counter), "=qm" (c)
9058 : "m" (v->counter) : "memory");
9059 return c != 0;
9060@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
9061 {
9062 unsigned char c;
9063
9064- asm volatile(LOCK_PREFIX "incl %0; sete %1"
9065+ asm volatile(LOCK_PREFIX "incl %0\n"
9066+
9067+#ifdef CONFIG_PAX_REFCOUNT
9068+ "jno 0f\n"
9069+ LOCK_PREFIX "decl %0\n"
9070+ "int $4\n0:\n"
9071+ _ASM_EXTABLE(0b, 0b)
9072+#endif
9073+
9074+ "sete %1\n"
9075+ : "=m" (v->counter), "=qm" (c)
9076+ : "m" (v->counter) : "memory");
9077+ return c != 0;
9078+}
9079+
9080+/**
9081+ * atomic_inc_and_test_unchecked - increment and test
9082+ * @v: pointer of type atomic_unchecked_t
9083+ *
9084+ * Atomically increments @v by 1
9085+ * and returns true if the result is zero, or false for all
9086+ * other cases.
9087+ */
9088+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
9089+{
9090+ unsigned char c;
9091+
9092+ asm volatile(LOCK_PREFIX "incl %0\n"
9093+ "sete %1\n"
9094 : "=m" (v->counter), "=qm" (c)
9095 : "m" (v->counter) : "memory");
9096 return c != 0;
9097@@ -157,7 +312,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9098 {
9099 unsigned char c;
9100
9101- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
9102+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
9103+
9104+#ifdef CONFIG_PAX_REFCOUNT
9105+ "jno 0f\n"
9106+ LOCK_PREFIX "subl %2,%0\n"
9107+ "int $4\n0:\n"
9108+ _ASM_EXTABLE(0b, 0b)
9109+#endif
9110+
9111+ "sets %1\n"
9112 : "=m" (v->counter), "=qm" (c)
9113 : "ir" (i), "m" (v->counter) : "memory");
9114 return c;
9115@@ -173,7 +337,31 @@ static inline int atomic_add_negative(int i, atomic_t *v)
9116 static inline int atomic_add_return(int i, atomic_t *v)
9117 {
9118 int __i = i;
9119- asm volatile(LOCK_PREFIX "xaddl %0, %1"
9120+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9121+
9122+#ifdef CONFIG_PAX_REFCOUNT
9123+ "jno 0f\n"
9124+ "movl %0, %1\n"
9125+ "int $4\n0:\n"
9126+ _ASM_EXTABLE(0b, 0b)
9127+#endif
9128+
9129+ : "+r" (i), "+m" (v->counter)
9130+ : : "memory");
9131+ return i + __i;
9132+}
9133+
9134+/**
9135+ * atomic_add_return_unchecked - add and return
9136+ * @i: integer value to add
9137+ * @v: pointer of type atomic_unchecked_t
9138+ *
9139+ * Atomically adds @i to @v and returns @i + @v
9140+ */
9141+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
9142+{
9143+ int __i = i;
9144+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
9145 : "+r" (i), "+m" (v->counter)
9146 : : "memory");
9147 return i + __i;
9148@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
9149 }
9150
9151 #define atomic_inc_return(v) (atomic_add_return(1, v))
9152+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
9153+{
9154+ return atomic_add_return_unchecked(1, v);
9155+}
9156 #define atomic_dec_return(v) (atomic_sub_return(1, v))
9157
9158 /* The 64-bit atomic type */
9159@@ -204,6 +396,18 @@ static inline long atomic64_read(const atomic64_t *v)
9160 }
9161
9162 /**
9163+ * atomic64_read_unchecked - read atomic64 variable
9164+ * @v: pointer of type atomic64_unchecked_t
9165+ *
9166+ * Atomically reads the value of @v.
9167+ * Doesn't imply a read memory barrier.
9168+ */
9169+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
9170+{
9171+ return v->counter;
9172+}
9173+
9174+/**
9175 * atomic64_set - set atomic64 variable
9176 * @v: pointer to type atomic64_t
9177 * @i: required value
9178@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
9179 }
9180
9181 /**
9182+ * atomic64_set_unchecked - set atomic64 variable
9183+ * @v: pointer to type atomic64_unchecked_t
9184+ * @i: required value
9185+ *
9186+ * Atomically sets the value of @v to @i.
9187+ */
9188+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
9189+{
9190+ v->counter = i;
9191+}
9192+
9193+/**
9194 * atomic64_add - add integer to atomic64 variable
9195 * @i: integer value to add
9196 * @v: pointer to type atomic64_t
9197@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
9198 */
9199 static inline void atomic64_add(long i, atomic64_t *v)
9200 {
9201+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
9202+
9203+#ifdef CONFIG_PAX_REFCOUNT
9204+ "jno 0f\n"
9205+ LOCK_PREFIX "subq %1,%0\n"
9206+ "int $4\n0:\n"
9207+ _ASM_EXTABLE(0b, 0b)
9208+#endif
9209+
9210+ : "=m" (v->counter)
9211+ : "er" (i), "m" (v->counter));
9212+}
9213+
9214+/**
9215+ * atomic64_add_unchecked - add integer to atomic64 variable
9216+ * @i: integer value to add
9217+ * @v: pointer to type atomic64_unchecked_t
9218+ *
9219+ * Atomically adds @i to @v.
9220+ */
9221+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
9222+{
9223 asm volatile(LOCK_PREFIX "addq %1,%0"
9224 : "=m" (v->counter)
9225 : "er" (i), "m" (v->counter));
9226@@ -238,7 +476,15 @@ static inline void atomic64_add(long i, atomic64_t *v)
9227 */
9228 static inline void atomic64_sub(long i, atomic64_t *v)
9229 {
9230- asm volatile(LOCK_PREFIX "subq %1,%0"
9231+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
9232+
9233+#ifdef CONFIG_PAX_REFCOUNT
9234+ "jno 0f\n"
9235+ LOCK_PREFIX "addq %1,%0\n"
9236+ "int $4\n0:\n"
9237+ _ASM_EXTABLE(0b, 0b)
9238+#endif
9239+
9240 : "=m" (v->counter)
9241 : "er" (i), "m" (v->counter));
9242 }
9243@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9244 {
9245 unsigned char c;
9246
9247- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
9248+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
9249+
9250+#ifdef CONFIG_PAX_REFCOUNT
9251+ "jno 0f\n"
9252+ LOCK_PREFIX "addq %2,%0\n"
9253+ "int $4\n0:\n"
9254+ _ASM_EXTABLE(0b, 0b)
9255+#endif
9256+
9257+ "sete %1\n"
9258 : "=m" (v->counter), "=qm" (c)
9259 : "er" (i), "m" (v->counter) : "memory");
9260 return c;
9261@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
9262 */
9263 static inline void atomic64_inc(atomic64_t *v)
9264 {
9265+ asm volatile(LOCK_PREFIX "incq %0\n"
9266+
9267+#ifdef CONFIG_PAX_REFCOUNT
9268+ "jno 0f\n"
9269+ LOCK_PREFIX "decq %0\n"
9270+ "int $4\n0:\n"
9271+ _ASM_EXTABLE(0b, 0b)
9272+#endif
9273+
9274+ : "=m" (v->counter)
9275+ : "m" (v->counter));
9276+}
9277+
9278+/**
9279+ * atomic64_inc_unchecked - increment atomic64 variable
9280+ * @v: pointer to type atomic64_unchecked_t
9281+ *
9282+ * Atomically increments @v by 1.
9283+ */
9284+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
9285+{
9286 asm volatile(LOCK_PREFIX "incq %0"
9287 : "=m" (v->counter)
9288 : "m" (v->counter));
9289@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64_t *v)
9290 */
9291 static inline void atomic64_dec(atomic64_t *v)
9292 {
9293- asm volatile(LOCK_PREFIX "decq %0"
9294+ asm volatile(LOCK_PREFIX "decq %0\n"
9295+
9296+#ifdef CONFIG_PAX_REFCOUNT
9297+ "jno 0f\n"
9298+ LOCK_PREFIX "incq %0\n"
9299+ "int $4\n0:\n"
9300+ _ASM_EXTABLE(0b, 0b)
9301+#endif
9302+
9303+ : "=m" (v->counter)
9304+ : "m" (v->counter));
9305+}
9306+
9307+/**
9308+ * atomic64_dec_unchecked - decrement atomic64 variable
9309+ * @v: pointer to type atomic64_t
9310+ *
9311+ * Atomically decrements @v by 1.
9312+ */
9313+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
9314+{
9315+ asm volatile(LOCK_PREFIX "decq %0\n"
9316 : "=m" (v->counter)
9317 : "m" (v->counter));
9318 }
9319@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
9320 {
9321 unsigned char c;
9322
9323- asm volatile(LOCK_PREFIX "decq %0; sete %1"
9324+ asm volatile(LOCK_PREFIX "decq %0\n"
9325+
9326+#ifdef CONFIG_PAX_REFCOUNT
9327+ "jno 0f\n"
9328+ LOCK_PREFIX "incq %0\n"
9329+ "int $4\n0:\n"
9330+ _ASM_EXTABLE(0b, 0b)
9331+#endif
9332+
9333+ "sete %1\n"
9334 : "=m" (v->counter), "=qm" (c)
9335 : "m" (v->counter) : "memory");
9336 return c != 0;
9337@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
9338 {
9339 unsigned char c;
9340
9341- asm volatile(LOCK_PREFIX "incq %0; sete %1"
9342+ asm volatile(LOCK_PREFIX "incq %0\n"
9343+
9344+#ifdef CONFIG_PAX_REFCOUNT
9345+ "jno 0f\n"
9346+ LOCK_PREFIX "decq %0\n"
9347+ "int $4\n0:\n"
9348+ _ASM_EXTABLE(0b, 0b)
9349+#endif
9350+
9351+ "sete %1\n"
9352 : "=m" (v->counter), "=qm" (c)
9353 : "m" (v->counter) : "memory");
9354 return c != 0;
9355@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9356 {
9357 unsigned char c;
9358
9359- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
9360+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
9361+
9362+#ifdef CONFIG_PAX_REFCOUNT
9363+ "jno 0f\n"
9364+ LOCK_PREFIX "subq %2,%0\n"
9365+ "int $4\n0:\n"
9366+ _ASM_EXTABLE(0b, 0b)
9367+#endif
9368+
9369+ "sets %1\n"
9370 : "=m" (v->counter), "=qm" (c)
9371 : "er" (i), "m" (v->counter) : "memory");
9372 return c;
9373@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
9374 static inline long atomic64_add_return(long i, atomic64_t *v)
9375 {
9376 long __i = i;
9377- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
9378+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
9379+
9380+#ifdef CONFIG_PAX_REFCOUNT
9381+ "jno 0f\n"
9382+ "movq %0, %1\n"
9383+ "int $4\n0:\n"
9384+ _ASM_EXTABLE(0b, 0b)
9385+#endif
9386+
9387+ : "+r" (i), "+m" (v->counter)
9388+ : : "memory");
9389+ return i + __i;
9390+}
9391+
9392+/**
9393+ * atomic64_add_return_unchecked - add and return
9394+ * @i: integer value to add
9395+ * @v: pointer to type atomic64_unchecked_t
9396+ *
9397+ * Atomically adds @i to @v and returns @i + @v
9398+ */
9399+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
9400+{
9401+ long __i = i;
9402+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
9403 : "+r" (i), "+m" (v->counter)
9404 : : "memory");
9405 return i + __i;
9406@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
9407 }
9408
9409 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
9410+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
9411+{
9412+ return atomic64_add_return_unchecked(1, v);
9413+}
9414 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
9415
9416 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9417@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
9418 return cmpxchg(&v->counter, old, new);
9419 }
9420
9421+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
9422+{
9423+ return cmpxchg(&v->counter, old, new);
9424+}
9425+
9426 static inline long atomic64_xchg(atomic64_t *v, long new)
9427 {
9428 return xchg(&v->counter, new);
9429 }
9430
9431+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
9432+{
9433+ return xchg(&v->counter, new);
9434+}
9435+
9436 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
9437 {
9438 return cmpxchg(&v->counter, old, new);
9439 }
9440
9441+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
9442+{
9443+ return cmpxchg(&v->counter, old, new);
9444+}
9445+
9446 static inline long atomic_xchg(atomic_t *v, int new)
9447 {
9448 return xchg(&v->counter, new);
9449 }
9450
9451+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
9452+{
9453+ return xchg(&v->counter, new);
9454+}
9455+
9456 /**
9457 * atomic_add_unless - add unless the number is a given value
9458 * @v: pointer of type atomic_t
9459@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t *v, int new)
9460 */
9461 static inline int atomic_add_unless(atomic_t *v, int a, int u)
9462 {
9463- int c, old;
9464+ int c, old, new;
9465 c = atomic_read(v);
9466 for (;;) {
9467- if (unlikely(c == (u)))
9468+ if (unlikely(c == u))
9469 break;
9470- old = atomic_cmpxchg((v), c, c + (a));
9471+
9472+ asm volatile("addl %2,%0\n"
9473+
9474+#ifdef CONFIG_PAX_REFCOUNT
9475+ "jno 0f\n"
9476+ "subl %2,%0\n"
9477+ "int $4\n0:\n"
9478+ _ASM_EXTABLE(0b, 0b)
9479+#endif
9480+
9481+ : "=r" (new)
9482+ : "0" (c), "ir" (a));
9483+
9484+ old = atomic_cmpxchg(v, c, new);
9485 if (likely(old == c))
9486 break;
9487 c = old;
9488 }
9489- return c != (u);
9490+ return c != u;
9491 }
9492
9493 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
9494@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
9495 */
9496 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
9497 {
9498- long c, old;
9499+ long c, old, new;
9500 c = atomic64_read(v);
9501 for (;;) {
9502- if (unlikely(c == (u)))
9503+ if (unlikely(c == u))
9504 break;
9505- old = atomic64_cmpxchg((v), c, c + (a));
9506+
9507+ asm volatile("addq %2,%0\n"
9508+
9509+#ifdef CONFIG_PAX_REFCOUNT
9510+ "jno 0f\n"
9511+ "subq %2,%0\n"
9512+ "int $4\n0:\n"
9513+ _ASM_EXTABLE(0b, 0b)
9514+#endif
9515+
9516+ : "=r" (new)
9517+ : "0" (c), "er" (a));
9518+
9519+ old = atomic64_cmpxchg(v, c, new);
9520 if (likely(old == c))
9521 break;
9522 c = old;
9523 }
9524- return c != (u);
9525+ return c != u;
9526 }
9527
9528 /**
9529diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
9530index 02b47a6..d5c4b15 100644
9531--- a/arch/x86/include/asm/bitops.h
9532+++ b/arch/x86/include/asm/bitops.h
9533@@ -38,7 +38,7 @@
9534 * a mask operation on a byte.
9535 */
9536 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
9537-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
9538+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
9539 #define CONST_MASK(nr) (1 << ((nr) & 7))
9540
9541 /**
9542diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
9543index 7a10659..8bbf355 100644
9544--- a/arch/x86/include/asm/boot.h
9545+++ b/arch/x86/include/asm/boot.h
9546@@ -11,10 +11,15 @@
9547 #include <asm/pgtable_types.h>
9548
9549 /* Physical address where kernel should be loaded. */
9550-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9551+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
9552 + (CONFIG_PHYSICAL_ALIGN - 1)) \
9553 & ~(CONFIG_PHYSICAL_ALIGN - 1))
9554
9555+#ifndef __ASSEMBLY__
9556+extern unsigned char __LOAD_PHYSICAL_ADDR[];
9557+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
9558+#endif
9559+
9560 /* Minimum kernel alignment, as a power of two */
9561 #ifdef CONFIG_X86_64
9562 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
9563diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
9564index 549860d..7d45f68 100644
9565--- a/arch/x86/include/asm/cache.h
9566+++ b/arch/x86/include/asm/cache.h
9567@@ -5,9 +5,10 @@
9568
9569 /* L1 cache line size */
9570 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
9571-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
9572+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
9573
9574 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
9575+#define __read_only __attribute__((__section__(".data.read_only")))
9576
9577 #ifdef CONFIG_X86_VSMP
9578 /* vSMP Internode cacheline shift */
9579diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
9580index b54f6af..5b376a6 100644
9581--- a/arch/x86/include/asm/cacheflush.h
9582+++ b/arch/x86/include/asm/cacheflush.h
9583@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
9584 static inline unsigned long get_page_memtype(struct page *pg)
9585 {
9586 if (!PageUncached(pg) && !PageWC(pg))
9587- return -1;
9588+ return ~0UL;
9589 else if (!PageUncached(pg) && PageWC(pg))
9590 return _PAGE_CACHE_WC;
9591 else if (PageUncached(pg) && !PageWC(pg))
9592@@ -85,7 +85,7 @@ static inline void set_page_memtype(struct page *pg, unsigned long memtype)
9593 SetPageWC(pg);
9594 break;
9595 default:
9596- case -1:
9597+ case ~0UL:
9598 ClearPageUncached(pg);
9599 ClearPageWC(pg);
9600 break;
9601diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
9602index 0e63c9a..ab8d972 100644
9603--- a/arch/x86/include/asm/calling.h
9604+++ b/arch/x86/include/asm/calling.h
9605@@ -52,32 +52,32 @@ For 32-bit we have the following conventions - kernel is built with
9606 * for assembly code:
9607 */
9608
9609-#define R15 0
9610-#define R14 8
9611-#define R13 16
9612-#define R12 24
9613-#define RBP 32
9614-#define RBX 40
9615+#define R15 (0)
9616+#define R14 (8)
9617+#define R13 (16)
9618+#define R12 (24)
9619+#define RBP (32)
9620+#define RBX (40)
9621
9622 /* arguments: interrupts/non tracing syscalls only save up to here: */
9623-#define R11 48
9624-#define R10 56
9625-#define R9 64
9626-#define R8 72
9627-#define RAX 80
9628-#define RCX 88
9629-#define RDX 96
9630-#define RSI 104
9631-#define RDI 112
9632-#define ORIG_RAX 120 /* + error_code */
9633+#define R11 (48)
9634+#define R10 (56)
9635+#define R9 (64)
9636+#define R8 (72)
9637+#define RAX (80)
9638+#define RCX (88)
9639+#define RDX (96)
9640+#define RSI (104)
9641+#define RDI (112)
9642+#define ORIG_RAX (120) /* + error_code */
9643 /* end of arguments */
9644
9645 /* cpu exception frame or undefined in case of fast syscall: */
9646-#define RIP 128
9647-#define CS 136
9648-#define EFLAGS 144
9649-#define RSP 152
9650-#define SS 160
9651+#define RIP (128)
9652+#define CS (136)
9653+#define EFLAGS (144)
9654+#define RSP (152)
9655+#define SS (160)
9656
9657 #define ARGOFFSET R11
9658 #define SWFRAME ORIG_RAX
9659diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
9660index 46fc474..b02b0f9 100644
9661--- a/arch/x86/include/asm/checksum_32.h
9662+++ b/arch/x86/include/asm/checksum_32.h
9663@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
9664 int len, __wsum sum,
9665 int *src_err_ptr, int *dst_err_ptr);
9666
9667+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
9668+ int len, __wsum sum,
9669+ int *src_err_ptr, int *dst_err_ptr);
9670+
9671+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
9672+ int len, __wsum sum,
9673+ int *src_err_ptr, int *dst_err_ptr);
9674+
9675 /*
9676 * Note: when you get a NULL pointer exception here this means someone
9677 * passed in an incorrect kernel address to one of these functions.
9678@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
9679 int *err_ptr)
9680 {
9681 might_sleep();
9682- return csum_partial_copy_generic((__force void *)src, dst,
9683+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
9684 len, sum, err_ptr, NULL);
9685 }
9686
9687@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
9688 {
9689 might_sleep();
9690 if (access_ok(VERIFY_WRITE, dst, len))
9691- return csum_partial_copy_generic(src, (__force void *)dst,
9692+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
9693 len, sum, NULL, err_ptr);
9694
9695 if (len)
9696diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
9697index 617bd56..7b047a1 100644
9698--- a/arch/x86/include/asm/desc.h
9699+++ b/arch/x86/include/asm/desc.h
9700@@ -4,6 +4,7 @@
9701 #include <asm/desc_defs.h>
9702 #include <asm/ldt.h>
9703 #include <asm/mmu.h>
9704+#include <asm/pgtable.h>
9705 #include <linux/smp.h>
9706
9707 static inline void fill_ldt(struct desc_struct *desc,
9708@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_struct *desc,
9709 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
9710 desc->type = (info->read_exec_only ^ 1) << 1;
9711 desc->type |= info->contents << 2;
9712+ desc->type |= info->seg_not_present ^ 1;
9713 desc->s = 1;
9714 desc->dpl = 0x3;
9715 desc->p = info->seg_not_present ^ 1;
9716@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_struct *desc,
9717 }
9718
9719 extern struct desc_ptr idt_descr;
9720-extern gate_desc idt_table[];
9721-
9722-struct gdt_page {
9723- struct desc_struct gdt[GDT_ENTRIES];
9724-} __attribute__((aligned(PAGE_SIZE)));
9725-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
9726+extern gate_desc idt_table[256];
9727
9728+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
9729 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
9730 {
9731- return per_cpu(gdt_page, cpu).gdt;
9732+ return cpu_gdt_table[cpu];
9733 }
9734
9735 #ifdef CONFIG_X86_64
9736@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
9737 unsigned long base, unsigned dpl, unsigned flags,
9738 unsigned short seg)
9739 {
9740- gate->a = (seg << 16) | (base & 0xffff);
9741- gate->b = (base & 0xffff0000) |
9742- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
9743+ gate->gate.offset_low = base;
9744+ gate->gate.seg = seg;
9745+ gate->gate.reserved = 0;
9746+ gate->gate.type = type;
9747+ gate->gate.s = 0;
9748+ gate->gate.dpl = dpl;
9749+ gate->gate.p = 1;
9750+ gate->gate.offset_high = base >> 16;
9751 }
9752
9753 #endif
9754@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
9755 static inline void native_write_idt_entry(gate_desc *idt, int entry,
9756 const gate_desc *gate)
9757 {
9758+ pax_open_kernel();
9759 memcpy(&idt[entry], gate, sizeof(*gate));
9760+ pax_close_kernel();
9761 }
9762
9763 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
9764 const void *desc)
9765 {
9766+ pax_open_kernel();
9767 memcpy(&ldt[entry], desc, 8);
9768+ pax_close_kernel();
9769 }
9770
9771 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9772@@ -139,7 +146,10 @@ static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
9773 size = sizeof(struct desc_struct);
9774 break;
9775 }
9776+
9777+ pax_open_kernel();
9778 memcpy(&gdt[entry], desc, size);
9779+ pax_close_kernel();
9780 }
9781
9782 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
9783@@ -211,7 +221,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
9784
9785 static inline void native_load_tr_desc(void)
9786 {
9787+ pax_open_kernel();
9788 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
9789+ pax_close_kernel();
9790 }
9791
9792 static inline void native_load_gdt(const struct desc_ptr *dtr)
9793@@ -246,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
9794 unsigned int i;
9795 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
9796
9797+ pax_open_kernel();
9798 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
9799 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
9800+ pax_close_kernel();
9801 }
9802
9803 #define _LDT_empty(info) \
9804@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
9805 desc->limit = (limit >> 16) & 0xf;
9806 }
9807
9808-static inline void _set_gate(int gate, unsigned type, void *addr,
9809+static inline void _set_gate(int gate, unsigned type, const void *addr,
9810 unsigned dpl, unsigned ist, unsigned seg)
9811 {
9812 gate_desc s;
9813@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
9814 * Pentium F0 0F bugfix can have resulted in the mapped
9815 * IDT being write-protected.
9816 */
9817-static inline void set_intr_gate(unsigned int n, void *addr)
9818+static inline void set_intr_gate(unsigned int n, const void *addr)
9819 {
9820 BUG_ON((unsigned)n > 0xFF);
9821 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
9822@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
9823 /*
9824 * This routine sets up an interrupt gate at directory privilege level 3.
9825 */
9826-static inline void set_system_intr_gate(unsigned int n, void *addr)
9827+static inline void set_system_intr_gate(unsigned int n, const void *addr)
9828 {
9829 BUG_ON((unsigned)n > 0xFF);
9830 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
9831 }
9832
9833-static inline void set_system_trap_gate(unsigned int n, void *addr)
9834+static inline void set_system_trap_gate(unsigned int n, const void *addr)
9835 {
9836 BUG_ON((unsigned)n > 0xFF);
9837 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
9838 }
9839
9840-static inline void set_trap_gate(unsigned int n, void *addr)
9841+static inline void set_trap_gate(unsigned int n, const void *addr)
9842 {
9843 BUG_ON((unsigned)n > 0xFF);
9844 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
9845@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
9846 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
9847 {
9848 BUG_ON((unsigned)n > 0xFF);
9849- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
9850+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
9851 }
9852
9853-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
9854+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
9855 {
9856 BUG_ON((unsigned)n > 0xFF);
9857 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
9858 }
9859
9860-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
9861+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
9862 {
9863 BUG_ON((unsigned)n > 0xFF);
9864 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
9865 }
9866
9867+#ifdef CONFIG_X86_32
9868+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
9869+{
9870+ struct desc_struct d;
9871+
9872+ if (likely(limit))
9873+ limit = (limit - 1UL) >> PAGE_SHIFT;
9874+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
9875+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
9876+}
9877+#endif
9878+
9879 #endif /* _ASM_X86_DESC_H */
9880diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
9881index 9d66848..6b4a691 100644
9882--- a/arch/x86/include/asm/desc_defs.h
9883+++ b/arch/x86/include/asm/desc_defs.h
9884@@ -31,6 +31,12 @@ struct desc_struct {
9885 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
9886 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
9887 };
9888+ struct {
9889+ u16 offset_low;
9890+ u16 seg;
9891+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
9892+ unsigned offset_high: 16;
9893+ } gate;
9894 };
9895 } __attribute__((packed));
9896
9897diff --git a/arch/x86/include/asm/device.h b/arch/x86/include/asm/device.h
9898index cee34e9..a7c3fa2 100644
9899--- a/arch/x86/include/asm/device.h
9900+++ b/arch/x86/include/asm/device.h
9901@@ -6,7 +6,7 @@ struct dev_archdata {
9902 void *acpi_handle;
9903 #endif
9904 #ifdef CONFIG_X86_64
9905-struct dma_map_ops *dma_ops;
9906+ const struct dma_map_ops *dma_ops;
9907 #endif
9908 #ifdef CONFIG_DMAR
9909 void *iommu; /* hook for IOMMU specific extension */
9910diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
9911index 6a25d5d..786b202 100644
9912--- a/arch/x86/include/asm/dma-mapping.h
9913+++ b/arch/x86/include/asm/dma-mapping.h
9914@@ -25,9 +25,9 @@ extern int iommu_merge;
9915 extern struct device x86_dma_fallback_dev;
9916 extern int panic_on_overflow;
9917
9918-extern struct dma_map_ops *dma_ops;
9919+extern const struct dma_map_ops *dma_ops;
9920
9921-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9922+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
9923 {
9924 #ifdef CONFIG_X86_32
9925 return dma_ops;
9926@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dma_ops(struct device *dev)
9927 /* Make sure we keep the same behaviour */
9928 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
9929 {
9930- struct dma_map_ops *ops = get_dma_ops(dev);
9931+ const struct dma_map_ops *ops = get_dma_ops(dev);
9932 if (ops->mapping_error)
9933 return ops->mapping_error(dev, dma_addr);
9934
9935@@ -122,7 +122,7 @@ static inline void *
9936 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9937 gfp_t gfp)
9938 {
9939- struct dma_map_ops *ops = get_dma_ops(dev);
9940+ const struct dma_map_ops *ops = get_dma_ops(dev);
9941 void *memory;
9942
9943 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
9944@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
9945 static inline void dma_free_coherent(struct device *dev, size_t size,
9946 void *vaddr, dma_addr_t bus)
9947 {
9948- struct dma_map_ops *ops = get_dma_ops(dev);
9949+ const struct dma_map_ops *ops = get_dma_ops(dev);
9950
9951 WARN_ON(irqs_disabled()); /* for portability */
9952
9953diff --git a/arch/x86/include/asm/e820.h b/arch/x86/include/asm/e820.h
9954index 40b4e61..40d8133 100644
9955--- a/arch/x86/include/asm/e820.h
9956+++ b/arch/x86/include/asm/e820.h
9957@@ -133,7 +133,7 @@ extern char *default_machine_specific_memory_setup(void);
9958 #define ISA_END_ADDRESS 0x100000
9959 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
9960
9961-#define BIOS_BEGIN 0x000a0000
9962+#define BIOS_BEGIN 0x000c0000
9963 #define BIOS_END 0x00100000
9964
9965 #ifdef __KERNEL__
9966diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
9967index 8ac9d9a..0a6c96e 100644
9968--- a/arch/x86/include/asm/elf.h
9969+++ b/arch/x86/include/asm/elf.h
9970@@ -257,7 +257,25 @@ extern int force_personality32;
9971 the loader. We need to make sure that it is out of the way of the program
9972 that it will "exec", and that there is sufficient room for the brk. */
9973
9974+#ifdef CONFIG_PAX_SEGMEXEC
9975+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
9976+#else
9977 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
9978+#endif
9979+
9980+#ifdef CONFIG_PAX_ASLR
9981+#ifdef CONFIG_X86_32
9982+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
9983+
9984+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9985+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
9986+#else
9987+#define PAX_ELF_ET_DYN_BASE 0x400000UL
9988+
9989+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9990+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
9991+#endif
9992+#endif
9993
9994 /* This yields a mask that user programs can use to figure out what
9995 instruction set this CPU supports. This could be done in user space,
9996@@ -310,9 +328,7 @@ do { \
9997
9998 #define ARCH_DLINFO \
9999 do { \
10000- if (vdso_enabled) \
10001- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
10002- (unsigned long)current->mm->context.vdso); \
10003+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
10004 } while (0)
10005
10006 #define AT_SYSINFO 32
10007@@ -323,7 +339,7 @@ do { \
10008
10009 #endif /* !CONFIG_X86_32 */
10010
10011-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
10012+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
10013
10014 #define VDSO_ENTRY \
10015 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
10016@@ -337,7 +353,4 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
10017 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
10018 #define compat_arch_setup_additional_pages syscall32_setup_pages
10019
10020-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
10021-#define arch_randomize_brk arch_randomize_brk
10022-
10023 #endif /* _ASM_X86_ELF_H */
10024diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
10025index cc70c1c..d96d011 100644
10026--- a/arch/x86/include/asm/emergency-restart.h
10027+++ b/arch/x86/include/asm/emergency-restart.h
10028@@ -15,6 +15,6 @@ enum reboot_type {
10029
10030 extern enum reboot_type reboot_type;
10031
10032-extern void machine_emergency_restart(void);
10033+extern void machine_emergency_restart(void) __noreturn;
10034
10035 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
10036diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
10037index 1f11ce4..7caabd1 100644
10038--- a/arch/x86/include/asm/futex.h
10039+++ b/arch/x86/include/asm/futex.h
10040@@ -12,16 +12,18 @@
10041 #include <asm/system.h>
10042
10043 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
10044+ typecheck(u32 __user *, uaddr); \
10045 asm volatile("1:\t" insn "\n" \
10046 "2:\t.section .fixup,\"ax\"\n" \
10047 "3:\tmov\t%3, %1\n" \
10048 "\tjmp\t2b\n" \
10049 "\t.previous\n" \
10050 _ASM_EXTABLE(1b, 3b) \
10051- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
10052+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr))\
10053 : "i" (-EFAULT), "0" (oparg), "1" (0))
10054
10055 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
10056+ typecheck(u32 __user *, uaddr); \
10057 asm volatile("1:\tmovl %2, %0\n" \
10058 "\tmovl\t%0, %3\n" \
10059 "\t" insn "\n" \
10060@@ -34,10 +36,10 @@
10061 _ASM_EXTABLE(1b, 4b) \
10062 _ASM_EXTABLE(2b, 4b) \
10063 : "=&a" (oldval), "=&r" (ret), \
10064- "+m" (*uaddr), "=&r" (tem) \
10065+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
10066 : "r" (oparg), "i" (-EFAULT), "1" (0))
10067
10068-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10069+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
10070 {
10071 int op = (encoded_op >> 28) & 7;
10072 int cmp = (encoded_op >> 24) & 15;
10073@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10074
10075 switch (op) {
10076 case FUTEX_OP_SET:
10077- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
10078+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
10079 break;
10080 case FUTEX_OP_ADD:
10081- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
10082+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
10083 uaddr, oparg);
10084 break;
10085 case FUTEX_OP_OR:
10086@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
10087 return ret;
10088 }
10089
10090-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10091+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
10092 int newval)
10093 {
10094
10095@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
10096 return -ENOSYS;
10097 #endif
10098
10099- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
10100+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
10101 return -EFAULT;
10102
10103- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
10104+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
10105 "2:\t.section .fixup, \"ax\"\n"
10106 "3:\tmov %2, %0\n"
10107 "\tjmp 2b\n"
10108 "\t.previous\n"
10109 _ASM_EXTABLE(1b, 3b)
10110- : "=a" (oldval), "+m" (*uaddr)
10111+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
10112 : "i" (-EFAULT), "r" (newval), "0" (oldval)
10113 : "memory"
10114 );
10115diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
10116index ba180d9..3bad351 100644
10117--- a/arch/x86/include/asm/hw_irq.h
10118+++ b/arch/x86/include/asm/hw_irq.h
10119@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
10120 extern void enable_IO_APIC(void);
10121
10122 /* Statistics */
10123-extern atomic_t irq_err_count;
10124-extern atomic_t irq_mis_count;
10125+extern atomic_unchecked_t irq_err_count;
10126+extern atomic_unchecked_t irq_mis_count;
10127
10128 /* EISA */
10129 extern void eisa_set_level_irq(unsigned int irq);
10130diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
10131index 0b20bbb..4cb1396 100644
10132--- a/arch/x86/include/asm/i387.h
10133+++ b/arch/x86/include/asm/i387.h
10134@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10135 {
10136 int err;
10137
10138+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10139+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10140+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
10141+#endif
10142+
10143 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
10144 "2:\n"
10145 ".section .fixup,\"ax\"\n"
10146@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
10147 {
10148 int err;
10149
10150+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10151+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
10152+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
10153+#endif
10154+
10155 asm volatile("1: rex64/fxsave (%[fx])\n\t"
10156 "2:\n"
10157 ".section .fixup,\"ax\"\n"
10158@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
10159 }
10160
10161 /* We need a safe address that is cheap to find and that is already
10162- in L1 during context switch. The best choices are unfortunately
10163- different for UP and SMP */
10164-#ifdef CONFIG_SMP
10165-#define safe_address (__per_cpu_offset[0])
10166-#else
10167-#define safe_address (kstat_cpu(0).cpustat.user)
10168-#endif
10169+ in L1 during context switch. */
10170+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
10171
10172 /*
10173 * These must be called with preempt disabled
10174@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void)
10175 struct thread_info *me = current_thread_info();
10176 preempt_disable();
10177 if (me->status & TS_USEDFPU)
10178- __save_init_fpu(me->task);
10179+ __save_init_fpu(current);
10180 else
10181 clts();
10182 }
10183diff --git a/arch/x86/include/asm/io_32.h b/arch/x86/include/asm/io_32.h
10184index a299900..15c5410 100644
10185--- a/arch/x86/include/asm/io_32.h
10186+++ b/arch/x86/include/asm/io_32.h
10187@@ -3,6 +3,7 @@
10188
10189 #include <linux/string.h>
10190 #include <linux/compiler.h>
10191+#include <asm/processor.h>
10192
10193 /*
10194 * This file contains the definitions for the x86 IO instructions
10195@@ -42,6 +43,17 @@
10196
10197 #ifdef __KERNEL__
10198
10199+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10200+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10201+{
10202+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10203+}
10204+
10205+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10206+{
10207+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10208+}
10209+
10210 #include <asm-generic/iomap.h>
10211
10212 #include <linux/vmalloc.h>
10213diff --git a/arch/x86/include/asm/io_64.h b/arch/x86/include/asm/io_64.h
10214index 2440678..c158b88 100644
10215--- a/arch/x86/include/asm/io_64.h
10216+++ b/arch/x86/include/asm/io_64.h
10217@@ -140,6 +140,17 @@ __OUTS(l)
10218
10219 #include <linux/vmalloc.h>
10220
10221+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
10222+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
10223+{
10224+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10225+}
10226+
10227+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
10228+{
10229+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
10230+}
10231+
10232 #include <asm-generic/iomap.h>
10233
10234 void __memcpy_fromio(void *, unsigned long, unsigned);
10235diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
10236index fd6d21b..8b13915 100644
10237--- a/arch/x86/include/asm/iommu.h
10238+++ b/arch/x86/include/asm/iommu.h
10239@@ -3,7 +3,7 @@
10240
10241 extern void pci_iommu_shutdown(void);
10242 extern void no_iommu_init(void);
10243-extern struct dma_map_ops nommu_dma_ops;
10244+extern const struct dma_map_ops nommu_dma_ops;
10245 extern int force_iommu, no_iommu;
10246 extern int iommu_detected;
10247 extern int iommu_pass_through;
10248diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
10249index 9e2b952..557206e 100644
10250--- a/arch/x86/include/asm/irqflags.h
10251+++ b/arch/x86/include/asm/irqflags.h
10252@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_irq_save(void)
10253 sti; \
10254 sysexit
10255
10256+#define GET_CR0_INTO_RDI mov %cr0, %rdi
10257+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
10258+#define GET_CR3_INTO_RDI mov %cr3, %rdi
10259+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
10260+
10261 #else
10262 #define INTERRUPT_RETURN iret
10263 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
10264diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
10265index 4fe681d..bb6d40c 100644
10266--- a/arch/x86/include/asm/kprobes.h
10267+++ b/arch/x86/include/asm/kprobes.h
10268@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
10269 #define BREAKPOINT_INSTRUCTION 0xcc
10270 #define RELATIVEJUMP_INSTRUCTION 0xe9
10271 #define MAX_INSN_SIZE 16
10272-#define MAX_STACK_SIZE 64
10273-#define MIN_STACK_SIZE(ADDR) \
10274- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
10275- THREAD_SIZE - (unsigned long)(ADDR))) \
10276- ? (MAX_STACK_SIZE) \
10277- : (((unsigned long)current_thread_info()) + \
10278- THREAD_SIZE - (unsigned long)(ADDR)))
10279+#define MAX_STACK_SIZE 64UL
10280+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
10281
10282 #define flush_insn_slot(p) do { } while (0)
10283
10284diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
10285index 08bc2ff..2e88d1f 100644
10286--- a/arch/x86/include/asm/kvm_host.h
10287+++ b/arch/x86/include/asm/kvm_host.h
10288@@ -534,9 +534,9 @@ struct kvm_x86_ops {
10289 bool (*gb_page_enable)(void);
10290
10291 const struct trace_print_flags *exit_reasons_str;
10292-};
10293+} __do_const;
10294
10295-extern struct kvm_x86_ops *kvm_x86_ops;
10296+extern const struct kvm_x86_ops *kvm_x86_ops;
10297
10298 int kvm_mmu_module_init(void);
10299 void kvm_mmu_module_exit(void);
10300diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
10301index 47b9b6f..815aaa1 100644
10302--- a/arch/x86/include/asm/local.h
10303+++ b/arch/x86/include/asm/local.h
10304@@ -18,26 +18,58 @@ typedef struct {
10305
10306 static inline void local_inc(local_t *l)
10307 {
10308- asm volatile(_ASM_INC "%0"
10309+ asm volatile(_ASM_INC "%0\n"
10310+
10311+#ifdef CONFIG_PAX_REFCOUNT
10312+ "jno 0f\n"
10313+ _ASM_DEC "%0\n"
10314+ "int $4\n0:\n"
10315+ _ASM_EXTABLE(0b, 0b)
10316+#endif
10317+
10318 : "+m" (l->a.counter));
10319 }
10320
10321 static inline void local_dec(local_t *l)
10322 {
10323- asm volatile(_ASM_DEC "%0"
10324+ asm volatile(_ASM_DEC "%0\n"
10325+
10326+#ifdef CONFIG_PAX_REFCOUNT
10327+ "jno 0f\n"
10328+ _ASM_INC "%0\n"
10329+ "int $4\n0:\n"
10330+ _ASM_EXTABLE(0b, 0b)
10331+#endif
10332+
10333 : "+m" (l->a.counter));
10334 }
10335
10336 static inline void local_add(long i, local_t *l)
10337 {
10338- asm volatile(_ASM_ADD "%1,%0"
10339+ asm volatile(_ASM_ADD "%1,%0\n"
10340+
10341+#ifdef CONFIG_PAX_REFCOUNT
10342+ "jno 0f\n"
10343+ _ASM_SUB "%1,%0\n"
10344+ "int $4\n0:\n"
10345+ _ASM_EXTABLE(0b, 0b)
10346+#endif
10347+
10348 : "+m" (l->a.counter)
10349 : "ir" (i));
10350 }
10351
10352 static inline void local_sub(long i, local_t *l)
10353 {
10354- asm volatile(_ASM_SUB "%1,%0"
10355+ asm volatile(_ASM_SUB "%1,%0\n"
10356+
10357+#ifdef CONFIG_PAX_REFCOUNT
10358+ "jno 0f\n"
10359+ _ASM_ADD "%1,%0\n"
10360+ "int $4\n0:\n"
10361+ _ASM_EXTABLE(0b, 0b)
10362+#endif
10363+
10364 : "+m" (l->a.counter)
10365 : "ir" (i));
10366 }
10367@@ -55,7 +87,16 @@ static inline int local_sub_and_test(long i, local_t *l)
10368 {
10369 unsigned char c;
10370
10371- asm volatile(_ASM_SUB "%2,%0; sete %1"
10372+ asm volatile(_ASM_SUB "%2,%0\n"
10373+
10374+#ifdef CONFIG_PAX_REFCOUNT
10375+ "jno 0f\n"
10376+ _ASM_ADD "%2,%0\n"
10377+ "int $4\n0:\n"
10378+ _ASM_EXTABLE(0b, 0b)
10379+#endif
10380+
10381+ "sete %1\n"
10382 : "+m" (l->a.counter), "=qm" (c)
10383 : "ir" (i) : "memory");
10384 return c;
10385@@ -73,7 +114,16 @@ static inline int local_dec_and_test(local_t *l)
10386 {
10387 unsigned char c;
10388
10389- asm volatile(_ASM_DEC "%0; sete %1"
10390+ asm volatile(_ASM_DEC "%0\n"
10391+
10392+#ifdef CONFIG_PAX_REFCOUNT
10393+ "jno 0f\n"
10394+ _ASM_INC "%0\n"
10395+ "int $4\n0:\n"
10396+ _ASM_EXTABLE(0b, 0b)
10397+#endif
10398+
10399+ "sete %1\n"
10400 : "+m" (l->a.counter), "=qm" (c)
10401 : : "memory");
10402 return c != 0;
10403@@ -91,7 +141,16 @@ static inline int local_inc_and_test(local_t *l)
10404 {
10405 unsigned char c;
10406
10407- asm volatile(_ASM_INC "%0; sete %1"
10408+ asm volatile(_ASM_INC "%0\n"
10409+
10410+#ifdef CONFIG_PAX_REFCOUNT
10411+ "jno 0f\n"
10412+ _ASM_DEC "%0\n"
10413+ "int $4\n0:\n"
10414+ _ASM_EXTABLE(0b, 0b)
10415+#endif
10416+
10417+ "sete %1\n"
10418 : "+m" (l->a.counter), "=qm" (c)
10419 : : "memory");
10420 return c != 0;
10421@@ -110,7 +169,16 @@ static inline int local_add_negative(long i, local_t *l)
10422 {
10423 unsigned char c;
10424
10425- asm volatile(_ASM_ADD "%2,%0; sets %1"
10426+ asm volatile(_ASM_ADD "%2,%0\n"
10427+
10428+#ifdef CONFIG_PAX_REFCOUNT
10429+ "jno 0f\n"
10430+ _ASM_SUB "%2,%0\n"
10431+ "int $4\n0:\n"
10432+ _ASM_EXTABLE(0b, 0b)
10433+#endif
10434+
10435+ "sets %1\n"
10436 : "+m" (l->a.counter), "=qm" (c)
10437 : "ir" (i) : "memory");
10438 return c;
10439@@ -133,7 +201,15 @@ static inline long local_add_return(long i, local_t *l)
10440 #endif
10441 /* Modern 486+ processor */
10442 __i = i;
10443- asm volatile(_ASM_XADD "%0, %1;"
10444+ asm volatile(_ASM_XADD "%0, %1\n"
10445+
10446+#ifdef CONFIG_PAX_REFCOUNT
10447+ "jno 0f\n"
10448+ _ASM_MOV "%0,%1\n"
10449+ "int $4\n0:\n"
10450+ _ASM_EXTABLE(0b, 0b)
10451+#endif
10452+
10453 : "+r" (i), "+m" (l->a.counter)
10454 : : "memory");
10455 return i + __i;
10456diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h
10457index ef51b50..514ba37 100644
10458--- a/arch/x86/include/asm/microcode.h
10459+++ b/arch/x86/include/asm/microcode.h
10460@@ -12,13 +12,13 @@ struct device;
10461 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
10462
10463 struct microcode_ops {
10464- enum ucode_state (*request_microcode_user) (int cpu,
10465+ enum ucode_state (* const request_microcode_user) (int cpu,
10466 const void __user *buf, size_t size);
10467
10468- enum ucode_state (*request_microcode_fw) (int cpu,
10469+ enum ucode_state (* const request_microcode_fw) (int cpu,
10470 struct device *device);
10471
10472- void (*microcode_fini_cpu) (int cpu);
10473+ void (* const microcode_fini_cpu) (int cpu);
10474
10475 /*
10476 * The generic 'microcode_core' part guarantees that
10477@@ -38,18 +38,18 @@ struct ucode_cpu_info {
10478 extern struct ucode_cpu_info ucode_cpu_info[];
10479
10480 #ifdef CONFIG_MICROCODE_INTEL
10481-extern struct microcode_ops * __init init_intel_microcode(void);
10482+extern const struct microcode_ops * __init init_intel_microcode(void);
10483 #else
10484-static inline struct microcode_ops * __init init_intel_microcode(void)
10485+static inline const struct microcode_ops * __init init_intel_microcode(void)
10486 {
10487 return NULL;
10488 }
10489 #endif /* CONFIG_MICROCODE_INTEL */
10490
10491 #ifdef CONFIG_MICROCODE_AMD
10492-extern struct microcode_ops * __init init_amd_microcode(void);
10493+extern const struct microcode_ops * __init init_amd_microcode(void);
10494 #else
10495-static inline struct microcode_ops * __init init_amd_microcode(void)
10496+static inline const struct microcode_ops * __init init_amd_microcode(void)
10497 {
10498 return NULL;
10499 }
10500diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
10501index 593e51d..fa69c9a 100644
10502--- a/arch/x86/include/asm/mman.h
10503+++ b/arch/x86/include/asm/mman.h
10504@@ -5,4 +5,14 @@
10505
10506 #include <asm-generic/mman.h>
10507
10508+#ifdef __KERNEL__
10509+#ifndef __ASSEMBLY__
10510+#ifdef CONFIG_X86_32
10511+#define arch_mmap_check i386_mmap_check
10512+int i386_mmap_check(unsigned long addr, unsigned long len,
10513+ unsigned long flags);
10514+#endif
10515+#endif
10516+#endif
10517+
10518 #endif /* _ASM_X86_MMAN_H */
10519diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
10520index 80a1dee..239c67d 100644
10521--- a/arch/x86/include/asm/mmu.h
10522+++ b/arch/x86/include/asm/mmu.h
10523@@ -9,10 +9,23 @@
10524 * we put the segment information here.
10525 */
10526 typedef struct {
10527- void *ldt;
10528+ struct desc_struct *ldt;
10529 int size;
10530 struct mutex lock;
10531- void *vdso;
10532+ unsigned long vdso;
10533+
10534+#ifdef CONFIG_X86_32
10535+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
10536+ unsigned long user_cs_base;
10537+ unsigned long user_cs_limit;
10538+
10539+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10540+ cpumask_t cpu_user_cs_mask;
10541+#endif
10542+
10543+#endif
10544+#endif
10545+
10546 } mm_context_t;
10547
10548 #ifdef CONFIG_SMP
10549diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
10550index 8b5393e..8143173 100644
10551--- a/arch/x86/include/asm/mmu_context.h
10552+++ b/arch/x86/include/asm/mmu_context.h
10553@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
10554
10555 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
10556 {
10557+
10558+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
10559+ unsigned int i;
10560+ pgd_t *pgd;
10561+
10562+ pax_open_kernel();
10563+ pgd = get_cpu_pgd(smp_processor_id());
10564+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
10565+ set_pgd_batched(pgd+i, native_make_pgd(0));
10566+ pax_close_kernel();
10567+#endif
10568+
10569 #ifdef CONFIG_SMP
10570 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
10571 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
10572@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10573 struct task_struct *tsk)
10574 {
10575 unsigned cpu = smp_processor_id();
10576+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) && defined(CONFIG_SMP)
10577+ int tlbstate = TLBSTATE_OK;
10578+#endif
10579
10580 if (likely(prev != next)) {
10581 #ifdef CONFIG_SMP
10582+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10583+ tlbstate = percpu_read(cpu_tlbstate.state);
10584+#endif
10585 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10586 percpu_write(cpu_tlbstate.active_mm, next);
10587 #endif
10588 cpumask_set_cpu(cpu, mm_cpumask(next));
10589
10590 /* Re-load page tables */
10591+#ifdef CONFIG_PAX_PER_CPU_PGD
10592+ pax_open_kernel();
10593+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10594+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10595+ pax_close_kernel();
10596+ load_cr3(get_cpu_pgd(cpu));
10597+#else
10598 load_cr3(next->pgd);
10599+#endif
10600
10601 /* stop flush ipis for the previous mm */
10602 cpumask_clear_cpu(cpu, mm_cpumask(prev));
10603@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10604 */
10605 if (unlikely(prev->context.ldt != next->context.ldt))
10606 load_LDT_nolock(&next->context);
10607- }
10608+
10609+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
10610+ if (!nx_enabled) {
10611+ smp_mb__before_clear_bit();
10612+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
10613+ smp_mb__after_clear_bit();
10614+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10615+ }
10616+#endif
10617+
10618+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10619+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
10620+ prev->context.user_cs_limit != next->context.user_cs_limit))
10621+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10622 #ifdef CONFIG_SMP
10623+ else if (unlikely(tlbstate != TLBSTATE_OK))
10624+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10625+#endif
10626+#endif
10627+
10628+ }
10629 else {
10630+
10631+#ifdef CONFIG_PAX_PER_CPU_PGD
10632+ pax_open_kernel();
10633+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
10634+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
10635+ pax_close_kernel();
10636+ load_cr3(get_cpu_pgd(cpu));
10637+#endif
10638+
10639+#ifdef CONFIG_SMP
10640 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
10641 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
10642
10643@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
10644 * tlb flush IPI delivery. We must reload CR3
10645 * to make sure to use no freed page tables.
10646 */
10647+
10648+#ifndef CONFIG_PAX_PER_CPU_PGD
10649 load_cr3(next->pgd);
10650+#endif
10651+
10652 load_LDT_nolock(&next->context);
10653+
10654+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
10655+ if (!nx_enabled)
10656+ cpu_set(cpu, next->context.cpu_user_cs_mask);
10657+#endif
10658+
10659+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
10660+#ifdef CONFIG_PAX_PAGEEXEC
10661+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
10662+#endif
10663+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
10664+#endif
10665+
10666 }
10667+#endif
10668 }
10669-#endif
10670 }
10671
10672 #define activate_mm(prev, next) \
10673diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
10674index 3e2ce58..caaf478 100644
10675--- a/arch/x86/include/asm/module.h
10676+++ b/arch/x86/include/asm/module.h
10677@@ -5,6 +5,7 @@
10678
10679 #ifdef CONFIG_X86_64
10680 /* X86_64 does not define MODULE_PROC_FAMILY */
10681+#define MODULE_PROC_FAMILY ""
10682 #elif defined CONFIG_M386
10683 #define MODULE_PROC_FAMILY "386 "
10684 #elif defined CONFIG_M486
10685@@ -59,13 +60,26 @@
10686 #error unknown processor family
10687 #endif
10688
10689-#ifdef CONFIG_X86_32
10690-# ifdef CONFIG_4KSTACKS
10691-# define MODULE_STACKSIZE "4KSTACKS "
10692-# else
10693-# define MODULE_STACKSIZE ""
10694-# endif
10695-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
10696+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
10697+#define MODULE_STACKSIZE "4KSTACKS "
10698+#else
10699+#define MODULE_STACKSIZE ""
10700 #endif
10701
10702+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
10703+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
10704+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
10705+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
10706+#else
10707+#define MODULE_PAX_KERNEXEC ""
10708+#endif
10709+
10710+#ifdef CONFIG_PAX_MEMORY_UDEREF
10711+#define MODULE_PAX_UDEREF "UDEREF "
10712+#else
10713+#define MODULE_PAX_UDEREF ""
10714+#endif
10715+
10716+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
10717+
10718 #endif /* _ASM_X86_MODULE_H */
10719diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
10720index 7639dbf..e08a58c 100644
10721--- a/arch/x86/include/asm/page_64_types.h
10722+++ b/arch/x86/include/asm/page_64_types.h
10723@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
10724
10725 /* duplicated to the one in bootmem.h */
10726 extern unsigned long max_pfn;
10727-extern unsigned long phys_base;
10728+extern const unsigned long phys_base;
10729
10730 extern unsigned long __phys_addr(unsigned long);
10731 #define __phys_reloc_hide(x) (x)
10732diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
10733index efb3899..ef30687 100644
10734--- a/arch/x86/include/asm/paravirt.h
10735+++ b/arch/x86/include/asm/paravirt.h
10736@@ -648,6 +648,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
10737 val);
10738 }
10739
10740+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
10741+{
10742+ pgdval_t val = native_pgd_val(pgd);
10743+
10744+ if (sizeof(pgdval_t) > sizeof(long))
10745+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
10746+ val, (u64)val >> 32);
10747+ else
10748+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
10749+ val);
10750+}
10751+
10752 static inline void pgd_clear(pgd_t *pgdp)
10753 {
10754 set_pgd(pgdp, __pgd(0));
10755@@ -729,6 +741,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
10756 pv_mmu_ops.set_fixmap(idx, phys, flags);
10757 }
10758
10759+#ifdef CONFIG_PAX_KERNEXEC
10760+static inline unsigned long pax_open_kernel(void)
10761+{
10762+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
10763+}
10764+
10765+static inline unsigned long pax_close_kernel(void)
10766+{
10767+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
10768+}
10769+#else
10770+static inline unsigned long pax_open_kernel(void) { return 0; }
10771+static inline unsigned long pax_close_kernel(void) { return 0; }
10772+#endif
10773+
10774 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
10775
10776 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
10777@@ -945,7 +972,7 @@ extern void default_banner(void);
10778
10779 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
10780 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
10781-#define PARA_INDIRECT(addr) *%cs:addr
10782+#define PARA_INDIRECT(addr) *%ss:addr
10783 #endif
10784
10785 #define INTERRUPT_RETURN \
10786@@ -1022,6 +1049,21 @@ extern void default_banner(void);
10787 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
10788 CLBR_NONE, \
10789 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
10790+
10791+#define GET_CR0_INTO_RDI \
10792+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
10793+ mov %rax,%rdi
10794+
10795+#define SET_RDI_INTO_CR0 \
10796+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
10797+
10798+#define GET_CR3_INTO_RDI \
10799+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
10800+ mov %rax,%rdi
10801+
10802+#define SET_RDI_INTO_CR3 \
10803+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
10804+
10805 #endif /* CONFIG_X86_32 */
10806
10807 #endif /* __ASSEMBLY__ */
10808diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
10809index 9357473..aeb2de5 100644
10810--- a/arch/x86/include/asm/paravirt_types.h
10811+++ b/arch/x86/include/asm/paravirt_types.h
10812@@ -78,19 +78,19 @@ struct pv_init_ops {
10813 */
10814 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
10815 unsigned long addr, unsigned len);
10816-};
10817+} __no_const;
10818
10819
10820 struct pv_lazy_ops {
10821 /* Set deferred update mode, used for batching operations. */
10822 void (*enter)(void);
10823 void (*leave)(void);
10824-};
10825+} __no_const;
10826
10827 struct pv_time_ops {
10828 unsigned long long (*sched_clock)(void);
10829 unsigned long (*get_tsc_khz)(void);
10830-};
10831+} __no_const;
10832
10833 struct pv_cpu_ops {
10834 /* hooks for various privileged instructions */
10835@@ -186,7 +186,7 @@ struct pv_cpu_ops {
10836
10837 void (*start_context_switch)(struct task_struct *prev);
10838 void (*end_context_switch)(struct task_struct *next);
10839-};
10840+} __no_const;
10841
10842 struct pv_irq_ops {
10843 /*
10844@@ -217,7 +217,7 @@ struct pv_apic_ops {
10845 unsigned long start_eip,
10846 unsigned long start_esp);
10847 #endif
10848-};
10849+} __no_const;
10850
10851 struct pv_mmu_ops {
10852 unsigned long (*read_cr2)(void);
10853@@ -301,6 +301,7 @@ struct pv_mmu_ops {
10854 struct paravirt_callee_save make_pud;
10855
10856 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
10857+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
10858 #endif /* PAGETABLE_LEVELS == 4 */
10859 #endif /* PAGETABLE_LEVELS >= 3 */
10860
10861@@ -316,6 +317,12 @@ struct pv_mmu_ops {
10862 an mfn. We can tell which is which from the index. */
10863 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
10864 phys_addr_t phys, pgprot_t flags);
10865+
10866+#ifdef CONFIG_PAX_KERNEXEC
10867+ unsigned long (*pax_open_kernel)(void);
10868+ unsigned long (*pax_close_kernel)(void);
10869+#endif
10870+
10871 };
10872
10873 struct raw_spinlock;
10874@@ -326,7 +333,7 @@ struct pv_lock_ops {
10875 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
10876 int (*spin_trylock)(struct raw_spinlock *lock);
10877 void (*spin_unlock)(struct raw_spinlock *lock);
10878-};
10879+} __no_const;
10880
10881 /* This contains all the paravirt structures: we get a convenient
10882 * number for each function using the offset which we use to indicate
10883diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
10884index b399988..3f47c38 100644
10885--- a/arch/x86/include/asm/pci_x86.h
10886+++ b/arch/x86/include/asm/pci_x86.h
10887@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct pci_dev *dev);
10888 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
10889
10890 struct pci_raw_ops {
10891- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10892+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
10893 int reg, int len, u32 *val);
10894- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10895+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
10896 int reg, int len, u32 val);
10897 };
10898
10899-extern struct pci_raw_ops *raw_pci_ops;
10900-extern struct pci_raw_ops *raw_pci_ext_ops;
10901+extern const struct pci_raw_ops *raw_pci_ops;
10902+extern const struct pci_raw_ops *raw_pci_ext_ops;
10903
10904-extern struct pci_raw_ops pci_direct_conf1;
10905+extern const struct pci_raw_ops pci_direct_conf1;
10906 extern bool port_cf9_safe;
10907
10908 /* arch_initcall level */
10909diff --git a/arch/x86/include/asm/percpu.h b/arch/x86/include/asm/percpu.h
10910index b65a36d..50345a4 100644
10911--- a/arch/x86/include/asm/percpu.h
10912+++ b/arch/x86/include/asm/percpu.h
10913@@ -78,6 +78,7 @@ do { \
10914 if (0) { \
10915 T__ tmp__; \
10916 tmp__ = (val); \
10917+ (void)tmp__; \
10918 } \
10919 switch (sizeof(var)) { \
10920 case 1: \
10921diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
10922index 271de94..ef944d6 100644
10923--- a/arch/x86/include/asm/pgalloc.h
10924+++ b/arch/x86/include/asm/pgalloc.h
10925@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
10926 pmd_t *pmd, pte_t *pte)
10927 {
10928 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10929+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
10930+}
10931+
10932+static inline void pmd_populate_user(struct mm_struct *mm,
10933+ pmd_t *pmd, pte_t *pte)
10934+{
10935+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
10936 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
10937 }
10938
10939diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
10940index 2334982..70bc412 100644
10941--- a/arch/x86/include/asm/pgtable-2level.h
10942+++ b/arch/x86/include/asm/pgtable-2level.h
10943@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
10944
10945 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10946 {
10947+ pax_open_kernel();
10948 *pmdp = pmd;
10949+ pax_close_kernel();
10950 }
10951
10952 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10953diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
10954index 33927d2..ccde329 100644
10955--- a/arch/x86/include/asm/pgtable-3level.h
10956+++ b/arch/x86/include/asm/pgtable-3level.h
10957@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
10958
10959 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
10960 {
10961+ pax_open_kernel();
10962 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
10963+ pax_close_kernel();
10964 }
10965
10966 static inline void native_set_pud(pud_t *pudp, pud_t pud)
10967 {
10968+ pax_open_kernel();
10969 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
10970+ pax_close_kernel();
10971 }
10972
10973 /*
10974diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
10975index af6fd36..867ff74 100644
10976--- a/arch/x86/include/asm/pgtable.h
10977+++ b/arch/x86/include/asm/pgtable.h
10978@@ -39,6 +39,7 @@ extern struct list_head pgd_list;
10979
10980 #ifndef __PAGETABLE_PUD_FOLDED
10981 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
10982+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
10983 #define pgd_clear(pgd) native_pgd_clear(pgd)
10984 #endif
10985
10986@@ -74,12 +75,51 @@ extern struct list_head pgd_list;
10987
10988 #define arch_end_context_switch(prev) do {} while(0)
10989
10990+#define pax_open_kernel() native_pax_open_kernel()
10991+#define pax_close_kernel() native_pax_close_kernel()
10992 #endif /* CONFIG_PARAVIRT */
10993
10994+#define __HAVE_ARCH_PAX_OPEN_KERNEL
10995+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
10996+
10997+#ifdef CONFIG_PAX_KERNEXEC
10998+static inline unsigned long native_pax_open_kernel(void)
10999+{
11000+ unsigned long cr0;
11001+
11002+ preempt_disable();
11003+ barrier();
11004+ cr0 = read_cr0() ^ X86_CR0_WP;
11005+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
11006+ write_cr0(cr0);
11007+ return cr0 ^ X86_CR0_WP;
11008+}
11009+
11010+static inline unsigned long native_pax_close_kernel(void)
11011+{
11012+ unsigned long cr0;
11013+
11014+ cr0 = read_cr0() ^ X86_CR0_WP;
11015+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
11016+ write_cr0(cr0);
11017+ barrier();
11018+ preempt_enable_no_resched();
11019+ return cr0 ^ X86_CR0_WP;
11020+}
11021+#else
11022+static inline unsigned long native_pax_open_kernel(void) { return 0; }
11023+static inline unsigned long native_pax_close_kernel(void) { return 0; }
11024+#endif
11025+
11026 /*
11027 * The following only work if pte_present() is true.
11028 * Undefined behaviour if not..
11029 */
11030+static inline int pte_user(pte_t pte)
11031+{
11032+ return pte_val(pte) & _PAGE_USER;
11033+}
11034+
11035 static inline int pte_dirty(pte_t pte)
11036 {
11037 return pte_flags(pte) & _PAGE_DIRTY;
11038@@ -167,9 +207,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
11039 return pte_clear_flags(pte, _PAGE_RW);
11040 }
11041
11042+static inline pte_t pte_mkread(pte_t pte)
11043+{
11044+ return __pte(pte_val(pte) | _PAGE_USER);
11045+}
11046+
11047 static inline pte_t pte_mkexec(pte_t pte)
11048 {
11049- return pte_clear_flags(pte, _PAGE_NX);
11050+#ifdef CONFIG_X86_PAE
11051+ if (__supported_pte_mask & _PAGE_NX)
11052+ return pte_clear_flags(pte, _PAGE_NX);
11053+ else
11054+#endif
11055+ return pte_set_flags(pte, _PAGE_USER);
11056+}
11057+
11058+static inline pte_t pte_exprotect(pte_t pte)
11059+{
11060+#ifdef CONFIG_X86_PAE
11061+ if (__supported_pte_mask & _PAGE_NX)
11062+ return pte_set_flags(pte, _PAGE_NX);
11063+ else
11064+#endif
11065+ return pte_clear_flags(pte, _PAGE_USER);
11066 }
11067
11068 static inline pte_t pte_mkdirty(pte_t pte)
11069@@ -302,6 +362,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
11070 #endif
11071
11072 #ifndef __ASSEMBLY__
11073+
11074+#ifdef CONFIG_PAX_PER_CPU_PGD
11075+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
11076+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
11077+{
11078+ return cpu_pgd[cpu];
11079+}
11080+#endif
11081+
11082 #include <linux/mm_types.h>
11083
11084 static inline int pte_none(pte_t pte)
11085@@ -472,7 +541,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
11086
11087 static inline int pgd_bad(pgd_t pgd)
11088 {
11089- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
11090+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
11091 }
11092
11093 static inline int pgd_none(pgd_t pgd)
11094@@ -495,7 +564,12 @@ static inline int pgd_none(pgd_t pgd)
11095 * pgd_offset() returns a (pgd_t *)
11096 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
11097 */
11098-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
11099+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
11100+
11101+#ifdef CONFIG_PAX_PER_CPU_PGD
11102+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
11103+#endif
11104+
11105 /*
11106 * a shortcut which implies the use of the kernel's pgd, instead
11107 * of a process's
11108@@ -506,6 +580,20 @@ static inline int pgd_none(pgd_t pgd)
11109 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
11110 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
11111
11112+#ifdef CONFIG_X86_32
11113+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
11114+#else
11115+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
11116+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
11117+
11118+#ifdef CONFIG_PAX_MEMORY_UDEREF
11119+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
11120+#else
11121+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
11122+#endif
11123+
11124+#endif
11125+
11126 #ifndef __ASSEMBLY__
11127
11128 extern int direct_gbpages;
11129@@ -611,11 +699,23 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm,
11130 * dst and src can be on the same page, but the range must not overlap,
11131 * and must not cross a page boundary.
11132 */
11133-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
11134+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
11135 {
11136- memcpy(dst, src, count * sizeof(pgd_t));
11137+ pax_open_kernel();
11138+ while (count--)
11139+ *dst++ = *src++;
11140+ pax_close_kernel();
11141 }
11142
11143+#ifdef CONFIG_PAX_PER_CPU_PGD
11144+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11145+#endif
11146+
11147+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11148+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
11149+#else
11150+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
11151+#endif
11152
11153 #include <asm-generic/pgtable.h>
11154 #endif /* __ASSEMBLY__ */
11155diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
11156index 750f1bf..971e839 100644
11157--- a/arch/x86/include/asm/pgtable_32.h
11158+++ b/arch/x86/include/asm/pgtable_32.h
11159@@ -26,9 +26,6 @@
11160 struct mm_struct;
11161 struct vm_area_struct;
11162
11163-extern pgd_t swapper_pg_dir[1024];
11164-extern pgd_t trampoline_pg_dir[1024];
11165-
11166 static inline void pgtable_cache_init(void) { }
11167 static inline void check_pgt_cache(void) { }
11168 void paging_init(void);
11169@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11170 # include <asm/pgtable-2level.h>
11171 #endif
11172
11173+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
11174+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
11175+#ifdef CONFIG_X86_PAE
11176+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
11177+#endif
11178+
11179 #if defined(CONFIG_HIGHPTE)
11180 #define __KM_PTE \
11181 (in_nmi() ? KM_NMI_PTE : \
11182@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
11183 /* Clear a kernel PTE and flush it from the TLB */
11184 #define kpte_clear_flush(ptep, vaddr) \
11185 do { \
11186+ pax_open_kernel(); \
11187 pte_clear(&init_mm, (vaddr), (ptep)); \
11188+ pax_close_kernel(); \
11189 __flush_tlb_one((vaddr)); \
11190 } while (0)
11191
11192@@ -85,6 +90,9 @@ do { \
11193
11194 #endif /* !__ASSEMBLY__ */
11195
11196+#define HAVE_ARCH_UNMAPPED_AREA
11197+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
11198+
11199 /*
11200 * kern_addr_valid() is (1) for FLATMEM and (0) for
11201 * SPARSEMEM and DISCONTIGMEM
11202diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
11203index 5e67c15..12d5c47 100644
11204--- a/arch/x86/include/asm/pgtable_32_types.h
11205+++ b/arch/x86/include/asm/pgtable_32_types.h
11206@@ -8,7 +8,7 @@
11207 */
11208 #ifdef CONFIG_X86_PAE
11209 # include <asm/pgtable-3level_types.h>
11210-# define PMD_SIZE (1UL << PMD_SHIFT)
11211+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
11212 # define PMD_MASK (~(PMD_SIZE - 1))
11213 #else
11214 # include <asm/pgtable-2level_types.h>
11215@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
11216 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
11217 #endif
11218
11219+#ifdef CONFIG_PAX_KERNEXEC
11220+#ifndef __ASSEMBLY__
11221+extern unsigned char MODULES_EXEC_VADDR[];
11222+extern unsigned char MODULES_EXEC_END[];
11223+#endif
11224+#include <asm/boot.h>
11225+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
11226+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
11227+#else
11228+#define ktla_ktva(addr) (addr)
11229+#define ktva_ktla(addr) (addr)
11230+#endif
11231+
11232 #define MODULES_VADDR VMALLOC_START
11233 #define MODULES_END VMALLOC_END
11234 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
11235diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
11236index c57a301..6b414ff 100644
11237--- a/arch/x86/include/asm/pgtable_64.h
11238+++ b/arch/x86/include/asm/pgtable_64.h
11239@@ -16,10 +16,14 @@
11240
11241 extern pud_t level3_kernel_pgt[512];
11242 extern pud_t level3_ident_pgt[512];
11243+extern pud_t level3_vmalloc_start_pgt[512];
11244+extern pud_t level3_vmalloc_end_pgt[512];
11245+extern pud_t level3_vmemmap_pgt[512];
11246+extern pud_t level2_vmemmap_pgt[512];
11247 extern pmd_t level2_kernel_pgt[512];
11248 extern pmd_t level2_fixmap_pgt[512];
11249-extern pmd_t level2_ident_pgt[512];
11250-extern pgd_t init_level4_pgt[];
11251+extern pmd_t level2_ident_pgt[512*2];
11252+extern pgd_t init_level4_pgt[512];
11253
11254 #define swapper_pg_dir init_level4_pgt
11255
11256@@ -74,7 +78,9 @@ static inline pte_t native_ptep_get_and_clear(pte_t *xp)
11257
11258 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
11259 {
11260+ pax_open_kernel();
11261 *pmdp = pmd;
11262+ pax_close_kernel();
11263 }
11264
11265 static inline void native_pmd_clear(pmd_t *pmd)
11266@@ -94,6 +100,13 @@ static inline void native_pud_clear(pud_t *pud)
11267
11268 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
11269 {
11270+ pax_open_kernel();
11271+ *pgdp = pgd;
11272+ pax_close_kernel();
11273+}
11274+
11275+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
11276+{
11277 *pgdp = pgd;
11278 }
11279
11280diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
11281index 766ea16..5b96cb3 100644
11282--- a/arch/x86/include/asm/pgtable_64_types.h
11283+++ b/arch/x86/include/asm/pgtable_64_types.h
11284@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
11285 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
11286 #define MODULES_END _AC(0xffffffffff000000, UL)
11287 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
11288+#define MODULES_EXEC_VADDR MODULES_VADDR
11289+#define MODULES_EXEC_END MODULES_END
11290+
11291+#define ktla_ktva(addr) (addr)
11292+#define ktva_ktla(addr) (addr)
11293
11294 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
11295diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
11296index d1f4a76..2f46ba1 100644
11297--- a/arch/x86/include/asm/pgtable_types.h
11298+++ b/arch/x86/include/asm/pgtable_types.h
11299@@ -16,12 +16,11 @@
11300 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
11301 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
11302 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
11303-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
11304+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
11305 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
11306 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
11307 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
11308-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
11309-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
11310+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
11311 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
11312
11313 /* If _PAGE_BIT_PRESENT is clear, we use these: */
11314@@ -39,7 +38,6 @@
11315 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
11316 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
11317 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
11318-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
11319 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
11320 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
11321 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
11322@@ -55,8 +53,10 @@
11323
11324 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
11325 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
11326-#else
11327+#elif defined(CONFIG_KMEMCHECK)
11328 #define _PAGE_NX (_AT(pteval_t, 0))
11329+#else
11330+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
11331 #endif
11332
11333 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
11334@@ -93,6 +93,9 @@
11335 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
11336 _PAGE_ACCESSED)
11337
11338+#define PAGE_READONLY_NOEXEC PAGE_READONLY
11339+#define PAGE_SHARED_NOEXEC PAGE_SHARED
11340+
11341 #define __PAGE_KERNEL_EXEC \
11342 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
11343 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
11344@@ -103,8 +106,8 @@
11345 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
11346 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
11347 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
11348-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
11349-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
11350+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
11351+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
11352 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
11353 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
11354 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
11355@@ -163,8 +166,8 @@
11356 * bits are combined, this will alow user to access the high address mapped
11357 * VDSO in the presence of CONFIG_COMPAT_VDSO
11358 */
11359-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
11360-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
11361+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11362+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
11363 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
11364 #endif
11365
11366@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
11367 {
11368 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
11369 }
11370+#endif
11371
11372+#if PAGETABLE_LEVELS == 3
11373+#include <asm-generic/pgtable-nopud.h>
11374+#endif
11375+
11376+#if PAGETABLE_LEVELS == 2
11377+#include <asm-generic/pgtable-nopmd.h>
11378+#endif
11379+
11380+#ifndef __ASSEMBLY__
11381 #if PAGETABLE_LEVELS > 3
11382 typedef struct { pudval_t pud; } pud_t;
11383
11384@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pud_t pud)
11385 return pud.pud;
11386 }
11387 #else
11388-#include <asm-generic/pgtable-nopud.h>
11389-
11390 static inline pudval_t native_pud_val(pud_t pud)
11391 {
11392 return native_pgd_val(pud.pgd);
11393@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
11394 return pmd.pmd;
11395 }
11396 #else
11397-#include <asm-generic/pgtable-nopmd.h>
11398-
11399 static inline pmdval_t native_pmd_val(pmd_t pmd)
11400 {
11401 return native_pgd_val(pmd.pud.pgd);
11402@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
11403
11404 extern pteval_t __supported_pte_mask;
11405 extern void set_nx(void);
11406+
11407+#ifdef CONFIG_X86_32
11408+#ifdef CONFIG_X86_PAE
11409 extern int nx_enabled;
11410+#else
11411+#define nx_enabled (0)
11412+#endif
11413+#else
11414+#define nx_enabled (1)
11415+#endif
11416
11417 #define pgprot_writecombine pgprot_writecombine
11418 extern pgprot_t pgprot_writecombine(pgprot_t prot);
11419diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
11420index fa04dea..5f823fc 100644
11421--- a/arch/x86/include/asm/processor.h
11422+++ b/arch/x86/include/asm/processor.h
11423@@ -272,7 +272,7 @@ struct tss_struct {
11424
11425 } ____cacheline_aligned;
11426
11427-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
11428+extern struct tss_struct init_tss[NR_CPUS];
11429
11430 /*
11431 * Save the original ist values for checking stack pointers during debugging
11432@@ -911,11 +911,18 @@ static inline void spin_lock_prefetch(const void *x)
11433 */
11434 #define TASK_SIZE PAGE_OFFSET
11435 #define TASK_SIZE_MAX TASK_SIZE
11436+
11437+#ifdef CONFIG_PAX_SEGMEXEC
11438+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
11439+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
11440+#else
11441 #define STACK_TOP TASK_SIZE
11442-#define STACK_TOP_MAX STACK_TOP
11443+#endif
11444+
11445+#define STACK_TOP_MAX TASK_SIZE
11446
11447 #define INIT_THREAD { \
11448- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11449+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11450 .vm86_info = NULL, \
11451 .sysenter_cs = __KERNEL_CS, \
11452 .io_bitmap_ptr = NULL, \
11453@@ -929,7 +936,7 @@ static inline void spin_lock_prefetch(const void *x)
11454 */
11455 #define INIT_TSS { \
11456 .x86_tss = { \
11457- .sp0 = sizeof(init_stack) + (long)&init_stack, \
11458+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
11459 .ss0 = __KERNEL_DS, \
11460 .ss1 = __KERNEL_CS, \
11461 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
11462@@ -940,11 +947,7 @@ static inline void spin_lock_prefetch(const void *x)
11463 extern unsigned long thread_saved_pc(struct task_struct *tsk);
11464
11465 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
11466-#define KSTK_TOP(info) \
11467-({ \
11468- unsigned long *__ptr = (unsigned long *)(info); \
11469- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
11470-})
11471+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
11472
11473 /*
11474 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
11475@@ -959,7 +962,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11476 #define task_pt_regs(task) \
11477 ({ \
11478 struct pt_regs *__regs__; \
11479- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
11480+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
11481 __regs__ - 1; \
11482 })
11483
11484@@ -969,13 +972,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11485 /*
11486 * User space process size. 47bits minus one guard page.
11487 */
11488-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
11489+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
11490
11491 /* This decides where the kernel will search for a free chunk of vm
11492 * space during mmap's.
11493 */
11494 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
11495- 0xc0000000 : 0xFFFFe000)
11496+ 0xc0000000 : 0xFFFFf000)
11497
11498 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
11499 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
11500@@ -986,11 +989,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
11501 #define STACK_TOP_MAX TASK_SIZE_MAX
11502
11503 #define INIT_THREAD { \
11504- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11505+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11506 }
11507
11508 #define INIT_TSS { \
11509- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
11510+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
11511 }
11512
11513 /*
11514@@ -1012,6 +1015,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
11515 */
11516 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
11517
11518+#ifdef CONFIG_PAX_SEGMEXEC
11519+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
11520+#endif
11521+
11522 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
11523
11524 /* Get/set a process' ability to use the timestamp counter instruction */
11525diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
11526index 0f0d908..f2e3da2 100644
11527--- a/arch/x86/include/asm/ptrace.h
11528+++ b/arch/x86/include/asm/ptrace.h
11529@@ -151,28 +151,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
11530 }
11531
11532 /*
11533- * user_mode_vm(regs) determines whether a register set came from user mode.
11534+ * user_mode(regs) determines whether a register set came from user mode.
11535 * This is true if V8086 mode was enabled OR if the register set was from
11536 * protected mode with RPL-3 CS value. This tricky test checks that with
11537 * one comparison. Many places in the kernel can bypass this full check
11538- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
11539+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
11540+ * be used.
11541 */
11542-static inline int user_mode(struct pt_regs *regs)
11543+static inline int user_mode_novm(struct pt_regs *regs)
11544 {
11545 #ifdef CONFIG_X86_32
11546 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
11547 #else
11548- return !!(regs->cs & 3);
11549+ return !!(regs->cs & SEGMENT_RPL_MASK);
11550 #endif
11551 }
11552
11553-static inline int user_mode_vm(struct pt_regs *regs)
11554+static inline int user_mode(struct pt_regs *regs)
11555 {
11556 #ifdef CONFIG_X86_32
11557 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
11558 USER_RPL;
11559 #else
11560- return user_mode(regs);
11561+ return user_mode_novm(regs);
11562 #endif
11563 }
11564
11565diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
11566index 562d4fd..6e39df1 100644
11567--- a/arch/x86/include/asm/reboot.h
11568+++ b/arch/x86/include/asm/reboot.h
11569@@ -6,19 +6,19 @@
11570 struct pt_regs;
11571
11572 struct machine_ops {
11573- void (*restart)(char *cmd);
11574- void (*halt)(void);
11575- void (*power_off)(void);
11576+ void (* __noreturn restart)(char *cmd);
11577+ void (* __noreturn halt)(void);
11578+ void (* __noreturn power_off)(void);
11579 void (*shutdown)(void);
11580 void (*crash_shutdown)(struct pt_regs *);
11581- void (*emergency_restart)(void);
11582-};
11583+ void (* __noreturn emergency_restart)(void);
11584+} __no_const;
11585
11586 extern struct machine_ops machine_ops;
11587
11588 void native_machine_crash_shutdown(struct pt_regs *regs);
11589 void native_machine_shutdown(void);
11590-void machine_real_restart(const unsigned char *code, int length);
11591+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
11592
11593 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
11594 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
11595diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
11596index 606ede1..dbfff37 100644
11597--- a/arch/x86/include/asm/rwsem.h
11598+++ b/arch/x86/include/asm/rwsem.h
11599@@ -118,6 +118,14 @@ static inline void __down_read(struct rw_semaphore *sem)
11600 {
11601 asm volatile("# beginning down_read\n\t"
11602 LOCK_PREFIX _ASM_INC "(%1)\n\t"
11603+
11604+#ifdef CONFIG_PAX_REFCOUNT
11605+ "jno 0f\n"
11606+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
11607+ "int $4\n0:\n"
11608+ _ASM_EXTABLE(0b, 0b)
11609+#endif
11610+
11611 /* adds 0x00000001, returns the old value */
11612 " jns 1f\n"
11613 " call call_rwsem_down_read_failed\n"
11614@@ -139,6 +147,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
11615 "1:\n\t"
11616 " mov %1,%2\n\t"
11617 " add %3,%2\n\t"
11618+
11619+#ifdef CONFIG_PAX_REFCOUNT
11620+ "jno 0f\n"
11621+ "sub %3,%2\n"
11622+ "int $4\n0:\n"
11623+ _ASM_EXTABLE(0b, 0b)
11624+#endif
11625+
11626 " jle 2f\n\t"
11627 LOCK_PREFIX " cmpxchg %2,%0\n\t"
11628 " jnz 1b\n\t"
11629@@ -160,6 +176,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
11630 tmp = RWSEM_ACTIVE_WRITE_BIAS;
11631 asm volatile("# beginning down_write\n\t"
11632 LOCK_PREFIX " xadd %1,(%2)\n\t"
11633+
11634+#ifdef CONFIG_PAX_REFCOUNT
11635+ "jno 0f\n"
11636+ "mov %1,(%2)\n"
11637+ "int $4\n0:\n"
11638+ _ASM_EXTABLE(0b, 0b)
11639+#endif
11640+
11641 /* subtract 0x0000ffff, returns the old value */
11642 " test %1,%1\n\t"
11643 /* was the count 0 before? */
11644@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_semaphore *sem)
11645 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
11646 asm volatile("# beginning __up_read\n\t"
11647 LOCK_PREFIX " xadd %1,(%2)\n\t"
11648+
11649+#ifdef CONFIG_PAX_REFCOUNT
11650+ "jno 0f\n"
11651+ "mov %1,(%2)\n"
11652+ "int $4\n0:\n"
11653+ _ASM_EXTABLE(0b, 0b)
11654+#endif
11655+
11656 /* subtracts 1, returns the old value */
11657 " jns 1f\n\t"
11658 " call call_rwsem_wake\n"
11659@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_semaphore *sem)
11660 rwsem_count_t tmp;
11661 asm volatile("# beginning __up_write\n\t"
11662 LOCK_PREFIX " xadd %1,(%2)\n\t"
11663+
11664+#ifdef CONFIG_PAX_REFCOUNT
11665+ "jno 0f\n"
11666+ "mov %1,(%2)\n"
11667+ "int $4\n0:\n"
11668+ _ASM_EXTABLE(0b, 0b)
11669+#endif
11670+
11671 /* tries to transition
11672 0xffff0001 -> 0x00000000 */
11673 " jz 1f\n"
11674@@ -234,6 +274,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11675 {
11676 asm volatile("# beginning __downgrade_write\n\t"
11677 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
11678+
11679+#ifdef CONFIG_PAX_REFCOUNT
11680+ "jno 0f\n"
11681+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
11682+ "int $4\n0:\n"
11683+ _ASM_EXTABLE(0b, 0b)
11684+#endif
11685+
11686 /*
11687 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
11688 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
11689@@ -253,7 +301,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
11690 static inline void rwsem_atomic_add(rwsem_count_t delta,
11691 struct rw_semaphore *sem)
11692 {
11693- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
11694+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
11695+
11696+#ifdef CONFIG_PAX_REFCOUNT
11697+ "jno 0f\n"
11698+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
11699+ "int $4\n0:\n"
11700+ _ASM_EXTABLE(0b, 0b)
11701+#endif
11702+
11703 : "+m" (sem->count)
11704 : "er" (delta));
11705 }
11706@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic_update(rwsem_count_t delta,
11707 {
11708 rwsem_count_t tmp = delta;
11709
11710- asm volatile(LOCK_PREFIX "xadd %0,%1"
11711+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
11712+
11713+#ifdef CONFIG_PAX_REFCOUNT
11714+ "jno 0f\n"
11715+ "mov %0,%1\n"
11716+ "int $4\n0:\n"
11717+ _ASM_EXTABLE(0b, 0b)
11718+#endif
11719+
11720 : "+r" (tmp), "+m" (sem->count)
11721 : : "memory");
11722
11723diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
11724index 14e0ed8..7f7dd5e 100644
11725--- a/arch/x86/include/asm/segment.h
11726+++ b/arch/x86/include/asm/segment.h
11727@@ -62,10 +62,15 @@
11728 * 26 - ESPFIX small SS
11729 * 27 - per-cpu [ offset to per-cpu data area ]
11730 * 28 - stack_canary-20 [ for stack protector ]
11731- * 29 - unused
11732- * 30 - unused
11733+ * 29 - PCI BIOS CS
11734+ * 30 - PCI BIOS DS
11735 * 31 - TSS for double fault handler
11736 */
11737+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
11738+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
11739+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
11740+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
11741+
11742 #define GDT_ENTRY_TLS_MIN 6
11743 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
11744
11745@@ -77,6 +82,8 @@
11746
11747 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
11748
11749+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
11750+
11751 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
11752
11753 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
11754@@ -88,7 +95,7 @@
11755 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
11756 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
11757
11758-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11759+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
11760 #ifdef CONFIG_SMP
11761 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
11762 #else
11763@@ -102,6 +109,12 @@
11764 #define __KERNEL_STACK_CANARY 0
11765 #endif
11766
11767+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
11768+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
11769+
11770+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
11771+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
11772+
11773 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
11774
11775 /*
11776@@ -139,7 +152,7 @@
11777 */
11778
11779 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
11780-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
11781+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
11782
11783
11784 #else
11785@@ -163,6 +176,8 @@
11786 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
11787 #define __USER32_DS __USER_DS
11788
11789+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
11790+
11791 #define GDT_ENTRY_TSS 8 /* needs two entries */
11792 #define GDT_ENTRY_LDT 10 /* needs two entries */
11793 #define GDT_ENTRY_TLS_MIN 12
11794@@ -183,6 +198,7 @@
11795 #endif
11796
11797 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
11798+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
11799 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
11800 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
11801 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
11802diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
11803index 4c2f63c..5685db2 100644
11804--- a/arch/x86/include/asm/smp.h
11805+++ b/arch/x86/include/asm/smp.h
11806@@ -24,7 +24,7 @@ extern unsigned int num_processors;
11807 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
11808 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
11809 DECLARE_PER_CPU(u16, cpu_llc_id);
11810-DECLARE_PER_CPU(int, cpu_number);
11811+DECLARE_PER_CPU(unsigned int, cpu_number);
11812
11813 static inline struct cpumask *cpu_sibling_mask(int cpu)
11814 {
11815@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
11816 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
11817
11818 /* Static state in head.S used to set up a CPU */
11819-extern struct {
11820- void *sp;
11821- unsigned short ss;
11822-} stack_start;
11823+extern unsigned long stack_start; /* Initial stack pointer address */
11824
11825 struct smp_ops {
11826 void (*smp_prepare_boot_cpu)(void);
11827@@ -60,7 +57,7 @@ struct smp_ops {
11828
11829 void (*send_call_func_ipi)(const struct cpumask *mask);
11830 void (*send_call_func_single_ipi)(int cpu);
11831-};
11832+} __no_const;
11833
11834 /* Globals due to paravirt */
11835 extern void set_cpu_sibling_map(int cpu);
11836@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitdata;
11837 extern int safe_smp_processor_id(void);
11838
11839 #elif defined(CONFIG_X86_64_SMP)
11840-#define raw_smp_processor_id() (percpu_read(cpu_number))
11841-
11842-#define stack_smp_processor_id() \
11843-({ \
11844- struct thread_info *ti; \
11845- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
11846- ti->cpu; \
11847-})
11848+#define raw_smp_processor_id() (percpu_read(cpu_number))
11849+#define stack_smp_processor_id() raw_smp_processor_id()
11850 #define safe_smp_processor_id() smp_processor_id()
11851
11852 #endif
11853diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
11854index 4e77853..4359783 100644
11855--- a/arch/x86/include/asm/spinlock.h
11856+++ b/arch/x86/include/asm/spinlock.h
11857@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(raw_rwlock_t *lock)
11858 static inline void __raw_read_lock(raw_rwlock_t *rw)
11859 {
11860 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
11861+
11862+#ifdef CONFIG_PAX_REFCOUNT
11863+ "jno 0f\n"
11864+ LOCK_PREFIX " addl $1,(%0)\n"
11865+ "int $4\n0:\n"
11866+ _ASM_EXTABLE(0b, 0b)
11867+#endif
11868+
11869 "jns 1f\n"
11870 "call __read_lock_failed\n\t"
11871 "1:\n"
11872@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
11873 static inline void __raw_write_lock(raw_rwlock_t *rw)
11874 {
11875 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
11876+
11877+#ifdef CONFIG_PAX_REFCOUNT
11878+ "jno 0f\n"
11879+ LOCK_PREFIX " addl %1,(%0)\n"
11880+ "int $4\n0:\n"
11881+ _ASM_EXTABLE(0b, 0b)
11882+#endif
11883+
11884 "jz 1f\n"
11885 "call __write_lock_failed\n\t"
11886 "1:\n"
11887@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
11888
11889 static inline void __raw_read_unlock(raw_rwlock_t *rw)
11890 {
11891- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
11892+ asm volatile(LOCK_PREFIX "incl %0\n"
11893+
11894+#ifdef CONFIG_PAX_REFCOUNT
11895+ "jno 0f\n"
11896+ LOCK_PREFIX "decl %0\n"
11897+ "int $4\n0:\n"
11898+ _ASM_EXTABLE(0b, 0b)
11899+#endif
11900+
11901+ :"+m" (rw->lock) : : "memory");
11902 }
11903
11904 static inline void __raw_write_unlock(raw_rwlock_t *rw)
11905 {
11906- asm volatile(LOCK_PREFIX "addl %1, %0"
11907+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
11908+
11909+#ifdef CONFIG_PAX_REFCOUNT
11910+ "jno 0f\n"
11911+ LOCK_PREFIX "subl %1, %0\n"
11912+ "int $4\n0:\n"
11913+ _ASM_EXTABLE(0b, 0b)
11914+#endif
11915+
11916 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
11917 }
11918
11919diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
11920index 1575177..cb23f52 100644
11921--- a/arch/x86/include/asm/stackprotector.h
11922+++ b/arch/x86/include/asm/stackprotector.h
11923@@ -48,7 +48,7 @@
11924 * head_32 for boot CPU and setup_per_cpu_areas() for others.
11925 */
11926 #define GDT_STACK_CANARY_INIT \
11927- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
11928+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
11929
11930 /*
11931 * Initialize the stackprotector canary value.
11932@@ -113,7 +113,7 @@ static inline void setup_stack_canary_segment(int cpu)
11933
11934 static inline void load_stack_canary_segment(void)
11935 {
11936-#ifdef CONFIG_X86_32
11937+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
11938 asm volatile ("mov %0, %%gs" : : "r" (0));
11939 #endif
11940 }
11941diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
11942index e0fbf29..858ef4a 100644
11943--- a/arch/x86/include/asm/system.h
11944+++ b/arch/x86/include/asm/system.h
11945@@ -132,7 +132,7 @@ do { \
11946 "thread_return:\n\t" \
11947 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
11948 __switch_canary \
11949- "movq %P[thread_info](%%rsi),%%r8\n\t" \
11950+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
11951 "movq %%rax,%%rdi\n\t" \
11952 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
11953 "jnz ret_from_fork\n\t" \
11954@@ -143,7 +143,7 @@ do { \
11955 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
11956 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
11957 [_tif_fork] "i" (_TIF_FORK), \
11958- [thread_info] "i" (offsetof(struct task_struct, stack)), \
11959+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
11960 [current_task] "m" (per_cpu_var(current_task)) \
11961 __switch_canary_iparam \
11962 : "memory", "cc" __EXTRA_CLOBBER)
11963@@ -200,7 +200,7 @@ static inline unsigned long get_limit(unsigned long segment)
11964 {
11965 unsigned long __limit;
11966 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
11967- return __limit + 1;
11968+ return __limit;
11969 }
11970
11971 static inline void native_clts(void)
11972@@ -340,12 +340,12 @@ void enable_hlt(void);
11973
11974 void cpu_idle_wait(void);
11975
11976-extern unsigned long arch_align_stack(unsigned long sp);
11977+#define arch_align_stack(x) ((x) & ~0xfUL)
11978 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
11979
11980 void default_idle(void);
11981
11982-void stop_this_cpu(void *dummy);
11983+void stop_this_cpu(void *dummy) __noreturn;
11984
11985 /*
11986 * Force strict CPU ordering.
11987diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
11988index 19c3ce4..8962535 100644
11989--- a/arch/x86/include/asm/thread_info.h
11990+++ b/arch/x86/include/asm/thread_info.h
11991@@ -10,6 +10,7 @@
11992 #include <linux/compiler.h>
11993 #include <asm/page.h>
11994 #include <asm/types.h>
11995+#include <asm/percpu.h>
11996
11997 /*
11998 * low level task data that entry.S needs immediate access to
11999@@ -24,7 +25,6 @@ struct exec_domain;
12000 #include <asm/atomic.h>
12001
12002 struct thread_info {
12003- struct task_struct *task; /* main task structure */
12004 struct exec_domain *exec_domain; /* execution domain */
12005 __u32 flags; /* low level flags */
12006 __u32 status; /* thread synchronous flags */
12007@@ -34,18 +34,12 @@ struct thread_info {
12008 mm_segment_t addr_limit;
12009 struct restart_block restart_block;
12010 void __user *sysenter_return;
12011-#ifdef CONFIG_X86_32
12012- unsigned long previous_esp; /* ESP of the previous stack in
12013- case of nested (IRQ) stacks
12014- */
12015- __u8 supervisor_stack[0];
12016-#endif
12017+ unsigned long lowest_stack;
12018 int uaccess_err;
12019 };
12020
12021-#define INIT_THREAD_INFO(tsk) \
12022+#define INIT_THREAD_INFO \
12023 { \
12024- .task = &tsk, \
12025 .exec_domain = &default_exec_domain, \
12026 .flags = 0, \
12027 .cpu = 0, \
12028@@ -56,7 +50,7 @@ struct thread_info {
12029 }, \
12030 }
12031
12032-#define init_thread_info (init_thread_union.thread_info)
12033+#define init_thread_info (init_thread_union.stack)
12034 #define init_stack (init_thread_union.stack)
12035
12036 #else /* !__ASSEMBLY__ */
12037@@ -163,45 +157,40 @@ struct thread_info {
12038 #define alloc_thread_info(tsk) \
12039 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
12040
12041-#ifdef CONFIG_X86_32
12042-
12043-#define STACK_WARN (THREAD_SIZE/8)
12044-/*
12045- * macros/functions for gaining access to the thread information structure
12046- *
12047- * preempt_count needs to be 1 initially, until the scheduler is functional.
12048- */
12049-#ifndef __ASSEMBLY__
12050-
12051-
12052-/* how to get the current stack pointer from C */
12053-register unsigned long current_stack_pointer asm("esp") __used;
12054-
12055-/* how to get the thread information struct from C */
12056-static inline struct thread_info *current_thread_info(void)
12057-{
12058- return (struct thread_info *)
12059- (current_stack_pointer & ~(THREAD_SIZE - 1));
12060-}
12061-
12062-#else /* !__ASSEMBLY__ */
12063-
12064+#ifdef __ASSEMBLY__
12065 /* how to get the thread information struct from ASM */
12066 #define GET_THREAD_INFO(reg) \
12067- movl $-THREAD_SIZE, reg; \
12068- andl %esp, reg
12069+ mov PER_CPU_VAR(current_tinfo), reg
12070
12071 /* use this one if reg already contains %esp */
12072-#define GET_THREAD_INFO_WITH_ESP(reg) \
12073- andl $-THREAD_SIZE, reg
12074+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
12075+#else
12076+/* how to get the thread information struct from C */
12077+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
12078+
12079+static __always_inline struct thread_info *current_thread_info(void)
12080+{
12081+ return percpu_read_stable(current_tinfo);
12082+}
12083+#endif
12084+
12085+#ifdef CONFIG_X86_32
12086+
12087+#define STACK_WARN (THREAD_SIZE/8)
12088+/*
12089+ * macros/functions for gaining access to the thread information structure
12090+ *
12091+ * preempt_count needs to be 1 initially, until the scheduler is functional.
12092+ */
12093+#ifndef __ASSEMBLY__
12094+
12095+/* how to get the current stack pointer from C */
12096+register unsigned long current_stack_pointer asm("esp") __used;
12097
12098 #endif
12099
12100 #else /* X86_32 */
12101
12102-#include <asm/percpu.h>
12103-#define KERNEL_STACK_OFFSET (5*8)
12104-
12105 /*
12106 * macros/functions for gaining access to the thread information structure
12107 * preempt_count needs to be 1 initially, until the scheduler is functional.
12108@@ -209,21 +198,8 @@ static inline struct thread_info *current_thread_info(void)
12109 #ifndef __ASSEMBLY__
12110 DECLARE_PER_CPU(unsigned long, kernel_stack);
12111
12112-static inline struct thread_info *current_thread_info(void)
12113-{
12114- struct thread_info *ti;
12115- ti = (void *)(percpu_read_stable(kernel_stack) +
12116- KERNEL_STACK_OFFSET - THREAD_SIZE);
12117- return ti;
12118-}
12119-
12120-#else /* !__ASSEMBLY__ */
12121-
12122-/* how to get the thread information struct from ASM */
12123-#define GET_THREAD_INFO(reg) \
12124- movq PER_CPU_VAR(kernel_stack),reg ; \
12125- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
12126-
12127+/* how to get the current stack pointer from C */
12128+register unsigned long current_stack_pointer asm("rsp") __used;
12129 #endif
12130
12131 #endif /* !X86_32 */
12132@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
12133 extern void free_thread_info(struct thread_info *ti);
12134 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
12135 #define arch_task_cache_init arch_task_cache_init
12136+
12137+#define __HAVE_THREAD_FUNCTIONS
12138+#define task_thread_info(task) (&(task)->tinfo)
12139+#define task_stack_page(task) ((task)->stack)
12140+#define setup_thread_stack(p, org) do {} while (0)
12141+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
12142+
12143+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
12144+extern struct task_struct *alloc_task_struct(void);
12145+extern void free_task_struct(struct task_struct *);
12146+
12147 #endif
12148 #endif /* _ASM_X86_THREAD_INFO_H */
12149diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
12150index 61c5874..8a046e9 100644
12151--- a/arch/x86/include/asm/uaccess.h
12152+++ b/arch/x86/include/asm/uaccess.h
12153@@ -8,12 +8,15 @@
12154 #include <linux/thread_info.h>
12155 #include <linux/prefetch.h>
12156 #include <linux/string.h>
12157+#include <linux/sched.h>
12158 #include <asm/asm.h>
12159 #include <asm/page.h>
12160
12161 #define VERIFY_READ 0
12162 #define VERIFY_WRITE 1
12163
12164+extern void check_object_size(const void *ptr, unsigned long n, bool to);
12165+
12166 /*
12167 * The fs value determines whether argument validity checking should be
12168 * performed or not. If get_fs() == USER_DS, checking is performed, with
12169@@ -29,7 +32,12 @@
12170
12171 #define get_ds() (KERNEL_DS)
12172 #define get_fs() (current_thread_info()->addr_limit)
12173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12174+void __set_fs(mm_segment_t x);
12175+void set_fs(mm_segment_t x);
12176+#else
12177 #define set_fs(x) (current_thread_info()->addr_limit = (x))
12178+#endif
12179
12180 #define segment_eq(a, b) ((a).seg == (b).seg)
12181
12182@@ -77,7 +85,33 @@
12183 * checks that the pointer is in the user space range - after calling
12184 * this function, memory access functions may still return -EFAULT.
12185 */
12186-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12187+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
12188+#define access_ok(type, addr, size) \
12189+({ \
12190+ long __size = size; \
12191+ unsigned long __addr = (unsigned long)addr; \
12192+ unsigned long __addr_ao = __addr & PAGE_MASK; \
12193+ unsigned long __end_ao = __addr + __size - 1; \
12194+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
12195+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
12196+ while(__addr_ao <= __end_ao) { \
12197+ char __c_ao; \
12198+ __addr_ao += PAGE_SIZE; \
12199+ if (__size > PAGE_SIZE) \
12200+ cond_resched(); \
12201+ if (__get_user(__c_ao, (char __user *)__addr)) \
12202+ break; \
12203+ if (type != VERIFY_WRITE) { \
12204+ __addr = __addr_ao; \
12205+ continue; \
12206+ } \
12207+ if (__put_user(__c_ao, (char __user *)__addr)) \
12208+ break; \
12209+ __addr = __addr_ao; \
12210+ } \
12211+ } \
12212+ __ret_ao; \
12213+})
12214
12215 /*
12216 * The exception table consists of pairs of addresses: the first is the
12217@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
12218 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
12219 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
12220
12221-
12222+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
12223+#define __copyuser_seg "gs;"
12224+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
12225+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
12226+#else
12227+#define __copyuser_seg
12228+#define __COPYUSER_SET_ES
12229+#define __COPYUSER_RESTORE_ES
12230+#endif
12231
12232 #ifdef CONFIG_X86_32
12233 #define __put_user_asm_u64(x, addr, err, errret) \
12234- asm volatile("1: movl %%eax,0(%2)\n" \
12235- "2: movl %%edx,4(%2)\n" \
12236+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
12237+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
12238 "3:\n" \
12239 ".section .fixup,\"ax\"\n" \
12240 "4: movl %3,%0\n" \
12241@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
12242 : "A" (x), "r" (addr), "i" (errret), "0" (err))
12243
12244 #define __put_user_asm_ex_u64(x, addr) \
12245- asm volatile("1: movl %%eax,0(%1)\n" \
12246- "2: movl %%edx,4(%1)\n" \
12247+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
12248+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
12249 "3:\n" \
12250 _ASM_EXTABLE(1b, 2b - 1b) \
12251 _ASM_EXTABLE(2b, 3b - 2b) \
12252@@ -253,7 +295,7 @@ extern void __put_user_8(void);
12253 __typeof__(*(ptr)) __pu_val; \
12254 __chk_user_ptr(ptr); \
12255 might_fault(); \
12256- __pu_val = x; \
12257+ __pu_val = (x); \
12258 switch (sizeof(*(ptr))) { \
12259 case 1: \
12260 __put_user_x(1, __pu_val, ptr, __ret_pu); \
12261@@ -374,7 +416,7 @@ do { \
12262 } while (0)
12263
12264 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12265- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
12266+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
12267 "2:\n" \
12268 ".section .fixup,\"ax\"\n" \
12269 "3: mov %3,%0\n" \
12270@@ -382,7 +424,7 @@ do { \
12271 " jmp 2b\n" \
12272 ".previous\n" \
12273 _ASM_EXTABLE(1b, 3b) \
12274- : "=r" (err), ltype(x) \
12275+ : "=r" (err), ltype (x) \
12276 : "m" (__m(addr)), "i" (errret), "0" (err))
12277
12278 #define __get_user_size_ex(x, ptr, size) \
12279@@ -407,7 +449,7 @@ do { \
12280 } while (0)
12281
12282 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
12283- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
12284+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
12285 "2:\n" \
12286 _ASM_EXTABLE(1b, 2b - 1b) \
12287 : ltype(x) : "m" (__m(addr)))
12288@@ -424,13 +466,24 @@ do { \
12289 int __gu_err; \
12290 unsigned long __gu_val; \
12291 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
12292- (x) = (__force __typeof__(*(ptr)))__gu_val; \
12293+ (x) = (__typeof__(*(ptr)))__gu_val; \
12294 __gu_err; \
12295 })
12296
12297 /* FIXME: this hack is definitely wrong -AK */
12298 struct __large_struct { unsigned long buf[100]; };
12299-#define __m(x) (*(struct __large_struct __user *)(x))
12300+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12301+#define ____m(x) \
12302+({ \
12303+ unsigned long ____x = (unsigned long)(x); \
12304+ if (____x < PAX_USER_SHADOW_BASE) \
12305+ ____x += PAX_USER_SHADOW_BASE; \
12306+ (void __user *)____x; \
12307+})
12308+#else
12309+#define ____m(x) (x)
12310+#endif
12311+#define __m(x) (*(struct __large_struct __user *)____m(x))
12312
12313 /*
12314 * Tell gcc we read from memory instead of writing: this is because
12315@@ -438,7 +491,7 @@ struct __large_struct { unsigned long buf[100]; };
12316 * aliasing issues.
12317 */
12318 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
12319- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
12320+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
12321 "2:\n" \
12322 ".section .fixup,\"ax\"\n" \
12323 "3: mov %3,%0\n" \
12324@@ -446,10 +499,10 @@ struct __large_struct { unsigned long buf[100]; };
12325 ".previous\n" \
12326 _ASM_EXTABLE(1b, 3b) \
12327 : "=r"(err) \
12328- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
12329+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
12330
12331 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
12332- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
12333+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
12334 "2:\n" \
12335 _ASM_EXTABLE(1b, 2b - 1b) \
12336 : : ltype(x), "m" (__m(addr)))
12337@@ -488,8 +541,12 @@ struct __large_struct { unsigned long buf[100]; };
12338 * On error, the variable @x is set to zero.
12339 */
12340
12341+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12342+#define __get_user(x, ptr) get_user((x), (ptr))
12343+#else
12344 #define __get_user(x, ptr) \
12345 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
12346+#endif
12347
12348 /**
12349 * __put_user: - Write a simple value into user space, with less checking.
12350@@ -511,8 +568,12 @@ struct __large_struct { unsigned long buf[100]; };
12351 * Returns zero on success, or -EFAULT on error.
12352 */
12353
12354+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
12355+#define __put_user(x, ptr) put_user((x), (ptr))
12356+#else
12357 #define __put_user(x, ptr) \
12358 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
12359+#endif
12360
12361 #define __get_user_unaligned __get_user
12362 #define __put_user_unaligned __put_user
12363@@ -530,7 +591,7 @@ struct __large_struct { unsigned long buf[100]; };
12364 #define get_user_ex(x, ptr) do { \
12365 unsigned long __gue_val; \
12366 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
12367- (x) = (__force __typeof__(*(ptr)))__gue_val; \
12368+ (x) = (__typeof__(*(ptr)))__gue_val; \
12369 } while (0)
12370
12371 #ifdef CONFIG_X86_WP_WORKS_OK
12372@@ -567,6 +628,7 @@ extern struct movsl_mask {
12373
12374 #define ARCH_HAS_NOCACHE_UACCESS 1
12375
12376+#define ARCH_HAS_SORT_EXTABLE
12377 #ifdef CONFIG_X86_32
12378 # include "uaccess_32.h"
12379 #else
12380diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
12381index 632fb44..e30e334 100644
12382--- a/arch/x86/include/asm/uaccess_32.h
12383+++ b/arch/x86/include/asm/uaccess_32.h
12384@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
12385 static __always_inline unsigned long __must_check
12386 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12387 {
12388+ pax_track_stack();
12389+
12390+ if ((long)n < 0)
12391+ return n;
12392+
12393 if (__builtin_constant_p(n)) {
12394 unsigned long ret;
12395
12396@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
12397 return ret;
12398 }
12399 }
12400+ if (!__builtin_constant_p(n))
12401+ check_object_size(from, n, true);
12402 return __copy_to_user_ll(to, from, n);
12403 }
12404
12405@@ -83,12 +90,16 @@ static __always_inline unsigned long __must_check
12406 __copy_to_user(void __user *to, const void *from, unsigned long n)
12407 {
12408 might_fault();
12409+
12410 return __copy_to_user_inatomic(to, from, n);
12411 }
12412
12413 static __always_inline unsigned long
12414 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
12415 {
12416+ if ((long)n < 0)
12417+ return n;
12418+
12419 /* Avoid zeroing the tail if the copy fails..
12420 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
12421 * but as the zeroing behaviour is only significant when n is not
12422@@ -138,6 +149,12 @@ static __always_inline unsigned long
12423 __copy_from_user(void *to, const void __user *from, unsigned long n)
12424 {
12425 might_fault();
12426+
12427+ pax_track_stack();
12428+
12429+ if ((long)n < 0)
12430+ return n;
12431+
12432 if (__builtin_constant_p(n)) {
12433 unsigned long ret;
12434
12435@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __user *from, unsigned long n)
12436 return ret;
12437 }
12438 }
12439+ if (!__builtin_constant_p(n))
12440+ check_object_size(to, n, false);
12441 return __copy_from_user_ll(to, from, n);
12442 }
12443
12444@@ -160,6 +179,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
12445 const void __user *from, unsigned long n)
12446 {
12447 might_fault();
12448+
12449+ if ((long)n < 0)
12450+ return n;
12451+
12452 if (__builtin_constant_p(n)) {
12453 unsigned long ret;
12454
12455@@ -182,14 +205,62 @@ static __always_inline unsigned long
12456 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
12457 unsigned long n)
12458 {
12459- return __copy_from_user_ll_nocache_nozero(to, from, n);
12460+ if ((long)n < 0)
12461+ return n;
12462+
12463+ return __copy_from_user_ll_nocache_nozero(to, from, n);
12464+}
12465+
12466+/**
12467+ * copy_to_user: - Copy a block of data into user space.
12468+ * @to: Destination address, in user space.
12469+ * @from: Source address, in kernel space.
12470+ * @n: Number of bytes to copy.
12471+ *
12472+ * Context: User context only. This function may sleep.
12473+ *
12474+ * Copy data from kernel space to user space.
12475+ *
12476+ * Returns number of bytes that could not be copied.
12477+ * On success, this will be zero.
12478+ */
12479+static __always_inline unsigned long __must_check
12480+copy_to_user(void __user *to, const void *from, unsigned long n)
12481+{
12482+ if (access_ok(VERIFY_WRITE, to, n))
12483+ n = __copy_to_user(to, from, n);
12484+ return n;
12485+}
12486+
12487+/**
12488+ * copy_from_user: - Copy a block of data from user space.
12489+ * @to: Destination address, in kernel space.
12490+ * @from: Source address, in user space.
12491+ * @n: Number of bytes to copy.
12492+ *
12493+ * Context: User context only. This function may sleep.
12494+ *
12495+ * Copy data from user space to kernel space.
12496+ *
12497+ * Returns number of bytes that could not be copied.
12498+ * On success, this will be zero.
12499+ *
12500+ * If some data could not be copied, this function will pad the copied
12501+ * data to the requested size using zero bytes.
12502+ */
12503+static __always_inline unsigned long __must_check
12504+copy_from_user(void *to, const void __user *from, unsigned long n)
12505+{
12506+ if (access_ok(VERIFY_READ, from, n))
12507+ n = __copy_from_user(to, from, n);
12508+ else if ((long)n > 0) {
12509+ if (!__builtin_constant_p(n))
12510+ check_object_size(to, n, false);
12511+ memset(to, 0, n);
12512+ }
12513+ return n;
12514 }
12515
12516-unsigned long __must_check copy_to_user(void __user *to,
12517- const void *from, unsigned long n);
12518-unsigned long __must_check copy_from_user(void *to,
12519- const void __user *from,
12520- unsigned long n);
12521 long __must_check strncpy_from_user(char *dst, const char __user *src,
12522 long count);
12523 long __must_check __strncpy_from_user(char *dst,
12524diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
12525index db24b21..f595ae7 100644
12526--- a/arch/x86/include/asm/uaccess_64.h
12527+++ b/arch/x86/include/asm/uaccess_64.h
12528@@ -9,6 +9,9 @@
12529 #include <linux/prefetch.h>
12530 #include <linux/lockdep.h>
12531 #include <asm/page.h>
12532+#include <asm/pgtable.h>
12533+
12534+#define set_fs(x) (current_thread_info()->addr_limit = (x))
12535
12536 /*
12537 * Copy To/From Userspace
12538@@ -16,116 +19,205 @@
12539
12540 /* Handles exceptions in both to and from, but doesn't do access_ok */
12541 __must_check unsigned long
12542-copy_user_generic(void *to, const void *from, unsigned len);
12543+copy_user_generic(void *to, const void *from, unsigned long len);
12544
12545 __must_check unsigned long
12546-copy_to_user(void __user *to, const void *from, unsigned len);
12547-__must_check unsigned long
12548-copy_from_user(void *to, const void __user *from, unsigned len);
12549-__must_check unsigned long
12550-copy_in_user(void __user *to, const void __user *from, unsigned len);
12551+copy_in_user(void __user *to, const void __user *from, unsigned long len);
12552
12553 static __always_inline __must_check
12554-int __copy_from_user(void *dst, const void __user *src, unsigned size)
12555+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
12556 {
12557- int ret = 0;
12558+ unsigned ret = 0;
12559
12560 might_fault();
12561- if (!__builtin_constant_p(size))
12562- return copy_user_generic(dst, (__force void *)src, size);
12563+
12564+ if (size > INT_MAX)
12565+ return size;
12566+
12567+#ifdef CONFIG_PAX_MEMORY_UDEREF
12568+ if (!__access_ok(VERIFY_READ, src, size))
12569+ return size;
12570+#endif
12571+
12572+ if (!__builtin_constant_p(size)) {
12573+ check_object_size(dst, size, false);
12574+
12575+#ifdef CONFIG_PAX_MEMORY_UDEREF
12576+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12577+ src += PAX_USER_SHADOW_BASE;
12578+#endif
12579+
12580+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12581+ }
12582 switch (size) {
12583- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
12584+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
12585 ret, "b", "b", "=q", 1);
12586 return ret;
12587- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
12588+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
12589 ret, "w", "w", "=r", 2);
12590 return ret;
12591- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
12592+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
12593 ret, "l", "k", "=r", 4);
12594 return ret;
12595- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
12596+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12597 ret, "q", "", "=r", 8);
12598 return ret;
12599 case 10:
12600- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12601+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12602 ret, "q", "", "=r", 10);
12603 if (unlikely(ret))
12604 return ret;
12605 __get_user_asm(*(u16 *)(8 + (char *)dst),
12606- (u16 __user *)(8 + (char __user *)src),
12607+ (const u16 __user *)(8 + (const char __user *)src),
12608 ret, "w", "w", "=r", 2);
12609 return ret;
12610 case 16:
12611- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
12612+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
12613 ret, "q", "", "=r", 16);
12614 if (unlikely(ret))
12615 return ret;
12616 __get_user_asm(*(u64 *)(8 + (char *)dst),
12617- (u64 __user *)(8 + (char __user *)src),
12618+ (const u64 __user *)(8 + (const char __user *)src),
12619 ret, "q", "", "=r", 8);
12620 return ret;
12621 default:
12622- return copy_user_generic(dst, (__force void *)src, size);
12623+
12624+#ifdef CONFIG_PAX_MEMORY_UDEREF
12625+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12626+ src += PAX_USER_SHADOW_BASE;
12627+#endif
12628+
12629+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12630 }
12631 }
12632
12633 static __always_inline __must_check
12634-int __copy_to_user(void __user *dst, const void *src, unsigned size)
12635+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
12636 {
12637- int ret = 0;
12638+ unsigned ret = 0;
12639
12640 might_fault();
12641- if (!__builtin_constant_p(size))
12642- return copy_user_generic((__force void *)dst, src, size);
12643+
12644+ pax_track_stack();
12645+
12646+ if (size > INT_MAX)
12647+ return size;
12648+
12649+#ifdef CONFIG_PAX_MEMORY_UDEREF
12650+ if (!__access_ok(VERIFY_WRITE, dst, size))
12651+ return size;
12652+#endif
12653+
12654+ if (!__builtin_constant_p(size)) {
12655+ check_object_size(src, size, true);
12656+
12657+#ifdef CONFIG_PAX_MEMORY_UDEREF
12658+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12659+ dst += PAX_USER_SHADOW_BASE;
12660+#endif
12661+
12662+ return copy_user_generic((__force_kernel void *)dst, src, size);
12663+ }
12664 switch (size) {
12665- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
12666+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
12667 ret, "b", "b", "iq", 1);
12668 return ret;
12669- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
12670+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
12671 ret, "w", "w", "ir", 2);
12672 return ret;
12673- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
12674+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
12675 ret, "l", "k", "ir", 4);
12676 return ret;
12677- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
12678+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12679 ret, "q", "", "er", 8);
12680 return ret;
12681 case 10:
12682- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12683+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12684 ret, "q", "", "er", 10);
12685 if (unlikely(ret))
12686 return ret;
12687 asm("":::"memory");
12688- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
12689+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
12690 ret, "w", "w", "ir", 2);
12691 return ret;
12692 case 16:
12693- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
12694+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
12695 ret, "q", "", "er", 16);
12696 if (unlikely(ret))
12697 return ret;
12698 asm("":::"memory");
12699- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
12700+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
12701 ret, "q", "", "er", 8);
12702 return ret;
12703 default:
12704- return copy_user_generic((__force void *)dst, src, size);
12705+
12706+#ifdef CONFIG_PAX_MEMORY_UDEREF
12707+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12708+ dst += PAX_USER_SHADOW_BASE;
12709+#endif
12710+
12711+ return copy_user_generic((__force_kernel void *)dst, src, size);
12712+ }
12713+}
12714+
12715+static __always_inline __must_check
12716+unsigned long copy_to_user(void __user *to, const void *from, unsigned long len)
12717+{
12718+ if (access_ok(VERIFY_WRITE, to, len))
12719+ len = __copy_to_user(to, from, len);
12720+ return len;
12721+}
12722+
12723+static __always_inline __must_check
12724+unsigned long copy_from_user(void *to, const void __user *from, unsigned long len)
12725+{
12726+ might_fault();
12727+
12728+ if (access_ok(VERIFY_READ, from, len))
12729+ len = __copy_from_user(to, from, len);
12730+ else if (len < INT_MAX) {
12731+ if (!__builtin_constant_p(len))
12732+ check_object_size(to, len, false);
12733+ memset(to, 0, len);
12734 }
12735+ return len;
12736 }
12737
12738 static __always_inline __must_check
12739-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12740+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
12741 {
12742- int ret = 0;
12743+ unsigned ret = 0;
12744
12745 might_fault();
12746- if (!__builtin_constant_p(size))
12747- return copy_user_generic((__force void *)dst,
12748- (__force void *)src, size);
12749+
12750+ pax_track_stack();
12751+
12752+ if (size > INT_MAX)
12753+ return size;
12754+
12755+#ifdef CONFIG_PAX_MEMORY_UDEREF
12756+ if (!__access_ok(VERIFY_READ, src, size))
12757+ return size;
12758+ if (!__access_ok(VERIFY_WRITE, dst, size))
12759+ return size;
12760+#endif
12761+
12762+ if (!__builtin_constant_p(size)) {
12763+
12764+#ifdef CONFIG_PAX_MEMORY_UDEREF
12765+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12766+ src += PAX_USER_SHADOW_BASE;
12767+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12768+ dst += PAX_USER_SHADOW_BASE;
12769+#endif
12770+
12771+ return copy_user_generic((__force_kernel void *)dst,
12772+ (__force_kernel const void *)src, size);
12773+ }
12774 switch (size) {
12775 case 1: {
12776 u8 tmp;
12777- __get_user_asm(tmp, (u8 __user *)src,
12778+ __get_user_asm(tmp, (const u8 __user *)src,
12779 ret, "b", "b", "=q", 1);
12780 if (likely(!ret))
12781 __put_user_asm(tmp, (u8 __user *)dst,
12782@@ -134,7 +226,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12783 }
12784 case 2: {
12785 u16 tmp;
12786- __get_user_asm(tmp, (u16 __user *)src,
12787+ __get_user_asm(tmp, (const u16 __user *)src,
12788 ret, "w", "w", "=r", 2);
12789 if (likely(!ret))
12790 __put_user_asm(tmp, (u16 __user *)dst,
12791@@ -144,7 +236,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12792
12793 case 4: {
12794 u32 tmp;
12795- __get_user_asm(tmp, (u32 __user *)src,
12796+ __get_user_asm(tmp, (const u32 __user *)src,
12797 ret, "l", "k", "=r", 4);
12798 if (likely(!ret))
12799 __put_user_asm(tmp, (u32 __user *)dst,
12800@@ -153,7 +245,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12801 }
12802 case 8: {
12803 u64 tmp;
12804- __get_user_asm(tmp, (u64 __user *)src,
12805+ __get_user_asm(tmp, (const u64 __user *)src,
12806 ret, "q", "", "=r", 8);
12807 if (likely(!ret))
12808 __put_user_asm(tmp, (u64 __user *)dst,
12809@@ -161,8 +253,16 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
12810 return ret;
12811 }
12812 default:
12813- return copy_user_generic((__force void *)dst,
12814- (__force void *)src, size);
12815+
12816+#ifdef CONFIG_PAX_MEMORY_UDEREF
12817+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12818+ src += PAX_USER_SHADOW_BASE;
12819+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12820+ dst += PAX_USER_SHADOW_BASE;
12821+#endif
12822+
12823+ return copy_user_generic((__force_kernel void *)dst,
12824+ (__force_kernel const void *)src, size);
12825 }
12826 }
12827
12828@@ -176,33 +276,75 @@ __must_check long strlen_user(const char __user *str);
12829 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
12830 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
12831
12832-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
12833- unsigned size);
12834+static __must_check __always_inline unsigned long
12835+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
12836+{
12837+ pax_track_stack();
12838+
12839+ if (size > INT_MAX)
12840+ return size;
12841+
12842+#ifdef CONFIG_PAX_MEMORY_UDEREF
12843+ if (!__access_ok(VERIFY_READ, src, size))
12844+ return size;
12845
12846-static __must_check __always_inline int
12847-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
12848+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
12849+ src += PAX_USER_SHADOW_BASE;
12850+#endif
12851+
12852+ return copy_user_generic(dst, (__force_kernel const void *)src, size);
12853+}
12854+
12855+static __must_check __always_inline unsigned long
12856+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
12857 {
12858- return copy_user_generic((__force void *)dst, src, size);
12859+ if (size > INT_MAX)
12860+ return size;
12861+
12862+#ifdef CONFIG_PAX_MEMORY_UDEREF
12863+ if (!__access_ok(VERIFY_WRITE, dst, size))
12864+ return size;
12865+
12866+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
12867+ dst += PAX_USER_SHADOW_BASE;
12868+#endif
12869+
12870+ return copy_user_generic((__force_kernel void *)dst, src, size);
12871 }
12872
12873-extern long __copy_user_nocache(void *dst, const void __user *src,
12874- unsigned size, int zerorest);
12875+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
12876+ unsigned long size, int zerorest);
12877
12878-static inline int
12879-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
12880+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
12881 {
12882 might_sleep();
12883+
12884+ if (size > INT_MAX)
12885+ return size;
12886+
12887+#ifdef CONFIG_PAX_MEMORY_UDEREF
12888+ if (!__access_ok(VERIFY_READ, src, size))
12889+ return size;
12890+#endif
12891+
12892 return __copy_user_nocache(dst, src, size, 1);
12893 }
12894
12895-static inline int
12896-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12897- unsigned size)
12898+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
12899+ unsigned long size)
12900 {
12901+ if (size > INT_MAX)
12902+ return size;
12903+
12904+#ifdef CONFIG_PAX_MEMORY_UDEREF
12905+ if (!__access_ok(VERIFY_READ, src, size))
12906+ return size;
12907+#endif
12908+
12909 return __copy_user_nocache(dst, src, size, 0);
12910 }
12911
12912-unsigned long
12913-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
12914+extern unsigned long
12915+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest);
12916
12917 #endif /* _ASM_X86_UACCESS_64_H */
12918diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h
12919index 9064052..786cfbc 100644
12920--- a/arch/x86/include/asm/vdso.h
12921+++ b/arch/x86/include/asm/vdso.h
12922@@ -25,7 +25,7 @@ extern const char VDSO32_PRELINK[];
12923 #define VDSO32_SYMBOL(base, name) \
12924 ({ \
12925 extern const char VDSO32_##name[]; \
12926- (void *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12927+ (void __user *)(VDSO32_##name - VDSO32_PRELINK + (unsigned long)(base)); \
12928 })
12929 #endif
12930
12931diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
12932index 3d61e20..9507180 100644
12933--- a/arch/x86/include/asm/vgtod.h
12934+++ b/arch/x86/include/asm/vgtod.h
12935@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
12936 int sysctl_enabled;
12937 struct timezone sys_tz;
12938 struct { /* extract of a clocksource struct */
12939+ char name[8];
12940 cycle_t (*vread)(void);
12941 cycle_t cycle_last;
12942 cycle_t mask;
12943diff --git a/arch/x86/include/asm/vmi.h b/arch/x86/include/asm/vmi.h
12944index 61e08c0..b0da582 100644
12945--- a/arch/x86/include/asm/vmi.h
12946+++ b/arch/x86/include/asm/vmi.h
12947@@ -191,6 +191,7 @@ struct vrom_header {
12948 u8 reserved[96]; /* Reserved for headers */
12949 char vmi_init[8]; /* VMI_Init jump point */
12950 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
12951+ char rom_data[8048]; /* rest of the option ROM */
12952 } __attribute__((packed));
12953
12954 struct pnp_header {
12955diff --git a/arch/x86/include/asm/vmi_time.h b/arch/x86/include/asm/vmi_time.h
12956index c6e0bee..fcb9f74 100644
12957--- a/arch/x86/include/asm/vmi_time.h
12958+++ b/arch/x86/include/asm/vmi_time.h
12959@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
12960 int (*wallclock_updated)(void);
12961 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
12962 void (*cancel_alarm)(u32 flags);
12963-} vmi_timer_ops;
12964+} __no_const vmi_timer_ops;
12965
12966 /* Prototypes */
12967 extern void __init vmi_time_init(void);
12968diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h
12969index d0983d2..1f7c9e9 100644
12970--- a/arch/x86/include/asm/vsyscall.h
12971+++ b/arch/x86/include/asm/vsyscall.h
12972@@ -15,9 +15,10 @@ enum vsyscall_num {
12973
12974 #ifdef __KERNEL__
12975 #include <linux/seqlock.h>
12976+#include <linux/getcpu.h>
12977+#include <linux/time.h>
12978
12979 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
12980-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
12981
12982 /* Definitions for CONFIG_GENERIC_TIME definitions */
12983 #define __section_vsyscall_gtod_data __attribute__ \
12984@@ -31,7 +32,6 @@ enum vsyscall_num {
12985 #define VGETCPU_LSL 2
12986
12987 extern int __vgetcpu_mode;
12988-extern volatile unsigned long __jiffies;
12989
12990 /* kernel space (writeable) */
12991 extern int vgetcpu_mode;
12992@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
12993
12994 extern void map_vsyscall(void);
12995
12996+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
12997+extern time_t vtime(time_t *t);
12998+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
12999 #endif /* __KERNEL__ */
13000
13001 #endif /* _ASM_X86_VSYSCALL_H */
13002diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
13003index 2c756fd..3377e37 100644
13004--- a/arch/x86/include/asm/x86_init.h
13005+++ b/arch/x86/include/asm/x86_init.h
13006@@ -28,7 +28,7 @@ struct x86_init_mpparse {
13007 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
13008 void (*find_smp_config)(unsigned int reserve);
13009 void (*get_smp_config)(unsigned int early);
13010-};
13011+} __no_const;
13012
13013 /**
13014 * struct x86_init_resources - platform specific resource related ops
13015@@ -42,7 +42,7 @@ struct x86_init_resources {
13016 void (*probe_roms)(void);
13017 void (*reserve_resources)(void);
13018 char *(*memory_setup)(void);
13019-};
13020+} __no_const;
13021
13022 /**
13023 * struct x86_init_irqs - platform specific interrupt setup
13024@@ -55,7 +55,7 @@ struct x86_init_irqs {
13025 void (*pre_vector_init)(void);
13026 void (*intr_init)(void);
13027 void (*trap_init)(void);
13028-};
13029+} __no_const;
13030
13031 /**
13032 * struct x86_init_oem - oem platform specific customizing functions
13033@@ -65,7 +65,7 @@ struct x86_init_irqs {
13034 struct x86_init_oem {
13035 void (*arch_setup)(void);
13036 void (*banner)(void);
13037-};
13038+} __no_const;
13039
13040 /**
13041 * struct x86_init_paging - platform specific paging functions
13042@@ -75,7 +75,7 @@ struct x86_init_oem {
13043 struct x86_init_paging {
13044 void (*pagetable_setup_start)(pgd_t *base);
13045 void (*pagetable_setup_done)(pgd_t *base);
13046-};
13047+} __no_const;
13048
13049 /**
13050 * struct x86_init_timers - platform specific timer setup
13051@@ -88,7 +88,7 @@ struct x86_init_timers {
13052 void (*setup_percpu_clockev)(void);
13053 void (*tsc_pre_init)(void);
13054 void (*timer_init)(void);
13055-};
13056+} __no_const;
13057
13058 /**
13059 * struct x86_init_ops - functions for platform specific setup
13060@@ -101,7 +101,7 @@ struct x86_init_ops {
13061 struct x86_init_oem oem;
13062 struct x86_init_paging paging;
13063 struct x86_init_timers timers;
13064-};
13065+} __no_const;
13066
13067 /**
13068 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
13069@@ -109,7 +109,7 @@ struct x86_init_ops {
13070 */
13071 struct x86_cpuinit_ops {
13072 void (*setup_percpu_clockev)(void);
13073-};
13074+} __no_const;
13075
13076 /**
13077 * struct x86_platform_ops - platform specific runtime functions
13078@@ -121,7 +121,7 @@ struct x86_platform_ops {
13079 unsigned long (*calibrate_tsc)(void);
13080 unsigned long (*get_wallclock)(void);
13081 int (*set_wallclock)(unsigned long nowtime);
13082-};
13083+} __no_const;
13084
13085 extern struct x86_init_ops x86_init;
13086 extern struct x86_cpuinit_ops x86_cpuinit;
13087diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
13088index 727acc1..554f3eb 100644
13089--- a/arch/x86/include/asm/xsave.h
13090+++ b/arch/x86/include/asm/xsave.h
13091@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct xsave_struct *fx)
13092 static inline int xsave_user(struct xsave_struct __user *buf)
13093 {
13094 int err;
13095+
13096+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13097+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
13098+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
13099+#endif
13100+
13101 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
13102 "2:\n"
13103 ".section .fixup,\"ax\"\n"
13104@@ -78,10 +84,15 @@ static inline int xsave_user(struct xsave_struct __user *buf)
13105 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
13106 {
13107 int err;
13108- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
13109+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
13110 u32 lmask = mask;
13111 u32 hmask = mask >> 32;
13112
13113+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13114+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
13115+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
13116+#endif
13117+
13118 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
13119 "2:\n"
13120 ".section .fixup,\"ax\"\n"
13121diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
13122index 6a564ac..9b1340c 100644
13123--- a/arch/x86/kernel/acpi/realmode/Makefile
13124+++ b/arch/x86/kernel/acpi/realmode/Makefile
13125@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
13126 $(call cc-option, -fno-stack-protector) \
13127 $(call cc-option, -mpreferred-stack-boundary=2)
13128 KBUILD_CFLAGS += $(call cc-option, -m32)
13129+ifdef CONSTIFY_PLUGIN
13130+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
13131+endif
13132 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
13133 GCOV_PROFILE := n
13134
13135diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/kernel/acpi/realmode/wakeup.S
13136index 580b4e2..d4129e4 100644
13137--- a/arch/x86/kernel/acpi/realmode/wakeup.S
13138+++ b/arch/x86/kernel/acpi/realmode/wakeup.S
13139@@ -91,6 +91,9 @@ _start:
13140 /* Do any other stuff... */
13141
13142 #ifndef CONFIG_64BIT
13143+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
13144+ call verify_cpu
13145+
13146 /* This could also be done in C code... */
13147 movl pmode_cr3, %eax
13148 movl %eax, %cr3
13149@@ -104,7 +107,7 @@ _start:
13150 movl %eax, %ecx
13151 orl %edx, %ecx
13152 jz 1f
13153- movl $0xc0000080, %ecx
13154+ mov $MSR_EFER, %ecx
13155 wrmsr
13156 1:
13157
13158@@ -114,6 +117,7 @@ _start:
13159 movl pmode_cr0, %eax
13160 movl %eax, %cr0
13161 jmp pmode_return
13162+# include "../../verify_cpu.S"
13163 #else
13164 pushw $0
13165 pushw trampoline_segment
13166diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
13167index ca93638..7042f24 100644
13168--- a/arch/x86/kernel/acpi/sleep.c
13169+++ b/arch/x86/kernel/acpi/sleep.c
13170@@ -11,11 +11,12 @@
13171 #include <linux/cpumask.h>
13172 #include <asm/segment.h>
13173 #include <asm/desc.h>
13174+#include <asm/e820.h>
13175
13176 #include "realmode/wakeup.h"
13177 #include "sleep.h"
13178
13179-unsigned long acpi_wakeup_address;
13180+unsigned long acpi_wakeup_address = 0x2000;
13181 unsigned long acpi_realmode_flags;
13182
13183 /* address in low memory of the wakeup routine. */
13184@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
13185 #else /* CONFIG_64BIT */
13186 header->trampoline_segment = setup_trampoline() >> 4;
13187 #ifdef CONFIG_SMP
13188- stack_start.sp = temp_stack + sizeof(temp_stack);
13189+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
13190+
13191+ pax_open_kernel();
13192 early_gdt_descr.address =
13193 (unsigned long)get_cpu_gdt_table(smp_processor_id());
13194+ pax_close_kernel();
13195+
13196 initial_gs = per_cpu_offset(smp_processor_id());
13197 #endif
13198 initial_code = (unsigned long)wakeup_long64;
13199@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
13200 return;
13201 }
13202
13203- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
13204-
13205- if (!acpi_realmode) {
13206- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
13207- return;
13208- }
13209-
13210- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
13211+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
13212+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);;
13213 }
13214
13215
13216diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
13217index 8ded418..079961e 100644
13218--- a/arch/x86/kernel/acpi/wakeup_32.S
13219+++ b/arch/x86/kernel/acpi/wakeup_32.S
13220@@ -30,13 +30,11 @@ wakeup_pmode_return:
13221 # and restore the stack ... but you need gdt for this to work
13222 movl saved_context_esp, %esp
13223
13224- movl %cs:saved_magic, %eax
13225- cmpl $0x12345678, %eax
13226+ cmpl $0x12345678, saved_magic
13227 jne bogus_magic
13228
13229 # jump to place where we left off
13230- movl saved_eip, %eax
13231- jmp *%eax
13232+ jmp *(saved_eip)
13233
13234 bogus_magic:
13235 jmp bogus_magic
13236diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
13237index de7353c..075da5f 100644
13238--- a/arch/x86/kernel/alternative.c
13239+++ b/arch/x86/kernel/alternative.c
13240@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
13241
13242 BUG_ON(p->len > MAX_PATCH_LEN);
13243 /* prep the buffer with the original instructions */
13244- memcpy(insnbuf, p->instr, p->len);
13245+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
13246 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
13247 (unsigned long)p->instr, p->len);
13248
13249@@ -475,7 +475,7 @@ void __init alternative_instructions(void)
13250 if (smp_alt_once)
13251 free_init_pages("SMP alternatives",
13252 (unsigned long)__smp_locks,
13253- (unsigned long)__smp_locks_end);
13254+ PAGE_ALIGN((unsigned long)__smp_locks_end));
13255
13256 restart_nmi();
13257 }
13258@@ -492,13 +492,17 @@ void __init alternative_instructions(void)
13259 * instructions. And on the local CPU you need to be protected again NMI or MCE
13260 * handlers seeing an inconsistent instruction while you patch.
13261 */
13262-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13263+static void *__kprobes text_poke_early(void *addr, const void *opcode,
13264 size_t len)
13265 {
13266 unsigned long flags;
13267 local_irq_save(flags);
13268- memcpy(addr, opcode, len);
13269+
13270+ pax_open_kernel();
13271+ memcpy(ktla_ktva(addr), opcode, len);
13272 sync_core();
13273+ pax_close_kernel();
13274+
13275 local_irq_restore(flags);
13276 /* Could also do a CLFLUSH here to speed up CPU recovery; but
13277 that causes hangs on some VIA CPUs. */
13278@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_early(void *addr, const void *opcode,
13279 */
13280 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
13281 {
13282- unsigned long flags;
13283- char *vaddr;
13284+ unsigned char *vaddr = ktla_ktva(addr);
13285 struct page *pages[2];
13286- int i;
13287+ size_t i;
13288
13289 if (!core_kernel_text((unsigned long)addr)) {
13290- pages[0] = vmalloc_to_page(addr);
13291- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
13292+ pages[0] = vmalloc_to_page(vaddr);
13293+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
13294 } else {
13295- pages[0] = virt_to_page(addr);
13296+ pages[0] = virt_to_page(vaddr);
13297 WARN_ON(!PageReserved(pages[0]));
13298- pages[1] = virt_to_page(addr + PAGE_SIZE);
13299+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
13300 }
13301 BUG_ON(!pages[0]);
13302- local_irq_save(flags);
13303- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
13304- if (pages[1])
13305- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
13306- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
13307- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
13308- clear_fixmap(FIX_TEXT_POKE0);
13309- if (pages[1])
13310- clear_fixmap(FIX_TEXT_POKE1);
13311- local_flush_tlb();
13312- sync_core();
13313- /* Could also do a CLFLUSH here to speed up CPU recovery; but
13314- that causes hangs on some VIA CPUs. */
13315+ text_poke_early(addr, opcode, len);
13316 for (i = 0; i < len; i++)
13317- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
13318- local_irq_restore(flags);
13319+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
13320 return addr;
13321 }
13322diff --git a/arch/x86/kernel/amd_iommu.c b/arch/x86/kernel/amd_iommu.c
13323index 3a44b75..1601800 100644
13324--- a/arch/x86/kernel/amd_iommu.c
13325+++ b/arch/x86/kernel/amd_iommu.c
13326@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(void)
13327 }
13328 }
13329
13330-static struct dma_map_ops amd_iommu_dma_ops = {
13331+static const struct dma_map_ops amd_iommu_dma_ops = {
13332 .alloc_coherent = alloc_coherent,
13333 .free_coherent = free_coherent,
13334 .map_page = map_page,
13335diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
13336index 1d2d670..8e3f477 100644
13337--- a/arch/x86/kernel/apic/apic.c
13338+++ b/arch/x86/kernel/apic/apic.c
13339@@ -170,7 +170,7 @@ int first_system_vector = 0xfe;
13340 /*
13341 * Debug level, exported for io_apic.c
13342 */
13343-unsigned int apic_verbosity;
13344+int apic_verbosity;
13345
13346 int pic_mode;
13347
13348@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs *regs)
13349 apic_write(APIC_ESR, 0);
13350 v1 = apic_read(APIC_ESR);
13351 ack_APIC_irq();
13352- atomic_inc(&irq_err_count);
13353+ atomic_inc_unchecked(&irq_err_count);
13354
13355 /*
13356 * Here is what the APIC error bits mean:
13357@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(void)
13358 u16 *bios_cpu_apicid;
13359 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
13360
13361+ pax_track_stack();
13362+
13363 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
13364 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
13365
13366diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
13367index 8928d97..f799cea 100644
13368--- a/arch/x86/kernel/apic/io_apic.c
13369+++ b/arch/x86/kernel/apic/io_apic.c
13370@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapic_entries(void)
13371 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
13372 GFP_ATOMIC);
13373 if (!ioapic_entries)
13374- return 0;
13375+ return NULL;
13376
13377 for (apic = 0; apic < nr_ioapics; apic++) {
13378 ioapic_entries[apic] =
13379@@ -733,7 +733,7 @@ nomem:
13380 kfree(ioapic_entries[apic]);
13381 kfree(ioapic_entries);
13382
13383- return 0;
13384+ return NULL;
13385 }
13386
13387 /*
13388@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
13389 }
13390 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
13391
13392-void lock_vector_lock(void)
13393+void lock_vector_lock(void) __acquires(vector_lock)
13394 {
13395 /* Used to the online set of cpus does not change
13396 * during assign_irq_vector.
13397@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
13398 spin_lock(&vector_lock);
13399 }
13400
13401-void unlock_vector_lock(void)
13402+void unlock_vector_lock(void) __releases(vector_lock)
13403 {
13404 spin_unlock(&vector_lock);
13405 }
13406@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int irq)
13407 ack_APIC_irq();
13408 }
13409
13410-atomic_t irq_mis_count;
13411+atomic_unchecked_t irq_mis_count;
13412
13413 static void ack_apic_level(unsigned int irq)
13414 {
13415@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int irq)
13416
13417 /* Tail end of version 0x11 I/O APIC bug workaround */
13418 if (!(v & (1 << (i & 0x1f)))) {
13419- atomic_inc(&irq_mis_count);
13420+ atomic_inc_unchecked(&irq_mis_count);
13421 spin_lock(&ioapic_lock);
13422 __mask_and_edge_IO_APIC_irq(cfg);
13423 __unmask_and_level_IO_APIC_irq(cfg);
13424diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
13425index 151ace6..f317474 100644
13426--- a/arch/x86/kernel/apm_32.c
13427+++ b/arch/x86/kernel/apm_32.c
13428@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
13429 * This is for buggy BIOS's that refer to (real mode) segment 0x40
13430 * even though they are called in protected mode.
13431 */
13432-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
13433+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
13434 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
13435
13436 static const char driver_version[] = "1.16ac"; /* no spaces */
13437@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
13438 BUG_ON(cpu != 0);
13439 gdt = get_cpu_gdt_table(cpu);
13440 save_desc_40 = gdt[0x40 / 8];
13441+
13442+ pax_open_kernel();
13443 gdt[0x40 / 8] = bad_bios_desc;
13444+ pax_close_kernel();
13445
13446 apm_irq_save(flags);
13447 APM_DO_SAVE_SEGS;
13448@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
13449 &call->esi);
13450 APM_DO_RESTORE_SEGS;
13451 apm_irq_restore(flags);
13452+
13453+ pax_open_kernel();
13454 gdt[0x40 / 8] = save_desc_40;
13455+ pax_close_kernel();
13456+
13457 put_cpu();
13458
13459 return call->eax & 0xff;
13460@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void *_call)
13461 BUG_ON(cpu != 0);
13462 gdt = get_cpu_gdt_table(cpu);
13463 save_desc_40 = gdt[0x40 / 8];
13464+
13465+ pax_open_kernel();
13466 gdt[0x40 / 8] = bad_bios_desc;
13467+ pax_close_kernel();
13468
13469 apm_irq_save(flags);
13470 APM_DO_SAVE_SEGS;
13471@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void *_call)
13472 &call->eax);
13473 APM_DO_RESTORE_SEGS;
13474 apm_irq_restore(flags);
13475+
13476+ pax_open_kernel();
13477 gdt[0x40 / 8] = save_desc_40;
13478+ pax_close_kernel();
13479+
13480 put_cpu();
13481 return error;
13482 }
13483@@ -975,7 +989,7 @@ recalc:
13484
13485 static void apm_power_off(void)
13486 {
13487- unsigned char po_bios_call[] = {
13488+ const unsigned char po_bios_call[] = {
13489 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
13490 0x8e, 0xd0, /* movw ax,ss */
13491 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
13492@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
13493 * code to that CPU.
13494 */
13495 gdt = get_cpu_gdt_table(0);
13496+
13497+ pax_open_kernel();
13498 set_desc_base(&gdt[APM_CS >> 3],
13499 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
13500 set_desc_base(&gdt[APM_CS_16 >> 3],
13501 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
13502 set_desc_base(&gdt[APM_DS >> 3],
13503 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
13504+ pax_close_kernel();
13505
13506 proc_create("apm", 0, NULL, &apm_file_ops);
13507
13508diff --git a/arch/x86/kernel/asm-offsets_32.c b/arch/x86/kernel/asm-offsets_32.c
13509index dfdbf64..9b2b6ce 100644
13510--- a/arch/x86/kernel/asm-offsets_32.c
13511+++ b/arch/x86/kernel/asm-offsets_32.c
13512@@ -51,7 +51,6 @@ void foo(void)
13513 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
13514 BLANK();
13515
13516- OFFSET(TI_task, thread_info, task);
13517 OFFSET(TI_exec_domain, thread_info, exec_domain);
13518 OFFSET(TI_flags, thread_info, flags);
13519 OFFSET(TI_status, thread_info, status);
13520@@ -60,6 +59,8 @@ void foo(void)
13521 OFFSET(TI_restart_block, thread_info, restart_block);
13522 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
13523 OFFSET(TI_cpu, thread_info, cpu);
13524+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
13525+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13526 BLANK();
13527
13528 OFFSET(GDS_size, desc_ptr, size);
13529@@ -99,6 +100,7 @@ void foo(void)
13530
13531 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13532 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
13533+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13534 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
13535 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
13536 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
13537@@ -115,6 +117,11 @@ void foo(void)
13538 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
13539 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13540 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13541+
13542+#ifdef CONFIG_PAX_KERNEXEC
13543+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13544+#endif
13545+
13546 #endif
13547
13548 #ifdef CONFIG_XEN
13549diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
13550index 4a6aeed..371de20 100644
13551--- a/arch/x86/kernel/asm-offsets_64.c
13552+++ b/arch/x86/kernel/asm-offsets_64.c
13553@@ -44,6 +44,8 @@ int main(void)
13554 ENTRY(addr_limit);
13555 ENTRY(preempt_count);
13556 ENTRY(status);
13557+ ENTRY(lowest_stack);
13558+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
13559 #ifdef CONFIG_IA32_EMULATION
13560 ENTRY(sysenter_return);
13561 #endif
13562@@ -63,6 +65,18 @@ int main(void)
13563 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
13564 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
13565 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
13566+
13567+#ifdef CONFIG_PAX_KERNEXEC
13568+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
13569+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
13570+#endif
13571+
13572+#ifdef CONFIG_PAX_MEMORY_UDEREF
13573+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
13574+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
13575+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
13576+#endif
13577+
13578 #endif
13579
13580
13581@@ -115,6 +129,7 @@ int main(void)
13582 ENTRY(cr8);
13583 BLANK();
13584 #undef ENTRY
13585+ DEFINE(TSS_size, sizeof(struct tss_struct));
13586 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
13587 BLANK();
13588 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
13589@@ -130,6 +145,7 @@ int main(void)
13590
13591 BLANK();
13592 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
13593+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
13594 #ifdef CONFIG_XEN
13595 BLANK();
13596 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
13597diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
13598index ff502cc..dc5133e 100644
13599--- a/arch/x86/kernel/cpu/Makefile
13600+++ b/arch/x86/kernel/cpu/Makefile
13601@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
13602 CFLAGS_REMOVE_common.o = -pg
13603 endif
13604
13605-# Make sure load_percpu_segment has no stackprotector
13606-nostackp := $(call cc-option, -fno-stack-protector)
13607-CFLAGS_common.o := $(nostackp)
13608-
13609 obj-y := intel_cacheinfo.o addon_cpuid_features.o
13610 obj-y += proc.o capflags.o powerflags.o common.o
13611 obj-y += vmware.o hypervisor.o sched.o
13612diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
13613index 6e082dc..a0b5f36 100644
13614--- a/arch/x86/kernel/cpu/amd.c
13615+++ b/arch/x86/kernel/cpu/amd.c
13616@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
13617 unsigned int size)
13618 {
13619 /* AMD errata T13 (order #21922) */
13620- if ((c->x86 == 6)) {
13621+ if (c->x86 == 6) {
13622 /* Duron Rev A0 */
13623 if (c->x86_model == 3 && c->x86_mask == 0)
13624 size = 64;
13625diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
13626index 4e34d10..ba6bc97 100644
13627--- a/arch/x86/kernel/cpu/common.c
13628+++ b/arch/x86/kernel/cpu/common.c
13629@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
13630
13631 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
13632
13633-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
13634-#ifdef CONFIG_X86_64
13635- /*
13636- * We need valid kernel segments for data and code in long mode too
13637- * IRET will check the segment types kkeil 2000/10/28
13638- * Also sysret mandates a special GDT layout
13639- *
13640- * TLS descriptors are currently at a different place compared to i386.
13641- * Hopefully nobody expects them at a fixed place (Wine?)
13642- */
13643- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
13644- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
13645- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
13646- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
13647- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
13648- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
13649-#else
13650- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
13651- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13652- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
13653- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
13654- /*
13655- * Segments used for calling PnP BIOS have byte granularity.
13656- * They code segments and data segments have fixed 64k limits,
13657- * the transfer segment sizes are set at run time.
13658- */
13659- /* 32-bit code */
13660- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13661- /* 16-bit code */
13662- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13663- /* 16-bit data */
13664- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
13665- /* 16-bit data */
13666- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
13667- /* 16-bit data */
13668- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
13669- /*
13670- * The APM segments have byte granularity and their bases
13671- * are set at run time. All have 64k limits.
13672- */
13673- /* 32-bit code */
13674- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
13675- /* 16-bit code */
13676- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
13677- /* data */
13678- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
13679-
13680- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13681- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
13682- GDT_STACK_CANARY_INIT
13683-#endif
13684-} };
13685-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
13686-
13687 static int __init x86_xsave_setup(char *s)
13688 {
13689 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
13690@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
13691 {
13692 struct desc_ptr gdt_descr;
13693
13694- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
13695+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
13696 gdt_descr.size = GDT_SIZE - 1;
13697 load_gdt(&gdt_descr);
13698 /* Reload the per-cpu base */
13699@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
13700 /* Filter out anything that depends on CPUID levels we don't have */
13701 filter_cpuid_features(c, true);
13702
13703+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
13704+ setup_clear_cpu_cap(X86_FEATURE_SEP);
13705+#endif
13706+
13707 /* If the model name is still unset, do table lookup. */
13708 if (!c->x86_model_id[0]) {
13709 const char *p;
13710@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(char *arg)
13711 }
13712 __setup("clearcpuid=", setup_disablecpuid);
13713
13714+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
13715+EXPORT_PER_CPU_SYMBOL(current_tinfo);
13716+
13717 #ifdef CONFIG_X86_64
13718 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
13719
13720@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
13721 EXPORT_PER_CPU_SYMBOL(current_task);
13722
13723 DEFINE_PER_CPU(unsigned long, kernel_stack) =
13724- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
13725+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
13726 EXPORT_PER_CPU_SYMBOL(kernel_stack);
13727
13728 DEFINE_PER_CPU(char *, irq_stack_ptr) =
13729@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
13730 {
13731 memset(regs, 0, sizeof(struct pt_regs));
13732 regs->fs = __KERNEL_PERCPU;
13733- regs->gs = __KERNEL_STACK_CANARY;
13734+ savesegment(gs, regs->gs);
13735
13736 return regs;
13737 }
13738@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
13739 int i;
13740
13741 cpu = stack_smp_processor_id();
13742- t = &per_cpu(init_tss, cpu);
13743+ t = init_tss + cpu;
13744 orig_ist = &per_cpu(orig_ist, cpu);
13745
13746 #ifdef CONFIG_NUMA
13747@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
13748 switch_to_new_gdt(cpu);
13749 loadsegment(fs, 0);
13750
13751- load_idt((const struct desc_ptr *)&idt_descr);
13752+ load_idt(&idt_descr);
13753
13754 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
13755 syscall_init();
13756@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
13757 wrmsrl(MSR_KERNEL_GS_BASE, 0);
13758 barrier();
13759
13760- check_efer();
13761 if (cpu != 0)
13762 enable_x2apic();
13763
13764@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
13765 {
13766 int cpu = smp_processor_id();
13767 struct task_struct *curr = current;
13768- struct tss_struct *t = &per_cpu(init_tss, cpu);
13769+ struct tss_struct *t = init_tss + cpu;
13770 struct thread_struct *thread = &curr->thread;
13771
13772 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
13773diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
13774index 6a77cca..4f4fca0 100644
13775--- a/arch/x86/kernel/cpu/intel.c
13776+++ b/arch/x86/kernel/cpu/intel.c
13777@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug(void)
13778 * Update the IDT descriptor and reload the IDT so that
13779 * it uses the read-only mapped virtual address.
13780 */
13781- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
13782+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
13783 load_idt(&idt_descr);
13784 }
13785 #endif
13786diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
13787index 417990f..96dc36b 100644
13788--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
13789+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
13790@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13791 return ret;
13792 }
13793
13794-static struct sysfs_ops sysfs_ops = {
13795+static const struct sysfs_ops sysfs_ops = {
13796 .show = show,
13797 .store = store,
13798 };
13799diff --git a/arch/x86/kernel/cpu/mcheck/mce-inject.c b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13800index 472763d..9831e11 100644
13801--- a/arch/x86/kernel/cpu/mcheck/mce-inject.c
13802+++ b/arch/x86/kernel/cpu/mcheck/mce-inject.c
13803@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *filp, const char __user *ubuf,
13804 static int inject_init(void)
13805 {
13806 printk(KERN_INFO "Machine check injector initialized\n");
13807- mce_chrdev_ops.write = mce_write;
13808+ pax_open_kernel();
13809+ *(void **)&mce_chrdev_ops.write = mce_write;
13810+ pax_close_kernel();
13811 register_die_notifier(&mce_raise_nb);
13812 return 0;
13813 }
13814diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
13815index 0f16a2b..21740f5 100644
13816--- a/arch/x86/kernel/cpu/mcheck/mce.c
13817+++ b/arch/x86/kernel/cpu/mcheck/mce.c
13818@@ -43,6 +43,7 @@
13819 #include <asm/ipi.h>
13820 #include <asm/mce.h>
13821 #include <asm/msr.h>
13822+#include <asm/local.h>
13823
13824 #include "mce-internal.h"
13825
13826@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
13827 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
13828 m->cs, m->ip);
13829
13830- if (m->cs == __KERNEL_CS)
13831+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
13832 print_symbol("{%s}", m->ip);
13833 pr_cont("\n");
13834 }
13835@@ -221,10 +222,10 @@ static void print_mce_tail(void)
13836
13837 #define PANIC_TIMEOUT 5 /* 5 seconds */
13838
13839-static atomic_t mce_paniced;
13840+static atomic_unchecked_t mce_paniced;
13841
13842 static int fake_panic;
13843-static atomic_t mce_fake_paniced;
13844+static atomic_unchecked_t mce_fake_paniced;
13845
13846 /* Panic in progress. Enable interrupts and wait for final IPI */
13847 static void wait_for_panic(void)
13848@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13849 /*
13850 * Make sure only one CPU runs in machine check panic
13851 */
13852- if (atomic_inc_return(&mce_paniced) > 1)
13853+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
13854 wait_for_panic();
13855 barrier();
13856
13857@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
13858 console_verbose();
13859 } else {
13860 /* Don't log too much for fake panic */
13861- if (atomic_inc_return(&mce_fake_paniced) > 1)
13862+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
13863 return;
13864 }
13865 print_mce_head();
13866@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
13867 * might have been modified by someone else.
13868 */
13869 rmb();
13870- if (atomic_read(&mce_paniced))
13871+ if (atomic_read_unchecked(&mce_paniced))
13872 wait_for_panic();
13873 if (!monarch_timeout)
13874 goto out;
13875@@ -1394,7 +1395,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
13876 }
13877
13878 /* Call the installed machine check handler for this CPU setup. */
13879-void (*machine_check_vector)(struct pt_regs *, long error_code) =
13880+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
13881 unexpected_machine_check;
13882
13883 /*
13884@@ -1416,7 +1417,9 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13885 return;
13886 }
13887
13888+ pax_open_kernel();
13889 machine_check_vector = do_machine_check;
13890+ pax_close_kernel();
13891
13892 mce_init();
13893 mce_cpu_features(c);
13894@@ -1429,14 +1432,14 @@ void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
13895 */
13896
13897 static DEFINE_SPINLOCK(mce_state_lock);
13898-static int open_count; /* #times opened */
13899+static local_t open_count; /* #times opened */
13900 static int open_exclu; /* already open exclusive? */
13901
13902 static int mce_open(struct inode *inode, struct file *file)
13903 {
13904 spin_lock(&mce_state_lock);
13905
13906- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
13907+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
13908 spin_unlock(&mce_state_lock);
13909
13910 return -EBUSY;
13911@@ -1444,7 +1447,7 @@ static int mce_open(struct inode *inode, struct file *file)
13912
13913 if (file->f_flags & O_EXCL)
13914 open_exclu = 1;
13915- open_count++;
13916+ local_inc(&open_count);
13917
13918 spin_unlock(&mce_state_lock);
13919
13920@@ -1455,7 +1458,7 @@ static int mce_release(struct inode *inode, struct file *file)
13921 {
13922 spin_lock(&mce_state_lock);
13923
13924- open_count--;
13925+ local_dec(&open_count);
13926 open_exclu = 0;
13927
13928 spin_unlock(&mce_state_lock);
13929@@ -2082,7 +2085,7 @@ struct dentry *mce_get_debugfs_dir(void)
13930 static void mce_reset(void)
13931 {
13932 cpu_missing = 0;
13933- atomic_set(&mce_fake_paniced, 0);
13934+ atomic_set_unchecked(&mce_fake_paniced, 0);
13935 atomic_set(&mce_executing, 0);
13936 atomic_set(&mce_callin, 0);
13937 atomic_set(&global_nwo, 0);
13938diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13939index ef3cd31..9d2f6ab 100644
13940--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
13941+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
13942@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kobj, struct attribute *attr,
13943 return ret;
13944 }
13945
13946-static struct sysfs_ops threshold_ops = {
13947+static const struct sysfs_ops threshold_ops = {
13948 .show = show,
13949 .store = store,
13950 };
13951diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
13952index 5c0e653..0882b0a 100644
13953--- a/arch/x86/kernel/cpu/mcheck/p5.c
13954+++ b/arch/x86/kernel/cpu/mcheck/p5.c
13955@@ -12,6 +12,7 @@
13956 #include <asm/system.h>
13957 #include <asm/mce.h>
13958 #include <asm/msr.h>
13959+#include <asm/pgtable.h>
13960
13961 /* By default disabled */
13962 int mce_p5_enabled __read_mostly;
13963@@ -50,7 +51,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
13964 if (!cpu_has(c, X86_FEATURE_MCE))
13965 return;
13966
13967+ pax_open_kernel();
13968 machine_check_vector = pentium_machine_check;
13969+ pax_close_kernel();
13970 /* Make sure the vector pointer is visible before we enable MCEs: */
13971 wmb();
13972
13973diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
13974index 54060f5..c1a7577 100644
13975--- a/arch/x86/kernel/cpu/mcheck/winchip.c
13976+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
13977@@ -11,6 +11,7 @@
13978 #include <asm/system.h>
13979 #include <asm/mce.h>
13980 #include <asm/msr.h>
13981+#include <asm/pgtable.h>
13982
13983 /* Machine check handler for WinChip C6: */
13984 static void winchip_machine_check(struct pt_regs *regs, long error_code)
13985@@ -24,7 +25,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
13986 {
13987 u32 lo, hi;
13988
13989+ pax_open_kernel();
13990 machine_check_vector = winchip_machine_check;
13991+ pax_close_kernel();
13992 /* Make sure the vector pointer is visible before we enable MCEs: */
13993 wmb();
13994
13995diff --git a/arch/x86/kernel/cpu/mtrr/amd.c b/arch/x86/kernel/cpu/mtrr/amd.c
13996index 33af141..92ba9cd 100644
13997--- a/arch/x86/kernel/cpu/mtrr/amd.c
13998+++ b/arch/x86/kernel/cpu/mtrr/amd.c
13999@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base, unsigned long size, unsigned int type)
14000 return 0;
14001 }
14002
14003-static struct mtrr_ops amd_mtrr_ops = {
14004+static const struct mtrr_ops amd_mtrr_ops = {
14005 .vendor = X86_VENDOR_AMD,
14006 .set = amd_set_mtrr,
14007 .get = amd_get_mtrr,
14008diff --git a/arch/x86/kernel/cpu/mtrr/centaur.c b/arch/x86/kernel/cpu/mtrr/centaur.c
14009index de89f14..316fe3e 100644
14010--- a/arch/x86/kernel/cpu/mtrr/centaur.c
14011+++ b/arch/x86/kernel/cpu/mtrr/centaur.c
14012@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long base, unsigned long size, unsigned int t
14013 return 0;
14014 }
14015
14016-static struct mtrr_ops centaur_mtrr_ops = {
14017+static const struct mtrr_ops centaur_mtrr_ops = {
14018 .vendor = X86_VENDOR_CENTAUR,
14019 .set = centaur_set_mcr,
14020 .get = centaur_get_mcr,
14021diff --git a/arch/x86/kernel/cpu/mtrr/cyrix.c b/arch/x86/kernel/cpu/mtrr/cyrix.c
14022index 228d982..68a3343 100644
14023--- a/arch/x86/kernel/cpu/mtrr/cyrix.c
14024+++ b/arch/x86/kernel/cpu/mtrr/cyrix.c
14025@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
14026 post_set();
14027 }
14028
14029-static struct mtrr_ops cyrix_mtrr_ops = {
14030+static const struct mtrr_ops cyrix_mtrr_ops = {
14031 .vendor = X86_VENDOR_CYRIX,
14032 .set_all = cyrix_set_all,
14033 .set = cyrix_set_arr,
14034diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
14035index 55da0c5..4d75584 100644
14036--- a/arch/x86/kernel/cpu/mtrr/generic.c
14037+++ b/arch/x86/kernel/cpu/mtrr/generic.c
14038@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
14039 /*
14040 * Generic structure...
14041 */
14042-struct mtrr_ops generic_mtrr_ops = {
14043+const struct mtrr_ops generic_mtrr_ops = {
14044 .use_intel_if = 1,
14045 .set_all = generic_set_all,
14046 .get = generic_get_mtrr,
14047diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
14048index fd60f09..c94ef52 100644
14049--- a/arch/x86/kernel/cpu/mtrr/main.c
14050+++ b/arch/x86/kernel/cpu/mtrr/main.c
14051@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
14052 u64 size_or_mask, size_and_mask;
14053 static bool mtrr_aps_delayed_init;
14054
14055-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
14056+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
14057
14058-struct mtrr_ops *mtrr_if;
14059+const struct mtrr_ops *mtrr_if;
14060
14061 static void set_mtrr(unsigned int reg, unsigned long base,
14062 unsigned long size, mtrr_type type);
14063
14064-void set_mtrr_ops(struct mtrr_ops *ops)
14065+void set_mtrr_ops(const struct mtrr_ops *ops)
14066 {
14067 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
14068 mtrr_ops[ops->vendor] = ops;
14069diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
14070index a501dee..816c719 100644
14071--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
14072+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
14073@@ -25,14 +25,14 @@ struct mtrr_ops {
14074 int (*validate_add_page)(unsigned long base, unsigned long size,
14075 unsigned int type);
14076 int (*have_wrcomb)(void);
14077-};
14078+} __do_const;
14079
14080 extern int generic_get_free_region(unsigned long base, unsigned long size,
14081 int replace_reg);
14082 extern int generic_validate_add_page(unsigned long base, unsigned long size,
14083 unsigned int type);
14084
14085-extern struct mtrr_ops generic_mtrr_ops;
14086+extern const struct mtrr_ops generic_mtrr_ops;
14087
14088 extern int positive_have_wrcomb(void);
14089
14090@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int index,
14091 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
14092 void get_mtrr_state(void);
14093
14094-extern void set_mtrr_ops(struct mtrr_ops *ops);
14095+extern void set_mtrr_ops(const struct mtrr_ops *ops);
14096
14097 extern u64 size_or_mask, size_and_mask;
14098-extern struct mtrr_ops *mtrr_if;
14099+extern const struct mtrr_ops *mtrr_if;
14100
14101 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
14102 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
14103diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
14104index 0ff02ca..fc49a60 100644
14105--- a/arch/x86/kernel/cpu/perf_event.c
14106+++ b/arch/x86/kernel/cpu/perf_event.c
14107@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event *event,
14108 * count to the generic event atomically:
14109 */
14110 again:
14111- prev_raw_count = atomic64_read(&hwc->prev_count);
14112+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
14113 rdmsrl(hwc->event_base + idx, new_raw_count);
14114
14115- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
14116+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
14117 new_raw_count) != prev_raw_count)
14118 goto again;
14119
14120@@ -741,7 +741,7 @@ again:
14121 delta = (new_raw_count << shift) - (prev_raw_count << shift);
14122 delta >>= shift;
14123
14124- atomic64_add(delta, &event->count);
14125+ atomic64_add_unchecked(delta, &event->count);
14126 atomic64_sub(delta, &hwc->period_left);
14127
14128 return new_raw_count;
14129@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_event *event,
14130 * The hw event starts counting from this event offset,
14131 * mark it to be able to extra future deltas:
14132 */
14133- atomic64_set(&hwc->prev_count, (u64)-left);
14134+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
14135
14136 err = checking_wrmsrl(hwc->event_base + idx,
14137 (u64)(-left) & x86_pmu.event_mask);
14138@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
14139 break;
14140
14141 callchain_store(entry, frame.return_address);
14142- fp = frame.next_frame;
14143+ fp = (__force const void __user *)frame.next_frame;
14144 }
14145 }
14146
14147diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
14148index 898df97..9e82503 100644
14149--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
14150+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
14151@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
14152
14153 /* Interface defining a CPU specific perfctr watchdog */
14154 struct wd_ops {
14155- int (*reserve)(void);
14156- void (*unreserve)(void);
14157- int (*setup)(unsigned nmi_hz);
14158- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14159- void (*stop)(void);
14160+ int (* const reserve)(void);
14161+ void (* const unreserve)(void);
14162+ int (* const setup)(unsigned nmi_hz);
14163+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
14164+ void (* const stop)(void);
14165 unsigned perfctr;
14166 unsigned evntsel;
14167 u64 checkbit;
14168@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
14169 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
14170 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
14171
14172+/* cannot be const */
14173 static struct wd_ops intel_arch_wd_ops;
14174
14175 static int setup_intel_arch_watchdog(unsigned nmi_hz)
14176@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(unsigned nmi_hz)
14177 return 1;
14178 }
14179
14180+/* cannot be const */
14181 static struct wd_ops intel_arch_wd_ops __read_mostly = {
14182 .reserve = single_msr_reserve,
14183 .unreserve = single_msr_unreserve,
14184diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
14185index ff95824..2ffdcb5 100644
14186--- a/arch/x86/kernel/crash.c
14187+++ b/arch/x86/kernel/crash.c
14188@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu, struct die_args *args)
14189 regs = args->regs;
14190
14191 #ifdef CONFIG_X86_32
14192- if (!user_mode_vm(regs)) {
14193+ if (!user_mode(regs)) {
14194 crash_fixup_ss_esp(&fixed_regs, regs);
14195 regs = &fixed_regs;
14196 }
14197diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
14198index 37250fe..bf2ec74 100644
14199--- a/arch/x86/kernel/doublefault_32.c
14200+++ b/arch/x86/kernel/doublefault_32.c
14201@@ -11,7 +11,7 @@
14202
14203 #define DOUBLEFAULT_STACKSIZE (1024)
14204 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
14205-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
14206+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
14207
14208 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
14209
14210@@ -21,7 +21,7 @@ static void doublefault_fn(void)
14211 unsigned long gdt, tss;
14212
14213 store_gdt(&gdt_desc);
14214- gdt = gdt_desc.address;
14215+ gdt = (unsigned long)gdt_desc.address;
14216
14217 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
14218
14219@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
14220 /* 0x2 bit is always set */
14221 .flags = X86_EFLAGS_SF | 0x2,
14222 .sp = STACK_START,
14223- .es = __USER_DS,
14224+ .es = __KERNEL_DS,
14225 .cs = __KERNEL_CS,
14226 .ss = __KERNEL_DS,
14227- .ds = __USER_DS,
14228+ .ds = __KERNEL_DS,
14229 .fs = __KERNEL_PERCPU,
14230
14231 .__cr3 = __pa_nodebug(swapper_pg_dir),
14232diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
14233index 2d8a371..4fa6ae6 100644
14234--- a/arch/x86/kernel/dumpstack.c
14235+++ b/arch/x86/kernel/dumpstack.c
14236@@ -2,6 +2,9 @@
14237 * Copyright (C) 1991, 1992 Linus Torvalds
14238 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
14239 */
14240+#ifdef CONFIG_GRKERNSEC_HIDESYM
14241+#define __INCLUDED_BY_HIDESYM 1
14242+#endif
14243 #include <linux/kallsyms.h>
14244 #include <linux/kprobes.h>
14245 #include <linux/uaccess.h>
14246@@ -28,7 +31,7 @@ static int die_counter;
14247
14248 void printk_address(unsigned long address, int reliable)
14249 {
14250- printk(" [<%p>] %s%pS\n", (void *) address,
14251+ printk(" [<%p>] %s%pA\n", (void *) address,
14252 reliable ? "" : "? ", (void *) address);
14253 }
14254
14255@@ -36,9 +39,8 @@ void printk_address(unsigned long address, int reliable)
14256 static void
14257 print_ftrace_graph_addr(unsigned long addr, void *data,
14258 const struct stacktrace_ops *ops,
14259- struct thread_info *tinfo, int *graph)
14260+ struct task_struct *task, int *graph)
14261 {
14262- struct task_struct *task = tinfo->task;
14263 unsigned long ret_addr;
14264 int index = task->curr_ret_stack;
14265
14266@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14267 static inline void
14268 print_ftrace_graph_addr(unsigned long addr, void *data,
14269 const struct stacktrace_ops *ops,
14270- struct thread_info *tinfo, int *graph)
14271+ struct task_struct *task, int *graph)
14272 { }
14273 #endif
14274
14275@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
14276 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
14277 */
14278
14279-static inline int valid_stack_ptr(struct thread_info *tinfo,
14280- void *p, unsigned int size, void *end)
14281+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
14282 {
14283- void *t = tinfo;
14284 if (end) {
14285 if (p < end && p >= (end-THREAD_SIZE))
14286 return 1;
14287@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
14288 }
14289
14290 unsigned long
14291-print_context_stack(struct thread_info *tinfo,
14292+print_context_stack(struct task_struct *task, void *stack_start,
14293 unsigned long *stack, unsigned long bp,
14294 const struct stacktrace_ops *ops, void *data,
14295 unsigned long *end, int *graph)
14296 {
14297 struct stack_frame *frame = (struct stack_frame *)bp;
14298
14299- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
14300+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
14301 unsigned long addr;
14302
14303 addr = *stack;
14304@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *tinfo,
14305 } else {
14306 ops->address(data, addr, 0);
14307 }
14308- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
14309+ print_ftrace_graph_addr(addr, data, ops, task, graph);
14310 }
14311 stack++;
14312 }
14313@@ -180,7 +180,7 @@ void dump_stack(void)
14314 #endif
14315
14316 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
14317- current->pid, current->comm, print_tainted(),
14318+ task_pid_nr(current), current->comm, print_tainted(),
14319 init_utsname()->release,
14320 (int)strcspn(init_utsname()->version, " "),
14321 init_utsname()->version);
14322@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
14323 return flags;
14324 }
14325
14326+extern void gr_handle_kernel_exploit(void);
14327+
14328 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14329 {
14330 if (regs && kexec_should_crash(current))
14331@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
14332 panic("Fatal exception in interrupt");
14333 if (panic_on_oops)
14334 panic("Fatal exception");
14335- do_exit(signr);
14336+
14337+ gr_handle_kernel_exploit();
14338+
14339+ do_group_exit(signr);
14340 }
14341
14342 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
14343@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
14344 unsigned long flags = oops_begin();
14345 int sig = SIGSEGV;
14346
14347- if (!user_mode_vm(regs))
14348+ if (!user_mode(regs))
14349 report_bug(regs->ip, regs);
14350
14351 if (__die(str, regs, err))
14352diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
14353index 81086c2..13e8b17 100644
14354--- a/arch/x86/kernel/dumpstack.h
14355+++ b/arch/x86/kernel/dumpstack.h
14356@@ -15,7 +15,7 @@
14357 #endif
14358
14359 extern unsigned long
14360-print_context_stack(struct thread_info *tinfo,
14361+print_context_stack(struct task_struct *task, void *stack_start,
14362 unsigned long *stack, unsigned long bp,
14363 const struct stacktrace_ops *ops, void *data,
14364 unsigned long *end, int *graph);
14365diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
14366index f7dd2a7..504f53b 100644
14367--- a/arch/x86/kernel/dumpstack_32.c
14368+++ b/arch/x86/kernel/dumpstack_32.c
14369@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14370 #endif
14371
14372 for (;;) {
14373- struct thread_info *context;
14374+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14375+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14376
14377- context = (struct thread_info *)
14378- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
14379- bp = print_context_stack(context, stack, bp, ops,
14380- data, NULL, &graph);
14381-
14382- stack = (unsigned long *)context->previous_esp;
14383- if (!stack)
14384+ if (stack_start == task_stack_page(task))
14385 break;
14386+ stack = *(unsigned long **)stack_start;
14387 if (ops->stack(data, "IRQ") < 0)
14388 break;
14389 touch_nmi_watchdog();
14390@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs)
14391 * When in-kernel, we also print out the stack and code at the
14392 * time of the fault..
14393 */
14394- if (!user_mode_vm(regs)) {
14395+ if (!user_mode(regs)) {
14396 unsigned int code_prologue = code_bytes * 43 / 64;
14397 unsigned int code_len = code_bytes;
14398 unsigned char c;
14399 u8 *ip;
14400+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
14401
14402 printk(KERN_EMERG "Stack:\n");
14403 show_stack_log_lvl(NULL, regs, &regs->sp,
14404@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs)
14405
14406 printk(KERN_EMERG "Code: ");
14407
14408- ip = (u8 *)regs->ip - code_prologue;
14409+ ip = (u8 *)regs->ip - code_prologue + cs_base;
14410 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
14411 /* try starting at IP */
14412- ip = (u8 *)regs->ip;
14413+ ip = (u8 *)regs->ip + cs_base;
14414 code_len = code_len - code_prologue + 1;
14415 }
14416 for (i = 0; i < code_len; i++, ip++) {
14417@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs)
14418 printk(" Bad EIP value.");
14419 break;
14420 }
14421- if (ip == (u8 *)regs->ip)
14422+ if (ip == (u8 *)regs->ip + cs_base)
14423 printk("<%02x> ", c);
14424 else
14425 printk("%02x ", c);
14426@@ -145,10 +142,23 @@ void show_registers(struct pt_regs *regs)
14427 printk("\n");
14428 }
14429
14430+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14431+void pax_check_alloca(unsigned long size)
14432+{
14433+ unsigned long sp = (unsigned long)&sp, stack_left;
14434+
14435+ /* all kernel stacks are of the same size */
14436+ stack_left = sp & (THREAD_SIZE - 1);
14437+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14438+}
14439+EXPORT_SYMBOL(pax_check_alloca);
14440+#endif
14441+
14442 int is_valid_bugaddr(unsigned long ip)
14443 {
14444 unsigned short ud2;
14445
14446+ ip = ktla_ktva(ip);
14447 if (ip < PAGE_OFFSET)
14448 return 0;
14449 if (probe_kernel_address((unsigned short *)ip, ud2))
14450diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
14451index a071e6b..36cd585 100644
14452--- a/arch/x86/kernel/dumpstack_64.c
14453+++ b/arch/x86/kernel/dumpstack_64.c
14454@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14455 unsigned long *irq_stack_end =
14456 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
14457 unsigned used = 0;
14458- struct thread_info *tinfo;
14459 int graph = 0;
14460+ void *stack_start;
14461
14462 if (!task)
14463 task = current;
14464@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14465 * current stack address. If the stacks consist of nested
14466 * exceptions
14467 */
14468- tinfo = task_thread_info(task);
14469 for (;;) {
14470 char *id;
14471 unsigned long *estack_end;
14472+
14473 estack_end = in_exception_stack(cpu, (unsigned long)stack,
14474 &used, &id);
14475
14476@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14477 if (ops->stack(data, id) < 0)
14478 break;
14479
14480- bp = print_context_stack(tinfo, stack, bp, ops,
14481+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
14482 data, estack_end, &graph);
14483 ops->stack(data, "<EOE>");
14484 /*
14485@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14486 if (stack >= irq_stack && stack < irq_stack_end) {
14487 if (ops->stack(data, "IRQ") < 0)
14488 break;
14489- bp = print_context_stack(tinfo, stack, bp,
14490+ bp = print_context_stack(task, irq_stack, stack, bp,
14491 ops, data, irq_stack_end, &graph);
14492 /*
14493 * We link to the next stack (which would be
14494@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
14495 /*
14496 * This handles the process stack:
14497 */
14498- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
14499+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
14500+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
14501 put_cpu();
14502 }
14503 EXPORT_SYMBOL(dump_trace);
14504@@ -304,3 +305,50 @@ int is_valid_bugaddr(unsigned long ip)
14505 return ud2 == 0x0b0f;
14506 }
14507
14508+
14509+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14510+void pax_check_alloca(unsigned long size)
14511+{
14512+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
14513+ unsigned cpu, used;
14514+ char *id;
14515+
14516+ /* check the process stack first */
14517+ stack_start = (unsigned long)task_stack_page(current);
14518+ stack_end = stack_start + THREAD_SIZE;
14519+ if (likely(stack_start <= sp && sp < stack_end)) {
14520+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
14521+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14522+ return;
14523+ }
14524+
14525+ cpu = get_cpu();
14526+
14527+ /* check the irq stacks */
14528+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
14529+ stack_start = stack_end - IRQ_STACK_SIZE;
14530+ if (stack_start <= sp && sp < stack_end) {
14531+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
14532+ put_cpu();
14533+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14534+ return;
14535+ }
14536+
14537+ /* check the exception stacks */
14538+ used = 0;
14539+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
14540+ stack_start = stack_end - EXCEPTION_STKSZ;
14541+ if (stack_end && stack_start <= sp && sp < stack_end) {
14542+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
14543+ put_cpu();
14544+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
14545+ return;
14546+ }
14547+
14548+ put_cpu();
14549+
14550+ /* unknown stack */
14551+ BUG();
14552+}
14553+EXPORT_SYMBOL(pax_check_alloca);
14554+#endif
14555diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
14556index a89739a..95e0c48 100644
14557--- a/arch/x86/kernel/e820.c
14558+++ b/arch/x86/kernel/e820.c
14559@@ -733,7 +733,7 @@ struct early_res {
14560 };
14561 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
14562 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
14563- {}
14564+ { 0, 0, {0}, 0 }
14565 };
14566
14567 static int __init find_overlapped_early(u64 start, u64 end)
14568diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
14569index b9c830c..1e41a96 100644
14570--- a/arch/x86/kernel/early_printk.c
14571+++ b/arch/x86/kernel/early_printk.c
14572@@ -7,6 +7,7 @@
14573 #include <linux/pci_regs.h>
14574 #include <linux/pci_ids.h>
14575 #include <linux/errno.h>
14576+#include <linux/sched.h>
14577 #include <asm/io.h>
14578 #include <asm/processor.h>
14579 #include <asm/fcntl.h>
14580@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char *fmt, ...)
14581 int n;
14582 va_list ap;
14583
14584+ pax_track_stack();
14585+
14586 va_start(ap, fmt);
14587 n = vscnprintf(buf, sizeof(buf), fmt, ap);
14588 early_console->write(early_console, buf, n);
14589diff --git a/arch/x86/kernel/efi_32.c b/arch/x86/kernel/efi_32.c
14590index 5cab48e..b025f9b 100644
14591--- a/arch/x86/kernel/efi_32.c
14592+++ b/arch/x86/kernel/efi_32.c
14593@@ -38,70 +38,56 @@
14594 */
14595
14596 static unsigned long efi_rt_eflags;
14597-static pgd_t efi_bak_pg_dir_pointer[2];
14598+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
14599
14600-void efi_call_phys_prelog(void)
14601+void __init efi_call_phys_prelog(void)
14602 {
14603- unsigned long cr4;
14604- unsigned long temp;
14605 struct desc_ptr gdt_descr;
14606
14607+#ifdef CONFIG_PAX_KERNEXEC
14608+ struct desc_struct d;
14609+#endif
14610+
14611 local_irq_save(efi_rt_eflags);
14612
14613- /*
14614- * If I don't have PAE, I should just duplicate two entries in page
14615- * directory. If I have PAE, I just need to duplicate one entry in
14616- * page directory.
14617- */
14618- cr4 = read_cr4_safe();
14619-
14620- if (cr4 & X86_CR4_PAE) {
14621- efi_bak_pg_dir_pointer[0].pgd =
14622- swapper_pg_dir[pgd_index(0)].pgd;
14623- swapper_pg_dir[0].pgd =
14624- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14625- } else {
14626- efi_bak_pg_dir_pointer[0].pgd =
14627- swapper_pg_dir[pgd_index(0)].pgd;
14628- efi_bak_pg_dir_pointer[1].pgd =
14629- swapper_pg_dir[pgd_index(0x400000)].pgd;
14630- swapper_pg_dir[pgd_index(0)].pgd =
14631- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
14632- temp = PAGE_OFFSET + 0x400000;
14633- swapper_pg_dir[pgd_index(0x400000)].pgd =
14634- swapper_pg_dir[pgd_index(temp)].pgd;
14635- }
14636+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
14637+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
14638+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
14639
14640 /*
14641 * After the lock is released, the original page table is restored.
14642 */
14643 __flush_tlb_all();
14644
14645+#ifdef CONFIG_PAX_KERNEXEC
14646+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
14647+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14648+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
14649+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14650+#endif
14651+
14652 gdt_descr.address = __pa(get_cpu_gdt_table(0));
14653 gdt_descr.size = GDT_SIZE - 1;
14654 load_gdt(&gdt_descr);
14655 }
14656
14657-void efi_call_phys_epilog(void)
14658+void __init efi_call_phys_epilog(void)
14659 {
14660- unsigned long cr4;
14661 struct desc_ptr gdt_descr;
14662
14663+#ifdef CONFIG_PAX_KERNEXEC
14664+ struct desc_struct d;
14665+
14666+ memset(&d, 0, sizeof d);
14667+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
14668+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
14669+#endif
14670+
14671 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
14672 gdt_descr.size = GDT_SIZE - 1;
14673 load_gdt(&gdt_descr);
14674
14675- cr4 = read_cr4_safe();
14676-
14677- if (cr4 & X86_CR4_PAE) {
14678- swapper_pg_dir[pgd_index(0)].pgd =
14679- efi_bak_pg_dir_pointer[0].pgd;
14680- } else {
14681- swapper_pg_dir[pgd_index(0)].pgd =
14682- efi_bak_pg_dir_pointer[0].pgd;
14683- swapper_pg_dir[pgd_index(0x400000)].pgd =
14684- efi_bak_pg_dir_pointer[1].pgd;
14685- }
14686+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
14687
14688 /*
14689 * After the lock is released, the original page table is restored.
14690diff --git a/arch/x86/kernel/efi_stub_32.S b/arch/x86/kernel/efi_stub_32.S
14691index fbe66e6..c5c0dd2 100644
14692--- a/arch/x86/kernel/efi_stub_32.S
14693+++ b/arch/x86/kernel/efi_stub_32.S
14694@@ -6,7 +6,9 @@
14695 */
14696
14697 #include <linux/linkage.h>
14698+#include <linux/init.h>
14699 #include <asm/page_types.h>
14700+#include <asm/segment.h>
14701
14702 /*
14703 * efi_call_phys(void *, ...) is a function with variable parameters.
14704@@ -20,7 +22,7 @@
14705 * service functions will comply with gcc calling convention, too.
14706 */
14707
14708-.text
14709+__INIT
14710 ENTRY(efi_call_phys)
14711 /*
14712 * 0. The function can only be called in Linux kernel. So CS has been
14713@@ -36,9 +38,11 @@ ENTRY(efi_call_phys)
14714 * The mapping of lower virtual memory has been created in prelog and
14715 * epilog.
14716 */
14717- movl $1f, %edx
14718- subl $__PAGE_OFFSET, %edx
14719- jmp *%edx
14720+ movl $(__KERNEXEC_EFI_DS), %edx
14721+ mov %edx, %ds
14722+ mov %edx, %es
14723+ mov %edx, %ss
14724+ ljmp $(__KERNEXEC_EFI_CS),$1f-__PAGE_OFFSET
14725 1:
14726
14727 /*
14728@@ -47,14 +51,8 @@ ENTRY(efi_call_phys)
14729 * parameter 2, ..., param n. To make things easy, we save the return
14730 * address of efi_call_phys in a global variable.
14731 */
14732- popl %edx
14733- movl %edx, saved_return_addr
14734- /* get the function pointer into ECX*/
14735- popl %ecx
14736- movl %ecx, efi_rt_function_ptr
14737- movl $2f, %edx
14738- subl $__PAGE_OFFSET, %edx
14739- pushl %edx
14740+ popl (saved_return_addr)
14741+ popl (efi_rt_function_ptr)
14742
14743 /*
14744 * 3. Clear PG bit in %CR0.
14745@@ -73,9 +71,8 @@ ENTRY(efi_call_phys)
14746 /*
14747 * 5. Call the physical function.
14748 */
14749- jmp *%ecx
14750+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
14751
14752-2:
14753 /*
14754 * 6. After EFI runtime service returns, control will return to
14755 * following instruction. We'd better readjust stack pointer first.
14756@@ -88,35 +85,32 @@ ENTRY(efi_call_phys)
14757 movl %cr0, %edx
14758 orl $0x80000000, %edx
14759 movl %edx, %cr0
14760- jmp 1f
14761-1:
14762+
14763 /*
14764 * 8. Now restore the virtual mode from flat mode by
14765 * adding EIP with PAGE_OFFSET.
14766 */
14767- movl $1f, %edx
14768- jmp *%edx
14769+ ljmp $(__KERNEL_CS),$1f+__PAGE_OFFSET
14770 1:
14771+ movl $(__KERNEL_DS), %edx
14772+ mov %edx, %ds
14773+ mov %edx, %es
14774+ mov %edx, %ss
14775
14776 /*
14777 * 9. Balance the stack. And because EAX contain the return value,
14778 * we'd better not clobber it.
14779 */
14780- leal efi_rt_function_ptr, %edx
14781- movl (%edx), %ecx
14782- pushl %ecx
14783+ pushl (efi_rt_function_ptr)
14784
14785 /*
14786- * 10. Push the saved return address onto the stack and return.
14787+ * 10. Return to the saved return address.
14788 */
14789- leal saved_return_addr, %edx
14790- movl (%edx), %ecx
14791- pushl %ecx
14792- ret
14793+ jmpl *(saved_return_addr)
14794 ENDPROC(efi_call_phys)
14795 .previous
14796
14797-.data
14798+__INITDATA
14799 saved_return_addr:
14800 .long 0
14801 efi_rt_function_ptr:
14802diff --git a/arch/x86/kernel/efi_stub_64.S b/arch/x86/kernel/efi_stub_64.S
14803index 4c07cca..2c8427d 100644
14804--- a/arch/x86/kernel/efi_stub_64.S
14805+++ b/arch/x86/kernel/efi_stub_64.S
14806@@ -7,6 +7,7 @@
14807 */
14808
14809 #include <linux/linkage.h>
14810+#include <asm/alternative-asm.h>
14811
14812 #define SAVE_XMM \
14813 mov %rsp, %rax; \
14814@@ -40,6 +41,7 @@ ENTRY(efi_call0)
14815 call *%rdi
14816 addq $32, %rsp
14817 RESTORE_XMM
14818+ pax_force_retaddr 0, 1
14819 ret
14820 ENDPROC(efi_call0)
14821
14822@@ -50,6 +52,7 @@ ENTRY(efi_call1)
14823 call *%rdi
14824 addq $32, %rsp
14825 RESTORE_XMM
14826+ pax_force_retaddr 0, 1
14827 ret
14828 ENDPROC(efi_call1)
14829
14830@@ -60,6 +63,7 @@ ENTRY(efi_call2)
14831 call *%rdi
14832 addq $32, %rsp
14833 RESTORE_XMM
14834+ pax_force_retaddr 0, 1
14835 ret
14836 ENDPROC(efi_call2)
14837
14838@@ -71,6 +75,7 @@ ENTRY(efi_call3)
14839 call *%rdi
14840 addq $32, %rsp
14841 RESTORE_XMM
14842+ pax_force_retaddr 0, 1
14843 ret
14844 ENDPROC(efi_call3)
14845
14846@@ -83,6 +88,7 @@ ENTRY(efi_call4)
14847 call *%rdi
14848 addq $32, %rsp
14849 RESTORE_XMM
14850+ pax_force_retaddr 0, 1
14851 ret
14852 ENDPROC(efi_call4)
14853
14854@@ -96,6 +102,7 @@ ENTRY(efi_call5)
14855 call *%rdi
14856 addq $48, %rsp
14857 RESTORE_XMM
14858+ pax_force_retaddr 0, 1
14859 ret
14860 ENDPROC(efi_call5)
14861
14862@@ -112,5 +119,6 @@ ENTRY(efi_call6)
14863 call *%rdi
14864 addq $48, %rsp
14865 RESTORE_XMM
14866+ pax_force_retaddr 0, 1
14867 ret
14868 ENDPROC(efi_call6)
14869diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
14870index c097e7d..c689cf4 100644
14871--- a/arch/x86/kernel/entry_32.S
14872+++ b/arch/x86/kernel/entry_32.S
14873@@ -185,13 +185,146 @@
14874 /*CFI_REL_OFFSET gs, PT_GS*/
14875 .endm
14876 .macro SET_KERNEL_GS reg
14877+
14878+#ifdef CONFIG_CC_STACKPROTECTOR
14879 movl $(__KERNEL_STACK_CANARY), \reg
14880+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14881+ movl $(__USER_DS), \reg
14882+#else
14883+ xorl \reg, \reg
14884+#endif
14885+
14886 movl \reg, %gs
14887 .endm
14888
14889 #endif /* CONFIG_X86_32_LAZY_GS */
14890
14891-.macro SAVE_ALL
14892+.macro pax_enter_kernel
14893+#ifdef CONFIG_PAX_KERNEXEC
14894+ call pax_enter_kernel
14895+#endif
14896+.endm
14897+
14898+.macro pax_exit_kernel
14899+#ifdef CONFIG_PAX_KERNEXEC
14900+ call pax_exit_kernel
14901+#endif
14902+.endm
14903+
14904+#ifdef CONFIG_PAX_KERNEXEC
14905+ENTRY(pax_enter_kernel)
14906+#ifdef CONFIG_PARAVIRT
14907+ pushl %eax
14908+ pushl %ecx
14909+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
14910+ mov %eax, %esi
14911+#else
14912+ mov %cr0, %esi
14913+#endif
14914+ bts $16, %esi
14915+ jnc 1f
14916+ mov %cs, %esi
14917+ cmp $__KERNEL_CS, %esi
14918+ jz 3f
14919+ ljmp $__KERNEL_CS, $3f
14920+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
14921+2:
14922+#ifdef CONFIG_PARAVIRT
14923+ mov %esi, %eax
14924+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14925+#else
14926+ mov %esi, %cr0
14927+#endif
14928+3:
14929+#ifdef CONFIG_PARAVIRT
14930+ popl %ecx
14931+ popl %eax
14932+#endif
14933+ ret
14934+ENDPROC(pax_enter_kernel)
14935+
14936+ENTRY(pax_exit_kernel)
14937+#ifdef CONFIG_PARAVIRT
14938+ pushl %eax
14939+ pushl %ecx
14940+#endif
14941+ mov %cs, %esi
14942+ cmp $__KERNEXEC_KERNEL_CS, %esi
14943+ jnz 2f
14944+#ifdef CONFIG_PARAVIRT
14945+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
14946+ mov %eax, %esi
14947+#else
14948+ mov %cr0, %esi
14949+#endif
14950+ btr $16, %esi
14951+ ljmp $__KERNEL_CS, $1f
14952+1:
14953+#ifdef CONFIG_PARAVIRT
14954+ mov %esi, %eax
14955+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
14956+#else
14957+ mov %esi, %cr0
14958+#endif
14959+2:
14960+#ifdef CONFIG_PARAVIRT
14961+ popl %ecx
14962+ popl %eax
14963+#endif
14964+ ret
14965+ENDPROC(pax_exit_kernel)
14966+#endif
14967+
14968+.macro pax_erase_kstack
14969+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14970+ call pax_erase_kstack
14971+#endif
14972+.endm
14973+
14974+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
14975+/*
14976+ * ebp: thread_info
14977+ * ecx, edx: can be clobbered
14978+ */
14979+ENTRY(pax_erase_kstack)
14980+ pushl %edi
14981+ pushl %eax
14982+
14983+ mov TI_lowest_stack(%ebp), %edi
14984+ mov $-0xBEEF, %eax
14985+ std
14986+
14987+1: mov %edi, %ecx
14988+ and $THREAD_SIZE_asm - 1, %ecx
14989+ shr $2, %ecx
14990+ repne scasl
14991+ jecxz 2f
14992+
14993+ cmp $2*16, %ecx
14994+ jc 2f
14995+
14996+ mov $2*16, %ecx
14997+ repe scasl
14998+ jecxz 2f
14999+ jne 1b
15000+
15001+2: cld
15002+ mov %esp, %ecx
15003+ sub %edi, %ecx
15004+ shr $2, %ecx
15005+ rep stosl
15006+
15007+ mov TI_task_thread_sp0(%ebp), %edi
15008+ sub $128, %edi
15009+ mov %edi, TI_lowest_stack(%ebp)
15010+
15011+ popl %eax
15012+ popl %edi
15013+ ret
15014+ENDPROC(pax_erase_kstack)
15015+#endif
15016+
15017+.macro __SAVE_ALL _DS
15018 cld
15019 PUSH_GS
15020 pushl %fs
15021@@ -224,7 +357,7 @@
15022 pushl %ebx
15023 CFI_ADJUST_CFA_OFFSET 4
15024 CFI_REL_OFFSET ebx, 0
15025- movl $(__USER_DS), %edx
15026+ movl $\_DS, %edx
15027 movl %edx, %ds
15028 movl %edx, %es
15029 movl $(__KERNEL_PERCPU), %edx
15030@@ -232,6 +365,15 @@
15031 SET_KERNEL_GS %edx
15032 .endm
15033
15034+.macro SAVE_ALL
15035+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
15036+ __SAVE_ALL __KERNEL_DS
15037+ pax_enter_kernel
15038+#else
15039+ __SAVE_ALL __USER_DS
15040+#endif
15041+.endm
15042+
15043 .macro RESTORE_INT_REGS
15044 popl %ebx
15045 CFI_ADJUST_CFA_OFFSET -4
15046@@ -331,7 +473,7 @@ ENTRY(ret_from_fork)
15047 CFI_ADJUST_CFA_OFFSET -4
15048 jmp syscall_exit
15049 CFI_ENDPROC
15050-END(ret_from_fork)
15051+ENDPROC(ret_from_fork)
15052
15053 /*
15054 * Return to user mode is not as complex as all this looks,
15055@@ -352,7 +494,15 @@ check_userspace:
15056 movb PT_CS(%esp), %al
15057 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
15058 cmpl $USER_RPL, %eax
15059+
15060+#ifdef CONFIG_PAX_KERNEXEC
15061+ jae resume_userspace
15062+
15063+ PAX_EXIT_KERNEL
15064+ jmp resume_kernel
15065+#else
15066 jb resume_kernel # not returning to v8086 or userspace
15067+#endif
15068
15069 ENTRY(resume_userspace)
15070 LOCKDEP_SYS_EXIT
15071@@ -364,8 +514,8 @@ ENTRY(resume_userspace)
15072 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
15073 # int/exception return?
15074 jne work_pending
15075- jmp restore_all
15076-END(ret_from_exception)
15077+ jmp restore_all_pax
15078+ENDPROC(ret_from_exception)
15079
15080 #ifdef CONFIG_PREEMPT
15081 ENTRY(resume_kernel)
15082@@ -380,7 +530,7 @@ need_resched:
15083 jz restore_all
15084 call preempt_schedule_irq
15085 jmp need_resched
15086-END(resume_kernel)
15087+ENDPROC(resume_kernel)
15088 #endif
15089 CFI_ENDPROC
15090
15091@@ -414,25 +564,36 @@ sysenter_past_esp:
15092 /*CFI_REL_OFFSET cs, 0*/
15093 /*
15094 * Push current_thread_info()->sysenter_return to the stack.
15095- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
15096- * pushed above; +8 corresponds to copy_thread's esp0 setting.
15097 */
15098- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
15099+ pushl $0
15100 CFI_ADJUST_CFA_OFFSET 4
15101 CFI_REL_OFFSET eip, 0
15102
15103 pushl %eax
15104 CFI_ADJUST_CFA_OFFSET 4
15105 SAVE_ALL
15106+ GET_THREAD_INFO(%ebp)
15107+ movl TI_sysenter_return(%ebp),%ebp
15108+ movl %ebp,PT_EIP(%esp)
15109 ENABLE_INTERRUPTS(CLBR_NONE)
15110
15111 /*
15112 * Load the potential sixth argument from user stack.
15113 * Careful about security.
15114 */
15115+ movl PT_OLDESP(%esp),%ebp
15116+
15117+#ifdef CONFIG_PAX_MEMORY_UDEREF
15118+ mov PT_OLDSS(%esp),%ds
15119+1: movl %ds:(%ebp),%ebp
15120+ push %ss
15121+ pop %ds
15122+#else
15123 cmpl $__PAGE_OFFSET-3,%ebp
15124 jae syscall_fault
15125 1: movl (%ebp),%ebp
15126+#endif
15127+
15128 movl %ebp,PT_EBP(%esp)
15129 .section __ex_table,"a"
15130 .align 4
15131@@ -455,12 +616,24 @@ sysenter_do_call:
15132 testl $_TIF_ALLWORK_MASK, %ecx
15133 jne sysexit_audit
15134 sysenter_exit:
15135+
15136+#ifdef CONFIG_PAX_RANDKSTACK
15137+ pushl_cfi %eax
15138+ movl %esp, %eax
15139+ call pax_randomize_kstack
15140+ popl_cfi %eax
15141+#endif
15142+
15143+ pax_erase_kstack
15144+
15145 /* if something modifies registers it must also disable sysexit */
15146 movl PT_EIP(%esp), %edx
15147 movl PT_OLDESP(%esp), %ecx
15148 xorl %ebp,%ebp
15149 TRACE_IRQS_ON
15150 1: mov PT_FS(%esp), %fs
15151+2: mov PT_DS(%esp), %ds
15152+3: mov PT_ES(%esp), %es
15153 PTGS_TO_GS
15154 ENABLE_INTERRUPTS_SYSEXIT
15155
15156@@ -477,6 +650,9 @@ sysenter_audit:
15157 movl %eax,%edx /* 2nd arg: syscall number */
15158 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
15159 call audit_syscall_entry
15160+
15161+ pax_erase_kstack
15162+
15163 pushl %ebx
15164 CFI_ADJUST_CFA_OFFSET 4
15165 movl PT_EAX(%esp),%eax /* reload syscall number */
15166@@ -504,11 +680,17 @@ sysexit_audit:
15167
15168 CFI_ENDPROC
15169 .pushsection .fixup,"ax"
15170-2: movl $0,PT_FS(%esp)
15171+4: movl $0,PT_FS(%esp)
15172+ jmp 1b
15173+5: movl $0,PT_DS(%esp)
15174+ jmp 1b
15175+6: movl $0,PT_ES(%esp)
15176 jmp 1b
15177 .section __ex_table,"a"
15178 .align 4
15179- .long 1b,2b
15180+ .long 1b,4b
15181+ .long 2b,5b
15182+ .long 3b,6b
15183 .popsection
15184 PTGS_TO_GS_EX
15185 ENDPROC(ia32_sysenter_target)
15186@@ -538,6 +720,15 @@ syscall_exit:
15187 testl $_TIF_ALLWORK_MASK, %ecx # current->work
15188 jne syscall_exit_work
15189
15190+restore_all_pax:
15191+
15192+#ifdef CONFIG_PAX_RANDKSTACK
15193+ movl %esp, %eax
15194+ call pax_randomize_kstack
15195+#endif
15196+
15197+ pax_erase_kstack
15198+
15199 restore_all:
15200 TRACE_IRQS_IRET
15201 restore_all_notrace:
15202@@ -602,10 +793,29 @@ ldt_ss:
15203 mov PT_OLDESP(%esp), %eax /* load userspace esp */
15204 mov %dx, %ax /* eax: new kernel esp */
15205 sub %eax, %edx /* offset (low word is 0) */
15206- PER_CPU(gdt_page, %ebx)
15207+#ifdef CONFIG_SMP
15208+ movl PER_CPU_VAR(cpu_number), %ebx
15209+ shll $PAGE_SHIFT_asm, %ebx
15210+ addl $cpu_gdt_table, %ebx
15211+#else
15212+ movl $cpu_gdt_table, %ebx
15213+#endif
15214 shr $16, %edx
15215+
15216+#ifdef CONFIG_PAX_KERNEXEC
15217+ mov %cr0, %esi
15218+ btr $16, %esi
15219+ mov %esi, %cr0
15220+#endif
15221+
15222 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
15223 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
15224+
15225+#ifdef CONFIG_PAX_KERNEXEC
15226+ bts $16, %esi
15227+ mov %esi, %cr0
15228+#endif
15229+
15230 pushl $__ESPFIX_SS
15231 CFI_ADJUST_CFA_OFFSET 4
15232 push %eax /* new kernel esp */
15233@@ -636,36 +846,30 @@ work_resched:
15234 movl TI_flags(%ebp), %ecx
15235 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
15236 # than syscall tracing?
15237- jz restore_all
15238+ jz restore_all_pax
15239 testb $_TIF_NEED_RESCHED, %cl
15240 jnz work_resched
15241
15242 work_notifysig: # deal with pending signals and
15243 # notify-resume requests
15244+ movl %esp, %eax
15245 #ifdef CONFIG_VM86
15246 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
15247- movl %esp, %eax
15248- jne work_notifysig_v86 # returning to kernel-space or
15249+ jz 1f # returning to kernel-space or
15250 # vm86-space
15251- xorl %edx, %edx
15252- call do_notify_resume
15253- jmp resume_userspace_sig
15254
15255- ALIGN
15256-work_notifysig_v86:
15257 pushl %ecx # save ti_flags for do_notify_resume
15258 CFI_ADJUST_CFA_OFFSET 4
15259 call save_v86_state # %eax contains pt_regs pointer
15260 popl %ecx
15261 CFI_ADJUST_CFA_OFFSET -4
15262 movl %eax, %esp
15263-#else
15264- movl %esp, %eax
15265+1:
15266 #endif
15267 xorl %edx, %edx
15268 call do_notify_resume
15269 jmp resume_userspace_sig
15270-END(work_pending)
15271+ENDPROC(work_pending)
15272
15273 # perform syscall exit tracing
15274 ALIGN
15275@@ -673,11 +877,14 @@ syscall_trace_entry:
15276 movl $-ENOSYS,PT_EAX(%esp)
15277 movl %esp, %eax
15278 call syscall_trace_enter
15279+
15280+ pax_erase_kstack
15281+
15282 /* What it returned is what we'll actually use. */
15283 cmpl $(nr_syscalls), %eax
15284 jnae syscall_call
15285 jmp syscall_exit
15286-END(syscall_trace_entry)
15287+ENDPROC(syscall_trace_entry)
15288
15289 # perform syscall exit tracing
15290 ALIGN
15291@@ -690,20 +897,24 @@ syscall_exit_work:
15292 movl %esp, %eax
15293 call syscall_trace_leave
15294 jmp resume_userspace
15295-END(syscall_exit_work)
15296+ENDPROC(syscall_exit_work)
15297 CFI_ENDPROC
15298
15299 RING0_INT_FRAME # can't unwind into user space anyway
15300 syscall_fault:
15301+#ifdef CONFIG_PAX_MEMORY_UDEREF
15302+ push %ss
15303+ pop %ds
15304+#endif
15305 GET_THREAD_INFO(%ebp)
15306 movl $-EFAULT,PT_EAX(%esp)
15307 jmp resume_userspace
15308-END(syscall_fault)
15309+ENDPROC(syscall_fault)
15310
15311 syscall_badsys:
15312 movl $-ENOSYS,PT_EAX(%esp)
15313 jmp resume_userspace
15314-END(syscall_badsys)
15315+ENDPROC(syscall_badsys)
15316 CFI_ENDPROC
15317
15318 /*
15319@@ -726,6 +937,33 @@ PTREGSCALL(rt_sigreturn)
15320 PTREGSCALL(vm86)
15321 PTREGSCALL(vm86old)
15322
15323+ ALIGN;
15324+ENTRY(kernel_execve)
15325+ push %ebp
15326+ sub $PT_OLDSS+4,%esp
15327+ push %edi
15328+ push %ecx
15329+ push %eax
15330+ lea 3*4(%esp),%edi
15331+ mov $PT_OLDSS/4+1,%ecx
15332+ xorl %eax,%eax
15333+ rep stosl
15334+ pop %eax
15335+ pop %ecx
15336+ pop %edi
15337+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
15338+ mov %eax,PT_EBX(%esp)
15339+ mov %edx,PT_ECX(%esp)
15340+ mov %ecx,PT_EDX(%esp)
15341+ mov %esp,%eax
15342+ call sys_execve
15343+ GET_THREAD_INFO(%ebp)
15344+ test %eax,%eax
15345+ jz syscall_exit
15346+ add $PT_OLDSS+4,%esp
15347+ pop %ebp
15348+ ret
15349+
15350 .macro FIXUP_ESPFIX_STACK
15351 /*
15352 * Switch back for ESPFIX stack to the normal zerobased stack
15353@@ -735,7 +973,13 @@ PTREGSCALL(vm86old)
15354 * normal stack and adjusts ESP with the matching offset.
15355 */
15356 /* fixup the stack */
15357- PER_CPU(gdt_page, %ebx)
15358+#ifdef CONFIG_SMP
15359+ movl PER_CPU_VAR(cpu_number), %ebx
15360+ shll $PAGE_SHIFT_asm, %ebx
15361+ addl $cpu_gdt_table, %ebx
15362+#else
15363+ movl $cpu_gdt_table, %ebx
15364+#endif
15365 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
15366 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
15367 shl $16, %eax
15368@@ -793,7 +1037,7 @@ vector=vector+1
15369 .endr
15370 2: jmp common_interrupt
15371 .endr
15372-END(irq_entries_start)
15373+ENDPROC(irq_entries_start)
15374
15375 .previous
15376 END(interrupt)
15377@@ -840,7 +1084,7 @@ ENTRY(coprocessor_error)
15378 CFI_ADJUST_CFA_OFFSET 4
15379 jmp error_code
15380 CFI_ENDPROC
15381-END(coprocessor_error)
15382+ENDPROC(coprocessor_error)
15383
15384 ENTRY(simd_coprocessor_error)
15385 RING0_INT_FRAME
15386@@ -850,7 +1094,7 @@ ENTRY(simd_coprocessor_error)
15387 CFI_ADJUST_CFA_OFFSET 4
15388 jmp error_code
15389 CFI_ENDPROC
15390-END(simd_coprocessor_error)
15391+ENDPROC(simd_coprocessor_error)
15392
15393 ENTRY(device_not_available)
15394 RING0_INT_FRAME
15395@@ -860,7 +1104,7 @@ ENTRY(device_not_available)
15396 CFI_ADJUST_CFA_OFFSET 4
15397 jmp error_code
15398 CFI_ENDPROC
15399-END(device_not_available)
15400+ENDPROC(device_not_available)
15401
15402 #ifdef CONFIG_PARAVIRT
15403 ENTRY(native_iret)
15404@@ -869,12 +1113,12 @@ ENTRY(native_iret)
15405 .align 4
15406 .long native_iret, iret_exc
15407 .previous
15408-END(native_iret)
15409+ENDPROC(native_iret)
15410
15411 ENTRY(native_irq_enable_sysexit)
15412 sti
15413 sysexit
15414-END(native_irq_enable_sysexit)
15415+ENDPROC(native_irq_enable_sysexit)
15416 #endif
15417
15418 ENTRY(overflow)
15419@@ -885,7 +1129,7 @@ ENTRY(overflow)
15420 CFI_ADJUST_CFA_OFFSET 4
15421 jmp error_code
15422 CFI_ENDPROC
15423-END(overflow)
15424+ENDPROC(overflow)
15425
15426 ENTRY(bounds)
15427 RING0_INT_FRAME
15428@@ -895,7 +1139,7 @@ ENTRY(bounds)
15429 CFI_ADJUST_CFA_OFFSET 4
15430 jmp error_code
15431 CFI_ENDPROC
15432-END(bounds)
15433+ENDPROC(bounds)
15434
15435 ENTRY(invalid_op)
15436 RING0_INT_FRAME
15437@@ -905,7 +1149,7 @@ ENTRY(invalid_op)
15438 CFI_ADJUST_CFA_OFFSET 4
15439 jmp error_code
15440 CFI_ENDPROC
15441-END(invalid_op)
15442+ENDPROC(invalid_op)
15443
15444 ENTRY(coprocessor_segment_overrun)
15445 RING0_INT_FRAME
15446@@ -915,7 +1159,7 @@ ENTRY(coprocessor_segment_overrun)
15447 CFI_ADJUST_CFA_OFFSET 4
15448 jmp error_code
15449 CFI_ENDPROC
15450-END(coprocessor_segment_overrun)
15451+ENDPROC(coprocessor_segment_overrun)
15452
15453 ENTRY(invalid_TSS)
15454 RING0_EC_FRAME
15455@@ -923,7 +1167,7 @@ ENTRY(invalid_TSS)
15456 CFI_ADJUST_CFA_OFFSET 4
15457 jmp error_code
15458 CFI_ENDPROC
15459-END(invalid_TSS)
15460+ENDPROC(invalid_TSS)
15461
15462 ENTRY(segment_not_present)
15463 RING0_EC_FRAME
15464@@ -931,7 +1175,7 @@ ENTRY(segment_not_present)
15465 CFI_ADJUST_CFA_OFFSET 4
15466 jmp error_code
15467 CFI_ENDPROC
15468-END(segment_not_present)
15469+ENDPROC(segment_not_present)
15470
15471 ENTRY(stack_segment)
15472 RING0_EC_FRAME
15473@@ -939,7 +1183,7 @@ ENTRY(stack_segment)
15474 CFI_ADJUST_CFA_OFFSET 4
15475 jmp error_code
15476 CFI_ENDPROC
15477-END(stack_segment)
15478+ENDPROC(stack_segment)
15479
15480 ENTRY(alignment_check)
15481 RING0_EC_FRAME
15482@@ -947,7 +1191,7 @@ ENTRY(alignment_check)
15483 CFI_ADJUST_CFA_OFFSET 4
15484 jmp error_code
15485 CFI_ENDPROC
15486-END(alignment_check)
15487+ENDPROC(alignment_check)
15488
15489 ENTRY(divide_error)
15490 RING0_INT_FRAME
15491@@ -957,7 +1201,7 @@ ENTRY(divide_error)
15492 CFI_ADJUST_CFA_OFFSET 4
15493 jmp error_code
15494 CFI_ENDPROC
15495-END(divide_error)
15496+ENDPROC(divide_error)
15497
15498 #ifdef CONFIG_X86_MCE
15499 ENTRY(machine_check)
15500@@ -968,7 +1212,7 @@ ENTRY(machine_check)
15501 CFI_ADJUST_CFA_OFFSET 4
15502 jmp error_code
15503 CFI_ENDPROC
15504-END(machine_check)
15505+ENDPROC(machine_check)
15506 #endif
15507
15508 ENTRY(spurious_interrupt_bug)
15509@@ -979,7 +1223,7 @@ ENTRY(spurious_interrupt_bug)
15510 CFI_ADJUST_CFA_OFFSET 4
15511 jmp error_code
15512 CFI_ENDPROC
15513-END(spurious_interrupt_bug)
15514+ENDPROC(spurious_interrupt_bug)
15515
15516 ENTRY(kernel_thread_helper)
15517 pushl $0 # fake return address for unwinder
15518@@ -1095,7 +1339,7 @@ ENDPROC(xen_failsafe_callback)
15519
15520 ENTRY(mcount)
15521 ret
15522-END(mcount)
15523+ENDPROC(mcount)
15524
15525 ENTRY(ftrace_caller)
15526 cmpl $0, function_trace_stop
15527@@ -1124,7 +1368,7 @@ ftrace_graph_call:
15528 .globl ftrace_stub
15529 ftrace_stub:
15530 ret
15531-END(ftrace_caller)
15532+ENDPROC(ftrace_caller)
15533
15534 #else /* ! CONFIG_DYNAMIC_FTRACE */
15535
15536@@ -1160,7 +1404,7 @@ trace:
15537 popl %ecx
15538 popl %eax
15539 jmp ftrace_stub
15540-END(mcount)
15541+ENDPROC(mcount)
15542 #endif /* CONFIG_DYNAMIC_FTRACE */
15543 #endif /* CONFIG_FUNCTION_TRACER */
15544
15545@@ -1181,7 +1425,7 @@ ENTRY(ftrace_graph_caller)
15546 popl %ecx
15547 popl %eax
15548 ret
15549-END(ftrace_graph_caller)
15550+ENDPROC(ftrace_graph_caller)
15551
15552 .globl return_to_handler
15553 return_to_handler:
15554@@ -1198,7 +1442,6 @@ return_to_handler:
15555 ret
15556 #endif
15557
15558-.section .rodata,"a"
15559 #include "syscall_table_32.S"
15560
15561 syscall_table_size=(.-sys_call_table)
15562@@ -1255,15 +1498,18 @@ error_code:
15563 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
15564 REG_TO_PTGS %ecx
15565 SET_KERNEL_GS %ecx
15566- movl $(__USER_DS), %ecx
15567+ movl $(__KERNEL_DS), %ecx
15568 movl %ecx, %ds
15569 movl %ecx, %es
15570+
15571+ pax_enter_kernel
15572+
15573 TRACE_IRQS_OFF
15574 movl %esp,%eax # pt_regs pointer
15575 call *%edi
15576 jmp ret_from_exception
15577 CFI_ENDPROC
15578-END(page_fault)
15579+ENDPROC(page_fault)
15580
15581 /*
15582 * Debug traps and NMI can happen at the one SYSENTER instruction
15583@@ -1309,7 +1555,7 @@ debug_stack_correct:
15584 call do_debug
15585 jmp ret_from_exception
15586 CFI_ENDPROC
15587-END(debug)
15588+ENDPROC(debug)
15589
15590 /*
15591 * NMI is doubly nasty. It can happen _while_ we're handling
15592@@ -1351,6 +1597,9 @@ nmi_stack_correct:
15593 xorl %edx,%edx # zero error code
15594 movl %esp,%eax # pt_regs pointer
15595 call do_nmi
15596+
15597+ pax_exit_kernel
15598+
15599 jmp restore_all_notrace
15600 CFI_ENDPROC
15601
15602@@ -1391,12 +1640,15 @@ nmi_espfix_stack:
15603 FIXUP_ESPFIX_STACK # %eax == %esp
15604 xorl %edx,%edx # zero error code
15605 call do_nmi
15606+
15607+ pax_exit_kernel
15608+
15609 RESTORE_REGS
15610 lss 12+4(%esp), %esp # back to espfix stack
15611 CFI_ADJUST_CFA_OFFSET -24
15612 jmp irq_return
15613 CFI_ENDPROC
15614-END(nmi)
15615+ENDPROC(nmi)
15616
15617 ENTRY(int3)
15618 RING0_INT_FRAME
15619@@ -1409,7 +1661,7 @@ ENTRY(int3)
15620 call do_int3
15621 jmp ret_from_exception
15622 CFI_ENDPROC
15623-END(int3)
15624+ENDPROC(int3)
15625
15626 ENTRY(general_protection)
15627 RING0_EC_FRAME
15628@@ -1417,7 +1669,7 @@ ENTRY(general_protection)
15629 CFI_ADJUST_CFA_OFFSET 4
15630 jmp error_code
15631 CFI_ENDPROC
15632-END(general_protection)
15633+ENDPROC(general_protection)
15634
15635 /*
15636 * End of kprobes section
15637diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
15638index 34a56a9..4aa5c8b 100644
15639--- a/arch/x86/kernel/entry_64.S
15640+++ b/arch/x86/kernel/entry_64.S
15641@@ -53,6 +53,8 @@
15642 #include <asm/paravirt.h>
15643 #include <asm/ftrace.h>
15644 #include <asm/percpu.h>
15645+#include <asm/pgtable.h>
15646+#include <asm/alternative-asm.h>
15647
15648 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
15649 #include <linux/elf-em.h>
15650@@ -64,8 +66,9 @@
15651 #ifdef CONFIG_FUNCTION_TRACER
15652 #ifdef CONFIG_DYNAMIC_FTRACE
15653 ENTRY(mcount)
15654+ pax_force_retaddr
15655 retq
15656-END(mcount)
15657+ENDPROC(mcount)
15658
15659 ENTRY(ftrace_caller)
15660 cmpl $0, function_trace_stop
15661@@ -88,8 +91,9 @@ GLOBAL(ftrace_graph_call)
15662 #endif
15663
15664 GLOBAL(ftrace_stub)
15665+ pax_force_retaddr
15666 retq
15667-END(ftrace_caller)
15668+ENDPROC(ftrace_caller)
15669
15670 #else /* ! CONFIG_DYNAMIC_FTRACE */
15671 ENTRY(mcount)
15672@@ -108,6 +112,7 @@ ENTRY(mcount)
15673 #endif
15674
15675 GLOBAL(ftrace_stub)
15676+ pax_force_retaddr
15677 retq
15678
15679 trace:
15680@@ -117,12 +122,13 @@ trace:
15681 movq 8(%rbp), %rsi
15682 subq $MCOUNT_INSN_SIZE, %rdi
15683
15684+ pax_force_fptr ftrace_trace_function
15685 call *ftrace_trace_function
15686
15687 MCOUNT_RESTORE_FRAME
15688
15689 jmp ftrace_stub
15690-END(mcount)
15691+ENDPROC(mcount)
15692 #endif /* CONFIG_DYNAMIC_FTRACE */
15693 #endif /* CONFIG_FUNCTION_TRACER */
15694
15695@@ -142,8 +148,9 @@ ENTRY(ftrace_graph_caller)
15696
15697 MCOUNT_RESTORE_FRAME
15698
15699+ pax_force_retaddr
15700 retq
15701-END(ftrace_graph_caller)
15702+ENDPROC(ftrace_graph_caller)
15703
15704 GLOBAL(return_to_handler)
15705 subq $24, %rsp
15706@@ -159,6 +166,7 @@ GLOBAL(return_to_handler)
15707 movq 8(%rsp), %rdx
15708 movq (%rsp), %rax
15709 addq $16, %rsp
15710+ pax_force_retaddr
15711 retq
15712 #endif
15713
15714@@ -174,6 +182,282 @@ ENTRY(native_usergs_sysret64)
15715 ENDPROC(native_usergs_sysret64)
15716 #endif /* CONFIG_PARAVIRT */
15717
15718+ .macro ljmpq sel, off
15719+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
15720+ .byte 0x48; ljmp *1234f(%rip)
15721+ .pushsection .rodata
15722+ .align 16
15723+ 1234: .quad \off; .word \sel
15724+ .popsection
15725+#else
15726+ pushq $\sel
15727+ pushq $\off
15728+ lretq
15729+#endif
15730+ .endm
15731+
15732+ .macro pax_enter_kernel
15733+ pax_set_fptr_mask
15734+#ifdef CONFIG_PAX_KERNEXEC
15735+ call pax_enter_kernel
15736+#endif
15737+ .endm
15738+
15739+ .macro pax_exit_kernel
15740+#ifdef CONFIG_PAX_KERNEXEC
15741+ call pax_exit_kernel
15742+#endif
15743+ .endm
15744+
15745+#ifdef CONFIG_PAX_KERNEXEC
15746+ENTRY(pax_enter_kernel)
15747+ pushq %rdi
15748+
15749+#ifdef CONFIG_PARAVIRT
15750+ PV_SAVE_REGS(CLBR_RDI)
15751+#endif
15752+
15753+ GET_CR0_INTO_RDI
15754+ bts $16,%rdi
15755+ jnc 3f
15756+ mov %cs,%edi
15757+ cmp $__KERNEL_CS,%edi
15758+ jnz 2f
15759+1:
15760+
15761+#ifdef CONFIG_PARAVIRT
15762+ PV_RESTORE_REGS(CLBR_RDI)
15763+#endif
15764+
15765+ popq %rdi
15766+ pax_force_retaddr
15767+ retq
15768+
15769+2: ljmpq __KERNEL_CS,1f
15770+3: ljmpq __KERNEXEC_KERNEL_CS,4f
15771+4: SET_RDI_INTO_CR0
15772+ jmp 1b
15773+ENDPROC(pax_enter_kernel)
15774+
15775+ENTRY(pax_exit_kernel)
15776+ pushq %rdi
15777+
15778+#ifdef CONFIG_PARAVIRT
15779+ PV_SAVE_REGS(CLBR_RDI)
15780+#endif
15781+
15782+ mov %cs,%rdi
15783+ cmp $__KERNEXEC_KERNEL_CS,%edi
15784+ jz 2f
15785+1:
15786+
15787+#ifdef CONFIG_PARAVIRT
15788+ PV_RESTORE_REGS(CLBR_RDI);
15789+#endif
15790+
15791+ popq %rdi
15792+ pax_force_retaddr
15793+ retq
15794+
15795+2: GET_CR0_INTO_RDI
15796+ btr $16,%rdi
15797+ ljmpq __KERNEL_CS,3f
15798+3: SET_RDI_INTO_CR0
15799+ jmp 1b
15800+#ifdef CONFIG_PARAVIRT
15801+ PV_RESTORE_REGS(CLBR_RDI);
15802+#endif
15803+
15804+ popq %rdi
15805+ pax_force_retaddr
15806+ retq
15807+ENDPROC(pax_exit_kernel)
15808+#endif
15809+
15810+ .macro pax_enter_kernel_user
15811+ pax_set_fptr_mask
15812+#ifdef CONFIG_PAX_MEMORY_UDEREF
15813+ call pax_enter_kernel_user
15814+#endif
15815+ .endm
15816+
15817+ .macro pax_exit_kernel_user
15818+#ifdef CONFIG_PAX_MEMORY_UDEREF
15819+ call pax_exit_kernel_user
15820+#endif
15821+#ifdef CONFIG_PAX_RANDKSTACK
15822+ pushq %rax
15823+ call pax_randomize_kstack
15824+ popq %rax
15825+#endif
15826+ .endm
15827+
15828+#ifdef CONFIG_PAX_MEMORY_UDEREF
15829+ENTRY(pax_enter_kernel_user)
15830+ pushq %rdi
15831+ pushq %rbx
15832+
15833+#ifdef CONFIG_PARAVIRT
15834+ PV_SAVE_REGS(CLBR_RDI)
15835+#endif
15836+
15837+ GET_CR3_INTO_RDI
15838+ mov %rdi,%rbx
15839+ add $__START_KERNEL_map,%rbx
15840+ sub phys_base(%rip),%rbx
15841+
15842+#ifdef CONFIG_PARAVIRT
15843+ pushq %rdi
15844+ cmpl $0, pv_info+PARAVIRT_enabled
15845+ jz 1f
15846+ i = 0
15847+ .rept USER_PGD_PTRS
15848+ mov i*8(%rbx),%rsi
15849+ mov $0,%sil
15850+ lea i*8(%rbx),%rdi
15851+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15852+ i = i + 1
15853+ .endr
15854+ jmp 2f
15855+1:
15856+#endif
15857+
15858+ i = 0
15859+ .rept USER_PGD_PTRS
15860+ movb $0,i*8(%rbx)
15861+ i = i + 1
15862+ .endr
15863+
15864+#ifdef CONFIG_PARAVIRT
15865+2: popq %rdi
15866+#endif
15867+ SET_RDI_INTO_CR3
15868+
15869+#ifdef CONFIG_PAX_KERNEXEC
15870+ GET_CR0_INTO_RDI
15871+ bts $16,%rdi
15872+ SET_RDI_INTO_CR0
15873+#endif
15874+
15875+#ifdef CONFIG_PARAVIRT
15876+ PV_RESTORE_REGS(CLBR_RDI)
15877+#endif
15878+
15879+ popq %rbx
15880+ popq %rdi
15881+ pax_force_retaddr
15882+ retq
15883+ENDPROC(pax_enter_kernel_user)
15884+
15885+ENTRY(pax_exit_kernel_user)
15886+ push %rdi
15887+
15888+#ifdef CONFIG_PARAVIRT
15889+ pushq %rbx
15890+ PV_SAVE_REGS(CLBR_RDI)
15891+#endif
15892+
15893+#ifdef CONFIG_PAX_KERNEXEC
15894+ GET_CR0_INTO_RDI
15895+ btr $16,%rdi
15896+ SET_RDI_INTO_CR0
15897+#endif
15898+
15899+ GET_CR3_INTO_RDI
15900+ add $__START_KERNEL_map,%rdi
15901+ sub phys_base(%rip),%rdi
15902+
15903+#ifdef CONFIG_PARAVIRT
15904+ cmpl $0, pv_info+PARAVIRT_enabled
15905+ jz 1f
15906+ mov %rdi,%rbx
15907+ i = 0
15908+ .rept USER_PGD_PTRS
15909+ mov i*8(%rbx),%rsi
15910+ mov $0x67,%sil
15911+ lea i*8(%rbx),%rdi
15912+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
15913+ i = i + 1
15914+ .endr
15915+ jmp 2f
15916+1:
15917+#endif
15918+
15919+ i = 0
15920+ .rept USER_PGD_PTRS
15921+ movb $0x67,i*8(%rdi)
15922+ i = i + 1
15923+ .endr
15924+
15925+#ifdef CONFIG_PARAVIRT
15926+2: PV_RESTORE_REGS(CLBR_RDI)
15927+ popq %rbx
15928+#endif
15929+
15930+ popq %rdi
15931+ pax_force_retaddr
15932+ retq
15933+ENDPROC(pax_exit_kernel_user)
15934+#endif
15935+
15936+.macro pax_erase_kstack
15937+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15938+ call pax_erase_kstack
15939+#endif
15940+.endm
15941+
15942+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
15943+/*
15944+ * r11: thread_info
15945+ * rcx, rdx: can be clobbered
15946+ */
15947+ENTRY(pax_erase_kstack)
15948+ pushq %rdi
15949+ pushq %rax
15950+ pushq %r11
15951+
15952+ GET_THREAD_INFO(%r11)
15953+ mov TI_lowest_stack(%r11), %rdi
15954+ mov $-0xBEEF, %rax
15955+ std
15956+
15957+1: mov %edi, %ecx
15958+ and $THREAD_SIZE_asm - 1, %ecx
15959+ shr $3, %ecx
15960+ repne scasq
15961+ jecxz 2f
15962+
15963+ cmp $2*8, %ecx
15964+ jc 2f
15965+
15966+ mov $2*8, %ecx
15967+ repe scasq
15968+ jecxz 2f
15969+ jne 1b
15970+
15971+2: cld
15972+ mov %esp, %ecx
15973+ sub %edi, %ecx
15974+
15975+ cmp $THREAD_SIZE_asm, %rcx
15976+ jb 3f
15977+ ud2
15978+3:
15979+
15980+ shr $3, %ecx
15981+ rep stosq
15982+
15983+ mov TI_task_thread_sp0(%r11), %rdi
15984+ sub $256, %rdi
15985+ mov %rdi, TI_lowest_stack(%r11)
15986+
15987+ popq %r11
15988+ popq %rax
15989+ popq %rdi
15990+ pax_force_retaddr
15991+ ret
15992+ENDPROC(pax_erase_kstack)
15993+#endif
15994
15995 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
15996 #ifdef CONFIG_TRACE_IRQFLAGS
15997@@ -233,8 +517,8 @@ ENDPROC(native_usergs_sysret64)
15998 .endm
15999
16000 .macro UNFAKE_STACK_FRAME
16001- addq $8*6, %rsp
16002- CFI_ADJUST_CFA_OFFSET -(6*8)
16003+ addq $8*6 + ARG_SKIP, %rsp
16004+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
16005 .endm
16006
16007 /*
16008@@ -317,7 +601,7 @@ ENTRY(save_args)
16009 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
16010 movq_cfi rbp, 8 /* push %rbp */
16011 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
16012- testl $3, CS(%rdi)
16013+ testb $3, CS(%rdi)
16014 je 1f
16015 SWAPGS
16016 /*
16017@@ -337,9 +621,10 @@ ENTRY(save_args)
16018 * We entered an interrupt context - irqs are off:
16019 */
16020 2: TRACE_IRQS_OFF
16021+ pax_force_retaddr
16022 ret
16023 CFI_ENDPROC
16024-END(save_args)
16025+ENDPROC(save_args)
16026
16027 ENTRY(save_rest)
16028 PARTIAL_FRAME 1 REST_SKIP+8
16029@@ -352,9 +637,10 @@ ENTRY(save_rest)
16030 movq_cfi r15, R15+16
16031 movq %r11, 8(%rsp) /* return address */
16032 FIXUP_TOP_OF_STACK %r11, 16
16033+ pax_force_retaddr
16034 ret
16035 CFI_ENDPROC
16036-END(save_rest)
16037+ENDPROC(save_rest)
16038
16039 /* save complete stack frame */
16040 .pushsection .kprobes.text, "ax"
16041@@ -383,9 +669,10 @@ ENTRY(save_paranoid)
16042 js 1f /* negative -> in kernel */
16043 SWAPGS
16044 xorl %ebx,%ebx
16045-1: ret
16046+1: pax_force_retaddr_bts
16047+ ret
16048 CFI_ENDPROC
16049-END(save_paranoid)
16050+ENDPROC(save_paranoid)
16051 .popsection
16052
16053 /*
16054@@ -409,7 +696,7 @@ ENTRY(ret_from_fork)
16055
16056 RESTORE_REST
16057
16058- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16059+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
16060 je int_ret_from_sys_call
16061
16062 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
16063@@ -419,7 +706,7 @@ ENTRY(ret_from_fork)
16064 jmp ret_from_sys_call # go to the SYSRET fastpath
16065
16066 CFI_ENDPROC
16067-END(ret_from_fork)
16068+ENDPROC(ret_from_fork)
16069
16070 /*
16071 * System call entry. Upto 6 arguments in registers are supported.
16072@@ -455,7 +742,7 @@ END(ret_from_fork)
16073 ENTRY(system_call)
16074 CFI_STARTPROC simple
16075 CFI_SIGNAL_FRAME
16076- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
16077+ CFI_DEF_CFA rsp,0
16078 CFI_REGISTER rip,rcx
16079 /*CFI_REGISTER rflags,r11*/
16080 SWAPGS_UNSAFE_STACK
16081@@ -468,12 +755,13 @@ ENTRY(system_call_after_swapgs)
16082
16083 movq %rsp,PER_CPU_VAR(old_rsp)
16084 movq PER_CPU_VAR(kernel_stack),%rsp
16085+ SAVE_ARGS 8*6,1
16086+ pax_enter_kernel_user
16087 /*
16088 * No need to follow this irqs off/on section - it's straight
16089 * and short:
16090 */
16091 ENABLE_INTERRUPTS(CLBR_NONE)
16092- SAVE_ARGS 8,1
16093 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
16094 movq %rcx,RIP-ARGOFFSET(%rsp)
16095 CFI_REL_OFFSET rip,RIP-ARGOFFSET
16096@@ -483,7 +771,7 @@ ENTRY(system_call_after_swapgs)
16097 system_call_fastpath:
16098 cmpq $__NR_syscall_max,%rax
16099 ja badsys
16100- movq %r10,%rcx
16101+ movq R10-ARGOFFSET(%rsp),%rcx
16102 call *sys_call_table(,%rax,8) # XXX: rip relative
16103 movq %rax,RAX-ARGOFFSET(%rsp)
16104 /*
16105@@ -502,6 +790,8 @@ sysret_check:
16106 andl %edi,%edx
16107 jnz sysret_careful
16108 CFI_REMEMBER_STATE
16109+ pax_exit_kernel_user
16110+ pax_erase_kstack
16111 /*
16112 * sysretq will re-enable interrupts:
16113 */
16114@@ -555,14 +845,18 @@ badsys:
16115 * jump back to the normal fast path.
16116 */
16117 auditsys:
16118- movq %r10,%r9 /* 6th arg: 4th syscall arg */
16119+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
16120 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
16121 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
16122 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
16123 movq %rax,%rsi /* 2nd arg: syscall number */
16124 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
16125 call audit_syscall_entry
16126+
16127+ pax_erase_kstack
16128+
16129 LOAD_ARGS 0 /* reload call-clobbered registers */
16130+ pax_set_fptr_mask
16131 jmp system_call_fastpath
16132
16133 /*
16134@@ -592,16 +886,20 @@ tracesys:
16135 FIXUP_TOP_OF_STACK %rdi
16136 movq %rsp,%rdi
16137 call syscall_trace_enter
16138+
16139+ pax_erase_kstack
16140+
16141 /*
16142 * Reload arg registers from stack in case ptrace changed them.
16143 * We don't reload %rax because syscall_trace_enter() returned
16144 * the value it wants us to use in the table lookup.
16145 */
16146 LOAD_ARGS ARGOFFSET, 1
16147+ pax_set_fptr_mask
16148 RESTORE_REST
16149 cmpq $__NR_syscall_max,%rax
16150 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
16151- movq %r10,%rcx /* fixup for C */
16152+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
16153 call *sys_call_table(,%rax,8)
16154 movq %rax,RAX-ARGOFFSET(%rsp)
16155 /* Use IRET because user could have changed frame */
16156@@ -613,7 +911,7 @@ tracesys:
16157 GLOBAL(int_ret_from_sys_call)
16158 DISABLE_INTERRUPTS(CLBR_NONE)
16159 TRACE_IRQS_OFF
16160- testl $3,CS-ARGOFFSET(%rsp)
16161+ testb $3,CS-ARGOFFSET(%rsp)
16162 je retint_restore_args
16163 movl $_TIF_ALLWORK_MASK,%edi
16164 /* edi: mask to check */
16165@@ -674,7 +972,7 @@ int_restore_rest:
16166 TRACE_IRQS_OFF
16167 jmp int_with_check
16168 CFI_ENDPROC
16169-END(system_call)
16170+ENDPROC(system_call)
16171
16172 /*
16173 * Certain special system calls that need to save a complete full stack frame.
16174@@ -690,7 +988,7 @@ ENTRY(\label)
16175 call \func
16176 jmp ptregscall_common
16177 CFI_ENDPROC
16178-END(\label)
16179+ENDPROC(\label)
16180 .endm
16181
16182 PTREGSCALL stub_clone, sys_clone, %r8
16183@@ -708,9 +1006,10 @@ ENTRY(ptregscall_common)
16184 movq_cfi_restore R12+8, r12
16185 movq_cfi_restore RBP+8, rbp
16186 movq_cfi_restore RBX+8, rbx
16187+ pax_force_retaddr
16188 ret $REST_SKIP /* pop extended registers */
16189 CFI_ENDPROC
16190-END(ptregscall_common)
16191+ENDPROC(ptregscall_common)
16192
16193 ENTRY(stub_execve)
16194 CFI_STARTPROC
16195@@ -726,7 +1025,7 @@ ENTRY(stub_execve)
16196 RESTORE_REST
16197 jmp int_ret_from_sys_call
16198 CFI_ENDPROC
16199-END(stub_execve)
16200+ENDPROC(stub_execve)
16201
16202 /*
16203 * sigreturn is special because it needs to restore all registers on return.
16204@@ -744,7 +1043,7 @@ ENTRY(stub_rt_sigreturn)
16205 RESTORE_REST
16206 jmp int_ret_from_sys_call
16207 CFI_ENDPROC
16208-END(stub_rt_sigreturn)
16209+ENDPROC(stub_rt_sigreturn)
16210
16211 /*
16212 * Build the entry stubs and pointer table with some assembler magic.
16213@@ -780,7 +1079,7 @@ vector=vector+1
16214 2: jmp common_interrupt
16215 .endr
16216 CFI_ENDPROC
16217-END(irq_entries_start)
16218+ENDPROC(irq_entries_start)
16219
16220 .previous
16221 END(interrupt)
16222@@ -800,6 +1099,16 @@ END(interrupt)
16223 CFI_ADJUST_CFA_OFFSET 10*8
16224 call save_args
16225 PARTIAL_FRAME 0
16226+#ifdef CONFIG_PAX_MEMORY_UDEREF
16227+ testb $3, CS(%rdi)
16228+ jnz 1f
16229+ pax_enter_kernel
16230+ jmp 2f
16231+1: pax_enter_kernel_user
16232+2:
16233+#else
16234+ pax_enter_kernel
16235+#endif
16236 call \func
16237 .endm
16238
16239@@ -822,7 +1131,7 @@ ret_from_intr:
16240 CFI_ADJUST_CFA_OFFSET -8
16241 exit_intr:
16242 GET_THREAD_INFO(%rcx)
16243- testl $3,CS-ARGOFFSET(%rsp)
16244+ testb $3,CS-ARGOFFSET(%rsp)
16245 je retint_kernel
16246
16247 /* Interrupt came from user space */
16248@@ -844,12 +1153,16 @@ retint_swapgs: /* return to user-space */
16249 * The iretq could re-enable interrupts:
16250 */
16251 DISABLE_INTERRUPTS(CLBR_ANY)
16252+ pax_exit_kernel_user
16253+ pax_erase_kstack
16254 TRACE_IRQS_IRETQ
16255 SWAPGS
16256 jmp restore_args
16257
16258 retint_restore_args: /* return to kernel space */
16259 DISABLE_INTERRUPTS(CLBR_ANY)
16260+ pax_exit_kernel
16261+ pax_force_retaddr RIP-ARGOFFSET
16262 /*
16263 * The iretq could re-enable interrupts:
16264 */
16265@@ -940,7 +1253,7 @@ ENTRY(retint_kernel)
16266 #endif
16267
16268 CFI_ENDPROC
16269-END(common_interrupt)
16270+ENDPROC(common_interrupt)
16271
16272 /*
16273 * APIC interrupts.
16274@@ -953,7 +1266,7 @@ ENTRY(\sym)
16275 interrupt \do_sym
16276 jmp ret_from_intr
16277 CFI_ENDPROC
16278-END(\sym)
16279+ENDPROC(\sym)
16280 .endm
16281
16282 #ifdef CONFIG_SMP
16283@@ -1032,12 +1345,22 @@ ENTRY(\sym)
16284 CFI_ADJUST_CFA_OFFSET 15*8
16285 call error_entry
16286 DEFAULT_FRAME 0
16287+#ifdef CONFIG_PAX_MEMORY_UDEREF
16288+ testb $3, CS(%rsp)
16289+ jnz 1f
16290+ pax_enter_kernel
16291+ jmp 2f
16292+1: pax_enter_kernel_user
16293+2:
16294+#else
16295+ pax_enter_kernel
16296+#endif
16297 movq %rsp,%rdi /* pt_regs pointer */
16298 xorl %esi,%esi /* no error code */
16299 call \do_sym
16300 jmp error_exit /* %ebx: no swapgs flag */
16301 CFI_ENDPROC
16302-END(\sym)
16303+ENDPROC(\sym)
16304 .endm
16305
16306 .macro paranoidzeroentry sym do_sym
16307@@ -1049,12 +1372,22 @@ ENTRY(\sym)
16308 subq $15*8, %rsp
16309 call save_paranoid
16310 TRACE_IRQS_OFF
16311+#ifdef CONFIG_PAX_MEMORY_UDEREF
16312+ testb $3, CS(%rsp)
16313+ jnz 1f
16314+ pax_enter_kernel
16315+ jmp 2f
16316+1: pax_enter_kernel_user
16317+2:
16318+#else
16319+ pax_enter_kernel
16320+#endif
16321 movq %rsp,%rdi /* pt_regs pointer */
16322 xorl %esi,%esi /* no error code */
16323 call \do_sym
16324 jmp paranoid_exit /* %ebx: no swapgs flag */
16325 CFI_ENDPROC
16326-END(\sym)
16327+ENDPROC(\sym)
16328 .endm
16329
16330 .macro paranoidzeroentry_ist sym do_sym ist
16331@@ -1066,15 +1399,30 @@ ENTRY(\sym)
16332 subq $15*8, %rsp
16333 call save_paranoid
16334 TRACE_IRQS_OFF
16335+#ifdef CONFIG_PAX_MEMORY_UDEREF
16336+ testb $3, CS(%rsp)
16337+ jnz 1f
16338+ pax_enter_kernel
16339+ jmp 2f
16340+1: pax_enter_kernel_user
16341+2:
16342+#else
16343+ pax_enter_kernel
16344+#endif
16345 movq %rsp,%rdi /* pt_regs pointer */
16346 xorl %esi,%esi /* no error code */
16347- PER_CPU(init_tss, %rbp)
16348+#ifdef CONFIG_SMP
16349+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
16350+ lea init_tss(%rbp), %rbp
16351+#else
16352+ lea init_tss(%rip), %rbp
16353+#endif
16354 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16355 call \do_sym
16356 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
16357 jmp paranoid_exit /* %ebx: no swapgs flag */
16358 CFI_ENDPROC
16359-END(\sym)
16360+ENDPROC(\sym)
16361 .endm
16362
16363 .macro errorentry sym do_sym
16364@@ -1085,13 +1433,23 @@ ENTRY(\sym)
16365 CFI_ADJUST_CFA_OFFSET 15*8
16366 call error_entry
16367 DEFAULT_FRAME 0
16368+#ifdef CONFIG_PAX_MEMORY_UDEREF
16369+ testb $3, CS(%rsp)
16370+ jnz 1f
16371+ pax_enter_kernel
16372+ jmp 2f
16373+1: pax_enter_kernel_user
16374+2:
16375+#else
16376+ pax_enter_kernel
16377+#endif
16378 movq %rsp,%rdi /* pt_regs pointer */
16379 movq ORIG_RAX(%rsp),%rsi /* get error code */
16380 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16381 call \do_sym
16382 jmp error_exit /* %ebx: no swapgs flag */
16383 CFI_ENDPROC
16384-END(\sym)
16385+ENDPROC(\sym)
16386 .endm
16387
16388 /* error code is on the stack already */
16389@@ -1104,13 +1462,23 @@ ENTRY(\sym)
16390 call save_paranoid
16391 DEFAULT_FRAME 0
16392 TRACE_IRQS_OFF
16393+#ifdef CONFIG_PAX_MEMORY_UDEREF
16394+ testb $3, CS(%rsp)
16395+ jnz 1f
16396+ pax_enter_kernel
16397+ jmp 2f
16398+1: pax_enter_kernel_user
16399+2:
16400+#else
16401+ pax_enter_kernel
16402+#endif
16403 movq %rsp,%rdi /* pt_regs pointer */
16404 movq ORIG_RAX(%rsp),%rsi /* get error code */
16405 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
16406 call \do_sym
16407 jmp paranoid_exit /* %ebx: no swapgs flag */
16408 CFI_ENDPROC
16409-END(\sym)
16410+ENDPROC(\sym)
16411 .endm
16412
16413 zeroentry divide_error do_divide_error
16414@@ -1141,9 +1509,10 @@ gs_change:
16415 SWAPGS
16416 popf
16417 CFI_ADJUST_CFA_OFFSET -8
16418+ pax_force_retaddr
16419 ret
16420 CFI_ENDPROC
16421-END(native_load_gs_index)
16422+ENDPROC(native_load_gs_index)
16423
16424 .section __ex_table,"a"
16425 .align 8
16426@@ -1193,11 +1562,12 @@ ENTRY(kernel_thread)
16427 * of hacks for example to fork off the per-CPU idle tasks.
16428 * [Hopefully no generic code relies on the reschedule -AK]
16429 */
16430- RESTORE_ALL
16431+ RESTORE_REST
16432 UNFAKE_STACK_FRAME
16433+ pax_force_retaddr
16434 ret
16435 CFI_ENDPROC
16436-END(kernel_thread)
16437+ENDPROC(kernel_thread)
16438
16439 ENTRY(child_rip)
16440 pushq $0 # fake return address
16441@@ -1208,13 +1578,14 @@ ENTRY(child_rip)
16442 */
16443 movq %rdi, %rax
16444 movq %rsi, %rdi
16445+ pax_force_fptr %rax
16446 call *%rax
16447 # exit
16448 mov %eax, %edi
16449 call do_exit
16450 ud2 # padding for call trace
16451 CFI_ENDPROC
16452-END(child_rip)
16453+ENDPROC(child_rip)
16454
16455 /*
16456 * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
16457@@ -1241,11 +1612,11 @@ ENTRY(kernel_execve)
16458 RESTORE_REST
16459 testq %rax,%rax
16460 je int_ret_from_sys_call
16461- RESTORE_ARGS
16462 UNFAKE_STACK_FRAME
16463+ pax_force_retaddr
16464 ret
16465 CFI_ENDPROC
16466-END(kernel_execve)
16467+ENDPROC(kernel_execve)
16468
16469 /* Call softirq on interrupt stack. Interrupts are off. */
16470 ENTRY(call_softirq)
16471@@ -1263,9 +1634,10 @@ ENTRY(call_softirq)
16472 CFI_DEF_CFA_REGISTER rsp
16473 CFI_ADJUST_CFA_OFFSET -8
16474 decl PER_CPU_VAR(irq_count)
16475+ pax_force_retaddr
16476 ret
16477 CFI_ENDPROC
16478-END(call_softirq)
16479+ENDPROC(call_softirq)
16480
16481 #ifdef CONFIG_XEN
16482 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
16483@@ -1303,7 +1675,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
16484 decl PER_CPU_VAR(irq_count)
16485 jmp error_exit
16486 CFI_ENDPROC
16487-END(xen_do_hypervisor_callback)
16488+ENDPROC(xen_do_hypervisor_callback)
16489
16490 /*
16491 * Hypervisor uses this for application faults while it executes.
16492@@ -1362,7 +1734,7 @@ ENTRY(xen_failsafe_callback)
16493 SAVE_ALL
16494 jmp error_exit
16495 CFI_ENDPROC
16496-END(xen_failsafe_callback)
16497+ENDPROC(xen_failsafe_callback)
16498
16499 #endif /* CONFIG_XEN */
16500
16501@@ -1405,16 +1777,31 @@ ENTRY(paranoid_exit)
16502 TRACE_IRQS_OFF
16503 testl %ebx,%ebx /* swapgs needed? */
16504 jnz paranoid_restore
16505- testl $3,CS(%rsp)
16506+ testb $3,CS(%rsp)
16507 jnz paranoid_userspace
16508+#ifdef CONFIG_PAX_MEMORY_UDEREF
16509+ pax_exit_kernel
16510+ TRACE_IRQS_IRETQ 0
16511+ SWAPGS_UNSAFE_STACK
16512+ RESTORE_ALL 8
16513+ pax_force_retaddr_bts
16514+ jmp irq_return
16515+#endif
16516 paranoid_swapgs:
16517+#ifdef CONFIG_PAX_MEMORY_UDEREF
16518+ pax_exit_kernel_user
16519+#else
16520+ pax_exit_kernel
16521+#endif
16522 TRACE_IRQS_IRETQ 0
16523 SWAPGS_UNSAFE_STACK
16524 RESTORE_ALL 8
16525 jmp irq_return
16526 paranoid_restore:
16527+ pax_exit_kernel
16528 TRACE_IRQS_IRETQ 0
16529 RESTORE_ALL 8
16530+ pax_force_retaddr_bts
16531 jmp irq_return
16532 paranoid_userspace:
16533 GET_THREAD_INFO(%rcx)
16534@@ -1443,7 +1830,7 @@ paranoid_schedule:
16535 TRACE_IRQS_OFF
16536 jmp paranoid_userspace
16537 CFI_ENDPROC
16538-END(paranoid_exit)
16539+ENDPROC(paranoid_exit)
16540
16541 /*
16542 * Exception entry point. This expects an error code/orig_rax on the stack.
16543@@ -1470,12 +1857,13 @@ ENTRY(error_entry)
16544 movq_cfi r14, R14+8
16545 movq_cfi r15, R15+8
16546 xorl %ebx,%ebx
16547- testl $3,CS+8(%rsp)
16548+ testb $3,CS+8(%rsp)
16549 je error_kernelspace
16550 error_swapgs:
16551 SWAPGS
16552 error_sti:
16553 TRACE_IRQS_OFF
16554+ pax_force_retaddr_bts
16555 ret
16556 CFI_ENDPROC
16557
16558@@ -1497,7 +1885,7 @@ error_kernelspace:
16559 cmpq $gs_change,RIP+8(%rsp)
16560 je error_swapgs
16561 jmp error_sti
16562-END(error_entry)
16563+ENDPROC(error_entry)
16564
16565
16566 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
16567@@ -1517,7 +1905,7 @@ ENTRY(error_exit)
16568 jnz retint_careful
16569 jmp retint_swapgs
16570 CFI_ENDPROC
16571-END(error_exit)
16572+ENDPROC(error_exit)
16573
16574
16575 /* runs on exception stack */
16576@@ -1529,6 +1917,16 @@ ENTRY(nmi)
16577 CFI_ADJUST_CFA_OFFSET 15*8
16578 call save_paranoid
16579 DEFAULT_FRAME 0
16580+#ifdef CONFIG_PAX_MEMORY_UDEREF
16581+ testb $3, CS(%rsp)
16582+ jnz 1f
16583+ pax_enter_kernel
16584+ jmp 2f
16585+1: pax_enter_kernel_user
16586+2:
16587+#else
16588+ pax_enter_kernel
16589+#endif
16590 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
16591 movq %rsp,%rdi
16592 movq $-1,%rsi
16593@@ -1539,12 +1937,28 @@ ENTRY(nmi)
16594 DISABLE_INTERRUPTS(CLBR_NONE)
16595 testl %ebx,%ebx /* swapgs needed? */
16596 jnz nmi_restore
16597- testl $3,CS(%rsp)
16598+ testb $3,CS(%rsp)
16599 jnz nmi_userspace
16600+#ifdef CONFIG_PAX_MEMORY_UDEREF
16601+ pax_exit_kernel
16602+ SWAPGS_UNSAFE_STACK
16603+ RESTORE_ALL 8
16604+ pax_force_retaddr_bts
16605+ jmp irq_return
16606+#endif
16607 nmi_swapgs:
16608+#ifdef CONFIG_PAX_MEMORY_UDEREF
16609+ pax_exit_kernel_user
16610+#else
16611+ pax_exit_kernel
16612+#endif
16613 SWAPGS_UNSAFE_STACK
16614+ RESTORE_ALL 8
16615+ jmp irq_return
16616 nmi_restore:
16617+ pax_exit_kernel
16618 RESTORE_ALL 8
16619+ pax_force_retaddr_bts
16620 jmp irq_return
16621 nmi_userspace:
16622 GET_THREAD_INFO(%rcx)
16623@@ -1573,14 +1987,14 @@ nmi_schedule:
16624 jmp paranoid_exit
16625 CFI_ENDPROC
16626 #endif
16627-END(nmi)
16628+ENDPROC(nmi)
16629
16630 ENTRY(ignore_sysret)
16631 CFI_STARTPROC
16632 mov $-ENOSYS,%eax
16633 sysret
16634 CFI_ENDPROC
16635-END(ignore_sysret)
16636+ENDPROC(ignore_sysret)
16637
16638 /*
16639 * End of kprobes section
16640diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
16641index 9dbb527..7b3615a 100644
16642--- a/arch/x86/kernel/ftrace.c
16643+++ b/arch/x86/kernel/ftrace.c
16644@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the IP to write to */
16645 static void *mod_code_newcode; /* holds the text to write to the IP */
16646
16647 static unsigned nmi_wait_count;
16648-static atomic_t nmi_update_count = ATOMIC_INIT(0);
16649+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
16650
16651 int ftrace_arch_read_dyn_info(char *buf, int size)
16652 {
16653@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf, int size)
16654
16655 r = snprintf(buf, size, "%u %u",
16656 nmi_wait_count,
16657- atomic_read(&nmi_update_count));
16658+ atomic_read_unchecked(&nmi_update_count));
16659 return r;
16660 }
16661
16662@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
16663 {
16664 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
16665 smp_rmb();
16666+ pax_open_kernel();
16667 ftrace_mod_code();
16668- atomic_inc(&nmi_update_count);
16669+ pax_close_kernel();
16670+ atomic_inc_unchecked(&nmi_update_count);
16671 }
16672 /* Must have previous changes seen before executions */
16673 smp_mb();
16674@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, void *new_code)
16675
16676
16677
16678-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
16679+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
16680
16681 static unsigned char *ftrace_nop_replace(void)
16682 {
16683@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, unsigned char *old_code,
16684 {
16685 unsigned char replaced[MCOUNT_INSN_SIZE];
16686
16687+ ip = ktla_ktva(ip);
16688+
16689 /*
16690 * Note: Due to modules and __init, code can
16691 * disappear and change, we need to protect against faulting
16692@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
16693 unsigned char old[MCOUNT_INSN_SIZE], *new;
16694 int ret;
16695
16696- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
16697+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
16698 new = ftrace_call_replace(ip, (unsigned long)func);
16699 ret = ftrace_modify_code(ip, old, new);
16700
16701@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *data)
16702 switch (faulted) {
16703 case 0:
16704 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
16705- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
16706+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
16707 break;
16708 case 1:
16709 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
16710- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
16711+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
16712 break;
16713 case 2:
16714 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
16715- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
16716+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
16717 break;
16718 }
16719
16720@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long ip,
16721 {
16722 unsigned char code[MCOUNT_INSN_SIZE];
16723
16724+ ip = ktla_ktva(ip);
16725+
16726 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
16727 return -EFAULT;
16728
16729diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
16730index 4f8e250..df24706 100644
16731--- a/arch/x86/kernel/head32.c
16732+++ b/arch/x86/kernel/head32.c
16733@@ -16,6 +16,7 @@
16734 #include <asm/apic.h>
16735 #include <asm/io_apic.h>
16736 #include <asm/bios_ebda.h>
16737+#include <asm/boot.h>
16738
16739 static void __init i386_default_early_setup(void)
16740 {
16741@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
16742 {
16743 reserve_trampoline_memory();
16744
16745- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16746+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
16747
16748 #ifdef CONFIG_BLK_DEV_INITRD
16749 /* Reserve INITRD */
16750diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
16751index 34c3308..6fc4e76 100644
16752--- a/arch/x86/kernel/head_32.S
16753+++ b/arch/x86/kernel/head_32.S
16754@@ -19,10 +19,17 @@
16755 #include <asm/setup.h>
16756 #include <asm/processor-flags.h>
16757 #include <asm/percpu.h>
16758+#include <asm/msr-index.h>
16759
16760 /* Physical address */
16761 #define pa(X) ((X) - __PAGE_OFFSET)
16762
16763+#ifdef CONFIG_PAX_KERNEXEC
16764+#define ta(X) (X)
16765+#else
16766+#define ta(X) ((X) - __PAGE_OFFSET)
16767+#endif
16768+
16769 /*
16770 * References to members of the new_cpu_data structure.
16771 */
16772@@ -52,11 +59,7 @@
16773 * and small than max_low_pfn, otherwise will waste some page table entries
16774 */
16775
16776-#if PTRS_PER_PMD > 1
16777-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
16778-#else
16779-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
16780-#endif
16781+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
16782
16783 /* Enough space to fit pagetables for the low memory linear map */
16784 MAPPING_BEYOND_END = \
16785@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE_asm
16786 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16787
16788 /*
16789+ * Real beginning of normal "text" segment
16790+ */
16791+ENTRY(stext)
16792+ENTRY(_stext)
16793+
16794+/*
16795 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
16796 * %esi points to the real-mode code as a 32-bit pointer.
16797 * CS and DS must be 4 GB flat segments, but we don't depend on
16798@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
16799 * can.
16800 */
16801 __HEAD
16802+
16803+#ifdef CONFIG_PAX_KERNEXEC
16804+ jmp startup_32
16805+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
16806+.fill PAGE_SIZE-5,1,0xcc
16807+#endif
16808+
16809 ENTRY(startup_32)
16810+ movl pa(stack_start),%ecx
16811+
16812 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
16813 us to not reload segments */
16814 testb $(1<<6), BP_loadflags(%esi)
16815@@ -95,7 +113,60 @@ ENTRY(startup_32)
16816 movl %eax,%es
16817 movl %eax,%fs
16818 movl %eax,%gs
16819+ movl %eax,%ss
16820 2:
16821+ leal -__PAGE_OFFSET(%ecx),%esp
16822+
16823+#ifdef CONFIG_SMP
16824+ movl $pa(cpu_gdt_table),%edi
16825+ movl $__per_cpu_load,%eax
16826+ movw %ax,__KERNEL_PERCPU + 2(%edi)
16827+ rorl $16,%eax
16828+ movb %al,__KERNEL_PERCPU + 4(%edi)
16829+ movb %ah,__KERNEL_PERCPU + 7(%edi)
16830+ movl $__per_cpu_end - 1,%eax
16831+ subl $__per_cpu_start,%eax
16832+ movw %ax,__KERNEL_PERCPU + 0(%edi)
16833+#endif
16834+
16835+#ifdef CONFIG_PAX_MEMORY_UDEREF
16836+ movl $NR_CPUS,%ecx
16837+ movl $pa(cpu_gdt_table),%edi
16838+1:
16839+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
16840+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
16841+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
16842+ addl $PAGE_SIZE_asm,%edi
16843+ loop 1b
16844+#endif
16845+
16846+#ifdef CONFIG_PAX_KERNEXEC
16847+ movl $pa(boot_gdt),%edi
16848+ movl $__LOAD_PHYSICAL_ADDR,%eax
16849+ movw %ax,__BOOT_CS + 2(%edi)
16850+ rorl $16,%eax
16851+ movb %al,__BOOT_CS + 4(%edi)
16852+ movb %ah,__BOOT_CS + 7(%edi)
16853+ rorl $16,%eax
16854+
16855+ ljmp $(__BOOT_CS),$1f
16856+1:
16857+
16858+ movl $NR_CPUS,%ecx
16859+ movl $pa(cpu_gdt_table),%edi
16860+ addl $__PAGE_OFFSET,%eax
16861+1:
16862+ movw %ax,__KERNEL_CS + 2(%edi)
16863+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
16864+ rorl $16,%eax
16865+ movb %al,__KERNEL_CS + 4(%edi)
16866+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
16867+ movb %ah,__KERNEL_CS + 7(%edi)
16868+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
16869+ rorl $16,%eax
16870+ addl $PAGE_SIZE_asm,%edi
16871+ loop 1b
16872+#endif
16873
16874 /*
16875 * Clear BSS first so that there are no surprises...
16876@@ -140,9 +211,7 @@ ENTRY(startup_32)
16877 cmpl $num_subarch_entries, %eax
16878 jae bad_subarch
16879
16880- movl pa(subarch_entries)(,%eax,4), %eax
16881- subl $__PAGE_OFFSET, %eax
16882- jmp *%eax
16883+ jmp *pa(subarch_entries)(,%eax,4)
16884
16885 bad_subarch:
16886 WEAK(lguest_entry)
16887@@ -154,10 +223,10 @@ WEAK(xen_entry)
16888 __INITDATA
16889
16890 subarch_entries:
16891- .long default_entry /* normal x86/PC */
16892- .long lguest_entry /* lguest hypervisor */
16893- .long xen_entry /* Xen hypervisor */
16894- .long default_entry /* Moorestown MID */
16895+ .long ta(default_entry) /* normal x86/PC */
16896+ .long ta(lguest_entry) /* lguest hypervisor */
16897+ .long ta(xen_entry) /* Xen hypervisor */
16898+ .long ta(default_entry) /* Moorestown MID */
16899 num_subarch_entries = (. - subarch_entries) / 4
16900 .previous
16901 #endif /* CONFIG_PARAVIRT */
16902@@ -218,8 +287,11 @@ default_entry:
16903 movl %eax, pa(max_pfn_mapped)
16904
16905 /* Do early initialization of the fixmap area */
16906- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16907- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16908+#ifdef CONFIG_COMPAT_VDSO
16909+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16910+#else
16911+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
16912+#endif
16913 #else /* Not PAE */
16914
16915 page_pde_offset = (__PAGE_OFFSET >> 20);
16916@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
16917 movl %eax, pa(max_pfn_mapped)
16918
16919 /* Do early initialization of the fixmap area */
16920- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
16921- movl %eax,pa(swapper_pg_dir+0xffc)
16922+#ifdef CONFIG_COMPAT_VDSO
16923+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
16924+#else
16925+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
16926+#endif
16927 #endif
16928 jmp 3f
16929 /*
16930@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
16931 movl %eax,%es
16932 movl %eax,%fs
16933 movl %eax,%gs
16934+ movl pa(stack_start),%ecx
16935+ movl %eax,%ss
16936+ leal -__PAGE_OFFSET(%ecx),%esp
16937 #endif /* CONFIG_SMP */
16938 3:
16939
16940@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
16941 orl %edx,%eax
16942 movl %eax,%cr4
16943
16944+#ifdef CONFIG_X86_PAE
16945 btl $5, %eax # check if PAE is enabled
16946 jnc 6f
16947
16948@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
16949 cpuid
16950 cmpl $0x80000000, %eax
16951 jbe 6f
16952+
16953+ /* Clear bogus XD_DISABLE bits */
16954+ call verify_cpu
16955+
16956 mov $0x80000001, %eax
16957 cpuid
16958 /* Execute Disable bit supported? */
16959@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
16960 jnc 6f
16961
16962 /* Setup EFER (Extended Feature Enable Register) */
16963- movl $0xc0000080, %ecx
16964+ movl $MSR_EFER, %ecx
16965 rdmsr
16966
16967 btsl $11, %eax
16968 /* Make changes effective */
16969 wrmsr
16970
16971+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
16972+ movl $1,pa(nx_enabled)
16973+#endif
16974+
16975 6:
16976
16977 /*
16978@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
16979 movl %eax,%cr0 /* ..and set paging (PG) bit */
16980 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
16981 1:
16982- /* Set up the stack pointer */
16983- lss stack_start,%esp
16984+ /* Shift the stack pointer to a virtual address */
16985+ addl $__PAGE_OFFSET, %esp
16986
16987 /*
16988 * Initialize eflags. Some BIOS's leave bits like NT set. This would
16989@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
16990
16991 #ifdef CONFIG_SMP
16992 cmpb $0, ready
16993- jz 1f /* Initial CPU cleans BSS */
16994- jmp checkCPUtype
16995-1:
16996+ jnz checkCPUtype
16997 #endif /* CONFIG_SMP */
16998
16999 /*
17000@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
17001 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
17002 movl %eax,%ss # after changing gdt.
17003
17004- movl $(__USER_DS),%eax # DS/ES contains default USER segment
17005+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
17006 movl %eax,%ds
17007 movl %eax,%es
17008
17009@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
17010 */
17011 cmpb $0,ready
17012 jne 1f
17013- movl $per_cpu__gdt_page,%eax
17014+ movl $cpu_gdt_table,%eax
17015 movl $per_cpu__stack_canary,%ecx
17016+#ifdef CONFIG_SMP
17017+ addl $__per_cpu_load,%ecx
17018+#endif
17019 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
17020 shrl $16, %ecx
17021 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
17022 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
17023 1:
17024-#endif
17025 movl $(__KERNEL_STACK_CANARY),%eax
17026+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
17027+ movl $(__USER_DS),%eax
17028+#else
17029+ xorl %eax,%eax
17030+#endif
17031 movl %eax,%gs
17032
17033 xorl %eax,%eax # Clear LDT
17034@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
17035
17036 cld # gcc2 wants the direction flag cleared at all times
17037 pushl $0 # fake return address for unwinder
17038-#ifdef CONFIG_SMP
17039- movb ready, %cl
17040 movb $1, ready
17041- cmpb $0,%cl # the first CPU calls start_kernel
17042- je 1f
17043- movl (stack_start), %esp
17044-1:
17045-#endif /* CONFIG_SMP */
17046 jmp *(initial_code)
17047
17048 /*
17049@@ -546,22 +631,22 @@ early_page_fault:
17050 jmp early_fault
17051
17052 early_fault:
17053- cld
17054 #ifdef CONFIG_PRINTK
17055+ cmpl $1,%ss:early_recursion_flag
17056+ je hlt_loop
17057+ incl %ss:early_recursion_flag
17058+ cld
17059 pusha
17060 movl $(__KERNEL_DS),%eax
17061 movl %eax,%ds
17062 movl %eax,%es
17063- cmpl $2,early_recursion_flag
17064- je hlt_loop
17065- incl early_recursion_flag
17066 movl %cr2,%eax
17067 pushl %eax
17068 pushl %edx /* trapno */
17069 pushl $fault_msg
17070 call printk
17071+; call dump_stack
17072 #endif
17073- call dump_stack
17074 hlt_loop:
17075 hlt
17076 jmp hlt_loop
17077@@ -569,8 +654,11 @@ hlt_loop:
17078 /* This is the default interrupt "handler" :-) */
17079 ALIGN
17080 ignore_int:
17081- cld
17082 #ifdef CONFIG_PRINTK
17083+ cmpl $2,%ss:early_recursion_flag
17084+ je hlt_loop
17085+ incl %ss:early_recursion_flag
17086+ cld
17087 pushl %eax
17088 pushl %ecx
17089 pushl %edx
17090@@ -579,9 +667,6 @@ ignore_int:
17091 movl $(__KERNEL_DS),%eax
17092 movl %eax,%ds
17093 movl %eax,%es
17094- cmpl $2,early_recursion_flag
17095- je hlt_loop
17096- incl early_recursion_flag
17097 pushl 16(%esp)
17098 pushl 24(%esp)
17099 pushl 32(%esp)
17100@@ -600,6 +685,8 @@ ignore_int:
17101 #endif
17102 iret
17103
17104+#include "verify_cpu.S"
17105+
17106 __REFDATA
17107 .align 4
17108 ENTRY(initial_code)
17109@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
17110 /*
17111 * BSS section
17112 */
17113-__PAGE_ALIGNED_BSS
17114- .align PAGE_SIZE_asm
17115 #ifdef CONFIG_X86_PAE
17116+.section .swapper_pg_pmd,"a",@progbits
17117 swapper_pg_pmd:
17118 .fill 1024*KPMDS,4,0
17119 #else
17120+.section .swapper_pg_dir,"a",@progbits
17121 ENTRY(swapper_pg_dir)
17122 .fill 1024,4,0
17123 #endif
17124+.section .swapper_pg_fixmap,"a",@progbits
17125 swapper_pg_fixmap:
17126 .fill 1024,4,0
17127 #ifdef CONFIG_X86_TRAMPOLINE
17128+.section .trampoline_pg_dir,"a",@progbits
17129 ENTRY(trampoline_pg_dir)
17130+#ifdef CONFIG_X86_PAE
17131+ .fill 4,8,0
17132+#else
17133 .fill 1024,4,0
17134 #endif
17135+#endif
17136+
17137+.section .empty_zero_page,"a",@progbits
17138 ENTRY(empty_zero_page)
17139 .fill 4096,1,0
17140
17141 /*
17142+ * The IDT has to be page-aligned to simplify the Pentium
17143+ * F0 0F bug workaround.. We have a special link segment
17144+ * for this.
17145+ */
17146+.section .idt,"a",@progbits
17147+ENTRY(idt_table)
17148+ .fill 256,8,0
17149+
17150+/*
17151 * This starts the data section.
17152 */
17153 #ifdef CONFIG_X86_PAE
17154-__PAGE_ALIGNED_DATA
17155- /* Page-aligned for the benefit of paravirt? */
17156- .align PAGE_SIZE_asm
17157+.section .swapper_pg_dir,"a",@progbits
17158+
17159 ENTRY(swapper_pg_dir)
17160 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
17161 # if KPMDS == 3
17162@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
17163 # error "Kernel PMDs should be 1, 2 or 3"
17164 # endif
17165 .align PAGE_SIZE_asm /* needs to be page-sized too */
17166+
17167+#ifdef CONFIG_PAX_PER_CPU_PGD
17168+ENTRY(cpu_pgd)
17169+ .rept NR_CPUS
17170+ .fill 4,8,0
17171+ .endr
17172+#endif
17173+
17174 #endif
17175
17176 .data
17177+.balign 4
17178 ENTRY(stack_start)
17179- .long init_thread_union+THREAD_SIZE
17180- .long __BOOT_DS
17181+ .long init_thread_union+THREAD_SIZE-8
17182
17183 ready: .byte 0
17184
17185+.section .rodata,"a",@progbits
17186 early_recursion_flag:
17187 .long 0
17188
17189@@ -697,7 +809,7 @@ fault_msg:
17190 .word 0 # 32 bit align gdt_desc.address
17191 boot_gdt_descr:
17192 .word __BOOT_DS+7
17193- .long boot_gdt - __PAGE_OFFSET
17194+ .long pa(boot_gdt)
17195
17196 .word 0 # 32-bit align idt_desc.address
17197 idt_descr:
17198@@ -708,7 +820,7 @@ idt_descr:
17199 .word 0 # 32 bit align gdt_desc.address
17200 ENTRY(early_gdt_descr)
17201 .word GDT_ENTRIES*8-1
17202- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
17203+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
17204
17205 /*
17206 * The boot_gdt must mirror the equivalent in setup.S and is
17207@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
17208 .align L1_CACHE_BYTES
17209 ENTRY(boot_gdt)
17210 .fill GDT_ENTRY_BOOT_CS,8,0
17211- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
17212- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
17213+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
17214+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
17215+
17216+ .align PAGE_SIZE_asm
17217+ENTRY(cpu_gdt_table)
17218+ .rept NR_CPUS
17219+ .quad 0x0000000000000000 /* NULL descriptor */
17220+ .quad 0x0000000000000000 /* 0x0b reserved */
17221+ .quad 0x0000000000000000 /* 0x13 reserved */
17222+ .quad 0x0000000000000000 /* 0x1b reserved */
17223+
17224+#ifdef CONFIG_PAX_KERNEXEC
17225+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
17226+#else
17227+ .quad 0x0000000000000000 /* 0x20 unused */
17228+#endif
17229+
17230+ .quad 0x0000000000000000 /* 0x28 unused */
17231+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
17232+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
17233+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
17234+ .quad 0x0000000000000000 /* 0x4b reserved */
17235+ .quad 0x0000000000000000 /* 0x53 reserved */
17236+ .quad 0x0000000000000000 /* 0x5b reserved */
17237+
17238+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
17239+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
17240+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
17241+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
17242+
17243+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
17244+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
17245+
17246+ /*
17247+ * Segments used for calling PnP BIOS have byte granularity.
17248+ * The code segments and data segments have fixed 64k limits,
17249+ * the transfer segment sizes are set at run time.
17250+ */
17251+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
17252+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
17253+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
17254+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
17255+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
17256+
17257+ /*
17258+ * The APM segments have byte granularity and their bases
17259+ * are set at run time. All have 64k limits.
17260+ */
17261+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
17262+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
17263+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
17264+
17265+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
17266+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
17267+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
17268+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
17269+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
17270+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
17271+
17272+ /* Be sure this is zeroed to avoid false validations in Xen */
17273+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
17274+ .endr
17275diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
17276index 780cd92..758b2a6 100644
17277--- a/arch/x86/kernel/head_64.S
17278+++ b/arch/x86/kernel/head_64.S
17279@@ -19,6 +19,8 @@
17280 #include <asm/cache.h>
17281 #include <asm/processor-flags.h>
17282 #include <asm/percpu.h>
17283+#include <asm/cpufeature.h>
17284+#include <asm/alternative-asm.h>
17285
17286 #ifdef CONFIG_PARAVIRT
17287 #include <asm/asm-offsets.h>
17288@@ -38,6 +40,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
17289 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
17290 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
17291 L3_START_KERNEL = pud_index(__START_KERNEL_map)
17292+L4_VMALLOC_START = pgd_index(VMALLOC_START)
17293+L3_VMALLOC_START = pud_index(VMALLOC_START)
17294+L4_VMALLOC_END = pgd_index(VMALLOC_END)
17295+L3_VMALLOC_END = pud_index(VMALLOC_END)
17296+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
17297+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
17298
17299 .text
17300 __HEAD
17301@@ -85,35 +93,23 @@ startup_64:
17302 */
17303 addq %rbp, init_level4_pgt + 0(%rip)
17304 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
17305+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
17306+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
17307+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
17308 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
17309
17310 addq %rbp, level3_ident_pgt + 0(%rip)
17311+#ifndef CONFIG_XEN
17312+ addq %rbp, level3_ident_pgt + 8(%rip)
17313+#endif
17314
17315- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
17316- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
17317+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
17318+
17319+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
17320+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
17321
17322 addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
17323-
17324- /* Add an Identity mapping if I am above 1G */
17325- leaq _text(%rip), %rdi
17326- andq $PMD_PAGE_MASK, %rdi
17327-
17328- movq %rdi, %rax
17329- shrq $PUD_SHIFT, %rax
17330- andq $(PTRS_PER_PUD - 1), %rax
17331- jz ident_complete
17332-
17333- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
17334- leaq level3_ident_pgt(%rip), %rbx
17335- movq %rdx, 0(%rbx, %rax, 8)
17336-
17337- movq %rdi, %rax
17338- shrq $PMD_SHIFT, %rax
17339- andq $(PTRS_PER_PMD - 1), %rax
17340- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
17341- leaq level2_spare_pgt(%rip), %rbx
17342- movq %rdx, 0(%rbx, %rax, 8)
17343-ident_complete:
17344+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
17345
17346 /*
17347 * Fixup the kernel text+data virtual addresses. Note that
17348@@ -161,8 +157,8 @@ ENTRY(secondary_startup_64)
17349 * after the boot processor executes this code.
17350 */
17351
17352- /* Enable PAE mode and PGE */
17353- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
17354+ /* Enable PAE mode and PSE/PGE */
17355+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17356 movq %rax, %cr4
17357
17358 /* Setup early boot stage 4 level pagetables. */
17359@@ -184,9 +180,16 @@ ENTRY(secondary_startup_64)
17360 movl $MSR_EFER, %ecx
17361 rdmsr
17362 btsl $_EFER_SCE, %eax /* Enable System Call */
17363- btl $20,%edi /* No Execute supported? */
17364+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
17365 jnc 1f
17366 btsl $_EFER_NX, %eax
17367+ leaq init_level4_pgt(%rip), %rdi
17368+#ifndef CONFIG_EFI
17369+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
17370+#endif
17371+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
17372+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
17373+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
17374 1: wrmsr /* Make changes effective */
17375
17376 /* Setup cr0 */
17377@@ -249,6 +252,7 @@ ENTRY(secondary_startup_64)
17378 * jump. In addition we need to ensure %cs is set so we make this
17379 * a far return.
17380 */
17381+ pax_set_fptr_mask
17382 movq initial_code(%rip),%rax
17383 pushq $0 # fake return address to stop unwinder
17384 pushq $__KERNEL_CS # set correct cs
17385@@ -262,16 +266,16 @@ ENTRY(secondary_startup_64)
17386 .quad x86_64_start_kernel
17387 ENTRY(initial_gs)
17388 .quad INIT_PER_CPU_VAR(irq_stack_union)
17389- __FINITDATA
17390
17391 ENTRY(stack_start)
17392 .quad init_thread_union+THREAD_SIZE-8
17393 .word 0
17394+ __FINITDATA
17395
17396 bad_address:
17397 jmp bad_address
17398
17399- .section ".init.text","ax"
17400+ __INIT
17401 #ifdef CONFIG_EARLY_PRINTK
17402 .globl early_idt_handlers
17403 early_idt_handlers:
17404@@ -316,18 +320,23 @@ ENTRY(early_idt_handler)
17405 #endif /* EARLY_PRINTK */
17406 1: hlt
17407 jmp 1b
17408+ .previous
17409
17410 #ifdef CONFIG_EARLY_PRINTK
17411+ __INITDATA
17412 early_recursion_flag:
17413 .long 0
17414+ .previous
17415
17416+ .section .rodata,"a",@progbits
17417 early_idt_msg:
17418 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
17419 early_idt_ripmsg:
17420 .asciz "RIP %s\n"
17421+ .previous
17422 #endif /* CONFIG_EARLY_PRINTK */
17423- .previous
17424
17425+ .section .rodata,"a",@progbits
17426 #define NEXT_PAGE(name) \
17427 .balign PAGE_SIZE; \
17428 ENTRY(name)
17429@@ -350,13 +359,41 @@ NEXT_PAGE(init_level4_pgt)
17430 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17431 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
17432 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17433+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
17434+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
17435+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
17436+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
17437+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
17438+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17439 .org init_level4_pgt + L4_START_KERNEL*8, 0
17440 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
17441 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
17442
17443+#ifdef CONFIG_PAX_PER_CPU_PGD
17444+NEXT_PAGE(cpu_pgd)
17445+ .rept NR_CPUS
17446+ .fill 512,8,0
17447+ .endr
17448+#endif
17449+
17450 NEXT_PAGE(level3_ident_pgt)
17451 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
17452+#ifdef CONFIG_XEN
17453 .fill 511,8,0
17454+#else
17455+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
17456+ .fill 510,8,0
17457+#endif
17458+
17459+NEXT_PAGE(level3_vmalloc_start_pgt)
17460+ .fill 512,8,0
17461+
17462+NEXT_PAGE(level3_vmalloc_end_pgt)
17463+ .fill 512,8,0
17464+
17465+NEXT_PAGE(level3_vmemmap_pgt)
17466+ .fill L3_VMEMMAP_START,8,0
17467+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
17468
17469 NEXT_PAGE(level3_kernel_pgt)
17470 .fill L3_START_KERNEL,8,0
17471@@ -364,20 +401,23 @@ NEXT_PAGE(level3_kernel_pgt)
17472 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
17473 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17474
17475+NEXT_PAGE(level2_vmemmap_pgt)
17476+ .fill 512,8,0
17477+
17478 NEXT_PAGE(level2_fixmap_pgt)
17479- .fill 506,8,0
17480- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
17481- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
17482- .fill 5,8,0
17483+ .fill 507,8,0
17484+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
17485+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
17486+ .fill 4,8,0
17487
17488-NEXT_PAGE(level1_fixmap_pgt)
17489+NEXT_PAGE(level1_vsyscall_pgt)
17490 .fill 512,8,0
17491
17492-NEXT_PAGE(level2_ident_pgt)
17493- /* Since I easily can, map the first 1G.
17494+ /* Since I easily can, map the first 2G.
17495 * Don't set NX because code runs from these pages.
17496 */
17497- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
17498+NEXT_PAGE(level2_ident_pgt)
17499+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
17500
17501 NEXT_PAGE(level2_kernel_pgt)
17502 /*
17503@@ -390,33 +430,55 @@ NEXT_PAGE(level2_kernel_pgt)
17504 * If you want to increase this then increase MODULES_VADDR
17505 * too.)
17506 */
17507- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
17508- KERNEL_IMAGE_SIZE/PMD_SIZE)
17509-
17510-NEXT_PAGE(level2_spare_pgt)
17511- .fill 512, 8, 0
17512+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
17513
17514 #undef PMDS
17515 #undef NEXT_PAGE
17516
17517- .data
17518+ .align PAGE_SIZE
17519+ENTRY(cpu_gdt_table)
17520+ .rept NR_CPUS
17521+ .quad 0x0000000000000000 /* NULL descriptor */
17522+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
17523+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
17524+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
17525+ .quad 0x00cffb000000ffff /* __USER32_CS */
17526+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
17527+ .quad 0x00affb000000ffff /* __USER_CS */
17528+
17529+#ifdef CONFIG_PAX_KERNEXEC
17530+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
17531+#else
17532+ .quad 0x0 /* unused */
17533+#endif
17534+
17535+ .quad 0,0 /* TSS */
17536+ .quad 0,0 /* LDT */
17537+ .quad 0,0,0 /* three TLS descriptors */
17538+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
17539+ /* asm/segment.h:GDT_ENTRIES must match this */
17540+
17541+ /* zero the remaining page */
17542+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
17543+ .endr
17544+
17545 .align 16
17546 .globl early_gdt_descr
17547 early_gdt_descr:
17548 .word GDT_ENTRIES*8-1
17549 early_gdt_descr_base:
17550- .quad INIT_PER_CPU_VAR(gdt_page)
17551+ .quad cpu_gdt_table
17552
17553 ENTRY(phys_base)
17554 /* This must match the first entry in level2_kernel_pgt */
17555 .quad 0x0000000000000000
17556
17557 #include "../../x86/xen/xen-head.S"
17558-
17559- .section .bss, "aw", @nobits
17560+
17561+ .section .rodata,"a",@progbits
17562 .align L1_CACHE_BYTES
17563 ENTRY(idt_table)
17564- .skip IDT_ENTRIES * 16
17565+ .fill 512,8,0
17566
17567 __PAGE_ALIGNED_BSS
17568 .align PAGE_SIZE
17569diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
17570index 9c3bd4a..e1d9b35 100644
17571--- a/arch/x86/kernel/i386_ksyms_32.c
17572+++ b/arch/x86/kernel/i386_ksyms_32.c
17573@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
17574 EXPORT_SYMBOL(cmpxchg8b_emu);
17575 #endif
17576
17577+EXPORT_SYMBOL_GPL(cpu_gdt_table);
17578+
17579 /* Networking helper routines. */
17580 EXPORT_SYMBOL(csum_partial_copy_generic);
17581+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
17582+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
17583
17584 EXPORT_SYMBOL(__get_user_1);
17585 EXPORT_SYMBOL(__get_user_2);
17586@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
17587
17588 EXPORT_SYMBOL(csum_partial);
17589 EXPORT_SYMBOL(empty_zero_page);
17590+
17591+#ifdef CONFIG_PAX_KERNEXEC
17592+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
17593+#endif
17594diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
17595index df89102..a244320 100644
17596--- a/arch/x86/kernel/i8259.c
17597+++ b/arch/x86/kernel/i8259.c
17598@@ -208,7 +208,7 @@ spurious_8259A_irq:
17599 "spurious 8259A interrupt: IRQ%d.\n", irq);
17600 spurious_irq_mask |= irqmask;
17601 }
17602- atomic_inc(&irq_err_count);
17603+ atomic_inc_unchecked(&irq_err_count);
17604 /*
17605 * Theoretically we do not have to handle this IRQ,
17606 * but in Linux this does not cause problems and is
17607diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
17608index 3a54dcb..1c22348 100644
17609--- a/arch/x86/kernel/init_task.c
17610+++ b/arch/x86/kernel/init_task.c
17611@@ -20,8 +20,7 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17612 * way process stacks are handled. This is done by having a special
17613 * "init_task" linker map entry..
17614 */
17615-union thread_union init_thread_union __init_task_data =
17616- { INIT_THREAD_INFO(init_task) };
17617+union thread_union init_thread_union __init_task_data;
17618
17619 /*
17620 * Initial task structure.
17621@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
17622 * section. Since TSS's are completely CPU-local, we want them
17623 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
17624 */
17625-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
17626-
17627+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
17628+EXPORT_SYMBOL(init_tss);
17629diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
17630index 99c4d30..74c84e9 100644
17631--- a/arch/x86/kernel/ioport.c
17632+++ b/arch/x86/kernel/ioport.c
17633@@ -6,6 +6,7 @@
17634 #include <linux/sched.h>
17635 #include <linux/kernel.h>
17636 #include <linux/capability.h>
17637+#include <linux/security.h>
17638 #include <linux/errno.h>
17639 #include <linux/types.h>
17640 #include <linux/ioport.h>
17641@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17642
17643 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
17644 return -EINVAL;
17645+#ifdef CONFIG_GRKERNSEC_IO
17646+ if (turn_on && grsec_disable_privio) {
17647+ gr_handle_ioperm();
17648+ return -EPERM;
17649+ }
17650+#endif
17651 if (turn_on && !capable(CAP_SYS_RAWIO))
17652 return -EPERM;
17653
17654@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
17655 * because the ->io_bitmap_max value must match the bitmap
17656 * contents:
17657 */
17658- tss = &per_cpu(init_tss, get_cpu());
17659+ tss = init_tss + get_cpu();
17660
17661 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
17662
17663@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, struct pt_regs *regs)
17664 return -EINVAL;
17665 /* Trying to gain more privileges? */
17666 if (level > old) {
17667+#ifdef CONFIG_GRKERNSEC_IO
17668+ if (grsec_disable_privio) {
17669+ gr_handle_iopl();
17670+ return -EPERM;
17671+ }
17672+#endif
17673 if (!capable(CAP_SYS_RAWIO))
17674 return -EPERM;
17675 }
17676diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
17677index 04bbd52..83a07d9 100644
17678--- a/arch/x86/kernel/irq.c
17679+++ b/arch/x86/kernel/irq.c
17680@@ -15,7 +15,7 @@
17681 #include <asm/mce.h>
17682 #include <asm/hw_irq.h>
17683
17684-atomic_t irq_err_count;
17685+atomic_unchecked_t irq_err_count;
17686
17687 /* Function pointer for generic interrupt vector handling */
17688 void (*generic_interrupt_extension)(void) = NULL;
17689@@ -114,9 +114,9 @@ static int show_other_interrupts(struct seq_file *p, int prec)
17690 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
17691 seq_printf(p, " Machine check polls\n");
17692 #endif
17693- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
17694+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
17695 #if defined(CONFIG_X86_IO_APIC)
17696- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
17697+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
17698 #endif
17699 return 0;
17700 }
17701@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
17702
17703 u64 arch_irq_stat(void)
17704 {
17705- u64 sum = atomic_read(&irq_err_count);
17706+ u64 sum = atomic_read_unchecked(&irq_err_count);
17707
17708 #ifdef CONFIG_X86_IO_APIC
17709- sum += atomic_read(&irq_mis_count);
17710+ sum += atomic_read_unchecked(&irq_mis_count);
17711 #endif
17712 return sum;
17713 }
17714diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
17715index 7d35d0f..03f1d52 100644
17716--- a/arch/x86/kernel/irq_32.c
17717+++ b/arch/x86/kernel/irq_32.c
17718@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
17719 __asm__ __volatile__("andl %%esp,%0" :
17720 "=r" (sp) : "0" (THREAD_SIZE - 1));
17721
17722- return sp < (sizeof(struct thread_info) + STACK_WARN);
17723+ return sp < STACK_WARN;
17724 }
17725
17726 static void print_stack_overflow(void)
17727@@ -54,9 +54,9 @@ static inline void print_stack_overflow(void) { }
17728 * per-CPU IRQ handling contexts (thread information and stack)
17729 */
17730 union irq_ctx {
17731- struct thread_info tinfo;
17732- u32 stack[THREAD_SIZE/sizeof(u32)];
17733-} __attribute__((aligned(PAGE_SIZE)));
17734+ unsigned long previous_esp;
17735+ u32 stack[THREAD_SIZE/sizeof(u32)];
17736+} __attribute__((aligned(THREAD_SIZE)));
17737
17738 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
17739 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
17740@@ -78,10 +78,9 @@ static void call_on_stack(void *func, void *stack)
17741 static inline int
17742 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17743 {
17744- union irq_ctx *curctx, *irqctx;
17745+ union irq_ctx *irqctx;
17746 u32 *isp, arg1, arg2;
17747
17748- curctx = (union irq_ctx *) current_thread_info();
17749 irqctx = __get_cpu_var(hardirq_ctx);
17750
17751 /*
17752@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17753 * handler) we can't do that and just have to keep using the
17754 * current stack (which is the irq stack already after all)
17755 */
17756- if (unlikely(curctx == irqctx))
17757+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
17758 return 0;
17759
17760 /* build the stack frame on the IRQ stack */
17761- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17762- irqctx->tinfo.task = curctx->tinfo.task;
17763- irqctx->tinfo.previous_esp = current_stack_pointer;
17764+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17765+ irqctx->previous_esp = current_stack_pointer;
17766
17767- /*
17768- * Copy the softirq bits in preempt_count so that the
17769- * softirq checks work in the hardirq context.
17770- */
17771- irqctx->tinfo.preempt_count =
17772- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
17773- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
17774+#ifdef CONFIG_PAX_MEMORY_UDEREF
17775+ __set_fs(MAKE_MM_SEG(0));
17776+#endif
17777
17778 if (unlikely(overflow))
17779 call_on_stack(print_stack_overflow, isp);
17780@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17781 : "0" (irq), "1" (desc), "2" (isp),
17782 "D" (desc->handle_irq)
17783 : "memory", "cc", "ecx");
17784+
17785+#ifdef CONFIG_PAX_MEMORY_UDEREF
17786+ __set_fs(current_thread_info()->addr_limit);
17787+#endif
17788+
17789 return 1;
17790 }
17791
17792@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
17793 */
17794 void __cpuinit irq_ctx_init(int cpu)
17795 {
17796- union irq_ctx *irqctx;
17797-
17798 if (per_cpu(hardirq_ctx, cpu))
17799 return;
17800
17801- irqctx = &per_cpu(hardirq_stack, cpu);
17802- irqctx->tinfo.task = NULL;
17803- irqctx->tinfo.exec_domain = NULL;
17804- irqctx->tinfo.cpu = cpu;
17805- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
17806- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17807-
17808- per_cpu(hardirq_ctx, cpu) = irqctx;
17809-
17810- irqctx = &per_cpu(softirq_stack, cpu);
17811- irqctx->tinfo.task = NULL;
17812- irqctx->tinfo.exec_domain = NULL;
17813- irqctx->tinfo.cpu = cpu;
17814- irqctx->tinfo.preempt_count = 0;
17815- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
17816-
17817- per_cpu(softirq_ctx, cpu) = irqctx;
17818+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
17819+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
17820
17821 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
17822 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
17823@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
17824 asmlinkage void do_softirq(void)
17825 {
17826 unsigned long flags;
17827- struct thread_info *curctx;
17828 union irq_ctx *irqctx;
17829 u32 *isp;
17830
17831@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
17832 local_irq_save(flags);
17833
17834 if (local_softirq_pending()) {
17835- curctx = current_thread_info();
17836 irqctx = __get_cpu_var(softirq_ctx);
17837- irqctx->tinfo.task = curctx->task;
17838- irqctx->tinfo.previous_esp = current_stack_pointer;
17839+ irqctx->previous_esp = current_stack_pointer;
17840
17841 /* build the stack frame on the softirq stack */
17842- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
17843+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
17844+
17845+#ifdef CONFIG_PAX_MEMORY_UDEREF
17846+ __set_fs(MAKE_MM_SEG(0));
17847+#endif
17848
17849 call_on_stack(__do_softirq, isp);
17850+
17851+#ifdef CONFIG_PAX_MEMORY_UDEREF
17852+ __set_fs(current_thread_info()->addr_limit);
17853+#endif
17854+
17855 /*
17856 * Shouldnt happen, we returned above if in_interrupt():
17857 */
17858diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
17859index 8d82a77..0baf312 100644
17860--- a/arch/x86/kernel/kgdb.c
17861+++ b/arch/x86/kernel/kgdb.c
17862@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
17863
17864 /* clear the trace bit */
17865 linux_regs->flags &= ~X86_EFLAGS_TF;
17866- atomic_set(&kgdb_cpu_doing_single_step, -1);
17867+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
17868
17869 /* set the trace bit if we're stepping */
17870 if (remcomInBuffer[0] == 's') {
17871 linux_regs->flags |= X86_EFLAGS_TF;
17872 kgdb_single_step = 1;
17873- atomic_set(&kgdb_cpu_doing_single_step,
17874+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
17875 raw_smp_processor_id());
17876 }
17877
17878@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
17879 break;
17880
17881 case DIE_DEBUG:
17882- if (atomic_read(&kgdb_cpu_doing_single_step) ==
17883+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
17884 raw_smp_processor_id()) {
17885 if (user_mode(regs))
17886 return single_step_cont(regs, args);
17887@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
17888 return instruction_pointer(regs);
17889 }
17890
17891-struct kgdb_arch arch_kgdb_ops = {
17892+const struct kgdb_arch arch_kgdb_ops = {
17893 /* Breakpoint instruction: */
17894 .gdb_bpt_instr = { 0xcc },
17895 .flags = KGDB_HW_BREAKPOINT,
17896diff --git a/arch/x86/kernel/kprobes.c b/arch/x86/kernel/kprobes.c
17897index 7a67820..8d15b75 100644
17898--- a/arch/x86/kernel/kprobes.c
17899+++ b/arch/x86/kernel/kprobes.c
17900@@ -168,9 +168,13 @@ static void __kprobes set_jmp_op(void *from, void *to)
17901 char op;
17902 s32 raddr;
17903 } __attribute__((packed)) * jop;
17904- jop = (struct __arch_jmp_op *)from;
17905+
17906+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
17907+
17908+ pax_open_kernel();
17909 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
17910 jop->op = RELATIVEJUMP_INSTRUCTION;
17911+ pax_close_kernel();
17912 }
17913
17914 /*
17915@@ -195,7 +199,7 @@ static int __kprobes can_boost(kprobe_opcode_t *opcodes)
17916 kprobe_opcode_t opcode;
17917 kprobe_opcode_t *orig_opcodes = opcodes;
17918
17919- if (search_exception_tables((unsigned long)opcodes))
17920+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
17921 return 0; /* Page fault may occur on this address. */
17922
17923 retry:
17924@@ -339,7 +343,9 @@ static void __kprobes fix_riprel(struct kprobe *p)
17925 disp = (u8 *) p->addr + *((s32 *) insn) -
17926 (u8 *) p->ainsn.insn;
17927 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
17928+ pax_open_kernel();
17929 *(s32 *)insn = (s32) disp;
17930+ pax_close_kernel();
17931 }
17932 }
17933 #endif
17934@@ -347,16 +353,18 @@ static void __kprobes fix_riprel(struct kprobe *p)
17935
17936 static void __kprobes arch_copy_kprobe(struct kprobe *p)
17937 {
17938- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17939+ pax_open_kernel();
17940+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
17941+ pax_close_kernel();
17942
17943 fix_riprel(p);
17944
17945- if (can_boost(p->addr))
17946+ if (can_boost(ktla_ktva(p->addr)))
17947 p->ainsn.boostable = 0;
17948 else
17949 p->ainsn.boostable = -1;
17950
17951- p->opcode = *p->addr;
17952+ p->opcode = *(ktla_ktva(p->addr));
17953 }
17954
17955 int __kprobes arch_prepare_kprobe(struct kprobe *p)
17956@@ -434,7 +442,7 @@ static void __kprobes prepare_singlestep(struct kprobe *p, struct pt_regs *regs)
17957 if (p->opcode == BREAKPOINT_INSTRUCTION)
17958 regs->ip = (unsigned long)p->addr;
17959 else
17960- regs->ip = (unsigned long)p->ainsn.insn;
17961+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17962 }
17963
17964 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
17965@@ -455,7 +463,7 @@ static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs,
17966 if (p->ainsn.boostable == 1 && !p->post_handler) {
17967 /* Boost up -- we can execute copied instructions directly */
17968 reset_current_kprobe();
17969- regs->ip = (unsigned long)p->ainsn.insn;
17970+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
17971 preempt_enable_no_resched();
17972 return;
17973 }
17974@@ -525,7 +533,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
17975 struct kprobe_ctlblk *kcb;
17976
17977 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
17978- if (*addr != BREAKPOINT_INSTRUCTION) {
17979+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
17980 /*
17981 * The breakpoint instruction was removed right
17982 * after we hit it. Another cpu has removed
17983@@ -637,6 +645,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
17984 /* Skip orig_ax, ip, cs */
17985 " addq $24, %rsp\n"
17986 " popfq\n"
17987+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN
17988+ " btsq $63,(%rsp)\n"
17989+#endif
17990 #else
17991 " pushf\n"
17992 /*
17993@@ -777,7 +788,7 @@ static void __kprobes resume_execution(struct kprobe *p,
17994 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
17995 {
17996 unsigned long *tos = stack_addr(regs);
17997- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
17998+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
17999 unsigned long orig_ip = (unsigned long)p->addr;
18000 kprobe_opcode_t *insn = p->ainsn.insn;
18001
18002@@ -960,7 +971,7 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
18003 struct die_args *args = data;
18004 int ret = NOTIFY_DONE;
18005
18006- if (args->regs && user_mode_vm(args->regs))
18007+ if (args->regs && user_mode(args->regs))
18008 return ret;
18009
18010 switch (val) {
18011diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
18012index 63b0ec8..6d92227 100644
18013--- a/arch/x86/kernel/kvm.c
18014+++ b/arch/x86/kernel/kvm.c
18015@@ -216,6 +216,7 @@ static void __init paravirt_ops_setup(void)
18016 pv_mmu_ops.set_pud = kvm_set_pud;
18017 #if PAGETABLE_LEVELS == 4
18018 pv_mmu_ops.set_pgd = kvm_set_pgd;
18019+ pv_mmu_ops.set_pgd_batched = kvm_set_pgd;
18020 #endif
18021 #endif
18022 pv_mmu_ops.flush_tlb_user = kvm_flush_tlb;
18023diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
18024index ec6ef60..ab2c824 100644
18025--- a/arch/x86/kernel/ldt.c
18026+++ b/arch/x86/kernel/ldt.c
18027@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
18028 if (reload) {
18029 #ifdef CONFIG_SMP
18030 preempt_disable();
18031- load_LDT(pc);
18032+ load_LDT_nolock(pc);
18033 if (!cpumask_equal(mm_cpumask(current->mm),
18034 cpumask_of(smp_processor_id())))
18035 smp_call_function(flush_ldt, current->mm, 1);
18036 preempt_enable();
18037 #else
18038- load_LDT(pc);
18039+ load_LDT_nolock(pc);
18040 #endif
18041 }
18042 if (oldsize) {
18043@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
18044 return err;
18045
18046 for (i = 0; i < old->size; i++)
18047- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
18048+ write_ldt_entry(new->ldt, i, old->ldt + i);
18049 return 0;
18050 }
18051
18052@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
18053 retval = copy_ldt(&mm->context, &old_mm->context);
18054 mutex_unlock(&old_mm->context.lock);
18055 }
18056+
18057+ if (tsk == current) {
18058+ mm->context.vdso = 0;
18059+
18060+#ifdef CONFIG_X86_32
18061+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
18062+ mm->context.user_cs_base = 0UL;
18063+ mm->context.user_cs_limit = ~0UL;
18064+
18065+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
18066+ cpus_clear(mm->context.cpu_user_cs_mask);
18067+#endif
18068+
18069+#endif
18070+#endif
18071+
18072+ }
18073+
18074 return retval;
18075 }
18076
18077@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
18078 }
18079 }
18080
18081+#ifdef CONFIG_PAX_SEGMEXEC
18082+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
18083+ error = -EINVAL;
18084+ goto out_unlock;
18085+ }
18086+#endif
18087+
18088 fill_ldt(&ldt, &ldt_info);
18089 if (oldmode)
18090 ldt.avl = 0;
18091diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
18092index c1c429d..f02eaf9 100644
18093--- a/arch/x86/kernel/machine_kexec_32.c
18094+++ b/arch/x86/kernel/machine_kexec_32.c
18095@@ -26,7 +26,7 @@
18096 #include <asm/system.h>
18097 #include <asm/cacheflush.h>
18098
18099-static void set_idt(void *newidt, __u16 limit)
18100+static void set_idt(struct desc_struct *newidt, __u16 limit)
18101 {
18102 struct desc_ptr curidt;
18103
18104@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
18105 }
18106
18107
18108-static void set_gdt(void *newgdt, __u16 limit)
18109+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
18110 {
18111 struct desc_ptr curgdt;
18112
18113@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
18114 }
18115
18116 control_page = page_address(image->control_code_page);
18117- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
18118+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
18119
18120 relocate_kernel_ptr = control_page;
18121 page_list[PA_CONTROL_PAGE] = __pa(control_page);
18122diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c
18123index 1e47679..e73449d 100644
18124--- a/arch/x86/kernel/microcode_amd.c
18125+++ b/arch/x86/kernel/microcode_amd.c
18126@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int cpu)
18127 uci->mc = NULL;
18128 }
18129
18130-static struct microcode_ops microcode_amd_ops = {
18131+static const struct microcode_ops microcode_amd_ops = {
18132 .request_microcode_user = request_microcode_user,
18133 .request_microcode_fw = request_microcode_fw,
18134 .collect_cpu_info = collect_cpu_info_amd,
18135@@ -372,7 +372,7 @@ static struct microcode_ops microcode_amd_ops = {
18136 .microcode_fini_cpu = microcode_fini_cpu_amd,
18137 };
18138
18139-struct microcode_ops * __init init_amd_microcode(void)
18140+const struct microcode_ops * __init init_amd_microcode(void)
18141 {
18142 return &microcode_amd_ops;
18143 }
18144diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
18145index 378e9a8..b5a6ea9 100644
18146--- a/arch/x86/kernel/microcode_core.c
18147+++ b/arch/x86/kernel/microcode_core.c
18148@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
18149
18150 #define MICROCODE_VERSION "2.00"
18151
18152-static struct microcode_ops *microcode_ops;
18153+static const struct microcode_ops *microcode_ops;
18154
18155 /*
18156 * Synchronization.
18157diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
18158index 0d334dd..14cedaf 100644
18159--- a/arch/x86/kernel/microcode_intel.c
18160+++ b/arch/x86/kernel/microcode_intel.c
18161@@ -443,13 +443,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device)
18162
18163 static int get_ucode_user(void *to, const void *from, size_t n)
18164 {
18165- return copy_from_user(to, from, n);
18166+ return copy_from_user(to, (const void __force_user *)from, n);
18167 }
18168
18169 static enum ucode_state
18170 request_microcode_user(int cpu, const void __user *buf, size_t size)
18171 {
18172- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
18173+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
18174 }
18175
18176 static void microcode_fini_cpu(int cpu)
18177@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
18178 uci->mc = NULL;
18179 }
18180
18181-static struct microcode_ops microcode_intel_ops = {
18182+static const struct microcode_ops microcode_intel_ops = {
18183 .request_microcode_user = request_microcode_user,
18184 .request_microcode_fw = request_microcode_fw,
18185 .collect_cpu_info = collect_cpu_info,
18186@@ -468,7 +468,7 @@ static struct microcode_ops microcode_intel_ops = {
18187 .microcode_fini_cpu = microcode_fini_cpu,
18188 };
18189
18190-struct microcode_ops * __init init_intel_microcode(void)
18191+const struct microcode_ops * __init init_intel_microcode(void)
18192 {
18193 return &microcode_intel_ops;
18194 }
18195diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
18196index 89f386f..9028f51 100644
18197--- a/arch/x86/kernel/module.c
18198+++ b/arch/x86/kernel/module.c
18199@@ -34,7 +34,7 @@
18200 #define DEBUGP(fmt...)
18201 #endif
18202
18203-void *module_alloc(unsigned long size)
18204+static void *__module_alloc(unsigned long size, pgprot_t prot)
18205 {
18206 struct vm_struct *area;
18207
18208@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
18209 if (!area)
18210 return NULL;
18211
18212- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
18213- PAGE_KERNEL_EXEC);
18214+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
18215+}
18216+
18217+void *module_alloc(unsigned long size)
18218+{
18219+
18220+#ifdef CONFIG_PAX_KERNEXEC
18221+ return __module_alloc(size, PAGE_KERNEL);
18222+#else
18223+ return __module_alloc(size, PAGE_KERNEL_EXEC);
18224+#endif
18225+
18226 }
18227
18228 /* Free memory returned from module_alloc */
18229@@ -58,6 +68,40 @@ void module_free(struct module *mod, void *module_region)
18230 vfree(module_region);
18231 }
18232
18233+#ifdef CONFIG_PAX_KERNEXEC
18234+#ifdef CONFIG_X86_32
18235+void *module_alloc_exec(unsigned long size)
18236+{
18237+ struct vm_struct *area;
18238+
18239+ if (size == 0)
18240+ return NULL;
18241+
18242+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
18243+ return area ? area->addr : NULL;
18244+}
18245+EXPORT_SYMBOL(module_alloc_exec);
18246+
18247+void module_free_exec(struct module *mod, void *module_region)
18248+{
18249+ vunmap(module_region);
18250+}
18251+EXPORT_SYMBOL(module_free_exec);
18252+#else
18253+void module_free_exec(struct module *mod, void *module_region)
18254+{
18255+ module_free(mod, module_region);
18256+}
18257+EXPORT_SYMBOL(module_free_exec);
18258+
18259+void *module_alloc_exec(unsigned long size)
18260+{
18261+ return __module_alloc(size, PAGE_KERNEL_RX);
18262+}
18263+EXPORT_SYMBOL(module_alloc_exec);
18264+#endif
18265+#endif
18266+
18267 /* We don't need anything special. */
18268 int module_frob_arch_sections(Elf_Ehdr *hdr,
18269 Elf_Shdr *sechdrs,
18270@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18271 unsigned int i;
18272 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
18273 Elf32_Sym *sym;
18274- uint32_t *location;
18275+ uint32_t *plocation, location;
18276
18277 DEBUGP("Applying relocate section %u to %u\n", relsec,
18278 sechdrs[relsec].sh_info);
18279 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
18280 /* This is where to make the change */
18281- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
18282- + rel[i].r_offset;
18283+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
18284+ location = (uint32_t)plocation;
18285+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
18286+ plocation = ktla_ktva((void *)plocation);
18287 /* This is the symbol it is referring to. Note that all
18288 undefined symbols have been resolved. */
18289 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
18290@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
18291 switch (ELF32_R_TYPE(rel[i].r_info)) {
18292 case R_386_32:
18293 /* We add the value into the location given */
18294- *location += sym->st_value;
18295+ pax_open_kernel();
18296+ *plocation += sym->st_value;
18297+ pax_close_kernel();
18298 break;
18299 case R_386_PC32:
18300 /* Add the value, subtract its postition */
18301- *location += sym->st_value - (uint32_t)location;
18302+ pax_open_kernel();
18303+ *plocation += sym->st_value - location;
18304+ pax_close_kernel();
18305 break;
18306 default:
18307 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
18308@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
18309 case R_X86_64_NONE:
18310 break;
18311 case R_X86_64_64:
18312+ pax_open_kernel();
18313 *(u64 *)loc = val;
18314+ pax_close_kernel();
18315 break;
18316 case R_X86_64_32:
18317+ pax_open_kernel();
18318 *(u32 *)loc = val;
18319+ pax_close_kernel();
18320 if (val != *(u32 *)loc)
18321 goto overflow;
18322 break;
18323 case R_X86_64_32S:
18324+ pax_open_kernel();
18325 *(s32 *)loc = val;
18326+ pax_close_kernel();
18327 if ((s64)val != *(s32 *)loc)
18328 goto overflow;
18329 break;
18330 case R_X86_64_PC32:
18331 val -= (u64)loc;
18332+ pax_open_kernel();
18333 *(u32 *)loc = val;
18334+ pax_close_kernel();
18335+
18336 #if 0
18337 if ((s64)val != *(s32 *)loc)
18338 goto overflow;
18339diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
18340index 3a7c5a4..9191528 100644
18341--- a/arch/x86/kernel/paravirt-spinlocks.c
18342+++ b/arch/x86/kernel/paravirt-spinlocks.c
18343@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
18344 __raw_spin_lock(lock);
18345 }
18346
18347-struct pv_lock_ops pv_lock_ops = {
18348+struct pv_lock_ops pv_lock_ops __read_only = {
18349 #ifdef CONFIG_SMP
18350 .spin_is_locked = __ticket_spin_is_locked,
18351 .spin_is_contended = __ticket_spin_is_contended,
18352diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
18353index 1b1739d..dea6077 100644
18354--- a/arch/x86/kernel/paravirt.c
18355+++ b/arch/x86/kernel/paravirt.c
18356@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
18357 {
18358 return x;
18359 }
18360+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18361+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
18362+#endif
18363
18364 void __init default_banner(void)
18365 {
18366@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbuf, const void *target,
18367 * corresponding structure. */
18368 static void *get_call_destination(u8 type)
18369 {
18370- struct paravirt_patch_template tmpl = {
18371+ const struct paravirt_patch_template tmpl = {
18372 .pv_init_ops = pv_init_ops,
18373 .pv_time_ops = pv_time_ops,
18374 .pv_cpu_ops = pv_cpu_ops,
18375@@ -133,6 +136,8 @@ static void *get_call_destination(u8 type)
18376 .pv_lock_ops = pv_lock_ops,
18377 #endif
18378 };
18379+
18380+ pax_track_stack();
18381 return *((void **)&tmpl + type);
18382 }
18383
18384@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
18385 if (opfunc == NULL)
18386 /* If there's no function, patch it with a ud2a (BUG) */
18387 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
18388- else if (opfunc == _paravirt_nop)
18389+ else if (opfunc == (void *)_paravirt_nop)
18390 /* If the operation is a nop, then nop the callsite */
18391 ret = paravirt_patch_nop();
18392
18393 /* identity functions just return their single argument */
18394- else if (opfunc == _paravirt_ident_32)
18395+ else if (opfunc == (void *)_paravirt_ident_32)
18396 ret = paravirt_patch_ident_32(insnbuf, len);
18397- else if (opfunc == _paravirt_ident_64)
18398+ else if (opfunc == (void *)_paravirt_ident_64)
18399 ret = paravirt_patch_ident_64(insnbuf, len);
18400+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
18401+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
18402+ ret = paravirt_patch_ident_64(insnbuf, len);
18403+#endif
18404
18405 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
18406 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
18407@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
18408 if (insn_len > len || start == NULL)
18409 insn_len = len;
18410 else
18411- memcpy(insnbuf, start, insn_len);
18412+ memcpy(insnbuf, ktla_ktva(start), insn_len);
18413
18414 return insn_len;
18415 }
18416@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
18417 preempt_enable();
18418 }
18419
18420-struct pv_info pv_info = {
18421+struct pv_info pv_info __read_only = {
18422 .name = "bare hardware",
18423 .paravirt_enabled = 0,
18424 .kernel_rpl = 0,
18425 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
18426 };
18427
18428-struct pv_init_ops pv_init_ops = {
18429+struct pv_init_ops pv_init_ops __read_only = {
18430 .patch = native_patch,
18431 };
18432
18433-struct pv_time_ops pv_time_ops = {
18434+struct pv_time_ops pv_time_ops __read_only = {
18435 .sched_clock = native_sched_clock,
18436 };
18437
18438-struct pv_irq_ops pv_irq_ops = {
18439+struct pv_irq_ops pv_irq_ops __read_only = {
18440 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
18441 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
18442 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
18443@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
18444 #endif
18445 };
18446
18447-struct pv_cpu_ops pv_cpu_ops = {
18448+struct pv_cpu_ops pv_cpu_ops __read_only = {
18449 .cpuid = native_cpuid,
18450 .get_debugreg = native_get_debugreg,
18451 .set_debugreg = native_set_debugreg,
18452@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
18453 .end_context_switch = paravirt_nop,
18454 };
18455
18456-struct pv_apic_ops pv_apic_ops = {
18457+struct pv_apic_ops pv_apic_ops __read_only = {
18458 #ifdef CONFIG_X86_LOCAL_APIC
18459 .startup_ipi_hook = paravirt_nop,
18460 #endif
18461 };
18462
18463-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
18464+#ifdef CONFIG_X86_32
18465+#ifdef CONFIG_X86_PAE
18466+/* 64-bit pagetable entries */
18467+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
18468+#else
18469 /* 32-bit pagetable entries */
18470 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
18471+#endif
18472 #else
18473 /* 64-bit pagetable entries */
18474 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
18475 #endif
18476
18477-struct pv_mmu_ops pv_mmu_ops = {
18478+struct pv_mmu_ops pv_mmu_ops __read_only = {
18479
18480 .read_cr2 = native_read_cr2,
18481 .write_cr2 = native_write_cr2,
18482@@ -448,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = {
18483 .make_pud = PTE_IDENT,
18484
18485 .set_pgd = native_set_pgd,
18486+ .set_pgd_batched = native_set_pgd_batched,
18487 #endif
18488 #endif /* PAGETABLE_LEVELS >= 3 */
18489
18490@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
18491 },
18492
18493 .set_fixmap = native_set_fixmap,
18494+
18495+#ifdef CONFIG_PAX_KERNEXEC
18496+ .pax_open_kernel = native_pax_open_kernel,
18497+ .pax_close_kernel = native_pax_close_kernel,
18498+#endif
18499+
18500 };
18501
18502 EXPORT_SYMBOL_GPL(pv_time_ops);
18503diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
18504index 1a2d4b1..6a0dd55 100644
18505--- a/arch/x86/kernel/pci-calgary_64.c
18506+++ b/arch/x86/kernel/pci-calgary_64.c
18507@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct device *dev, size_t size,
18508 free_pages((unsigned long)vaddr, get_order(size));
18509 }
18510
18511-static struct dma_map_ops calgary_dma_ops = {
18512+static const struct dma_map_ops calgary_dma_ops = {
18513 .alloc_coherent = calgary_alloc_coherent,
18514 .free_coherent = calgary_free_coherent,
18515 .map_sg = calgary_map_sg,
18516diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
18517index 6ac3931..42b4414 100644
18518--- a/arch/x86/kernel/pci-dma.c
18519+++ b/arch/x86/kernel/pci-dma.c
18520@@ -14,7 +14,7 @@
18521
18522 static int forbid_dac __read_mostly;
18523
18524-struct dma_map_ops *dma_ops;
18525+const struct dma_map_ops *dma_ops;
18526 EXPORT_SYMBOL(dma_ops);
18527
18528 static int iommu_sac_force __read_mostly;
18529@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
18530
18531 int dma_supported(struct device *dev, u64 mask)
18532 {
18533- struct dma_map_ops *ops = get_dma_ops(dev);
18534+ const struct dma_map_ops *ops = get_dma_ops(dev);
18535
18536 #ifdef CONFIG_PCI
18537 if (mask > 0xffffffff && forbid_dac > 0) {
18538diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
18539index 1c76691..e3632db 100644
18540--- a/arch/x86/kernel/pci-gart_64.c
18541+++ b/arch/x86/kernel/pci-gart_64.c
18542@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
18543 return -1;
18544 }
18545
18546-static struct dma_map_ops gart_dma_ops = {
18547+static const struct dma_map_ops gart_dma_ops = {
18548 .map_sg = gart_map_sg,
18549 .unmap_sg = gart_unmap_sg,
18550 .map_page = gart_map_page,
18551diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
18552index a3933d4..c898869 100644
18553--- a/arch/x86/kernel/pci-nommu.c
18554+++ b/arch/x86/kernel/pci-nommu.c
18555@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
18556 flush_write_buffers();
18557 }
18558
18559-struct dma_map_ops nommu_dma_ops = {
18560+const struct dma_map_ops nommu_dma_ops = {
18561 .alloc_coherent = dma_generic_alloc_coherent,
18562 .free_coherent = nommu_free_coherent,
18563 .map_sg = nommu_map_sg,
18564diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
18565index aaa6b78..4de1881 100644
18566--- a/arch/x86/kernel/pci-swiotlb.c
18567+++ b/arch/x86/kernel/pci-swiotlb.c
18568@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
18569 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
18570 }
18571
18572-static struct dma_map_ops swiotlb_dma_ops = {
18573+static const struct dma_map_ops swiotlb_dma_ops = {
18574 .mapping_error = swiotlb_dma_mapping_error,
18575 .alloc_coherent = x86_swiotlb_alloc_coherent,
18576 .free_coherent = swiotlb_free_coherent,
18577diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
18578index fc6c84d..0312ca2 100644
18579--- a/arch/x86/kernel/process.c
18580+++ b/arch/x86/kernel/process.c
18581@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_struct *tsk)
18582
18583 void free_thread_info(struct thread_info *ti)
18584 {
18585- free_thread_xstate(ti->task);
18586 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
18587 }
18588
18589+static struct kmem_cache *task_struct_cachep;
18590+
18591 void arch_task_cache_init(void)
18592 {
18593- task_xstate_cachep =
18594- kmem_cache_create("task_xstate", xstate_size,
18595+ /* create a slab on which task_structs can be allocated */
18596+ task_struct_cachep =
18597+ kmem_cache_create("task_struct", sizeof(struct task_struct),
18598+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
18599+
18600+ task_xstate_cachep =
18601+ kmem_cache_create("task_xstate", xstate_size,
18602 __alignof__(union thread_xstate),
18603- SLAB_PANIC | SLAB_NOTRACK, NULL);
18604+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
18605+}
18606+
18607+struct task_struct *alloc_task_struct(void)
18608+{
18609+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
18610+}
18611+
18612+void free_task_struct(struct task_struct *task)
18613+{
18614+ free_thread_xstate(task);
18615+ kmem_cache_free(task_struct_cachep, task);
18616 }
18617
18618 /*
18619@@ -73,7 +90,7 @@ void exit_thread(void)
18620 unsigned long *bp = t->io_bitmap_ptr;
18621
18622 if (bp) {
18623- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
18624+ struct tss_struct *tss = init_tss + get_cpu();
18625
18626 t->io_bitmap_ptr = NULL;
18627 clear_thread_flag(TIF_IO_BITMAP);
18628@@ -93,6 +110,9 @@ void flush_thread(void)
18629
18630 clear_tsk_thread_flag(tsk, TIF_DEBUG);
18631
18632+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
18633+ loadsegment(gs, 0);
18634+#endif
18635 tsk->thread.debugreg0 = 0;
18636 tsk->thread.debugreg1 = 0;
18637 tsk->thread.debugreg2 = 0;
18638@@ -307,7 +327,7 @@ void default_idle(void)
18639 EXPORT_SYMBOL(default_idle);
18640 #endif
18641
18642-void stop_this_cpu(void *dummy)
18643+__noreturn void stop_this_cpu(void *dummy)
18644 {
18645 local_irq_disable();
18646 /*
18647@@ -568,16 +588,38 @@ static int __init idle_setup(char *str)
18648 }
18649 early_param("idle", idle_setup);
18650
18651-unsigned long arch_align_stack(unsigned long sp)
18652+#ifdef CONFIG_PAX_RANDKSTACK
18653+void pax_randomize_kstack(struct pt_regs *regs)
18654 {
18655- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
18656- sp -= get_random_int() % 8192;
18657- return sp & ~0xf;
18658-}
18659+ struct thread_struct *thread = &current->thread;
18660+ unsigned long time;
18661
18662-unsigned long arch_randomize_brk(struct mm_struct *mm)
18663-{
18664- unsigned long range_end = mm->brk + 0x02000000;
18665- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
18666+ if (!randomize_va_space)
18667+ return;
18668+
18669+ if (v8086_mode(regs))
18670+ return;
18671+
18672+ rdtscl(time);
18673+
18674+ /* P4 seems to return a 0 LSB, ignore it */
18675+#ifdef CONFIG_MPENTIUM4
18676+ time &= 0x3EUL;
18677+ time <<= 2;
18678+#elif defined(CONFIG_X86_64)
18679+ time &= 0xFUL;
18680+ time <<= 4;
18681+#else
18682+ time &= 0x1FUL;
18683+ time <<= 3;
18684+#endif
18685+
18686+ thread->sp0 ^= time;
18687+ load_sp0(init_tss + smp_processor_id(), thread);
18688+
18689+#ifdef CONFIG_X86_64
18690+ percpu_write(kernel_stack, thread->sp0);
18691+#endif
18692 }
18693+#endif
18694
18695diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
18696index c40c432..6e1df72 100644
18697--- a/arch/x86/kernel/process_32.c
18698+++ b/arch/x86/kernel/process_32.c
18699@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
18700 unsigned long thread_saved_pc(struct task_struct *tsk)
18701 {
18702 return ((unsigned long *)tsk->thread.sp)[3];
18703+//XXX return tsk->thread.eip;
18704 }
18705
18706 #ifndef CONFIG_SMP
18707@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, int all)
18708 unsigned short ss, gs;
18709 const char *board;
18710
18711- if (user_mode_vm(regs)) {
18712+ if (user_mode(regs)) {
18713 sp = regs->sp;
18714 ss = regs->ss & 0xffff;
18715- gs = get_user_gs(regs);
18716 } else {
18717 sp = (unsigned long) (&regs->sp);
18718 savesegment(ss, ss);
18719- savesegment(gs, gs);
18720 }
18721+ gs = get_user_gs(regs);
18722
18723 printk("\n");
18724
18725@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
18726 regs.bx = (unsigned long) fn;
18727 regs.dx = (unsigned long) arg;
18728
18729- regs.ds = __USER_DS;
18730- regs.es = __USER_DS;
18731+ regs.ds = __KERNEL_DS;
18732+ regs.es = __KERNEL_DS;
18733 regs.fs = __KERNEL_PERCPU;
18734- regs.gs = __KERNEL_STACK_CANARY;
18735+ savesegment(gs, regs.gs);
18736 regs.orig_ax = -1;
18737 regs.ip = (unsigned long) kernel_thread_helper;
18738 regs.cs = __KERNEL_CS | get_kernel_rpl();
18739@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18740 struct task_struct *tsk;
18741 int err;
18742
18743- childregs = task_pt_regs(p);
18744+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
18745 *childregs = *regs;
18746 childregs->ax = 0;
18747 childregs->sp = sp;
18748
18749 p->thread.sp = (unsigned long) childregs;
18750 p->thread.sp0 = (unsigned long) (childregs+1);
18751+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18752
18753 p->thread.ip = (unsigned long) ret_from_fork;
18754
18755@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18756 struct thread_struct *prev = &prev_p->thread,
18757 *next = &next_p->thread;
18758 int cpu = smp_processor_id();
18759- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18760+ struct tss_struct *tss = init_tss + cpu;
18761 bool preload_fpu;
18762
18763 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
18764@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18765 */
18766 lazy_save_gs(prev->gs);
18767
18768+#ifdef CONFIG_PAX_MEMORY_UDEREF
18769+ __set_fs(task_thread_info(next_p)->addr_limit);
18770+#endif
18771+
18772 /*
18773 * Load the per-thread Thread-Local Storage descriptor.
18774 */
18775@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18776 */
18777 arch_end_context_switch(next_p);
18778
18779+ percpu_write(current_task, next_p);
18780+ percpu_write(current_tinfo, &next_p->tinfo);
18781+
18782 if (preload_fpu)
18783 __math_state_restore();
18784
18785@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18786 if (prev->gs | next->gs)
18787 lazy_load_gs(next->gs);
18788
18789- percpu_write(current_task, next_p);
18790-
18791 return prev_p;
18792 }
18793
18794@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_struct *p)
18795 } while (count++ < 16);
18796 return 0;
18797 }
18798-
18799diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
18800index 39493bc..196816d 100644
18801--- a/arch/x86/kernel/process_64.c
18802+++ b/arch/x86/kernel/process_64.c
18803@@ -91,7 +91,7 @@ static void __exit_idle(void)
18804 void exit_idle(void)
18805 {
18806 /* idle loop has pid 0 */
18807- if (current->pid)
18808+ if (task_pid_nr(current))
18809 return;
18810 __exit_idle();
18811 }
18812@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, int all)
18813 if (!board)
18814 board = "";
18815 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
18816- current->pid, current->comm, print_tainted(),
18817+ task_pid_nr(current), current->comm, print_tainted(),
18818 init_utsname()->release,
18819 (int)strcspn(init_utsname()->version, " "),
18820 init_utsname()->version, board);
18821@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18822 struct pt_regs *childregs;
18823 struct task_struct *me = current;
18824
18825- childregs = ((struct pt_regs *)
18826- (THREAD_SIZE + task_stack_page(p))) - 1;
18827+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
18828 *childregs = *regs;
18829
18830 childregs->ax = 0;
18831@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
18832 p->thread.sp = (unsigned long) childregs;
18833 p->thread.sp0 = (unsigned long) (childregs+1);
18834 p->thread.usersp = me->thread.usersp;
18835+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
18836
18837 set_tsk_thread_flag(p, TIF_FORK);
18838
18839@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18840 struct thread_struct *prev = &prev_p->thread;
18841 struct thread_struct *next = &next_p->thread;
18842 int cpu = smp_processor_id();
18843- struct tss_struct *tss = &per_cpu(init_tss, cpu);
18844+ struct tss_struct *tss = init_tss + cpu;
18845 unsigned fsindex, gsindex;
18846 bool preload_fpu;
18847
18848@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
18849 prev->usersp = percpu_read(old_rsp);
18850 percpu_write(old_rsp, next->usersp);
18851 percpu_write(current_task, next_p);
18852+ percpu_write(current_tinfo, &next_p->tinfo);
18853
18854- percpu_write(kernel_stack,
18855- (unsigned long)task_stack_page(next_p) +
18856- THREAD_SIZE - KERNEL_STACK_OFFSET);
18857+ percpu_write(kernel_stack, next->sp0);
18858
18859 /*
18860 * Now maybe reload the debug registers and handle I/O bitmaps
18861@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_struct *p)
18862 if (!p || p == current || p->state == TASK_RUNNING)
18863 return 0;
18864 stack = (unsigned long)task_stack_page(p);
18865- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
18866+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
18867 return 0;
18868 fp = *(u64 *)(p->thread.sp);
18869 do {
18870- if (fp < (unsigned long)stack ||
18871- fp >= (unsigned long)stack+THREAD_SIZE)
18872+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
18873 return 0;
18874 ip = *(u64 *)(fp+8);
18875 if (!in_sched_functions(ip))
18876diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
18877index c06acdd..3f5fff5 100644
18878--- a/arch/x86/kernel/ptrace.c
18879+++ b/arch/x86/kernel/ptrace.c
18880@@ -925,7 +925,7 @@ static const struct user_regset_view user_x86_32_view; /* Initialized below. */
18881 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18882 {
18883 int ret;
18884- unsigned long __user *datap = (unsigned long __user *)data;
18885+ unsigned long __user *datap = (__force unsigned long __user *)data;
18886
18887 switch (request) {
18888 /* read the word at location addr in the USER area. */
18889@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18890 if (addr < 0)
18891 return -EIO;
18892 ret = do_get_thread_area(child, addr,
18893- (struct user_desc __user *) data);
18894+ (__force struct user_desc __user *) data);
18895 break;
18896
18897 case PTRACE_SET_THREAD_AREA:
18898 if (addr < 0)
18899 return -EIO;
18900 ret = do_set_thread_area(child, addr,
18901- (struct user_desc __user *) data, 0);
18902+ (__force struct user_desc __user *) data, 0);
18903 break;
18904 #endif
18905
18906@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18907 #ifdef CONFIG_X86_PTRACE_BTS
18908 case PTRACE_BTS_CONFIG:
18909 ret = ptrace_bts_config
18910- (child, data, (struct ptrace_bts_config __user *)addr);
18911+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18912 break;
18913
18914 case PTRACE_BTS_STATUS:
18915 ret = ptrace_bts_status
18916- (child, data, (struct ptrace_bts_config __user *)addr);
18917+ (child, data, (__force struct ptrace_bts_config __user *)addr);
18918 break;
18919
18920 case PTRACE_BTS_SIZE:
18921@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18922
18923 case PTRACE_BTS_GET:
18924 ret = ptrace_bts_read_record
18925- (child, data, (struct bts_struct __user *) addr);
18926+ (child, data, (__force struct bts_struct __user *) addr);
18927 break;
18928
18929 case PTRACE_BTS_CLEAR:
18930@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
18931
18932 case PTRACE_BTS_DRAIN:
18933 ret = ptrace_bts_drain
18934- (child, data, (struct bts_struct __user *) addr);
18935+ (child, data, (__force struct bts_struct __user *) addr);
18936 break;
18937 #endif /* CONFIG_X86_PTRACE_BTS */
18938
18939@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18940 info.si_code = si_code;
18941
18942 /* User-mode ip? */
18943- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
18944+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
18945
18946 /* Send us the fake SIGTRAP */
18947 force_sig_info(SIGTRAP, &info, tsk);
18948@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
18949 * We must return the syscall number to actually look up in the table.
18950 * This can be -1L to skip running any syscall at all.
18951 */
18952-asmregparm long syscall_trace_enter(struct pt_regs *regs)
18953+long syscall_trace_enter(struct pt_regs *regs)
18954 {
18955 long ret = 0;
18956
18957@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(struct pt_regs *regs)
18958 return ret ?: regs->orig_ax;
18959 }
18960
18961-asmregparm void syscall_trace_leave(struct pt_regs *regs)
18962+void syscall_trace_leave(struct pt_regs *regs)
18963 {
18964 if (unlikely(current->audit_context))
18965 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
18966diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
18967index cf98100..e76e03d 100644
18968--- a/arch/x86/kernel/reboot.c
18969+++ b/arch/x86/kernel/reboot.c
18970@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
18971 EXPORT_SYMBOL(pm_power_off);
18972
18973 static const struct desc_ptr no_idt = {};
18974-static int reboot_mode;
18975+static unsigned short reboot_mode;
18976 enum reboot_type reboot_type = BOOT_KBD;
18977 int reboot_force;
18978
18979@@ -292,12 +292,12 @@ core_initcall(reboot_init);
18980 controller to pulse the CPU reset line, which is more thorough, but
18981 doesn't work with at least one type of 486 motherboard. It is easy
18982 to stop this code working; hence the copious comments. */
18983-static const unsigned long long
18984-real_mode_gdt_entries [3] =
18985+static struct desc_struct
18986+real_mode_gdt_entries [3] __read_only =
18987 {
18988- 0x0000000000000000ULL, /* Null descriptor */
18989- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
18990- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
18991+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
18992+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
18993+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
18994 };
18995
18996 static const struct desc_ptr
18997@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios [] =
18998 * specified by the code and length parameters.
18999 * We assume that length will aways be less that 100!
19000 */
19001-void machine_real_restart(const unsigned char *code, int length)
19002+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
19003 {
19004 local_irq_disable();
19005
19006@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned char *code, int length)
19007 /* Remap the kernel at virtual address zero, as well as offset zero
19008 from the kernel segment. This assumes the kernel segment starts at
19009 virtual address PAGE_OFFSET. */
19010- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19011- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
19012+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19013+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
19014
19015 /*
19016 * Use `swapper_pg_dir' as our page directory.
19017@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned char *code, int length)
19018 boot)". This seems like a fairly standard thing that gets set by
19019 REBOOT.COM programs, and the previous reset routine did this
19020 too. */
19021- *((unsigned short *)0x472) = reboot_mode;
19022+ *(unsigned short *)(__va(0x472)) = reboot_mode;
19023
19024 /* For the switch to real mode, copy some code to low memory. It has
19025 to be in the first 64k because it is running in 16-bit mode, and it
19026 has to have the same physical and virtual address, because it turns
19027 off paging. Copy it near the end of the first page, out of the way
19028 of BIOS variables. */
19029- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
19030- real_mode_switch, sizeof (real_mode_switch));
19031- memcpy((void *)(0x1000 - 100), code, length);
19032+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
19033+ memcpy(__va(0x1000 - 100), code, length);
19034
19035 /* Set up the IDT for real mode. */
19036 load_idt(&real_mode_idt);
19037@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned char *code, int length)
19038 __asm__ __volatile__ ("ljmp $0x0008,%0"
19039 :
19040 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
19041+ do { } while (1);
19042 }
19043 #ifdef CONFIG_APM_MODULE
19044 EXPORT_SYMBOL(machine_real_restart);
19045@@ -544,7 +544,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
19046 {
19047 }
19048
19049-static void native_machine_emergency_restart(void)
19050+__noreturn static void native_machine_emergency_restart(void)
19051 {
19052 int i;
19053
19054@@ -659,13 +659,13 @@ void native_machine_shutdown(void)
19055 #endif
19056 }
19057
19058-static void __machine_emergency_restart(int emergency)
19059+static __noreturn void __machine_emergency_restart(int emergency)
19060 {
19061 reboot_emergency = emergency;
19062 machine_ops.emergency_restart();
19063 }
19064
19065-static void native_machine_restart(char *__unused)
19066+static __noreturn void native_machine_restart(char *__unused)
19067 {
19068 printk("machine restart\n");
19069
19070@@ -674,7 +674,7 @@ static void native_machine_restart(char *__unused)
19071 __machine_emergency_restart(0);
19072 }
19073
19074-static void native_machine_halt(void)
19075+static __noreturn void native_machine_halt(void)
19076 {
19077 /* stop other cpus and apics */
19078 machine_shutdown();
19079@@ -685,7 +685,7 @@ static void native_machine_halt(void)
19080 stop_this_cpu(NULL);
19081 }
19082
19083-static void native_machine_power_off(void)
19084+__noreturn static void native_machine_power_off(void)
19085 {
19086 if (pm_power_off) {
19087 if (!reboot_force)
19088@@ -694,6 +694,7 @@ static void native_machine_power_off(void)
19089 }
19090 /* a fallback in case there is no PM info available */
19091 tboot_shutdown(TB_SHUTDOWN_HALT);
19092+ do { } while (1);
19093 }
19094
19095 struct machine_ops machine_ops = {
19096diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
19097index 7a6f3b3..976a959 100644
19098--- a/arch/x86/kernel/relocate_kernel_64.S
19099+++ b/arch/x86/kernel/relocate_kernel_64.S
19100@@ -11,6 +11,7 @@
19101 #include <asm/kexec.h>
19102 #include <asm/processor-flags.h>
19103 #include <asm/pgtable_types.h>
19104+#include <asm/alternative-asm.h>
19105
19106 /*
19107 * Must be relocatable PIC code callable as a C function
19108@@ -167,6 +168,7 @@ identity_mapped:
19109 xorq %r14, %r14
19110 xorq %r15, %r15
19111
19112+ pax_force_retaddr 0, 1
19113 ret
19114
19115 1:
19116diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
19117index 5449a26..0b6c759 100644
19118--- a/arch/x86/kernel/setup.c
19119+++ b/arch/x86/kernel/setup.c
19120@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
19121
19122 if (!boot_params.hdr.root_flags)
19123 root_mountflags &= ~MS_RDONLY;
19124- init_mm.start_code = (unsigned long) _text;
19125- init_mm.end_code = (unsigned long) _etext;
19126+ init_mm.start_code = ktla_ktva((unsigned long) _text);
19127+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
19128 init_mm.end_data = (unsigned long) _edata;
19129 init_mm.brk = _brk_end;
19130
19131- code_resource.start = virt_to_phys(_text);
19132- code_resource.end = virt_to_phys(_etext)-1;
19133- data_resource.start = virt_to_phys(_etext);
19134+ code_resource.start = virt_to_phys(ktla_ktva(_text));
19135+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
19136+ data_resource.start = virt_to_phys(_sdata);
19137 data_resource.end = virt_to_phys(_edata)-1;
19138 bss_resource.start = virt_to_phys(&__bss_start);
19139 bss_resource.end = virt_to_phys(&__bss_stop)-1;
19140diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
19141index d559af9..524c6ad 100644
19142--- a/arch/x86/kernel/setup_percpu.c
19143+++ b/arch/x86/kernel/setup_percpu.c
19144@@ -25,19 +25,17 @@
19145 # define DBG(x...)
19146 #endif
19147
19148-DEFINE_PER_CPU(int, cpu_number);
19149+#ifdef CONFIG_SMP
19150+DEFINE_PER_CPU(unsigned int, cpu_number);
19151 EXPORT_PER_CPU_SYMBOL(cpu_number);
19152+#endif
19153
19154-#ifdef CONFIG_X86_64
19155 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
19156-#else
19157-#define BOOT_PERCPU_OFFSET 0
19158-#endif
19159
19160 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
19161 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
19162
19163-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
19164+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
19165 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
19166 };
19167 EXPORT_SYMBOL(__per_cpu_offset);
19168@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(int cpu)
19169 {
19170 #ifdef CONFIG_X86_32
19171 struct desc_struct gdt;
19172+ unsigned long base = per_cpu_offset(cpu);
19173
19174- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
19175- 0x2 | DESCTYPE_S, 0x8);
19176- gdt.s = 1;
19177+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
19178+ 0x83 | DESCTYPE_S, 0xC);
19179 write_gdt_entry(get_cpu_gdt_table(cpu),
19180 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
19181 #endif
19182@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
19183 /* alrighty, percpu areas up and running */
19184 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
19185 for_each_possible_cpu(cpu) {
19186+#ifdef CONFIG_CC_STACKPROTECTOR
19187+#ifdef CONFIG_X86_32
19188+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
19189+#endif
19190+#endif
19191 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
19192 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
19193 per_cpu(cpu_number, cpu) = cpu;
19194@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
19195 early_per_cpu_map(x86_cpu_to_node_map, cpu);
19196 #endif
19197 #endif
19198+#ifdef CONFIG_CC_STACKPROTECTOR
19199+#ifdef CONFIG_X86_32
19200+ if (!cpu)
19201+ per_cpu(stack_canary.canary, cpu) = canary;
19202+#endif
19203+#endif
19204 /*
19205 * Up to this point, the boot CPU has been using .data.init
19206 * area. Reload any changed state for the boot CPU.
19207diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
19208index 6a44a76..a9287a1 100644
19209--- a/arch/x86/kernel/signal.c
19210+++ b/arch/x86/kernel/signal.c
19211@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsigned long sp)
19212 * Align the stack pointer according to the i386 ABI,
19213 * i.e. so that on function entry ((sp + 4) & 15) == 0.
19214 */
19215- sp = ((sp + 4) & -16ul) - 4;
19216+ sp = ((sp - 12) & -16ul) - 4;
19217 #else /* !CONFIG_X86_32 */
19218 sp = round_down(sp, 16) - 8;
19219 #endif
19220@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
19221 * Return an always-bogus address instead so we will die with SIGSEGV.
19222 */
19223 if (onsigstack && !likely(on_sig_stack(sp)))
19224- return (void __user *)-1L;
19225+ return (__force void __user *)-1L;
19226
19227 /* save i387 state */
19228 if (used_math() && save_i387_xstate(*fpstate) < 0)
19229- return (void __user *)-1L;
19230+ return (__force void __user *)-1L;
19231
19232 return (void __user *)sp;
19233 }
19234@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19235 }
19236
19237 if (current->mm->context.vdso)
19238- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19239+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
19240 else
19241- restorer = &frame->retcode;
19242+ restorer = (void __user *)&frame->retcode;
19243 if (ka->sa.sa_flags & SA_RESTORER)
19244 restorer = ka->sa.sa_restorer;
19245
19246@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
19247 * reasons and because gdb uses it as a signature to notice
19248 * signal handler stack frames.
19249 */
19250- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
19251+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
19252
19253 if (err)
19254 return -EFAULT;
19255@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19256 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
19257
19258 /* Set up to return from userspace. */
19259- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19260+ if (current->mm->context.vdso)
19261+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
19262+ else
19263+ restorer = (void __user *)&frame->retcode;
19264 if (ka->sa.sa_flags & SA_RESTORER)
19265 restorer = ka->sa.sa_restorer;
19266 put_user_ex(restorer, &frame->pretcode);
19267@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
19268 * reasons and because gdb uses it as a signature to notice
19269 * signal handler stack frames.
19270 */
19271- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
19272+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
19273 } put_user_catch(err);
19274
19275 if (err)
19276@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *regs)
19277 int signr;
19278 sigset_t *oldset;
19279
19280+ pax_track_stack();
19281+
19282 /*
19283 * We want the common case to go fast, which is why we may in certain
19284 * cases get here from kernel mode. Just return without doing anything
19285@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *regs)
19286 * X86_32: vm86 regs switched out by assembly code before reaching
19287 * here, so testing against kernel CS suffices.
19288 */
19289- if (!user_mode(regs))
19290+ if (!user_mode_novm(regs))
19291 return;
19292
19293 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
19294diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
19295index 7e8e905..64d5c32 100644
19296--- a/arch/x86/kernel/smpboot.c
19297+++ b/arch/x86/kernel/smpboot.c
19298@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
19299 */
19300 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
19301
19302-void cpu_hotplug_driver_lock()
19303+void cpu_hotplug_driver_lock(void)
19304 {
19305- mutex_lock(&x86_cpu_hotplug_driver_mutex);
19306+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
19307 }
19308
19309-void cpu_hotplug_driver_unlock()
19310+void cpu_hotplug_driver_unlock(void)
19311 {
19312- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19313+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
19314 }
19315
19316 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
19317@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
19318 * target processor state.
19319 */
19320 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
19321- (unsigned long)stack_start.sp);
19322+ stack_start);
19323
19324 /*
19325 * Run STARTUP IPI loop.
19326@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
19327 set_idle_for_cpu(cpu, c_idle.idle);
19328 do_rest:
19329 per_cpu(current_task, cpu) = c_idle.idle;
19330+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
19331 #ifdef CONFIG_X86_32
19332 /* Stack for startup_32 can be just as for start_secondary onwards */
19333 irq_ctx_init(cpu);
19334@@ -750,13 +751,15 @@ do_rest:
19335 #else
19336 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
19337 initial_gs = per_cpu_offset(cpu);
19338- per_cpu(kernel_stack, cpu) =
19339- (unsigned long)task_stack_page(c_idle.idle) -
19340- KERNEL_STACK_OFFSET + THREAD_SIZE;
19341+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
19342 #endif
19343+
19344+ pax_open_kernel();
19345 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
19346+ pax_close_kernel();
19347+
19348 initial_code = (unsigned long)start_secondary;
19349- stack_start.sp = (void *) c_idle.idle->thread.sp;
19350+ stack_start = c_idle.idle->thread.sp;
19351
19352 /* start_ip had better be page-aligned! */
19353 start_ip = setup_trampoline();
19354@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int cpu)
19355
19356 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
19357
19358+#ifdef CONFIG_PAX_PER_CPU_PGD
19359+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
19360+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
19361+ KERNEL_PGD_PTRS);
19362+#endif
19363+
19364 err = do_boot_cpu(apicid, cpu);
19365
19366 if (err) {
19367diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
19368index 3149032..14f1053 100644
19369--- a/arch/x86/kernel/step.c
19370+++ b/arch/x86/kernel/step.c
19371@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19372 struct desc_struct *desc;
19373 unsigned long base;
19374
19375- seg &= ~7UL;
19376+ seg >>= 3;
19377
19378 mutex_lock(&child->mm->context.lock);
19379- if (unlikely((seg >> 3) >= child->mm->context.size))
19380+ if (unlikely(seg >= child->mm->context.size))
19381 addr = -1L; /* bogus selector, access would fault */
19382 else {
19383 desc = child->mm->context.ldt + seg;
19384@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
19385 addr += base;
19386 }
19387 mutex_unlock(&child->mm->context.lock);
19388- }
19389+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
19390+ addr = ktla_ktva(addr);
19391
19392 return addr;
19393 }
19394@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19395 unsigned char opcode[15];
19396 unsigned long addr = convert_ip_to_linear(child, regs);
19397
19398+ if (addr == -EINVAL)
19399+ return 0;
19400+
19401 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
19402 for (i = 0; i < copied; i++) {
19403 switch (opcode[i]) {
19404@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
19405
19406 #ifdef CONFIG_X86_64
19407 case 0x40 ... 0x4f:
19408- if (regs->cs != __USER_CS)
19409+ if ((regs->cs & 0xffff) != __USER_CS)
19410 /* 32-bit mode: register increment */
19411 return 0;
19412 /* 64-bit mode: REX prefix */
19413diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
19414index dee1ff7..a397f7f 100644
19415--- a/arch/x86/kernel/sys_i386_32.c
19416+++ b/arch/x86/kernel/sys_i386_32.c
19417@@ -24,6 +24,21 @@
19418
19419 #include <asm/syscalls.h>
19420
19421+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
19422+{
19423+ unsigned long pax_task_size = TASK_SIZE;
19424+
19425+#ifdef CONFIG_PAX_SEGMEXEC
19426+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
19427+ pax_task_size = SEGMEXEC_TASK_SIZE;
19428+#endif
19429+
19430+ if (len > pax_task_size || addr > pax_task_size - len)
19431+ return -EINVAL;
19432+
19433+ return 0;
19434+}
19435+
19436 /*
19437 * Perform the select(nd, in, out, ex, tv) and mmap() system
19438 * calls. Linux/i386 didn't use to be able to handle more than
19439@@ -58,6 +73,212 @@ out:
19440 return err;
19441 }
19442
19443+unsigned long
19444+arch_get_unmapped_area(struct file *filp, unsigned long addr,
19445+ unsigned long len, unsigned long pgoff, unsigned long flags)
19446+{
19447+ struct mm_struct *mm = current->mm;
19448+ struct vm_area_struct *vma;
19449+ unsigned long start_addr, pax_task_size = TASK_SIZE;
19450+
19451+#ifdef CONFIG_PAX_SEGMEXEC
19452+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19453+ pax_task_size = SEGMEXEC_TASK_SIZE;
19454+#endif
19455+
19456+ pax_task_size -= PAGE_SIZE;
19457+
19458+ if (len > pax_task_size)
19459+ return -ENOMEM;
19460+
19461+ if (flags & MAP_FIXED)
19462+ return addr;
19463+
19464+#ifdef CONFIG_PAX_RANDMMAP
19465+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19466+#endif
19467+
19468+ if (addr) {
19469+ addr = PAGE_ALIGN(addr);
19470+ if (pax_task_size - len >= addr) {
19471+ vma = find_vma(mm, addr);
19472+ if (check_heap_stack_gap(vma, addr, len))
19473+ return addr;
19474+ }
19475+ }
19476+ if (len > mm->cached_hole_size) {
19477+ start_addr = addr = mm->free_area_cache;
19478+ } else {
19479+ start_addr = addr = mm->mmap_base;
19480+ mm->cached_hole_size = 0;
19481+ }
19482+
19483+#ifdef CONFIG_PAX_PAGEEXEC
19484+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
19485+ start_addr = 0x00110000UL;
19486+
19487+#ifdef CONFIG_PAX_RANDMMAP
19488+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19489+ start_addr += mm->delta_mmap & 0x03FFF000UL;
19490+#endif
19491+
19492+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
19493+ start_addr = addr = mm->mmap_base;
19494+ else
19495+ addr = start_addr;
19496+ }
19497+#endif
19498+
19499+full_search:
19500+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
19501+ /* At this point: (!vma || addr < vma->vm_end). */
19502+ if (pax_task_size - len < addr) {
19503+ /*
19504+ * Start a new search - just in case we missed
19505+ * some holes.
19506+ */
19507+ if (start_addr != mm->mmap_base) {
19508+ start_addr = addr = mm->mmap_base;
19509+ mm->cached_hole_size = 0;
19510+ goto full_search;
19511+ }
19512+ return -ENOMEM;
19513+ }
19514+ if (check_heap_stack_gap(vma, addr, len))
19515+ break;
19516+ if (addr + mm->cached_hole_size < vma->vm_start)
19517+ mm->cached_hole_size = vma->vm_start - addr;
19518+ addr = vma->vm_end;
19519+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
19520+ start_addr = addr = mm->mmap_base;
19521+ mm->cached_hole_size = 0;
19522+ goto full_search;
19523+ }
19524+ }
19525+
19526+ /*
19527+ * Remember the place where we stopped the search:
19528+ */
19529+ mm->free_area_cache = addr + len;
19530+ return addr;
19531+}
19532+
19533+unsigned long
19534+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19535+ const unsigned long len, const unsigned long pgoff,
19536+ const unsigned long flags)
19537+{
19538+ struct vm_area_struct *vma;
19539+ struct mm_struct *mm = current->mm;
19540+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
19541+
19542+#ifdef CONFIG_PAX_SEGMEXEC
19543+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19544+ pax_task_size = SEGMEXEC_TASK_SIZE;
19545+#endif
19546+
19547+ pax_task_size -= PAGE_SIZE;
19548+
19549+ /* requested length too big for entire address space */
19550+ if (len > pax_task_size)
19551+ return -ENOMEM;
19552+
19553+ if (flags & MAP_FIXED)
19554+ return addr;
19555+
19556+#ifdef CONFIG_PAX_PAGEEXEC
19557+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
19558+ goto bottomup;
19559+#endif
19560+
19561+#ifdef CONFIG_PAX_RANDMMAP
19562+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19563+#endif
19564+
19565+ /* requesting a specific address */
19566+ if (addr) {
19567+ addr = PAGE_ALIGN(addr);
19568+ if (pax_task_size - len >= addr) {
19569+ vma = find_vma(mm, addr);
19570+ if (check_heap_stack_gap(vma, addr, len))
19571+ return addr;
19572+ }
19573+ }
19574+
19575+ /* check if free_area_cache is useful for us */
19576+ if (len <= mm->cached_hole_size) {
19577+ mm->cached_hole_size = 0;
19578+ mm->free_area_cache = mm->mmap_base;
19579+ }
19580+
19581+ /* either no address requested or can't fit in requested address hole */
19582+ addr = mm->free_area_cache;
19583+
19584+ /* make sure it can fit in the remaining address space */
19585+ if (addr > len) {
19586+ vma = find_vma(mm, addr-len);
19587+ if (check_heap_stack_gap(vma, addr - len, len))
19588+ /* remember the address as a hint for next time */
19589+ return (mm->free_area_cache = addr-len);
19590+ }
19591+
19592+ if (mm->mmap_base < len)
19593+ goto bottomup;
19594+
19595+ addr = mm->mmap_base-len;
19596+
19597+ do {
19598+ /*
19599+ * Lookup failure means no vma is above this address,
19600+ * else if new region fits below vma->vm_start,
19601+ * return with success:
19602+ */
19603+ vma = find_vma(mm, addr);
19604+ if (check_heap_stack_gap(vma, addr, len))
19605+ /* remember the address as a hint for next time */
19606+ return (mm->free_area_cache = addr);
19607+
19608+ /* remember the largest hole we saw so far */
19609+ if (addr + mm->cached_hole_size < vma->vm_start)
19610+ mm->cached_hole_size = vma->vm_start - addr;
19611+
19612+ /* try just below the current vma->vm_start */
19613+ addr = skip_heap_stack_gap(vma, len);
19614+ } while (!IS_ERR_VALUE(addr));
19615+
19616+bottomup:
19617+ /*
19618+ * A failed mmap() very likely causes application failure,
19619+ * so fall back to the bottom-up function here. This scenario
19620+ * can happen with large stack limits and large mmap()
19621+ * allocations.
19622+ */
19623+
19624+#ifdef CONFIG_PAX_SEGMEXEC
19625+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
19626+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
19627+ else
19628+#endif
19629+
19630+ mm->mmap_base = TASK_UNMAPPED_BASE;
19631+
19632+#ifdef CONFIG_PAX_RANDMMAP
19633+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19634+ mm->mmap_base += mm->delta_mmap;
19635+#endif
19636+
19637+ mm->free_area_cache = mm->mmap_base;
19638+ mm->cached_hole_size = ~0UL;
19639+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19640+ /*
19641+ * Restore the topdown base:
19642+ */
19643+ mm->mmap_base = base;
19644+ mm->free_area_cache = base;
19645+ mm->cached_hole_size = ~0UL;
19646+
19647+ return addr;
19648+}
19649
19650 struct sel_arg_struct {
19651 unsigned long n;
19652@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19653 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
19654 case SEMTIMEDOP:
19655 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
19656- (const struct timespec __user *)fifth);
19657+ (__force const struct timespec __user *)fifth);
19658
19659 case SEMGET:
19660 return sys_semget(first, second, third);
19661@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int first, int second,
19662 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
19663 if (ret)
19664 return ret;
19665- return put_user(raddr, (ulong __user *) third);
19666+ return put_user(raddr, (__force ulong __user *) third);
19667 }
19668 case 1: /* iBCS2 emulator entry point */
19669 if (!segment_eq(get_fs(), get_ds()))
19670@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldold_utsname __user *name)
19671
19672 return error;
19673 }
19674-
19675-
19676-/*
19677- * Do a system call from kernel instead of calling sys_execve so we
19678- * end up with proper pt_regs.
19679- */
19680-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
19681-{
19682- long __res;
19683- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
19684- : "=a" (__res)
19685- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
19686- return __res;
19687-}
19688diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
19689index 8aa2057..b604bc1 100644
19690--- a/arch/x86/kernel/sys_x86_64.c
19691+++ b/arch/x86/kernel/sys_x86_64.c
19692@@ -32,8 +32,8 @@ out:
19693 return error;
19694 }
19695
19696-static void find_start_end(unsigned long flags, unsigned long *begin,
19697- unsigned long *end)
19698+static void find_start_end(struct mm_struct *mm, unsigned long flags,
19699+ unsigned long *begin, unsigned long *end)
19700 {
19701 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
19702 unsigned long new_begin;
19703@@ -52,7 +52,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
19704 *begin = new_begin;
19705 }
19706 } else {
19707- *begin = TASK_UNMAPPED_BASE;
19708+ *begin = mm->mmap_base;
19709 *end = TASK_SIZE;
19710 }
19711 }
19712@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
19713 if (flags & MAP_FIXED)
19714 return addr;
19715
19716- find_start_end(flags, &begin, &end);
19717+ find_start_end(mm, flags, &begin, &end);
19718
19719 if (len > end)
19720 return -ENOMEM;
19721
19722+#ifdef CONFIG_PAX_RANDMMAP
19723+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19724+#endif
19725+
19726 if (addr) {
19727 addr = PAGE_ALIGN(addr);
19728 vma = find_vma(mm, addr);
19729- if (end - len >= addr &&
19730- (!vma || addr + len <= vma->vm_start))
19731+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
19732 return addr;
19733 }
19734 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
19735@@ -106,7 +109,7 @@ full_search:
19736 }
19737 return -ENOMEM;
19738 }
19739- if (!vma || addr + len <= vma->vm_start) {
19740+ if (check_heap_stack_gap(vma, addr, len)) {
19741 /*
19742 * Remember the place where we stopped the search:
19743 */
19744@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19745 {
19746 struct vm_area_struct *vma;
19747 struct mm_struct *mm = current->mm;
19748- unsigned long addr = addr0;
19749+ unsigned long base = mm->mmap_base, addr = addr0;
19750
19751 /* requested length too big for entire address space */
19752 if (len > TASK_SIZE)
19753@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19754 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
19755 goto bottomup;
19756
19757+#ifdef CONFIG_PAX_RANDMMAP
19758+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
19759+#endif
19760+
19761 /* requesting a specific address */
19762 if (addr) {
19763 addr = PAGE_ALIGN(addr);
19764- vma = find_vma(mm, addr);
19765- if (TASK_SIZE - len >= addr &&
19766- (!vma || addr + len <= vma->vm_start))
19767- return addr;
19768+ if (TASK_SIZE - len >= addr) {
19769+ vma = find_vma(mm, addr);
19770+ if (check_heap_stack_gap(vma, addr, len))
19771+ return addr;
19772+ }
19773 }
19774
19775 /* check if free_area_cache is useful for us */
19776@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19777 /* make sure it can fit in the remaining address space */
19778 if (addr > len) {
19779 vma = find_vma(mm, addr-len);
19780- if (!vma || addr <= vma->vm_start)
19781+ if (check_heap_stack_gap(vma, addr - len, len))
19782 /* remember the address as a hint for next time */
19783 return mm->free_area_cache = addr-len;
19784 }
19785@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19786 * return with success:
19787 */
19788 vma = find_vma(mm, addr);
19789- if (!vma || addr+len <= vma->vm_start)
19790+ if (check_heap_stack_gap(vma, addr, len))
19791 /* remember the address as a hint for next time */
19792 return mm->free_area_cache = addr;
19793
19794@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
19795 mm->cached_hole_size = vma->vm_start - addr;
19796
19797 /* try just below the current vma->vm_start */
19798- addr = vma->vm_start-len;
19799- } while (len < vma->vm_start);
19800+ addr = skip_heap_stack_gap(vma, len);
19801+ } while (!IS_ERR_VALUE(addr));
19802
19803 bottomup:
19804 /*
19805@@ -198,13 +206,21 @@ bottomup:
19806 * can happen with large stack limits and large mmap()
19807 * allocations.
19808 */
19809+ mm->mmap_base = TASK_UNMAPPED_BASE;
19810+
19811+#ifdef CONFIG_PAX_RANDMMAP
19812+ if (mm->pax_flags & MF_PAX_RANDMMAP)
19813+ mm->mmap_base += mm->delta_mmap;
19814+#endif
19815+
19816+ mm->free_area_cache = mm->mmap_base;
19817 mm->cached_hole_size = ~0UL;
19818- mm->free_area_cache = TASK_UNMAPPED_BASE;
19819 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
19820 /*
19821 * Restore the topdown base:
19822 */
19823- mm->free_area_cache = mm->mmap_base;
19824+ mm->mmap_base = base;
19825+ mm->free_area_cache = base;
19826 mm->cached_hole_size = ~0UL;
19827
19828 return addr;
19829diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
19830index 76d70a4..4c94a44 100644
19831--- a/arch/x86/kernel/syscall_table_32.S
19832+++ b/arch/x86/kernel/syscall_table_32.S
19833@@ -1,3 +1,4 @@
19834+.section .rodata,"a",@progbits
19835 ENTRY(sys_call_table)
19836 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
19837 .long sys_exit
19838diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
19839index 46b8277..3349d55 100644
19840--- a/arch/x86/kernel/tboot.c
19841+++ b/arch/x86/kernel/tboot.c
19842@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
19843
19844 void tboot_shutdown(u32 shutdown_type)
19845 {
19846- void (*shutdown)(void);
19847+ void (* __noreturn shutdown)(void);
19848
19849 if (!tboot_enabled())
19850 return;
19851@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
19852
19853 switch_to_tboot_pt();
19854
19855- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
19856+ shutdown = (void *)tboot->shutdown_entry;
19857 shutdown();
19858
19859 /* should not reach here */
19860@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
19861 tboot_shutdown(acpi_shutdown_map[sleep_state]);
19862 }
19863
19864-static atomic_t ap_wfs_count;
19865+static atomic_unchecked_t ap_wfs_count;
19866
19867 static int tboot_wait_for_aps(int num_aps)
19868 {
19869@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
19870 {
19871 switch (action) {
19872 case CPU_DYING:
19873- atomic_inc(&ap_wfs_count);
19874+ atomic_inc_unchecked(&ap_wfs_count);
19875 if (num_online_cpus() == 1)
19876- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
19877+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
19878 return NOTIFY_BAD;
19879 break;
19880 }
19881@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
19882
19883 tboot_create_trampoline();
19884
19885- atomic_set(&ap_wfs_count, 0);
19886+ atomic_set_unchecked(&ap_wfs_count, 0);
19887 register_hotcpu_notifier(&tboot_cpu_notifier);
19888 return 0;
19889 }
19890diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
19891index be25734..87fe232 100644
19892--- a/arch/x86/kernel/time.c
19893+++ b/arch/x86/kernel/time.c
19894@@ -26,17 +26,13 @@
19895 int timer_ack;
19896 #endif
19897
19898-#ifdef CONFIG_X86_64
19899-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
19900-#endif
19901-
19902 unsigned long profile_pc(struct pt_regs *regs)
19903 {
19904 unsigned long pc = instruction_pointer(regs);
19905
19906- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
19907+ if (!user_mode(regs) && in_lock_functions(pc)) {
19908 #ifdef CONFIG_FRAME_POINTER
19909- return *(unsigned long *)(regs->bp + sizeof(long));
19910+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
19911 #else
19912 unsigned long *sp =
19913 (unsigned long *)kernel_stack_pointer(regs);
19914@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
19915 * or above a saved flags. Eflags has bits 22-31 zero,
19916 * kernel addresses don't.
19917 */
19918+
19919+#ifdef CONFIG_PAX_KERNEXEC
19920+ return ktla_ktva(sp[0]);
19921+#else
19922 if (sp[0] >> 22)
19923 return sp[0];
19924 if (sp[1] >> 22)
19925 return sp[1];
19926 #endif
19927+
19928+#endif
19929 }
19930 return pc;
19931 }
19932diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
19933index 6bb7b85..dd853e1 100644
19934--- a/arch/x86/kernel/tls.c
19935+++ b/arch/x86/kernel/tls.c
19936@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
19937 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
19938 return -EINVAL;
19939
19940+#ifdef CONFIG_PAX_SEGMEXEC
19941+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
19942+ return -EINVAL;
19943+#endif
19944+
19945 set_tls_desc(p, idx, &info, 1);
19946
19947 return 0;
19948diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
19949index 8508237..229b664 100644
19950--- a/arch/x86/kernel/trampoline_32.S
19951+++ b/arch/x86/kernel/trampoline_32.S
19952@@ -32,6 +32,12 @@
19953 #include <asm/segment.h>
19954 #include <asm/page_types.h>
19955
19956+#ifdef CONFIG_PAX_KERNEXEC
19957+#define ta(X) (X)
19958+#else
19959+#define ta(X) ((X) - __PAGE_OFFSET)
19960+#endif
19961+
19962 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
19963 __CPUINITRODATA
19964 .code16
19965@@ -60,7 +66,7 @@ r_base = .
19966 inc %ax # protected mode (PE) bit
19967 lmsw %ax # into protected mode
19968 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
19969- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
19970+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
19971
19972 # These need to be in the same 64K segment as the above;
19973 # hence we don't use the boot_gdt_descr defined in head.S
19974diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/kernel/trampoline_64.S
19975index 3af2dff..ba8aa49 100644
19976--- a/arch/x86/kernel/trampoline_64.S
19977+++ b/arch/x86/kernel/trampoline_64.S
19978@@ -91,7 +91,7 @@ startup_32:
19979 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
19980 movl %eax, %ds
19981
19982- movl $X86_CR4_PAE, %eax
19983+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
19984 movl %eax, %cr4 # Enable PAE mode
19985
19986 # Setup trampoline 4 level pagetables
19987@@ -127,7 +127,7 @@ startup_64:
19988 no_longmode:
19989 hlt
19990 jmp no_longmode
19991-#include "verify_cpu_64.S"
19992+#include "verify_cpu.S"
19993
19994 # Careful these need to be in the same 64K segment as the above;
19995 tidt:
19996@@ -138,7 +138,7 @@ tidt:
19997 # so the kernel can live anywhere
19998 .balign 4
19999 tgdt:
20000- .short tgdt_end - tgdt # gdt limit
20001+ .short tgdt_end - tgdt - 1 # gdt limit
20002 .long tgdt - r_base
20003 .short 0
20004 .quad 0x00cf9b000000ffff # __KERNEL32_CS
20005diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
20006index 7e37dce..ec3f8e5 100644
20007--- a/arch/x86/kernel/traps.c
20008+++ b/arch/x86/kernel/traps.c
20009@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
20010
20011 /* Do we ignore FPU interrupts ? */
20012 char ignore_fpu_irq;
20013-
20014-/*
20015- * The IDT has to be page-aligned to simplify the Pentium
20016- * F0 0F bug workaround.
20017- */
20018-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
20019 #endif
20020
20021 DECLARE_BITMAP(used_vectors, NR_VECTORS);
20022@@ -112,19 +106,19 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
20023 static inline void
20024 die_if_kernel(const char *str, struct pt_regs *regs, long err)
20025 {
20026- if (!user_mode_vm(regs))
20027+ if (!user_mode(regs))
20028 die(str, regs, err);
20029 }
20030 #endif
20031
20032 static void __kprobes
20033-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20034+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
20035 long error_code, siginfo_t *info)
20036 {
20037 struct task_struct *tsk = current;
20038
20039 #ifdef CONFIG_X86_32
20040- if (regs->flags & X86_VM_MASK) {
20041+ if (v8086_mode(regs)) {
20042 /*
20043 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
20044 * On nmi (interrupt 2), do_trap should not be called.
20045@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
20046 }
20047 #endif
20048
20049- if (!user_mode(regs))
20050+ if (!user_mode_novm(regs))
20051 goto kernel_trap;
20052
20053 #ifdef CONFIG_X86_32
20054@@ -158,7 +152,7 @@ trap_signal:
20055 printk_ratelimit()) {
20056 printk(KERN_INFO
20057 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
20058- tsk->comm, tsk->pid, str,
20059+ tsk->comm, task_pid_nr(tsk), str,
20060 regs->ip, regs->sp, error_code);
20061 print_vma_addr(" in ", regs->ip);
20062 printk("\n");
20063@@ -175,8 +169,20 @@ kernel_trap:
20064 if (!fixup_exception(regs)) {
20065 tsk->thread.error_code = error_code;
20066 tsk->thread.trap_no = trapnr;
20067+
20068+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20069+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
20070+ str = "PAX: suspicious stack segment fault";
20071+#endif
20072+
20073 die(str, regs, error_code);
20074 }
20075+
20076+#ifdef CONFIG_PAX_REFCOUNT
20077+ if (trapnr == 4)
20078+ pax_report_refcount_overflow(regs);
20079+#endif
20080+
20081 return;
20082
20083 #ifdef CONFIG_X86_32
20084@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *regs, long error_code)
20085 conditional_sti(regs);
20086
20087 #ifdef CONFIG_X86_32
20088- if (regs->flags & X86_VM_MASK)
20089+ if (v8086_mode(regs))
20090 goto gp_in_vm86;
20091 #endif
20092
20093 tsk = current;
20094- if (!user_mode(regs))
20095+ if (!user_mode_novm(regs))
20096 goto gp_in_kernel;
20097
20098+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20099+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
20100+ struct mm_struct *mm = tsk->mm;
20101+ unsigned long limit;
20102+
20103+ down_write(&mm->mmap_sem);
20104+ limit = mm->context.user_cs_limit;
20105+ if (limit < TASK_SIZE) {
20106+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
20107+ up_write(&mm->mmap_sem);
20108+ return;
20109+ }
20110+ up_write(&mm->mmap_sem);
20111+ }
20112+#endif
20113+
20114 tsk->thread.error_code = error_code;
20115 tsk->thread.trap_no = 13;
20116
20117@@ -305,6 +327,13 @@ gp_in_kernel:
20118 if (notify_die(DIE_GPF, "general protection fault", regs,
20119 error_code, 13, SIGSEGV) == NOTIFY_STOP)
20120 return;
20121+
20122+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20123+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
20124+ die("PAX: suspicious general protection fault", regs, error_code);
20125+ else
20126+#endif
20127+
20128 die("general protection fault", regs, error_code);
20129 }
20130
20131@@ -435,6 +464,17 @@ static notrace __kprobes void default_do_nmi(struct pt_regs *regs)
20132 dotraplinkage notrace __kprobes void
20133 do_nmi(struct pt_regs *regs, long error_code)
20134 {
20135+
20136+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20137+ if (!user_mode(regs)) {
20138+ unsigned long cs = regs->cs & 0xFFFF;
20139+ unsigned long ip = ktva_ktla(regs->ip);
20140+
20141+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
20142+ regs->ip = ip;
20143+ }
20144+#endif
20145+
20146 nmi_enter();
20147
20148 inc_irq_stat(__nmi_count);
20149@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20150 }
20151
20152 #ifdef CONFIG_X86_32
20153- if (regs->flags & X86_VM_MASK)
20154+ if (v8086_mode(regs))
20155 goto debug_vm86;
20156 #endif
20157
20158@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
20159 * kernel space (but re-enable TF when returning to user mode).
20160 */
20161 if (condition & DR_STEP) {
20162- if (!user_mode(regs))
20163+ if (!user_mode_novm(regs))
20164 goto clear_TF_reenable;
20165 }
20166
20167@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
20168 * Handle strange cache flush from user space exception
20169 * in all other cases. This is undocumented behaviour.
20170 */
20171- if (regs->flags & X86_VM_MASK) {
20172+ if (v8086_mode(regs)) {
20173 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
20174 return;
20175 }
20176@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) smp_threshold_interrupt(void)
20177 void __math_state_restore(void)
20178 {
20179 struct thread_info *thread = current_thread_info();
20180- struct task_struct *tsk = thread->task;
20181+ struct task_struct *tsk = current;
20182
20183 /*
20184 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
20185@@ -825,8 +865,7 @@ void __math_state_restore(void)
20186 */
20187 asmlinkage void math_state_restore(void)
20188 {
20189- struct thread_info *thread = current_thread_info();
20190- struct task_struct *tsk = thread->task;
20191+ struct task_struct *tsk = current;
20192
20193 if (!tsk_used_math(tsk)) {
20194 local_irq_enable();
20195diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
20196new file mode 100644
20197index 0000000..50c5edd
20198--- /dev/null
20199+++ b/arch/x86/kernel/verify_cpu.S
20200@@ -0,0 +1,140 @@
20201+/*
20202+ *
20203+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
20204+ * code has been borrowed from boot/setup.S and was introduced by
20205+ * Andi Kleen.
20206+ *
20207+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20208+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20209+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20210+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
20211+ *
20212+ * This source code is licensed under the GNU General Public License,
20213+ * Version 2. See the file COPYING for more details.
20214+ *
20215+ * This is a common code for verification whether CPU supports
20216+ * long mode and SSE or not. It is not called directly instead this
20217+ * file is included at various places and compiled in that context.
20218+ * This file is expected to run in 32bit code. Currently:
20219+ *
20220+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
20221+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
20222+ * arch/x86/kernel/head_32.S: processor startup
20223+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
20224+ *
20225+ * verify_cpu, returns the status of longmode and SSE in register %eax.
20226+ * 0: Success 1: Failure
20227+ *
20228+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
20229+ *
20230+ * The caller needs to check for the error code and take the action
20231+ * appropriately. Either display a message or halt.
20232+ */
20233+
20234+#include <asm/cpufeature.h>
20235+#include <asm/msr-index.h>
20236+
20237+verify_cpu:
20238+ pushfl # Save caller passed flags
20239+ pushl $0 # Kill any dangerous flags
20240+ popfl
20241+
20242+ pushfl # standard way to check for cpuid
20243+ popl %eax
20244+ movl %eax,%ebx
20245+ xorl $0x200000,%eax
20246+ pushl %eax
20247+ popfl
20248+ pushfl
20249+ popl %eax
20250+ cmpl %eax,%ebx
20251+ jz verify_cpu_no_longmode # cpu has no cpuid
20252+
20253+ movl $0x0,%eax # See if cpuid 1 is implemented
20254+ cpuid
20255+ cmpl $0x1,%eax
20256+ jb verify_cpu_no_longmode # no cpuid 1
20257+
20258+ xor %di,%di
20259+ cmpl $0x68747541,%ebx # AuthenticAMD
20260+ jnz verify_cpu_noamd
20261+ cmpl $0x69746e65,%edx
20262+ jnz verify_cpu_noamd
20263+ cmpl $0x444d4163,%ecx
20264+ jnz verify_cpu_noamd
20265+ mov $1,%di # cpu is from AMD
20266+ jmp verify_cpu_check
20267+
20268+verify_cpu_noamd:
20269+ cmpl $0x756e6547,%ebx # GenuineIntel?
20270+ jnz verify_cpu_check
20271+ cmpl $0x49656e69,%edx
20272+ jnz verify_cpu_check
20273+ cmpl $0x6c65746e,%ecx
20274+ jnz verify_cpu_check
20275+
20276+ # only call IA32_MISC_ENABLE when:
20277+ # family > 6 || (family == 6 && model >= 0xd)
20278+ movl $0x1, %eax # check CPU family and model
20279+ cpuid
20280+ movl %eax, %ecx
20281+
20282+ andl $0x0ff00f00, %eax # mask family and extended family
20283+ shrl $8, %eax
20284+ cmpl $6, %eax
20285+ ja verify_cpu_clear_xd # family > 6, ok
20286+ jb verify_cpu_check # family < 6, skip
20287+
20288+ andl $0x000f00f0, %ecx # mask model and extended model
20289+ shrl $4, %ecx
20290+ cmpl $0xd, %ecx
20291+ jb verify_cpu_check # family == 6, model < 0xd, skip
20292+
20293+verify_cpu_clear_xd:
20294+ movl $MSR_IA32_MISC_ENABLE, %ecx
20295+ rdmsr
20296+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
20297+ jnc verify_cpu_check # only write MSR if bit was changed
20298+ wrmsr
20299+
20300+verify_cpu_check:
20301+ movl $0x1,%eax # Does the cpu have what it takes
20302+ cpuid
20303+ andl $REQUIRED_MASK0,%edx
20304+ xorl $REQUIRED_MASK0,%edx
20305+ jnz verify_cpu_no_longmode
20306+
20307+ movl $0x80000000,%eax # See if extended cpuid is implemented
20308+ cpuid
20309+ cmpl $0x80000001,%eax
20310+ jb verify_cpu_no_longmode # no extended cpuid
20311+
20312+ movl $0x80000001,%eax # Does the cpu have what it takes
20313+ cpuid
20314+ andl $REQUIRED_MASK1,%edx
20315+ xorl $REQUIRED_MASK1,%edx
20316+ jnz verify_cpu_no_longmode
20317+
20318+verify_cpu_sse_test:
20319+ movl $1,%eax
20320+ cpuid
20321+ andl $SSE_MASK,%edx
20322+ cmpl $SSE_MASK,%edx
20323+ je verify_cpu_sse_ok
20324+ test %di,%di
20325+ jz verify_cpu_no_longmode # only try to force SSE on AMD
20326+ movl $MSR_K7_HWCR,%ecx
20327+ rdmsr
20328+ btr $15,%eax # enable SSE
20329+ wrmsr
20330+ xor %di,%di # don't loop
20331+ jmp verify_cpu_sse_test # try again
20332+
20333+verify_cpu_no_longmode:
20334+ popfl # Restore caller passed flags
20335+ movl $1,%eax
20336+ ret
20337+verify_cpu_sse_ok:
20338+ popfl # Restore caller passed flags
20339+ xorl %eax, %eax
20340+ ret
20341diff --git a/arch/x86/kernel/verify_cpu_64.S b/arch/x86/kernel/verify_cpu_64.S
20342deleted file mode 100644
20343index 45b6f8a..0000000
20344--- a/arch/x86/kernel/verify_cpu_64.S
20345+++ /dev/null
20346@@ -1,105 +0,0 @@
20347-/*
20348- *
20349- * verify_cpu.S - Code for cpu long mode and SSE verification. This
20350- * code has been borrowed from boot/setup.S and was introduced by
20351- * Andi Kleen.
20352- *
20353- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
20354- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
20355- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
20356- *
20357- * This source code is licensed under the GNU General Public License,
20358- * Version 2. See the file COPYING for more details.
20359- *
20360- * This is a common code for verification whether CPU supports
20361- * long mode and SSE or not. It is not called directly instead this
20362- * file is included at various places and compiled in that context.
20363- * Following are the current usage.
20364- *
20365- * This file is included by both 16bit and 32bit code.
20366- *
20367- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
20368- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
20369- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
20370- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
20371- *
20372- * verify_cpu, returns the status of cpu check in register %eax.
20373- * 0: Success 1: Failure
20374- *
20375- * The caller needs to check for the error code and take the action
20376- * appropriately. Either display a message or halt.
20377- */
20378-
20379-#include <asm/cpufeature.h>
20380-
20381-verify_cpu:
20382- pushfl # Save caller passed flags
20383- pushl $0 # Kill any dangerous flags
20384- popfl
20385-
20386- pushfl # standard way to check for cpuid
20387- popl %eax
20388- movl %eax,%ebx
20389- xorl $0x200000,%eax
20390- pushl %eax
20391- popfl
20392- pushfl
20393- popl %eax
20394- cmpl %eax,%ebx
20395- jz verify_cpu_no_longmode # cpu has no cpuid
20396-
20397- movl $0x0,%eax # See if cpuid 1 is implemented
20398- cpuid
20399- cmpl $0x1,%eax
20400- jb verify_cpu_no_longmode # no cpuid 1
20401-
20402- xor %di,%di
20403- cmpl $0x68747541,%ebx # AuthenticAMD
20404- jnz verify_cpu_noamd
20405- cmpl $0x69746e65,%edx
20406- jnz verify_cpu_noamd
20407- cmpl $0x444d4163,%ecx
20408- jnz verify_cpu_noamd
20409- mov $1,%di # cpu is from AMD
20410-
20411-verify_cpu_noamd:
20412- movl $0x1,%eax # Does the cpu have what it takes
20413- cpuid
20414- andl $REQUIRED_MASK0,%edx
20415- xorl $REQUIRED_MASK0,%edx
20416- jnz verify_cpu_no_longmode
20417-
20418- movl $0x80000000,%eax # See if extended cpuid is implemented
20419- cpuid
20420- cmpl $0x80000001,%eax
20421- jb verify_cpu_no_longmode # no extended cpuid
20422-
20423- movl $0x80000001,%eax # Does the cpu have what it takes
20424- cpuid
20425- andl $REQUIRED_MASK1,%edx
20426- xorl $REQUIRED_MASK1,%edx
20427- jnz verify_cpu_no_longmode
20428-
20429-verify_cpu_sse_test:
20430- movl $1,%eax
20431- cpuid
20432- andl $SSE_MASK,%edx
20433- cmpl $SSE_MASK,%edx
20434- je verify_cpu_sse_ok
20435- test %di,%di
20436- jz verify_cpu_no_longmode # only try to force SSE on AMD
20437- movl $0xc0010015,%ecx # HWCR
20438- rdmsr
20439- btr $15,%eax # enable SSE
20440- wrmsr
20441- xor %di,%di # don't loop
20442- jmp verify_cpu_sse_test # try again
20443-
20444-verify_cpu_no_longmode:
20445- popfl # Restore caller passed flags
20446- movl $1,%eax
20447- ret
20448-verify_cpu_sse_ok:
20449- popfl # Restore caller passed flags
20450- xorl %eax, %eax
20451- ret
20452diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
20453index 9c4e625..c992817 100644
20454--- a/arch/x86/kernel/vm86_32.c
20455+++ b/arch/x86/kernel/vm86_32.c
20456@@ -41,6 +41,7 @@
20457 #include <linux/ptrace.h>
20458 #include <linux/audit.h>
20459 #include <linux/stddef.h>
20460+#include <linux/grsecurity.h>
20461
20462 #include <asm/uaccess.h>
20463 #include <asm/io.h>
20464@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
20465 do_exit(SIGSEGV);
20466 }
20467
20468- tss = &per_cpu(init_tss, get_cpu());
20469+ tss = init_tss + get_cpu();
20470 current->thread.sp0 = current->thread.saved_sp0;
20471 current->thread.sysenter_cs = __KERNEL_CS;
20472 load_sp0(tss, &current->thread);
20473@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
20474 struct task_struct *tsk;
20475 int tmp, ret = -EPERM;
20476
20477+#ifdef CONFIG_GRKERNSEC_VM86
20478+ if (!capable(CAP_SYS_RAWIO)) {
20479+ gr_handle_vm86();
20480+ goto out;
20481+ }
20482+#endif
20483+
20484 tsk = current;
20485 if (tsk->thread.saved_sp0)
20486 goto out;
20487@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
20488 int tmp, ret;
20489 struct vm86plus_struct __user *v86;
20490
20491+#ifdef CONFIG_GRKERNSEC_VM86
20492+ if (!capable(CAP_SYS_RAWIO)) {
20493+ gr_handle_vm86();
20494+ ret = -EPERM;
20495+ goto out;
20496+ }
20497+#endif
20498+
20499 tsk = current;
20500 switch (regs->bx) {
20501 case VM86_REQUEST_IRQ:
20502@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
20503 tsk->thread.saved_fs = info->regs32->fs;
20504 tsk->thread.saved_gs = get_user_gs(info->regs32);
20505
20506- tss = &per_cpu(init_tss, get_cpu());
20507+ tss = init_tss + get_cpu();
20508 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
20509 if (cpu_has_sep)
20510 tsk->thread.sysenter_cs = 0;
20511@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
20512 goto cannot_handle;
20513 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
20514 goto cannot_handle;
20515- intr_ptr = (unsigned long __user *) (i << 2);
20516+ intr_ptr = (__force unsigned long __user *) (i << 2);
20517 if (get_user(segoffs, intr_ptr))
20518 goto cannot_handle;
20519 if ((segoffs >> 16) == BIOSSEG)
20520diff --git a/arch/x86/kernel/vmi_32.c b/arch/x86/kernel/vmi_32.c
20521index d430e4c..831f817 100644
20522--- a/arch/x86/kernel/vmi_32.c
20523+++ b/arch/x86/kernel/vmi_32.c
20524@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1))) (VROMFUNC)(void);
20525 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
20526
20527 #define call_vrom_func(rom,func) \
20528- (((VROMFUNC *)(rom->func))())
20529+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
20530
20531 #define call_vrom_long_func(rom,func,arg) \
20532- (((VROMLONGFUNC *)(rom->func)) (arg))
20533+({\
20534+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
20535+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
20536+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
20537+ __reloc;\
20538+})
20539
20540-static struct vrom_header *vmi_rom;
20541+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
20542 static int disable_pge;
20543 static int disable_pse;
20544 static int disable_sep;
20545@@ -76,10 +81,10 @@ static struct {
20546 void (*set_initial_ap_state)(int, int);
20547 void (*halt)(void);
20548 void (*set_lazy_mode)(int mode);
20549-} vmi_ops;
20550+} __no_const vmi_ops __read_only;
20551
20552 /* Cached VMI operations */
20553-struct vmi_timer_ops vmi_timer_ops;
20554+struct vmi_timer_ops vmi_timer_ops __read_only;
20555
20556 /*
20557 * VMI patching routines.
20558@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
20559 static inline void patch_offset(void *insnbuf,
20560 unsigned long ip, unsigned long dest)
20561 {
20562- *(unsigned long *)(insnbuf+1) = dest-ip-5;
20563+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
20564 }
20565
20566 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20567@@ -102,6 +107,7 @@ static unsigned patch_internal(int call, unsigned len, void *insnbuf,
20568 {
20569 u64 reloc;
20570 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
20571+
20572 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
20573 switch(rel->type) {
20574 case VMI_RELOCATION_CALL_REL:
20575@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud_t pudval)
20576
20577 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
20578 {
20579- const pte_t pte = { .pte = 0 };
20580+ const pte_t pte = __pte(0ULL);
20581 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
20582 }
20583
20584 static void vmi_pmd_clear(pmd_t *pmd)
20585 {
20586- const pte_t pte = { .pte = 0 };
20587+ const pte_t pte = __pte(0ULL);
20588 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
20589 }
20590 #endif
20591@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, unsigned long start_eip,
20592 ap.ss = __KERNEL_DS;
20593 ap.esp = (unsigned long) start_esp;
20594
20595- ap.ds = __USER_DS;
20596- ap.es = __USER_DS;
20597+ ap.ds = __KERNEL_DS;
20598+ ap.es = __KERNEL_DS;
20599 ap.fs = __KERNEL_PERCPU;
20600- ap.gs = __KERNEL_STACK_CANARY;
20601+ savesegment(gs, ap.gs);
20602
20603 ap.eflags = 0;
20604
20605@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
20606 paravirt_leave_lazy_mmu();
20607 }
20608
20609+#ifdef CONFIG_PAX_KERNEXEC
20610+static unsigned long vmi_pax_open_kernel(void)
20611+{
20612+ return 0;
20613+}
20614+
20615+static unsigned long vmi_pax_close_kernel(void)
20616+{
20617+ return 0;
20618+}
20619+#endif
20620+
20621 static inline int __init check_vmi_rom(struct vrom_header *rom)
20622 {
20623 struct pci_header *pci;
20624@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(struct vrom_header *rom)
20625 return 0;
20626 if (rom->vrom_signature != VMI_SIGNATURE)
20627 return 0;
20628+ if (rom->rom_length * 512 > sizeof(*rom)) {
20629+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
20630+ return 0;
20631+ }
20632 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
20633 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
20634 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
20635@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(void)
20636 struct vrom_header *romstart;
20637 romstart = (struct vrom_header *)isa_bus_to_virt(base);
20638 if (check_vmi_rom(romstart)) {
20639- vmi_rom = romstart;
20640+ vmi_rom = *romstart;
20641 return 1;
20642 }
20643 }
20644@@ -836,6 +858,11 @@ static inline int __init activate_vmi(void)
20645
20646 para_fill(pv_irq_ops.safe_halt, Halt);
20647
20648+#ifdef CONFIG_PAX_KERNEXEC
20649+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
20650+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
20651+#endif
20652+
20653 /*
20654 * Alternative instruction rewriting doesn't happen soon enough
20655 * to convert VMI_IRET to a call instead of a jump; so we have
20656@@ -853,16 +880,16 @@ static inline int __init activate_vmi(void)
20657
20658 void __init vmi_init(void)
20659 {
20660- if (!vmi_rom)
20661+ if (!vmi_rom.rom_signature)
20662 probe_vmi_rom();
20663 else
20664- check_vmi_rom(vmi_rom);
20665+ check_vmi_rom(&vmi_rom);
20666
20667 /* In case probing for or validating the ROM failed, basil */
20668- if (!vmi_rom)
20669+ if (!vmi_rom.rom_signature)
20670 return;
20671
20672- reserve_top_address(-vmi_rom->virtual_top);
20673+ reserve_top_address(-vmi_rom.virtual_top);
20674
20675 #ifdef CONFIG_X86_IO_APIC
20676 /* This is virtual hardware; timer routing is wired correctly */
20677@@ -874,7 +901,7 @@ void __init vmi_activate(void)
20678 {
20679 unsigned long flags;
20680
20681- if (!vmi_rom)
20682+ if (!vmi_rom.rom_signature)
20683 return;
20684
20685 local_irq_save(flags);
20686diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
20687index 3c68fe2..12c8280 100644
20688--- a/arch/x86/kernel/vmlinux.lds.S
20689+++ b/arch/x86/kernel/vmlinux.lds.S
20690@@ -26,6 +26,13 @@
20691 #include <asm/page_types.h>
20692 #include <asm/cache.h>
20693 #include <asm/boot.h>
20694+#include <asm/segment.h>
20695+
20696+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
20697+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
20698+#else
20699+#define __KERNEL_TEXT_OFFSET 0
20700+#endif
20701
20702 #undef i386 /* in case the preprocessor is a 32bit one */
20703
20704@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT, CONFIG_OUTPUT_FORMAT)
20705 #ifdef CONFIG_X86_32
20706 OUTPUT_ARCH(i386)
20707 ENTRY(phys_startup_32)
20708-jiffies = jiffies_64;
20709 #else
20710 OUTPUT_ARCH(i386:x86-64)
20711 ENTRY(phys_startup_64)
20712-jiffies_64 = jiffies;
20713 #endif
20714
20715 PHDRS {
20716 text PT_LOAD FLAGS(5); /* R_E */
20717- data PT_LOAD FLAGS(7); /* RWE */
20718+#ifdef CONFIG_X86_32
20719+ module PT_LOAD FLAGS(5); /* R_E */
20720+#endif
20721+#ifdef CONFIG_XEN
20722+ rodata PT_LOAD FLAGS(5); /* R_E */
20723+#else
20724+ rodata PT_LOAD FLAGS(4); /* R__ */
20725+#endif
20726+ data PT_LOAD FLAGS(6); /* RW_ */
20727 #ifdef CONFIG_X86_64
20728 user PT_LOAD FLAGS(5); /* R_E */
20729+#endif
20730+ init.begin PT_LOAD FLAGS(6); /* RW_ */
20731 #ifdef CONFIG_SMP
20732 percpu PT_LOAD FLAGS(6); /* RW_ */
20733 #endif
20734+ text.init PT_LOAD FLAGS(5); /* R_E */
20735+ text.exit PT_LOAD FLAGS(5); /* R_E */
20736 init PT_LOAD FLAGS(7); /* RWE */
20737-#endif
20738 note PT_NOTE FLAGS(0); /* ___ */
20739 }
20740
20741 SECTIONS
20742 {
20743 #ifdef CONFIG_X86_32
20744- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
20745- phys_startup_32 = startup_32 - LOAD_OFFSET;
20746+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
20747 #else
20748- . = __START_KERNEL;
20749- phys_startup_64 = startup_64 - LOAD_OFFSET;
20750+ . = __START_KERNEL;
20751 #endif
20752
20753 /* Text and read-only data */
20754- .text : AT(ADDR(.text) - LOAD_OFFSET) {
20755- _text = .;
20756+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20757 /* bootstrapping code */
20758+#ifdef CONFIG_X86_32
20759+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20760+#else
20761+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20762+#endif
20763+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
20764+ _text = .;
20765 HEAD_TEXT
20766 #ifdef CONFIG_X86_32
20767 . = ALIGN(PAGE_SIZE);
20768@@ -82,28 +102,71 @@ SECTIONS
20769 IRQENTRY_TEXT
20770 *(.fixup)
20771 *(.gnu.warning)
20772- /* End of text section */
20773- _etext = .;
20774 } :text = 0x9090
20775
20776- NOTES :text :note
20777+ . += __KERNEL_TEXT_OFFSET;
20778
20779- EXCEPTION_TABLE(16) :text = 0x9090
20780+#ifdef CONFIG_X86_32
20781+ . = ALIGN(PAGE_SIZE);
20782+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
20783+ *(.vmi.rom)
20784+ } :module
20785+
20786+ . = ALIGN(PAGE_SIZE);
20787+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
20788+
20789+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
20790+ MODULES_EXEC_VADDR = .;
20791+ BYTE(0)
20792+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
20793+ . = ALIGN(HPAGE_SIZE);
20794+ MODULES_EXEC_END = . - 1;
20795+#endif
20796+
20797+ } :module
20798+#endif
20799+
20800+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
20801+ /* End of text section */
20802+ _etext = . - __KERNEL_TEXT_OFFSET;
20803+ }
20804+
20805+#ifdef CONFIG_X86_32
20806+ . = ALIGN(PAGE_SIZE);
20807+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
20808+ *(.idt)
20809+ . = ALIGN(PAGE_SIZE);
20810+ *(.empty_zero_page)
20811+ *(.swapper_pg_fixmap)
20812+ *(.swapper_pg_pmd)
20813+ *(.swapper_pg_dir)
20814+ *(.trampoline_pg_dir)
20815+ } :rodata
20816+#endif
20817+
20818+ . = ALIGN(PAGE_SIZE);
20819+ NOTES :rodata :note
20820+
20821+ EXCEPTION_TABLE(16) :rodata
20822
20823 RO_DATA(PAGE_SIZE)
20824
20825 /* Data */
20826 .data : AT(ADDR(.data) - LOAD_OFFSET) {
20827+
20828+#ifdef CONFIG_PAX_KERNEXEC
20829+ . = ALIGN(HPAGE_SIZE);
20830+#else
20831+ . = ALIGN(PAGE_SIZE);
20832+#endif
20833+
20834 /* Start of data section */
20835 _sdata = .;
20836
20837 /* init_task */
20838 INIT_TASK_DATA(THREAD_SIZE)
20839
20840-#ifdef CONFIG_X86_32
20841- /* 32 bit has nosave before _edata */
20842 NOSAVE_DATA
20843-#endif
20844
20845 PAGE_ALIGNED_DATA(PAGE_SIZE)
20846
20847@@ -112,6 +175,8 @@ SECTIONS
20848 DATA_DATA
20849 CONSTRUCTORS
20850
20851+ jiffies = jiffies_64;
20852+
20853 /* rarely changed data like cpu maps */
20854 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
20855
20856@@ -166,12 +231,6 @@ SECTIONS
20857 }
20858 vgetcpu_mode = VVIRT(.vgetcpu_mode);
20859
20860- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
20861- .jiffies : AT(VLOAD(.jiffies)) {
20862- *(.jiffies)
20863- }
20864- jiffies = VVIRT(.jiffies);
20865-
20866 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
20867 *(.vsyscall_3)
20868 }
20869@@ -187,12 +246,19 @@ SECTIONS
20870 #endif /* CONFIG_X86_64 */
20871
20872 /* Init code and data - will be freed after init */
20873- . = ALIGN(PAGE_SIZE);
20874 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
20875+ BYTE(0)
20876+
20877+#ifdef CONFIG_PAX_KERNEXEC
20878+ . = ALIGN(HPAGE_SIZE);
20879+#else
20880+ . = ALIGN(PAGE_SIZE);
20881+#endif
20882+
20883 __init_begin = .; /* paired with __init_end */
20884- }
20885+ } :init.begin
20886
20887-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
20888+#ifdef CONFIG_SMP
20889 /*
20890 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
20891 * output PHDR, so the next output section - .init.text - should
20892@@ -201,12 +267,27 @@ SECTIONS
20893 PERCPU_VADDR(0, :percpu)
20894 #endif
20895
20896- INIT_TEXT_SECTION(PAGE_SIZE)
20897-#ifdef CONFIG_X86_64
20898- :init
20899-#endif
20900+ . = ALIGN(PAGE_SIZE);
20901+ init_begin = .;
20902+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
20903+ VMLINUX_SYMBOL(_sinittext) = .;
20904+ INIT_TEXT
20905+ VMLINUX_SYMBOL(_einittext) = .;
20906+ . = ALIGN(PAGE_SIZE);
20907+ } :text.init
20908
20909- INIT_DATA_SECTION(16)
20910+ /*
20911+ * .exit.text is discard at runtime, not link time, to deal with
20912+ * references from .altinstructions and .eh_frame
20913+ */
20914+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
20915+ EXIT_TEXT
20916+ . = ALIGN(16);
20917+ } :text.exit
20918+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
20919+
20920+ . = ALIGN(PAGE_SIZE);
20921+ INIT_DATA_SECTION(16) :init
20922
20923 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
20924 __x86_cpu_dev_start = .;
20925@@ -232,19 +313,11 @@ SECTIONS
20926 *(.altinstr_replacement)
20927 }
20928
20929- /*
20930- * .exit.text is discard at runtime, not link time, to deal with
20931- * references from .altinstructions and .eh_frame
20932- */
20933- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
20934- EXIT_TEXT
20935- }
20936-
20937 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
20938 EXIT_DATA
20939 }
20940
20941-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
20942+#ifndef CONFIG_SMP
20943 PERCPU(PAGE_SIZE)
20944 #endif
20945
20946@@ -267,12 +340,6 @@ SECTIONS
20947 . = ALIGN(PAGE_SIZE);
20948 }
20949
20950-#ifdef CONFIG_X86_64
20951- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
20952- NOSAVE_DATA
20953- }
20954-#endif
20955-
20956 /* BSS */
20957 . = ALIGN(PAGE_SIZE);
20958 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
20959@@ -288,6 +355,7 @@ SECTIONS
20960 __brk_base = .;
20961 . += 64 * 1024; /* 64k alignment slop space */
20962 *(.brk_reservation) /* areas brk users have reserved */
20963+ . = ALIGN(HPAGE_SIZE);
20964 __brk_limit = .;
20965 }
20966
20967@@ -316,13 +384,12 @@ SECTIONS
20968 * for the boot processor.
20969 */
20970 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
20971-INIT_PER_CPU(gdt_page);
20972 INIT_PER_CPU(irq_stack_union);
20973
20974 /*
20975 * Build-time check on the image size:
20976 */
20977-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
20978+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
20979 "kernel image bigger than KERNEL_IMAGE_SIZE");
20980
20981 #ifdef CONFIG_SMP
20982diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
20983index 62f39d7..3bc46a1 100644
20984--- a/arch/x86/kernel/vsyscall_64.c
20985+++ b/arch/x86/kernel/vsyscall_64.c
20986@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock,
20987
20988 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
20989 /* copy vsyscall data */
20990+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
20991 vsyscall_gtod_data.clock.vread = clock->vread;
20992 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
20993 vsyscall_gtod_data.clock.mask = clock->mask;
20994@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
20995 We do this here because otherwise user space would do it on
20996 its own in a likely inferior way (no access to jiffies).
20997 If you don't like it pass NULL. */
20998- if (tcache && tcache->blob[0] == (j = __jiffies)) {
20999+ if (tcache && tcache->blob[0] == (j = jiffies)) {
21000 p = tcache->blob[1];
21001 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
21002 /* Load per CPU data from RDTSCP */
21003diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
21004index 3909e3b..5433a97 100644
21005--- a/arch/x86/kernel/x8664_ksyms_64.c
21006+++ b/arch/x86/kernel/x8664_ksyms_64.c
21007@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
21008
21009 EXPORT_SYMBOL(copy_user_generic);
21010 EXPORT_SYMBOL(__copy_user_nocache);
21011-EXPORT_SYMBOL(copy_from_user);
21012-EXPORT_SYMBOL(copy_to_user);
21013 EXPORT_SYMBOL(__copy_from_user_inatomic);
21014
21015 EXPORT_SYMBOL(copy_page);
21016diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
21017index c5ee17e..d63218f 100644
21018--- a/arch/x86/kernel/xsave.c
21019+++ b/arch/x86/kernel/xsave.c
21020@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_struct __user *buf,
21021 fx_sw_user->xstate_size > fx_sw_user->extended_size)
21022 return -1;
21023
21024- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
21025+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
21026 fx_sw_user->extended_size -
21027 FP_XSTATE_MAGIC2_SIZE));
21028 /*
21029@@ -196,7 +196,7 @@ fx_only:
21030 * the other extended state.
21031 */
21032 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
21033- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
21034+ return fxrstor_checking((struct i387_fxsave_struct __force_kernel *)buf);
21035 }
21036
21037 /*
21038@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf)
21039 if (task_thread_info(tsk)->status & TS_XSAVE)
21040 err = restore_user_xstate(buf);
21041 else
21042- err = fxrstor_checking((__force struct i387_fxsave_struct *)
21043+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
21044 buf);
21045 if (unlikely(err)) {
21046 /*
21047diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
21048index 1350e43..a94b011 100644
21049--- a/arch/x86/kvm/emulate.c
21050+++ b/arch/x86/kvm/emulate.c
21051@@ -81,8 +81,8 @@
21052 #define Src2CL (1<<29)
21053 #define Src2ImmByte (2<<29)
21054 #define Src2One (3<<29)
21055-#define Src2Imm16 (4<<29)
21056-#define Src2Mask (7<<29)
21057+#define Src2Imm16 (4U<<29)
21058+#define Src2Mask (7U<<29)
21059
21060 enum {
21061 Group1_80, Group1_81, Group1_82, Group1_83,
21062@@ -411,6 +411,7 @@ static u32 group2_table[] = {
21063
21064 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
21065 do { \
21066+ unsigned long _tmp; \
21067 __asm__ __volatile__ ( \
21068 _PRE_EFLAGS("0", "4", "2") \
21069 _op _suffix " %"_x"3,%1; " \
21070@@ -424,8 +425,6 @@ static u32 group2_table[] = {
21071 /* Raw emulation: instruction has two explicit operands. */
21072 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
21073 do { \
21074- unsigned long _tmp; \
21075- \
21076 switch ((_dst).bytes) { \
21077 case 2: \
21078 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
21079@@ -441,7 +440,6 @@ static u32 group2_table[] = {
21080
21081 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
21082 do { \
21083- unsigned long _tmp; \
21084 switch ((_dst).bytes) { \
21085 case 1: \
21086 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
21087diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
21088index 8dfeaaa..4daa395 100644
21089--- a/arch/x86/kvm/lapic.c
21090+++ b/arch/x86/kvm/lapic.c
21091@@ -52,7 +52,7 @@
21092 #define APIC_BUS_CYCLE_NS 1
21093
21094 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
21095-#define apic_debug(fmt, arg...)
21096+#define apic_debug(fmt, arg...) do {} while (0)
21097
21098 #define APIC_LVT_NUM 6
21099 /* 14 is the version for Xeon and Pentium 8.4.8*/
21100diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
21101index 3bc2707..dd157e2 100644
21102--- a/arch/x86/kvm/paging_tmpl.h
21103+++ b/arch/x86/kvm/paging_tmpl.h
21104@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21105 int level = PT_PAGE_TABLE_LEVEL;
21106 unsigned long mmu_seq;
21107
21108+ pax_track_stack();
21109+
21110 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
21111 kvm_mmu_audit(vcpu, "pre page fault");
21112
21113@@ -461,6 +463,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
21114 kvm_mmu_free_some_pages(vcpu);
21115 sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
21116 level, &write_pt, pfn);
21117+ (void)sptep;
21118 pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
21119 sptep, *sptep, write_pt);
21120
21121diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
21122index 7c6e63e..c5d92c1 100644
21123--- a/arch/x86/kvm/svm.c
21124+++ b/arch/x86/kvm/svm.c
21125@@ -2486,7 +2486,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
21126 int cpu = raw_smp_processor_id();
21127
21128 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
21129+
21130+ pax_open_kernel();
21131 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
21132+ pax_close_kernel();
21133+
21134 load_TR_desc();
21135 }
21136
21137@@ -2947,7 +2951,7 @@ static bool svm_gb_page_enable(void)
21138 return true;
21139 }
21140
21141-static struct kvm_x86_ops svm_x86_ops = {
21142+static const struct kvm_x86_ops svm_x86_ops = {
21143 .cpu_has_kvm_support = has_svm,
21144 .disabled_by_bios = is_disabled,
21145 .hardware_setup = svm_hardware_setup,
21146diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
21147index e6d925f..e7a4af8 100644
21148--- a/arch/x86/kvm/vmx.c
21149+++ b/arch/x86/kvm/vmx.c
21150@@ -570,7 +570,11 @@ static void reload_tss(void)
21151
21152 kvm_get_gdt(&gdt);
21153 descs = (void *)gdt.base;
21154+
21155+ pax_open_kernel();
21156 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
21157+ pax_close_kernel();
21158+
21159 load_TR_desc();
21160 }
21161
21162@@ -1410,8 +1414,11 @@ static __init int hardware_setup(void)
21163 if (!cpu_has_vmx_flexpriority())
21164 flexpriority_enabled = 0;
21165
21166- if (!cpu_has_vmx_tpr_shadow())
21167- kvm_x86_ops->update_cr8_intercept = NULL;
21168+ if (!cpu_has_vmx_tpr_shadow()) {
21169+ pax_open_kernel();
21170+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
21171+ pax_close_kernel();
21172+ }
21173
21174 if (enable_ept && !cpu_has_vmx_ept_2m_page())
21175 kvm_disable_largepages();
21176@@ -2362,7 +2369,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
21177 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
21178
21179 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
21180- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
21181+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
21182 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
21183 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
21184 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
21185@@ -3718,6 +3725,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21186 "jmp .Lkvm_vmx_return \n\t"
21187 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
21188 ".Lkvm_vmx_return: "
21189+
21190+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21191+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
21192+ ".Lkvm_vmx_return2: "
21193+#endif
21194+
21195 /* Save guest registers, load host registers, keep flags */
21196 "xchg %0, (%%"R"sp) \n\t"
21197 "mov %%"R"ax, %c[rax](%0) \n\t"
21198@@ -3764,8 +3777,13 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21199 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
21200 #endif
21201 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
21202+
21203+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21204+ ,[cs]"i"(__KERNEL_CS)
21205+#endif
21206+
21207 : "cc", "memory"
21208- , R"bx", R"di", R"si"
21209+ , R"ax", R"bx", R"di", R"si"
21210 #ifdef CONFIG_X86_64
21211 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
21212 #endif
21213@@ -3782,7 +3800,16 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
21214 if (vmx->rmode.irq.pending)
21215 fixup_rmode_irq(vmx);
21216
21217- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
21218+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
21219+
21220+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21221+ loadsegment(fs, __KERNEL_PERCPU);
21222+#endif
21223+
21224+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
21225+ __set_fs(current_thread_info()->addr_limit);
21226+#endif
21227+
21228 vmx->launched = 1;
21229
21230 vmx_complete_interrupts(vmx);
21231@@ -3957,7 +3984,7 @@ static bool vmx_gb_page_enable(void)
21232 return false;
21233 }
21234
21235-static struct kvm_x86_ops vmx_x86_ops = {
21236+static const struct kvm_x86_ops vmx_x86_ops = {
21237 .cpu_has_kvm_support = cpu_has_kvm_support,
21238 .disabled_by_bios = vmx_disabled_by_bios,
21239 .hardware_setup = hardware_setup,
21240diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
21241index df1cefb..5e882ad 100644
21242--- a/arch/x86/kvm/x86.c
21243+++ b/arch/x86/kvm/x86.c
21244@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu);
21245 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
21246 struct kvm_cpuid_entry2 __user *entries);
21247
21248-struct kvm_x86_ops *kvm_x86_ops;
21249+const struct kvm_x86_ops *kvm_x86_ops;
21250 EXPORT_SYMBOL_GPL(kvm_x86_ops);
21251
21252 int ignore_msrs = 0;
21253@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
21254 struct kvm_cpuid2 *cpuid,
21255 struct kvm_cpuid_entry2 __user *entries)
21256 {
21257- int r;
21258+ int r, i;
21259
21260 r = -E2BIG;
21261 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
21262 goto out;
21263 r = -EFAULT;
21264- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
21265- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21266+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
21267 goto out;
21268+ for (i = 0; i < cpuid->nent; ++i) {
21269+ struct kvm_cpuid_entry2 cpuid_entry;
21270+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
21271+ goto out;
21272+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
21273+ }
21274 vcpu->arch.cpuid_nent = cpuid->nent;
21275 kvm_apic_set_version(vcpu);
21276 return 0;
21277@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
21278 struct kvm_cpuid2 *cpuid,
21279 struct kvm_cpuid_entry2 __user *entries)
21280 {
21281- int r;
21282+ int r, i;
21283
21284 vcpu_load(vcpu);
21285 r = -E2BIG;
21286 if (cpuid->nent < vcpu->arch.cpuid_nent)
21287 goto out;
21288 r = -EFAULT;
21289- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
21290- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21291+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
21292 goto out;
21293+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
21294+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
21295+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
21296+ goto out;
21297+ }
21298 return 0;
21299
21300 out:
21301@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
21302 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
21303 struct kvm_interrupt *irq)
21304 {
21305- if (irq->irq < 0 || irq->irq >= 256)
21306+ if (irq->irq >= 256)
21307 return -EINVAL;
21308 if (irqchip_in_kernel(vcpu->kvm))
21309 return -ENXIO;
21310@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cpufreq_notifier_block = {
21311 .notifier_call = kvmclock_cpufreq_notifier
21312 };
21313
21314-int kvm_arch_init(void *opaque)
21315+int kvm_arch_init(const void *opaque)
21316 {
21317 int r, cpu;
21318- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
21319+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
21320
21321 if (kvm_x86_ops) {
21322 printk(KERN_ERR "kvm: already loaded the other module\n");
21323diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
21324index 7e59dc1..b88c98f 100644
21325--- a/arch/x86/lguest/boot.c
21326+++ b/arch/x86/lguest/boot.c
21327@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
21328 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
21329 * Launcher to reboot us.
21330 */
21331-static void lguest_restart(char *reason)
21332+static __noreturn void lguest_restart(char *reason)
21333 {
21334 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
21335+ BUG();
21336 }
21337
21338 /*G:050
21339diff --git a/arch/x86/lib/atomic64_32.c b/arch/x86/lib/atomic64_32.c
21340index 824fa0b..c619e96 100644
21341--- a/arch/x86/lib/atomic64_32.c
21342+++ b/arch/x86/lib/atomic64_32.c
21343@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val)
21344 }
21345 EXPORT_SYMBOL(atomic64_cmpxchg);
21346
21347+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
21348+{
21349+ return cmpxchg8b(&ptr->counter, old_val, new_val);
21350+}
21351+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
21352+
21353 /**
21354 * atomic64_xchg - xchg atomic64 variable
21355 * @ptr: pointer to type atomic64_t
21356@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 new_val)
21357 EXPORT_SYMBOL(atomic64_xchg);
21358
21359 /**
21360+ * atomic64_xchg_unchecked - xchg atomic64 variable
21361+ * @ptr: pointer to type atomic64_unchecked_t
21362+ * @new_val: value to assign
21363+ *
21364+ * Atomically xchgs the value of @ptr to @new_val and returns
21365+ * the old value.
21366+ */
21367+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21368+{
21369+ /*
21370+ * Try first with a (possibly incorrect) assumption about
21371+ * what we have there. We'll do two loops most likely,
21372+ * but we'll get an ownership MESI transaction straight away
21373+ * instead of a read transaction followed by a
21374+ * flush-for-ownership transaction:
21375+ */
21376+ u64 old_val, real_val = 0;
21377+
21378+ do {
21379+ old_val = real_val;
21380+
21381+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21382+
21383+ } while (real_val != old_val);
21384+
21385+ return old_val;
21386+}
21387+EXPORT_SYMBOL(atomic64_xchg_unchecked);
21388+
21389+/**
21390 * atomic64_set - set atomic64 variable
21391 * @ptr: pointer to type atomic64_t
21392 * @new_val: value to assign
21393@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 new_val)
21394 EXPORT_SYMBOL(atomic64_set);
21395
21396 /**
21397-EXPORT_SYMBOL(atomic64_read);
21398+ * atomic64_unchecked_set - set atomic64 variable
21399+ * @ptr: pointer to type atomic64_unchecked_t
21400+ * @new_val: value to assign
21401+ *
21402+ * Atomically sets the value of @ptr to @new_val.
21403+ */
21404+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
21405+{
21406+ atomic64_xchg_unchecked(ptr, new_val);
21407+}
21408+EXPORT_SYMBOL(atomic64_set_unchecked);
21409+
21410+/**
21411 * atomic64_add_return - add and return
21412 * @delta: integer value to add
21413 * @ptr: pointer to type atomic64_t
21414@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 delta, atomic64_t *ptr)
21415 }
21416 EXPORT_SYMBOL(atomic64_add_return);
21417
21418+/**
21419+ * atomic64_add_return_unchecked - add and return
21420+ * @delta: integer value to add
21421+ * @ptr: pointer to type atomic64_unchecked_t
21422+ *
21423+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
21424+ */
21425+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21426+{
21427+ /*
21428+ * Try first with a (possibly incorrect) assumption about
21429+ * what we have there. We'll do two loops most likely,
21430+ * but we'll get an ownership MESI transaction straight away
21431+ * instead of a read transaction followed by a
21432+ * flush-for-ownership transaction:
21433+ */
21434+ u64 old_val, new_val, real_val = 0;
21435+
21436+ do {
21437+ old_val = real_val;
21438+ new_val = old_val + delta;
21439+
21440+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
21441+
21442+ } while (real_val != old_val);
21443+
21444+ return new_val;
21445+}
21446+EXPORT_SYMBOL(atomic64_add_return_unchecked);
21447+
21448 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
21449 {
21450 return atomic64_add_return(-delta, ptr);
21451 }
21452 EXPORT_SYMBOL(atomic64_sub_return);
21453
21454+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21455+{
21456+ return atomic64_add_return_unchecked(-delta, ptr);
21457+}
21458+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
21459+
21460 u64 atomic64_inc_return(atomic64_t *ptr)
21461 {
21462 return atomic64_add_return(1, ptr);
21463 }
21464 EXPORT_SYMBOL(atomic64_inc_return);
21465
21466+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
21467+{
21468+ return atomic64_add_return_unchecked(1, ptr);
21469+}
21470+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
21471+
21472 u64 atomic64_dec_return(atomic64_t *ptr)
21473 {
21474 return atomic64_sub_return(1, ptr);
21475 }
21476 EXPORT_SYMBOL(atomic64_dec_return);
21477
21478+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
21479+{
21480+ return atomic64_sub_return_unchecked(1, ptr);
21481+}
21482+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
21483+
21484 /**
21485 * atomic64_add - add integer to atomic64 variable
21486 * @delta: integer value to add
21487@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t *ptr)
21488 EXPORT_SYMBOL(atomic64_add);
21489
21490 /**
21491+ * atomic64_add_unchecked - add integer to atomic64 variable
21492+ * @delta: integer value to add
21493+ * @ptr: pointer to type atomic64_unchecked_t
21494+ *
21495+ * Atomically adds @delta to @ptr.
21496+ */
21497+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21498+{
21499+ atomic64_add_return_unchecked(delta, ptr);
21500+}
21501+EXPORT_SYMBOL(atomic64_add_unchecked);
21502+
21503+/**
21504 * atomic64_sub - subtract the atomic64 variable
21505 * @delta: integer value to subtract
21506 * @ptr: pointer to type atomic64_t
21507@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t *ptr)
21508 EXPORT_SYMBOL(atomic64_sub);
21509
21510 /**
21511+ * atomic64_sub_unchecked - subtract the atomic64 variable
21512+ * @delta: integer value to subtract
21513+ * @ptr: pointer to type atomic64_unchecked_t
21514+ *
21515+ * Atomically subtracts @delta from @ptr.
21516+ */
21517+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
21518+{
21519+ atomic64_add_unchecked(-delta, ptr);
21520+}
21521+EXPORT_SYMBOL(atomic64_sub_unchecked);
21522+
21523+/**
21524 * atomic64_sub_and_test - subtract value from variable and test result
21525 * @delta: integer value to subtract
21526 * @ptr: pointer to type atomic64_t
21527@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
21528 EXPORT_SYMBOL(atomic64_inc);
21529
21530 /**
21531+ * atomic64_inc_unchecked - increment atomic64 variable
21532+ * @ptr: pointer to type atomic64_unchecked_t
21533+ *
21534+ * Atomically increments @ptr by 1.
21535+ */
21536+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
21537+{
21538+ atomic64_add_unchecked(1, ptr);
21539+}
21540+EXPORT_SYMBOL(atomic64_inc_unchecked);
21541+
21542+/**
21543 * atomic64_dec - decrement atomic64 variable
21544 * @ptr: pointer to type atomic64_t
21545 *
21546@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
21547 EXPORT_SYMBOL(atomic64_dec);
21548
21549 /**
21550+ * atomic64_dec_unchecked - decrement atomic64 variable
21551+ * @ptr: pointer to type atomic64_unchecked_t
21552+ *
21553+ * Atomically decrements @ptr by 1.
21554+ */
21555+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
21556+{
21557+ atomic64_sub_unchecked(1, ptr);
21558+}
21559+EXPORT_SYMBOL(atomic64_dec_unchecked);
21560+
21561+/**
21562 * atomic64_dec_and_test - decrement and test
21563 * @ptr: pointer to type atomic64_t
21564 *
21565diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
21566index adbccd0..98f96c8 100644
21567--- a/arch/x86/lib/checksum_32.S
21568+++ b/arch/x86/lib/checksum_32.S
21569@@ -28,7 +28,8 @@
21570 #include <linux/linkage.h>
21571 #include <asm/dwarf2.h>
21572 #include <asm/errno.h>
21573-
21574+#include <asm/segment.h>
21575+
21576 /*
21577 * computes a partial checksum, e.g. for TCP/UDP fragments
21578 */
21579@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
21580
21581 #define ARGBASE 16
21582 #define FP 12
21583-
21584-ENTRY(csum_partial_copy_generic)
21585+
21586+ENTRY(csum_partial_copy_generic_to_user)
21587 CFI_STARTPROC
21588+
21589+#ifdef CONFIG_PAX_MEMORY_UDEREF
21590+ pushl %gs
21591+ CFI_ADJUST_CFA_OFFSET 4
21592+ popl %es
21593+ CFI_ADJUST_CFA_OFFSET -4
21594+ jmp csum_partial_copy_generic
21595+#endif
21596+
21597+ENTRY(csum_partial_copy_generic_from_user)
21598+
21599+#ifdef CONFIG_PAX_MEMORY_UDEREF
21600+ pushl %gs
21601+ CFI_ADJUST_CFA_OFFSET 4
21602+ popl %ds
21603+ CFI_ADJUST_CFA_OFFSET -4
21604+#endif
21605+
21606+ENTRY(csum_partial_copy_generic)
21607 subl $4,%esp
21608 CFI_ADJUST_CFA_OFFSET 4
21609 pushl %edi
21610@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
21611 jmp 4f
21612 SRC(1: movw (%esi), %bx )
21613 addl $2, %esi
21614-DST( movw %bx, (%edi) )
21615+DST( movw %bx, %es:(%edi) )
21616 addl $2, %edi
21617 addw %bx, %ax
21618 adcl $0, %eax
21619@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
21620 SRC(1: movl (%esi), %ebx )
21621 SRC( movl 4(%esi), %edx )
21622 adcl %ebx, %eax
21623-DST( movl %ebx, (%edi) )
21624+DST( movl %ebx, %es:(%edi) )
21625 adcl %edx, %eax
21626-DST( movl %edx, 4(%edi) )
21627+DST( movl %edx, %es:4(%edi) )
21628
21629 SRC( movl 8(%esi), %ebx )
21630 SRC( movl 12(%esi), %edx )
21631 adcl %ebx, %eax
21632-DST( movl %ebx, 8(%edi) )
21633+DST( movl %ebx, %es:8(%edi) )
21634 adcl %edx, %eax
21635-DST( movl %edx, 12(%edi) )
21636+DST( movl %edx, %es:12(%edi) )
21637
21638 SRC( movl 16(%esi), %ebx )
21639 SRC( movl 20(%esi), %edx )
21640 adcl %ebx, %eax
21641-DST( movl %ebx, 16(%edi) )
21642+DST( movl %ebx, %es:16(%edi) )
21643 adcl %edx, %eax
21644-DST( movl %edx, 20(%edi) )
21645+DST( movl %edx, %es:20(%edi) )
21646
21647 SRC( movl 24(%esi), %ebx )
21648 SRC( movl 28(%esi), %edx )
21649 adcl %ebx, %eax
21650-DST( movl %ebx, 24(%edi) )
21651+DST( movl %ebx, %es:24(%edi) )
21652 adcl %edx, %eax
21653-DST( movl %edx, 28(%edi) )
21654+DST( movl %edx, %es:28(%edi) )
21655
21656 lea 32(%esi), %esi
21657 lea 32(%edi), %edi
21658@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
21659 shrl $2, %edx # This clears CF
21660 SRC(3: movl (%esi), %ebx )
21661 adcl %ebx, %eax
21662-DST( movl %ebx, (%edi) )
21663+DST( movl %ebx, %es:(%edi) )
21664 lea 4(%esi), %esi
21665 lea 4(%edi), %edi
21666 dec %edx
21667@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
21668 jb 5f
21669 SRC( movw (%esi), %cx )
21670 leal 2(%esi), %esi
21671-DST( movw %cx, (%edi) )
21672+DST( movw %cx, %es:(%edi) )
21673 leal 2(%edi), %edi
21674 je 6f
21675 shll $16,%ecx
21676 SRC(5: movb (%esi), %cl )
21677-DST( movb %cl, (%edi) )
21678+DST( movb %cl, %es:(%edi) )
21679 6: addl %ecx, %eax
21680 adcl $0, %eax
21681 7:
21682@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
21683
21684 6001:
21685 movl ARGBASE+20(%esp), %ebx # src_err_ptr
21686- movl $-EFAULT, (%ebx)
21687+ movl $-EFAULT, %ss:(%ebx)
21688
21689 # zero the complete destination - computing the rest
21690 # is too much work
21691@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
21692
21693 6002:
21694 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21695- movl $-EFAULT,(%ebx)
21696+ movl $-EFAULT,%ss:(%ebx)
21697 jmp 5000b
21698
21699 .previous
21700
21701+ pushl %ss
21702+ CFI_ADJUST_CFA_OFFSET 4
21703+ popl %ds
21704+ CFI_ADJUST_CFA_OFFSET -4
21705+ pushl %ss
21706+ CFI_ADJUST_CFA_OFFSET 4
21707+ popl %es
21708+ CFI_ADJUST_CFA_OFFSET -4
21709 popl %ebx
21710 CFI_ADJUST_CFA_OFFSET -4
21711 CFI_RESTORE ebx
21712@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
21713 CFI_ADJUST_CFA_OFFSET -4
21714 ret
21715 CFI_ENDPROC
21716-ENDPROC(csum_partial_copy_generic)
21717+ENDPROC(csum_partial_copy_generic_to_user)
21718
21719 #else
21720
21721 /* Version for PentiumII/PPro */
21722
21723 #define ROUND1(x) \
21724+ nop; nop; nop; \
21725 SRC(movl x(%esi), %ebx ) ; \
21726 addl %ebx, %eax ; \
21727- DST(movl %ebx, x(%edi) ) ;
21728+ DST(movl %ebx, %es:x(%edi)) ;
21729
21730 #define ROUND(x) \
21731+ nop; nop; nop; \
21732 SRC(movl x(%esi), %ebx ) ; \
21733 adcl %ebx, %eax ; \
21734- DST(movl %ebx, x(%edi) ) ;
21735+ DST(movl %ebx, %es:x(%edi)) ;
21736
21737 #define ARGBASE 12
21738-
21739-ENTRY(csum_partial_copy_generic)
21740+
21741+ENTRY(csum_partial_copy_generic_to_user)
21742 CFI_STARTPROC
21743+
21744+#ifdef CONFIG_PAX_MEMORY_UDEREF
21745+ pushl %gs
21746+ CFI_ADJUST_CFA_OFFSET 4
21747+ popl %es
21748+ CFI_ADJUST_CFA_OFFSET -4
21749+ jmp csum_partial_copy_generic
21750+#endif
21751+
21752+ENTRY(csum_partial_copy_generic_from_user)
21753+
21754+#ifdef CONFIG_PAX_MEMORY_UDEREF
21755+ pushl %gs
21756+ CFI_ADJUST_CFA_OFFSET 4
21757+ popl %ds
21758+ CFI_ADJUST_CFA_OFFSET -4
21759+#endif
21760+
21761+ENTRY(csum_partial_copy_generic)
21762 pushl %ebx
21763 CFI_ADJUST_CFA_OFFSET 4
21764 CFI_REL_OFFSET ebx, 0
21765@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
21766 subl %ebx, %edi
21767 lea -1(%esi),%edx
21768 andl $-32,%edx
21769- lea 3f(%ebx,%ebx), %ebx
21770+ lea 3f(%ebx,%ebx,2), %ebx
21771 testl %esi, %esi
21772 jmp *%ebx
21773 1: addl $64,%esi
21774@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
21775 jb 5f
21776 SRC( movw (%esi), %dx )
21777 leal 2(%esi), %esi
21778-DST( movw %dx, (%edi) )
21779+DST( movw %dx, %es:(%edi) )
21780 leal 2(%edi), %edi
21781 je 6f
21782 shll $16,%edx
21783 5:
21784 SRC( movb (%esi), %dl )
21785-DST( movb %dl, (%edi) )
21786+DST( movb %dl, %es:(%edi) )
21787 6: addl %edx, %eax
21788 adcl $0, %eax
21789 7:
21790 .section .fixup, "ax"
21791 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
21792- movl $-EFAULT, (%ebx)
21793+ movl $-EFAULT, %ss:(%ebx)
21794 # zero the complete destination (computing the rest is too much work)
21795 movl ARGBASE+8(%esp),%edi # dst
21796 movl ARGBASE+12(%esp),%ecx # len
21797@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
21798 rep; stosb
21799 jmp 7b
21800 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
21801- movl $-EFAULT, (%ebx)
21802+ movl $-EFAULT, %ss:(%ebx)
21803 jmp 7b
21804 .previous
21805
21806+#ifdef CONFIG_PAX_MEMORY_UDEREF
21807+ pushl %ss
21808+ CFI_ADJUST_CFA_OFFSET 4
21809+ popl %ds
21810+ CFI_ADJUST_CFA_OFFSET -4
21811+ pushl %ss
21812+ CFI_ADJUST_CFA_OFFSET 4
21813+ popl %es
21814+ CFI_ADJUST_CFA_OFFSET -4
21815+#endif
21816+
21817 popl %esi
21818 CFI_ADJUST_CFA_OFFSET -4
21819 CFI_RESTORE esi
21820@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
21821 CFI_RESTORE ebx
21822 ret
21823 CFI_ENDPROC
21824-ENDPROC(csum_partial_copy_generic)
21825+ENDPROC(csum_partial_copy_generic_to_user)
21826
21827 #undef ROUND
21828 #undef ROUND1
21829diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
21830index ebeafcc..1e3a402 100644
21831--- a/arch/x86/lib/clear_page_64.S
21832+++ b/arch/x86/lib/clear_page_64.S
21833@@ -1,5 +1,6 @@
21834 #include <linux/linkage.h>
21835 #include <asm/dwarf2.h>
21836+#include <asm/alternative-asm.h>
21837
21838 /*
21839 * Zero a page.
21840@@ -10,6 +11,7 @@ ENTRY(clear_page_c)
21841 movl $4096/8,%ecx
21842 xorl %eax,%eax
21843 rep stosq
21844+ pax_force_retaddr
21845 ret
21846 CFI_ENDPROC
21847 ENDPROC(clear_page_c)
21848@@ -33,6 +35,7 @@ ENTRY(clear_page)
21849 leaq 64(%rdi),%rdi
21850 jnz .Lloop
21851 nop
21852+ pax_force_retaddr
21853 ret
21854 CFI_ENDPROC
21855 .Lclear_page_end:
21856@@ -43,7 +46,7 @@ ENDPROC(clear_page)
21857
21858 #include <asm/cpufeature.h>
21859
21860- .section .altinstr_replacement,"ax"
21861+ .section .altinstr_replacement,"a"
21862 1: .byte 0xeb /* jmp <disp8> */
21863 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
21864 2:
21865diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
21866index 727a5d4..333818a 100644
21867--- a/arch/x86/lib/copy_page_64.S
21868+++ b/arch/x86/lib/copy_page_64.S
21869@@ -2,12 +2,14 @@
21870
21871 #include <linux/linkage.h>
21872 #include <asm/dwarf2.h>
21873+#include <asm/alternative-asm.h>
21874
21875 ALIGN
21876 copy_page_c:
21877 CFI_STARTPROC
21878 movl $4096/8,%ecx
21879 rep movsq
21880+ pax_force_retaddr
21881 ret
21882 CFI_ENDPROC
21883 ENDPROC(copy_page_c)
21884@@ -38,7 +40,7 @@ ENTRY(copy_page)
21885 movq 16 (%rsi), %rdx
21886 movq 24 (%rsi), %r8
21887 movq 32 (%rsi), %r9
21888- movq 40 (%rsi), %r10
21889+ movq 40 (%rsi), %r13
21890 movq 48 (%rsi), %r11
21891 movq 56 (%rsi), %r12
21892
21893@@ -49,7 +51,7 @@ ENTRY(copy_page)
21894 movq %rdx, 16 (%rdi)
21895 movq %r8, 24 (%rdi)
21896 movq %r9, 32 (%rdi)
21897- movq %r10, 40 (%rdi)
21898+ movq %r13, 40 (%rdi)
21899 movq %r11, 48 (%rdi)
21900 movq %r12, 56 (%rdi)
21901
21902@@ -68,7 +70,7 @@ ENTRY(copy_page)
21903 movq 16 (%rsi), %rdx
21904 movq 24 (%rsi), %r8
21905 movq 32 (%rsi), %r9
21906- movq 40 (%rsi), %r10
21907+ movq 40 (%rsi), %r13
21908 movq 48 (%rsi), %r11
21909 movq 56 (%rsi), %r12
21910
21911@@ -77,7 +79,7 @@ ENTRY(copy_page)
21912 movq %rdx, 16 (%rdi)
21913 movq %r8, 24 (%rdi)
21914 movq %r9, 32 (%rdi)
21915- movq %r10, 40 (%rdi)
21916+ movq %r13, 40 (%rdi)
21917 movq %r11, 48 (%rdi)
21918 movq %r12, 56 (%rdi)
21919
21920@@ -94,6 +96,7 @@ ENTRY(copy_page)
21921 CFI_RESTORE r13
21922 addq $3*8,%rsp
21923 CFI_ADJUST_CFA_OFFSET -3*8
21924+ pax_force_retaddr
21925 ret
21926 .Lcopy_page_end:
21927 CFI_ENDPROC
21928@@ -104,7 +107,7 @@ ENDPROC(copy_page)
21929
21930 #include <asm/cpufeature.h>
21931
21932- .section .altinstr_replacement,"ax"
21933+ .section .altinstr_replacement,"a"
21934 1: .byte 0xeb /* jmp <disp8> */
21935 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
21936 2:
21937diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
21938index af8debd..40c75f3 100644
21939--- a/arch/x86/lib/copy_user_64.S
21940+++ b/arch/x86/lib/copy_user_64.S
21941@@ -15,13 +15,15 @@
21942 #include <asm/asm-offsets.h>
21943 #include <asm/thread_info.h>
21944 #include <asm/cpufeature.h>
21945+#include <asm/pgtable.h>
21946+#include <asm/alternative-asm.h>
21947
21948 .macro ALTERNATIVE_JUMP feature,orig,alt
21949 0:
21950 .byte 0xe9 /* 32bit jump */
21951 .long \orig-1f /* by default jump to orig */
21952 1:
21953- .section .altinstr_replacement,"ax"
21954+ .section .altinstr_replacement,"a"
21955 2: .byte 0xe9 /* near jump with 32bit immediate */
21956 .long \alt-1b /* offset */ /* or alternatively to alt */
21957 .previous
21958@@ -64,55 +66,26 @@
21959 #endif
21960 .endm
21961
21962-/* Standard copy_to_user with segment limit checking */
21963-ENTRY(copy_to_user)
21964- CFI_STARTPROC
21965- GET_THREAD_INFO(%rax)
21966- movq %rdi,%rcx
21967- addq %rdx,%rcx
21968- jc bad_to_user
21969- cmpq TI_addr_limit(%rax),%rcx
21970- ja bad_to_user
21971- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21972- CFI_ENDPROC
21973-ENDPROC(copy_to_user)
21974-
21975-/* Standard copy_from_user with segment limit checking */
21976-ENTRY(copy_from_user)
21977- CFI_STARTPROC
21978- GET_THREAD_INFO(%rax)
21979- movq %rsi,%rcx
21980- addq %rdx,%rcx
21981- jc bad_from_user
21982- cmpq TI_addr_limit(%rax),%rcx
21983- ja bad_from_user
21984- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21985- CFI_ENDPROC
21986-ENDPROC(copy_from_user)
21987-
21988 ENTRY(copy_user_generic)
21989 CFI_STARTPROC
21990 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21991 CFI_ENDPROC
21992 ENDPROC(copy_user_generic)
21993
21994-ENTRY(__copy_from_user_inatomic)
21995- CFI_STARTPROC
21996- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
21997- CFI_ENDPROC
21998-ENDPROC(__copy_from_user_inatomic)
21999-
22000 .section .fixup,"ax"
22001 /* must zero dest */
22002 ENTRY(bad_from_user)
22003 bad_from_user:
22004 CFI_STARTPROC
22005+ testl %edx,%edx
22006+ js bad_to_user
22007 movl %edx,%ecx
22008 xorl %eax,%eax
22009 rep
22010 stosb
22011 bad_to_user:
22012 movl %edx,%eax
22013+ pax_force_retaddr
22014 ret
22015 CFI_ENDPROC
22016 ENDPROC(bad_from_user)
22017@@ -142,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
22018 jz 17f
22019 1: movq (%rsi),%r8
22020 2: movq 1*8(%rsi),%r9
22021-3: movq 2*8(%rsi),%r10
22022+3: movq 2*8(%rsi),%rax
22023 4: movq 3*8(%rsi),%r11
22024 5: movq %r8,(%rdi)
22025 6: movq %r9,1*8(%rdi)
22026-7: movq %r10,2*8(%rdi)
22027+7: movq %rax,2*8(%rdi)
22028 8: movq %r11,3*8(%rdi)
22029 9: movq 4*8(%rsi),%r8
22030 10: movq 5*8(%rsi),%r9
22031-11: movq 6*8(%rsi),%r10
22032+11: movq 6*8(%rsi),%rax
22033 12: movq 7*8(%rsi),%r11
22034 13: movq %r8,4*8(%rdi)
22035 14: movq %r9,5*8(%rdi)
22036-15: movq %r10,6*8(%rdi)
22037+15: movq %rax,6*8(%rdi)
22038 16: movq %r11,7*8(%rdi)
22039 leaq 64(%rsi),%rsi
22040 leaq 64(%rdi),%rdi
22041@@ -180,6 +153,7 @@ ENTRY(copy_user_generic_unrolled)
22042 decl %ecx
22043 jnz 21b
22044 23: xor %eax,%eax
22045+ pax_force_retaddr
22046 ret
22047
22048 .section .fixup,"ax"
22049@@ -252,6 +226,7 @@ ENTRY(copy_user_generic_string)
22050 3: rep
22051 movsb
22052 4: xorl %eax,%eax
22053+ pax_force_retaddr
22054 ret
22055
22056 .section .fixup,"ax"
22057diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
22058index cb0c112..e3a6895 100644
22059--- a/arch/x86/lib/copy_user_nocache_64.S
22060+++ b/arch/x86/lib/copy_user_nocache_64.S
22061@@ -8,12 +8,14 @@
22062
22063 #include <linux/linkage.h>
22064 #include <asm/dwarf2.h>
22065+#include <asm/alternative-asm.h>
22066
22067 #define FIX_ALIGNMENT 1
22068
22069 #include <asm/current.h>
22070 #include <asm/asm-offsets.h>
22071 #include <asm/thread_info.h>
22072+#include <asm/pgtable.h>
22073
22074 .macro ALIGN_DESTINATION
22075 #ifdef FIX_ALIGNMENT
22076@@ -50,6 +52,15 @@
22077 */
22078 ENTRY(__copy_user_nocache)
22079 CFI_STARTPROC
22080+
22081+#ifdef CONFIG_PAX_MEMORY_UDEREF
22082+ mov $PAX_USER_SHADOW_BASE,%rcx
22083+ cmp %rcx,%rsi
22084+ jae 1f
22085+ add %rcx,%rsi
22086+1:
22087+#endif
22088+
22089 cmpl $8,%edx
22090 jb 20f /* less then 8 bytes, go to byte copy loop */
22091 ALIGN_DESTINATION
22092@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
22093 jz 17f
22094 1: movq (%rsi),%r8
22095 2: movq 1*8(%rsi),%r9
22096-3: movq 2*8(%rsi),%r10
22097+3: movq 2*8(%rsi),%rax
22098 4: movq 3*8(%rsi),%r11
22099 5: movnti %r8,(%rdi)
22100 6: movnti %r9,1*8(%rdi)
22101-7: movnti %r10,2*8(%rdi)
22102+7: movnti %rax,2*8(%rdi)
22103 8: movnti %r11,3*8(%rdi)
22104 9: movq 4*8(%rsi),%r8
22105 10: movq 5*8(%rsi),%r9
22106-11: movq 6*8(%rsi),%r10
22107+11: movq 6*8(%rsi),%rax
22108 12: movq 7*8(%rsi),%r11
22109 13: movnti %r8,4*8(%rdi)
22110 14: movnti %r9,5*8(%rdi)
22111-15: movnti %r10,6*8(%rdi)
22112+15: movnti %rax,6*8(%rdi)
22113 16: movnti %r11,7*8(%rdi)
22114 leaq 64(%rsi),%rsi
22115 leaq 64(%rdi),%rdi
22116@@ -98,6 +109,7 @@ ENTRY(__copy_user_nocache)
22117 jnz 21b
22118 23: xorl %eax,%eax
22119 sfence
22120+ pax_force_retaddr
22121 ret
22122
22123 .section .fixup,"ax"
22124diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
22125index f0dba36..48cb4d6 100644
22126--- a/arch/x86/lib/csum-copy_64.S
22127+++ b/arch/x86/lib/csum-copy_64.S
22128@@ -8,6 +8,7 @@
22129 #include <linux/linkage.h>
22130 #include <asm/dwarf2.h>
22131 #include <asm/errno.h>
22132+#include <asm/alternative-asm.h>
22133
22134 /*
22135 * Checksum copy with exception handling.
22136@@ -228,6 +229,7 @@ ENTRY(csum_partial_copy_generic)
22137 CFI_RESTORE rbp
22138 addq $7*8,%rsp
22139 CFI_ADJUST_CFA_OFFSET -7*8
22140+ pax_force_retaddr 0, 1
22141 ret
22142 CFI_RESTORE_STATE
22143
22144diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
22145index 459b58a..9570bc7 100644
22146--- a/arch/x86/lib/csum-wrappers_64.c
22147+++ b/arch/x86/lib/csum-wrappers_64.c
22148@@ -52,7 +52,13 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
22149 len -= 2;
22150 }
22151 }
22152- isum = csum_partial_copy_generic((__force const void *)src,
22153+
22154+#ifdef CONFIG_PAX_MEMORY_UDEREF
22155+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
22156+ src += PAX_USER_SHADOW_BASE;
22157+#endif
22158+
22159+ isum = csum_partial_copy_generic((const void __force_kernel *)src,
22160 dst, len, isum, errp, NULL);
22161 if (unlikely(*errp))
22162 goto out_err;
22163@@ -105,7 +111,13 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
22164 }
22165
22166 *errp = 0;
22167- return csum_partial_copy_generic(src, (void __force *)dst,
22168+
22169+#ifdef CONFIG_PAX_MEMORY_UDEREF
22170+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
22171+ dst += PAX_USER_SHADOW_BASE;
22172+#endif
22173+
22174+ return csum_partial_copy_generic(src, (void __force_kernel *)dst,
22175 len, isum, NULL, errp);
22176 }
22177 EXPORT_SYMBOL(csum_partial_copy_to_user);
22178diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
22179index 51f1504..ddac4c1 100644
22180--- a/arch/x86/lib/getuser.S
22181+++ b/arch/x86/lib/getuser.S
22182@@ -33,15 +33,38 @@
22183 #include <asm/asm-offsets.h>
22184 #include <asm/thread_info.h>
22185 #include <asm/asm.h>
22186+#include <asm/segment.h>
22187+#include <asm/pgtable.h>
22188+#include <asm/alternative-asm.h>
22189+
22190+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22191+#define __copyuser_seg gs;
22192+#else
22193+#define __copyuser_seg
22194+#endif
22195
22196 .text
22197 ENTRY(__get_user_1)
22198 CFI_STARTPROC
22199+
22200+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22201 GET_THREAD_INFO(%_ASM_DX)
22202 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22203 jae bad_get_user
22204-1: movzb (%_ASM_AX),%edx
22205+
22206+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22207+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22208+ cmp %_ASM_DX,%_ASM_AX
22209+ jae 1234f
22210+ add %_ASM_DX,%_ASM_AX
22211+1234:
22212+#endif
22213+
22214+#endif
22215+
22216+1: __copyuser_seg movzb (%_ASM_AX),%edx
22217 xor %eax,%eax
22218+ pax_force_retaddr
22219 ret
22220 CFI_ENDPROC
22221 ENDPROC(__get_user_1)
22222@@ -49,12 +72,26 @@ ENDPROC(__get_user_1)
22223 ENTRY(__get_user_2)
22224 CFI_STARTPROC
22225 add $1,%_ASM_AX
22226+
22227+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22228 jc bad_get_user
22229 GET_THREAD_INFO(%_ASM_DX)
22230 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22231 jae bad_get_user
22232-2: movzwl -1(%_ASM_AX),%edx
22233+
22234+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22235+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22236+ cmp %_ASM_DX,%_ASM_AX
22237+ jae 1234f
22238+ add %_ASM_DX,%_ASM_AX
22239+1234:
22240+#endif
22241+
22242+#endif
22243+
22244+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
22245 xor %eax,%eax
22246+ pax_force_retaddr
22247 ret
22248 CFI_ENDPROC
22249 ENDPROC(__get_user_2)
22250@@ -62,12 +99,26 @@ ENDPROC(__get_user_2)
22251 ENTRY(__get_user_4)
22252 CFI_STARTPROC
22253 add $3,%_ASM_AX
22254+
22255+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22256 jc bad_get_user
22257 GET_THREAD_INFO(%_ASM_DX)
22258 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22259 jae bad_get_user
22260-3: mov -3(%_ASM_AX),%edx
22261+
22262+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22263+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22264+ cmp %_ASM_DX,%_ASM_AX
22265+ jae 1234f
22266+ add %_ASM_DX,%_ASM_AX
22267+1234:
22268+#endif
22269+
22270+#endif
22271+
22272+3: __copyuser_seg mov -3(%_ASM_AX),%edx
22273 xor %eax,%eax
22274+ pax_force_retaddr
22275 ret
22276 CFI_ENDPROC
22277 ENDPROC(__get_user_4)
22278@@ -80,8 +131,18 @@ ENTRY(__get_user_8)
22279 GET_THREAD_INFO(%_ASM_DX)
22280 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
22281 jae bad_get_user
22282+
22283+#ifdef CONFIG_PAX_MEMORY_UDEREF
22284+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
22285+ cmp %_ASM_DX,%_ASM_AX
22286+ jae 1234f
22287+ add %_ASM_DX,%_ASM_AX
22288+1234:
22289+#endif
22290+
22291 4: movq -7(%_ASM_AX),%_ASM_DX
22292 xor %eax,%eax
22293+ pax_force_retaddr
22294 ret
22295 CFI_ENDPROC
22296 ENDPROC(__get_user_8)
22297@@ -91,6 +152,7 @@ bad_get_user:
22298 CFI_STARTPROC
22299 xor %edx,%edx
22300 mov $(-EFAULT),%_ASM_AX
22301+ pax_force_retaddr
22302 ret
22303 CFI_ENDPROC
22304 END(bad_get_user)
22305diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
22306index 05a95e7..326f2fa 100644
22307--- a/arch/x86/lib/iomap_copy_64.S
22308+++ b/arch/x86/lib/iomap_copy_64.S
22309@@ -17,6 +17,7 @@
22310
22311 #include <linux/linkage.h>
22312 #include <asm/dwarf2.h>
22313+#include <asm/alternative-asm.h>
22314
22315 /*
22316 * override generic version in lib/iomap_copy.c
22317@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
22318 CFI_STARTPROC
22319 movl %edx,%ecx
22320 rep movsd
22321+ pax_force_retaddr
22322 ret
22323 CFI_ENDPROC
22324 ENDPROC(__iowrite32_copy)
22325diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
22326index ad5441e..610e351 100644
22327--- a/arch/x86/lib/memcpy_64.S
22328+++ b/arch/x86/lib/memcpy_64.S
22329@@ -4,6 +4,7 @@
22330
22331 #include <asm/cpufeature.h>
22332 #include <asm/dwarf2.h>
22333+#include <asm/alternative-asm.h>
22334
22335 /*
22336 * memcpy - Copy a memory block.
22337@@ -34,6 +35,7 @@ memcpy_c:
22338 rep movsq
22339 movl %edx, %ecx
22340 rep movsb
22341+ pax_force_retaddr
22342 ret
22343 CFI_ENDPROC
22344 ENDPROC(memcpy_c)
22345@@ -118,6 +120,7 @@ ENTRY(memcpy)
22346 jnz .Lloop_1
22347
22348 .Lend:
22349+ pax_force_retaddr 0, 1
22350 ret
22351 CFI_ENDPROC
22352 ENDPROC(memcpy)
22353@@ -128,7 +131,7 @@ ENDPROC(__memcpy)
22354 * It is also a lot simpler. Use this when possible:
22355 */
22356
22357- .section .altinstr_replacement, "ax"
22358+ .section .altinstr_replacement, "a"
22359 1: .byte 0xeb /* jmp <disp8> */
22360 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
22361 2:
22362diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
22363index 2c59481..7e9ba4e 100644
22364--- a/arch/x86/lib/memset_64.S
22365+++ b/arch/x86/lib/memset_64.S
22366@@ -2,6 +2,7 @@
22367
22368 #include <linux/linkage.h>
22369 #include <asm/dwarf2.h>
22370+#include <asm/alternative-asm.h>
22371
22372 /*
22373 * ISO C memset - set a memory block to a byte value.
22374@@ -28,6 +29,7 @@ memset_c:
22375 movl %r8d,%ecx
22376 rep stosb
22377 movq %r9,%rax
22378+ pax_force_retaddr
22379 ret
22380 CFI_ENDPROC
22381 ENDPROC(memset_c)
22382@@ -35,13 +37,13 @@ ENDPROC(memset_c)
22383 ENTRY(memset)
22384 ENTRY(__memset)
22385 CFI_STARTPROC
22386- movq %rdi,%r10
22387 movq %rdx,%r11
22388
22389 /* expand byte value */
22390 movzbl %sil,%ecx
22391 movabs $0x0101010101010101,%rax
22392 mul %rcx /* with rax, clobbers rdx */
22393+ movq %rdi,%rdx
22394
22395 /* align dst */
22396 movl %edi,%r9d
22397@@ -95,7 +97,8 @@ ENTRY(__memset)
22398 jnz .Lloop_1
22399
22400 .Lende:
22401- movq %r10,%rax
22402+ movq %rdx,%rax
22403+ pax_force_retaddr
22404 ret
22405
22406 CFI_RESTORE_STATE
22407@@ -118,7 +121,7 @@ ENDPROC(__memset)
22408
22409 #include <asm/cpufeature.h>
22410
22411- .section .altinstr_replacement,"ax"
22412+ .section .altinstr_replacement,"a"
22413 1: .byte 0xeb /* jmp <disp8> */
22414 .byte (memset_c - memset) - (2f - 1b) /* offset */
22415 2:
22416diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
22417index c9f2d9b..e7fd2c0 100644
22418--- a/arch/x86/lib/mmx_32.c
22419+++ b/arch/x86/lib/mmx_32.c
22420@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22421 {
22422 void *p;
22423 int i;
22424+ unsigned long cr0;
22425
22426 if (unlikely(in_interrupt()))
22427 return __memcpy(to, from, len);
22428@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
22429 kernel_fpu_begin();
22430
22431 __asm__ __volatile__ (
22432- "1: prefetch (%0)\n" /* This set is 28 bytes */
22433- " prefetch 64(%0)\n"
22434- " prefetch 128(%0)\n"
22435- " prefetch 192(%0)\n"
22436- " prefetch 256(%0)\n"
22437+ "1: prefetch (%1)\n" /* This set is 28 bytes */
22438+ " prefetch 64(%1)\n"
22439+ " prefetch 128(%1)\n"
22440+ " prefetch 192(%1)\n"
22441+ " prefetch 256(%1)\n"
22442 "2: \n"
22443 ".section .fixup, \"ax\"\n"
22444- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22445+ "3: \n"
22446+
22447+#ifdef CONFIG_PAX_KERNEXEC
22448+ " movl %%cr0, %0\n"
22449+ " movl %0, %%eax\n"
22450+ " andl $0xFFFEFFFF, %%eax\n"
22451+ " movl %%eax, %%cr0\n"
22452+#endif
22453+
22454+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22455+
22456+#ifdef CONFIG_PAX_KERNEXEC
22457+ " movl %0, %%cr0\n"
22458+#endif
22459+
22460 " jmp 2b\n"
22461 ".previous\n"
22462 _ASM_EXTABLE(1b, 3b)
22463- : : "r" (from));
22464+ : "=&r" (cr0) : "r" (from) : "ax");
22465
22466 for ( ; i > 5; i--) {
22467 __asm__ __volatile__ (
22468- "1: prefetch 320(%0)\n"
22469- "2: movq (%0), %%mm0\n"
22470- " movq 8(%0), %%mm1\n"
22471- " movq 16(%0), %%mm2\n"
22472- " movq 24(%0), %%mm3\n"
22473- " movq %%mm0, (%1)\n"
22474- " movq %%mm1, 8(%1)\n"
22475- " movq %%mm2, 16(%1)\n"
22476- " movq %%mm3, 24(%1)\n"
22477- " movq 32(%0), %%mm0\n"
22478- " movq 40(%0), %%mm1\n"
22479- " movq 48(%0), %%mm2\n"
22480- " movq 56(%0), %%mm3\n"
22481- " movq %%mm0, 32(%1)\n"
22482- " movq %%mm1, 40(%1)\n"
22483- " movq %%mm2, 48(%1)\n"
22484- " movq %%mm3, 56(%1)\n"
22485+ "1: prefetch 320(%1)\n"
22486+ "2: movq (%1), %%mm0\n"
22487+ " movq 8(%1), %%mm1\n"
22488+ " movq 16(%1), %%mm2\n"
22489+ " movq 24(%1), %%mm3\n"
22490+ " movq %%mm0, (%2)\n"
22491+ " movq %%mm1, 8(%2)\n"
22492+ " movq %%mm2, 16(%2)\n"
22493+ " movq %%mm3, 24(%2)\n"
22494+ " movq 32(%1), %%mm0\n"
22495+ " movq 40(%1), %%mm1\n"
22496+ " movq 48(%1), %%mm2\n"
22497+ " movq 56(%1), %%mm3\n"
22498+ " movq %%mm0, 32(%2)\n"
22499+ " movq %%mm1, 40(%2)\n"
22500+ " movq %%mm2, 48(%2)\n"
22501+ " movq %%mm3, 56(%2)\n"
22502 ".section .fixup, \"ax\"\n"
22503- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22504+ "3:\n"
22505+
22506+#ifdef CONFIG_PAX_KERNEXEC
22507+ " movl %%cr0, %0\n"
22508+ " movl %0, %%eax\n"
22509+ " andl $0xFFFEFFFF, %%eax\n"
22510+ " movl %%eax, %%cr0\n"
22511+#endif
22512+
22513+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22514+
22515+#ifdef CONFIG_PAX_KERNEXEC
22516+ " movl %0, %%cr0\n"
22517+#endif
22518+
22519 " jmp 2b\n"
22520 ".previous\n"
22521 _ASM_EXTABLE(1b, 3b)
22522- : : "r" (from), "r" (to) : "memory");
22523+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22524
22525 from += 64;
22526 to += 64;
22527@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
22528 static void fast_copy_page(void *to, void *from)
22529 {
22530 int i;
22531+ unsigned long cr0;
22532
22533 kernel_fpu_begin();
22534
22535@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
22536 * but that is for later. -AV
22537 */
22538 __asm__ __volatile__(
22539- "1: prefetch (%0)\n"
22540- " prefetch 64(%0)\n"
22541- " prefetch 128(%0)\n"
22542- " prefetch 192(%0)\n"
22543- " prefetch 256(%0)\n"
22544+ "1: prefetch (%1)\n"
22545+ " prefetch 64(%1)\n"
22546+ " prefetch 128(%1)\n"
22547+ " prefetch 192(%1)\n"
22548+ " prefetch 256(%1)\n"
22549 "2: \n"
22550 ".section .fixup, \"ax\"\n"
22551- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22552+ "3: \n"
22553+
22554+#ifdef CONFIG_PAX_KERNEXEC
22555+ " movl %%cr0, %0\n"
22556+ " movl %0, %%eax\n"
22557+ " andl $0xFFFEFFFF, %%eax\n"
22558+ " movl %%eax, %%cr0\n"
22559+#endif
22560+
22561+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22562+
22563+#ifdef CONFIG_PAX_KERNEXEC
22564+ " movl %0, %%cr0\n"
22565+#endif
22566+
22567 " jmp 2b\n"
22568 ".previous\n"
22569- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22570+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22571
22572 for (i = 0; i < (4096-320)/64; i++) {
22573 __asm__ __volatile__ (
22574- "1: prefetch 320(%0)\n"
22575- "2: movq (%0), %%mm0\n"
22576- " movntq %%mm0, (%1)\n"
22577- " movq 8(%0), %%mm1\n"
22578- " movntq %%mm1, 8(%1)\n"
22579- " movq 16(%0), %%mm2\n"
22580- " movntq %%mm2, 16(%1)\n"
22581- " movq 24(%0), %%mm3\n"
22582- " movntq %%mm3, 24(%1)\n"
22583- " movq 32(%0), %%mm4\n"
22584- " movntq %%mm4, 32(%1)\n"
22585- " movq 40(%0), %%mm5\n"
22586- " movntq %%mm5, 40(%1)\n"
22587- " movq 48(%0), %%mm6\n"
22588- " movntq %%mm6, 48(%1)\n"
22589- " movq 56(%0), %%mm7\n"
22590- " movntq %%mm7, 56(%1)\n"
22591+ "1: prefetch 320(%1)\n"
22592+ "2: movq (%1), %%mm0\n"
22593+ " movntq %%mm0, (%2)\n"
22594+ " movq 8(%1), %%mm1\n"
22595+ " movntq %%mm1, 8(%2)\n"
22596+ " movq 16(%1), %%mm2\n"
22597+ " movntq %%mm2, 16(%2)\n"
22598+ " movq 24(%1), %%mm3\n"
22599+ " movntq %%mm3, 24(%2)\n"
22600+ " movq 32(%1), %%mm4\n"
22601+ " movntq %%mm4, 32(%2)\n"
22602+ " movq 40(%1), %%mm5\n"
22603+ " movntq %%mm5, 40(%2)\n"
22604+ " movq 48(%1), %%mm6\n"
22605+ " movntq %%mm6, 48(%2)\n"
22606+ " movq 56(%1), %%mm7\n"
22607+ " movntq %%mm7, 56(%2)\n"
22608 ".section .fixup, \"ax\"\n"
22609- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22610+ "3:\n"
22611+
22612+#ifdef CONFIG_PAX_KERNEXEC
22613+ " movl %%cr0, %0\n"
22614+ " movl %0, %%eax\n"
22615+ " andl $0xFFFEFFFF, %%eax\n"
22616+ " movl %%eax, %%cr0\n"
22617+#endif
22618+
22619+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22620+
22621+#ifdef CONFIG_PAX_KERNEXEC
22622+ " movl %0, %%cr0\n"
22623+#endif
22624+
22625 " jmp 2b\n"
22626 ".previous\n"
22627- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
22628+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22629
22630 from += 64;
22631 to += 64;
22632@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
22633 static void fast_copy_page(void *to, void *from)
22634 {
22635 int i;
22636+ unsigned long cr0;
22637
22638 kernel_fpu_begin();
22639
22640 __asm__ __volatile__ (
22641- "1: prefetch (%0)\n"
22642- " prefetch 64(%0)\n"
22643- " prefetch 128(%0)\n"
22644- " prefetch 192(%0)\n"
22645- " prefetch 256(%0)\n"
22646+ "1: prefetch (%1)\n"
22647+ " prefetch 64(%1)\n"
22648+ " prefetch 128(%1)\n"
22649+ " prefetch 192(%1)\n"
22650+ " prefetch 256(%1)\n"
22651 "2: \n"
22652 ".section .fixup, \"ax\"\n"
22653- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22654+ "3: \n"
22655+
22656+#ifdef CONFIG_PAX_KERNEXEC
22657+ " movl %%cr0, %0\n"
22658+ " movl %0, %%eax\n"
22659+ " andl $0xFFFEFFFF, %%eax\n"
22660+ " movl %%eax, %%cr0\n"
22661+#endif
22662+
22663+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
22664+
22665+#ifdef CONFIG_PAX_KERNEXEC
22666+ " movl %0, %%cr0\n"
22667+#endif
22668+
22669 " jmp 2b\n"
22670 ".previous\n"
22671- _ASM_EXTABLE(1b, 3b) : : "r" (from));
22672+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
22673
22674 for (i = 0; i < 4096/64; i++) {
22675 __asm__ __volatile__ (
22676- "1: prefetch 320(%0)\n"
22677- "2: movq (%0), %%mm0\n"
22678- " movq 8(%0), %%mm1\n"
22679- " movq 16(%0), %%mm2\n"
22680- " movq 24(%0), %%mm3\n"
22681- " movq %%mm0, (%1)\n"
22682- " movq %%mm1, 8(%1)\n"
22683- " movq %%mm2, 16(%1)\n"
22684- " movq %%mm3, 24(%1)\n"
22685- " movq 32(%0), %%mm0\n"
22686- " movq 40(%0), %%mm1\n"
22687- " movq 48(%0), %%mm2\n"
22688- " movq 56(%0), %%mm3\n"
22689- " movq %%mm0, 32(%1)\n"
22690- " movq %%mm1, 40(%1)\n"
22691- " movq %%mm2, 48(%1)\n"
22692- " movq %%mm3, 56(%1)\n"
22693+ "1: prefetch 320(%1)\n"
22694+ "2: movq (%1), %%mm0\n"
22695+ " movq 8(%1), %%mm1\n"
22696+ " movq 16(%1), %%mm2\n"
22697+ " movq 24(%1), %%mm3\n"
22698+ " movq %%mm0, (%2)\n"
22699+ " movq %%mm1, 8(%2)\n"
22700+ " movq %%mm2, 16(%2)\n"
22701+ " movq %%mm3, 24(%2)\n"
22702+ " movq 32(%1), %%mm0\n"
22703+ " movq 40(%1), %%mm1\n"
22704+ " movq 48(%1), %%mm2\n"
22705+ " movq 56(%1), %%mm3\n"
22706+ " movq %%mm0, 32(%2)\n"
22707+ " movq %%mm1, 40(%2)\n"
22708+ " movq %%mm2, 48(%2)\n"
22709+ " movq %%mm3, 56(%2)\n"
22710 ".section .fixup, \"ax\"\n"
22711- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22712+ "3:\n"
22713+
22714+#ifdef CONFIG_PAX_KERNEXEC
22715+ " movl %%cr0, %0\n"
22716+ " movl %0, %%eax\n"
22717+ " andl $0xFFFEFFFF, %%eax\n"
22718+ " movl %%eax, %%cr0\n"
22719+#endif
22720+
22721+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
22722+
22723+#ifdef CONFIG_PAX_KERNEXEC
22724+ " movl %0, %%cr0\n"
22725+#endif
22726+
22727 " jmp 2b\n"
22728 ".previous\n"
22729 _ASM_EXTABLE(1b, 3b)
22730- : : "r" (from), "r" (to) : "memory");
22731+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
22732
22733 from += 64;
22734 to += 64;
22735diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
22736index 69fa106..adda88b 100644
22737--- a/arch/x86/lib/msr-reg.S
22738+++ b/arch/x86/lib/msr-reg.S
22739@@ -3,6 +3,7 @@
22740 #include <asm/dwarf2.h>
22741 #include <asm/asm.h>
22742 #include <asm/msr.h>
22743+#include <asm/alternative-asm.h>
22744
22745 #ifdef CONFIG_X86_64
22746 /*
22747@@ -16,7 +17,7 @@ ENTRY(native_\op\()_safe_regs)
22748 CFI_STARTPROC
22749 pushq_cfi %rbx
22750 pushq_cfi %rbp
22751- movq %rdi, %r10 /* Save pointer */
22752+ movq %rdi, %r9 /* Save pointer */
22753 xorl %r11d, %r11d /* Return value */
22754 movl (%rdi), %eax
22755 movl 4(%rdi), %ecx
22756@@ -27,16 +28,17 @@ ENTRY(native_\op\()_safe_regs)
22757 movl 28(%rdi), %edi
22758 CFI_REMEMBER_STATE
22759 1: \op
22760-2: movl %eax, (%r10)
22761+2: movl %eax, (%r9)
22762 movl %r11d, %eax /* Return value */
22763- movl %ecx, 4(%r10)
22764- movl %edx, 8(%r10)
22765- movl %ebx, 12(%r10)
22766- movl %ebp, 20(%r10)
22767- movl %esi, 24(%r10)
22768- movl %edi, 28(%r10)
22769+ movl %ecx, 4(%r9)
22770+ movl %edx, 8(%r9)
22771+ movl %ebx, 12(%r9)
22772+ movl %ebp, 20(%r9)
22773+ movl %esi, 24(%r9)
22774+ movl %edi, 28(%r9)
22775 popq_cfi %rbp
22776 popq_cfi %rbx
22777+ pax_force_retaddr
22778 ret
22779 3:
22780 CFI_RESTORE_STATE
22781diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
22782index 36b0d15..d381858 100644
22783--- a/arch/x86/lib/putuser.S
22784+++ b/arch/x86/lib/putuser.S
22785@@ -15,7 +15,9 @@
22786 #include <asm/thread_info.h>
22787 #include <asm/errno.h>
22788 #include <asm/asm.h>
22789-
22790+#include <asm/segment.h>
22791+#include <asm/pgtable.h>
22792+#include <asm/alternative-asm.h>
22793
22794 /*
22795 * __put_user_X
22796@@ -29,52 +31,119 @@
22797 * as they get called from within inline assembly.
22798 */
22799
22800-#define ENTER CFI_STARTPROC ; \
22801- GET_THREAD_INFO(%_ASM_BX)
22802-#define EXIT ret ; \
22803+#define ENTER CFI_STARTPROC
22804+#define EXIT pax_force_retaddr; ret ; \
22805 CFI_ENDPROC
22806
22807+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22808+#define _DEST %_ASM_CX,%_ASM_BX
22809+#else
22810+#define _DEST %_ASM_CX
22811+#endif
22812+
22813+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
22814+#define __copyuser_seg gs;
22815+#else
22816+#define __copyuser_seg
22817+#endif
22818+
22819 .text
22820 ENTRY(__put_user_1)
22821 ENTER
22822+
22823+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22824+ GET_THREAD_INFO(%_ASM_BX)
22825 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
22826 jae bad_put_user
22827-1: movb %al,(%_ASM_CX)
22828+
22829+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22830+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22831+ cmp %_ASM_BX,%_ASM_CX
22832+ jb 1234f
22833+ xor %ebx,%ebx
22834+1234:
22835+#endif
22836+
22837+#endif
22838+
22839+1: __copyuser_seg movb %al,(_DEST)
22840 xor %eax,%eax
22841 EXIT
22842 ENDPROC(__put_user_1)
22843
22844 ENTRY(__put_user_2)
22845 ENTER
22846+
22847+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22848+ GET_THREAD_INFO(%_ASM_BX)
22849 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22850 sub $1,%_ASM_BX
22851 cmp %_ASM_BX,%_ASM_CX
22852 jae bad_put_user
22853-2: movw %ax,(%_ASM_CX)
22854+
22855+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22856+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22857+ cmp %_ASM_BX,%_ASM_CX
22858+ jb 1234f
22859+ xor %ebx,%ebx
22860+1234:
22861+#endif
22862+
22863+#endif
22864+
22865+2: __copyuser_seg movw %ax,(_DEST)
22866 xor %eax,%eax
22867 EXIT
22868 ENDPROC(__put_user_2)
22869
22870 ENTRY(__put_user_4)
22871 ENTER
22872+
22873+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22874+ GET_THREAD_INFO(%_ASM_BX)
22875 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22876 sub $3,%_ASM_BX
22877 cmp %_ASM_BX,%_ASM_CX
22878 jae bad_put_user
22879-3: movl %eax,(%_ASM_CX)
22880+
22881+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22882+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22883+ cmp %_ASM_BX,%_ASM_CX
22884+ jb 1234f
22885+ xor %ebx,%ebx
22886+1234:
22887+#endif
22888+
22889+#endif
22890+
22891+3: __copyuser_seg movl %eax,(_DEST)
22892 xor %eax,%eax
22893 EXIT
22894 ENDPROC(__put_user_4)
22895
22896 ENTRY(__put_user_8)
22897 ENTER
22898+
22899+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
22900+ GET_THREAD_INFO(%_ASM_BX)
22901 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
22902 sub $7,%_ASM_BX
22903 cmp %_ASM_BX,%_ASM_CX
22904 jae bad_put_user
22905-4: mov %_ASM_AX,(%_ASM_CX)
22906+
22907+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22908+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
22909+ cmp %_ASM_BX,%_ASM_CX
22910+ jb 1234f
22911+ xor %ebx,%ebx
22912+1234:
22913+#endif
22914+
22915+#endif
22916+
22917+4: __copyuser_seg mov %_ASM_AX,(_DEST)
22918 #ifdef CONFIG_X86_32
22919-5: movl %edx,4(%_ASM_CX)
22920+5: __copyuser_seg movl %edx,4(_DEST)
22921 #endif
22922 xor %eax,%eax
22923 EXIT
22924diff --git a/arch/x86/lib/rwlock_64.S b/arch/x86/lib/rwlock_64.S
22925index 05ea55f..6345b9a 100644
22926--- a/arch/x86/lib/rwlock_64.S
22927+++ b/arch/x86/lib/rwlock_64.S
22928@@ -2,6 +2,7 @@
22929
22930 #include <linux/linkage.h>
22931 #include <asm/rwlock.h>
22932+#include <asm/asm.h>
22933 #include <asm/alternative-asm.h>
22934 #include <asm/dwarf2.h>
22935
22936@@ -10,13 +11,34 @@ ENTRY(__write_lock_failed)
22937 CFI_STARTPROC
22938 LOCK_PREFIX
22939 addl $RW_LOCK_BIAS,(%rdi)
22940+
22941+#ifdef CONFIG_PAX_REFCOUNT
22942+ jno 1234f
22943+ LOCK_PREFIX
22944+ subl $RW_LOCK_BIAS,(%rdi)
22945+ int $4
22946+1234:
22947+ _ASM_EXTABLE(1234b, 1234b)
22948+#endif
22949+
22950 1: rep
22951 nop
22952 cmpl $RW_LOCK_BIAS,(%rdi)
22953 jne 1b
22954 LOCK_PREFIX
22955 subl $RW_LOCK_BIAS,(%rdi)
22956+
22957+#ifdef CONFIG_PAX_REFCOUNT
22958+ jno 1234f
22959+ LOCK_PREFIX
22960+ addl $RW_LOCK_BIAS,(%rdi)
22961+ int $4
22962+1234:
22963+ _ASM_EXTABLE(1234b, 1234b)
22964+#endif
22965+
22966 jnz __write_lock_failed
22967+ pax_force_retaddr
22968 ret
22969 CFI_ENDPROC
22970 END(__write_lock_failed)
22971@@ -26,13 +48,34 @@ ENTRY(__read_lock_failed)
22972 CFI_STARTPROC
22973 LOCK_PREFIX
22974 incl (%rdi)
22975+
22976+#ifdef CONFIG_PAX_REFCOUNT
22977+ jno 1234f
22978+ LOCK_PREFIX
22979+ decl (%rdi)
22980+ int $4
22981+1234:
22982+ _ASM_EXTABLE(1234b, 1234b)
22983+#endif
22984+
22985 1: rep
22986 nop
22987 cmpl $1,(%rdi)
22988 js 1b
22989 LOCK_PREFIX
22990 decl (%rdi)
22991+
22992+#ifdef CONFIG_PAX_REFCOUNT
22993+ jno 1234f
22994+ LOCK_PREFIX
22995+ incl (%rdi)
22996+ int $4
22997+1234:
22998+ _ASM_EXTABLE(1234b, 1234b)
22999+#endif
23000+
23001 js __read_lock_failed
23002+ pax_force_retaddr
23003 ret
23004 CFI_ENDPROC
23005 END(__read_lock_failed)
23006diff --git a/arch/x86/lib/rwsem_64.S b/arch/x86/lib/rwsem_64.S
23007index 15acecf..f768b10 100644
23008--- a/arch/x86/lib/rwsem_64.S
23009+++ b/arch/x86/lib/rwsem_64.S
23010@@ -48,6 +48,7 @@ ENTRY(call_rwsem_down_read_failed)
23011 call rwsem_down_read_failed
23012 popq %rdx
23013 restore_common_regs
23014+ pax_force_retaddr
23015 ret
23016 ENDPROC(call_rwsem_down_read_failed)
23017
23018@@ -56,6 +57,7 @@ ENTRY(call_rwsem_down_write_failed)
23019 movq %rax,%rdi
23020 call rwsem_down_write_failed
23021 restore_common_regs
23022+ pax_force_retaddr
23023 ret
23024 ENDPROC(call_rwsem_down_write_failed)
23025
23026@@ -66,7 +68,8 @@ ENTRY(call_rwsem_wake)
23027 movq %rax,%rdi
23028 call rwsem_wake
23029 restore_common_regs
23030-1: ret
23031+1: pax_force_retaddr
23032+ ret
23033 ENDPROC(call_rwsem_wake)
23034
23035 /* Fix up special calling conventions */
23036@@ -77,5 +80,6 @@ ENTRY(call_rwsem_downgrade_wake)
23037 call rwsem_downgrade_wake
23038 popq %rdx
23039 restore_common_regs
23040+ pax_force_retaddr
23041 ret
23042 ENDPROC(call_rwsem_downgrade_wake)
23043diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
23044index bf9a7d5..fb06ab5 100644
23045--- a/arch/x86/lib/thunk_64.S
23046+++ b/arch/x86/lib/thunk_64.S
23047@@ -10,7 +10,8 @@
23048 #include <asm/dwarf2.h>
23049 #include <asm/calling.h>
23050 #include <asm/rwlock.h>
23051-
23052+ #include <asm/alternative-asm.h>
23053+
23054 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
23055 .macro thunk name,func
23056 .globl \name
23057@@ -70,6 +71,7 @@
23058 SAVE_ARGS
23059 restore:
23060 RESTORE_ARGS
23061+ pax_force_retaddr
23062 ret
23063 CFI_ENDPROC
23064
23065@@ -77,5 +79,6 @@ restore:
23066 SAVE_ARGS
23067 restore_norax:
23068 RESTORE_ARGS 1
23069+ pax_force_retaddr
23070 ret
23071 CFI_ENDPROC
23072diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
23073index 1f118d4..ec4a953 100644
23074--- a/arch/x86/lib/usercopy_32.c
23075+++ b/arch/x86/lib/usercopy_32.c
23076@@ -43,7 +43,7 @@ do { \
23077 __asm__ __volatile__( \
23078 " testl %1,%1\n" \
23079 " jz 2f\n" \
23080- "0: lodsb\n" \
23081+ "0: "__copyuser_seg"lodsb\n" \
23082 " stosb\n" \
23083 " testb %%al,%%al\n" \
23084 " jz 1f\n" \
23085@@ -128,10 +128,12 @@ do { \
23086 int __d0; \
23087 might_fault(); \
23088 __asm__ __volatile__( \
23089+ __COPYUSER_SET_ES \
23090 "0: rep; stosl\n" \
23091 " movl %2,%0\n" \
23092 "1: rep; stosb\n" \
23093 "2:\n" \
23094+ __COPYUSER_RESTORE_ES \
23095 ".section .fixup,\"ax\"\n" \
23096 "3: lea 0(%2,%0,4),%0\n" \
23097 " jmp 2b\n" \
23098@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s, long n)
23099 might_fault();
23100
23101 __asm__ __volatile__(
23102+ __COPYUSER_SET_ES
23103 " testl %0, %0\n"
23104 " jz 3f\n"
23105 " andl %0,%%ecx\n"
23106@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s, long n)
23107 " subl %%ecx,%0\n"
23108 " addl %0,%%eax\n"
23109 "1:\n"
23110+ __COPYUSER_RESTORE_ES
23111 ".section .fixup,\"ax\"\n"
23112 "2: xorl %%eax,%%eax\n"
23113 " jmp 1b\n"
23114@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
23115
23116 #ifdef CONFIG_X86_INTEL_USERCOPY
23117 static unsigned long
23118-__copy_user_intel(void __user *to, const void *from, unsigned long size)
23119+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
23120 {
23121 int d0, d1;
23122 __asm__ __volatile__(
23123@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23124 " .align 2,0x90\n"
23125 "3: movl 0(%4), %%eax\n"
23126 "4: movl 4(%4), %%edx\n"
23127- "5: movl %%eax, 0(%3)\n"
23128- "6: movl %%edx, 4(%3)\n"
23129+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
23130+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
23131 "7: movl 8(%4), %%eax\n"
23132 "8: movl 12(%4),%%edx\n"
23133- "9: movl %%eax, 8(%3)\n"
23134- "10: movl %%edx, 12(%3)\n"
23135+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
23136+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
23137 "11: movl 16(%4), %%eax\n"
23138 "12: movl 20(%4), %%edx\n"
23139- "13: movl %%eax, 16(%3)\n"
23140- "14: movl %%edx, 20(%3)\n"
23141+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
23142+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
23143 "15: movl 24(%4), %%eax\n"
23144 "16: movl 28(%4), %%edx\n"
23145- "17: movl %%eax, 24(%3)\n"
23146- "18: movl %%edx, 28(%3)\n"
23147+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
23148+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
23149 "19: movl 32(%4), %%eax\n"
23150 "20: movl 36(%4), %%edx\n"
23151- "21: movl %%eax, 32(%3)\n"
23152- "22: movl %%edx, 36(%3)\n"
23153+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
23154+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
23155 "23: movl 40(%4), %%eax\n"
23156 "24: movl 44(%4), %%edx\n"
23157- "25: movl %%eax, 40(%3)\n"
23158- "26: movl %%edx, 44(%3)\n"
23159+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
23160+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
23161 "27: movl 48(%4), %%eax\n"
23162 "28: movl 52(%4), %%edx\n"
23163- "29: movl %%eax, 48(%3)\n"
23164- "30: movl %%edx, 52(%3)\n"
23165+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
23166+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
23167 "31: movl 56(%4), %%eax\n"
23168 "32: movl 60(%4), %%edx\n"
23169- "33: movl %%eax, 56(%3)\n"
23170- "34: movl %%edx, 60(%3)\n"
23171+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
23172+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
23173 " addl $-64, %0\n"
23174 " addl $64, %4\n"
23175 " addl $64, %3\n"
23176@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
23177 " shrl $2, %0\n"
23178 " andl $3, %%eax\n"
23179 " cld\n"
23180+ __COPYUSER_SET_ES
23181 "99: rep; movsl\n"
23182 "36: movl %%eax, %0\n"
23183 "37: rep; movsb\n"
23184 "100:\n"
23185+ __COPYUSER_RESTORE_ES
23186+ ".section .fixup,\"ax\"\n"
23187+ "101: lea 0(%%eax,%0,4),%0\n"
23188+ " jmp 100b\n"
23189+ ".previous\n"
23190+ ".section __ex_table,\"a\"\n"
23191+ " .align 4\n"
23192+ " .long 1b,100b\n"
23193+ " .long 2b,100b\n"
23194+ " .long 3b,100b\n"
23195+ " .long 4b,100b\n"
23196+ " .long 5b,100b\n"
23197+ " .long 6b,100b\n"
23198+ " .long 7b,100b\n"
23199+ " .long 8b,100b\n"
23200+ " .long 9b,100b\n"
23201+ " .long 10b,100b\n"
23202+ " .long 11b,100b\n"
23203+ " .long 12b,100b\n"
23204+ " .long 13b,100b\n"
23205+ " .long 14b,100b\n"
23206+ " .long 15b,100b\n"
23207+ " .long 16b,100b\n"
23208+ " .long 17b,100b\n"
23209+ " .long 18b,100b\n"
23210+ " .long 19b,100b\n"
23211+ " .long 20b,100b\n"
23212+ " .long 21b,100b\n"
23213+ " .long 22b,100b\n"
23214+ " .long 23b,100b\n"
23215+ " .long 24b,100b\n"
23216+ " .long 25b,100b\n"
23217+ " .long 26b,100b\n"
23218+ " .long 27b,100b\n"
23219+ " .long 28b,100b\n"
23220+ " .long 29b,100b\n"
23221+ " .long 30b,100b\n"
23222+ " .long 31b,100b\n"
23223+ " .long 32b,100b\n"
23224+ " .long 33b,100b\n"
23225+ " .long 34b,100b\n"
23226+ " .long 35b,100b\n"
23227+ " .long 36b,100b\n"
23228+ " .long 37b,100b\n"
23229+ " .long 99b,101b\n"
23230+ ".previous"
23231+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
23232+ : "1"(to), "2"(from), "0"(size)
23233+ : "eax", "edx", "memory");
23234+ return size;
23235+}
23236+
23237+static unsigned long
23238+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
23239+{
23240+ int d0, d1;
23241+ __asm__ __volatile__(
23242+ " .align 2,0x90\n"
23243+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
23244+ " cmpl $67, %0\n"
23245+ " jbe 3f\n"
23246+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
23247+ " .align 2,0x90\n"
23248+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
23249+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
23250+ "5: movl %%eax, 0(%3)\n"
23251+ "6: movl %%edx, 4(%3)\n"
23252+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
23253+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
23254+ "9: movl %%eax, 8(%3)\n"
23255+ "10: movl %%edx, 12(%3)\n"
23256+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
23257+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
23258+ "13: movl %%eax, 16(%3)\n"
23259+ "14: movl %%edx, 20(%3)\n"
23260+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
23261+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
23262+ "17: movl %%eax, 24(%3)\n"
23263+ "18: movl %%edx, 28(%3)\n"
23264+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
23265+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
23266+ "21: movl %%eax, 32(%3)\n"
23267+ "22: movl %%edx, 36(%3)\n"
23268+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
23269+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
23270+ "25: movl %%eax, 40(%3)\n"
23271+ "26: movl %%edx, 44(%3)\n"
23272+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
23273+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
23274+ "29: movl %%eax, 48(%3)\n"
23275+ "30: movl %%edx, 52(%3)\n"
23276+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
23277+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
23278+ "33: movl %%eax, 56(%3)\n"
23279+ "34: movl %%edx, 60(%3)\n"
23280+ " addl $-64, %0\n"
23281+ " addl $64, %4\n"
23282+ " addl $64, %3\n"
23283+ " cmpl $63, %0\n"
23284+ " ja 1b\n"
23285+ "35: movl %0, %%eax\n"
23286+ " shrl $2, %0\n"
23287+ " andl $3, %%eax\n"
23288+ " cld\n"
23289+ "99: rep; "__copyuser_seg" movsl\n"
23290+ "36: movl %%eax, %0\n"
23291+ "37: rep; "__copyuser_seg" movsb\n"
23292+ "100:\n"
23293 ".section .fixup,\"ax\"\n"
23294 "101: lea 0(%%eax,%0,4),%0\n"
23295 " jmp 100b\n"
23296@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23297 int d0, d1;
23298 __asm__ __volatile__(
23299 " .align 2,0x90\n"
23300- "0: movl 32(%4), %%eax\n"
23301+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23302 " cmpl $67, %0\n"
23303 " jbe 2f\n"
23304- "1: movl 64(%4), %%eax\n"
23305+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23306 " .align 2,0x90\n"
23307- "2: movl 0(%4), %%eax\n"
23308- "21: movl 4(%4), %%edx\n"
23309+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23310+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23311 " movl %%eax, 0(%3)\n"
23312 " movl %%edx, 4(%3)\n"
23313- "3: movl 8(%4), %%eax\n"
23314- "31: movl 12(%4),%%edx\n"
23315+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23316+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23317 " movl %%eax, 8(%3)\n"
23318 " movl %%edx, 12(%3)\n"
23319- "4: movl 16(%4), %%eax\n"
23320- "41: movl 20(%4), %%edx\n"
23321+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23322+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23323 " movl %%eax, 16(%3)\n"
23324 " movl %%edx, 20(%3)\n"
23325- "10: movl 24(%4), %%eax\n"
23326- "51: movl 28(%4), %%edx\n"
23327+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23328+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23329 " movl %%eax, 24(%3)\n"
23330 " movl %%edx, 28(%3)\n"
23331- "11: movl 32(%4), %%eax\n"
23332- "61: movl 36(%4), %%edx\n"
23333+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23334+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23335 " movl %%eax, 32(%3)\n"
23336 " movl %%edx, 36(%3)\n"
23337- "12: movl 40(%4), %%eax\n"
23338- "71: movl 44(%4), %%edx\n"
23339+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23340+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23341 " movl %%eax, 40(%3)\n"
23342 " movl %%edx, 44(%3)\n"
23343- "13: movl 48(%4), %%eax\n"
23344- "81: movl 52(%4), %%edx\n"
23345+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23346+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23347 " movl %%eax, 48(%3)\n"
23348 " movl %%edx, 52(%3)\n"
23349- "14: movl 56(%4), %%eax\n"
23350- "91: movl 60(%4), %%edx\n"
23351+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23352+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23353 " movl %%eax, 56(%3)\n"
23354 " movl %%edx, 60(%3)\n"
23355 " addl $-64, %0\n"
23356@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
23357 " shrl $2, %0\n"
23358 " andl $3, %%eax\n"
23359 " cld\n"
23360- "6: rep; movsl\n"
23361+ "6: rep; "__copyuser_seg" movsl\n"
23362 " movl %%eax,%0\n"
23363- "7: rep; movsb\n"
23364+ "7: rep; "__copyuser_seg" movsb\n"
23365 "8:\n"
23366 ".section .fixup,\"ax\"\n"
23367 "9: lea 0(%%eax,%0,4),%0\n"
23368@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23369
23370 __asm__ __volatile__(
23371 " .align 2,0x90\n"
23372- "0: movl 32(%4), %%eax\n"
23373+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23374 " cmpl $67, %0\n"
23375 " jbe 2f\n"
23376- "1: movl 64(%4), %%eax\n"
23377+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23378 " .align 2,0x90\n"
23379- "2: movl 0(%4), %%eax\n"
23380- "21: movl 4(%4), %%edx\n"
23381+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23382+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23383 " movnti %%eax, 0(%3)\n"
23384 " movnti %%edx, 4(%3)\n"
23385- "3: movl 8(%4), %%eax\n"
23386- "31: movl 12(%4),%%edx\n"
23387+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23388+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23389 " movnti %%eax, 8(%3)\n"
23390 " movnti %%edx, 12(%3)\n"
23391- "4: movl 16(%4), %%eax\n"
23392- "41: movl 20(%4), %%edx\n"
23393+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23394+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23395 " movnti %%eax, 16(%3)\n"
23396 " movnti %%edx, 20(%3)\n"
23397- "10: movl 24(%4), %%eax\n"
23398- "51: movl 28(%4), %%edx\n"
23399+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23400+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23401 " movnti %%eax, 24(%3)\n"
23402 " movnti %%edx, 28(%3)\n"
23403- "11: movl 32(%4), %%eax\n"
23404- "61: movl 36(%4), %%edx\n"
23405+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23406+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23407 " movnti %%eax, 32(%3)\n"
23408 " movnti %%edx, 36(%3)\n"
23409- "12: movl 40(%4), %%eax\n"
23410- "71: movl 44(%4), %%edx\n"
23411+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23412+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23413 " movnti %%eax, 40(%3)\n"
23414 " movnti %%edx, 44(%3)\n"
23415- "13: movl 48(%4), %%eax\n"
23416- "81: movl 52(%4), %%edx\n"
23417+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23418+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23419 " movnti %%eax, 48(%3)\n"
23420 " movnti %%edx, 52(%3)\n"
23421- "14: movl 56(%4), %%eax\n"
23422- "91: movl 60(%4), %%edx\n"
23423+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23424+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23425 " movnti %%eax, 56(%3)\n"
23426 " movnti %%edx, 60(%3)\n"
23427 " addl $-64, %0\n"
23428@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
23429 " shrl $2, %0\n"
23430 " andl $3, %%eax\n"
23431 " cld\n"
23432- "6: rep; movsl\n"
23433+ "6: rep; "__copyuser_seg" movsl\n"
23434 " movl %%eax,%0\n"
23435- "7: rep; movsb\n"
23436+ "7: rep; "__copyuser_seg" movsb\n"
23437 "8:\n"
23438 ".section .fixup,\"ax\"\n"
23439 "9: lea 0(%%eax,%0,4),%0\n"
23440@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_nocache(void *to,
23441
23442 __asm__ __volatile__(
23443 " .align 2,0x90\n"
23444- "0: movl 32(%4), %%eax\n"
23445+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
23446 " cmpl $67, %0\n"
23447 " jbe 2f\n"
23448- "1: movl 64(%4), %%eax\n"
23449+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
23450 " .align 2,0x90\n"
23451- "2: movl 0(%4), %%eax\n"
23452- "21: movl 4(%4), %%edx\n"
23453+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
23454+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
23455 " movnti %%eax, 0(%3)\n"
23456 " movnti %%edx, 4(%3)\n"
23457- "3: movl 8(%4), %%eax\n"
23458- "31: movl 12(%4),%%edx\n"
23459+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
23460+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
23461 " movnti %%eax, 8(%3)\n"
23462 " movnti %%edx, 12(%3)\n"
23463- "4: movl 16(%4), %%eax\n"
23464- "41: movl 20(%4), %%edx\n"
23465+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
23466+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
23467 " movnti %%eax, 16(%3)\n"
23468 " movnti %%edx, 20(%3)\n"
23469- "10: movl 24(%4), %%eax\n"
23470- "51: movl 28(%4), %%edx\n"
23471+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
23472+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
23473 " movnti %%eax, 24(%3)\n"
23474 " movnti %%edx, 28(%3)\n"
23475- "11: movl 32(%4), %%eax\n"
23476- "61: movl 36(%4), %%edx\n"
23477+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
23478+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
23479 " movnti %%eax, 32(%3)\n"
23480 " movnti %%edx, 36(%3)\n"
23481- "12: movl 40(%4), %%eax\n"
23482- "71: movl 44(%4), %%edx\n"
23483+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
23484+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
23485 " movnti %%eax, 40(%3)\n"
23486 " movnti %%edx, 44(%3)\n"
23487- "13: movl 48(%4), %%eax\n"
23488- "81: movl 52(%4), %%edx\n"
23489+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
23490+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
23491 " movnti %%eax, 48(%3)\n"
23492 " movnti %%edx, 52(%3)\n"
23493- "14: movl 56(%4), %%eax\n"
23494- "91: movl 60(%4), %%edx\n"
23495+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
23496+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
23497 " movnti %%eax, 56(%3)\n"
23498 " movnti %%edx, 60(%3)\n"
23499 " addl $-64, %0\n"
23500@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
23501 " shrl $2, %0\n"
23502 " andl $3, %%eax\n"
23503 " cld\n"
23504- "6: rep; movsl\n"
23505+ "6: rep; "__copyuser_seg" movsl\n"
23506 " movl %%eax,%0\n"
23507- "7: rep; movsb\n"
23508+ "7: rep; "__copyuser_seg" movsb\n"
23509 "8:\n"
23510 ".section .fixup,\"ax\"\n"
23511 "9: lea 0(%%eax,%0,4),%0\n"
23512@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
23513 */
23514 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
23515 unsigned long size);
23516-unsigned long __copy_user_intel(void __user *to, const void *from,
23517+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
23518+ unsigned long size);
23519+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
23520 unsigned long size);
23521 unsigned long __copy_user_zeroing_intel_nocache(void *to,
23522 const void __user *from, unsigned long size);
23523 #endif /* CONFIG_X86_INTEL_USERCOPY */
23524
23525 /* Generic arbitrary sized copy. */
23526-#define __copy_user(to, from, size) \
23527+#define __copy_user(to, from, size, prefix, set, restore) \
23528 do { \
23529 int __d0, __d1, __d2; \
23530 __asm__ __volatile__( \
23531+ set \
23532 " cmp $7,%0\n" \
23533 " jbe 1f\n" \
23534 " movl %1,%0\n" \
23535 " negl %0\n" \
23536 " andl $7,%0\n" \
23537 " subl %0,%3\n" \
23538- "4: rep; movsb\n" \
23539+ "4: rep; "prefix"movsb\n" \
23540 " movl %3,%0\n" \
23541 " shrl $2,%0\n" \
23542 " andl $3,%3\n" \
23543 " .align 2,0x90\n" \
23544- "0: rep; movsl\n" \
23545+ "0: rep; "prefix"movsl\n" \
23546 " movl %3,%0\n" \
23547- "1: rep; movsb\n" \
23548+ "1: rep; "prefix"movsb\n" \
23549 "2:\n" \
23550+ restore \
23551 ".section .fixup,\"ax\"\n" \
23552 "5: addl %3,%0\n" \
23553 " jmp 2b\n" \
23554@@ -682,14 +799,14 @@ do { \
23555 " negl %0\n" \
23556 " andl $7,%0\n" \
23557 " subl %0,%3\n" \
23558- "4: rep; movsb\n" \
23559+ "4: rep; "__copyuser_seg"movsb\n" \
23560 " movl %3,%0\n" \
23561 " shrl $2,%0\n" \
23562 " andl $3,%3\n" \
23563 " .align 2,0x90\n" \
23564- "0: rep; movsl\n" \
23565+ "0: rep; "__copyuser_seg"movsl\n" \
23566 " movl %3,%0\n" \
23567- "1: rep; movsb\n" \
23568+ "1: rep; "__copyuser_seg"movsb\n" \
23569 "2:\n" \
23570 ".section .fixup,\"ax\"\n" \
23571 "5: addl %3,%0\n" \
23572@@ -775,9 +892,9 @@ survive:
23573 }
23574 #endif
23575 if (movsl_is_ok(to, from, n))
23576- __copy_user(to, from, n);
23577+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
23578 else
23579- n = __copy_user_intel(to, from, n);
23580+ n = __generic_copy_to_user_intel(to, from, n);
23581 return n;
23582 }
23583 EXPORT_SYMBOL(__copy_to_user_ll);
23584@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
23585 unsigned long n)
23586 {
23587 if (movsl_is_ok(to, from, n))
23588- __copy_user(to, from, n);
23589+ __copy_user(to, from, n, __copyuser_seg, "", "");
23590 else
23591- n = __copy_user_intel((void __user *)to,
23592- (const void *)from, n);
23593+ n = __generic_copy_from_user_intel(to, from, n);
23594 return n;
23595 }
23596 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
23597@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
23598 if (n > 64 && cpu_has_xmm2)
23599 n = __copy_user_intel_nocache(to, from, n);
23600 else
23601- __copy_user(to, from, n);
23602+ __copy_user(to, from, n, __copyuser_seg, "", "");
23603 #else
23604- __copy_user(to, from, n);
23605+ __copy_user(to, from, n, __copyuser_seg, "", "");
23606 #endif
23607 return n;
23608 }
23609 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
23610
23611-/**
23612- * copy_to_user: - Copy a block of data into user space.
23613- * @to: Destination address, in user space.
23614- * @from: Source address, in kernel space.
23615- * @n: Number of bytes to copy.
23616- *
23617- * Context: User context only. This function may sleep.
23618- *
23619- * Copy data from kernel space to user space.
23620- *
23621- * Returns number of bytes that could not be copied.
23622- * On success, this will be zero.
23623- */
23624-unsigned long
23625-copy_to_user(void __user *to, const void *from, unsigned long n)
23626+#ifdef CONFIG_PAX_MEMORY_UDEREF
23627+void __set_fs(mm_segment_t x)
23628 {
23629- if (access_ok(VERIFY_WRITE, to, n))
23630- n = __copy_to_user(to, from, n);
23631- return n;
23632+ switch (x.seg) {
23633+ case 0:
23634+ loadsegment(gs, 0);
23635+ break;
23636+ case TASK_SIZE_MAX:
23637+ loadsegment(gs, __USER_DS);
23638+ break;
23639+ case -1UL:
23640+ loadsegment(gs, __KERNEL_DS);
23641+ break;
23642+ default:
23643+ BUG();
23644+ }
23645+ return;
23646 }
23647-EXPORT_SYMBOL(copy_to_user);
23648+EXPORT_SYMBOL(__set_fs);
23649
23650-/**
23651- * copy_from_user: - Copy a block of data from user space.
23652- * @to: Destination address, in kernel space.
23653- * @from: Source address, in user space.
23654- * @n: Number of bytes to copy.
23655- *
23656- * Context: User context only. This function may sleep.
23657- *
23658- * Copy data from user space to kernel space.
23659- *
23660- * Returns number of bytes that could not be copied.
23661- * On success, this will be zero.
23662- *
23663- * If some data could not be copied, this function will pad the copied
23664- * data to the requested size using zero bytes.
23665- */
23666-unsigned long
23667-copy_from_user(void *to, const void __user *from, unsigned long n)
23668+void set_fs(mm_segment_t x)
23669 {
23670- if (access_ok(VERIFY_READ, from, n))
23671- n = __copy_from_user(to, from, n);
23672- else
23673- memset(to, 0, n);
23674- return n;
23675+ current_thread_info()->addr_limit = x;
23676+ __set_fs(x);
23677 }
23678-EXPORT_SYMBOL(copy_from_user);
23679+EXPORT_SYMBOL(set_fs);
23680+#endif
23681diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
23682index b7c2849..8633ad8 100644
23683--- a/arch/x86/lib/usercopy_64.c
23684+++ b/arch/x86/lib/usercopy_64.c
23685@@ -42,6 +42,12 @@ long
23686 __strncpy_from_user(char *dst, const char __user *src, long count)
23687 {
23688 long res;
23689+
23690+#ifdef CONFIG_PAX_MEMORY_UDEREF
23691+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
23692+ src += PAX_USER_SHADOW_BASE;
23693+#endif
23694+
23695 __do_strncpy_from_user(dst, src, count, res);
23696 return res;
23697 }
23698@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
23699 {
23700 long __d0;
23701 might_fault();
23702+
23703+#ifdef CONFIG_PAX_MEMORY_UDEREF
23704+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
23705+ addr += PAX_USER_SHADOW_BASE;
23706+#endif
23707+
23708 /* no memory constraint because it doesn't change any memory gcc knows
23709 about */
23710 asm volatile(
23711@@ -149,12 +161,20 @@ long strlen_user(const char __user *s)
23712 }
23713 EXPORT_SYMBOL(strlen_user);
23714
23715-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
23716+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
23717 {
23718- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23719- return copy_user_generic((__force void *)to, (__force void *)from, len);
23720- }
23721- return len;
23722+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
23723+
23724+#ifdef CONFIG_PAX_MEMORY_UDEREF
23725+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
23726+ to += PAX_USER_SHADOW_BASE;
23727+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
23728+ from += PAX_USER_SHADOW_BASE;
23729+#endif
23730+
23731+ return copy_user_generic((void __force_kernel *)to, (void __force_kernel *)from, len);
23732+ }
23733+ return len;
23734 }
23735 EXPORT_SYMBOL(copy_in_user);
23736
23737@@ -164,7 +184,7 @@ EXPORT_SYMBOL(copy_in_user);
23738 * it is not necessary to optimize tail handling.
23739 */
23740 unsigned long
23741-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
23742+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
23743 {
23744 char c;
23745 unsigned zero_len;
23746diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
23747index 61b41ca..5fef66a 100644
23748--- a/arch/x86/mm/extable.c
23749+++ b/arch/x86/mm/extable.c
23750@@ -1,14 +1,71 @@
23751 #include <linux/module.h>
23752 #include <linux/spinlock.h>
23753+#include <linux/sort.h>
23754 #include <asm/uaccess.h>
23755+#include <asm/pgtable.h>
23756
23757+/*
23758+ * The exception table needs to be sorted so that the binary
23759+ * search that we use to find entries in it works properly.
23760+ * This is used both for the kernel exception table and for
23761+ * the exception tables of modules that get loaded.
23762+ */
23763+static int cmp_ex(const void *a, const void *b)
23764+{
23765+ const struct exception_table_entry *x = a, *y = b;
23766+
23767+ /* avoid overflow */
23768+ if (x->insn > y->insn)
23769+ return 1;
23770+ if (x->insn < y->insn)
23771+ return -1;
23772+ return 0;
23773+}
23774+
23775+static void swap_ex(void *a, void *b, int size)
23776+{
23777+ struct exception_table_entry t, *x = a, *y = b;
23778+
23779+ t = *x;
23780+
23781+ pax_open_kernel();
23782+ *x = *y;
23783+ *y = t;
23784+ pax_close_kernel();
23785+}
23786+
23787+void sort_extable(struct exception_table_entry *start,
23788+ struct exception_table_entry *finish)
23789+{
23790+ sort(start, finish - start, sizeof(struct exception_table_entry),
23791+ cmp_ex, swap_ex);
23792+}
23793+
23794+#ifdef CONFIG_MODULES
23795+/*
23796+ * If the exception table is sorted, any referring to the module init
23797+ * will be at the beginning or the end.
23798+ */
23799+void trim_init_extable(struct module *m)
23800+{
23801+ /*trim the beginning*/
23802+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
23803+ m->extable++;
23804+ m->num_exentries--;
23805+ }
23806+ /*trim the end*/
23807+ while (m->num_exentries &&
23808+ within_module_init(m->extable[m->num_exentries-1].insn, m))
23809+ m->num_exentries--;
23810+}
23811+#endif /* CONFIG_MODULES */
23812
23813 int fixup_exception(struct pt_regs *regs)
23814 {
23815 const struct exception_table_entry *fixup;
23816
23817 #ifdef CONFIG_PNPBIOS
23818- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
23819+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
23820 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
23821 extern u32 pnp_bios_is_utter_crap;
23822 pnp_bios_is_utter_crap = 1;
23823diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
23824index 8ac0d76..87899a4 100644
23825--- a/arch/x86/mm/fault.c
23826+++ b/arch/x86/mm/fault.c
23827@@ -11,10 +11,19 @@
23828 #include <linux/kprobes.h> /* __kprobes, ... */
23829 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
23830 #include <linux/perf_event.h> /* perf_sw_event */
23831+#include <linux/unistd.h>
23832+#include <linux/compiler.h>
23833
23834 #include <asm/traps.h> /* dotraplinkage, ... */
23835 #include <asm/pgalloc.h> /* pgd_*(), ... */
23836 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
23837+#include <asm/vsyscall.h>
23838+#include <asm/tlbflush.h>
23839+
23840+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
23841+#include <asm/stacktrace.h>
23842+#include "../kernel/dumpstack.h"
23843+#endif
23844
23845 /*
23846 * Page fault error code bits:
23847@@ -51,7 +60,7 @@ static inline int notify_page_fault(struct pt_regs *regs)
23848 int ret = 0;
23849
23850 /* kprobe_running() needs smp_processor_id() */
23851- if (kprobes_built_in() && !user_mode_vm(regs)) {
23852+ if (kprobes_built_in() && !user_mode(regs)) {
23853 preempt_disable();
23854 if (kprobe_running() && kprobe_fault_handler(regs, 14))
23855 ret = 1;
23856@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
23857 return !instr_lo || (instr_lo>>1) == 1;
23858 case 0x00:
23859 /* Prefetch instruction is 0x0F0D or 0x0F18 */
23860- if (probe_kernel_address(instr, opcode))
23861+ if (user_mode(regs)) {
23862+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23863+ return 0;
23864+ } else if (probe_kernel_address(instr, opcode))
23865 return 0;
23866
23867 *prefetch = (instr_lo == 0xF) &&
23868@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
23869 while (instr < max_instr) {
23870 unsigned char opcode;
23871
23872- if (probe_kernel_address(instr, opcode))
23873+ if (user_mode(regs)) {
23874+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
23875+ break;
23876+ } else if (probe_kernel_address(instr, opcode))
23877 break;
23878
23879 instr++;
23880@@ -172,6 +187,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
23881 force_sig_info(si_signo, &info, tsk);
23882 }
23883
23884+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
23885+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
23886+#endif
23887+
23888+#ifdef CONFIG_PAX_EMUTRAMP
23889+static int pax_handle_fetch_fault(struct pt_regs *regs);
23890+#endif
23891+
23892+#ifdef CONFIG_PAX_PAGEEXEC
23893+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
23894+{
23895+ pgd_t *pgd;
23896+ pud_t *pud;
23897+ pmd_t *pmd;
23898+
23899+ pgd = pgd_offset(mm, address);
23900+ if (!pgd_present(*pgd))
23901+ return NULL;
23902+ pud = pud_offset(pgd, address);
23903+ if (!pud_present(*pud))
23904+ return NULL;
23905+ pmd = pmd_offset(pud, address);
23906+ if (!pmd_present(*pmd))
23907+ return NULL;
23908+ return pmd;
23909+}
23910+#endif
23911+
23912 DEFINE_SPINLOCK(pgd_lock);
23913 LIST_HEAD(pgd_list);
23914
23915@@ -224,11 +267,24 @@ void vmalloc_sync_all(void)
23916 address += PMD_SIZE) {
23917
23918 unsigned long flags;
23919+
23920+#ifdef CONFIG_PAX_PER_CPU_PGD
23921+ unsigned long cpu;
23922+#else
23923 struct page *page;
23924+#endif
23925
23926 spin_lock_irqsave(&pgd_lock, flags);
23927+
23928+#ifdef CONFIG_PAX_PER_CPU_PGD
23929+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23930+ pgd_t *pgd = get_cpu_pgd(cpu);
23931+#else
23932 list_for_each_entry(page, &pgd_list, lru) {
23933- if (!vmalloc_sync_one(page_address(page), address))
23934+ pgd_t *pgd = page_address(page);
23935+#endif
23936+
23937+ if (!vmalloc_sync_one(pgd, address))
23938 break;
23939 }
23940 spin_unlock_irqrestore(&pgd_lock, flags);
23941@@ -258,6 +314,11 @@ static noinline int vmalloc_fault(unsigned long address)
23942 * an interrupt in the middle of a task switch..
23943 */
23944 pgd_paddr = read_cr3();
23945+
23946+#ifdef CONFIG_PAX_PER_CPU_PGD
23947+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
23948+#endif
23949+
23950 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
23951 if (!pmd_k)
23952 return -1;
23953@@ -332,15 +393,27 @@ void vmalloc_sync_all(void)
23954
23955 const pgd_t *pgd_ref = pgd_offset_k(address);
23956 unsigned long flags;
23957+
23958+#ifdef CONFIG_PAX_PER_CPU_PGD
23959+ unsigned long cpu;
23960+#else
23961 struct page *page;
23962+#endif
23963
23964 if (pgd_none(*pgd_ref))
23965 continue;
23966
23967 spin_lock_irqsave(&pgd_lock, flags);
23968+
23969+#ifdef CONFIG_PAX_PER_CPU_PGD
23970+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
23971+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
23972+#else
23973 list_for_each_entry(page, &pgd_list, lru) {
23974 pgd_t *pgd;
23975 pgd = (pgd_t *)page_address(page) + pgd_index(address);
23976+#endif
23977+
23978 if (pgd_none(*pgd))
23979 set_pgd(pgd, *pgd_ref);
23980 else
23981@@ -373,7 +446,14 @@ static noinline int vmalloc_fault(unsigned long address)
23982 * happen within a race in page table update. In the later
23983 * case just flush:
23984 */
23985+
23986+#ifdef CONFIG_PAX_PER_CPU_PGD
23987+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
23988+ pgd = pgd_offset_cpu(smp_processor_id(), address);
23989+#else
23990 pgd = pgd_offset(current->active_mm, address);
23991+#endif
23992+
23993 pgd_ref = pgd_offset_k(address);
23994 if (pgd_none(*pgd_ref))
23995 return -1;
23996@@ -535,7 +615,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
23997 static int is_errata100(struct pt_regs *regs, unsigned long address)
23998 {
23999 #ifdef CONFIG_X86_64
24000- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
24001+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
24002 return 1;
24003 #endif
24004 return 0;
24005@@ -562,7 +642,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
24006 }
24007
24008 static const char nx_warning[] = KERN_CRIT
24009-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
24010+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
24011
24012 static void
24013 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24014@@ -571,15 +651,26 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
24015 if (!oops_may_print())
24016 return;
24017
24018- if (error_code & PF_INSTR) {
24019+ if (nx_enabled && (error_code & PF_INSTR)) {
24020 unsigned int level;
24021
24022 pte_t *pte = lookup_address(address, &level);
24023
24024 if (pte && pte_present(*pte) && !pte_exec(*pte))
24025- printk(nx_warning, current_uid());
24026+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
24027 }
24028
24029+#ifdef CONFIG_PAX_KERNEXEC
24030+ if (init_mm.start_code <= address && address < init_mm.end_code) {
24031+ if (current->signal->curr_ip)
24032+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24033+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
24034+ else
24035+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
24036+ current->comm, task_pid_nr(current), current_uid(), current_euid());
24037+ }
24038+#endif
24039+
24040 printk(KERN_ALERT "BUG: unable to handle kernel ");
24041 if (address < PAGE_SIZE)
24042 printk(KERN_CONT "NULL pointer dereference");
24043@@ -705,6 +796,23 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24044 {
24045 struct task_struct *tsk = current;
24046
24047+#ifdef CONFIG_X86_64
24048+ struct mm_struct *mm = tsk->mm;
24049+
24050+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
24051+ if (regs->ip == (unsigned long)vgettimeofday) {
24052+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
24053+ return;
24054+ } else if (regs->ip == (unsigned long)vtime) {
24055+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
24056+ return;
24057+ } else if (regs->ip == (unsigned long)vgetcpu) {
24058+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
24059+ return;
24060+ }
24061+ }
24062+#endif
24063+
24064 /* User mode accesses just cause a SIGSEGV */
24065 if (error_code & PF_USER) {
24066 /*
24067@@ -722,6 +830,21 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
24068 if (is_errata100(regs, address))
24069 return;
24070
24071+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24072+ if (pax_is_fetch_fault(regs, error_code, address)) {
24073+
24074+#ifdef CONFIG_PAX_EMUTRAMP
24075+ switch (pax_handle_fetch_fault(regs)) {
24076+ case 2:
24077+ return;
24078+ }
24079+#endif
24080+
24081+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24082+ do_group_exit(SIGKILL);
24083+ }
24084+#endif
24085+
24086 if (unlikely(show_unhandled_signals))
24087 show_signal_msg(regs, error_code, address, tsk);
24088
24089@@ -818,7 +941,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
24090 if (fault & VM_FAULT_HWPOISON) {
24091 printk(KERN_ERR
24092 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
24093- tsk->comm, tsk->pid, address);
24094+ tsk->comm, task_pid_nr(tsk), address);
24095 code = BUS_MCEERR_AR;
24096 }
24097 #endif
24098@@ -857,6 +980,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
24099 return 1;
24100 }
24101
24102+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24103+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
24104+{
24105+ pte_t *pte;
24106+ pmd_t *pmd;
24107+ spinlock_t *ptl;
24108+ unsigned char pte_mask;
24109+
24110+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
24111+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
24112+ return 0;
24113+
24114+ /* PaX: it's our fault, let's handle it if we can */
24115+
24116+ /* PaX: take a look at read faults before acquiring any locks */
24117+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
24118+ /* instruction fetch attempt from a protected page in user mode */
24119+ up_read(&mm->mmap_sem);
24120+
24121+#ifdef CONFIG_PAX_EMUTRAMP
24122+ switch (pax_handle_fetch_fault(regs)) {
24123+ case 2:
24124+ return 1;
24125+ }
24126+#endif
24127+
24128+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
24129+ do_group_exit(SIGKILL);
24130+ }
24131+
24132+ pmd = pax_get_pmd(mm, address);
24133+ if (unlikely(!pmd))
24134+ return 0;
24135+
24136+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
24137+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
24138+ pte_unmap_unlock(pte, ptl);
24139+ return 0;
24140+ }
24141+
24142+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
24143+ /* write attempt to a protected page in user mode */
24144+ pte_unmap_unlock(pte, ptl);
24145+ return 0;
24146+ }
24147+
24148+#ifdef CONFIG_SMP
24149+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
24150+#else
24151+ if (likely(address > get_limit(regs->cs)))
24152+#endif
24153+ {
24154+ set_pte(pte, pte_mkread(*pte));
24155+ __flush_tlb_one(address);
24156+ pte_unmap_unlock(pte, ptl);
24157+ up_read(&mm->mmap_sem);
24158+ return 1;
24159+ }
24160+
24161+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
24162+
24163+ /*
24164+ * PaX: fill DTLB with user rights and retry
24165+ */
24166+ __asm__ __volatile__ (
24167+ "orb %2,(%1)\n"
24168+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
24169+/*
24170+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
24171+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
24172+ * page fault when examined during a TLB load attempt. this is true not only
24173+ * for PTEs holding a non-present entry but also present entries that will
24174+ * raise a page fault (such as those set up by PaX, or the copy-on-write
24175+ * mechanism). in effect it means that we do *not* need to flush the TLBs
24176+ * for our target pages since their PTEs are simply not in the TLBs at all.
24177+
24178+ * the best thing in omitting it is that we gain around 15-20% speed in the
24179+ * fast path of the page fault handler and can get rid of tracing since we
24180+ * can no longer flush unintended entries.
24181+ */
24182+ "invlpg (%0)\n"
24183+#endif
24184+ __copyuser_seg"testb $0,(%0)\n"
24185+ "xorb %3,(%1)\n"
24186+ :
24187+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
24188+ : "memory", "cc");
24189+ pte_unmap_unlock(pte, ptl);
24190+ up_read(&mm->mmap_sem);
24191+ return 1;
24192+}
24193+#endif
24194+
24195 /*
24196 * Handle a spurious fault caused by a stale TLB entry.
24197 *
24198@@ -923,6 +1139,9 @@ int show_unhandled_signals = 1;
24199 static inline int
24200 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
24201 {
24202+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
24203+ return 1;
24204+
24205 if (write) {
24206 /* write, present and write, not present: */
24207 if (unlikely(!(vma->vm_flags & VM_WRITE)))
24208@@ -956,16 +1175,30 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24209 {
24210 struct vm_area_struct *vma;
24211 struct task_struct *tsk;
24212- unsigned long address;
24213 struct mm_struct *mm;
24214 int write;
24215 int fault;
24216
24217- tsk = current;
24218- mm = tsk->mm;
24219-
24220 /* Get the faulting address: */
24221- address = read_cr2();
24222+ unsigned long address = read_cr2();
24223+
24224+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
24225+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
24226+ if (!search_exception_tables(regs->ip)) {
24227+ bad_area_nosemaphore(regs, error_code, address);
24228+ return;
24229+ }
24230+ if (address < PAX_USER_SHADOW_BASE) {
24231+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
24232+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
24233+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
24234+ } else
24235+ address -= PAX_USER_SHADOW_BASE;
24236+ }
24237+#endif
24238+
24239+ tsk = current;
24240+ mm = tsk->mm;
24241
24242 /*
24243 * Detect and handle instructions that would cause a page fault for
24244@@ -1026,7 +1259,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24245 * User-mode registers count as a user access even for any
24246 * potential system fault or CPU buglet:
24247 */
24248- if (user_mode_vm(regs)) {
24249+ if (user_mode(regs)) {
24250 local_irq_enable();
24251 error_code |= PF_USER;
24252 } else {
24253@@ -1080,6 +1313,11 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24254 might_sleep();
24255 }
24256
24257+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
24258+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
24259+ return;
24260+#endif
24261+
24262 vma = find_vma(mm, address);
24263 if (unlikely(!vma)) {
24264 bad_area(regs, error_code, address);
24265@@ -1091,18 +1329,24 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
24266 bad_area(regs, error_code, address);
24267 return;
24268 }
24269- if (error_code & PF_USER) {
24270- /*
24271- * Accessing the stack below %sp is always a bug.
24272- * The large cushion allows instructions like enter
24273- * and pusha to work. ("enter $65535, $31" pushes
24274- * 32 pointers and then decrements %sp by 65535.)
24275- */
24276- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
24277- bad_area(regs, error_code, address);
24278- return;
24279- }
24280+ /*
24281+ * Accessing the stack below %sp is always a bug.
24282+ * The large cushion allows instructions like enter
24283+ * and pusha to work. ("enter $65535, $31" pushes
24284+ * 32 pointers and then decrements %sp by 65535.)
24285+ */
24286+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
24287+ bad_area(regs, error_code, address);
24288+ return;
24289 }
24290+
24291+#ifdef CONFIG_PAX_SEGMEXEC
24292+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
24293+ bad_area(regs, error_code, address);
24294+ return;
24295+ }
24296+#endif
24297+
24298 if (unlikely(expand_stack(vma, address))) {
24299 bad_area(regs, error_code, address);
24300 return;
24301@@ -1146,3 +1390,292 @@ good_area:
24302
24303 up_read(&mm->mmap_sem);
24304 }
24305+
24306+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24307+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
24308+{
24309+ struct mm_struct *mm = current->mm;
24310+ unsigned long ip = regs->ip;
24311+
24312+ if (v8086_mode(regs))
24313+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
24314+
24315+#ifdef CONFIG_PAX_PAGEEXEC
24316+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
24317+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
24318+ return true;
24319+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
24320+ return true;
24321+ return false;
24322+ }
24323+#endif
24324+
24325+#ifdef CONFIG_PAX_SEGMEXEC
24326+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
24327+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
24328+ return true;
24329+ return false;
24330+ }
24331+#endif
24332+
24333+ return false;
24334+}
24335+#endif
24336+
24337+#ifdef CONFIG_PAX_EMUTRAMP
24338+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
24339+{
24340+ int err;
24341+
24342+ do { /* PaX: libffi trampoline emulation */
24343+ unsigned char mov, jmp;
24344+ unsigned int addr1, addr2;
24345+
24346+#ifdef CONFIG_X86_64
24347+ if ((regs->ip + 9) >> 32)
24348+ break;
24349+#endif
24350+
24351+ err = get_user(mov, (unsigned char __user *)regs->ip);
24352+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24353+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24354+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24355+
24356+ if (err)
24357+ break;
24358+
24359+ if (mov == 0xB8 && jmp == 0xE9) {
24360+ regs->ax = addr1;
24361+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24362+ return 2;
24363+ }
24364+ } while (0);
24365+
24366+ do { /* PaX: gcc trampoline emulation #1 */
24367+ unsigned char mov1, mov2;
24368+ unsigned short jmp;
24369+ unsigned int addr1, addr2;
24370+
24371+#ifdef CONFIG_X86_64
24372+ if ((regs->ip + 11) >> 32)
24373+ break;
24374+#endif
24375+
24376+ err = get_user(mov1, (unsigned char __user *)regs->ip);
24377+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24378+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
24379+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24380+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
24381+
24382+ if (err)
24383+ break;
24384+
24385+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
24386+ regs->cx = addr1;
24387+ regs->ax = addr2;
24388+ regs->ip = addr2;
24389+ return 2;
24390+ }
24391+ } while (0);
24392+
24393+ do { /* PaX: gcc trampoline emulation #2 */
24394+ unsigned char mov, jmp;
24395+ unsigned int addr1, addr2;
24396+
24397+#ifdef CONFIG_X86_64
24398+ if ((regs->ip + 9) >> 32)
24399+ break;
24400+#endif
24401+
24402+ err = get_user(mov, (unsigned char __user *)regs->ip);
24403+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
24404+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
24405+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
24406+
24407+ if (err)
24408+ break;
24409+
24410+ if (mov == 0xB9 && jmp == 0xE9) {
24411+ regs->cx = addr1;
24412+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
24413+ return 2;
24414+ }
24415+ } while (0);
24416+
24417+ return 1; /* PaX in action */
24418+}
24419+
24420+#ifdef CONFIG_X86_64
24421+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
24422+{
24423+ int err;
24424+
24425+ do { /* PaX: libffi trampoline emulation */
24426+ unsigned short mov1, mov2, jmp1;
24427+ unsigned char stcclc, jmp2;
24428+ unsigned long addr1, addr2;
24429+
24430+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24431+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24432+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24433+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24434+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
24435+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
24436+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
24437+
24438+ if (err)
24439+ break;
24440+
24441+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24442+ regs->r11 = addr1;
24443+ regs->r10 = addr2;
24444+ if (stcclc == 0xF8)
24445+ regs->flags &= ~X86_EFLAGS_CF;
24446+ else
24447+ regs->flags |= X86_EFLAGS_CF;
24448+ regs->ip = addr1;
24449+ return 2;
24450+ }
24451+ } while (0);
24452+
24453+ do { /* PaX: gcc trampoline emulation #1 */
24454+ unsigned short mov1, mov2, jmp1;
24455+ unsigned char jmp2;
24456+ unsigned int addr1;
24457+ unsigned long addr2;
24458+
24459+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24460+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
24461+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
24462+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
24463+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
24464+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
24465+
24466+ if (err)
24467+ break;
24468+
24469+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24470+ regs->r11 = addr1;
24471+ regs->r10 = addr2;
24472+ regs->ip = addr1;
24473+ return 2;
24474+ }
24475+ } while (0);
24476+
24477+ do { /* PaX: gcc trampoline emulation #2 */
24478+ unsigned short mov1, mov2, jmp1;
24479+ unsigned char jmp2;
24480+ unsigned long addr1, addr2;
24481+
24482+ err = get_user(mov1, (unsigned short __user *)regs->ip);
24483+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
24484+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
24485+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
24486+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
24487+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
24488+
24489+ if (err)
24490+ break;
24491+
24492+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
24493+ regs->r11 = addr1;
24494+ regs->r10 = addr2;
24495+ regs->ip = addr1;
24496+ return 2;
24497+ }
24498+ } while (0);
24499+
24500+ return 1; /* PaX in action */
24501+}
24502+#endif
24503+
24504+/*
24505+ * PaX: decide what to do with offenders (regs->ip = fault address)
24506+ *
24507+ * returns 1 when task should be killed
24508+ * 2 when gcc trampoline was detected
24509+ */
24510+static int pax_handle_fetch_fault(struct pt_regs *regs)
24511+{
24512+ if (v8086_mode(regs))
24513+ return 1;
24514+
24515+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
24516+ return 1;
24517+
24518+#ifdef CONFIG_X86_32
24519+ return pax_handle_fetch_fault_32(regs);
24520+#else
24521+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
24522+ return pax_handle_fetch_fault_32(regs);
24523+ else
24524+ return pax_handle_fetch_fault_64(regs);
24525+#endif
24526+}
24527+#endif
24528+
24529+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
24530+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
24531+{
24532+ long i;
24533+
24534+ printk(KERN_ERR "PAX: bytes at PC: ");
24535+ for (i = 0; i < 20; i++) {
24536+ unsigned char c;
24537+ if (get_user(c, (unsigned char __force_user *)pc+i))
24538+ printk(KERN_CONT "?? ");
24539+ else
24540+ printk(KERN_CONT "%02x ", c);
24541+ }
24542+ printk("\n");
24543+
24544+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
24545+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
24546+ unsigned long c;
24547+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
24548+#ifdef CONFIG_X86_32
24549+ printk(KERN_CONT "???????? ");
24550+#else
24551+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
24552+ printk(KERN_CONT "???????? ???????? ");
24553+ else
24554+ printk(KERN_CONT "???????????????? ");
24555+#endif
24556+ } else {
24557+#ifdef CONFIG_X86_64
24558+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
24559+ printk(KERN_CONT "%08x ", (unsigned int)c);
24560+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
24561+ } else
24562+#endif
24563+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
24564+ }
24565+ }
24566+ printk("\n");
24567+}
24568+#endif
24569+
24570+/**
24571+ * probe_kernel_write(): safely attempt to write to a location
24572+ * @dst: address to write to
24573+ * @src: pointer to the data that shall be written
24574+ * @size: size of the data chunk
24575+ *
24576+ * Safely write to address @dst from the buffer at @src. If a kernel fault
24577+ * happens, handle that and return -EFAULT.
24578+ */
24579+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
24580+{
24581+ long ret;
24582+ mm_segment_t old_fs = get_fs();
24583+
24584+ set_fs(KERNEL_DS);
24585+ pagefault_disable();
24586+ pax_open_kernel();
24587+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
24588+ pax_close_kernel();
24589+ pagefault_enable();
24590+ set_fs(old_fs);
24591+
24592+ return ret ? -EFAULT : 0;
24593+}
24594diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
24595index 71da1bc..7a16bf4 100644
24596--- a/arch/x86/mm/gup.c
24597+++ b/arch/x86/mm/gup.c
24598@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
24599 addr = start;
24600 len = (unsigned long) nr_pages << PAGE_SHIFT;
24601 end = start + len;
24602- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24603+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
24604 (void __user *)start, len)))
24605 return 0;
24606
24607diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
24608index 63a6ba6..79abd7a 100644
24609--- a/arch/x86/mm/highmem_32.c
24610+++ b/arch/x86/mm/highmem_32.c
24611@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
24612 idx = type + KM_TYPE_NR*smp_processor_id();
24613 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
24614 BUG_ON(!pte_none(*(kmap_pte-idx)));
24615+
24616+ pax_open_kernel();
24617 set_pte(kmap_pte-idx, mk_pte(page, prot));
24618+ pax_close_kernel();
24619
24620 return (void *)vaddr;
24621 }
24622diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
24623index f46c340..6ff9a26 100644
24624--- a/arch/x86/mm/hugetlbpage.c
24625+++ b/arch/x86/mm/hugetlbpage.c
24626@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
24627 struct hstate *h = hstate_file(file);
24628 struct mm_struct *mm = current->mm;
24629 struct vm_area_struct *vma;
24630- unsigned long start_addr;
24631+ unsigned long start_addr, pax_task_size = TASK_SIZE;
24632+
24633+#ifdef CONFIG_PAX_SEGMEXEC
24634+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24635+ pax_task_size = SEGMEXEC_TASK_SIZE;
24636+#endif
24637+
24638+ pax_task_size -= PAGE_SIZE;
24639
24640 if (len > mm->cached_hole_size) {
24641- start_addr = mm->free_area_cache;
24642+ start_addr = mm->free_area_cache;
24643 } else {
24644- start_addr = TASK_UNMAPPED_BASE;
24645- mm->cached_hole_size = 0;
24646+ start_addr = mm->mmap_base;
24647+ mm->cached_hole_size = 0;
24648 }
24649
24650 full_search:
24651@@ -281,26 +288,27 @@ full_search:
24652
24653 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
24654 /* At this point: (!vma || addr < vma->vm_end). */
24655- if (TASK_SIZE - len < addr) {
24656+ if (pax_task_size - len < addr) {
24657 /*
24658 * Start a new search - just in case we missed
24659 * some holes.
24660 */
24661- if (start_addr != TASK_UNMAPPED_BASE) {
24662- start_addr = TASK_UNMAPPED_BASE;
24663+ if (start_addr != mm->mmap_base) {
24664+ start_addr = mm->mmap_base;
24665 mm->cached_hole_size = 0;
24666 goto full_search;
24667 }
24668 return -ENOMEM;
24669 }
24670- if (!vma || addr + len <= vma->vm_start) {
24671- mm->free_area_cache = addr + len;
24672- return addr;
24673- }
24674+ if (check_heap_stack_gap(vma, addr, len))
24675+ break;
24676 if (addr + mm->cached_hole_size < vma->vm_start)
24677 mm->cached_hole_size = vma->vm_start - addr;
24678 addr = ALIGN(vma->vm_end, huge_page_size(h));
24679 }
24680+
24681+ mm->free_area_cache = addr + len;
24682+ return addr;
24683 }
24684
24685 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24686@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24687 {
24688 struct hstate *h = hstate_file(file);
24689 struct mm_struct *mm = current->mm;
24690- struct vm_area_struct *vma, *prev_vma;
24691- unsigned long base = mm->mmap_base, addr = addr0;
24692+ struct vm_area_struct *vma;
24693+ unsigned long base = mm->mmap_base, addr;
24694 unsigned long largest_hole = mm->cached_hole_size;
24695- int first_time = 1;
24696
24697 /* don't allow allocations above current base */
24698 if (mm->free_area_cache > base)
24699@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
24700 largest_hole = 0;
24701 mm->free_area_cache = base;
24702 }
24703-try_again:
24704+
24705 /* make sure it can fit in the remaining address space */
24706 if (mm->free_area_cache < len)
24707 goto fail;
24708
24709 /* either no address requested or cant fit in requested address hole */
24710- addr = (mm->free_area_cache - len) & huge_page_mask(h);
24711+ addr = (mm->free_area_cache - len);
24712 do {
24713+ addr &= huge_page_mask(h);
24714+ vma = find_vma(mm, addr);
24715 /*
24716 * Lookup failure means no vma is above this address,
24717 * i.e. return with success:
24718- */
24719- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
24720- return addr;
24721-
24722- /*
24723 * new region fits between prev_vma->vm_end and
24724 * vma->vm_start, use it:
24725 */
24726- if (addr + len <= vma->vm_start &&
24727- (!prev_vma || (addr >= prev_vma->vm_end))) {
24728+ if (check_heap_stack_gap(vma, addr, len)) {
24729 /* remember the address as a hint for next time */
24730- mm->cached_hole_size = largest_hole;
24731- return (mm->free_area_cache = addr);
24732- } else {
24733- /* pull free_area_cache down to the first hole */
24734- if (mm->free_area_cache == vma->vm_end) {
24735- mm->free_area_cache = vma->vm_start;
24736- mm->cached_hole_size = largest_hole;
24737- }
24738+ mm->cached_hole_size = largest_hole;
24739+ return (mm->free_area_cache = addr);
24740+ }
24741+ /* pull free_area_cache down to the first hole */
24742+ if (mm->free_area_cache == vma->vm_end) {
24743+ mm->free_area_cache = vma->vm_start;
24744+ mm->cached_hole_size = largest_hole;
24745 }
24746
24747 /* remember the largest hole we saw so far */
24748 if (addr + largest_hole < vma->vm_start)
24749- largest_hole = vma->vm_start - addr;
24750+ largest_hole = vma->vm_start - addr;
24751
24752 /* try just below the current vma->vm_start */
24753- addr = (vma->vm_start - len) & huge_page_mask(h);
24754- } while (len <= vma->vm_start);
24755+ addr = skip_heap_stack_gap(vma, len);
24756+ } while (!IS_ERR_VALUE(addr));
24757
24758 fail:
24759 /*
24760- * if hint left us with no space for the requested
24761- * mapping then try again:
24762- */
24763- if (first_time) {
24764- mm->free_area_cache = base;
24765- largest_hole = 0;
24766- first_time = 0;
24767- goto try_again;
24768- }
24769- /*
24770 * A failed mmap() very likely causes application failure,
24771 * so fall back to the bottom-up function here. This scenario
24772 * can happen with large stack limits and large mmap()
24773 * allocations.
24774 */
24775- mm->free_area_cache = TASK_UNMAPPED_BASE;
24776+
24777+#ifdef CONFIG_PAX_SEGMEXEC
24778+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24779+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
24780+ else
24781+#endif
24782+
24783+ mm->mmap_base = TASK_UNMAPPED_BASE;
24784+
24785+#ifdef CONFIG_PAX_RANDMMAP
24786+ if (mm->pax_flags & MF_PAX_RANDMMAP)
24787+ mm->mmap_base += mm->delta_mmap;
24788+#endif
24789+
24790+ mm->free_area_cache = mm->mmap_base;
24791 mm->cached_hole_size = ~0UL;
24792 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
24793 len, pgoff, flags);
24794@@ -387,6 +393,7 @@ fail:
24795 /*
24796 * Restore the topdown base:
24797 */
24798+ mm->mmap_base = base;
24799 mm->free_area_cache = base;
24800 mm->cached_hole_size = ~0UL;
24801
24802@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24803 struct hstate *h = hstate_file(file);
24804 struct mm_struct *mm = current->mm;
24805 struct vm_area_struct *vma;
24806+ unsigned long pax_task_size = TASK_SIZE;
24807
24808 if (len & ~huge_page_mask(h))
24809 return -EINVAL;
24810- if (len > TASK_SIZE)
24811+
24812+#ifdef CONFIG_PAX_SEGMEXEC
24813+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
24814+ pax_task_size = SEGMEXEC_TASK_SIZE;
24815+#endif
24816+
24817+ pax_task_size -= PAGE_SIZE;
24818+
24819+ if (len > pax_task_size)
24820 return -ENOMEM;
24821
24822 if (flags & MAP_FIXED) {
24823@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
24824 if (addr) {
24825 addr = ALIGN(addr, huge_page_size(h));
24826 vma = find_vma(mm, addr);
24827- if (TASK_SIZE - len >= addr &&
24828- (!vma || addr + len <= vma->vm_start))
24829+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
24830 return addr;
24831 }
24832 if (mm->get_unmapped_area == arch_get_unmapped_area)
24833diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
24834index 73ffd55..ad78676 100644
24835--- a/arch/x86/mm/init.c
24836+++ b/arch/x86/mm/init.c
24837@@ -69,11 +69,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
24838 * cause a hotspot and fill up ZONE_DMA. The page tables
24839 * need roughly 0.5KB per GB.
24840 */
24841-#ifdef CONFIG_X86_32
24842- start = 0x7000;
24843-#else
24844- start = 0x8000;
24845-#endif
24846+ start = 0x100000;
24847 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
24848 tables, PAGE_SIZE);
24849 if (e820_table_start == -1UL)
24850@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24851 #endif
24852
24853 set_nx();
24854- if (nx_enabled)
24855+ if (nx_enabled && cpu_has_nx)
24856 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
24857
24858 /* Enable PSE if available */
24859@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
24860 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
24861 * mmio resources as well as potential bios/acpi data regions.
24862 */
24863+
24864 int devmem_is_allowed(unsigned long pagenr)
24865 {
24866+#ifdef CONFIG_GRKERNSEC_KMEM
24867+ /* allow BDA */
24868+ if (!pagenr)
24869+ return 1;
24870+ /* allow EBDA */
24871+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
24872+ return 1;
24873+ /* allow ISA/video mem */
24874+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
24875+ return 1;
24876+ /* throw out everything else below 1MB */
24877+ if (pagenr <= 256)
24878+ return 0;
24879+#else
24880 if (pagenr <= 256)
24881 return 1;
24882+#endif
24883+
24884 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
24885 return 0;
24886 if (!page_is_ram(pagenr))
24887@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
24888
24889 void free_initmem(void)
24890 {
24891+
24892+#ifdef CONFIG_PAX_KERNEXEC
24893+#ifdef CONFIG_X86_32
24894+ /* PaX: limit KERNEL_CS to actual size */
24895+ unsigned long addr, limit;
24896+ struct desc_struct d;
24897+ int cpu;
24898+
24899+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
24900+ limit = (limit - 1UL) >> PAGE_SHIFT;
24901+
24902+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
24903+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
24904+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
24905+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
24906+ }
24907+
24908+ /* PaX: make KERNEL_CS read-only */
24909+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
24910+ if (!paravirt_enabled())
24911+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
24912+/*
24913+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
24914+ pgd = pgd_offset_k(addr);
24915+ pud = pud_offset(pgd, addr);
24916+ pmd = pmd_offset(pud, addr);
24917+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24918+ }
24919+*/
24920+#ifdef CONFIG_X86_PAE
24921+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
24922+/*
24923+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
24924+ pgd = pgd_offset_k(addr);
24925+ pud = pud_offset(pgd, addr);
24926+ pmd = pmd_offset(pud, addr);
24927+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24928+ }
24929+*/
24930+#endif
24931+
24932+#ifdef CONFIG_MODULES
24933+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
24934+#endif
24935+
24936+#else
24937+ pgd_t *pgd;
24938+ pud_t *pud;
24939+ pmd_t *pmd;
24940+ unsigned long addr, end;
24941+
24942+ /* PaX: make kernel code/rodata read-only, rest non-executable */
24943+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
24944+ pgd = pgd_offset_k(addr);
24945+ pud = pud_offset(pgd, addr);
24946+ pmd = pmd_offset(pud, addr);
24947+ if (!pmd_present(*pmd))
24948+ continue;
24949+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
24950+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24951+ else
24952+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
24953+ }
24954+
24955+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
24956+ end = addr + KERNEL_IMAGE_SIZE;
24957+ for (; addr < end; addr += PMD_SIZE) {
24958+ pgd = pgd_offset_k(addr);
24959+ pud = pud_offset(pgd, addr);
24960+ pmd = pmd_offset(pud, addr);
24961+ if (!pmd_present(*pmd))
24962+ continue;
24963+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
24964+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
24965+ }
24966+#endif
24967+
24968+ flush_tlb_all();
24969+#endif
24970+
24971 free_init_pages("unused kernel memory",
24972 (unsigned long)(&__init_begin),
24973 (unsigned long)(&__init_end));
24974diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
24975index 30938c1..bda3d5d 100644
24976--- a/arch/x86/mm/init_32.c
24977+++ b/arch/x86/mm/init_32.c
24978@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
24979 }
24980
24981 /*
24982- * Creates a middle page table and puts a pointer to it in the
24983- * given global directory entry. This only returns the gd entry
24984- * in non-PAE compilation mode, since the middle layer is folded.
24985- */
24986-static pmd_t * __init one_md_table_init(pgd_t *pgd)
24987-{
24988- pud_t *pud;
24989- pmd_t *pmd_table;
24990-
24991-#ifdef CONFIG_X86_PAE
24992- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
24993- if (after_bootmem)
24994- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
24995- else
24996- pmd_table = (pmd_t *)alloc_low_page();
24997- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
24998- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
24999- pud = pud_offset(pgd, 0);
25000- BUG_ON(pmd_table != pmd_offset(pud, 0));
25001-
25002- return pmd_table;
25003- }
25004-#endif
25005- pud = pud_offset(pgd, 0);
25006- pmd_table = pmd_offset(pud, 0);
25007-
25008- return pmd_table;
25009-}
25010-
25011-/*
25012 * Create a page table and place a pointer to it in a middle page
25013 * directory entry:
25014 */
25015@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
25016 page_table = (pte_t *)alloc_low_page();
25017
25018 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
25019+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
25020+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
25021+#else
25022 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
25023+#endif
25024 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
25025 }
25026
25027 return pte_offset_kernel(pmd, 0);
25028 }
25029
25030+static pmd_t * __init one_md_table_init(pgd_t *pgd)
25031+{
25032+ pud_t *pud;
25033+ pmd_t *pmd_table;
25034+
25035+ pud = pud_offset(pgd, 0);
25036+ pmd_table = pmd_offset(pud, 0);
25037+
25038+ return pmd_table;
25039+}
25040+
25041 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
25042 {
25043 int pgd_idx = pgd_index(vaddr);
25044@@ -201,6 +186,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25045 int pgd_idx, pmd_idx;
25046 unsigned long vaddr;
25047 pgd_t *pgd;
25048+ pud_t *pud;
25049 pmd_t *pmd;
25050 pte_t *pte = NULL;
25051
25052@@ -210,8 +196,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25053 pgd = pgd_base + pgd_idx;
25054
25055 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
25056- pmd = one_md_table_init(pgd);
25057- pmd = pmd + pmd_index(vaddr);
25058+ pud = pud_offset(pgd, vaddr);
25059+ pmd = pmd_offset(pud, vaddr);
25060+
25061+#ifdef CONFIG_X86_PAE
25062+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25063+#endif
25064+
25065 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
25066 pmd++, pmd_idx++) {
25067 pte = page_table_kmap_check(one_page_table_init(pmd),
25068@@ -223,11 +214,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
25069 }
25070 }
25071
25072-static inline int is_kernel_text(unsigned long addr)
25073+static inline int is_kernel_text(unsigned long start, unsigned long end)
25074 {
25075- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
25076- return 1;
25077- return 0;
25078+ if ((start > ktla_ktva((unsigned long)_etext) ||
25079+ end <= ktla_ktva((unsigned long)_stext)) &&
25080+ (start > ktla_ktva((unsigned long)_einittext) ||
25081+ end <= ktla_ktva((unsigned long)_sinittext)) &&
25082+
25083+#ifdef CONFIG_ACPI_SLEEP
25084+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
25085+#endif
25086+
25087+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
25088+ return 0;
25089+ return 1;
25090 }
25091
25092 /*
25093@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned long start,
25094 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
25095 unsigned long start_pfn, end_pfn;
25096 pgd_t *pgd_base = swapper_pg_dir;
25097- int pgd_idx, pmd_idx, pte_ofs;
25098+ unsigned int pgd_idx, pmd_idx, pte_ofs;
25099 unsigned long pfn;
25100 pgd_t *pgd;
25101+ pud_t *pud;
25102 pmd_t *pmd;
25103 pte_t *pte;
25104 unsigned pages_2m, pages_4k;
25105@@ -278,8 +279,13 @@ repeat:
25106 pfn = start_pfn;
25107 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25108 pgd = pgd_base + pgd_idx;
25109- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
25110- pmd = one_md_table_init(pgd);
25111+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
25112+ pud = pud_offset(pgd, 0);
25113+ pmd = pmd_offset(pud, 0);
25114+
25115+#ifdef CONFIG_X86_PAE
25116+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
25117+#endif
25118
25119 if (pfn >= end_pfn)
25120 continue;
25121@@ -291,14 +297,13 @@ repeat:
25122 #endif
25123 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
25124 pmd++, pmd_idx++) {
25125- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
25126+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
25127
25128 /*
25129 * Map with big pages if possible, otherwise
25130 * create normal page tables:
25131 */
25132 if (use_pse) {
25133- unsigned int addr2;
25134 pgprot_t prot = PAGE_KERNEL_LARGE;
25135 /*
25136 * first pass will use the same initial
25137@@ -308,11 +313,7 @@ repeat:
25138 __pgprot(PTE_IDENT_ATTR |
25139 _PAGE_PSE);
25140
25141- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
25142- PAGE_OFFSET + PAGE_SIZE-1;
25143-
25144- if (is_kernel_text(addr) ||
25145- is_kernel_text(addr2))
25146+ if (is_kernel_text(address, address + PMD_SIZE))
25147 prot = PAGE_KERNEL_LARGE_EXEC;
25148
25149 pages_2m++;
25150@@ -329,7 +330,7 @@ repeat:
25151 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
25152 pte += pte_ofs;
25153 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
25154- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
25155+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
25156 pgprot_t prot = PAGE_KERNEL;
25157 /*
25158 * first pass will use the same initial
25159@@ -337,7 +338,7 @@ repeat:
25160 */
25161 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
25162
25163- if (is_kernel_text(addr))
25164+ if (is_kernel_text(address, address + PAGE_SIZE))
25165 prot = PAGE_KERNEL_EXEC;
25166
25167 pages_4k++;
25168@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start(pgd_t *base)
25169
25170 pud = pud_offset(pgd, va);
25171 pmd = pmd_offset(pud, va);
25172- if (!pmd_present(*pmd))
25173+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
25174 break;
25175
25176 pte = pte_offset_kernel(pmd, va);
25177@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_range_init(void)
25178
25179 static void __init pagetable_init(void)
25180 {
25181- pgd_t *pgd_base = swapper_pg_dir;
25182-
25183- permanent_kmaps_init(pgd_base);
25184+ permanent_kmaps_init(swapper_pg_dir);
25185 }
25186
25187 #ifdef CONFIG_ACPI_SLEEP
25188@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
25189 * ACPI suspend needs this for resume, because things like the intel-agp
25190 * driver might have split up a kernel 4MB mapping.
25191 */
25192-char swsusp_pg_dir[PAGE_SIZE]
25193+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
25194 __attribute__ ((aligned(PAGE_SIZE)));
25195
25196 static inline void save_pg_dir(void)
25197 {
25198- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
25199+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
25200 }
25201 #else /* !CONFIG_ACPI_SLEEP */
25202 static inline void save_pg_dir(void)
25203@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
25204 flush_tlb_all();
25205 }
25206
25207-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25208+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
25209 EXPORT_SYMBOL_GPL(__supported_pte_mask);
25210
25211 /* user-defined highmem size */
25212@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void)
25213 * Initialize the boot-time allocator (with low memory only):
25214 */
25215 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
25216- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25217+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
25218 PAGE_SIZE);
25219 if (bootmap == -1L)
25220 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
25221@@ -864,6 +863,12 @@ void __init mem_init(void)
25222
25223 pci_iommu_alloc();
25224
25225+#ifdef CONFIG_PAX_PER_CPU_PGD
25226+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25227+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25228+ KERNEL_PGD_PTRS);
25229+#endif
25230+
25231 #ifdef CONFIG_FLATMEM
25232 BUG_ON(!mem_map);
25233 #endif
25234@@ -881,7 +886,7 @@ void __init mem_init(void)
25235 set_highmem_pages_init();
25236
25237 codesize = (unsigned long) &_etext - (unsigned long) &_text;
25238- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
25239+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
25240 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
25241
25242 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
25243@@ -923,10 +928,10 @@ void __init mem_init(void)
25244 ((unsigned long)&__init_end -
25245 (unsigned long)&__init_begin) >> 10,
25246
25247- (unsigned long)&_etext, (unsigned long)&_edata,
25248- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
25249+ (unsigned long)&_sdata, (unsigned long)&_edata,
25250+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
25251
25252- (unsigned long)&_text, (unsigned long)&_etext,
25253+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
25254 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
25255
25256 /*
25257@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
25258 if (!kernel_set_to_readonly)
25259 return;
25260
25261+ start = ktla_ktva(start);
25262 pr_debug("Set kernel text: %lx - %lx for read write\n",
25263 start, start+size);
25264
25265@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
25266 if (!kernel_set_to_readonly)
25267 return;
25268
25269+ start = ktla_ktva(start);
25270 pr_debug("Set kernel text: %lx - %lx for read only\n",
25271 start, start+size);
25272
25273@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
25274 unsigned long start = PFN_ALIGN(_text);
25275 unsigned long size = PFN_ALIGN(_etext) - start;
25276
25277+ start = ktla_ktva(start);
25278 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
25279 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
25280 size >> 10);
25281diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
25282index 7d095ad..25d2549 100644
25283--- a/arch/x86/mm/init_64.c
25284+++ b/arch/x86/mm/init_64.c
25285@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
25286 pmd = fill_pmd(pud, vaddr);
25287 pte = fill_pte(pmd, vaddr);
25288
25289+ pax_open_kernel();
25290 set_pte(pte, new_pte);
25291+ pax_close_kernel();
25292
25293 /*
25294 * It's enough to flush this one mapping.
25295@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
25296 pgd = pgd_offset_k((unsigned long)__va(phys));
25297 if (pgd_none(*pgd)) {
25298 pud = (pud_t *) spp_getpage();
25299- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
25300- _PAGE_USER));
25301+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
25302 }
25303 pud = pud_offset(pgd, (unsigned long)__va(phys));
25304 if (pud_none(*pud)) {
25305 pmd = (pmd_t *) spp_getpage();
25306- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
25307- _PAGE_USER));
25308+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
25309 }
25310 pmd = pmd_offset(pud, phys);
25311 BUG_ON(!pmd_none(*pmd));
25312@@ -675,6 +675,12 @@ void __init mem_init(void)
25313
25314 pci_iommu_alloc();
25315
25316+#ifdef CONFIG_PAX_PER_CPU_PGD
25317+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
25318+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
25319+ KERNEL_PGD_PTRS);
25320+#endif
25321+
25322 /* clear_bss() already clear the empty_zero_page */
25323
25324 reservedpages = 0;
25325@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
25326 static struct vm_area_struct gate_vma = {
25327 .vm_start = VSYSCALL_START,
25328 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
25329- .vm_page_prot = PAGE_READONLY_EXEC,
25330- .vm_flags = VM_READ | VM_EXEC
25331+ .vm_page_prot = PAGE_READONLY,
25332+ .vm_flags = VM_READ
25333 };
25334
25335 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
25336@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long addr)
25337
25338 const char *arch_vma_name(struct vm_area_struct *vma)
25339 {
25340- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
25341+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
25342 return "[vdso]";
25343 if (vma == &gate_vma)
25344 return "[vsyscall]";
25345diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
25346index 84e236c..69bd3f6 100644
25347--- a/arch/x86/mm/iomap_32.c
25348+++ b/arch/x86/mm/iomap_32.c
25349@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot)
25350 debug_kmap_atomic(type);
25351 idx = type + KM_TYPE_NR * smp_processor_id();
25352 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
25353+
25354+ pax_open_kernel();
25355 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
25356+ pax_close_kernel();
25357+
25358 arch_flush_lazy_mmu_mode();
25359
25360 return (void *)vaddr;
25361diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
25362index 2feb9bd..ab91e7b 100644
25363--- a/arch/x86/mm/ioremap.c
25364+++ b/arch/x86/mm/ioremap.c
25365@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
25366 * Second special case: Some BIOSen report the PC BIOS
25367 * area (640->1Mb) as ram even though it is not.
25368 */
25369- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
25370- pagenr < (BIOS_END >> PAGE_SHIFT))
25371+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
25372+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
25373 return 0;
25374
25375 for (i = 0; i < e820.nr_map; i++) {
25376@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
25377 /*
25378 * Don't allow anybody to remap normal RAM that we're using..
25379 */
25380- for (pfn = phys_addr >> PAGE_SHIFT;
25381- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
25382- pfn++) {
25383-
25384+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
25385 int is_ram = page_is_ram(pfn);
25386
25387- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
25388+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
25389 return NULL;
25390 WARN_ON_ONCE(is_ram);
25391 }
25392@@ -378,6 +375,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
25393
25394 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
25395 if (page_is_ram(start >> PAGE_SHIFT))
25396+#ifdef CONFIG_HIGHMEM
25397+ if ((start >> PAGE_SHIFT) < max_low_pfn)
25398+#endif
25399 return __va(phys);
25400
25401 addr = (void __force *)ioremap_default(start, PAGE_SIZE);
25402@@ -407,7 +407,7 @@ static int __init early_ioremap_debug_setup(char *str)
25403 early_param("early_ioremap_debug", early_ioremap_debug_setup);
25404
25405 static __initdata int after_paging_init;
25406-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
25407+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
25408
25409 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
25410 {
25411@@ -439,8 +439,7 @@ void __init early_ioremap_init(void)
25412 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
25413
25414 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
25415- memset(bm_pte, 0, sizeof(bm_pte));
25416- pmd_populate_kernel(&init_mm, pmd, bm_pte);
25417+ pmd_populate_user(&init_mm, pmd, bm_pte);
25418
25419 /*
25420 * The boot-ioremap range spans multiple pmds, for which
25421diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
25422index 8cc1833..1abbc5b 100644
25423--- a/arch/x86/mm/kmemcheck/kmemcheck.c
25424+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
25425@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
25426 * memory (e.g. tracked pages)? For now, we need this to avoid
25427 * invoking kmemcheck for PnP BIOS calls.
25428 */
25429- if (regs->flags & X86_VM_MASK)
25430+ if (v8086_mode(regs))
25431 return false;
25432- if (regs->cs != __KERNEL_CS)
25433+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
25434 return false;
25435
25436 pte = kmemcheck_pte_lookup(address);
25437diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
25438index c9e57af..07a321b 100644
25439--- a/arch/x86/mm/mmap.c
25440+++ b/arch/x86/mm/mmap.c
25441@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size(void)
25442 * Leave an at least ~128 MB hole with possible stack randomization.
25443 */
25444 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
25445-#define MAX_GAP (TASK_SIZE/6*5)
25446+#define MAX_GAP (pax_task_size/6*5)
25447
25448 /*
25449 * True on X86_32 or when emulating IA32 on X86_64
25450@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
25451 return rnd << PAGE_SHIFT;
25452 }
25453
25454-static unsigned long mmap_base(void)
25455+static unsigned long mmap_base(struct mm_struct *mm)
25456 {
25457 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
25458+ unsigned long pax_task_size = TASK_SIZE;
25459+
25460+#ifdef CONFIG_PAX_SEGMEXEC
25461+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25462+ pax_task_size = SEGMEXEC_TASK_SIZE;
25463+#endif
25464
25465 if (gap < MIN_GAP)
25466 gap = MIN_GAP;
25467 else if (gap > MAX_GAP)
25468 gap = MAX_GAP;
25469
25470- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
25471+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
25472 }
25473
25474 /*
25475 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
25476 * does, but not when emulating X86_32
25477 */
25478-static unsigned long mmap_legacy_base(void)
25479+static unsigned long mmap_legacy_base(struct mm_struct *mm)
25480 {
25481- if (mmap_is_ia32())
25482+ if (mmap_is_ia32()) {
25483+
25484+#ifdef CONFIG_PAX_SEGMEXEC
25485+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
25486+ return SEGMEXEC_TASK_UNMAPPED_BASE;
25487+ else
25488+#endif
25489+
25490 return TASK_UNMAPPED_BASE;
25491- else
25492+ } else
25493 return TASK_UNMAPPED_BASE + mmap_rnd();
25494 }
25495
25496@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(void)
25497 void arch_pick_mmap_layout(struct mm_struct *mm)
25498 {
25499 if (mmap_is_legacy()) {
25500- mm->mmap_base = mmap_legacy_base();
25501+ mm->mmap_base = mmap_legacy_base(mm);
25502+
25503+#ifdef CONFIG_PAX_RANDMMAP
25504+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25505+ mm->mmap_base += mm->delta_mmap;
25506+#endif
25507+
25508 mm->get_unmapped_area = arch_get_unmapped_area;
25509 mm->unmap_area = arch_unmap_area;
25510 } else {
25511- mm->mmap_base = mmap_base();
25512+ mm->mmap_base = mmap_base(mm);
25513+
25514+#ifdef CONFIG_PAX_RANDMMAP
25515+ if (mm->pax_flags & MF_PAX_RANDMMAP)
25516+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
25517+#endif
25518+
25519 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
25520 mm->unmap_area = arch_unmap_area_topdown;
25521 }
25522diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
25523index 132772a..b961f11 100644
25524--- a/arch/x86/mm/mmio-mod.c
25525+++ b/arch/x86/mm/mmio-mod.c
25526@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
25527 break;
25528 default:
25529 {
25530- unsigned char *ip = (unsigned char *)instptr;
25531+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
25532 my_trace->opcode = MMIO_UNKNOWN_OP;
25533 my_trace->width = 0;
25534 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
25535@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
25536 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25537 void __iomem *addr)
25538 {
25539- static atomic_t next_id;
25540+ static atomic_unchecked_t next_id;
25541 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
25542 /* These are page-unaligned. */
25543 struct mmiotrace_map map = {
25544@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
25545 .private = trace
25546 },
25547 .phys = offset,
25548- .id = atomic_inc_return(&next_id)
25549+ .id = atomic_inc_return_unchecked(&next_id)
25550 };
25551 map.map_id = trace->id;
25552
25553diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
25554index d253006..e56dd6a 100644
25555--- a/arch/x86/mm/numa_32.c
25556+++ b/arch/x86/mm/numa_32.c
25557@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
25558 }
25559 #endif
25560
25561-extern unsigned long find_max_low_pfn(void);
25562 extern unsigned long highend_pfn, highstart_pfn;
25563
25564 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
25565diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
25566index e1d1069..2251ff3 100644
25567--- a/arch/x86/mm/pageattr-test.c
25568+++ b/arch/x86/mm/pageattr-test.c
25569@@ -36,7 +36,7 @@ enum {
25570
25571 static int pte_testbit(pte_t pte)
25572 {
25573- return pte_flags(pte) & _PAGE_UNUSED1;
25574+ return pte_flags(pte) & _PAGE_CPA_TEST;
25575 }
25576
25577 struct split_state {
25578diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
25579index dd38bfb..8c12306 100644
25580--- a/arch/x86/mm/pageattr.c
25581+++ b/arch/x86/mm/pageattr.c
25582@@ -261,16 +261,17 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25583 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
25584 */
25585 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
25586- pgprot_val(forbidden) |= _PAGE_NX;
25587+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25588
25589 /*
25590 * The kernel text needs to be executable for obvious reasons
25591 * Does not cover __inittext since that is gone later on. On
25592 * 64bit we do not enforce !NX on the low mapping
25593 */
25594- if (within(address, (unsigned long)_text, (unsigned long)_etext))
25595- pgprot_val(forbidden) |= _PAGE_NX;
25596+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
25597+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25598
25599+#ifdef CONFIG_DEBUG_RODATA
25600 /*
25601 * The .rodata section needs to be read-only. Using the pfn
25602 * catches all aliases.
25603@@ -278,6 +279,14 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
25604 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
25605 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
25606 pgprot_val(forbidden) |= _PAGE_RW;
25607+#endif
25608+
25609+#ifdef CONFIG_PAX_KERNEXEC
25610+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
25611+ pgprot_val(forbidden) |= _PAGE_RW;
25612+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
25613+ }
25614+#endif
25615
25616 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
25617
25618@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
25619 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
25620 {
25621 /* change init_mm */
25622+ pax_open_kernel();
25623 set_pte_atomic(kpte, pte);
25624+
25625 #ifdef CONFIG_X86_32
25626 if (!SHARED_KERNEL_PMD) {
25627+
25628+#ifdef CONFIG_PAX_PER_CPU_PGD
25629+ unsigned long cpu;
25630+#else
25631 struct page *page;
25632+#endif
25633
25634+#ifdef CONFIG_PAX_PER_CPU_PGD
25635+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
25636+ pgd_t *pgd = get_cpu_pgd(cpu);
25637+#else
25638 list_for_each_entry(page, &pgd_list, lru) {
25639- pgd_t *pgd;
25640+ pgd_t *pgd = (pgd_t *)page_address(page);
25641+#endif
25642+
25643 pud_t *pud;
25644 pmd_t *pmd;
25645
25646- pgd = (pgd_t *)page_address(page) + pgd_index(address);
25647+ pgd += pgd_index(address);
25648 pud = pud_offset(pgd, address);
25649 pmd = pmd_offset(pud, address);
25650 set_pte_atomic((pte_t *)pmd, pte);
25651 }
25652 }
25653 #endif
25654+ pax_close_kernel();
25655 }
25656
25657 static int
25658diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
25659index e78cd0e..de0a817 100644
25660--- a/arch/x86/mm/pat.c
25661+++ b/arch/x86/mm/pat.c
25662@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct memtype *entry, unsigned long *type)
25663
25664 conflict:
25665 printk(KERN_INFO "%s:%d conflicting memory types "
25666- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
25667+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
25668 new->end, cattr_name(new->type), cattr_name(entry->type));
25669 return -EBUSY;
25670 }
25671@@ -559,7 +559,7 @@ unlock_ret:
25672
25673 if (err) {
25674 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
25675- current->comm, current->pid, start, end);
25676+ current->comm, task_pid_nr(current), start, end);
25677 }
25678
25679 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
25680@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
25681 while (cursor < to) {
25682 if (!devmem_is_allowed(pfn)) {
25683 printk(KERN_INFO
25684- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
25685- current->comm, from, to);
25686+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
25687+ current->comm, from, to, cursor);
25688 return 0;
25689 }
25690 cursor += PAGE_SIZE;
25691@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
25692 printk(KERN_INFO
25693 "%s:%d ioremap_change_attr failed %s "
25694 "for %Lx-%Lx\n",
25695- current->comm, current->pid,
25696+ current->comm, task_pid_nr(current),
25697 cattr_name(flags),
25698 base, (unsigned long long)(base + size));
25699 return -EINVAL;
25700@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
25701 free_memtype(paddr, paddr + size);
25702 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
25703 " for %Lx-%Lx, got %s\n",
25704- current->comm, current->pid,
25705+ current->comm, task_pid_nr(current),
25706 cattr_name(want_flags),
25707 (unsigned long long)paddr,
25708 (unsigned long long)(paddr + size),
25709diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
25710index df3d5c8..c2223e1 100644
25711--- a/arch/x86/mm/pf_in.c
25712+++ b/arch/x86/mm/pf_in.c
25713@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
25714 int i;
25715 enum reason_type rv = OTHERS;
25716
25717- p = (unsigned char *)ins_addr;
25718+ p = (unsigned char *)ktla_ktva(ins_addr);
25719 p += skip_prefix(p, &prf);
25720 p += get_opcode(p, &opcode);
25721
25722@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
25723 struct prefix_bits prf;
25724 int i;
25725
25726- p = (unsigned char *)ins_addr;
25727+ p = (unsigned char *)ktla_ktva(ins_addr);
25728 p += skip_prefix(p, &prf);
25729 p += get_opcode(p, &opcode);
25730
25731@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
25732 struct prefix_bits prf;
25733 int i;
25734
25735- p = (unsigned char *)ins_addr;
25736+ p = (unsigned char *)ktla_ktva(ins_addr);
25737 p += skip_prefix(p, &prf);
25738 p += get_opcode(p, &opcode);
25739
25740@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
25741 int i;
25742 unsigned long rv;
25743
25744- p = (unsigned char *)ins_addr;
25745+ p = (unsigned char *)ktla_ktva(ins_addr);
25746 p += skip_prefix(p, &prf);
25747 p += get_opcode(p, &opcode);
25748 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
25749@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
25750 int i;
25751 unsigned long rv;
25752
25753- p = (unsigned char *)ins_addr;
25754+ p = (unsigned char *)ktla_ktva(ins_addr);
25755 p += skip_prefix(p, &prf);
25756 p += get_opcode(p, &opcode);
25757 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
25758diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
25759index e0e6fad..6b90017 100644
25760--- a/arch/x86/mm/pgtable.c
25761+++ b/arch/x86/mm/pgtable.c
25762@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *pgd)
25763 list_del(&page->lru);
25764 }
25765
25766-#define UNSHARED_PTRS_PER_PGD \
25767- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25768+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25769+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
25770
25771+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25772+{
25773+ while (count--)
25774+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
25775+}
25776+#endif
25777+
25778+#ifdef CONFIG_PAX_PER_CPU_PGD
25779+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
25780+{
25781+ while (count--)
25782+
25783+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25784+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
25785+#else
25786+ *dst++ = *src++;
25787+#endif
25788+
25789+}
25790+#endif
25791+
25792+#ifdef CONFIG_X86_64
25793+#define pxd_t pud_t
25794+#define pyd_t pgd_t
25795+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
25796+#define pxd_free(mm, pud) pud_free((mm), (pud))
25797+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
25798+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
25799+#define PYD_SIZE PGDIR_SIZE
25800+#else
25801+#define pxd_t pmd_t
25802+#define pyd_t pud_t
25803+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
25804+#define pxd_free(mm, pud) pmd_free((mm), (pud))
25805+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
25806+#define pyd_offset(mm ,address) pud_offset((mm), (address))
25807+#define PYD_SIZE PUD_SIZE
25808+#endif
25809+
25810+#ifdef CONFIG_PAX_PER_CPU_PGD
25811+static inline void pgd_ctor(pgd_t *pgd) {}
25812+static inline void pgd_dtor(pgd_t *pgd) {}
25813+#else
25814 static void pgd_ctor(pgd_t *pgd)
25815 {
25816 /* If the pgd points to a shared pagetable level (either the
25817@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
25818 pgd_list_del(pgd);
25819 spin_unlock_irqrestore(&pgd_lock, flags);
25820 }
25821+#endif
25822
25823 /*
25824 * List of all pgd's needed for non-PAE so it can invalidate entries
25825@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
25826 * -- wli
25827 */
25828
25829-#ifdef CONFIG_X86_PAE
25830+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
25831 /*
25832 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
25833 * updating the top-level pagetable entries to guarantee the
25834@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
25835 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
25836 * and initialize the kernel pmds here.
25837 */
25838-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
25839+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
25840
25841 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25842 {
25843@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
25844 */
25845 flush_tlb_mm(mm);
25846 }
25847+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
25848+#define PREALLOCATED_PXDS USER_PGD_PTRS
25849 #else /* !CONFIG_X86_PAE */
25850
25851 /* No need to prepopulate any pagetable entries in non-PAE modes. */
25852-#define PREALLOCATED_PMDS 0
25853+#define PREALLOCATED_PXDS 0
25854
25855 #endif /* CONFIG_X86_PAE */
25856
25857-static void free_pmds(pmd_t *pmds[])
25858+static void free_pxds(pxd_t *pxds[])
25859 {
25860 int i;
25861
25862- for(i = 0; i < PREALLOCATED_PMDS; i++)
25863- if (pmds[i])
25864- free_page((unsigned long)pmds[i]);
25865+ for(i = 0; i < PREALLOCATED_PXDS; i++)
25866+ if (pxds[i])
25867+ free_page((unsigned long)pxds[i]);
25868 }
25869
25870-static int preallocate_pmds(pmd_t *pmds[])
25871+static int preallocate_pxds(pxd_t *pxds[])
25872 {
25873 int i;
25874 bool failed = false;
25875
25876- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25877- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
25878- if (pmd == NULL)
25879+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25880+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
25881+ if (pxd == NULL)
25882 failed = true;
25883- pmds[i] = pmd;
25884+ pxds[i] = pxd;
25885 }
25886
25887 if (failed) {
25888- free_pmds(pmds);
25889+ free_pxds(pxds);
25890 return -ENOMEM;
25891 }
25892
25893@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[])
25894 * preallocate which never got a corresponding vma will need to be
25895 * freed manually.
25896 */
25897-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
25898+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
25899 {
25900 int i;
25901
25902- for(i = 0; i < PREALLOCATED_PMDS; i++) {
25903+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
25904 pgd_t pgd = pgdp[i];
25905
25906 if (pgd_val(pgd) != 0) {
25907- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
25908+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
25909
25910- pgdp[i] = native_make_pgd(0);
25911+ set_pgd(pgdp + i, native_make_pgd(0));
25912
25913- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
25914- pmd_free(mm, pmd);
25915+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
25916+ pxd_free(mm, pxd);
25917 }
25918 }
25919 }
25920
25921-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
25922+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
25923 {
25924- pud_t *pud;
25925+ pyd_t *pyd;
25926 unsigned long addr;
25927 int i;
25928
25929- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
25930+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
25931 return;
25932
25933- pud = pud_offset(pgd, 0);
25934+#ifdef CONFIG_X86_64
25935+ pyd = pyd_offset(mm, 0L);
25936+#else
25937+ pyd = pyd_offset(pgd, 0L);
25938+#endif
25939
25940- for (addr = i = 0; i < PREALLOCATED_PMDS;
25941- i++, pud++, addr += PUD_SIZE) {
25942- pmd_t *pmd = pmds[i];
25943+ for (addr = i = 0; i < PREALLOCATED_PXDS;
25944+ i++, pyd++, addr += PYD_SIZE) {
25945+ pxd_t *pxd = pxds[i];
25946
25947 if (i >= KERNEL_PGD_BOUNDARY)
25948- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25949- sizeof(pmd_t) * PTRS_PER_PMD);
25950+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
25951+ sizeof(pxd_t) * PTRS_PER_PMD);
25952
25953- pud_populate(mm, pud, pmd);
25954+ pyd_populate(mm, pyd, pxd);
25955 }
25956 }
25957
25958 pgd_t *pgd_alloc(struct mm_struct *mm)
25959 {
25960 pgd_t *pgd;
25961- pmd_t *pmds[PREALLOCATED_PMDS];
25962+ pxd_t *pxds[PREALLOCATED_PXDS];
25963+
25964 unsigned long flags;
25965
25966 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
25967@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25968
25969 mm->pgd = pgd;
25970
25971- if (preallocate_pmds(pmds) != 0)
25972+ if (preallocate_pxds(pxds) != 0)
25973 goto out_free_pgd;
25974
25975 if (paravirt_pgd_alloc(mm) != 0)
25976- goto out_free_pmds;
25977+ goto out_free_pxds;
25978
25979 /*
25980 * Make sure that pre-populating the pmds is atomic with
25981@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
25982 spin_lock_irqsave(&pgd_lock, flags);
25983
25984 pgd_ctor(pgd);
25985- pgd_prepopulate_pmd(mm, pgd, pmds);
25986+ pgd_prepopulate_pxd(mm, pgd, pxds);
25987
25988 spin_unlock_irqrestore(&pgd_lock, flags);
25989
25990 return pgd;
25991
25992-out_free_pmds:
25993- free_pmds(pmds);
25994+out_free_pxds:
25995+ free_pxds(pxds);
25996 out_free_pgd:
25997 free_page((unsigned long)pgd);
25998 out:
25999@@ -287,7 +338,7 @@ out:
26000
26001 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
26002 {
26003- pgd_mop_up_pmds(mm, pgd);
26004+ pgd_mop_up_pxds(mm, pgd);
26005 pgd_dtor(pgd);
26006 paravirt_pgd_free(mm, pgd);
26007 free_page((unsigned long)pgd);
26008diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
26009index 46c8834..fcab43d 100644
26010--- a/arch/x86/mm/pgtable_32.c
26011+++ b/arch/x86/mm/pgtable_32.c
26012@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
26013 return;
26014 }
26015 pte = pte_offset_kernel(pmd, vaddr);
26016+
26017+ pax_open_kernel();
26018 if (pte_val(pteval))
26019 set_pte_at(&init_mm, vaddr, pte, pteval);
26020 else
26021 pte_clear(&init_mm, vaddr, pte);
26022+ pax_close_kernel();
26023
26024 /*
26025 * It's enough to flush this one mapping.
26026diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
26027index 513d8ed..978c161 100644
26028--- a/arch/x86/mm/setup_nx.c
26029+++ b/arch/x86/mm/setup_nx.c
26030@@ -4,11 +4,10 @@
26031
26032 #include <asm/pgtable.h>
26033
26034+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
26035 int nx_enabled;
26036
26037-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26038-static int disable_nx __cpuinitdata;
26039-
26040+#ifndef CONFIG_PAX_PAGEEXEC
26041 /*
26042 * noexec = on|off
26043 *
26044@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str)
26045 if (!str)
26046 return -EINVAL;
26047 if (!strncmp(str, "on", 2)) {
26048- __supported_pte_mask |= _PAGE_NX;
26049- disable_nx = 0;
26050+ nx_enabled = 1;
26051 } else if (!strncmp(str, "off", 3)) {
26052- disable_nx = 1;
26053- __supported_pte_mask &= ~_PAGE_NX;
26054+ nx_enabled = 0;
26055 }
26056 return 0;
26057 }
26058 early_param("noexec", noexec_setup);
26059 #endif
26060+#endif
26061
26062 #ifdef CONFIG_X86_PAE
26063 void __init set_nx(void)
26064 {
26065- unsigned int v[4], l, h;
26066+ if (!nx_enabled && cpu_has_nx) {
26067+ unsigned l, h;
26068
26069- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
26070- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
26071-
26072- if ((v[3] & (1 << 20)) && !disable_nx) {
26073- rdmsr(MSR_EFER, l, h);
26074- l |= EFER_NX;
26075- wrmsr(MSR_EFER, l, h);
26076- nx_enabled = 1;
26077- __supported_pte_mask |= _PAGE_NX;
26078- }
26079+ __supported_pte_mask &= ~_PAGE_NX;
26080+ rdmsr(MSR_EFER, l, h);
26081+ l &= ~EFER_NX;
26082+ wrmsr(MSR_EFER, l, h);
26083 }
26084 }
26085 #else
26086@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
26087 unsigned long efer;
26088
26089 rdmsrl(MSR_EFER, efer);
26090- if (!(efer & EFER_NX) || disable_nx)
26091+ if (!(efer & EFER_NX) || !nx_enabled)
26092 __supported_pte_mask &= ~_PAGE_NX;
26093 }
26094 #endif
26095diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
26096index 36fe08e..b123d3a 100644
26097--- a/arch/x86/mm/tlb.c
26098+++ b/arch/x86/mm/tlb.c
26099@@ -61,7 +61,11 @@ void leave_mm(int cpu)
26100 BUG();
26101 cpumask_clear_cpu(cpu,
26102 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
26103+
26104+#ifndef CONFIG_PAX_PER_CPU_PGD
26105 load_cr3(swapper_pg_dir);
26106+#endif
26107+
26108 }
26109 EXPORT_SYMBOL_GPL(leave_mm);
26110
26111diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
26112index 829edf0..672adb3 100644
26113--- a/arch/x86/oprofile/backtrace.c
26114+++ b/arch/x86/oprofile/backtrace.c
26115@@ -115,7 +115,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
26116 {
26117 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
26118
26119- if (!user_mode_vm(regs)) {
26120+ if (!user_mode(regs)) {
26121 unsigned long stack = kernel_stack_pointer(regs);
26122 if (depth)
26123 dump_trace(NULL, regs, (unsigned long *)stack, 0,
26124diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
26125index e6a160a..36deff6 100644
26126--- a/arch/x86/oprofile/op_model_p4.c
26127+++ b/arch/x86/oprofile/op_model_p4.c
26128@@ -50,7 +50,7 @@ static inline void setup_num_counters(void)
26129 #endif
26130 }
26131
26132-static int inline addr_increment(void)
26133+static inline int addr_increment(void)
26134 {
26135 #ifdef CONFIG_SMP
26136 return smp_num_siblings == 2 ? 2 : 1;
26137diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
26138index 1331fcf..03901b2 100644
26139--- a/arch/x86/pci/common.c
26140+++ b/arch/x86/pci/common.c
26141@@ -31,8 +31,8 @@ int noioapicreroute = 1;
26142 int pcibios_last_bus = -1;
26143 unsigned long pirq_table_addr;
26144 struct pci_bus *pci_root_bus;
26145-struct pci_raw_ops *raw_pci_ops;
26146-struct pci_raw_ops *raw_pci_ext_ops;
26147+const struct pci_raw_ops *raw_pci_ops;
26148+const struct pci_raw_ops *raw_pci_ext_ops;
26149
26150 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
26151 int reg, int len, u32 *val)
26152diff --git a/arch/x86/pci/direct.c b/arch/x86/pci/direct.c
26153index 347d882..4baf6b6 100644
26154--- a/arch/x86/pci/direct.c
26155+++ b/arch/x86/pci/direct.c
26156@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int seg, unsigned int bus,
26157
26158 #undef PCI_CONF1_ADDRESS
26159
26160-struct pci_raw_ops pci_direct_conf1 = {
26161+const struct pci_raw_ops pci_direct_conf1 = {
26162 .read = pci_conf1_read,
26163 .write = pci_conf1_write,
26164 };
26165@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int seg, unsigned int bus,
26166
26167 #undef PCI_CONF2_ADDRESS
26168
26169-struct pci_raw_ops pci_direct_conf2 = {
26170+const struct pci_raw_ops pci_direct_conf2 = {
26171 .read = pci_conf2_read,
26172 .write = pci_conf2_write,
26173 };
26174@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
26175 * This should be close to trivial, but it isn't, because there are buggy
26176 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
26177 */
26178-static int __init pci_sanity_check(struct pci_raw_ops *o)
26179+static int __init pci_sanity_check(const struct pci_raw_ops *o)
26180 {
26181 u32 x = 0;
26182 int year, devfn;
26183diff --git a/arch/x86/pci/mmconfig_32.c b/arch/x86/pci/mmconfig_32.c
26184index f10a7e9..0425342 100644
26185--- a/arch/x86/pci/mmconfig_32.c
26186+++ b/arch/x86/pci/mmconfig_32.c
26187@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26188 return 0;
26189 }
26190
26191-static struct pci_raw_ops pci_mmcfg = {
26192+static const struct pci_raw_ops pci_mmcfg = {
26193 .read = pci_mmcfg_read,
26194 .write = pci_mmcfg_write,
26195 };
26196diff --git a/arch/x86/pci/mmconfig_64.c b/arch/x86/pci/mmconfig_64.c
26197index 94349f8..41600a7 100644
26198--- a/arch/x86/pci/mmconfig_64.c
26199+++ b/arch/x86/pci/mmconfig_64.c
26200@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int seg, unsigned int bus,
26201 return 0;
26202 }
26203
26204-static struct pci_raw_ops pci_mmcfg = {
26205+static const struct pci_raw_ops pci_mmcfg = {
26206 .read = pci_mmcfg_read,
26207 .write = pci_mmcfg_write,
26208 };
26209diff --git a/arch/x86/pci/numaq_32.c b/arch/x86/pci/numaq_32.c
26210index 8eb295e..86bd657 100644
26211--- a/arch/x86/pci/numaq_32.c
26212+++ b/arch/x86/pci/numaq_32.c
26213@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned int seg, unsigned int bus,
26214
26215 #undef PCI_CONF1_MQ_ADDRESS
26216
26217-static struct pci_raw_ops pci_direct_conf1_mq = {
26218+static const struct pci_raw_ops pci_direct_conf1_mq = {
26219 .read = pci_conf1_mq_read,
26220 .write = pci_conf1_mq_write
26221 };
26222diff --git a/arch/x86/pci/olpc.c b/arch/x86/pci/olpc.c
26223index b889d82..5a58a0a 100644
26224--- a/arch/x86/pci/olpc.c
26225+++ b/arch/x86/pci/olpc.c
26226@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int seg, unsigned int bus,
26227 return 0;
26228 }
26229
26230-static struct pci_raw_ops pci_olpc_conf = {
26231+static const struct pci_raw_ops pci_olpc_conf = {
26232 .read = pci_olpc_read,
26233 .write = pci_olpc_write,
26234 };
26235diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
26236index 1c975cc..ffd0536 100644
26237--- a/arch/x86/pci/pcbios.c
26238+++ b/arch/x86/pci/pcbios.c
26239@@ -56,50 +56,93 @@ union bios32 {
26240 static struct {
26241 unsigned long address;
26242 unsigned short segment;
26243-} bios32_indirect = { 0, __KERNEL_CS };
26244+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
26245
26246 /*
26247 * Returns the entry point for the given service, NULL on error
26248 */
26249
26250-static unsigned long bios32_service(unsigned long service)
26251+static unsigned long __devinit bios32_service(unsigned long service)
26252 {
26253 unsigned char return_code; /* %al */
26254 unsigned long address; /* %ebx */
26255 unsigned long length; /* %ecx */
26256 unsigned long entry; /* %edx */
26257 unsigned long flags;
26258+ struct desc_struct d, *gdt;
26259
26260 local_irq_save(flags);
26261- __asm__("lcall *(%%edi); cld"
26262+
26263+ gdt = get_cpu_gdt_table(smp_processor_id());
26264+
26265+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
26266+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26267+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
26268+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26269+
26270+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
26271 : "=a" (return_code),
26272 "=b" (address),
26273 "=c" (length),
26274 "=d" (entry)
26275 : "0" (service),
26276 "1" (0),
26277- "D" (&bios32_indirect));
26278+ "D" (&bios32_indirect),
26279+ "r"(__PCIBIOS_DS)
26280+ : "memory");
26281+
26282+ pax_open_kernel();
26283+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
26284+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
26285+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
26286+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
26287+ pax_close_kernel();
26288+
26289 local_irq_restore(flags);
26290
26291 switch (return_code) {
26292- case 0:
26293- return address + entry;
26294- case 0x80: /* Not present */
26295- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26296- return 0;
26297- default: /* Shouldn't happen */
26298- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26299- service, return_code);
26300+ case 0: {
26301+ int cpu;
26302+ unsigned char flags;
26303+
26304+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
26305+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
26306+ printk(KERN_WARNING "bios32_service: not valid\n");
26307 return 0;
26308+ }
26309+ address = address + PAGE_OFFSET;
26310+ length += 16UL; /* some BIOSs underreport this... */
26311+ flags = 4;
26312+ if (length >= 64*1024*1024) {
26313+ length >>= PAGE_SHIFT;
26314+ flags |= 8;
26315+ }
26316+
26317+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
26318+ gdt = get_cpu_gdt_table(cpu);
26319+ pack_descriptor(&d, address, length, 0x9b, flags);
26320+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
26321+ pack_descriptor(&d, address, length, 0x93, flags);
26322+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
26323+ }
26324+ return entry;
26325+ }
26326+ case 0x80: /* Not present */
26327+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
26328+ return 0;
26329+ default: /* Shouldn't happen */
26330+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
26331+ service, return_code);
26332+ return 0;
26333 }
26334 }
26335
26336 static struct {
26337 unsigned long address;
26338 unsigned short segment;
26339-} pci_indirect = { 0, __KERNEL_CS };
26340+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
26341
26342-static int pci_bios_present;
26343+static int pci_bios_present __read_only;
26344
26345 static int __devinit check_pcibios(void)
26346 {
26347@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
26348 unsigned long flags, pcibios_entry;
26349
26350 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
26351- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
26352+ pci_indirect.address = pcibios_entry;
26353
26354 local_irq_save(flags);
26355- __asm__(
26356- "lcall *(%%edi); cld\n\t"
26357+ __asm__("movw %w6, %%ds\n\t"
26358+ "lcall *%%ss:(%%edi); cld\n\t"
26359+ "push %%ss\n\t"
26360+ "pop %%ds\n\t"
26361 "jc 1f\n\t"
26362 "xor %%ah, %%ah\n"
26363 "1:"
26364@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
26365 "=b" (ebx),
26366 "=c" (ecx)
26367 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
26368- "D" (&pci_indirect)
26369+ "D" (&pci_indirect),
26370+ "r" (__PCIBIOS_DS)
26371 : "memory");
26372 local_irq_restore(flags);
26373
26374@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26375
26376 switch (len) {
26377 case 1:
26378- __asm__("lcall *(%%esi); cld\n\t"
26379+ __asm__("movw %w6, %%ds\n\t"
26380+ "lcall *%%ss:(%%esi); cld\n\t"
26381+ "push %%ss\n\t"
26382+ "pop %%ds\n\t"
26383 "jc 1f\n\t"
26384 "xor %%ah, %%ah\n"
26385 "1:"
26386@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26387 : "1" (PCIBIOS_READ_CONFIG_BYTE),
26388 "b" (bx),
26389 "D" ((long)reg),
26390- "S" (&pci_indirect));
26391+ "S" (&pci_indirect),
26392+ "r" (__PCIBIOS_DS));
26393 /*
26394 * Zero-extend the result beyond 8 bits, do not trust the
26395 * BIOS having done it:
26396@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26397 *value &= 0xff;
26398 break;
26399 case 2:
26400- __asm__("lcall *(%%esi); cld\n\t"
26401+ __asm__("movw %w6, %%ds\n\t"
26402+ "lcall *%%ss:(%%esi); cld\n\t"
26403+ "push %%ss\n\t"
26404+ "pop %%ds\n\t"
26405 "jc 1f\n\t"
26406 "xor %%ah, %%ah\n"
26407 "1:"
26408@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26409 : "1" (PCIBIOS_READ_CONFIG_WORD),
26410 "b" (bx),
26411 "D" ((long)reg),
26412- "S" (&pci_indirect));
26413+ "S" (&pci_indirect),
26414+ "r" (__PCIBIOS_DS));
26415 /*
26416 * Zero-extend the result beyond 16 bits, do not trust the
26417 * BIOS having done it:
26418@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26419 *value &= 0xffff;
26420 break;
26421 case 4:
26422- __asm__("lcall *(%%esi); cld\n\t"
26423+ __asm__("movw %w6, %%ds\n\t"
26424+ "lcall *%%ss:(%%esi); cld\n\t"
26425+ "push %%ss\n\t"
26426+ "pop %%ds\n\t"
26427 "jc 1f\n\t"
26428 "xor %%ah, %%ah\n"
26429 "1:"
26430@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
26431 : "1" (PCIBIOS_READ_CONFIG_DWORD),
26432 "b" (bx),
26433 "D" ((long)reg),
26434- "S" (&pci_indirect));
26435+ "S" (&pci_indirect),
26436+ "r" (__PCIBIOS_DS));
26437 break;
26438 }
26439
26440@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26441
26442 switch (len) {
26443 case 1:
26444- __asm__("lcall *(%%esi); cld\n\t"
26445+ __asm__("movw %w6, %%ds\n\t"
26446+ "lcall *%%ss:(%%esi); cld\n\t"
26447+ "push %%ss\n\t"
26448+ "pop %%ds\n\t"
26449 "jc 1f\n\t"
26450 "xor %%ah, %%ah\n"
26451 "1:"
26452@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26453 "c" (value),
26454 "b" (bx),
26455 "D" ((long)reg),
26456- "S" (&pci_indirect));
26457+ "S" (&pci_indirect),
26458+ "r" (__PCIBIOS_DS));
26459 break;
26460 case 2:
26461- __asm__("lcall *(%%esi); cld\n\t"
26462+ __asm__("movw %w6, %%ds\n\t"
26463+ "lcall *%%ss:(%%esi); cld\n\t"
26464+ "push %%ss\n\t"
26465+ "pop %%ds\n\t"
26466 "jc 1f\n\t"
26467 "xor %%ah, %%ah\n"
26468 "1:"
26469@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26470 "c" (value),
26471 "b" (bx),
26472 "D" ((long)reg),
26473- "S" (&pci_indirect));
26474+ "S" (&pci_indirect),
26475+ "r" (__PCIBIOS_DS));
26476 break;
26477 case 4:
26478- __asm__("lcall *(%%esi); cld\n\t"
26479+ __asm__("movw %w6, %%ds\n\t"
26480+ "lcall *%%ss:(%%esi); cld\n\t"
26481+ "push %%ss\n\t"
26482+ "pop %%ds\n\t"
26483 "jc 1f\n\t"
26484 "xor %%ah, %%ah\n"
26485 "1:"
26486@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26487 "c" (value),
26488 "b" (bx),
26489 "D" ((long)reg),
26490- "S" (&pci_indirect));
26491+ "S" (&pci_indirect),
26492+ "r" (__PCIBIOS_DS));
26493 break;
26494 }
26495
26496@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
26497 * Function table for BIOS32 access
26498 */
26499
26500-static struct pci_raw_ops pci_bios_access = {
26501+static const struct pci_raw_ops pci_bios_access = {
26502 .read = pci_bios_read,
26503 .write = pci_bios_write
26504 };
26505@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_access = {
26506 * Try to find PCI BIOS.
26507 */
26508
26509-static struct pci_raw_ops * __devinit pci_find_bios(void)
26510+static const struct pci_raw_ops * __devinit pci_find_bios(void)
26511 {
26512 union bios32 *check;
26513 unsigned char sum;
26514@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26515
26516 DBG("PCI: Fetching IRQ routing table... ");
26517 __asm__("push %%es\n\t"
26518+ "movw %w8, %%ds\n\t"
26519 "push %%ds\n\t"
26520 "pop %%es\n\t"
26521- "lcall *(%%esi); cld\n\t"
26522+ "lcall *%%ss:(%%esi); cld\n\t"
26523 "pop %%es\n\t"
26524+ "push %%ss\n\t"
26525+ "pop %%ds\n"
26526 "jc 1f\n\t"
26527 "xor %%ah, %%ah\n"
26528 "1:"
26529@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
26530 "1" (0),
26531 "D" ((long) &opt),
26532 "S" (&pci_indirect),
26533- "m" (opt)
26534+ "m" (opt),
26535+ "r" (__PCIBIOS_DS)
26536 : "memory");
26537 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
26538 if (ret & 0xff00)
26539@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26540 {
26541 int ret;
26542
26543- __asm__("lcall *(%%esi); cld\n\t"
26544+ __asm__("movw %w5, %%ds\n\t"
26545+ "lcall *%%ss:(%%esi); cld\n\t"
26546+ "push %%ss\n\t"
26547+ "pop %%ds\n"
26548 "jc 1f\n\t"
26549 "xor %%ah, %%ah\n"
26550 "1:"
26551@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
26552 : "0" (PCIBIOS_SET_PCI_HW_INT),
26553 "b" ((dev->bus->number << 8) | dev->devfn),
26554 "c" ((irq << 8) | (pin + 10)),
26555- "S" (&pci_indirect));
26556+ "S" (&pci_indirect),
26557+ "r" (__PCIBIOS_DS));
26558 return !(ret & 0xff00);
26559 }
26560 EXPORT_SYMBOL(pcibios_set_irq_routing);
26561diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
26562index fa0f651..9d8f3d9 100644
26563--- a/arch/x86/power/cpu.c
26564+++ b/arch/x86/power/cpu.c
26565@@ -129,7 +129,7 @@ static void do_fpu_end(void)
26566 static void fix_processor_context(void)
26567 {
26568 int cpu = smp_processor_id();
26569- struct tss_struct *t = &per_cpu(init_tss, cpu);
26570+ struct tss_struct *t = init_tss + cpu;
26571
26572 set_tss_desc(cpu, t); /*
26573 * This just modifies memory; should not be
26574@@ -139,7 +139,9 @@ static void fix_processor_context(void)
26575 */
26576
26577 #ifdef CONFIG_X86_64
26578+ pax_open_kernel();
26579 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
26580+ pax_close_kernel();
26581
26582 syscall_init(); /* This sets MSR_*STAR and related */
26583 #endif
26584diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
26585index dd78ef6..f9d928d 100644
26586--- a/arch/x86/vdso/Makefile
26587+++ b/arch/x86/vdso/Makefile
26588@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
26589 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
26590 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
26591
26592-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26593+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
26594 GCOV_PROFILE := n
26595
26596 #
26597diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
26598index ee55754..0013b2e 100644
26599--- a/arch/x86/vdso/vclock_gettime.c
26600+++ b/arch/x86/vdso/vclock_gettime.c
26601@@ -22,24 +22,48 @@
26602 #include <asm/hpet.h>
26603 #include <asm/unistd.h>
26604 #include <asm/io.h>
26605+#include <asm/fixmap.h>
26606 #include "vextern.h"
26607
26608 #define gtod vdso_vsyscall_gtod_data
26609
26610+notrace noinline long __vdso_fallback_time(long *t)
26611+{
26612+ long secs;
26613+ asm volatile("syscall"
26614+ : "=a" (secs)
26615+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
26616+ return secs;
26617+}
26618+
26619 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
26620 {
26621 long ret;
26622 asm("syscall" : "=a" (ret) :
26623- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
26624+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
26625 return ret;
26626 }
26627
26628+notrace static inline cycle_t __vdso_vread_hpet(void)
26629+{
26630+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
26631+}
26632+
26633+notrace static inline cycle_t __vdso_vread_tsc(void)
26634+{
26635+ cycle_t ret = (cycle_t)vget_cycles();
26636+
26637+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
26638+}
26639+
26640 notrace static inline long vgetns(void)
26641 {
26642 long v;
26643- cycles_t (*vread)(void);
26644- vread = gtod->clock.vread;
26645- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
26646+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
26647+ v = __vdso_vread_tsc();
26648+ else
26649+ v = __vdso_vread_hpet();
26650+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
26651 return (v * gtod->clock.mult) >> gtod->clock.shift;
26652 }
26653
26654@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
26655
26656 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26657 {
26658- if (likely(gtod->sysctl_enabled))
26659+ if (likely(gtod->sysctl_enabled &&
26660+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26661+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26662 switch (clock) {
26663 case CLOCK_REALTIME:
26664 if (likely(gtod->clock.vread))
26665@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
26666 int clock_gettime(clockid_t, struct timespec *)
26667 __attribute__((weak, alias("__vdso_clock_gettime")));
26668
26669+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
26670+{
26671+ long ret;
26672+ asm("syscall" : "=a" (ret) :
26673+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
26674+ return ret;
26675+}
26676+
26677 notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26678 {
26679- long ret;
26680- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
26681+ if (likely(gtod->sysctl_enabled &&
26682+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
26683+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
26684+ {
26685 if (likely(tv != NULL)) {
26686 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
26687 offsetof(struct timespec, tv_nsec) ||
26688@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
26689 }
26690 return 0;
26691 }
26692- asm("syscall" : "=a" (ret) :
26693- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
26694- return ret;
26695+ return __vdso_fallback_gettimeofday(tv, tz);
26696 }
26697 int gettimeofday(struct timeval *, struct timezone *)
26698 __attribute__((weak, alias("__vdso_gettimeofday")));
26699diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
26700index 4e5dd3b..00ba15e 100644
26701--- a/arch/x86/vdso/vdso.lds.S
26702+++ b/arch/x86/vdso/vdso.lds.S
26703@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
26704 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
26705 #include "vextern.h"
26706 #undef VEXTERN
26707+
26708+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
26709+VEXTERN(fallback_gettimeofday)
26710+VEXTERN(fallback_time)
26711+VEXTERN(getcpu)
26712+#undef VEXTERN
26713diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
26714index 58bc00f..d53fb48 100644
26715--- a/arch/x86/vdso/vdso32-setup.c
26716+++ b/arch/x86/vdso/vdso32-setup.c
26717@@ -25,6 +25,7 @@
26718 #include <asm/tlbflush.h>
26719 #include <asm/vdso.h>
26720 #include <asm/proto.h>
26721+#include <asm/mman.h>
26722
26723 enum {
26724 VDSO_DISABLED = 0,
26725@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
26726 void enable_sep_cpu(void)
26727 {
26728 int cpu = get_cpu();
26729- struct tss_struct *tss = &per_cpu(init_tss, cpu);
26730+ struct tss_struct *tss = init_tss + cpu;
26731
26732 if (!boot_cpu_has(X86_FEATURE_SEP)) {
26733 put_cpu();
26734@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
26735 gate_vma.vm_start = FIXADDR_USER_START;
26736 gate_vma.vm_end = FIXADDR_USER_END;
26737 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
26738- gate_vma.vm_page_prot = __P101;
26739+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
26740 /*
26741 * Make sure the vDSO gets into every core dump.
26742 * Dumping its contents makes post-mortem fully interpretable later
26743@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26744 if (compat)
26745 addr = VDSO_HIGH_BASE;
26746 else {
26747- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
26748+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
26749 if (IS_ERR_VALUE(addr)) {
26750 ret = addr;
26751 goto up_fail;
26752 }
26753 }
26754
26755- current->mm->context.vdso = (void *)addr;
26756+ current->mm->context.vdso = addr;
26757
26758 if (compat_uses_vma || !compat) {
26759 /*
26760@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26761 }
26762
26763 current_thread_info()->sysenter_return =
26764- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26765+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
26766
26767 up_fail:
26768 if (ret)
26769- current->mm->context.vdso = NULL;
26770+ current->mm->context.vdso = 0;
26771
26772 up_write(&mm->mmap_sem);
26773
26774@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
26775
26776 const char *arch_vma_name(struct vm_area_struct *vma)
26777 {
26778- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
26779+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
26780 return "[vdso]";
26781+
26782+#ifdef CONFIG_PAX_SEGMEXEC
26783+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
26784+ return "[vdso]";
26785+#endif
26786+
26787 return NULL;
26788 }
26789
26790@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
26791 struct mm_struct *mm = tsk->mm;
26792
26793 /* Check to see if this task was created in compat vdso mode */
26794- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
26795+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
26796 return &gate_vma;
26797 return NULL;
26798 }
26799diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h
26800index 1683ba2..48d07f3 100644
26801--- a/arch/x86/vdso/vextern.h
26802+++ b/arch/x86/vdso/vextern.h
26803@@ -11,6 +11,5 @@
26804 put into vextern.h and be referenced as a pointer with vdso prefix.
26805 The main kernel later fills in the values. */
26806
26807-VEXTERN(jiffies)
26808 VEXTERN(vgetcpu_mode)
26809 VEXTERN(vsyscall_gtod_data)
26810diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
26811index 21e1aeb..2c0b3c4 100644
26812--- a/arch/x86/vdso/vma.c
26813+++ b/arch/x86/vdso/vma.c
26814@@ -17,8 +17,6 @@
26815 #include "vextern.h" /* Just for VMAGIC. */
26816 #undef VEXTERN
26817
26818-unsigned int __read_mostly vdso_enabled = 1;
26819-
26820 extern char vdso_start[], vdso_end[];
26821 extern unsigned short vdso_sync_cpuid;
26822
26823@@ -27,10 +25,8 @@ static unsigned vdso_size;
26824
26825 static inline void *var_ref(void *p, char *name)
26826 {
26827- if (*(void **)p != (void *)VMAGIC) {
26828- printk("VDSO: variable %s broken\n", name);
26829- vdso_enabled = 0;
26830- }
26831+ if (*(void **)p != (void *)VMAGIC)
26832+ panic("VDSO: variable %s broken\n", name);
26833 return p;
26834 }
26835
26836@@ -57,21 +53,18 @@ static int __init init_vdso_vars(void)
26837 if (!vbase)
26838 goto oom;
26839
26840- if (memcmp(vbase, "\177ELF", 4)) {
26841- printk("VDSO: I'm broken; not ELF\n");
26842- vdso_enabled = 0;
26843- }
26844+ if (memcmp(vbase, ELFMAG, SELFMAG))
26845+ panic("VDSO: I'm broken; not ELF\n");
26846
26847 #define VEXTERN(x) \
26848 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
26849 #include "vextern.h"
26850 #undef VEXTERN
26851+ vunmap(vbase);
26852 return 0;
26853
26854 oom:
26855- printk("Cannot allocate vdso\n");
26856- vdso_enabled = 0;
26857- return -ENOMEM;
26858+ panic("Cannot allocate vdso\n");
26859 }
26860 __initcall(init_vdso_vars);
26861
26862@@ -102,13 +95,15 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
26863 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26864 {
26865 struct mm_struct *mm = current->mm;
26866- unsigned long addr;
26867+ unsigned long addr = 0;
26868 int ret;
26869
26870- if (!vdso_enabled)
26871- return 0;
26872-
26873 down_write(&mm->mmap_sem);
26874+
26875+#ifdef CONFIG_PAX_RANDMMAP
26876+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
26877+#endif
26878+
26879 addr = vdso_addr(mm->start_stack, vdso_size);
26880 addr = get_unmapped_area(NULL, addr, vdso_size, 0, 0);
26881 if (IS_ERR_VALUE(addr)) {
26882@@ -116,7 +111,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26883 goto up_fail;
26884 }
26885
26886- current->mm->context.vdso = (void *)addr;
26887+ current->mm->context.vdso = addr;
26888
26889 ret = install_special_mapping(mm, addr, vdso_size,
26890 VM_READ|VM_EXEC|
26891@@ -124,7 +119,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
26892 VM_ALWAYSDUMP,
26893 vdso_pages);
26894 if (ret) {
26895- current->mm->context.vdso = NULL;
26896+ current->mm->context.vdso = 0;
26897 goto up_fail;
26898 }
26899
26900@@ -132,10 +127,3 @@ up_fail:
26901 up_write(&mm->mmap_sem);
26902 return ret;
26903 }
26904-
26905-static __init int vdso_setup(char *s)
26906-{
26907- vdso_enabled = simple_strtoul(s, NULL, 0);
26908- return 0;
26909-}
26910-__setup("vdso=", vdso_setup);
26911diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
26912index 0087b00..eecb34f 100644
26913--- a/arch/x86/xen/enlighten.c
26914+++ b/arch/x86/xen/enlighten.c
26915@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
26916
26917 struct shared_info xen_dummy_shared_info;
26918
26919-void *xen_initial_gdt;
26920-
26921 /*
26922 * Point at some empty memory to start with. We map the real shared_info
26923 * page as soon as fixmap is up and running.
26924@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g)
26925
26926 preempt_disable();
26927
26928- start = __get_cpu_var(idt_desc).address;
26929+ start = (unsigned long)__get_cpu_var(idt_desc).address;
26930 end = start + __get_cpu_var(idt_desc).size + 1;
26931
26932 xen_mc_flush();
26933@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic_ops __initdata = {
26934 #endif
26935 };
26936
26937-static void xen_reboot(int reason)
26938+static __noreturn void xen_reboot(int reason)
26939 {
26940 struct sched_shutdown r = { .reason = reason };
26941
26942@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
26943 BUG();
26944 }
26945
26946-static void xen_restart(char *msg)
26947+static __noreturn void xen_restart(char *msg)
26948 {
26949 xen_reboot(SHUTDOWN_reboot);
26950 }
26951
26952-static void xen_emergency_restart(void)
26953+static __noreturn void xen_emergency_restart(void)
26954 {
26955 xen_reboot(SHUTDOWN_reboot);
26956 }
26957
26958-static void xen_machine_halt(void)
26959+static __noreturn void xen_machine_halt(void)
26960 {
26961 xen_reboot(SHUTDOWN_poweroff);
26962 }
26963@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(void)
26964 */
26965 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
26966
26967-#ifdef CONFIG_X86_64
26968 /* Work out if we support NX */
26969- check_efer();
26970+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
26971+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
26972+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
26973+ unsigned l, h;
26974+
26975+#ifdef CONFIG_X86_PAE
26976+ nx_enabled = 1;
26977+#endif
26978+ __supported_pte_mask |= _PAGE_NX;
26979+ rdmsr(MSR_EFER, l, h);
26980+ l |= EFER_NX;
26981+ wrmsr(MSR_EFER, l, h);
26982+ }
26983 #endif
26984
26985 xen_setup_features();
26986@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(void)
26987
26988 machine_ops = xen_machine_ops;
26989
26990- /*
26991- * The only reliable way to retain the initial address of the
26992- * percpu gdt_page is to remember it here, so we can go and
26993- * mark it RW later, when the initial percpu area is freed.
26994- */
26995- xen_initial_gdt = &per_cpu(gdt_page, 0);
26996-
26997 xen_smp_init();
26998
26999 pgd = (pgd_t *)xen_start_info->pt_base;
27000diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
27001index 3f90a2c..2c2ad84 100644
27002--- a/arch/x86/xen/mmu.c
27003+++ b/arch/x86/xen/mmu.c
27004@@ -1719,6 +1719,9 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27005 convert_pfn_mfn(init_level4_pgt);
27006 convert_pfn_mfn(level3_ident_pgt);
27007 convert_pfn_mfn(level3_kernel_pgt);
27008+ convert_pfn_mfn(level3_vmalloc_start_pgt);
27009+ convert_pfn_mfn(level3_vmalloc_end_pgt);
27010+ convert_pfn_mfn(level3_vmemmap_pgt);
27011
27012 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
27013 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
27014@@ -1737,7 +1740,11 @@ __init pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd,
27015 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
27016 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
27017 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
27018+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
27019+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
27020+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
27021 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
27022+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
27023 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
27024 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
27025
27026@@ -1860,6 +1867,7 @@ static __init void xen_post_allocator_init(void)
27027 pv_mmu_ops.set_pud = xen_set_pud;
27028 #if PAGETABLE_LEVELS == 4
27029 pv_mmu_ops.set_pgd = xen_set_pgd;
27030+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
27031 #endif
27032
27033 /* This will work as long as patching hasn't happened yet
27034@@ -1946,6 +1954,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initdata = {
27035 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
27036 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
27037 .set_pgd = xen_set_pgd_hyper,
27038+ .set_pgd_batched = xen_set_pgd_hyper,
27039
27040 .alloc_pud = xen_alloc_pmd_init,
27041 .release_pud = xen_release_pmd_init,
27042diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
27043index a96204a..fca9b8e 100644
27044--- a/arch/x86/xen/smp.c
27045+++ b/arch/x86/xen/smp.c
27046@@ -168,11 +168,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
27047 {
27048 BUG_ON(smp_processor_id() != 0);
27049 native_smp_prepare_boot_cpu();
27050-
27051- /* We've switched to the "real" per-cpu gdt, so make sure the
27052- old memory can be recycled */
27053- make_lowmem_page_readwrite(xen_initial_gdt);
27054-
27055 xen_setup_vcpu_info_placement();
27056 }
27057
27058@@ -241,12 +236,12 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
27059 gdt = get_cpu_gdt_table(cpu);
27060
27061 ctxt->flags = VGCF_IN_KERNEL;
27062- ctxt->user_regs.ds = __USER_DS;
27063- ctxt->user_regs.es = __USER_DS;
27064+ ctxt->user_regs.ds = __KERNEL_DS;
27065+ ctxt->user_regs.es = __KERNEL_DS;
27066 ctxt->user_regs.ss = __KERNEL_DS;
27067 #ifdef CONFIG_X86_32
27068 ctxt->user_regs.fs = __KERNEL_PERCPU;
27069- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
27070+ savesegment(gs, ctxt->user_regs.gs);
27071 #else
27072 ctxt->gs_base_kernel = per_cpu_offset(cpu);
27073 #endif
27074@@ -297,13 +292,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu)
27075 int rc;
27076
27077 per_cpu(current_task, cpu) = idle;
27078+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
27079 #ifdef CONFIG_X86_32
27080 irq_ctx_init(cpu);
27081 #else
27082 clear_tsk_thread_flag(idle, TIF_FORK);
27083- per_cpu(kernel_stack, cpu) =
27084- (unsigned long)task_stack_page(idle) -
27085- KERNEL_STACK_OFFSET + THREAD_SIZE;
27086+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
27087 #endif
27088 xen_setup_runstate_info(cpu);
27089 xen_setup_timer(cpu);
27090diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
27091index 9a95a9c..4f39e774 100644
27092--- a/arch/x86/xen/xen-asm_32.S
27093+++ b/arch/x86/xen/xen-asm_32.S
27094@@ -83,14 +83,14 @@ ENTRY(xen_iret)
27095 ESP_OFFSET=4 # bytes pushed onto stack
27096
27097 /*
27098- * Store vcpu_info pointer for easy access. Do it this way to
27099- * avoid having to reload %fs
27100+ * Store vcpu_info pointer for easy access.
27101 */
27102 #ifdef CONFIG_SMP
27103- GET_THREAD_INFO(%eax)
27104- movl TI_cpu(%eax), %eax
27105- movl __per_cpu_offset(,%eax,4), %eax
27106- mov per_cpu__xen_vcpu(%eax), %eax
27107+ push %fs
27108+ mov $(__KERNEL_PERCPU), %eax
27109+ mov %eax, %fs
27110+ mov PER_CPU_VAR(xen_vcpu), %eax
27111+ pop %fs
27112 #else
27113 movl per_cpu__xen_vcpu, %eax
27114 #endif
27115diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
27116index 1a5ff24..a187d40 100644
27117--- a/arch/x86/xen/xen-head.S
27118+++ b/arch/x86/xen/xen-head.S
27119@@ -19,6 +19,17 @@ ENTRY(startup_xen)
27120 #ifdef CONFIG_X86_32
27121 mov %esi,xen_start_info
27122 mov $init_thread_union+THREAD_SIZE,%esp
27123+#ifdef CONFIG_SMP
27124+ movl $cpu_gdt_table,%edi
27125+ movl $__per_cpu_load,%eax
27126+ movw %ax,__KERNEL_PERCPU + 2(%edi)
27127+ rorl $16,%eax
27128+ movb %al,__KERNEL_PERCPU + 4(%edi)
27129+ movb %ah,__KERNEL_PERCPU + 7(%edi)
27130+ movl $__per_cpu_end - 1,%eax
27131+ subl $__per_cpu_start,%eax
27132+ movw %ax,__KERNEL_PERCPU + 0(%edi)
27133+#endif
27134 #else
27135 mov %rsi,xen_start_info
27136 mov $init_thread_union+THREAD_SIZE,%rsp
27137diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
27138index f9153a3..51eab3d 100644
27139--- a/arch/x86/xen/xen-ops.h
27140+++ b/arch/x86/xen/xen-ops.h
27141@@ -10,8 +10,6 @@
27142 extern const char xen_hypervisor_callback[];
27143 extern const char xen_failsafe_callback[];
27144
27145-extern void *xen_initial_gdt;
27146-
27147 struct trap_info;
27148 void xen_copy_trap_info(struct trap_info *traps);
27149
27150diff --git a/block/blk-integrity.c b/block/blk-integrity.c
27151index 15c6308..96e83c2 100644
27152--- a/block/blk-integrity.c
27153+++ b/block/blk-integrity.c
27154@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs[] = {
27155 NULL,
27156 };
27157
27158-static struct sysfs_ops integrity_ops = {
27159+static const struct sysfs_ops integrity_ops = {
27160 .show = &integrity_attr_show,
27161 .store = &integrity_attr_store,
27162 };
27163diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
27164index ca56420..f2fc409 100644
27165--- a/block/blk-iopoll.c
27166+++ b/block/blk-iopoll.c
27167@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
27168 }
27169 EXPORT_SYMBOL(blk_iopoll_complete);
27170
27171-static void blk_iopoll_softirq(struct softirq_action *h)
27172+static void blk_iopoll_softirq(void)
27173 {
27174 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
27175 int rearm = 0, budget = blk_iopoll_budget;
27176diff --git a/block/blk-map.c b/block/blk-map.c
27177index 30a7e51..0aeec6a 100644
27178--- a/block/blk-map.c
27179+++ b/block/blk-map.c
27180@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
27181 * direct dma. else, set up kernel bounce buffers
27182 */
27183 uaddr = (unsigned long) ubuf;
27184- if (blk_rq_aligned(q, ubuf, len) && !map_data)
27185+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
27186 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
27187 else
27188 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
27189@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
27190 for (i = 0; i < iov_count; i++) {
27191 unsigned long uaddr = (unsigned long)iov[i].iov_base;
27192
27193+ if (!iov[i].iov_len)
27194+ return -EINVAL;
27195+
27196 if (uaddr & queue_dma_alignment(q)) {
27197 unaligned = 1;
27198 break;
27199 }
27200- if (!iov[i].iov_len)
27201- return -EINVAL;
27202 }
27203
27204 if (unaligned || (q->dma_pad_mask & len) || map_data)
27205@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
27206 if (!len || !kbuf)
27207 return -EINVAL;
27208
27209- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
27210+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
27211 if (do_copy)
27212 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
27213 else
27214diff --git a/block/blk-softirq.c b/block/blk-softirq.c
27215index ee9c216..58d410a 100644
27216--- a/block/blk-softirq.c
27217+++ b/block/blk-softirq.c
27218@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
27219 * Softirq action handler - move entries to local list and loop over them
27220 * while passing them to the queue registered handler.
27221 */
27222-static void blk_done_softirq(struct softirq_action *h)
27223+static void blk_done_softirq(void)
27224 {
27225 struct list_head *cpu_list, local_list;
27226
27227diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
27228index bb9c5ea..5330d48 100644
27229--- a/block/blk-sysfs.c
27230+++ b/block/blk-sysfs.c
27231@@ -414,7 +414,7 @@ static void blk_release_queue(struct kobject *kobj)
27232 kmem_cache_free(blk_requestq_cachep, q);
27233 }
27234
27235-static struct sysfs_ops queue_sysfs_ops = {
27236+static const struct sysfs_ops queue_sysfs_ops = {
27237 .show = queue_attr_show,
27238 .store = queue_attr_store,
27239 };
27240diff --git a/block/bsg.c b/block/bsg.c
27241index 7154a7a..08ac2f0 100644
27242--- a/block/bsg.c
27243+++ b/block/bsg.c
27244@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
27245 struct sg_io_v4 *hdr, struct bsg_device *bd,
27246 fmode_t has_write_perm)
27247 {
27248+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27249+ unsigned char *cmdptr;
27250+
27251 if (hdr->request_len > BLK_MAX_CDB) {
27252 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
27253 if (!rq->cmd)
27254 return -ENOMEM;
27255- }
27256+ cmdptr = rq->cmd;
27257+ } else
27258+ cmdptr = tmpcmd;
27259
27260- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
27261+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
27262 hdr->request_len))
27263 return -EFAULT;
27264
27265+ if (cmdptr != rq->cmd)
27266+ memcpy(rq->cmd, cmdptr, hdr->request_len);
27267+
27268 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
27269 if (blk_verify_command(rq->cmd, has_write_perm))
27270 return -EPERM;
27271@@ -282,7 +290,7 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27272 rq->next_rq = next_rq;
27273 next_rq->cmd_type = rq->cmd_type;
27274
27275- dxferp = (void*)(unsigned long)hdr->din_xferp;
27276+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27277 ret = blk_rq_map_user(q, next_rq, NULL, dxferp,
27278 hdr->din_xfer_len, GFP_KERNEL);
27279 if (ret)
27280@@ -291,10 +299,10 @@ bsg_map_hdr(struct bsg_device *bd, struct sg_io_v4 *hdr, fmode_t has_write_perm,
27281
27282 if (hdr->dout_xfer_len) {
27283 dxfer_len = hdr->dout_xfer_len;
27284- dxferp = (void*)(unsigned long)hdr->dout_xferp;
27285+ dxferp = (void __user *)(unsigned long)hdr->dout_xferp;
27286 } else if (hdr->din_xfer_len) {
27287 dxfer_len = hdr->din_xfer_len;
27288- dxferp = (void*)(unsigned long)hdr->din_xferp;
27289+ dxferp = (void __user *)(unsigned long)hdr->din_xferp;
27290 } else
27291 dxfer_len = 0;
27292
27293@@ -436,7 +444,7 @@ static int blk_complete_sgv4_hdr_rq(struct request *rq, struct sg_io_v4 *hdr,
27294 int len = min_t(unsigned int, hdr->max_response_len,
27295 rq->sense_len);
27296
27297- ret = copy_to_user((void*)(unsigned long)hdr->response,
27298+ ret = copy_to_user((void __user *)(unsigned long)hdr->response,
27299 rq->sense, len);
27300 if (!ret)
27301 hdr->response_len = len;
27302diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
27303index 9bd086c..ca1fc22 100644
27304--- a/block/compat_ioctl.c
27305+++ b/block/compat_ioctl.c
27306@@ -354,7 +354,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
27307 err |= __get_user(f->spec1, &uf->spec1);
27308 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
27309 err |= __get_user(name, &uf->name);
27310- f->name = compat_ptr(name);
27311+ f->name = (void __force_kernel *)compat_ptr(name);
27312 if (err) {
27313 err = -EFAULT;
27314 goto out;
27315diff --git a/block/elevator.c b/block/elevator.c
27316index a847046..75a1746 100644
27317--- a/block/elevator.c
27318+++ b/block/elevator.c
27319@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, struct attribute *attr,
27320 return error;
27321 }
27322
27323-static struct sysfs_ops elv_sysfs_ops = {
27324+static const struct sysfs_ops elv_sysfs_ops = {
27325 .show = elv_attr_show,
27326 .store = elv_attr_store,
27327 };
27328diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
27329index 2be0a97..bded3fd 100644
27330--- a/block/scsi_ioctl.c
27331+++ b/block/scsi_ioctl.c
27332@@ -221,8 +221,20 @@ EXPORT_SYMBOL(blk_verify_command);
27333 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
27334 struct sg_io_hdr *hdr, fmode_t mode)
27335 {
27336- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
27337+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27338+ unsigned char *cmdptr;
27339+
27340+ if (rq->cmd != rq->__cmd)
27341+ cmdptr = rq->cmd;
27342+ else
27343+ cmdptr = tmpcmd;
27344+
27345+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
27346 return -EFAULT;
27347+
27348+ if (cmdptr != rq->cmd)
27349+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
27350+
27351 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
27352 return -EPERM;
27353
27354@@ -431,6 +443,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27355 int err;
27356 unsigned int in_len, out_len, bytes, opcode, cmdlen;
27357 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
27358+ unsigned char tmpcmd[sizeof(rq->__cmd)];
27359+ unsigned char *cmdptr;
27360
27361 if (!sic)
27362 return -EINVAL;
27363@@ -464,9 +478,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
27364 */
27365 err = -EFAULT;
27366 rq->cmd_len = cmdlen;
27367- if (copy_from_user(rq->cmd, sic->data, cmdlen))
27368+
27369+ if (rq->cmd != rq->__cmd)
27370+ cmdptr = rq->cmd;
27371+ else
27372+ cmdptr = tmpcmd;
27373+
27374+ if (copy_from_user(cmdptr, sic->data, cmdlen))
27375 goto error;
27376
27377+ if (rq->cmd != cmdptr)
27378+ memcpy(rq->cmd, cmdptr, cmdlen);
27379+
27380 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
27381 goto error;
27382
27383diff --git a/crypto/cryptd.c b/crypto/cryptd.c
27384index 3533582..f143117 100644
27385--- a/crypto/cryptd.c
27386+++ b/crypto/cryptd.c
27387@@ -50,7 +50,7 @@ struct cryptd_blkcipher_ctx {
27388
27389 struct cryptd_blkcipher_request_ctx {
27390 crypto_completion_t complete;
27391-};
27392+} __no_const;
27393
27394 struct cryptd_hash_ctx {
27395 struct crypto_shash *child;
27396diff --git a/crypto/gf128mul.c b/crypto/gf128mul.c
27397index a90d260..7a9765e 100644
27398--- a/crypto/gf128mul.c
27399+++ b/crypto/gf128mul.c
27400@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128 *b)
27401 for (i = 0; i < 7; ++i)
27402 gf128mul_x_lle(&p[i + 1], &p[i]);
27403
27404- memset(r, 0, sizeof(r));
27405+ memset(r, 0, sizeof(*r));
27406 for (i = 0;;) {
27407 u8 ch = ((u8 *)b)[15 - i];
27408
27409@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128 *b)
27410 for (i = 0; i < 7; ++i)
27411 gf128mul_x_bbe(&p[i + 1], &p[i]);
27412
27413- memset(r, 0, sizeof(r));
27414+ memset(r, 0, sizeof(*r));
27415 for (i = 0;;) {
27416 u8 ch = ((u8 *)b)[i];
27417
27418diff --git a/crypto/serpent.c b/crypto/serpent.c
27419index b651a55..023297d 100644
27420--- a/crypto/serpent.c
27421+++ b/crypto/serpent.c
27422@@ -21,6 +21,7 @@
27423 #include <asm/byteorder.h>
27424 #include <linux/crypto.h>
27425 #include <linux/types.h>
27426+#include <linux/sched.h>
27427
27428 /* Key is padded to the maximum of 256 bits before round key generation.
27429 * Any key length <= 256 bits (32 bytes) is allowed by the algorithm.
27430@@ -224,6 +225,8 @@ static int serpent_setkey(struct crypto_tfm *tfm, const u8 *key,
27431 u32 r0,r1,r2,r3,r4;
27432 int i;
27433
27434+ pax_track_stack();
27435+
27436 /* Copy key, add padding */
27437
27438 for (i = 0; i < keylen; ++i)
27439diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
27440index 0d2cdb8..d8de48d 100644
27441--- a/drivers/acpi/acpi_pad.c
27442+++ b/drivers/acpi/acpi_pad.c
27443@@ -30,7 +30,7 @@
27444 #include <acpi/acpi_bus.h>
27445 #include <acpi/acpi_drivers.h>
27446
27447-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
27448+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
27449 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
27450 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
27451 static DEFINE_MUTEX(isolated_cpus_lock);
27452diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
27453index 3f4602b..2e41d36 100644
27454--- a/drivers/acpi/battery.c
27455+++ b/drivers/acpi/battery.c
27456@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
27457 }
27458
27459 static struct battery_file {
27460- struct file_operations ops;
27461+ const struct file_operations ops;
27462 mode_t mode;
27463 const char *name;
27464 } acpi_battery_file[] = {
27465diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
27466index 7338b6a..82f0257 100644
27467--- a/drivers/acpi/dock.c
27468+++ b/drivers/acpi/dock.c
27469@@ -77,7 +77,7 @@ struct dock_dependent_device {
27470 struct list_head list;
27471 struct list_head hotplug_list;
27472 acpi_handle handle;
27473- struct acpi_dock_ops *ops;
27474+ const struct acpi_dock_ops *ops;
27475 void *context;
27476 };
27477
27478@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifier);
27479 * the dock driver after _DCK is executed.
27480 */
27481 int
27482-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
27483+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
27484 void *context)
27485 {
27486 struct dock_dependent_device *dd;
27487diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
27488index 7c1c59e..2993595 100644
27489--- a/drivers/acpi/osl.c
27490+++ b/drivers/acpi/osl.c
27491@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
27492 void __iomem *virt_addr;
27493
27494 virt_addr = ioremap(phys_addr, width);
27495+ if (!virt_addr)
27496+ return AE_NO_MEMORY;
27497 if (!value)
27498 value = &dummy;
27499
27500@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
27501 void __iomem *virt_addr;
27502
27503 virt_addr = ioremap(phys_addr, width);
27504+ if (!virt_addr)
27505+ return AE_NO_MEMORY;
27506
27507 switch (width) {
27508 case 8:
27509diff --git a/drivers/acpi/power_meter.c b/drivers/acpi/power_meter.c
27510index c216062..eec10d2 100644
27511--- a/drivers/acpi/power_meter.c
27512+++ b/drivers/acpi/power_meter.c
27513@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *dev, struct device_attribute *devattr,
27514 return res;
27515
27516 temp /= 1000;
27517- if (temp < 0)
27518- return -EINVAL;
27519
27520 mutex_lock(&resource->lock);
27521 resource->trip[attr->index - 7] = temp;
27522diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
27523index d0d25e2..961643d 100644
27524--- a/drivers/acpi/proc.c
27525+++ b/drivers/acpi/proc.c
27526@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct file *file,
27527 size_t count, loff_t * ppos)
27528 {
27529 struct list_head *node, *next;
27530- char strbuf[5];
27531- char str[5] = "";
27532- unsigned int len = count;
27533+ char strbuf[5] = {0};
27534 struct acpi_device *found_dev = NULL;
27535
27536- if (len > 4)
27537- len = 4;
27538- if (len < 0)
27539- return -EFAULT;
27540+ if (count > 4)
27541+ count = 4;
27542
27543- if (copy_from_user(strbuf, buffer, len))
27544+ if (copy_from_user(strbuf, buffer, count))
27545 return -EFAULT;
27546- strbuf[len] = '\0';
27547- sscanf(strbuf, "%s", str);
27548+ strbuf[count] = '\0';
27549
27550 mutex_lock(&acpi_device_lock);
27551 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
27552@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct file *file,
27553 if (!dev->wakeup.flags.valid)
27554 continue;
27555
27556- if (!strncmp(dev->pnp.bus_id, str, 4)) {
27557+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
27558 dev->wakeup.state.enabled =
27559 dev->wakeup.state.enabled ? 0 : 1;
27560 found_dev = dev;
27561diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
27562index 7102474..de8ad22 100644
27563--- a/drivers/acpi/processor_core.c
27564+++ b/drivers/acpi/processor_core.c
27565@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
27566 return 0;
27567 }
27568
27569- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
27570+ BUG_ON(pr->id >= nr_cpu_ids);
27571
27572 /*
27573 * Buggy BIOS check
27574diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
27575index d933980..5761f13 100644
27576--- a/drivers/acpi/sbshc.c
27577+++ b/drivers/acpi/sbshc.c
27578@@ -17,7 +17,7 @@
27579
27580 #define PREFIX "ACPI: "
27581
27582-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
27583+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
27584 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
27585
27586 struct acpi_smb_hc {
27587diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
27588index 0458094..6978e7b 100644
27589--- a/drivers/acpi/sleep.c
27590+++ b/drivers/acpi/sleep.c
27591@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(suspend_state_t pm_state)
27592 }
27593 }
27594
27595-static struct platform_suspend_ops acpi_suspend_ops = {
27596+static const struct platform_suspend_ops acpi_suspend_ops = {
27597 .valid = acpi_suspend_state_valid,
27598 .begin = acpi_suspend_begin,
27599 .prepare_late = acpi_pm_prepare,
27600@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspend_state_t pm_state)
27601 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27602 * been requested.
27603 */
27604-static struct platform_suspend_ops acpi_suspend_ops_old = {
27605+static const struct platform_suspend_ops acpi_suspend_ops_old = {
27606 .valid = acpi_suspend_state_valid,
27607 .begin = acpi_suspend_begin_old,
27608 .prepare_late = acpi_pm_disable_gpes,
27609@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
27610 acpi_enable_all_runtime_gpes();
27611 }
27612
27613-static struct platform_hibernation_ops acpi_hibernation_ops = {
27614+static const struct platform_hibernation_ops acpi_hibernation_ops = {
27615 .begin = acpi_hibernation_begin,
27616 .end = acpi_pm_end,
27617 .pre_snapshot = acpi_hibernation_pre_snapshot,
27618@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot_old(void)
27619 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
27620 * been requested.
27621 */
27622-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
27623+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
27624 .begin = acpi_hibernation_begin_old,
27625 .end = acpi_pm_end,
27626 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
27627diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
27628index 05dff63..b662ab7 100644
27629--- a/drivers/acpi/video.c
27630+++ b/drivers/acpi/video.c
27631@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(struct backlight_device *bd)
27632 vd->brightness->levels[request_level]);
27633 }
27634
27635-static struct backlight_ops acpi_backlight_ops = {
27636+static const struct backlight_ops acpi_backlight_ops = {
27637 .get_brightness = acpi_video_get_brightness,
27638 .update_status = acpi_video_set_brightness,
27639 };
27640diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
27641index 6787aab..23ffb0e 100644
27642--- a/drivers/ata/ahci.c
27643+++ b/drivers/ata/ahci.c
27644@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sht = {
27645 .sdev_attrs = ahci_sdev_attrs,
27646 };
27647
27648-static struct ata_port_operations ahci_ops = {
27649+static const struct ata_port_operations ahci_ops = {
27650 .inherits = &sata_pmp_port_ops,
27651
27652 .qc_defer = sata_pmp_qc_defer_cmd_switch,
27653@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_ops = {
27654 .port_stop = ahci_port_stop,
27655 };
27656
27657-static struct ata_port_operations ahci_vt8251_ops = {
27658+static const struct ata_port_operations ahci_vt8251_ops = {
27659 .inherits = &ahci_ops,
27660 .hardreset = ahci_vt8251_hardreset,
27661 };
27662
27663-static struct ata_port_operations ahci_p5wdh_ops = {
27664+static const struct ata_port_operations ahci_p5wdh_ops = {
27665 .inherits = &ahci_ops,
27666 .hardreset = ahci_p5wdh_hardreset,
27667 };
27668
27669-static struct ata_port_operations ahci_sb600_ops = {
27670+static const struct ata_port_operations ahci_sb600_ops = {
27671 .inherits = &ahci_ops,
27672 .softreset = ahci_sb600_softreset,
27673 .pmp_softreset = ahci_sb600_softreset,
27674diff --git a/drivers/ata/ata_generic.c b/drivers/ata/ata_generic.c
27675index 99e7196..4968c77 100644
27676--- a/drivers/ata/ata_generic.c
27677+++ b/drivers/ata/ata_generic.c
27678@@ -104,7 +104,7 @@ static struct scsi_host_template generic_sht = {
27679 ATA_BMDMA_SHT(DRV_NAME),
27680 };
27681
27682-static struct ata_port_operations generic_port_ops = {
27683+static const struct ata_port_operations generic_port_ops = {
27684 .inherits = &ata_bmdma_port_ops,
27685 .cable_detect = ata_cable_unknown,
27686 .set_mode = generic_set_mode,
27687diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
27688index c33591d..000c121 100644
27689--- a/drivers/ata/ata_piix.c
27690+++ b/drivers/ata/ata_piix.c
27691@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sht = {
27692 ATA_BMDMA_SHT(DRV_NAME),
27693 };
27694
27695-static struct ata_port_operations piix_pata_ops = {
27696+static const struct ata_port_operations piix_pata_ops = {
27697 .inherits = &ata_bmdma32_port_ops,
27698 .cable_detect = ata_cable_40wire,
27699 .set_piomode = piix_set_piomode,
27700@@ -326,22 +326,22 @@ static struct ata_port_operations piix_pata_ops = {
27701 .prereset = piix_pata_prereset,
27702 };
27703
27704-static struct ata_port_operations piix_vmw_ops = {
27705+static const struct ata_port_operations piix_vmw_ops = {
27706 .inherits = &piix_pata_ops,
27707 .bmdma_status = piix_vmw_bmdma_status,
27708 };
27709
27710-static struct ata_port_operations ich_pata_ops = {
27711+static const struct ata_port_operations ich_pata_ops = {
27712 .inherits = &piix_pata_ops,
27713 .cable_detect = ich_pata_cable_detect,
27714 .set_dmamode = ich_set_dmamode,
27715 };
27716
27717-static struct ata_port_operations piix_sata_ops = {
27718+static const struct ata_port_operations piix_sata_ops = {
27719 .inherits = &ata_bmdma_port_ops,
27720 };
27721
27722-static struct ata_port_operations piix_sidpr_sata_ops = {
27723+static const struct ata_port_operations piix_sidpr_sata_ops = {
27724 .inherits = &piix_sata_ops,
27725 .hardreset = sata_std_hardreset,
27726 .scr_read = piix_sidpr_scr_read,
27727diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
27728index b0882cd..c295d65 100644
27729--- a/drivers/ata/libata-acpi.c
27730+++ b/drivers/ata/libata-acpi.c
27731@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_handle handle, u32 event, void *data)
27732 ata_acpi_uevent(dev->link->ap, dev, event);
27733 }
27734
27735-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27736+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
27737 .handler = ata_acpi_dev_notify_dock,
27738 .uevent = ata_acpi_dev_uevent,
27739 };
27740
27741-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27742+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
27743 .handler = ata_acpi_ap_notify_dock,
27744 .uevent = ata_acpi_ap_uevent,
27745 };
27746diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
27747index d4f7f99..94f603e 100644
27748--- a/drivers/ata/libata-core.c
27749+++ b/drivers/ata/libata-core.c
27750@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
27751 struct ata_port *ap;
27752 unsigned int tag;
27753
27754- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27755+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27756 ap = qc->ap;
27757
27758 qc->flags = 0;
27759@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
27760 struct ata_port *ap;
27761 struct ata_link *link;
27762
27763- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27764+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
27765 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
27766 ap = qc->ap;
27767 link = qc->dev->link;
27768@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device *gendev, void *res)
27769 * LOCKING:
27770 * None.
27771 */
27772-static void ata_finalize_port_ops(struct ata_port_operations *ops)
27773+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
27774 {
27775 static DEFINE_SPINLOCK(lock);
27776 const struct ata_port_operations *cur;
27777@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27778 return;
27779
27780 spin_lock(&lock);
27781+ pax_open_kernel();
27782
27783 for (cur = ops->inherits; cur; cur = cur->inherits) {
27784 void **inherit = (void **)cur;
27785@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
27786 if (IS_ERR(*pp))
27787 *pp = NULL;
27788
27789- ops->inherits = NULL;
27790+ *(struct ata_port_operations **)&ops->inherits = NULL;
27791
27792+ pax_close_kernel();
27793 spin_unlock(&lock);
27794 }
27795
27796@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host)
27797 */
27798 /* KILLME - the only user left is ipr */
27799 void ata_host_init(struct ata_host *host, struct device *dev,
27800- unsigned long flags, struct ata_port_operations *ops)
27801+ unsigned long flags, const struct ata_port_operations *ops)
27802 {
27803 spin_lock_init(&host->lock);
27804 host->dev = dev;
27805@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(struct ata_port *ap)
27806 /* truly dummy */
27807 }
27808
27809-struct ata_port_operations ata_dummy_port_ops = {
27810+const struct ata_port_operations ata_dummy_port_ops = {
27811 .qc_prep = ata_noop_qc_prep,
27812 .qc_issue = ata_dummy_qc_issue,
27813 .error_handler = ata_dummy_error_handler,
27814diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
27815index e5bdb9b..45a8e72 100644
27816--- a/drivers/ata/libata-eh.c
27817+++ b/drivers/ata/libata-eh.c
27818@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
27819 {
27820 struct ata_link *link;
27821
27822+ pax_track_stack();
27823+
27824 ata_for_each_link(link, ap, HOST_FIRST)
27825 ata_eh_link_report(link);
27826 }
27827@@ -3594,7 +3596,7 @@ void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
27828 */
27829 void ata_std_error_handler(struct ata_port *ap)
27830 {
27831- struct ata_port_operations *ops = ap->ops;
27832+ const struct ata_port_operations *ops = ap->ops;
27833 ata_reset_fn_t hardreset = ops->hardreset;
27834
27835 /* ignore built-in hardreset if SCR access is not available */
27836diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
27837index 51f0ffb..19ce3e3 100644
27838--- a/drivers/ata/libata-pmp.c
27839+++ b/drivers/ata/libata-pmp.c
27840@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(struct ata_link *link, int *link_tries)
27841 */
27842 static int sata_pmp_eh_recover(struct ata_port *ap)
27843 {
27844- struct ata_port_operations *ops = ap->ops;
27845+ const struct ata_port_operations *ops = ap->ops;
27846 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
27847 struct ata_link *pmp_link = &ap->link;
27848 struct ata_device *pmp_dev = pmp_link->device;
27849diff --git a/drivers/ata/pata_acpi.c b/drivers/ata/pata_acpi.c
27850index d8f35fe..288180a 100644
27851--- a/drivers/ata/pata_acpi.c
27852+++ b/drivers/ata/pata_acpi.c
27853@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_sht = {
27854 ATA_BMDMA_SHT(DRV_NAME),
27855 };
27856
27857-static struct ata_port_operations pacpi_ops = {
27858+static const struct ata_port_operations pacpi_ops = {
27859 .inherits = &ata_bmdma_port_ops,
27860 .qc_issue = pacpi_qc_issue,
27861 .cable_detect = pacpi_cable_detect,
27862diff --git a/drivers/ata/pata_ali.c b/drivers/ata/pata_ali.c
27863index 9434114..1f2f364 100644
27864--- a/drivers/ata/pata_ali.c
27865+++ b/drivers/ata/pata_ali.c
27866@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht = {
27867 * Port operations for PIO only ALi
27868 */
27869
27870-static struct ata_port_operations ali_early_port_ops = {
27871+static const struct ata_port_operations ali_early_port_ops = {
27872 .inherits = &ata_sff_port_ops,
27873 .cable_detect = ata_cable_40wire,
27874 .set_piomode = ali_set_piomode,
27875@@ -382,7 +382,7 @@ static const struct ata_port_operations ali_dma_base_ops = {
27876 * Port operations for DMA capable ALi without cable
27877 * detect
27878 */
27879-static struct ata_port_operations ali_20_port_ops = {
27880+static const struct ata_port_operations ali_20_port_ops = {
27881 .inherits = &ali_dma_base_ops,
27882 .cable_detect = ata_cable_40wire,
27883 .mode_filter = ali_20_filter,
27884@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20_port_ops = {
27885 /*
27886 * Port operations for DMA capable ALi with cable detect
27887 */
27888-static struct ata_port_operations ali_c2_port_ops = {
27889+static const struct ata_port_operations ali_c2_port_ops = {
27890 .inherits = &ali_dma_base_ops,
27891 .check_atapi_dma = ali_check_atapi_dma,
27892 .cable_detect = ali_c2_cable_detect,
27893@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2_port_ops = {
27894 /*
27895 * Port operations for DMA capable ALi with cable detect
27896 */
27897-static struct ata_port_operations ali_c4_port_ops = {
27898+static const struct ata_port_operations ali_c4_port_ops = {
27899 .inherits = &ali_dma_base_ops,
27900 .check_atapi_dma = ali_check_atapi_dma,
27901 .cable_detect = ali_c2_cable_detect,
27902@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4_port_ops = {
27903 /*
27904 * Port operations for DMA capable ALi with cable detect and LBA48
27905 */
27906-static struct ata_port_operations ali_c5_port_ops = {
27907+static const struct ata_port_operations ali_c5_port_ops = {
27908 .inherits = &ali_dma_base_ops,
27909 .check_atapi_dma = ali_check_atapi_dma,
27910 .dev_config = ali_warn_atapi_dma,
27911diff --git a/drivers/ata/pata_amd.c b/drivers/ata/pata_amd.c
27912index 567f3f7..c8ee0da 100644
27913--- a/drivers/ata/pata_amd.c
27914+++ b/drivers/ata/pata_amd.c
27915@@ -397,28 +397,28 @@ static const struct ata_port_operations amd_base_port_ops = {
27916 .prereset = amd_pre_reset,
27917 };
27918
27919-static struct ata_port_operations amd33_port_ops = {
27920+static const struct ata_port_operations amd33_port_ops = {
27921 .inherits = &amd_base_port_ops,
27922 .cable_detect = ata_cable_40wire,
27923 .set_piomode = amd33_set_piomode,
27924 .set_dmamode = amd33_set_dmamode,
27925 };
27926
27927-static struct ata_port_operations amd66_port_ops = {
27928+static const struct ata_port_operations amd66_port_ops = {
27929 .inherits = &amd_base_port_ops,
27930 .cable_detect = ata_cable_unknown,
27931 .set_piomode = amd66_set_piomode,
27932 .set_dmamode = amd66_set_dmamode,
27933 };
27934
27935-static struct ata_port_operations amd100_port_ops = {
27936+static const struct ata_port_operations amd100_port_ops = {
27937 .inherits = &amd_base_port_ops,
27938 .cable_detect = ata_cable_unknown,
27939 .set_piomode = amd100_set_piomode,
27940 .set_dmamode = amd100_set_dmamode,
27941 };
27942
27943-static struct ata_port_operations amd133_port_ops = {
27944+static const struct ata_port_operations amd133_port_ops = {
27945 .inherits = &amd_base_port_ops,
27946 .cable_detect = amd_cable_detect,
27947 .set_piomode = amd133_set_piomode,
27948@@ -433,13 +433,13 @@ static const struct ata_port_operations nv_base_port_ops = {
27949 .host_stop = nv_host_stop,
27950 };
27951
27952-static struct ata_port_operations nv100_port_ops = {
27953+static const struct ata_port_operations nv100_port_ops = {
27954 .inherits = &nv_base_port_ops,
27955 .set_piomode = nv100_set_piomode,
27956 .set_dmamode = nv100_set_dmamode,
27957 };
27958
27959-static struct ata_port_operations nv133_port_ops = {
27960+static const struct ata_port_operations nv133_port_ops = {
27961 .inherits = &nv_base_port_ops,
27962 .set_piomode = nv133_set_piomode,
27963 .set_dmamode = nv133_set_dmamode,
27964diff --git a/drivers/ata/pata_artop.c b/drivers/ata/pata_artop.c
27965index d332cfd..4b7eaae 100644
27966--- a/drivers/ata/pata_artop.c
27967+++ b/drivers/ata/pata_artop.c
27968@@ -311,7 +311,7 @@ static struct scsi_host_template artop_sht = {
27969 ATA_BMDMA_SHT(DRV_NAME),
27970 };
27971
27972-static struct ata_port_operations artop6210_ops = {
27973+static const struct ata_port_operations artop6210_ops = {
27974 .inherits = &ata_bmdma_port_ops,
27975 .cable_detect = ata_cable_40wire,
27976 .set_piomode = artop6210_set_piomode,
27977@@ -320,7 +320,7 @@ static struct ata_port_operations artop6210_ops = {
27978 .qc_defer = artop6210_qc_defer,
27979 };
27980
27981-static struct ata_port_operations artop6260_ops = {
27982+static const struct ata_port_operations artop6260_ops = {
27983 .inherits = &ata_bmdma_port_ops,
27984 .cable_detect = artop6260_cable_detect,
27985 .set_piomode = artop6260_set_piomode,
27986diff --git a/drivers/ata/pata_at32.c b/drivers/ata/pata_at32.c
27987index 5c129f9..7bb7ccb 100644
27988--- a/drivers/ata/pata_at32.c
27989+++ b/drivers/ata/pata_at32.c
27990@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sht = {
27991 ATA_PIO_SHT(DRV_NAME),
27992 };
27993
27994-static struct ata_port_operations at32_port_ops = {
27995+static const struct ata_port_operations at32_port_ops = {
27996 .inherits = &ata_sff_port_ops,
27997 .cable_detect = ata_cable_40wire,
27998 .set_piomode = pata_at32_set_piomode,
27999diff --git a/drivers/ata/pata_at91.c b/drivers/ata/pata_at91.c
28000index 41c94b1..829006d 100644
28001--- a/drivers/ata/pata_at91.c
28002+++ b/drivers/ata/pata_at91.c
28003@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at91_sht = {
28004 ATA_PIO_SHT(DRV_NAME),
28005 };
28006
28007-static struct ata_port_operations pata_at91_port_ops = {
28008+static const struct ata_port_operations pata_at91_port_ops = {
28009 .inherits = &ata_sff_port_ops,
28010
28011 .sff_data_xfer = pata_at91_data_xfer_noirq,
28012diff --git a/drivers/ata/pata_atiixp.c b/drivers/ata/pata_atiixp.c
28013index ae4454d..d391eb4 100644
28014--- a/drivers/ata/pata_atiixp.c
28015+++ b/drivers/ata/pata_atiixp.c
28016@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_sht = {
28017 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28018 };
28019
28020-static struct ata_port_operations atiixp_port_ops = {
28021+static const struct ata_port_operations atiixp_port_ops = {
28022 .inherits = &ata_bmdma_port_ops,
28023
28024 .qc_prep = ata_sff_dumb_qc_prep,
28025diff --git a/drivers/ata/pata_atp867x.c b/drivers/ata/pata_atp867x.c
28026index 6fe7ded..2a425dc 100644
28027--- a/drivers/ata/pata_atp867x.c
28028+++ b/drivers/ata/pata_atp867x.c
28029@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x_sht = {
28030 ATA_BMDMA_SHT(DRV_NAME),
28031 };
28032
28033-static struct ata_port_operations atp867x_ops = {
28034+static const struct ata_port_operations atp867x_ops = {
28035 .inherits = &ata_bmdma_port_ops,
28036 .cable_detect = atp867x_cable_detect,
28037 .set_piomode = atp867x_set_piomode,
28038diff --git a/drivers/ata/pata_bf54x.c b/drivers/ata/pata_bf54x.c
28039index c4b47a3..b27a367 100644
28040--- a/drivers/ata/pata_bf54x.c
28041+++ b/drivers/ata/pata_bf54x.c
28042@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sht = {
28043 .dma_boundary = ATA_DMA_BOUNDARY,
28044 };
28045
28046-static struct ata_port_operations bfin_pata_ops = {
28047+static const struct ata_port_operations bfin_pata_ops = {
28048 .inherits = &ata_sff_port_ops,
28049
28050 .set_piomode = bfin_set_piomode,
28051diff --git a/drivers/ata/pata_cmd640.c b/drivers/ata/pata_cmd640.c
28052index 5acf9fa..84248be 100644
28053--- a/drivers/ata/pata_cmd640.c
28054+++ b/drivers/ata/pata_cmd640.c
28055@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_sht = {
28056 ATA_BMDMA_SHT(DRV_NAME),
28057 };
28058
28059-static struct ata_port_operations cmd640_port_ops = {
28060+static const struct ata_port_operations cmd640_port_ops = {
28061 .inherits = &ata_bmdma_port_ops,
28062 /* In theory xfer_noirq is not needed once we kill the prefetcher */
28063 .sff_data_xfer = ata_sff_data_xfer_noirq,
28064diff --git a/drivers/ata/pata_cmd64x.c b/drivers/ata/pata_cmd64x.c
28065index ccd2694..c869c3d 100644
28066--- a/drivers/ata/pata_cmd64x.c
28067+++ b/drivers/ata/pata_cmd64x.c
28068@@ -271,18 +271,18 @@ static const struct ata_port_operations cmd64x_base_ops = {
28069 .set_dmamode = cmd64x_set_dmamode,
28070 };
28071
28072-static struct ata_port_operations cmd64x_port_ops = {
28073+static const struct ata_port_operations cmd64x_port_ops = {
28074 .inherits = &cmd64x_base_ops,
28075 .cable_detect = ata_cable_40wire,
28076 };
28077
28078-static struct ata_port_operations cmd646r1_port_ops = {
28079+static const struct ata_port_operations cmd646r1_port_ops = {
28080 .inherits = &cmd64x_base_ops,
28081 .bmdma_stop = cmd646r1_bmdma_stop,
28082 .cable_detect = ata_cable_40wire,
28083 };
28084
28085-static struct ata_port_operations cmd648_port_ops = {
28086+static const struct ata_port_operations cmd648_port_ops = {
28087 .inherits = &cmd64x_base_ops,
28088 .bmdma_stop = cmd648_bmdma_stop,
28089 .cable_detect = cmd648_cable_detect,
28090diff --git a/drivers/ata/pata_cs5520.c b/drivers/ata/pata_cs5520.c
28091index 0df83cf..d7595b0 100644
28092--- a/drivers/ata/pata_cs5520.c
28093+++ b/drivers/ata/pata_cs5520.c
28094@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_sht = {
28095 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28096 };
28097
28098-static struct ata_port_operations cs5520_port_ops = {
28099+static const struct ata_port_operations cs5520_port_ops = {
28100 .inherits = &ata_bmdma_port_ops,
28101 .qc_prep = ata_sff_dumb_qc_prep,
28102 .cable_detect = ata_cable_40wire,
28103diff --git a/drivers/ata/pata_cs5530.c b/drivers/ata/pata_cs5530.c
28104index c974b05..6d26b11 100644
28105--- a/drivers/ata/pata_cs5530.c
28106+++ b/drivers/ata/pata_cs5530.c
28107@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_sht = {
28108 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28109 };
28110
28111-static struct ata_port_operations cs5530_port_ops = {
28112+static const struct ata_port_operations cs5530_port_ops = {
28113 .inherits = &ata_bmdma_port_ops,
28114
28115 .qc_prep = ata_sff_dumb_qc_prep,
28116diff --git a/drivers/ata/pata_cs5535.c b/drivers/ata/pata_cs5535.c
28117index 403f561..aacd26b 100644
28118--- a/drivers/ata/pata_cs5535.c
28119+++ b/drivers/ata/pata_cs5535.c
28120@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_sht = {
28121 ATA_BMDMA_SHT(DRV_NAME),
28122 };
28123
28124-static struct ata_port_operations cs5535_port_ops = {
28125+static const struct ata_port_operations cs5535_port_ops = {
28126 .inherits = &ata_bmdma_port_ops,
28127 .cable_detect = cs5535_cable_detect,
28128 .set_piomode = cs5535_set_piomode,
28129diff --git a/drivers/ata/pata_cs5536.c b/drivers/ata/pata_cs5536.c
28130index 6da4cb4..de24a25 100644
28131--- a/drivers/ata/pata_cs5536.c
28132+++ b/drivers/ata/pata_cs5536.c
28133@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_sht = {
28134 ATA_BMDMA_SHT(DRV_NAME),
28135 };
28136
28137-static struct ata_port_operations cs5536_port_ops = {
28138+static const struct ata_port_operations cs5536_port_ops = {
28139 .inherits = &ata_bmdma_port_ops,
28140 .cable_detect = cs5536_cable_detect,
28141 .set_piomode = cs5536_set_piomode,
28142diff --git a/drivers/ata/pata_cypress.c b/drivers/ata/pata_cypress.c
28143index 8fb040b..b16a9c9 100644
28144--- a/drivers/ata/pata_cypress.c
28145+++ b/drivers/ata/pata_cypress.c
28146@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c693_sht = {
28147 ATA_BMDMA_SHT(DRV_NAME),
28148 };
28149
28150-static struct ata_port_operations cy82c693_port_ops = {
28151+static const struct ata_port_operations cy82c693_port_ops = {
28152 .inherits = &ata_bmdma_port_ops,
28153 .cable_detect = ata_cable_40wire,
28154 .set_piomode = cy82c693_set_piomode,
28155diff --git a/drivers/ata/pata_efar.c b/drivers/ata/pata_efar.c
28156index 2a6412f..555ee11 100644
28157--- a/drivers/ata/pata_efar.c
28158+++ b/drivers/ata/pata_efar.c
28159@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sht = {
28160 ATA_BMDMA_SHT(DRV_NAME),
28161 };
28162
28163-static struct ata_port_operations efar_ops = {
28164+static const struct ata_port_operations efar_ops = {
28165 .inherits = &ata_bmdma_port_ops,
28166 .cable_detect = efar_cable_detect,
28167 .set_piomode = efar_set_piomode,
28168diff --git a/drivers/ata/pata_hpt366.c b/drivers/ata/pata_hpt366.c
28169index b9d8836..0b92030 100644
28170--- a/drivers/ata/pata_hpt366.c
28171+++ b/drivers/ata/pata_hpt366.c
28172@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_sht = {
28173 * Configuration for HPT366/68
28174 */
28175
28176-static struct ata_port_operations hpt366_port_ops = {
28177+static const struct ata_port_operations hpt366_port_ops = {
28178 .inherits = &ata_bmdma_port_ops,
28179 .cable_detect = hpt36x_cable_detect,
28180 .mode_filter = hpt366_filter,
28181diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
28182index 5af7f19..00c4980 100644
28183--- a/drivers/ata/pata_hpt37x.c
28184+++ b/drivers/ata/pata_hpt37x.c
28185@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_sht = {
28186 * Configuration for HPT370
28187 */
28188
28189-static struct ata_port_operations hpt370_port_ops = {
28190+static const struct ata_port_operations hpt370_port_ops = {
28191 .inherits = &ata_bmdma_port_ops,
28192
28193 .bmdma_stop = hpt370_bmdma_stop,
28194@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370_port_ops = {
28195 * Configuration for HPT370A. Close to 370 but less filters
28196 */
28197
28198-static struct ata_port_operations hpt370a_port_ops = {
28199+static const struct ata_port_operations hpt370a_port_ops = {
28200 .inherits = &hpt370_port_ops,
28201 .mode_filter = hpt370a_filter,
28202 };
28203@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370a_port_ops = {
28204 * and DMA mode setting functionality.
28205 */
28206
28207-static struct ata_port_operations hpt372_port_ops = {
28208+static const struct ata_port_operations hpt372_port_ops = {
28209 .inherits = &ata_bmdma_port_ops,
28210
28211 .bmdma_stop = hpt37x_bmdma_stop,
28212@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372_port_ops = {
28213 * but we have a different cable detection procedure for function 1.
28214 */
28215
28216-static struct ata_port_operations hpt374_fn1_port_ops = {
28217+static const struct ata_port_operations hpt374_fn1_port_ops = {
28218 .inherits = &hpt372_port_ops,
28219 .prereset = hpt374_fn1_pre_reset,
28220 };
28221diff --git a/drivers/ata/pata_hpt3x2n.c b/drivers/ata/pata_hpt3x2n.c
28222index 100f227..2e39382 100644
28223--- a/drivers/ata/pata_hpt3x2n.c
28224+++ b/drivers/ata/pata_hpt3x2n.c
28225@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n_sht = {
28226 * Configuration for HPT3x2n.
28227 */
28228
28229-static struct ata_port_operations hpt3x2n_port_ops = {
28230+static const struct ata_port_operations hpt3x2n_port_ops = {
28231 .inherits = &ata_bmdma_port_ops,
28232
28233 .bmdma_stop = hpt3x2n_bmdma_stop,
28234diff --git a/drivers/ata/pata_hpt3x3.c b/drivers/ata/pata_hpt3x3.c
28235index 7e31025..6fca8f4 100644
28236--- a/drivers/ata/pata_hpt3x3.c
28237+++ b/drivers/ata/pata_hpt3x3.c
28238@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_sht = {
28239 ATA_BMDMA_SHT(DRV_NAME),
28240 };
28241
28242-static struct ata_port_operations hpt3x3_port_ops = {
28243+static const struct ata_port_operations hpt3x3_port_ops = {
28244 .inherits = &ata_bmdma_port_ops,
28245 .cable_detect = ata_cable_40wire,
28246 .set_piomode = hpt3x3_set_piomode,
28247diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
28248index b663b7f..9a26c2a 100644
28249--- a/drivers/ata/pata_icside.c
28250+++ b/drivers/ata/pata_icside.c
28251@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct ata_link *link, unsigned int *classes)
28252 }
28253 }
28254
28255-static struct ata_port_operations pata_icside_port_ops = {
28256+static const struct ata_port_operations pata_icside_port_ops = {
28257 .inherits = &ata_sff_port_ops,
28258 /* no need to build any PRD tables for DMA */
28259 .qc_prep = ata_noop_qc_prep,
28260diff --git a/drivers/ata/pata_isapnp.c b/drivers/ata/pata_isapnp.c
28261index 4bceb88..457dfb6 100644
28262--- a/drivers/ata/pata_isapnp.c
28263+++ b/drivers/ata/pata_isapnp.c
28264@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_sht = {
28265 ATA_PIO_SHT(DRV_NAME),
28266 };
28267
28268-static struct ata_port_operations isapnp_port_ops = {
28269+static const struct ata_port_operations isapnp_port_ops = {
28270 .inherits = &ata_sff_port_ops,
28271 .cable_detect = ata_cable_40wire,
28272 };
28273
28274-static struct ata_port_operations isapnp_noalt_port_ops = {
28275+static const struct ata_port_operations isapnp_noalt_port_ops = {
28276 .inherits = &ata_sff_port_ops,
28277 .cable_detect = ata_cable_40wire,
28278 /* No altstatus so we don't want to use the lost interrupt poll */
28279diff --git a/drivers/ata/pata_it8213.c b/drivers/ata/pata_it8213.c
28280index f156da8..24976e2 100644
28281--- a/drivers/ata/pata_it8213.c
28282+++ b/drivers/ata/pata_it8213.c
28283@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_sht = {
28284 };
28285
28286
28287-static struct ata_port_operations it8213_ops = {
28288+static const struct ata_port_operations it8213_ops = {
28289 .inherits = &ata_bmdma_port_ops,
28290 .cable_detect = it8213_cable_detect,
28291 .set_piomode = it8213_set_piomode,
28292diff --git a/drivers/ata/pata_it821x.c b/drivers/ata/pata_it821x.c
28293index 188bc2f..ca9e785 100644
28294--- a/drivers/ata/pata_it821x.c
28295+++ b/drivers/ata/pata_it821x.c
28296@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_sht = {
28297 ATA_BMDMA_SHT(DRV_NAME),
28298 };
28299
28300-static struct ata_port_operations it821x_smart_port_ops = {
28301+static const struct ata_port_operations it821x_smart_port_ops = {
28302 .inherits = &ata_bmdma_port_ops,
28303
28304 .check_atapi_dma= it821x_check_atapi_dma,
28305@@ -814,7 +814,7 @@ static struct ata_port_operations it821x_smart_port_ops = {
28306 .port_start = it821x_port_start,
28307 };
28308
28309-static struct ata_port_operations it821x_passthru_port_ops = {
28310+static const struct ata_port_operations it821x_passthru_port_ops = {
28311 .inherits = &ata_bmdma_port_ops,
28312
28313 .check_atapi_dma= it821x_check_atapi_dma,
28314@@ -830,7 +830,7 @@ static struct ata_port_operations it821x_passthru_port_ops = {
28315 .port_start = it821x_port_start,
28316 };
28317
28318-static struct ata_port_operations it821x_rdc_port_ops = {
28319+static const struct ata_port_operations it821x_rdc_port_ops = {
28320 .inherits = &ata_bmdma_port_ops,
28321
28322 .check_atapi_dma= it821x_check_atapi_dma,
28323diff --git a/drivers/ata/pata_ixp4xx_cf.c b/drivers/ata/pata_ixp4xx_cf.c
28324index ba54b08..4b952b7 100644
28325--- a/drivers/ata/pata_ixp4xx_cf.c
28326+++ b/drivers/ata/pata_ixp4xx_cf.c
28327@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_sht = {
28328 ATA_PIO_SHT(DRV_NAME),
28329 };
28330
28331-static struct ata_port_operations ixp4xx_port_ops = {
28332+static const struct ata_port_operations ixp4xx_port_ops = {
28333 .inherits = &ata_sff_port_ops,
28334 .sff_data_xfer = ixp4xx_mmio_data_xfer,
28335 .cable_detect = ata_cable_40wire,
28336diff --git a/drivers/ata/pata_jmicron.c b/drivers/ata/pata_jmicron.c
28337index 3a1474a..434b0ff 100644
28338--- a/drivers/ata/pata_jmicron.c
28339+++ b/drivers/ata/pata_jmicron.c
28340@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron_sht = {
28341 ATA_BMDMA_SHT(DRV_NAME),
28342 };
28343
28344-static struct ata_port_operations jmicron_ops = {
28345+static const struct ata_port_operations jmicron_ops = {
28346 .inherits = &ata_bmdma_port_ops,
28347 .prereset = jmicron_pre_reset,
28348 };
28349diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
28350index 6932e56..220e71d 100644
28351--- a/drivers/ata/pata_legacy.c
28352+++ b/drivers/ata/pata_legacy.c
28353@@ -106,7 +106,7 @@ struct legacy_probe {
28354
28355 struct legacy_controller {
28356 const char *name;
28357- struct ata_port_operations *ops;
28358+ const struct ata_port_operations *ops;
28359 unsigned int pio_mask;
28360 unsigned int flags;
28361 unsigned int pflags;
28362@@ -223,12 +223,12 @@ static const struct ata_port_operations legacy_base_port_ops = {
28363 * pio_mask as well.
28364 */
28365
28366-static struct ata_port_operations simple_port_ops = {
28367+static const struct ata_port_operations simple_port_ops = {
28368 .inherits = &legacy_base_port_ops,
28369 .sff_data_xfer = ata_sff_data_xfer_noirq,
28370 };
28371
28372-static struct ata_port_operations legacy_port_ops = {
28373+static const struct ata_port_operations legacy_port_ops = {
28374 .inherits = &legacy_base_port_ops,
28375 .sff_data_xfer = ata_sff_data_xfer_noirq,
28376 .set_mode = legacy_set_mode,
28377@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(struct ata_device *dev,
28378 return buflen;
28379 }
28380
28381-static struct ata_port_operations pdc20230_port_ops = {
28382+static const struct ata_port_operations pdc20230_port_ops = {
28383 .inherits = &legacy_base_port_ops,
28384 .set_piomode = pdc20230_set_piomode,
28385 .sff_data_xfer = pdc_data_xfer_vlb,
28386@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct ata_port *ap, struct ata_device *adev)
28387 ioread8(ap->ioaddr.status_addr);
28388 }
28389
28390-static struct ata_port_operations ht6560a_port_ops = {
28391+static const struct ata_port_operations ht6560a_port_ops = {
28392 .inherits = &legacy_base_port_ops,
28393 .set_piomode = ht6560a_set_piomode,
28394 };
28395@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct ata_port *ap, struct ata_device *adev)
28396 ioread8(ap->ioaddr.status_addr);
28397 }
28398
28399-static struct ata_port_operations ht6560b_port_ops = {
28400+static const struct ata_port_operations ht6560b_port_ops = {
28401 .inherits = &legacy_base_port_ops,
28402 .set_piomode = ht6560b_set_piomode,
28403 };
28404@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(struct ata_port *ap,
28405 }
28406
28407
28408-static struct ata_port_operations opti82c611a_port_ops = {
28409+static const struct ata_port_operations opti82c611a_port_ops = {
28410 .inherits = &legacy_base_port_ops,
28411 .set_piomode = opti82c611a_set_piomode,
28412 };
28413@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(struct ata_queued_cmd *qc)
28414 return ata_sff_qc_issue(qc);
28415 }
28416
28417-static struct ata_port_operations opti82c46x_port_ops = {
28418+static const struct ata_port_operations opti82c46x_port_ops = {
28419 .inherits = &legacy_base_port_ops,
28420 .set_piomode = opti82c46x_set_piomode,
28421 .qc_issue = opti82c46x_qc_issue,
28422@@ -771,20 +771,20 @@ static int qdi_port(struct platform_device *dev,
28423 return 0;
28424 }
28425
28426-static struct ata_port_operations qdi6500_port_ops = {
28427+static const struct ata_port_operations qdi6500_port_ops = {
28428 .inherits = &legacy_base_port_ops,
28429 .set_piomode = qdi6500_set_piomode,
28430 .qc_issue = qdi_qc_issue,
28431 .sff_data_xfer = vlb32_data_xfer,
28432 };
28433
28434-static struct ata_port_operations qdi6580_port_ops = {
28435+static const struct ata_port_operations qdi6580_port_ops = {
28436 .inherits = &legacy_base_port_ops,
28437 .set_piomode = qdi6580_set_piomode,
28438 .sff_data_xfer = vlb32_data_xfer,
28439 };
28440
28441-static struct ata_port_operations qdi6580dp_port_ops = {
28442+static const struct ata_port_operations qdi6580dp_port_ops = {
28443 .inherits = &legacy_base_port_ops,
28444 .set_piomode = qdi6580dp_set_piomode,
28445 .sff_data_xfer = vlb32_data_xfer,
28446@@ -855,7 +855,7 @@ static int winbond_port(struct platform_device *dev,
28447 return 0;
28448 }
28449
28450-static struct ata_port_operations winbond_port_ops = {
28451+static const struct ata_port_operations winbond_port_ops = {
28452 .inherits = &legacy_base_port_ops,
28453 .set_piomode = winbond_set_piomode,
28454 .sff_data_xfer = vlb32_data_xfer,
28455@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct legacy_probe *probe)
28456 int pio_modes = controller->pio_mask;
28457 unsigned long io = probe->port;
28458 u32 mask = (1 << probe->slot);
28459- struct ata_port_operations *ops = controller->ops;
28460+ const struct ata_port_operations *ops = controller->ops;
28461 struct legacy_data *ld = &legacy_data[probe->slot];
28462 struct ata_host *host = NULL;
28463 struct ata_port *ap;
28464diff --git a/drivers/ata/pata_marvell.c b/drivers/ata/pata_marvell.c
28465index 2096fb7..4d090fc 100644
28466--- a/drivers/ata/pata_marvell.c
28467+++ b/drivers/ata/pata_marvell.c
28468@@ -100,7 +100,7 @@ static struct scsi_host_template marvell_sht = {
28469 ATA_BMDMA_SHT(DRV_NAME),
28470 };
28471
28472-static struct ata_port_operations marvell_ops = {
28473+static const struct ata_port_operations marvell_ops = {
28474 .inherits = &ata_bmdma_port_ops,
28475 .cable_detect = marvell_cable_detect,
28476 .prereset = marvell_pre_reset,
28477diff --git a/drivers/ata/pata_mpc52xx.c b/drivers/ata/pata_mpc52xx.c
28478index 99d41be..7d56aa8 100644
28479--- a/drivers/ata/pata_mpc52xx.c
28480+++ b/drivers/ata/pata_mpc52xx.c
28481@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx_ata_sht = {
28482 ATA_PIO_SHT(DRV_NAME),
28483 };
28484
28485-static struct ata_port_operations mpc52xx_ata_port_ops = {
28486+static const struct ata_port_operations mpc52xx_ata_port_ops = {
28487 .inherits = &ata_bmdma_port_ops,
28488 .sff_dev_select = mpc52xx_ata_dev_select,
28489 .set_piomode = mpc52xx_ata_set_piomode,
28490diff --git a/drivers/ata/pata_mpiix.c b/drivers/ata/pata_mpiix.c
28491index b21f002..0a27e7f 100644
28492--- a/drivers/ata/pata_mpiix.c
28493+++ b/drivers/ata/pata_mpiix.c
28494@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_sht = {
28495 ATA_PIO_SHT(DRV_NAME),
28496 };
28497
28498-static struct ata_port_operations mpiix_port_ops = {
28499+static const struct ata_port_operations mpiix_port_ops = {
28500 .inherits = &ata_sff_port_ops,
28501 .qc_issue = mpiix_qc_issue,
28502 .cable_detect = ata_cable_40wire,
28503diff --git a/drivers/ata/pata_netcell.c b/drivers/ata/pata_netcell.c
28504index f0d52f7..89c3be3 100644
28505--- a/drivers/ata/pata_netcell.c
28506+++ b/drivers/ata/pata_netcell.c
28507@@ -34,7 +34,7 @@ static struct scsi_host_template netcell_sht = {
28508 ATA_BMDMA_SHT(DRV_NAME),
28509 };
28510
28511-static struct ata_port_operations netcell_ops = {
28512+static const struct ata_port_operations netcell_ops = {
28513 .inherits = &ata_bmdma_port_ops,
28514 .cable_detect = ata_cable_80wire,
28515 .read_id = netcell_read_id,
28516diff --git a/drivers/ata/pata_ninja32.c b/drivers/ata/pata_ninja32.c
28517index dd53a66..a3f4317 100644
28518--- a/drivers/ata/pata_ninja32.c
28519+++ b/drivers/ata/pata_ninja32.c
28520@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32_sht = {
28521 ATA_BMDMA_SHT(DRV_NAME),
28522 };
28523
28524-static struct ata_port_operations ninja32_port_ops = {
28525+static const struct ata_port_operations ninja32_port_ops = {
28526 .inherits = &ata_bmdma_port_ops,
28527 .sff_dev_select = ninja32_dev_select,
28528 .cable_detect = ata_cable_40wire,
28529diff --git a/drivers/ata/pata_ns87410.c b/drivers/ata/pata_ns87410.c
28530index ca53fac..9aa93ef 100644
28531--- a/drivers/ata/pata_ns87410.c
28532+++ b/drivers/ata/pata_ns87410.c
28533@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410_sht = {
28534 ATA_PIO_SHT(DRV_NAME),
28535 };
28536
28537-static struct ata_port_operations ns87410_port_ops = {
28538+static const struct ata_port_operations ns87410_port_ops = {
28539 .inherits = &ata_sff_port_ops,
28540 .qc_issue = ns87410_qc_issue,
28541 .cable_detect = ata_cable_40wire,
28542diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c
28543index 773b159..55f454e 100644
28544--- a/drivers/ata/pata_ns87415.c
28545+++ b/drivers/ata/pata_ns87415.c
28546@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct ata_port *ap)
28547 }
28548 #endif /* 87560 SuperIO Support */
28549
28550-static struct ata_port_operations ns87415_pata_ops = {
28551+static const struct ata_port_operations ns87415_pata_ops = {
28552 .inherits = &ata_bmdma_port_ops,
28553
28554 .check_atapi_dma = ns87415_check_atapi_dma,
28555@@ -313,7 +313,7 @@ static struct ata_port_operations ns87415_pata_ops = {
28556 };
28557
28558 #if defined(CONFIG_SUPERIO)
28559-static struct ata_port_operations ns87560_pata_ops = {
28560+static const struct ata_port_operations ns87560_pata_ops = {
28561 .inherits = &ns87415_pata_ops,
28562 .sff_tf_read = ns87560_tf_read,
28563 .sff_check_status = ns87560_check_status,
28564diff --git a/drivers/ata/pata_octeon_cf.c b/drivers/ata/pata_octeon_cf.c
28565index d6f6956..639295b 100644
28566--- a/drivers/ata/pata_octeon_cf.c
28567+++ b/drivers/ata/pata_octeon_cf.c
28568@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(struct ata_queued_cmd *qc)
28569 return 0;
28570 }
28571
28572+/* cannot be const */
28573 static struct ata_port_operations octeon_cf_ops = {
28574 .inherits = &ata_sff_port_ops,
28575 .check_atapi_dma = octeon_cf_check_atapi_dma,
28576diff --git a/drivers/ata/pata_oldpiix.c b/drivers/ata/pata_oldpiix.c
28577index 84ac503..adee1cd 100644
28578--- a/drivers/ata/pata_oldpiix.c
28579+++ b/drivers/ata/pata_oldpiix.c
28580@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix_sht = {
28581 ATA_BMDMA_SHT(DRV_NAME),
28582 };
28583
28584-static struct ata_port_operations oldpiix_pata_ops = {
28585+static const struct ata_port_operations oldpiix_pata_ops = {
28586 .inherits = &ata_bmdma_port_ops,
28587 .qc_issue = oldpiix_qc_issue,
28588 .cable_detect = ata_cable_40wire,
28589diff --git a/drivers/ata/pata_opti.c b/drivers/ata/pata_opti.c
28590index 99eddda..3a4c0aa 100644
28591--- a/drivers/ata/pata_opti.c
28592+++ b/drivers/ata/pata_opti.c
28593@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sht = {
28594 ATA_PIO_SHT(DRV_NAME),
28595 };
28596
28597-static struct ata_port_operations opti_port_ops = {
28598+static const struct ata_port_operations opti_port_ops = {
28599 .inherits = &ata_sff_port_ops,
28600 .cable_detect = ata_cable_40wire,
28601 .set_piomode = opti_set_piomode,
28602diff --git a/drivers/ata/pata_optidma.c b/drivers/ata/pata_optidma.c
28603index 86885a4..8e9968d 100644
28604--- a/drivers/ata/pata_optidma.c
28605+++ b/drivers/ata/pata_optidma.c
28606@@ -337,7 +337,7 @@ static struct scsi_host_template optidma_sht = {
28607 ATA_BMDMA_SHT(DRV_NAME),
28608 };
28609
28610-static struct ata_port_operations optidma_port_ops = {
28611+static const struct ata_port_operations optidma_port_ops = {
28612 .inherits = &ata_bmdma_port_ops,
28613 .cable_detect = ata_cable_40wire,
28614 .set_piomode = optidma_set_pio_mode,
28615@@ -346,7 +346,7 @@ static struct ata_port_operations optidma_port_ops = {
28616 .prereset = optidma_pre_reset,
28617 };
28618
28619-static struct ata_port_operations optiplus_port_ops = {
28620+static const struct ata_port_operations optiplus_port_ops = {
28621 .inherits = &optidma_port_ops,
28622 .set_piomode = optiplus_set_pio_mode,
28623 .set_dmamode = optiplus_set_dma_mode,
28624diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
28625index 11fb4cc..1a14022 100644
28626--- a/drivers/ata/pata_palmld.c
28627+++ b/drivers/ata/pata_palmld.c
28628@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_sht = {
28629 ATA_PIO_SHT(DRV_NAME),
28630 };
28631
28632-static struct ata_port_operations palmld_port_ops = {
28633+static const struct ata_port_operations palmld_port_ops = {
28634 .inherits = &ata_sff_port_ops,
28635 .sff_data_xfer = ata_sff_data_xfer_noirq,
28636 .cable_detect = ata_cable_40wire,
28637diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
28638index dc99e26..7f4b1e4 100644
28639--- a/drivers/ata/pata_pcmcia.c
28640+++ b/drivers/ata/pata_pcmcia.c
28641@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_sht = {
28642 ATA_PIO_SHT(DRV_NAME),
28643 };
28644
28645-static struct ata_port_operations pcmcia_port_ops = {
28646+static const struct ata_port_operations pcmcia_port_ops = {
28647 .inherits = &ata_sff_port_ops,
28648 .sff_data_xfer = ata_sff_data_xfer_noirq,
28649 .cable_detect = ata_cable_40wire,
28650 .set_mode = pcmcia_set_mode,
28651 };
28652
28653-static struct ata_port_operations pcmcia_8bit_port_ops = {
28654+static const struct ata_port_operations pcmcia_8bit_port_ops = {
28655 .inherits = &ata_sff_port_ops,
28656 .sff_data_xfer = ata_data_xfer_8bit,
28657 .cable_detect = ata_cable_40wire,
28658@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia_device *pdev)
28659 unsigned long io_base, ctl_base;
28660 void __iomem *io_addr, *ctl_addr;
28661 int n_ports = 1;
28662- struct ata_port_operations *ops = &pcmcia_port_ops;
28663+ const struct ata_port_operations *ops = &pcmcia_port_ops;
28664
28665 info = kzalloc(sizeof(*info), GFP_KERNEL);
28666 if (info == NULL)
28667diff --git a/drivers/ata/pata_pdc2027x.c b/drivers/ata/pata_pdc2027x.c
28668index ca5cad0..3a1f125 100644
28669--- a/drivers/ata/pata_pdc2027x.c
28670+++ b/drivers/ata/pata_pdc2027x.c
28671@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027x_sht = {
28672 ATA_BMDMA_SHT(DRV_NAME),
28673 };
28674
28675-static struct ata_port_operations pdc2027x_pata100_ops = {
28676+static const struct ata_port_operations pdc2027x_pata100_ops = {
28677 .inherits = &ata_bmdma_port_ops,
28678 .check_atapi_dma = pdc2027x_check_atapi_dma,
28679 .cable_detect = pdc2027x_cable_detect,
28680 .prereset = pdc2027x_prereset,
28681 };
28682
28683-static struct ata_port_operations pdc2027x_pata133_ops = {
28684+static const struct ata_port_operations pdc2027x_pata133_ops = {
28685 .inherits = &pdc2027x_pata100_ops,
28686 .mode_filter = pdc2027x_mode_filter,
28687 .set_piomode = pdc2027x_set_piomode,
28688diff --git a/drivers/ata/pata_pdc202xx_old.c b/drivers/ata/pata_pdc202xx_old.c
28689index 2911120..4bf62aa 100644
28690--- a/drivers/ata/pata_pdc202xx_old.c
28691+++ b/drivers/ata/pata_pdc202xx_old.c
28692@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202xx_sht = {
28693 ATA_BMDMA_SHT(DRV_NAME),
28694 };
28695
28696-static struct ata_port_operations pdc2024x_port_ops = {
28697+static const struct ata_port_operations pdc2024x_port_ops = {
28698 .inherits = &ata_bmdma_port_ops,
28699
28700 .cable_detect = ata_cable_40wire,
28701@@ -284,7 +284,7 @@ static struct ata_port_operations pdc2024x_port_ops = {
28702 .sff_exec_command = pdc202xx_exec_command,
28703 };
28704
28705-static struct ata_port_operations pdc2026x_port_ops = {
28706+static const struct ata_port_operations pdc2026x_port_ops = {
28707 .inherits = &pdc2024x_port_ops,
28708
28709 .check_atapi_dma = pdc2026x_check_atapi_dma,
28710diff --git a/drivers/ata/pata_platform.c b/drivers/ata/pata_platform.c
28711index 3f6ebc6..a18c358 100644
28712--- a/drivers/ata/pata_platform.c
28713+++ b/drivers/ata/pata_platform.c
28714@@ -48,7 +48,7 @@ static struct scsi_host_template pata_platform_sht = {
28715 ATA_PIO_SHT(DRV_NAME),
28716 };
28717
28718-static struct ata_port_operations pata_platform_port_ops = {
28719+static const struct ata_port_operations pata_platform_port_ops = {
28720 .inherits = &ata_sff_port_ops,
28721 .sff_data_xfer = ata_sff_data_xfer_noirq,
28722 .cable_detect = ata_cable_unknown,
28723diff --git a/drivers/ata/pata_qdi.c b/drivers/ata/pata_qdi.c
28724index 45879dc..165a9f9 100644
28725--- a/drivers/ata/pata_qdi.c
28726+++ b/drivers/ata/pata_qdi.c
28727@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht = {
28728 ATA_PIO_SHT(DRV_NAME),
28729 };
28730
28731-static struct ata_port_operations qdi6500_port_ops = {
28732+static const struct ata_port_operations qdi6500_port_ops = {
28733 .inherits = &ata_sff_port_ops,
28734 .qc_issue = qdi_qc_issue,
28735 .sff_data_xfer = qdi_data_xfer,
28736@@ -165,7 +165,7 @@ static struct ata_port_operations qdi6500_port_ops = {
28737 .set_piomode = qdi6500_set_piomode,
28738 };
28739
28740-static struct ata_port_operations qdi6580_port_ops = {
28741+static const struct ata_port_operations qdi6580_port_ops = {
28742 .inherits = &qdi6500_port_ops,
28743 .set_piomode = qdi6580_set_piomode,
28744 };
28745diff --git a/drivers/ata/pata_radisys.c b/drivers/ata/pata_radisys.c
28746index 4401b33..716c5cc 100644
28747--- a/drivers/ata/pata_radisys.c
28748+++ b/drivers/ata/pata_radisys.c
28749@@ -187,7 +187,7 @@ static struct scsi_host_template radisys_sht = {
28750 ATA_BMDMA_SHT(DRV_NAME),
28751 };
28752
28753-static struct ata_port_operations radisys_pata_ops = {
28754+static const struct ata_port_operations radisys_pata_ops = {
28755 .inherits = &ata_bmdma_port_ops,
28756 .qc_issue = radisys_qc_issue,
28757 .cable_detect = ata_cable_unknown,
28758diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
28759index 45f1e10..fab6bca 100644
28760--- a/drivers/ata/pata_rb532_cf.c
28761+++ b/drivers/ata/pata_rb532_cf.c
28762@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handler(int irq, void *dev_instance)
28763 return IRQ_HANDLED;
28764 }
28765
28766-static struct ata_port_operations rb532_pata_port_ops = {
28767+static const struct ata_port_operations rb532_pata_port_ops = {
28768 .inherits = &ata_sff_port_ops,
28769 .sff_data_xfer = ata_sff_data_xfer32,
28770 };
28771diff --git a/drivers/ata/pata_rdc.c b/drivers/ata/pata_rdc.c
28772index c843a1e..b5853c3 100644
28773--- a/drivers/ata/pata_rdc.c
28774+++ b/drivers/ata/pata_rdc.c
28775@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_port *ap, struct ata_device *adev)
28776 pci_write_config_byte(dev, 0x48, udma_enable);
28777 }
28778
28779-static struct ata_port_operations rdc_pata_ops = {
28780+static const struct ata_port_operations rdc_pata_ops = {
28781 .inherits = &ata_bmdma32_port_ops,
28782 .cable_detect = rdc_pata_cable_detect,
28783 .set_piomode = rdc_set_piomode,
28784diff --git a/drivers/ata/pata_rz1000.c b/drivers/ata/pata_rz1000.c
28785index a5e4dfe..080c8c9 100644
28786--- a/drivers/ata/pata_rz1000.c
28787+++ b/drivers/ata/pata_rz1000.c
28788@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_sht = {
28789 ATA_PIO_SHT(DRV_NAME),
28790 };
28791
28792-static struct ata_port_operations rz1000_port_ops = {
28793+static const struct ata_port_operations rz1000_port_ops = {
28794 .inherits = &ata_sff_port_ops,
28795 .cable_detect = ata_cable_40wire,
28796 .set_mode = rz1000_set_mode,
28797diff --git a/drivers/ata/pata_sc1200.c b/drivers/ata/pata_sc1200.c
28798index 3bbed83..e309daf 100644
28799--- a/drivers/ata/pata_sc1200.c
28800+++ b/drivers/ata/pata_sc1200.c
28801@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_sht = {
28802 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
28803 };
28804
28805-static struct ata_port_operations sc1200_port_ops = {
28806+static const struct ata_port_operations sc1200_port_ops = {
28807 .inherits = &ata_bmdma_port_ops,
28808 .qc_prep = ata_sff_dumb_qc_prep,
28809 .qc_issue = sc1200_qc_issue,
28810diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
28811index 4257d6b..4c1d9d5 100644
28812--- a/drivers/ata/pata_scc.c
28813+++ b/drivers/ata/pata_scc.c
28814@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht = {
28815 ATA_BMDMA_SHT(DRV_NAME),
28816 };
28817
28818-static struct ata_port_operations scc_pata_ops = {
28819+static const struct ata_port_operations scc_pata_ops = {
28820 .inherits = &ata_bmdma_port_ops,
28821
28822 .set_piomode = scc_set_piomode,
28823diff --git a/drivers/ata/pata_sch.c b/drivers/ata/pata_sch.c
28824index 99cceb4..e2e0a87 100644
28825--- a/drivers/ata/pata_sch.c
28826+++ b/drivers/ata/pata_sch.c
28827@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht = {
28828 ATA_BMDMA_SHT(DRV_NAME),
28829 };
28830
28831-static struct ata_port_operations sch_pata_ops = {
28832+static const struct ata_port_operations sch_pata_ops = {
28833 .inherits = &ata_bmdma_port_ops,
28834 .cable_detect = ata_cable_unknown,
28835 .set_piomode = sch_set_piomode,
28836diff --git a/drivers/ata/pata_serverworks.c b/drivers/ata/pata_serverworks.c
28837index beaed12..39969f1 100644
28838--- a/drivers/ata/pata_serverworks.c
28839+++ b/drivers/ata/pata_serverworks.c
28840@@ -299,7 +299,7 @@ static struct scsi_host_template serverworks_sht = {
28841 ATA_BMDMA_SHT(DRV_NAME),
28842 };
28843
28844-static struct ata_port_operations serverworks_osb4_port_ops = {
28845+static const struct ata_port_operations serverworks_osb4_port_ops = {
28846 .inherits = &ata_bmdma_port_ops,
28847 .cable_detect = serverworks_cable_detect,
28848 .mode_filter = serverworks_osb4_filter,
28849@@ -307,7 +307,7 @@ static struct ata_port_operations serverworks_osb4_port_ops = {
28850 .set_dmamode = serverworks_set_dmamode,
28851 };
28852
28853-static struct ata_port_operations serverworks_csb_port_ops = {
28854+static const struct ata_port_operations serverworks_csb_port_ops = {
28855 .inherits = &serverworks_osb4_port_ops,
28856 .mode_filter = serverworks_csb_filter,
28857 };
28858diff --git a/drivers/ata/pata_sil680.c b/drivers/ata/pata_sil680.c
28859index a2ace48..0463b44 100644
28860--- a/drivers/ata/pata_sil680.c
28861+++ b/drivers/ata/pata_sil680.c
28862@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_sht = {
28863 ATA_BMDMA_SHT(DRV_NAME),
28864 };
28865
28866-static struct ata_port_operations sil680_port_ops = {
28867+static const struct ata_port_operations sil680_port_ops = {
28868 .inherits = &ata_bmdma32_port_ops,
28869 .cable_detect = sil680_cable_detect,
28870 .set_piomode = sil680_set_piomode,
28871diff --git a/drivers/ata/pata_sis.c b/drivers/ata/pata_sis.c
28872index 488e77b..b3724d5 100644
28873--- a/drivers/ata/pata_sis.c
28874+++ b/drivers/ata/pata_sis.c
28875@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht = {
28876 ATA_BMDMA_SHT(DRV_NAME),
28877 };
28878
28879-static struct ata_port_operations sis_133_for_sata_ops = {
28880+static const struct ata_port_operations sis_133_for_sata_ops = {
28881 .inherits = &ata_bmdma_port_ops,
28882 .set_piomode = sis_133_set_piomode,
28883 .set_dmamode = sis_133_set_dmamode,
28884 .cable_detect = sis_133_cable_detect,
28885 };
28886
28887-static struct ata_port_operations sis_base_ops = {
28888+static const struct ata_port_operations sis_base_ops = {
28889 .inherits = &ata_bmdma_port_ops,
28890 .prereset = sis_pre_reset,
28891 };
28892
28893-static struct ata_port_operations sis_133_ops = {
28894+static const struct ata_port_operations sis_133_ops = {
28895 .inherits = &sis_base_ops,
28896 .set_piomode = sis_133_set_piomode,
28897 .set_dmamode = sis_133_set_dmamode,
28898 .cable_detect = sis_133_cable_detect,
28899 };
28900
28901-static struct ata_port_operations sis_133_early_ops = {
28902+static const struct ata_port_operations sis_133_early_ops = {
28903 .inherits = &sis_base_ops,
28904 .set_piomode = sis_100_set_piomode,
28905 .set_dmamode = sis_133_early_set_dmamode,
28906 .cable_detect = sis_66_cable_detect,
28907 };
28908
28909-static struct ata_port_operations sis_100_ops = {
28910+static const struct ata_port_operations sis_100_ops = {
28911 .inherits = &sis_base_ops,
28912 .set_piomode = sis_100_set_piomode,
28913 .set_dmamode = sis_100_set_dmamode,
28914 .cable_detect = sis_66_cable_detect,
28915 };
28916
28917-static struct ata_port_operations sis_66_ops = {
28918+static const struct ata_port_operations sis_66_ops = {
28919 .inherits = &sis_base_ops,
28920 .set_piomode = sis_old_set_piomode,
28921 .set_dmamode = sis_66_set_dmamode,
28922 .cable_detect = sis_66_cable_detect,
28923 };
28924
28925-static struct ata_port_operations sis_old_ops = {
28926+static const struct ata_port_operations sis_old_ops = {
28927 .inherits = &sis_base_ops,
28928 .set_piomode = sis_old_set_piomode,
28929 .set_dmamode = sis_old_set_dmamode,
28930diff --git a/drivers/ata/pata_sl82c105.c b/drivers/ata/pata_sl82c105.c
28931index 29f733c..43e9ca0 100644
28932--- a/drivers/ata/pata_sl82c105.c
28933+++ b/drivers/ata/pata_sl82c105.c
28934@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c105_sht = {
28935 ATA_BMDMA_SHT(DRV_NAME),
28936 };
28937
28938-static struct ata_port_operations sl82c105_port_ops = {
28939+static const struct ata_port_operations sl82c105_port_ops = {
28940 .inherits = &ata_bmdma_port_ops,
28941 .qc_defer = sl82c105_qc_defer,
28942 .bmdma_start = sl82c105_bmdma_start,
28943diff --git a/drivers/ata/pata_triflex.c b/drivers/ata/pata_triflex.c
28944index f1f13ff..df39e99 100644
28945--- a/drivers/ata/pata_triflex.c
28946+++ b/drivers/ata/pata_triflex.c
28947@@ -178,7 +178,7 @@ static struct scsi_host_template triflex_sht = {
28948 ATA_BMDMA_SHT(DRV_NAME),
28949 };
28950
28951-static struct ata_port_operations triflex_port_ops = {
28952+static const struct ata_port_operations triflex_port_ops = {
28953 .inherits = &ata_bmdma_port_ops,
28954 .bmdma_start = triflex_bmdma_start,
28955 .bmdma_stop = triflex_bmdma_stop,
28956diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
28957index 1d73b8d..98a4b29 100644
28958--- a/drivers/ata/pata_via.c
28959+++ b/drivers/ata/pata_via.c
28960@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht = {
28961 ATA_BMDMA_SHT(DRV_NAME),
28962 };
28963
28964-static struct ata_port_operations via_port_ops = {
28965+static const struct ata_port_operations via_port_ops = {
28966 .inherits = &ata_bmdma_port_ops,
28967 .cable_detect = via_cable_detect,
28968 .set_piomode = via_set_piomode,
28969@@ -429,7 +429,7 @@ static struct ata_port_operations via_port_ops = {
28970 .port_start = via_port_start,
28971 };
28972
28973-static struct ata_port_operations via_port_ops_noirq = {
28974+static const struct ata_port_operations via_port_ops_noirq = {
28975 .inherits = &via_port_ops,
28976 .sff_data_xfer = ata_sff_data_xfer_noirq,
28977 };
28978diff --git a/drivers/ata/pata_winbond.c b/drivers/ata/pata_winbond.c
28979index 6d8619b..ad511c4 100644
28980--- a/drivers/ata/pata_winbond.c
28981+++ b/drivers/ata/pata_winbond.c
28982@@ -125,7 +125,7 @@ static struct scsi_host_template winbond_sht = {
28983 ATA_PIO_SHT(DRV_NAME),
28984 };
28985
28986-static struct ata_port_operations winbond_port_ops = {
28987+static const struct ata_port_operations winbond_port_ops = {
28988 .inherits = &ata_sff_port_ops,
28989 .sff_data_xfer = winbond_data_xfer,
28990 .cable_detect = ata_cable_40wire,
28991diff --git a/drivers/ata/pdc_adma.c b/drivers/ata/pdc_adma.c
28992index 6c65b07..f996ec7 100644
28993--- a/drivers/ata/pdc_adma.c
28994+++ b/drivers/ata/pdc_adma.c
28995@@ -145,7 +145,7 @@ static struct scsi_host_template adma_ata_sht = {
28996 .dma_boundary = ADMA_DMA_BOUNDARY,
28997 };
28998
28999-static struct ata_port_operations adma_ata_ops = {
29000+static const struct ata_port_operations adma_ata_ops = {
29001 .inherits = &ata_sff_port_ops,
29002
29003 .lost_interrupt = ATA_OP_NULL,
29004diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
29005index 172b57e..c49bc1e 100644
29006--- a/drivers/ata/sata_fsl.c
29007+++ b/drivers/ata/sata_fsl.c
29008@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fsl_sht = {
29009 .dma_boundary = ATA_DMA_BOUNDARY,
29010 };
29011
29012-static struct ata_port_operations sata_fsl_ops = {
29013+static const struct ata_port_operations sata_fsl_ops = {
29014 .inherits = &sata_pmp_port_ops,
29015
29016 .qc_defer = ata_std_qc_defer,
29017diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c
29018index 4406902..60603ef 100644
29019--- a/drivers/ata/sata_inic162x.c
29020+++ b/drivers/ata/sata_inic162x.c
29021@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_port *ap)
29022 return 0;
29023 }
29024
29025-static struct ata_port_operations inic_port_ops = {
29026+static const struct ata_port_operations inic_port_ops = {
29027 .inherits = &sata_port_ops,
29028
29029 .check_atapi_dma = inic_check_atapi_dma,
29030diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
29031index cf41126..8107be6 100644
29032--- a/drivers/ata/sata_mv.c
29033+++ b/drivers/ata/sata_mv.c
29034@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht = {
29035 .dma_boundary = MV_DMA_BOUNDARY,
29036 };
29037
29038-static struct ata_port_operations mv5_ops = {
29039+static const struct ata_port_operations mv5_ops = {
29040 .inherits = &ata_sff_port_ops,
29041
29042 .lost_interrupt = ATA_OP_NULL,
29043@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_ops = {
29044 .port_stop = mv_port_stop,
29045 };
29046
29047-static struct ata_port_operations mv6_ops = {
29048+static const struct ata_port_operations mv6_ops = {
29049 .inherits = &mv5_ops,
29050 .dev_config = mv6_dev_config,
29051 .scr_read = mv_scr_read,
29052@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_ops = {
29053 .bmdma_status = mv_bmdma_status,
29054 };
29055
29056-static struct ata_port_operations mv_iie_ops = {
29057+static const struct ata_port_operations mv_iie_ops = {
29058 .inherits = &mv6_ops,
29059 .dev_config = ATA_OP_NULL,
29060 .qc_prep = mv_qc_prep_iie,
29061diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
29062index ae2297c..d5c9c33 100644
29063--- a/drivers/ata/sata_nv.c
29064+++ b/drivers/ata/sata_nv.c
29065@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swncq_sht = {
29066 * cases. Define nv_hardreset() which only kicks in for post-boot
29067 * probing and use it for all variants.
29068 */
29069-static struct ata_port_operations nv_generic_ops = {
29070+static const struct ata_port_operations nv_generic_ops = {
29071 .inherits = &ata_bmdma_port_ops,
29072 .lost_interrupt = ATA_OP_NULL,
29073 .scr_read = nv_scr_read,
29074@@ -472,20 +472,20 @@ static struct ata_port_operations nv_generic_ops = {
29075 .hardreset = nv_hardreset,
29076 };
29077
29078-static struct ata_port_operations nv_nf2_ops = {
29079+static const struct ata_port_operations nv_nf2_ops = {
29080 .inherits = &nv_generic_ops,
29081 .freeze = nv_nf2_freeze,
29082 .thaw = nv_nf2_thaw,
29083 };
29084
29085-static struct ata_port_operations nv_ck804_ops = {
29086+static const struct ata_port_operations nv_ck804_ops = {
29087 .inherits = &nv_generic_ops,
29088 .freeze = nv_ck804_freeze,
29089 .thaw = nv_ck804_thaw,
29090 .host_stop = nv_ck804_host_stop,
29091 };
29092
29093-static struct ata_port_operations nv_adma_ops = {
29094+static const struct ata_port_operations nv_adma_ops = {
29095 .inherits = &nv_ck804_ops,
29096
29097 .check_atapi_dma = nv_adma_check_atapi_dma,
29098@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adma_ops = {
29099 .host_stop = nv_adma_host_stop,
29100 };
29101
29102-static struct ata_port_operations nv_swncq_ops = {
29103+static const struct ata_port_operations nv_swncq_ops = {
29104 .inherits = &nv_generic_ops,
29105
29106 .qc_defer = ata_std_qc_defer,
29107diff --git a/drivers/ata/sata_promise.c b/drivers/ata/sata_promise.c
29108index 07d8d00..6cc70bb 100644
29109--- a/drivers/ata/sata_promise.c
29110+++ b/drivers/ata/sata_promise.c
29111@@ -195,7 +195,7 @@ static const struct ata_port_operations pdc_common_ops = {
29112 .error_handler = pdc_error_handler,
29113 };
29114
29115-static struct ata_port_operations pdc_sata_ops = {
29116+static const struct ata_port_operations pdc_sata_ops = {
29117 .inherits = &pdc_common_ops,
29118 .cable_detect = pdc_sata_cable_detect,
29119 .freeze = pdc_sata_freeze,
29120@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sata_ops = {
29121
29122 /* First-generation chips need a more restrictive ->check_atapi_dma op,
29123 and ->freeze/thaw that ignore the hotplug controls. */
29124-static struct ata_port_operations pdc_old_sata_ops = {
29125+static const struct ata_port_operations pdc_old_sata_ops = {
29126 .inherits = &pdc_sata_ops,
29127 .freeze = pdc_freeze,
29128 .thaw = pdc_thaw,
29129 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
29130 };
29131
29132-static struct ata_port_operations pdc_pata_ops = {
29133+static const struct ata_port_operations pdc_pata_ops = {
29134 .inherits = &pdc_common_ops,
29135 .cable_detect = pdc_pata_cable_detect,
29136 .freeze = pdc_freeze,
29137diff --git a/drivers/ata/sata_qstor.c b/drivers/ata/sata_qstor.c
29138index 326c0cf..36ecebe 100644
29139--- a/drivers/ata/sata_qstor.c
29140+++ b/drivers/ata/sata_qstor.c
29141@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_sht = {
29142 .dma_boundary = QS_DMA_BOUNDARY,
29143 };
29144
29145-static struct ata_port_operations qs_ata_ops = {
29146+static const struct ata_port_operations qs_ata_ops = {
29147 .inherits = &ata_sff_port_ops,
29148
29149 .check_atapi_dma = qs_check_atapi_dma,
29150diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
29151index 3cb69d5..0871d3c 100644
29152--- a/drivers/ata/sata_sil.c
29153+++ b/drivers/ata/sata_sil.c
29154@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht = {
29155 .sg_tablesize = ATA_MAX_PRD
29156 };
29157
29158-static struct ata_port_operations sil_ops = {
29159+static const struct ata_port_operations sil_ops = {
29160 .inherits = &ata_bmdma32_port_ops,
29161 .dev_config = sil_dev_config,
29162 .set_mode = sil_set_mode,
29163diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
29164index e6946fc..eddb794 100644
29165--- a/drivers/ata/sata_sil24.c
29166+++ b/drivers/ata/sata_sil24.c
29167@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_sht = {
29168 .dma_boundary = ATA_DMA_BOUNDARY,
29169 };
29170
29171-static struct ata_port_operations sil24_ops = {
29172+static const struct ata_port_operations sil24_ops = {
29173 .inherits = &sata_pmp_port_ops,
29174
29175 .qc_defer = sil24_qc_defer,
29176diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
29177index f8a91bf..9cb06b6 100644
29178--- a/drivers/ata/sata_sis.c
29179+++ b/drivers/ata/sata_sis.c
29180@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht = {
29181 ATA_BMDMA_SHT(DRV_NAME),
29182 };
29183
29184-static struct ata_port_operations sis_ops = {
29185+static const struct ata_port_operations sis_ops = {
29186 .inherits = &ata_bmdma_port_ops,
29187 .scr_read = sis_scr_read,
29188 .scr_write = sis_scr_write,
29189diff --git a/drivers/ata/sata_svw.c b/drivers/ata/sata_svw.c
29190index 7257f2d..d04c6f5 100644
29191--- a/drivers/ata/sata_svw.c
29192+++ b/drivers/ata/sata_svw.c
29193@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata_sht = {
29194 };
29195
29196
29197-static struct ata_port_operations k2_sata_ops = {
29198+static const struct ata_port_operations k2_sata_ops = {
29199 .inherits = &ata_bmdma_port_ops,
29200 .sff_tf_load = k2_sata_tf_load,
29201 .sff_tf_read = k2_sata_tf_read,
29202diff --git a/drivers/ata/sata_sx4.c b/drivers/ata/sata_sx4.c
29203index bbcf970..cd0df0d 100644
29204--- a/drivers/ata/sata_sx4.c
29205+++ b/drivers/ata/sata_sx4.c
29206@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sata_sht = {
29207 };
29208
29209 /* TODO: inherit from base port_ops after converting to new EH */
29210-static struct ata_port_operations pdc_20621_ops = {
29211+static const struct ata_port_operations pdc_20621_ops = {
29212 .inherits = &ata_sff_port_ops,
29213
29214 .check_atapi_dma = pdc_check_atapi_dma,
29215diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c
29216index e5bff47..089d859 100644
29217--- a/drivers/ata/sata_uli.c
29218+++ b/drivers/ata/sata_uli.c
29219@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht = {
29220 ATA_BMDMA_SHT(DRV_NAME),
29221 };
29222
29223-static struct ata_port_operations uli_ops = {
29224+static const struct ata_port_operations uli_ops = {
29225 .inherits = &ata_bmdma_port_ops,
29226 .scr_read = uli_scr_read,
29227 .scr_write = uli_scr_write,
29228diff --git a/drivers/ata/sata_via.c b/drivers/ata/sata_via.c
29229index f5dcca7..77b94eb 100644
29230--- a/drivers/ata/sata_via.c
29231+++ b/drivers/ata/sata_via.c
29232@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sht = {
29233 ATA_BMDMA_SHT(DRV_NAME),
29234 };
29235
29236-static struct ata_port_operations svia_base_ops = {
29237+static const struct ata_port_operations svia_base_ops = {
29238 .inherits = &ata_bmdma_port_ops,
29239 .sff_tf_load = svia_tf_load,
29240 };
29241
29242-static struct ata_port_operations vt6420_sata_ops = {
29243+static const struct ata_port_operations vt6420_sata_ops = {
29244 .inherits = &svia_base_ops,
29245 .freeze = svia_noop_freeze,
29246 .prereset = vt6420_prereset,
29247 .bmdma_start = vt6420_bmdma_start,
29248 };
29249
29250-static struct ata_port_operations vt6421_pata_ops = {
29251+static const struct ata_port_operations vt6421_pata_ops = {
29252 .inherits = &svia_base_ops,
29253 .cable_detect = vt6421_pata_cable_detect,
29254 .set_piomode = vt6421_set_pio_mode,
29255 .set_dmamode = vt6421_set_dma_mode,
29256 };
29257
29258-static struct ata_port_operations vt6421_sata_ops = {
29259+static const struct ata_port_operations vt6421_sata_ops = {
29260 .inherits = &svia_base_ops,
29261 .scr_read = svia_scr_read,
29262 .scr_write = svia_scr_write,
29263 };
29264
29265-static struct ata_port_operations vt8251_ops = {
29266+static const struct ata_port_operations vt8251_ops = {
29267 .inherits = &svia_base_ops,
29268 .hardreset = sata_std_hardreset,
29269 .scr_read = vt8251_scr_read,
29270diff --git a/drivers/ata/sata_vsc.c b/drivers/ata/sata_vsc.c
29271index 8b2a278..51e65d3 100644
29272--- a/drivers/ata/sata_vsc.c
29273+++ b/drivers/ata/sata_vsc.c
29274@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sata_sht = {
29275 };
29276
29277
29278-static struct ata_port_operations vsc_sata_ops = {
29279+static const struct ata_port_operations vsc_sata_ops = {
29280 .inherits = &ata_bmdma_port_ops,
29281 /* The IRQ handling is not quite standard SFF behaviour so we
29282 cannot use the default lost interrupt handler */
29283diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
29284index 5effec6..7e4019a 100644
29285--- a/drivers/atm/adummy.c
29286+++ b/drivers/atm/adummy.c
29287@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
29288 vcc->pop(vcc, skb);
29289 else
29290 dev_kfree_skb_any(skb);
29291- atomic_inc(&vcc->stats->tx);
29292+ atomic_inc_unchecked(&vcc->stats->tx);
29293
29294 return 0;
29295 }
29296diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
29297index 66e1813..26a27c6 100644
29298--- a/drivers/atm/ambassador.c
29299+++ b/drivers/atm/ambassador.c
29300@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
29301 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
29302
29303 // VC layer stats
29304- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29305+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29306
29307 // free the descriptor
29308 kfree (tx_descr);
29309@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29310 dump_skb ("<<<", vc, skb);
29311
29312 // VC layer stats
29313- atomic_inc(&atm_vcc->stats->rx);
29314+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29315 __net_timestamp(skb);
29316 // end of our responsability
29317 atm_vcc->push (atm_vcc, skb);
29318@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
29319 } else {
29320 PRINTK (KERN_INFO, "dropped over-size frame");
29321 // should we count this?
29322- atomic_inc(&atm_vcc->stats->rx_drop);
29323+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29324 }
29325
29326 } else {
29327@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
29328 }
29329
29330 if (check_area (skb->data, skb->len)) {
29331- atomic_inc(&atm_vcc->stats->tx_err);
29332+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
29333 return -ENOMEM; // ?
29334 }
29335
29336diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
29337index 02ad83d..6daffeb 100644
29338--- a/drivers/atm/atmtcp.c
29339+++ b/drivers/atm/atmtcp.c
29340@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29341 if (vcc->pop) vcc->pop(vcc,skb);
29342 else dev_kfree_skb(skb);
29343 if (dev_data) return 0;
29344- atomic_inc(&vcc->stats->tx_err);
29345+ atomic_inc_unchecked(&vcc->stats->tx_err);
29346 return -ENOLINK;
29347 }
29348 size = skb->len+sizeof(struct atmtcp_hdr);
29349@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29350 if (!new_skb) {
29351 if (vcc->pop) vcc->pop(vcc,skb);
29352 else dev_kfree_skb(skb);
29353- atomic_inc(&vcc->stats->tx_err);
29354+ atomic_inc_unchecked(&vcc->stats->tx_err);
29355 return -ENOBUFS;
29356 }
29357 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
29358@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
29359 if (vcc->pop) vcc->pop(vcc,skb);
29360 else dev_kfree_skb(skb);
29361 out_vcc->push(out_vcc,new_skb);
29362- atomic_inc(&vcc->stats->tx);
29363- atomic_inc(&out_vcc->stats->rx);
29364+ atomic_inc_unchecked(&vcc->stats->tx);
29365+ atomic_inc_unchecked(&out_vcc->stats->rx);
29366 return 0;
29367 }
29368
29369@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29370 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
29371 read_unlock(&vcc_sklist_lock);
29372 if (!out_vcc) {
29373- atomic_inc(&vcc->stats->tx_err);
29374+ atomic_inc_unchecked(&vcc->stats->tx_err);
29375 goto done;
29376 }
29377 skb_pull(skb,sizeof(struct atmtcp_hdr));
29378@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
29379 __net_timestamp(new_skb);
29380 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
29381 out_vcc->push(out_vcc,new_skb);
29382- atomic_inc(&vcc->stats->tx);
29383- atomic_inc(&out_vcc->stats->rx);
29384+ atomic_inc_unchecked(&vcc->stats->tx);
29385+ atomic_inc_unchecked(&out_vcc->stats->rx);
29386 done:
29387 if (vcc->pop) vcc->pop(vcc,skb);
29388 else dev_kfree_skb(skb);
29389diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
29390index 0c30261..3da356e 100644
29391--- a/drivers/atm/eni.c
29392+++ b/drivers/atm/eni.c
29393@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
29394 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
29395 vcc->dev->number);
29396 length = 0;
29397- atomic_inc(&vcc->stats->rx_err);
29398+ atomic_inc_unchecked(&vcc->stats->rx_err);
29399 }
29400 else {
29401 length = ATM_CELL_SIZE-1; /* no HEC */
29402@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29403 size);
29404 }
29405 eff = length = 0;
29406- atomic_inc(&vcc->stats->rx_err);
29407+ atomic_inc_unchecked(&vcc->stats->rx_err);
29408 }
29409 else {
29410 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
29411@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
29412 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
29413 vcc->dev->number,vcc->vci,length,size << 2,descr);
29414 length = eff = 0;
29415- atomic_inc(&vcc->stats->rx_err);
29416+ atomic_inc_unchecked(&vcc->stats->rx_err);
29417 }
29418 }
29419 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
29420@@ -770,7 +770,7 @@ rx_dequeued++;
29421 vcc->push(vcc,skb);
29422 pushed++;
29423 }
29424- atomic_inc(&vcc->stats->rx);
29425+ atomic_inc_unchecked(&vcc->stats->rx);
29426 }
29427 wake_up(&eni_dev->rx_wait);
29428 }
29429@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
29430 PCI_DMA_TODEVICE);
29431 if (vcc->pop) vcc->pop(vcc,skb);
29432 else dev_kfree_skb_irq(skb);
29433- atomic_inc(&vcc->stats->tx);
29434+ atomic_inc_unchecked(&vcc->stats->tx);
29435 wake_up(&eni_dev->tx_wait);
29436 dma_complete++;
29437 }
29438@@ -1570,7 +1570,7 @@ tx_complete++;
29439 /*--------------------------------- entries ---------------------------------*/
29440
29441
29442-static const char *media_name[] __devinitdata = {
29443+static const char *media_name[] __devinitconst = {
29444 "MMF", "SMF", "MMF", "03?", /* 0- 3 */
29445 "UTP", "05?", "06?", "07?", /* 4- 7 */
29446 "TAXI","09?", "10?", "11?", /* 8-11 */
29447diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
29448index cd5049a..a51209f 100644
29449--- a/drivers/atm/firestream.c
29450+++ b/drivers/atm/firestream.c
29451@@ -748,7 +748,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
29452 }
29453 }
29454
29455- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29456+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29457
29458 fs_dprintk (FS_DEBUG_TXMEM, "i");
29459 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
29460@@ -815,7 +815,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29461 #endif
29462 skb_put (skb, qe->p1 & 0xffff);
29463 ATM_SKB(skb)->vcc = atm_vcc;
29464- atomic_inc(&atm_vcc->stats->rx);
29465+ atomic_inc_unchecked(&atm_vcc->stats->rx);
29466 __net_timestamp(skb);
29467 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
29468 atm_vcc->push (atm_vcc, skb);
29469@@ -836,12 +836,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
29470 kfree (pe);
29471 }
29472 if (atm_vcc)
29473- atomic_inc(&atm_vcc->stats->rx_drop);
29474+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29475 break;
29476 case 0x1f: /* Reassembly abort: no buffers. */
29477 /* Silently increment error counter. */
29478 if (atm_vcc)
29479- atomic_inc(&atm_vcc->stats->rx_drop);
29480+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
29481 break;
29482 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
29483 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
29484diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
29485index f766cc4..a34002e 100644
29486--- a/drivers/atm/fore200e.c
29487+++ b/drivers/atm/fore200e.c
29488@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
29489 #endif
29490 /* check error condition */
29491 if (*entry->status & STATUS_ERROR)
29492- atomic_inc(&vcc->stats->tx_err);
29493+ atomic_inc_unchecked(&vcc->stats->tx_err);
29494 else
29495- atomic_inc(&vcc->stats->tx);
29496+ atomic_inc_unchecked(&vcc->stats->tx);
29497 }
29498 }
29499
29500@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29501 if (skb == NULL) {
29502 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
29503
29504- atomic_inc(&vcc->stats->rx_drop);
29505+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29506 return -ENOMEM;
29507 }
29508
29509@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
29510
29511 dev_kfree_skb_any(skb);
29512
29513- atomic_inc(&vcc->stats->rx_drop);
29514+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29515 return -ENOMEM;
29516 }
29517
29518 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29519
29520 vcc->push(vcc, skb);
29521- atomic_inc(&vcc->stats->rx);
29522+ atomic_inc_unchecked(&vcc->stats->rx);
29523
29524 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
29525
29526@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
29527 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
29528 fore200e->atm_dev->number,
29529 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
29530- atomic_inc(&vcc->stats->rx_err);
29531+ atomic_inc_unchecked(&vcc->stats->rx_err);
29532 }
29533 }
29534
29535@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
29536 goto retry_here;
29537 }
29538
29539- atomic_inc(&vcc->stats->tx_err);
29540+ atomic_inc_unchecked(&vcc->stats->tx_err);
29541
29542 fore200e->tx_sat++;
29543 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
29544diff --git a/drivers/atm/he.c b/drivers/atm/he.c
29545index 7066703..2b130de 100644
29546--- a/drivers/atm/he.c
29547+++ b/drivers/atm/he.c
29548@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29549
29550 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
29551 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
29552- atomic_inc(&vcc->stats->rx_drop);
29553+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29554 goto return_host_buffers;
29555 }
29556
29557@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29558 RBRQ_LEN_ERR(he_dev->rbrq_head)
29559 ? "LEN_ERR" : "",
29560 vcc->vpi, vcc->vci);
29561- atomic_inc(&vcc->stats->rx_err);
29562+ atomic_inc_unchecked(&vcc->stats->rx_err);
29563 goto return_host_buffers;
29564 }
29565
29566@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
29567 vcc->push(vcc, skb);
29568 spin_lock(&he_dev->global_lock);
29569
29570- atomic_inc(&vcc->stats->rx);
29571+ atomic_inc_unchecked(&vcc->stats->rx);
29572
29573 return_host_buffers:
29574 ++pdus_assembled;
29575@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
29576 tpd->vcc->pop(tpd->vcc, tpd->skb);
29577 else
29578 dev_kfree_skb_any(tpd->skb);
29579- atomic_inc(&tpd->vcc->stats->tx_err);
29580+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
29581 }
29582 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
29583 return;
29584@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29585 vcc->pop(vcc, skb);
29586 else
29587 dev_kfree_skb_any(skb);
29588- atomic_inc(&vcc->stats->tx_err);
29589+ atomic_inc_unchecked(&vcc->stats->tx_err);
29590 return -EINVAL;
29591 }
29592
29593@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29594 vcc->pop(vcc, skb);
29595 else
29596 dev_kfree_skb_any(skb);
29597- atomic_inc(&vcc->stats->tx_err);
29598+ atomic_inc_unchecked(&vcc->stats->tx_err);
29599 return -EINVAL;
29600 }
29601 #endif
29602@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29603 vcc->pop(vcc, skb);
29604 else
29605 dev_kfree_skb_any(skb);
29606- atomic_inc(&vcc->stats->tx_err);
29607+ atomic_inc_unchecked(&vcc->stats->tx_err);
29608 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29609 return -ENOMEM;
29610 }
29611@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29612 vcc->pop(vcc, skb);
29613 else
29614 dev_kfree_skb_any(skb);
29615- atomic_inc(&vcc->stats->tx_err);
29616+ atomic_inc_unchecked(&vcc->stats->tx_err);
29617 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29618 return -ENOMEM;
29619 }
29620@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
29621 __enqueue_tpd(he_dev, tpd, cid);
29622 spin_unlock_irqrestore(&he_dev->global_lock, flags);
29623
29624- atomic_inc(&vcc->stats->tx);
29625+ atomic_inc_unchecked(&vcc->stats->tx);
29626
29627 return 0;
29628 }
29629diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
29630index 4e49021..01b1512 100644
29631--- a/drivers/atm/horizon.c
29632+++ b/drivers/atm/horizon.c
29633@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
29634 {
29635 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
29636 // VC layer stats
29637- atomic_inc(&vcc->stats->rx);
29638+ atomic_inc_unchecked(&vcc->stats->rx);
29639 __net_timestamp(skb);
29640 // end of our responsability
29641 vcc->push (vcc, skb);
29642@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
29643 dev->tx_iovec = NULL;
29644
29645 // VC layer stats
29646- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
29647+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
29648
29649 // free the skb
29650 hrz_kfree_skb (skb);
29651diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
29652index e33ae00..9deb4ab 100644
29653--- a/drivers/atm/idt77252.c
29654+++ b/drivers/atm/idt77252.c
29655@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
29656 else
29657 dev_kfree_skb(skb);
29658
29659- atomic_inc(&vcc->stats->tx);
29660+ atomic_inc_unchecked(&vcc->stats->tx);
29661 }
29662
29663 atomic_dec(&scq->used);
29664@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29665 if ((sb = dev_alloc_skb(64)) == NULL) {
29666 printk("%s: Can't allocate buffers for aal0.\n",
29667 card->name);
29668- atomic_add(i, &vcc->stats->rx_drop);
29669+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
29670 break;
29671 }
29672 if (!atm_charge(vcc, sb->truesize)) {
29673 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
29674 card->name);
29675- atomic_add(i - 1, &vcc->stats->rx_drop);
29676+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
29677 dev_kfree_skb(sb);
29678 break;
29679 }
29680@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29681 ATM_SKB(sb)->vcc = vcc;
29682 __net_timestamp(sb);
29683 vcc->push(vcc, sb);
29684- atomic_inc(&vcc->stats->rx);
29685+ atomic_inc_unchecked(&vcc->stats->rx);
29686
29687 cell += ATM_CELL_PAYLOAD;
29688 }
29689@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29690 "(CDC: %08x)\n",
29691 card->name, len, rpp->len, readl(SAR_REG_CDC));
29692 recycle_rx_pool_skb(card, rpp);
29693- atomic_inc(&vcc->stats->rx_err);
29694+ atomic_inc_unchecked(&vcc->stats->rx_err);
29695 return;
29696 }
29697 if (stat & SAR_RSQE_CRC) {
29698 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
29699 recycle_rx_pool_skb(card, rpp);
29700- atomic_inc(&vcc->stats->rx_err);
29701+ atomic_inc_unchecked(&vcc->stats->rx_err);
29702 return;
29703 }
29704 if (skb_queue_len(&rpp->queue) > 1) {
29705@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29706 RXPRINTK("%s: Can't alloc RX skb.\n",
29707 card->name);
29708 recycle_rx_pool_skb(card, rpp);
29709- atomic_inc(&vcc->stats->rx_err);
29710+ atomic_inc_unchecked(&vcc->stats->rx_err);
29711 return;
29712 }
29713 if (!atm_charge(vcc, skb->truesize)) {
29714@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29715 __net_timestamp(skb);
29716
29717 vcc->push(vcc, skb);
29718- atomic_inc(&vcc->stats->rx);
29719+ atomic_inc_unchecked(&vcc->stats->rx);
29720
29721 return;
29722 }
29723@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
29724 __net_timestamp(skb);
29725
29726 vcc->push(vcc, skb);
29727- atomic_inc(&vcc->stats->rx);
29728+ atomic_inc_unchecked(&vcc->stats->rx);
29729
29730 if (skb->truesize > SAR_FB_SIZE_3)
29731 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
29732@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
29733 if (vcc->qos.aal != ATM_AAL0) {
29734 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
29735 card->name, vpi, vci);
29736- atomic_inc(&vcc->stats->rx_drop);
29737+ atomic_inc_unchecked(&vcc->stats->rx_drop);
29738 goto drop;
29739 }
29740
29741 if ((sb = dev_alloc_skb(64)) == NULL) {
29742 printk("%s: Can't allocate buffers for AAL0.\n",
29743 card->name);
29744- atomic_inc(&vcc->stats->rx_err);
29745+ atomic_inc_unchecked(&vcc->stats->rx_err);
29746 goto drop;
29747 }
29748
29749@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
29750 ATM_SKB(sb)->vcc = vcc;
29751 __net_timestamp(sb);
29752 vcc->push(vcc, sb);
29753- atomic_inc(&vcc->stats->rx);
29754+ atomic_inc_unchecked(&vcc->stats->rx);
29755
29756 drop:
29757 skb_pull(queue, 64);
29758@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29759
29760 if (vc == NULL) {
29761 printk("%s: NULL connection in send().\n", card->name);
29762- atomic_inc(&vcc->stats->tx_err);
29763+ atomic_inc_unchecked(&vcc->stats->tx_err);
29764 dev_kfree_skb(skb);
29765 return -EINVAL;
29766 }
29767 if (!test_bit(VCF_TX, &vc->flags)) {
29768 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
29769- atomic_inc(&vcc->stats->tx_err);
29770+ atomic_inc_unchecked(&vcc->stats->tx_err);
29771 dev_kfree_skb(skb);
29772 return -EINVAL;
29773 }
29774@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29775 break;
29776 default:
29777 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
29778- atomic_inc(&vcc->stats->tx_err);
29779+ atomic_inc_unchecked(&vcc->stats->tx_err);
29780 dev_kfree_skb(skb);
29781 return -EINVAL;
29782 }
29783
29784 if (skb_shinfo(skb)->nr_frags != 0) {
29785 printk("%s: No scatter-gather yet.\n", card->name);
29786- atomic_inc(&vcc->stats->tx_err);
29787+ atomic_inc_unchecked(&vcc->stats->tx_err);
29788 dev_kfree_skb(skb);
29789 return -EINVAL;
29790 }
29791@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
29792
29793 err = queue_skb(card, vc, skb, oam);
29794 if (err) {
29795- atomic_inc(&vcc->stats->tx_err);
29796+ atomic_inc_unchecked(&vcc->stats->tx_err);
29797 dev_kfree_skb(skb);
29798 return err;
29799 }
29800@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
29801 skb = dev_alloc_skb(64);
29802 if (!skb) {
29803 printk("%s: Out of memory in send_oam().\n", card->name);
29804- atomic_inc(&vcc->stats->tx_err);
29805+ atomic_inc_unchecked(&vcc->stats->tx_err);
29806 return -ENOMEM;
29807 }
29808 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
29809diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
29810index b2c1b37..faa672b 100644
29811--- a/drivers/atm/iphase.c
29812+++ b/drivers/atm/iphase.c
29813@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
29814 status = (u_short) (buf_desc_ptr->desc_mode);
29815 if (status & (RX_CER | RX_PTE | RX_OFL))
29816 {
29817- atomic_inc(&vcc->stats->rx_err);
29818+ atomic_inc_unchecked(&vcc->stats->rx_err);
29819 IF_ERR(printk("IA: bad packet, dropping it");)
29820 if (status & RX_CER) {
29821 IF_ERR(printk(" cause: packet CRC error\n");)
29822@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
29823 len = dma_addr - buf_addr;
29824 if (len > iadev->rx_buf_sz) {
29825 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
29826- atomic_inc(&vcc->stats->rx_err);
29827+ atomic_inc_unchecked(&vcc->stats->rx_err);
29828 goto out_free_desc;
29829 }
29830
29831@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29832 ia_vcc = INPH_IA_VCC(vcc);
29833 if (ia_vcc == NULL)
29834 {
29835- atomic_inc(&vcc->stats->rx_err);
29836+ atomic_inc_unchecked(&vcc->stats->rx_err);
29837 dev_kfree_skb_any(skb);
29838 atm_return(vcc, atm_guess_pdu2truesize(len));
29839 goto INCR_DLE;
29840@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29841 if ((length > iadev->rx_buf_sz) || (length >
29842 (skb->len - sizeof(struct cpcs_trailer))))
29843 {
29844- atomic_inc(&vcc->stats->rx_err);
29845+ atomic_inc_unchecked(&vcc->stats->rx_err);
29846 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
29847 length, skb->len);)
29848 dev_kfree_skb_any(skb);
29849@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *dev)
29850
29851 IF_RX(printk("rx_dle_intr: skb push");)
29852 vcc->push(vcc,skb);
29853- atomic_inc(&vcc->stats->rx);
29854+ atomic_inc_unchecked(&vcc->stats->rx);
29855 iadev->rx_pkt_cnt++;
29856 }
29857 INCR_DLE:
29858@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
29859 {
29860 struct k_sonet_stats *stats;
29861 stats = &PRIV(_ia_dev[board])->sonet_stats;
29862- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
29863- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
29864- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
29865- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
29866- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
29867- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
29868- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
29869- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
29870- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
29871+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
29872+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
29873+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
29874+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
29875+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
29876+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
29877+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
29878+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
29879+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
29880 }
29881 ia_cmds.status = 0;
29882 break;
29883@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29884 if ((desc == 0) || (desc > iadev->num_tx_desc))
29885 {
29886 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
29887- atomic_inc(&vcc->stats->tx);
29888+ atomic_inc_unchecked(&vcc->stats->tx);
29889 if (vcc->pop)
29890 vcc->pop(vcc, skb);
29891 else
29892@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
29893 ATM_DESC(skb) = vcc->vci;
29894 skb_queue_tail(&iadev->tx_dma_q, skb);
29895
29896- atomic_inc(&vcc->stats->tx);
29897+ atomic_inc_unchecked(&vcc->stats->tx);
29898 iadev->tx_pkt_cnt++;
29899 /* Increment transaction counter */
29900 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
29901
29902 #if 0
29903 /* add flow control logic */
29904- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
29905+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
29906 if (iavcc->vc_desc_cnt > 10) {
29907 vcc->tx_quota = vcc->tx_quota * 3 / 4;
29908 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
29909diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
29910index cf97c34..8d30655 100644
29911--- a/drivers/atm/lanai.c
29912+++ b/drivers/atm/lanai.c
29913@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
29914 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
29915 lanai_endtx(lanai, lvcc);
29916 lanai_free_skb(lvcc->tx.atmvcc, skb);
29917- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
29918+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
29919 }
29920
29921 /* Try to fill the buffer - don't call unless there is backlog */
29922@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
29923 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
29924 __net_timestamp(skb);
29925 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
29926- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
29927+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
29928 out:
29929 lvcc->rx.buf.ptr = end;
29930 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
29931@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29932 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
29933 "vcc %d\n", lanai->number, (unsigned int) s, vci);
29934 lanai->stats.service_rxnotaal5++;
29935- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29936+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29937 return 0;
29938 }
29939 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
29940@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29941 int bytes;
29942 read_unlock(&vcc_sklist_lock);
29943 DPRINTK("got trashed rx pdu on vci %d\n", vci);
29944- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29945+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29946 lvcc->stats.x.aal5.service_trash++;
29947 bytes = (SERVICE_GET_END(s) * 16) -
29948 (((unsigned long) lvcc->rx.buf.ptr) -
29949@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29950 }
29951 if (s & SERVICE_STREAM) {
29952 read_unlock(&vcc_sklist_lock);
29953- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29954+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29955 lvcc->stats.x.aal5.service_stream++;
29956 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
29957 "PDU on VCI %d!\n", lanai->number, vci);
29958@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
29959 return 0;
29960 }
29961 DPRINTK("got rx crc error on vci %d\n", vci);
29962- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
29963+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
29964 lvcc->stats.x.aal5.service_rxcrc++;
29965 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
29966 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
29967diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
29968index 3da804b..d3b0eed 100644
29969--- a/drivers/atm/nicstar.c
29970+++ b/drivers/atm/nicstar.c
29971@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29972 if ((vc = (vc_map *) vcc->dev_data) == NULL)
29973 {
29974 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
29975- atomic_inc(&vcc->stats->tx_err);
29976+ atomic_inc_unchecked(&vcc->stats->tx_err);
29977 dev_kfree_skb_any(skb);
29978 return -EINVAL;
29979 }
29980@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29981 if (!vc->tx)
29982 {
29983 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
29984- atomic_inc(&vcc->stats->tx_err);
29985+ atomic_inc_unchecked(&vcc->stats->tx_err);
29986 dev_kfree_skb_any(skb);
29987 return -EINVAL;
29988 }
29989@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29990 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
29991 {
29992 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
29993- atomic_inc(&vcc->stats->tx_err);
29994+ atomic_inc_unchecked(&vcc->stats->tx_err);
29995 dev_kfree_skb_any(skb);
29996 return -EINVAL;
29997 }
29998@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
29999 if (skb_shinfo(skb)->nr_frags != 0)
30000 {
30001 printk("nicstar%d: No scatter-gather yet.\n", card->index);
30002- atomic_inc(&vcc->stats->tx_err);
30003+ atomic_inc_unchecked(&vcc->stats->tx_err);
30004 dev_kfree_skb_any(skb);
30005 return -EINVAL;
30006 }
30007@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
30008
30009 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
30010 {
30011- atomic_inc(&vcc->stats->tx_err);
30012+ atomic_inc_unchecked(&vcc->stats->tx_err);
30013 dev_kfree_skb_any(skb);
30014 return -EIO;
30015 }
30016- atomic_inc(&vcc->stats->tx);
30017+ atomic_inc_unchecked(&vcc->stats->tx);
30018
30019 return 0;
30020 }
30021@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30022 {
30023 printk("nicstar%d: Can't allocate buffers for aal0.\n",
30024 card->index);
30025- atomic_add(i,&vcc->stats->rx_drop);
30026+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
30027 break;
30028 }
30029 if (!atm_charge(vcc, sb->truesize))
30030 {
30031 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
30032 card->index);
30033- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30034+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
30035 dev_kfree_skb_any(sb);
30036 break;
30037 }
30038@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30039 ATM_SKB(sb)->vcc = vcc;
30040 __net_timestamp(sb);
30041 vcc->push(vcc, sb);
30042- atomic_inc(&vcc->stats->rx);
30043+ atomic_inc_unchecked(&vcc->stats->rx);
30044 cell += ATM_CELL_PAYLOAD;
30045 }
30046
30047@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30048 if (iovb == NULL)
30049 {
30050 printk("nicstar%d: Out of iovec buffers.\n", card->index);
30051- atomic_inc(&vcc->stats->rx_drop);
30052+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30053 recycle_rx_buf(card, skb);
30054 return;
30055 }
30056@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30057 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
30058 {
30059 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
30060- atomic_inc(&vcc->stats->rx_err);
30061+ atomic_inc_unchecked(&vcc->stats->rx_err);
30062 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
30063 NS_SKB(iovb)->iovcnt = 0;
30064 iovb->len = 0;
30065@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30066 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
30067 card->index);
30068 which_list(card, skb);
30069- atomic_inc(&vcc->stats->rx_err);
30070+ atomic_inc_unchecked(&vcc->stats->rx_err);
30071 recycle_rx_buf(card, skb);
30072 vc->rx_iov = NULL;
30073 recycle_iov_buf(card, iovb);
30074@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30075 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
30076 card->index);
30077 which_list(card, skb);
30078- atomic_inc(&vcc->stats->rx_err);
30079+ atomic_inc_unchecked(&vcc->stats->rx_err);
30080 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30081 NS_SKB(iovb)->iovcnt);
30082 vc->rx_iov = NULL;
30083@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30084 printk(" - PDU size mismatch.\n");
30085 else
30086 printk(".\n");
30087- atomic_inc(&vcc->stats->rx_err);
30088+ atomic_inc_unchecked(&vcc->stats->rx_err);
30089 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30090 NS_SKB(iovb)->iovcnt);
30091 vc->rx_iov = NULL;
30092@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30093 if (!atm_charge(vcc, skb->truesize))
30094 {
30095 push_rxbufs(card, skb);
30096- atomic_inc(&vcc->stats->rx_drop);
30097+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30098 }
30099 else
30100 {
30101@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30102 ATM_SKB(skb)->vcc = vcc;
30103 __net_timestamp(skb);
30104 vcc->push(vcc, skb);
30105- atomic_inc(&vcc->stats->rx);
30106+ atomic_inc_unchecked(&vcc->stats->rx);
30107 }
30108 }
30109 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
30110@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30111 if (!atm_charge(vcc, sb->truesize))
30112 {
30113 push_rxbufs(card, sb);
30114- atomic_inc(&vcc->stats->rx_drop);
30115+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30116 }
30117 else
30118 {
30119@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30120 ATM_SKB(sb)->vcc = vcc;
30121 __net_timestamp(sb);
30122 vcc->push(vcc, sb);
30123- atomic_inc(&vcc->stats->rx);
30124+ atomic_inc_unchecked(&vcc->stats->rx);
30125 }
30126
30127 push_rxbufs(card, skb);
30128@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30129 if (!atm_charge(vcc, skb->truesize))
30130 {
30131 push_rxbufs(card, skb);
30132- atomic_inc(&vcc->stats->rx_drop);
30133+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30134 }
30135 else
30136 {
30137@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30138 ATM_SKB(skb)->vcc = vcc;
30139 __net_timestamp(skb);
30140 vcc->push(vcc, skb);
30141- atomic_inc(&vcc->stats->rx);
30142+ atomic_inc_unchecked(&vcc->stats->rx);
30143 }
30144
30145 push_rxbufs(card, sb);
30146@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30147 if (hb == NULL)
30148 {
30149 printk("nicstar%d: Out of huge buffers.\n", card->index);
30150- atomic_inc(&vcc->stats->rx_drop);
30151+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30152 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
30153 NS_SKB(iovb)->iovcnt);
30154 vc->rx_iov = NULL;
30155@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30156 }
30157 else
30158 dev_kfree_skb_any(hb);
30159- atomic_inc(&vcc->stats->rx_drop);
30160+ atomic_inc_unchecked(&vcc->stats->rx_drop);
30161 }
30162 else
30163 {
30164@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe)
30165 #endif /* NS_USE_DESTRUCTORS */
30166 __net_timestamp(hb);
30167 vcc->push(vcc, hb);
30168- atomic_inc(&vcc->stats->rx);
30169+ atomic_inc_unchecked(&vcc->stats->rx);
30170 }
30171 }
30172
30173diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
30174index 84c93ff..e6ed269 100644
30175--- a/drivers/atm/solos-pci.c
30176+++ b/drivers/atm/solos-pci.c
30177@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
30178 }
30179 atm_charge(vcc, skb->truesize);
30180 vcc->push(vcc, skb);
30181- atomic_inc(&vcc->stats->rx);
30182+ atomic_inc_unchecked(&vcc->stats->rx);
30183 break;
30184
30185 case PKT_STATUS:
30186@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *buf)
30187 char msg[500];
30188 char item[10];
30189
30190+ pax_track_stack();
30191+
30192 len = buf->len;
30193 for (i = 0; i < len; i++){
30194 if(i % 8 == 0)
30195@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_card *card)
30196 vcc = SKB_CB(oldskb)->vcc;
30197
30198 if (vcc) {
30199- atomic_inc(&vcc->stats->tx);
30200+ atomic_inc_unchecked(&vcc->stats->tx);
30201 solos_pop(vcc, oldskb);
30202 } else
30203 dev_kfree_skb_irq(oldskb);
30204diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
30205index 6dd3f59..ee377f3 100644
30206--- a/drivers/atm/suni.c
30207+++ b/drivers/atm/suni.c
30208@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
30209
30210
30211 #define ADD_LIMITED(s,v) \
30212- atomic_add((v),&stats->s); \
30213- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
30214+ atomic_add_unchecked((v),&stats->s); \
30215+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
30216
30217
30218 static void suni_hz(unsigned long from_timer)
30219diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
30220index fc8cb07..4a80e53 100644
30221--- a/drivers/atm/uPD98402.c
30222+++ b/drivers/atm/uPD98402.c
30223@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
30224 struct sonet_stats tmp;
30225 int error = 0;
30226
30227- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30228+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
30229 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
30230 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
30231 if (zero && !error) {
30232@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
30233
30234
30235 #define ADD_LIMITED(s,v) \
30236- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
30237- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
30238- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30239+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
30240+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
30241+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
30242
30243
30244 static void stat_event(struct atm_dev *dev)
30245@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev *dev)
30246 if (reason & uPD98402_INT_PFM) stat_event(dev);
30247 if (reason & uPD98402_INT_PCO) {
30248 (void) GET(PCOCR); /* clear interrupt cause */
30249- atomic_add(GET(HECCT),
30250+ atomic_add_unchecked(GET(HECCT),
30251 &PRIV(dev)->sonet_stats.uncorr_hcs);
30252 }
30253 if ((reason & uPD98402_INT_RFO) &&
30254@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev *dev)
30255 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
30256 uPD98402_INT_LOS),PIMR); /* enable them */
30257 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
30258- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30259- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
30260- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
30261+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
30262+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
30263+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
30264 return 0;
30265 }
30266
30267diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
30268index 2e9635b..32927b4 100644
30269--- a/drivers/atm/zatm.c
30270+++ b/drivers/atm/zatm.c
30271@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30272 }
30273 if (!size) {
30274 dev_kfree_skb_irq(skb);
30275- if (vcc) atomic_inc(&vcc->stats->rx_err);
30276+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
30277 continue;
30278 }
30279 if (!atm_charge(vcc,skb->truesize)) {
30280@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
30281 skb->len = size;
30282 ATM_SKB(skb)->vcc = vcc;
30283 vcc->push(vcc,skb);
30284- atomic_inc(&vcc->stats->rx);
30285+ atomic_inc_unchecked(&vcc->stats->rx);
30286 }
30287 zout(pos & 0xffff,MTA(mbx));
30288 #if 0 /* probably a stupid idea */
30289@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
30290 skb_queue_head(&zatm_vcc->backlog,skb);
30291 break;
30292 }
30293- atomic_inc(&vcc->stats->tx);
30294+ atomic_inc_unchecked(&vcc->stats->tx);
30295 wake_up(&zatm_vcc->tx_wait);
30296 }
30297
30298diff --git a/drivers/base/bus.c b/drivers/base/bus.c
30299index 63c143e..fece183 100644
30300--- a/drivers/base/bus.c
30301+++ b/drivers/base/bus.c
30302@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kobject *kobj, struct attribute *attr,
30303 return ret;
30304 }
30305
30306-static struct sysfs_ops driver_sysfs_ops = {
30307+static const struct sysfs_ops driver_sysfs_ops = {
30308 .show = drv_attr_show,
30309 .store = drv_attr_store,
30310 };
30311@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kobject *kobj, struct attribute *attr,
30312 return ret;
30313 }
30314
30315-static struct sysfs_ops bus_sysfs_ops = {
30316+static const struct sysfs_ops bus_sysfs_ops = {
30317 .show = bus_attr_show,
30318 .store = bus_attr_store,
30319 };
30320@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset *kset, struct kobject *kobj)
30321 return 0;
30322 }
30323
30324-static struct kset_uevent_ops bus_uevent_ops = {
30325+static const struct kset_uevent_ops bus_uevent_ops = {
30326 .filter = bus_uevent_filter,
30327 };
30328
30329diff --git a/drivers/base/class.c b/drivers/base/class.c
30330index 6e2c3b0..cb61871 100644
30331--- a/drivers/base/class.c
30332+++ b/drivers/base/class.c
30333@@ -63,7 +63,7 @@ static void class_release(struct kobject *kobj)
30334 kfree(cp);
30335 }
30336
30337-static struct sysfs_ops class_sysfs_ops = {
30338+static const struct sysfs_ops class_sysfs_ops = {
30339 .show = class_attr_show,
30340 .store = class_attr_store,
30341 };
30342diff --git a/drivers/base/core.c b/drivers/base/core.c
30343index f33d768..a9358d0 100644
30344--- a/drivers/base/core.c
30345+++ b/drivers/base/core.c
30346@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kobject *kobj, struct attribute *attr,
30347 return ret;
30348 }
30349
30350-static struct sysfs_ops dev_sysfs_ops = {
30351+static const struct sysfs_ops dev_sysfs_ops = {
30352 .show = dev_attr_show,
30353 .store = dev_attr_store,
30354 };
30355@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset, struct kobject *kobj,
30356 return retval;
30357 }
30358
30359-static struct kset_uevent_ops device_uevent_ops = {
30360+static const struct kset_uevent_ops device_uevent_ops = {
30361 .filter = dev_uevent_filter,
30362 .name = dev_uevent_name,
30363 .uevent = dev_uevent,
30364diff --git a/drivers/base/memory.c b/drivers/base/memory.c
30365index 989429c..2272b00 100644
30366--- a/drivers/base/memory.c
30367+++ b/drivers/base/memory.c
30368@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *kset, struct kobject *obj, struct kobj_uev
30369 return retval;
30370 }
30371
30372-static struct kset_uevent_ops memory_uevent_ops = {
30373+static const struct kset_uevent_ops memory_uevent_ops = {
30374 .name = memory_uevent_name,
30375 .uevent = memory_uevent,
30376 };
30377diff --git a/drivers/base/sys.c b/drivers/base/sys.c
30378index 3f202f7..61c4a6f 100644
30379--- a/drivers/base/sys.c
30380+++ b/drivers/base/sys.c
30381@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struct attribute *attr,
30382 return -EIO;
30383 }
30384
30385-static struct sysfs_ops sysfs_ops = {
30386+static const struct sysfs_ops sysfs_ops = {
30387 .show = sysdev_show,
30388 .store = sysdev_store,
30389 };
30390@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
30391 return -EIO;
30392 }
30393
30394-static struct sysfs_ops sysfs_class_ops = {
30395+static const struct sysfs_ops sysfs_class_ops = {
30396 .show = sysdev_class_show,
30397 .store = sysdev_class_store,
30398 };
30399diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
30400index eb4fa19..1954777 100644
30401--- a/drivers/block/DAC960.c
30402+++ b/drivers/block/DAC960.c
30403@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfiguration(DAC960_Controller_T
30404 unsigned long flags;
30405 int Channel, TargetID;
30406
30407+ pax_track_stack();
30408+
30409 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
30410 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
30411 sizeof(DAC960_SCSI_Inquiry_T) +
30412diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
30413index 68b90d9..7e2e3f3 100644
30414--- a/drivers/block/cciss.c
30415+++ b/drivers/block/cciss.c
30416@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
30417 int err;
30418 u32 cp;
30419
30420+ memset(&arg64, 0, sizeof(arg64));
30421+
30422 err = 0;
30423 err |=
30424 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
30425@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ctlr)
30426 /* Wait (up to 20 seconds) for a command to complete */
30427
30428 for (i = 20 * HZ; i > 0; i--) {
30429- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30430+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30431 if (done == FIFO_EMPTY)
30432 schedule_timeout_uninterruptible(1);
30433 else
30434@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h, CommandList_struct *c)
30435 resend_cmd1:
30436
30437 /* Disable interrupt on the board. */
30438- h->access.set_intr_mask(h, CCISS_INTR_OFF);
30439+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
30440
30441 /* Make sure there is room in the command FIFO */
30442 /* Actually it should be completely empty at this time */
30443@@ -2884,13 +2886,13 @@ resend_cmd1:
30444 /* tape side of the driver. */
30445 for (i = 200000; i > 0; i--) {
30446 /* if fifo isn't full go */
30447- if (!(h->access.fifo_full(h)))
30448+ if (!(h->access->fifo_full(h)))
30449 break;
30450 udelay(10);
30451 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
30452 " waiting!\n", h->ctlr);
30453 }
30454- h->access.submit_command(h, c); /* Send the cmd */
30455+ h->access->submit_command(h, c); /* Send the cmd */
30456 do {
30457 complete = pollcomplete(h->ctlr);
30458
30459@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
30460 while (!hlist_empty(&h->reqQ)) {
30461 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
30462 /* can't do anything if fifo is full */
30463- if ((h->access.fifo_full(h))) {
30464+ if ((h->access->fifo_full(h))) {
30465 printk(KERN_WARNING "cciss: fifo full\n");
30466 break;
30467 }
30468@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
30469 h->Qdepth--;
30470
30471 /* Tell the controller execute command */
30472- h->access.submit_command(h, c);
30473+ h->access->submit_command(h, c);
30474
30475 /* Put job onto the completed Q */
30476 addQ(&h->cmpQ, c);
30477@@ -3393,17 +3395,17 @@ startio:
30478
30479 static inline unsigned long get_next_completion(ctlr_info_t *h)
30480 {
30481- return h->access.command_completed(h);
30482+ return h->access->command_completed(h);
30483 }
30484
30485 static inline int interrupt_pending(ctlr_info_t *h)
30486 {
30487- return h->access.intr_pending(h);
30488+ return h->access->intr_pending(h);
30489 }
30490
30491 static inline long interrupt_not_for_us(ctlr_info_t *h)
30492 {
30493- return (((h->access.intr_pending(h) == 0) ||
30494+ return (((h->access->intr_pending(h) == 0) ||
30495 (h->interrupts_enabled == 0)));
30496 }
30497
30498@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
30499 */
30500 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
30501 c->product_name = products[prod_index].product_name;
30502- c->access = *(products[prod_index].access);
30503+ c->access = products[prod_index].access;
30504 c->nr_cmds = c->max_commands - 4;
30505 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
30506 (readb(&c->cfgtable->Signature[1]) != 'I') ||
30507@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30508 }
30509
30510 /* make sure the board interrupts are off */
30511- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
30512+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
30513 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
30514 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
30515 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
30516@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
30517 cciss_scsi_setup(i);
30518
30519 /* Turn the interrupts on so we can service requests */
30520- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
30521+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
30522
30523 /* Get the firmware version */
30524 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
30525diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
30526index 04d6bf8..36e712d 100644
30527--- a/drivers/block/cciss.h
30528+++ b/drivers/block/cciss.h
30529@@ -90,7 +90,7 @@ struct ctlr_info
30530 // information about each logical volume
30531 drive_info_struct *drv[CISS_MAX_LUN];
30532
30533- struct access_method access;
30534+ struct access_method *access;
30535
30536 /* queue and queue Info */
30537 struct hlist_head reqQ;
30538diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
30539index 6422651..bb1bdef 100644
30540--- a/drivers/block/cpqarray.c
30541+++ b/drivers/block/cpqarray.c
30542@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30543 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
30544 goto Enomem4;
30545 }
30546- hba[i]->access.set_intr_mask(hba[i], 0);
30547+ hba[i]->access->set_intr_mask(hba[i], 0);
30548 if (request_irq(hba[i]->intr, do_ida_intr,
30549 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
30550 {
30551@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr( int i, struct pci_dev *pdev)
30552 add_timer(&hba[i]->timer);
30553
30554 /* Enable IRQ now that spinlock and rate limit timer are set up */
30555- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30556+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
30557
30558 for(j=0; j<NWD; j++) {
30559 struct gendisk *disk = ida_gendisk[i][j];
30560@@ -695,7 +695,7 @@ DBGINFO(
30561 for(i=0; i<NR_PRODUCTS; i++) {
30562 if (board_id == products[i].board_id) {
30563 c->product_name = products[i].product_name;
30564- c->access = *(products[i].access);
30565+ c->access = products[i].access;
30566 break;
30567 }
30568 }
30569@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(void)
30570 hba[ctlr]->intr = intr;
30571 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
30572 hba[ctlr]->product_name = products[j].product_name;
30573- hba[ctlr]->access = *(products[j].access);
30574+ hba[ctlr]->access = products[j].access;
30575 hba[ctlr]->ctlr = ctlr;
30576 hba[ctlr]->board_id = board_id;
30577 hba[ctlr]->pci_dev = NULL; /* not PCI */
30578@@ -896,6 +896,8 @@ static void do_ida_request(struct request_queue *q)
30579 struct scatterlist tmp_sg[SG_MAX];
30580 int i, dir, seg;
30581
30582+ pax_track_stack();
30583+
30584 if (blk_queue_plugged(q))
30585 goto startio;
30586
30587@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
30588
30589 while((c = h->reqQ) != NULL) {
30590 /* Can't do anything if we're busy */
30591- if (h->access.fifo_full(h) == 0)
30592+ if (h->access->fifo_full(h) == 0)
30593 return;
30594
30595 /* Get the first entry from the request Q */
30596@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
30597 h->Qdepth--;
30598
30599 /* Tell the controller to do our bidding */
30600- h->access.submit_command(h, c);
30601+ h->access->submit_command(h, c);
30602
30603 /* Get onto the completion Q */
30604 addQ(&h->cmpQ, c);
30605@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30606 unsigned long flags;
30607 __u32 a,a1;
30608
30609- istat = h->access.intr_pending(h);
30610+ istat = h->access->intr_pending(h);
30611 /* Is this interrupt for us? */
30612 if (istat == 0)
30613 return IRQ_NONE;
30614@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
30615 */
30616 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
30617 if (istat & FIFO_NOT_EMPTY) {
30618- while((a = h->access.command_completed(h))) {
30619+ while((a = h->access->command_completed(h))) {
30620 a1 = a; a &= ~3;
30621 if ((c = h->cmpQ) == NULL)
30622 {
30623@@ -1434,11 +1436,11 @@ static int sendcmd(
30624 /*
30625 * Disable interrupt
30626 */
30627- info_p->access.set_intr_mask(info_p, 0);
30628+ info_p->access->set_intr_mask(info_p, 0);
30629 /* Make sure there is room in the command FIFO */
30630 /* Actually it should be completely empty at this time. */
30631 for (i = 200000; i > 0; i--) {
30632- temp = info_p->access.fifo_full(info_p);
30633+ temp = info_p->access->fifo_full(info_p);
30634 if (temp != 0) {
30635 break;
30636 }
30637@@ -1451,7 +1453,7 @@ DBG(
30638 /*
30639 * Send the cmd
30640 */
30641- info_p->access.submit_command(info_p, c);
30642+ info_p->access->submit_command(info_p, c);
30643 complete = pollcomplete(ctlr);
30644
30645 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
30646@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t *host)
30647 * we check the new geometry. Then turn interrupts back on when
30648 * we're done.
30649 */
30650- host->access.set_intr_mask(host, 0);
30651+ host->access->set_intr_mask(host, 0);
30652 getgeometry(ctlr);
30653- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
30654+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
30655
30656 for(i=0; i<NWD; i++) {
30657 struct gendisk *disk = ida_gendisk[ctlr][i];
30658@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
30659 /* Wait (up to 2 seconds) for a command to complete */
30660
30661 for (i = 200000; i > 0; i--) {
30662- done = hba[ctlr]->access.command_completed(hba[ctlr]);
30663+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
30664 if (done == 0) {
30665 udelay(10); /* a short fixed delay */
30666 } else
30667diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
30668index be73e9d..7fbf140 100644
30669--- a/drivers/block/cpqarray.h
30670+++ b/drivers/block/cpqarray.h
30671@@ -99,7 +99,7 @@ struct ctlr_info {
30672 drv_info_t drv[NWD];
30673 struct proc_dir_entry *proc;
30674
30675- struct access_method access;
30676+ struct access_method *access;
30677
30678 cmdlist_t *reqQ;
30679 cmdlist_t *cmpQ;
30680diff --git a/drivers/block/loop.c b/drivers/block/loop.c
30681index 8ec2d70..2804b30 100644
30682--- a/drivers/block/loop.c
30683+++ b/drivers/block/loop.c
30684@@ -282,7 +282,7 @@ static int __do_lo_send_write(struct file *file,
30685 mm_segment_t old_fs = get_fs();
30686
30687 set_fs(get_ds());
30688- bw = file->f_op->write(file, buf, len, &pos);
30689+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
30690 set_fs(old_fs);
30691 if (likely(bw == len))
30692 return 0;
30693diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
30694index 26ada47..083c480 100644
30695--- a/drivers/block/nbd.c
30696+++ b/drivers/block/nbd.c
30697@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *lo, int send, void *buf, int size,
30698 struct kvec iov;
30699 sigset_t blocked, oldset;
30700
30701+ pax_track_stack();
30702+
30703 if (unlikely(!sock)) {
30704 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
30705 lo->disk->disk_name, (send ? "send" : "recv"));
30706@@ -569,6 +571,8 @@ static void do_nbd_request(struct request_queue *q)
30707 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
30708 unsigned int cmd, unsigned long arg)
30709 {
30710+ pax_track_stack();
30711+
30712 switch (cmd) {
30713 case NBD_DISCONNECT: {
30714 struct request sreq;
30715diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
30716index a5d585d..d087be3 100644
30717--- a/drivers/block/pktcdvd.c
30718+++ b/drivers/block/pktcdvd.c
30719@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kobject *kobj,
30720 return len;
30721 }
30722
30723-static struct sysfs_ops kobj_pkt_ops = {
30724+static const struct sysfs_ops kobj_pkt_ops = {
30725 .show = kobj_pkt_show,
30726 .store = kobj_pkt_store
30727 };
30728diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
30729index 6aad99e..89cd142 100644
30730--- a/drivers/char/Kconfig
30731+++ b/drivers/char/Kconfig
30732@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
30733
30734 config DEVKMEM
30735 bool "/dev/kmem virtual device support"
30736- default y
30737+ default n
30738+ depends on !GRKERNSEC_KMEM
30739 help
30740 Say Y here if you want to support the /dev/kmem device. The
30741 /dev/kmem device is rarely used, but can be used for certain
30742@@ -1114,6 +1115,7 @@ config DEVPORT
30743 bool
30744 depends on !M68K
30745 depends on ISA || PCI
30746+ depends on !GRKERNSEC_KMEM
30747 default y
30748
30749 source "drivers/s390/char/Kconfig"
30750diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
30751index a96f319..a778a5b 100644
30752--- a/drivers/char/agp/frontend.c
30753+++ b/drivers/char/agp/frontend.c
30754@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
30755 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
30756 return -EFAULT;
30757
30758- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
30759+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
30760 return -EFAULT;
30761
30762 client = agp_find_client_by_pid(reserve.pid);
30763diff --git a/drivers/char/briq_panel.c b/drivers/char/briq_panel.c
30764index d8cff90..9628e70 100644
30765--- a/drivers/char/briq_panel.c
30766+++ b/drivers/char/briq_panel.c
30767@@ -10,6 +10,7 @@
30768 #include <linux/types.h>
30769 #include <linux/errno.h>
30770 #include <linux/tty.h>
30771+#include <linux/mutex.h>
30772 #include <linux/timer.h>
30773 #include <linux/kernel.h>
30774 #include <linux/wait.h>
30775@@ -36,6 +37,7 @@ static int vfd_is_open;
30776 static unsigned char vfd[40];
30777 static int vfd_cursor;
30778 static unsigned char ledpb, led;
30779+static DEFINE_MUTEX(vfd_mutex);
30780
30781 static void update_vfd(void)
30782 {
30783@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30784 if (!vfd_is_open)
30785 return -EBUSY;
30786
30787+ mutex_lock(&vfd_mutex);
30788 for (;;) {
30789 char c;
30790 if (!indx)
30791 break;
30792- if (get_user(c, buf))
30793+ if (get_user(c, buf)) {
30794+ mutex_unlock(&vfd_mutex);
30795 return -EFAULT;
30796+ }
30797 if (esc) {
30798 set_led(c);
30799 esc = 0;
30800@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct file *file, const char __user *buf, size_
30801 buf++;
30802 }
30803 update_vfd();
30804+ mutex_unlock(&vfd_mutex);
30805
30806 return len;
30807 }
30808diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
30809index 31e7c91..161afc0 100644
30810--- a/drivers/char/genrtc.c
30811+++ b/drivers/char/genrtc.c
30812@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *inode, struct file *file,
30813 switch (cmd) {
30814
30815 case RTC_PLL_GET:
30816+ memset(&pll, 0, sizeof(pll));
30817 if (get_rtc_pll(&pll))
30818 return -EINVAL;
30819 else
30820diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
30821index 006466d..a2bb21c 100644
30822--- a/drivers/char/hpet.c
30823+++ b/drivers/char/hpet.c
30824@@ -430,7 +430,7 @@ static int hpet_release(struct inode *inode, struct file *file)
30825 return 0;
30826 }
30827
30828-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
30829+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
30830
30831 static int
30832 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
30833@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
30834 }
30835
30836 static int
30837-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30838+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
30839 {
30840 struct hpet_timer __iomem *timer;
30841 struct hpet __iomem *hpet;
30842@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
30843 {
30844 struct hpet_info info;
30845
30846+ memset(&info, 0, sizeof(info));
30847+
30848 if (devp->hd_ireqfreq)
30849 info.hi_ireqfreq =
30850 hpet_time_div(hpetp, devp->hd_ireqfreq);
30851- else
30852- info.hi_ireqfreq = 0;
30853 info.hi_flags =
30854 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
30855 info.hi_hpet = hpetp->hp_which;
30856diff --git a/drivers/char/hvc_beat.c b/drivers/char/hvc_beat.c
30857index 0afc8b8..6913fc3 100644
30858--- a/drivers/char/hvc_beat.c
30859+++ b/drivers/char/hvc_beat.c
30860@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t vtermno, const char *buf, int cnt)
30861 return cnt;
30862 }
30863
30864-static struct hv_ops hvc_beat_get_put_ops = {
30865+static const struct hv_ops hvc_beat_get_put_ops = {
30866 .get_chars = hvc_beat_get_chars,
30867 .put_chars = hvc_beat_put_chars,
30868 };
30869diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
30870index 98097f2..407dddc 100644
30871--- a/drivers/char/hvc_console.c
30872+++ b/drivers/char/hvc_console.c
30873@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_index(int index)
30874 * console interfaces but can still be used as a tty device. This has to be
30875 * static because kmalloc will not work during early console init.
30876 */
30877-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30878+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
30879 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
30880 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
30881
30882@@ -249,7 +249,7 @@ static void destroy_hvc_struct(struct kref *kref)
30883 * vty adapters do NOT get an hvc_instantiate() callback since they
30884 * appear after early console init.
30885 */
30886-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
30887+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
30888 {
30889 struct hvc_struct *hp;
30890
30891@@ -758,7 +758,7 @@ static const struct tty_operations hvc_ops = {
30892 };
30893
30894 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
30895- struct hv_ops *ops, int outbuf_size)
30896+ const struct hv_ops *ops, int outbuf_size)
30897 {
30898 struct hvc_struct *hp;
30899 int i;
30900diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
30901index 10950ca..ed176c3 100644
30902--- a/drivers/char/hvc_console.h
30903+++ b/drivers/char/hvc_console.h
30904@@ -55,7 +55,7 @@ struct hvc_struct {
30905 int outbuf_size;
30906 int n_outbuf;
30907 uint32_t vtermno;
30908- struct hv_ops *ops;
30909+ const struct hv_ops *ops;
30910 int irq_requested;
30911 int data;
30912 struct winsize ws;
30913@@ -76,11 +76,11 @@ struct hv_ops {
30914 };
30915
30916 /* Register a vterm and a slot index for use as a console (console_init) */
30917-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
30918+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
30919
30920 /* register a vterm for hvc tty operation (module_init or hotplug add) */
30921 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
30922- struct hv_ops *ops, int outbuf_size);
30923+ const struct hv_ops *ops, int outbuf_size);
30924 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
30925 extern int hvc_remove(struct hvc_struct *hp);
30926
30927diff --git a/drivers/char/hvc_iseries.c b/drivers/char/hvc_iseries.c
30928index 936d05b..fd02426 100644
30929--- a/drivers/char/hvc_iseries.c
30930+++ b/drivers/char/hvc_iseries.c
30931@@ -197,7 +197,7 @@ done:
30932 return sent;
30933 }
30934
30935-static struct hv_ops hvc_get_put_ops = {
30936+static const struct hv_ops hvc_get_put_ops = {
30937 .get_chars = get_chars,
30938 .put_chars = put_chars,
30939 .notifier_add = notifier_add_irq,
30940diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
30941index b0e168f..69cda2a 100644
30942--- a/drivers/char/hvc_iucv.c
30943+++ b/drivers/char/hvc_iucv.c
30944@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(struct device *dev)
30945
30946
30947 /* HVC operations */
30948-static struct hv_ops hvc_iucv_ops = {
30949+static const struct hv_ops hvc_iucv_ops = {
30950 .get_chars = hvc_iucv_get_chars,
30951 .put_chars = hvc_iucv_put_chars,
30952 .notifier_add = hvc_iucv_notifier_add,
30953diff --git a/drivers/char/hvc_rtas.c b/drivers/char/hvc_rtas.c
30954index 88590d0..61c4a61 100644
30955--- a/drivers/char/hvc_rtas.c
30956+++ b/drivers/char/hvc_rtas.c
30957@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
30958 return i;
30959 }
30960
30961-static struct hv_ops hvc_rtas_get_put_ops = {
30962+static const struct hv_ops hvc_rtas_get_put_ops = {
30963 .get_chars = hvc_rtas_read_console,
30964 .put_chars = hvc_rtas_write_console,
30965 };
30966diff --git a/drivers/char/hvc_udbg.c b/drivers/char/hvc_udbg.c
30967index bd63ba8..b0957e6 100644
30968--- a/drivers/char/hvc_udbg.c
30969+++ b/drivers/char/hvc_udbg.c
30970@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno, char *buf, int count)
30971 return i;
30972 }
30973
30974-static struct hv_ops hvc_udbg_ops = {
30975+static const struct hv_ops hvc_udbg_ops = {
30976 .get_chars = hvc_udbg_get,
30977 .put_chars = hvc_udbg_put,
30978 };
30979diff --git a/drivers/char/hvc_vio.c b/drivers/char/hvc_vio.c
30980index 10be343..27370e9 100644
30981--- a/drivers/char/hvc_vio.c
30982+++ b/drivers/char/hvc_vio.c
30983@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t vtermno, char *buf, int count)
30984 return got;
30985 }
30986
30987-static struct hv_ops hvc_get_put_ops = {
30988+static const struct hv_ops hvc_get_put_ops = {
30989 .get_chars = filtered_get_chars,
30990 .put_chars = hvc_put_chars,
30991 .notifier_add = notifier_add_irq,
30992diff --git a/drivers/char/hvc_xen.c b/drivers/char/hvc_xen.c
30993index a6ee32b..94f8c26 100644
30994--- a/drivers/char/hvc_xen.c
30995+++ b/drivers/char/hvc_xen.c
30996@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno, char *buf, int len)
30997 return recv;
30998 }
30999
31000-static struct hv_ops hvc_ops = {
31001+static const struct hv_ops hvc_ops = {
31002 .get_chars = read_console,
31003 .put_chars = write_console,
31004 .notifier_add = notifier_add_irq,
31005diff --git a/drivers/char/hvcs.c b/drivers/char/hvcs.c
31006index 266b858..f3ee0bb 100644
31007--- a/drivers/char/hvcs.c
31008+++ b/drivers/char/hvcs.c
31009@@ -82,6 +82,7 @@
31010 #include <asm/hvcserver.h>
31011 #include <asm/uaccess.h>
31012 #include <asm/vio.h>
31013+#include <asm/local.h>
31014
31015 /*
31016 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
31017@@ -269,7 +270,7 @@ struct hvcs_struct {
31018 unsigned int index;
31019
31020 struct tty_struct *tty;
31021- int open_count;
31022+ local_t open_count;
31023
31024 /*
31025 * Used to tell the driver kernel_thread what operations need to take
31026@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
31027
31028 spin_lock_irqsave(&hvcsd->lock, flags);
31029
31030- if (hvcsd->open_count > 0) {
31031+ if (local_read(&hvcsd->open_count) > 0) {
31032 spin_unlock_irqrestore(&hvcsd->lock, flags);
31033 printk(KERN_INFO "HVCS: vterm state unchanged. "
31034 "The hvcs device node is still in use.\n");
31035@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
31036 if ((retval = hvcs_partner_connect(hvcsd)))
31037 goto error_release;
31038
31039- hvcsd->open_count = 1;
31040+ local_set(&hvcsd->open_count, 1);
31041 hvcsd->tty = tty;
31042 tty->driver_data = hvcsd;
31043
31044@@ -1169,7 +1170,7 @@ fast_open:
31045
31046 spin_lock_irqsave(&hvcsd->lock, flags);
31047 kref_get(&hvcsd->kref);
31048- hvcsd->open_count++;
31049+ local_inc(&hvcsd->open_count);
31050 hvcsd->todo_mask |= HVCS_SCHED_READ;
31051 spin_unlock_irqrestore(&hvcsd->lock, flags);
31052
31053@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31054 hvcsd = tty->driver_data;
31055
31056 spin_lock_irqsave(&hvcsd->lock, flags);
31057- if (--hvcsd->open_count == 0) {
31058+ if (local_dec_and_test(&hvcsd->open_count)) {
31059
31060 vio_disable_interrupts(hvcsd->vdev);
31061
31062@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
31063 free_irq(irq, hvcsd);
31064 kref_put(&hvcsd->kref, destroy_hvcs_struct);
31065 return;
31066- } else if (hvcsd->open_count < 0) {
31067+ } else if (local_read(&hvcsd->open_count) < 0) {
31068 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
31069 " is missmanaged.\n",
31070- hvcsd->vdev->unit_address, hvcsd->open_count);
31071+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
31072 }
31073
31074 spin_unlock_irqrestore(&hvcsd->lock, flags);
31075@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31076
31077 spin_lock_irqsave(&hvcsd->lock, flags);
31078 /* Preserve this so that we know how many kref refs to put */
31079- temp_open_count = hvcsd->open_count;
31080+ temp_open_count = local_read(&hvcsd->open_count);
31081
31082 /*
31083 * Don't kref put inside the spinlock because the destruction
31084@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struct * tty)
31085 hvcsd->tty->driver_data = NULL;
31086 hvcsd->tty = NULL;
31087
31088- hvcsd->open_count = 0;
31089+ local_set(&hvcsd->open_count, 0);
31090
31091 /* This will drop any buffered data on the floor which is OK in a hangup
31092 * scenario. */
31093@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct *tty,
31094 * the middle of a write operation? This is a crummy place to do this
31095 * but we want to keep it all in the spinlock.
31096 */
31097- if (hvcsd->open_count <= 0) {
31098+ if (local_read(&hvcsd->open_count) <= 0) {
31099 spin_unlock_irqrestore(&hvcsd->lock, flags);
31100 return -ENODEV;
31101 }
31102@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_struct *tty)
31103 {
31104 struct hvcs_struct *hvcsd = tty->driver_data;
31105
31106- if (!hvcsd || hvcsd->open_count <= 0)
31107+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
31108 return 0;
31109
31110 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
31111diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
31112index ec5e3f8..02455ba 100644
31113--- a/drivers/char/ipmi/ipmi_msghandler.c
31114+++ b/drivers/char/ipmi/ipmi_msghandler.c
31115@@ -414,7 +414,7 @@ struct ipmi_smi {
31116 struct proc_dir_entry *proc_dir;
31117 char proc_dir_name[10];
31118
31119- atomic_t stats[IPMI_NUM_STATS];
31120+ atomic_unchecked_t stats[IPMI_NUM_STATS];
31121
31122 /*
31123 * run_to_completion duplicate of smb_info, smi_info
31124@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
31125
31126
31127 #define ipmi_inc_stat(intf, stat) \
31128- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
31129+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
31130 #define ipmi_get_stat(intf, stat) \
31131- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
31132+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
31133
31134 static int is_lan_addr(struct ipmi_addr *addr)
31135 {
31136@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
31137 INIT_LIST_HEAD(&intf->cmd_rcvrs);
31138 init_waitqueue_head(&intf->waitq);
31139 for (i = 0; i < IPMI_NUM_STATS; i++)
31140- atomic_set(&intf->stats[i], 0);
31141+ atomic_set_unchecked(&intf->stats[i], 0);
31142
31143 intf->proc_dir = NULL;
31144
31145@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
31146 struct ipmi_smi_msg smi_msg;
31147 struct ipmi_recv_msg recv_msg;
31148
31149+ pax_track_stack();
31150+
31151 si = (struct ipmi_system_interface_addr *) &addr;
31152 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
31153 si->channel = IPMI_BMC_CHANNEL;
31154diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
31155index abae8c9..8021979 100644
31156--- a/drivers/char/ipmi/ipmi_si_intf.c
31157+++ b/drivers/char/ipmi/ipmi_si_intf.c
31158@@ -277,7 +277,7 @@ struct smi_info {
31159 unsigned char slave_addr;
31160
31161 /* Counters and things for the proc filesystem. */
31162- atomic_t stats[SI_NUM_STATS];
31163+ atomic_unchecked_t stats[SI_NUM_STATS];
31164
31165 struct task_struct *thread;
31166
31167@@ -285,9 +285,9 @@ struct smi_info {
31168 };
31169
31170 #define smi_inc_stat(smi, stat) \
31171- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
31172+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
31173 #define smi_get_stat(smi, stat) \
31174- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
31175+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
31176
31177 #define SI_MAX_PARMS 4
31178
31179@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info *new_smi)
31180 atomic_set(&new_smi->req_events, 0);
31181 new_smi->run_to_completion = 0;
31182 for (i = 0; i < SI_NUM_STATS; i++)
31183- atomic_set(&new_smi->stats[i], 0);
31184+ atomic_set_unchecked(&new_smi->stats[i], 0);
31185
31186 new_smi->interrupt_disabled = 0;
31187 atomic_set(&new_smi->stop_operation, 0);
31188diff --git a/drivers/char/istallion.c b/drivers/char/istallion.c
31189index 402838f..55e2200 100644
31190--- a/drivers/char/istallion.c
31191+++ b/drivers/char/istallion.c
31192@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios = {
31193 * re-used for each stats call.
31194 */
31195 static comstats_t stli_comstats;
31196-static combrd_t stli_brdstats;
31197 static struct asystats stli_cdkstats;
31198
31199 /*****************************************************************************/
31200@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __user *bp)
31201 {
31202 struct stlibrd *brdp;
31203 unsigned int i;
31204+ combrd_t stli_brdstats;
31205
31206 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
31207 return -EFAULT;
31208@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stliport __user *arg)
31209 struct stliport stli_dummyport;
31210 struct stliport *portp;
31211
31212+ pax_track_stack();
31213+
31214 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
31215 return -EFAULT;
31216 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
31217@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stlibrd __user *arg)
31218 struct stlibrd stli_dummybrd;
31219 struct stlibrd *brdp;
31220
31221+ pax_track_stack();
31222+
31223 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
31224 return -EFAULT;
31225 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
31226diff --git a/drivers/char/keyboard.c b/drivers/char/keyboard.c
31227index 950837c..e55a288 100644
31228--- a/drivers/char/keyboard.c
31229+++ b/drivers/char/keyboard.c
31230@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
31231 kbd->kbdmode == VC_MEDIUMRAW) &&
31232 value != KVAL(K_SAK))
31233 return; /* SAK is allowed even in raw mode */
31234+
31235+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
31236+ {
31237+ void *func = fn_handler[value];
31238+ if (func == fn_show_state || func == fn_show_ptregs ||
31239+ func == fn_show_mem)
31240+ return;
31241+ }
31242+#endif
31243+
31244 fn_handler[value](vc);
31245 }
31246
31247@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_ids[] = {
31248 .evbit = { BIT_MASK(EV_SND) },
31249 },
31250
31251- { }, /* Terminating entry */
31252+ { 0 }, /* Terminating entry */
31253 };
31254
31255 MODULE_DEVICE_TABLE(input, kbd_ids);
31256diff --git a/drivers/char/mbcs.c b/drivers/char/mbcs.c
31257index 87c67b4..230527a 100644
31258--- a/drivers/char/mbcs.c
31259+++ b/drivers/char/mbcs.c
31260@@ -799,7 +799,7 @@ static int mbcs_remove(struct cx_dev *dev)
31261 return 0;
31262 }
31263
31264-static const struct cx_device_id __devinitdata mbcs_id_table[] = {
31265+static const struct cx_device_id __devinitconst mbcs_id_table[] = {
31266 {
31267 .part_num = MBCS_PART_NUM,
31268 .mfg_num = MBCS_MFG_NUM,
31269diff --git a/drivers/char/mem.c b/drivers/char/mem.c
31270index 1270f64..8495f49 100644
31271--- a/drivers/char/mem.c
31272+++ b/drivers/char/mem.c
31273@@ -18,6 +18,7 @@
31274 #include <linux/raw.h>
31275 #include <linux/tty.h>
31276 #include <linux/capability.h>
31277+#include <linux/security.h>
31278 #include <linux/ptrace.h>
31279 #include <linux/device.h>
31280 #include <linux/highmem.h>
31281@@ -35,6 +36,10 @@
31282 # include <linux/efi.h>
31283 #endif
31284
31285+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31286+extern struct file_operations grsec_fops;
31287+#endif
31288+
31289 static inline unsigned long size_inside_page(unsigned long start,
31290 unsigned long size)
31291 {
31292@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31293
31294 while (cursor < to) {
31295 if (!devmem_is_allowed(pfn)) {
31296+#ifdef CONFIG_GRKERNSEC_KMEM
31297+ gr_handle_mem_readwrite(from, to);
31298+#else
31299 printk(KERN_INFO
31300 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
31301 current->comm, from, to);
31302+#endif
31303 return 0;
31304 }
31305 cursor += PAGE_SIZE;
31306@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31307 }
31308 return 1;
31309 }
31310+#elif defined(CONFIG_GRKERNSEC_KMEM)
31311+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31312+{
31313+ return 0;
31314+}
31315 #else
31316 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
31317 {
31318@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31319 #endif
31320
31321 while (count > 0) {
31322+ char *temp;
31323+
31324 /*
31325 * Handle first page in case it's not aligned
31326 */
31327@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * file, char __user * buf,
31328 if (!ptr)
31329 return -EFAULT;
31330
31331- if (copy_to_user(buf, ptr, sz)) {
31332+#ifdef CONFIG_PAX_USERCOPY
31333+ temp = kmalloc(sz, GFP_KERNEL);
31334+ if (!temp) {
31335+ unxlate_dev_mem_ptr(p, ptr);
31336+ return -ENOMEM;
31337+ }
31338+ memcpy(temp, ptr, sz);
31339+#else
31340+ temp = ptr;
31341+#endif
31342+
31343+ if (copy_to_user(buf, temp, sz)) {
31344+
31345+#ifdef CONFIG_PAX_USERCOPY
31346+ kfree(temp);
31347+#endif
31348+
31349 unxlate_dev_mem_ptr(p, ptr);
31350 return -EFAULT;
31351 }
31352
31353+#ifdef CONFIG_PAX_USERCOPY
31354+ kfree(temp);
31355+#endif
31356+
31357 unxlate_dev_mem_ptr(p, ptr);
31358
31359 buf += sz;
31360@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31361 size_t count, loff_t *ppos)
31362 {
31363 unsigned long p = *ppos;
31364- ssize_t low_count, read, sz;
31365+ ssize_t low_count, read, sz, err = 0;
31366 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
31367- int err = 0;
31368
31369 read = 0;
31370 if (p < (unsigned long) high_memory) {
31371@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31372 }
31373 #endif
31374 while (low_count > 0) {
31375+ char *temp;
31376+
31377 sz = size_inside_page(p, low_count);
31378
31379 /*
31380@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
31381 */
31382 kbuf = xlate_dev_kmem_ptr((char *)p);
31383
31384- if (copy_to_user(buf, kbuf, sz))
31385+#ifdef CONFIG_PAX_USERCOPY
31386+ temp = kmalloc(sz, GFP_KERNEL);
31387+ if (!temp)
31388+ return -ENOMEM;
31389+ memcpy(temp, kbuf, sz);
31390+#else
31391+ temp = kbuf;
31392+#endif
31393+
31394+ err = copy_to_user(buf, temp, sz);
31395+
31396+#ifdef CONFIG_PAX_USERCOPY
31397+ kfree(temp);
31398+#endif
31399+
31400+ if (err)
31401 return -EFAULT;
31402 buf += sz;
31403 p += sz;
31404@@ -889,6 +941,9 @@ static const struct memdev {
31405 #ifdef CONFIG_CRASH_DUMP
31406 [12] = { "oldmem", 0, &oldmem_fops, NULL },
31407 #endif
31408+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
31409+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
31410+#endif
31411 };
31412
31413 static int memory_open(struct inode *inode, struct file *filp)
31414diff --git a/drivers/char/pcmcia/ipwireless/tty.c b/drivers/char/pcmcia/ipwireless/tty.c
31415index 674b3ab..a8d1970 100644
31416--- a/drivers/char/pcmcia/ipwireless/tty.c
31417+++ b/drivers/char/pcmcia/ipwireless/tty.c
31418@@ -29,6 +29,7 @@
31419 #include <linux/tty_driver.h>
31420 #include <linux/tty_flip.h>
31421 #include <linux/uaccess.h>
31422+#include <asm/local.h>
31423
31424 #include "tty.h"
31425 #include "network.h"
31426@@ -51,7 +52,7 @@ struct ipw_tty {
31427 int tty_type;
31428 struct ipw_network *network;
31429 struct tty_struct *linux_tty;
31430- int open_count;
31431+ local_t open_count;
31432 unsigned int control_lines;
31433 struct mutex ipw_tty_mutex;
31434 int tx_bytes_queued;
31435@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31436 mutex_unlock(&tty->ipw_tty_mutex);
31437 return -ENODEV;
31438 }
31439- if (tty->open_count == 0)
31440+ if (local_read(&tty->open_count) == 0)
31441 tty->tx_bytes_queued = 0;
31442
31443- tty->open_count++;
31444+ local_inc(&tty->open_count);
31445
31446 tty->linux_tty = linux_tty;
31447 linux_tty->driver_data = tty;
31448@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
31449
31450 static void do_ipw_close(struct ipw_tty *tty)
31451 {
31452- tty->open_count--;
31453-
31454- if (tty->open_count == 0) {
31455+ if (local_dec_return(&tty->open_count) == 0) {
31456 struct tty_struct *linux_tty = tty->linux_tty;
31457
31458 if (linux_tty != NULL) {
31459@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
31460 return;
31461
31462 mutex_lock(&tty->ipw_tty_mutex);
31463- if (tty->open_count == 0) {
31464+ if (local_read(&tty->open_count) == 0) {
31465 mutex_unlock(&tty->ipw_tty_mutex);
31466 return;
31467 }
31468@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
31469 return;
31470 }
31471
31472- if (!tty->open_count) {
31473+ if (!local_read(&tty->open_count)) {
31474 mutex_unlock(&tty->ipw_tty_mutex);
31475 return;
31476 }
31477@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *linux_tty,
31478 return -ENODEV;
31479
31480 mutex_lock(&tty->ipw_tty_mutex);
31481- if (!tty->open_count) {
31482+ if (!local_read(&tty->open_count)) {
31483 mutex_unlock(&tty->ipw_tty_mutex);
31484 return -EINVAL;
31485 }
31486@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
31487 if (!tty)
31488 return -ENODEV;
31489
31490- if (!tty->open_count)
31491+ if (!local_read(&tty->open_count))
31492 return -EINVAL;
31493
31494 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
31495@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
31496 if (!tty)
31497 return 0;
31498
31499- if (!tty->open_count)
31500+ if (!local_read(&tty->open_count))
31501 return 0;
31502
31503 return tty->tx_bytes_queued;
31504@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty, struct file *file)
31505 if (!tty)
31506 return -ENODEV;
31507
31508- if (!tty->open_count)
31509+ if (!local_read(&tty->open_count))
31510 return -EINVAL;
31511
31512 return get_control_lines(tty);
31513@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tty, struct file *file,
31514 if (!tty)
31515 return -ENODEV;
31516
31517- if (!tty->open_count)
31518+ if (!local_read(&tty->open_count))
31519 return -EINVAL;
31520
31521 return set_control_lines(tty, set, clear);
31522@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty, struct file *file,
31523 if (!tty)
31524 return -ENODEV;
31525
31526- if (!tty->open_count)
31527+ if (!local_read(&tty->open_count))
31528 return -EINVAL;
31529
31530 /* FIXME: Exactly how is the tty object locked here .. */
31531@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
31532 against a parallel ioctl etc */
31533 mutex_lock(&ttyj->ipw_tty_mutex);
31534 }
31535- while (ttyj->open_count)
31536+ while (local_read(&ttyj->open_count))
31537 do_ipw_close(ttyj);
31538 ipwireless_disassociate_network_ttys(network,
31539 ttyj->channel_idx);
31540diff --git a/drivers/char/pty.c b/drivers/char/pty.c
31541index 62f282e..e45c45c 100644
31542--- a/drivers/char/pty.c
31543+++ b/drivers/char/pty.c
31544@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
31545 register_sysctl_table(pty_root_table);
31546
31547 /* Now create the /dev/ptmx special device */
31548+ pax_open_kernel();
31549 tty_default_fops(&ptmx_fops);
31550- ptmx_fops.open = ptmx_open;
31551+ *(void **)&ptmx_fops.open = ptmx_open;
31552+ pax_close_kernel();
31553
31554 cdev_init(&ptmx_cdev, &ptmx_fops);
31555 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
31556diff --git a/drivers/char/random.c b/drivers/char/random.c
31557index 3a19e2d..6ed09d3 100644
31558--- a/drivers/char/random.c
31559+++ b/drivers/char/random.c
31560@@ -254,8 +254,13 @@
31561 /*
31562 * Configuration information
31563 */
31564+#ifdef CONFIG_GRKERNSEC_RANDNET
31565+#define INPUT_POOL_WORDS 512
31566+#define OUTPUT_POOL_WORDS 128
31567+#else
31568 #define INPUT_POOL_WORDS 128
31569 #define OUTPUT_POOL_WORDS 32
31570+#endif
31571 #define SEC_XFER_SIZE 512
31572
31573 /*
31574@@ -292,10 +297,17 @@ static struct poolinfo {
31575 int poolwords;
31576 int tap1, tap2, tap3, tap4, tap5;
31577 } poolinfo_table[] = {
31578+#ifdef CONFIG_GRKERNSEC_RANDNET
31579+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
31580+ { 512, 411, 308, 208, 104, 1 },
31581+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
31582+ { 128, 103, 76, 51, 25, 1 },
31583+#else
31584 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
31585 { 128, 103, 76, 51, 25, 1 },
31586 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
31587 { 32, 26, 20, 14, 7, 1 },
31588+#endif
31589 #if 0
31590 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
31591 { 2048, 1638, 1231, 819, 411, 1 },
31592@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
31593 #include <linux/sysctl.h>
31594
31595 static int min_read_thresh = 8, min_write_thresh;
31596-static int max_read_thresh = INPUT_POOL_WORDS * 32;
31597+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
31598 static int max_write_thresh = INPUT_POOL_WORDS * 32;
31599 static char sysctl_bootid[16];
31600
31601diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c
31602index 0e29a23..0efc2c2 100644
31603--- a/drivers/char/rocket.c
31604+++ b/drivers/char/rocket.c
31605@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info, struct rocket_ports __user *retports)
31606 struct rocket_ports tmp;
31607 int board;
31608
31609+ pax_track_stack();
31610+
31611 if (!retports)
31612 return -EFAULT;
31613 memset(&tmp, 0, sizeof (tmp));
31614diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
31615index 8c262aa..4d3b058 100644
31616--- a/drivers/char/sonypi.c
31617+++ b/drivers/char/sonypi.c
31618@@ -55,6 +55,7 @@
31619 #include <asm/uaccess.h>
31620 #include <asm/io.h>
31621 #include <asm/system.h>
31622+#include <asm/local.h>
31623
31624 #include <linux/sonypi.h>
31625
31626@@ -491,7 +492,7 @@ static struct sonypi_device {
31627 spinlock_t fifo_lock;
31628 wait_queue_head_t fifo_proc_list;
31629 struct fasync_struct *fifo_async;
31630- int open_count;
31631+ local_t open_count;
31632 int model;
31633 struct input_dev *input_jog_dev;
31634 struct input_dev *input_key_dev;
31635@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
31636 static int sonypi_misc_release(struct inode *inode, struct file *file)
31637 {
31638 mutex_lock(&sonypi_device.lock);
31639- sonypi_device.open_count--;
31640+ local_dec(&sonypi_device.open_count);
31641 mutex_unlock(&sonypi_device.lock);
31642 return 0;
31643 }
31644@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
31645 lock_kernel();
31646 mutex_lock(&sonypi_device.lock);
31647 /* Flush input queue on first open */
31648- if (!sonypi_device.open_count)
31649+ if (!local_read(&sonypi_device.open_count))
31650 kfifo_reset(sonypi_device.fifo);
31651- sonypi_device.open_count++;
31652+ local_inc(&sonypi_device.open_count);
31653 mutex_unlock(&sonypi_device.lock);
31654 unlock_kernel();
31655 return 0;
31656diff --git a/drivers/char/stallion.c b/drivers/char/stallion.c
31657index db6dcfa..13834cb 100644
31658--- a/drivers/char/stallion.c
31659+++ b/drivers/char/stallion.c
31660@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlport __user *arg)
31661 struct stlport stl_dummyport;
31662 struct stlport *portp;
31663
31664+ pax_track_stack();
31665+
31666 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
31667 return -EFAULT;
31668 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
31669diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
31670index a0789f6..cea3902 100644
31671--- a/drivers/char/tpm/tpm.c
31672+++ b/drivers/char/tpm/tpm.c
31673@@ -405,7 +405,7 @@ static ssize_t tpm_transmit(struct tpm_chip *chip, const char *buf,
31674 chip->vendor.req_complete_val)
31675 goto out_recv;
31676
31677- if ((status == chip->vendor.req_canceled)) {
31678+ if (status == chip->vendor.req_canceled) {
31679 dev_err(chip->dev, "Operation Canceled\n");
31680 rc = -ECANCELED;
31681 goto out;
31682@@ -824,6 +824,8 @@ ssize_t tpm_show_pubek(struct device *dev, struct device_attribute *attr,
31683
31684 struct tpm_chip *chip = dev_get_drvdata(dev);
31685
31686+ pax_track_stack();
31687+
31688 tpm_cmd.header.in = tpm_readpubek_header;
31689 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
31690 "attempting to read the PUBEK");
31691diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
31692index bf2170f..ce8cab9 100644
31693--- a/drivers/char/tpm/tpm_bios.c
31694+++ b/drivers/char/tpm/tpm_bios.c
31695@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
31696 event = addr;
31697
31698 if ((event->event_type == 0 && event->event_size == 0) ||
31699- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
31700+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
31701 return NULL;
31702
31703 return addr;
31704@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
31705 return NULL;
31706
31707 if ((event->event_type == 0 && event->event_size == 0) ||
31708- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
31709+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
31710 return NULL;
31711
31712 (*pos)++;
31713@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
31714 int i;
31715
31716 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
31717- seq_putc(m, data[i]);
31718+ if (!seq_putc(m, data[i]))
31719+ return -EFAULT;
31720
31721 return 0;
31722 }
31723@@ -409,8 +410,13 @@ static int read_log(struct tpm_bios_log *log)
31724 log->bios_event_log_end = log->bios_event_log + len;
31725
31726 virt = acpi_os_map_memory(start, len);
31727+ if (!virt) {
31728+ kfree(log->bios_event_log);
31729+ log->bios_event_log = NULL;
31730+ return -EFAULT;
31731+ }
31732
31733- memcpy(log->bios_event_log, virt, len);
31734+ memcpy(log->bios_event_log, (const char __force_kernel *)virt, len);
31735
31736 acpi_os_unmap_memory(virt, len);
31737 return 0;
31738diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
31739index 123cedf..6664cb4 100644
31740--- a/drivers/char/tty_io.c
31741+++ b/drivers/char/tty_io.c
31742@@ -146,7 +146,7 @@ static int tty_open(struct inode *, struct file *);
31743 static int tty_release(struct inode *, struct file *);
31744 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
31745 #ifdef CONFIG_COMPAT
31746-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31747+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31748 unsigned long arg);
31749 #else
31750 #define tty_compat_ioctl NULL
31751@@ -1774,6 +1774,7 @@ got_driver:
31752
31753 if (IS_ERR(tty)) {
31754 mutex_unlock(&tty_mutex);
31755+ tty_driver_kref_put(driver);
31756 return PTR_ERR(tty);
31757 }
31758 }
31759@@ -2603,8 +2604,10 @@ long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
31760 return retval;
31761 }
31762
31763+EXPORT_SYMBOL(tty_ioctl);
31764+
31765 #ifdef CONFIG_COMPAT
31766-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31767+long tty_compat_ioctl(struct file *file, unsigned int cmd,
31768 unsigned long arg)
31769 {
31770 struct inode *inode = file->f_dentry->d_inode;
31771@@ -2628,6 +2631,8 @@ static long tty_compat_ioctl(struct file *file, unsigned int cmd,
31772
31773 return retval;
31774 }
31775+
31776+EXPORT_SYMBOL(tty_compat_ioctl);
31777 #endif
31778
31779 /*
31780@@ -3073,7 +3078,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
31781
31782 void tty_default_fops(struct file_operations *fops)
31783 {
31784- *fops = tty_fops;
31785+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
31786 }
31787
31788 /*
31789diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
31790index d814a3d..b55b9c9 100644
31791--- a/drivers/char/tty_ldisc.c
31792+++ b/drivers/char/tty_ldisc.c
31793@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *ld)
31794 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
31795 struct tty_ldisc_ops *ldo = ld->ops;
31796
31797- ldo->refcount--;
31798+ atomic_dec(&ldo->refcount);
31799 module_put(ldo->owner);
31800 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31801
31802@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
31803 spin_lock_irqsave(&tty_ldisc_lock, flags);
31804 tty_ldiscs[disc] = new_ldisc;
31805 new_ldisc->num = disc;
31806- new_ldisc->refcount = 0;
31807+ atomic_set(&new_ldisc->refcount, 0);
31808 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31809
31810 return ret;
31811@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
31812 return -EINVAL;
31813
31814 spin_lock_irqsave(&tty_ldisc_lock, flags);
31815- if (tty_ldiscs[disc]->refcount)
31816+ if (atomic_read(&tty_ldiscs[disc]->refcount))
31817 ret = -EBUSY;
31818 else
31819 tty_ldiscs[disc] = NULL;
31820@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
31821 if (ldops) {
31822 ret = ERR_PTR(-EAGAIN);
31823 if (try_module_get(ldops->owner)) {
31824- ldops->refcount++;
31825+ atomic_inc(&ldops->refcount);
31826 ret = ldops;
31827 }
31828 }
31829@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
31830 unsigned long flags;
31831
31832 spin_lock_irqsave(&tty_ldisc_lock, flags);
31833- ldops->refcount--;
31834+ atomic_dec(&ldops->refcount);
31835 module_put(ldops->owner);
31836 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
31837 }
31838diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
31839index a035ae3..c27fe2c 100644
31840--- a/drivers/char/virtio_console.c
31841+++ b/drivers/char/virtio_console.c
31842@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *buf, int count)
31843 * virtqueue, so we let the drivers do some boutique early-output thing. */
31844 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
31845 {
31846- virtio_cons.put_chars = put_chars;
31847+ pax_open_kernel();
31848+ *(void **)&virtio_cons.put_chars = put_chars;
31849+ pax_close_kernel();
31850 return hvc_instantiate(0, 0, &virtio_cons);
31851 }
31852
31853@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(struct virtio_device *dev)
31854 out_vq = vqs[1];
31855
31856 /* Start using the new console output. */
31857- virtio_cons.get_chars = get_chars;
31858- virtio_cons.put_chars = put_chars;
31859- virtio_cons.notifier_add = notifier_add_vio;
31860- virtio_cons.notifier_del = notifier_del_vio;
31861- virtio_cons.notifier_hangup = notifier_del_vio;
31862+ pax_open_kernel();
31863+ *(void **)&virtio_cons.get_chars = get_chars;
31864+ *(void **)&virtio_cons.put_chars = put_chars;
31865+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
31866+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
31867+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
31868+ pax_close_kernel();
31869
31870 /* The first argument of hvc_alloc() is the virtual console number, so
31871 * we use zero. The second argument is the parameter for the
31872diff --git a/drivers/char/vt.c b/drivers/char/vt.c
31873index 0c80c68..53d59c1 100644
31874--- a/drivers/char/vt.c
31875+++ b/drivers/char/vt.c
31876@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier);
31877
31878 static void notify_write(struct vc_data *vc, unsigned int unicode)
31879 {
31880- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
31881+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
31882 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
31883 }
31884
31885diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
31886index 6351a26..999af95 100644
31887--- a/drivers/char/vt_ioctl.c
31888+++ b/drivers/char/vt_ioctl.c
31889@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31890 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
31891 return -EFAULT;
31892
31893- if (!capable(CAP_SYS_TTY_CONFIG))
31894- perm = 0;
31895-
31896 switch (cmd) {
31897 case KDGKBENT:
31898 key_map = key_maps[s];
31899@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm, struct kbd_str
31900 val = (i ? K_HOLE : K_NOSUCHMAP);
31901 return put_user(val, &user_kbe->kb_value);
31902 case KDSKBENT:
31903+ if (!capable(CAP_SYS_TTY_CONFIG))
31904+ perm = 0;
31905+
31906 if (!perm)
31907 return -EPERM;
31908+
31909 if (!i && v == K_NOSUCHMAP) {
31910 /* deallocate map */
31911 key_map = key_maps[s];
31912@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31913 int i, j, k;
31914 int ret;
31915
31916- if (!capable(CAP_SYS_TTY_CONFIG))
31917- perm = 0;
31918-
31919 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
31920 if (!kbs) {
31921 ret = -ENOMEM;
31922@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
31923 kfree(kbs);
31924 return ((p && *p) ? -EOVERFLOW : 0);
31925 case KDSKBSENT:
31926+ if (!capable(CAP_SYS_TTY_CONFIG))
31927+ perm = 0;
31928+
31929 if (!perm) {
31930 ret = -EPERM;
31931 goto reterr;
31932diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
31933index c7ae026..1769c1d 100644
31934--- a/drivers/cpufreq/cpufreq.c
31935+++ b/drivers/cpufreq/cpufreq.c
31936@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct kobject *kobj)
31937 complete(&policy->kobj_unregister);
31938 }
31939
31940-static struct sysfs_ops sysfs_ops = {
31941+static const struct sysfs_ops sysfs_ops = {
31942 .show = show,
31943 .store = store,
31944 };
31945diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
31946index 97b0038..2056670 100644
31947--- a/drivers/cpuidle/sysfs.c
31948+++ b/drivers/cpuidle/sysfs.c
31949@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobject * kobj, struct attribute * attr,
31950 return ret;
31951 }
31952
31953-static struct sysfs_ops cpuidle_sysfs_ops = {
31954+static const struct sysfs_ops cpuidle_sysfs_ops = {
31955 .show = cpuidle_show,
31956 .store = cpuidle_store,
31957 };
31958@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct kobject * kobj,
31959 return ret;
31960 }
31961
31962-static struct sysfs_ops cpuidle_state_sysfs_ops = {
31963+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
31964 .show = cpuidle_state_show,
31965 };
31966
31967@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpuidle = {
31968 .release = cpuidle_state_sysfs_release,
31969 };
31970
31971-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31972+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
31973 {
31974 kobject_put(&device->kobjs[i]->kobj);
31975 wait_for_completion(&device->kobjs[i]->kobj_unregister);
31976diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
31977index 5f753fc..0377ae9 100644
31978--- a/drivers/crypto/hifn_795x.c
31979+++ b/drivers/crypto/hifn_795x.c
31980@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device *dev, int encdec, u8 snum)
31981 0xCA, 0x34, 0x2B, 0x2E};
31982 struct scatterlist sg;
31983
31984+ pax_track_stack();
31985+
31986 memset(src, 0, sizeof(src));
31987 memset(ctx.key, 0, sizeof(ctx.key));
31988
31989diff --git a/drivers/crypto/padlock-aes.c b/drivers/crypto/padlock-aes.c
31990index 71e6482..de8d96c 100644
31991--- a/drivers/crypto/padlock-aes.c
31992+++ b/drivers/crypto/padlock-aes.c
31993@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
31994 struct crypto_aes_ctx gen_aes;
31995 int cpu;
31996
31997+ pax_track_stack();
31998+
31999 if (key_len % 8) {
32000 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
32001 return -EINVAL;
32002diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
32003index dcc4ab7..cc834bb 100644
32004--- a/drivers/dma/ioat/dma.c
32005+++ b/drivers/dma/ioat/dma.c
32006@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
32007 return entry->show(&chan->common, page);
32008 }
32009
32010-struct sysfs_ops ioat_sysfs_ops = {
32011+const struct sysfs_ops ioat_sysfs_ops = {
32012 .show = ioat_attr_show,
32013 };
32014
32015diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
32016index bbc3e78..f2db62c 100644
32017--- a/drivers/dma/ioat/dma.h
32018+++ b/drivers/dma/ioat/dma.h
32019@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
32020 unsigned long *phys_complete);
32021 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
32022 void ioat_kobject_del(struct ioatdma_device *device);
32023-extern struct sysfs_ops ioat_sysfs_ops;
32024+extern const struct sysfs_ops ioat_sysfs_ops;
32025 extern struct ioat_sysfs_entry ioat_version_attr;
32026 extern struct ioat_sysfs_entry ioat_cap_attr;
32027 #endif /* IOATDMA_H */
32028diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
32029index 9908c9e..3ceb0e5 100644
32030--- a/drivers/dma/ioat/dma_v3.c
32031+++ b/drivers/dma/ioat/dma_v3.c
32032@@ -71,10 +71,10 @@
32033 /* provide a lookup table for setting the source address in the base or
32034 * extended descriptor of an xor or pq descriptor
32035 */
32036-static const u8 xor_idx_to_desc __read_mostly = 0xd0;
32037-static const u8 xor_idx_to_field[] __read_mostly = { 1, 4, 5, 6, 7, 0, 1, 2 };
32038-static const u8 pq_idx_to_desc __read_mostly = 0xf8;
32039-static const u8 pq_idx_to_field[] __read_mostly = { 1, 4, 5, 0, 1, 2, 4, 5 };
32040+static const u8 xor_idx_to_desc = 0xd0;
32041+static const u8 xor_idx_to_field[] = { 1, 4, 5, 6, 7, 0, 1, 2 };
32042+static const u8 pq_idx_to_desc = 0xf8;
32043+static const u8 pq_idx_to_field[] = { 1, 4, 5, 0, 1, 2, 4, 5 };
32044
32045 static dma_addr_t xor_get_src(struct ioat_raw_descriptor *descs[2], int idx)
32046 {
32047diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
32048index 85c464a..afd1e73 100644
32049--- a/drivers/edac/amd64_edac.c
32050+++ b/drivers/edac/amd64_edac.c
32051@@ -3099,7 +3099,7 @@ static void __devexit amd64_remove_one_instance(struct pci_dev *pdev)
32052 * PCI core identifies what devices are on a system during boot, and then
32053 * inquiry this table to see if this driver is for a given device found.
32054 */
32055-static const struct pci_device_id amd64_pci_table[] __devinitdata = {
32056+static const struct pci_device_id amd64_pci_table[] __devinitconst = {
32057 {
32058 .vendor = PCI_VENDOR_ID_AMD,
32059 .device = PCI_DEVICE_ID_AMD_K8_NB_MEMCTL,
32060diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
32061index 2b95f1a..4f52793 100644
32062--- a/drivers/edac/amd76x_edac.c
32063+++ b/drivers/edac/amd76x_edac.c
32064@@ -322,7 +322,7 @@ static void __devexit amd76x_remove_one(struct pci_dev *pdev)
32065 edac_mc_free(mci);
32066 }
32067
32068-static const struct pci_device_id amd76x_pci_tbl[] __devinitdata = {
32069+static const struct pci_device_id amd76x_pci_tbl[] __devinitconst = {
32070 {
32071 PCI_VEND_DEV(AMD, FE_GATE_700C), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32072 AMD762},
32073diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
32074index d205d49..74c9672 100644
32075--- a/drivers/edac/e752x_edac.c
32076+++ b/drivers/edac/e752x_edac.c
32077@@ -1282,7 +1282,7 @@ static void __devexit e752x_remove_one(struct pci_dev *pdev)
32078 edac_mc_free(mci);
32079 }
32080
32081-static const struct pci_device_id e752x_pci_tbl[] __devinitdata = {
32082+static const struct pci_device_id e752x_pci_tbl[] __devinitconst = {
32083 {
32084 PCI_VEND_DEV(INTEL, 7520_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32085 E7520},
32086diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
32087index c7d11cc..c59c1ca 100644
32088--- a/drivers/edac/e7xxx_edac.c
32089+++ b/drivers/edac/e7xxx_edac.c
32090@@ -526,7 +526,7 @@ static void __devexit e7xxx_remove_one(struct pci_dev *pdev)
32091 edac_mc_free(mci);
32092 }
32093
32094-static const struct pci_device_id e7xxx_pci_tbl[] __devinitdata = {
32095+static const struct pci_device_id e7xxx_pci_tbl[] __devinitconst = {
32096 {
32097 PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32098 E7205},
32099diff --git a/drivers/edac/edac_device_sysfs.c b/drivers/edac/edac_device_sysfs.c
32100index 5376457..5fdedbc 100644
32101--- a/drivers/edac/edac_device_sysfs.c
32102+++ b/drivers/edac/edac_device_sysfs.c
32103@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(struct kobject *kobj,
32104 }
32105
32106 /* edac_dev file operations for an 'ctl_info' */
32107-static struct sysfs_ops device_ctl_info_ops = {
32108+static const struct sysfs_ops device_ctl_info_ops = {
32109 .show = edac_dev_ctl_info_show,
32110 .store = edac_dev_ctl_info_store
32111 };
32112@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(struct kobject *kobj,
32113 }
32114
32115 /* edac_dev file operations for an 'instance' */
32116-static struct sysfs_ops device_instance_ops = {
32117+static const struct sysfs_ops device_instance_ops = {
32118 .show = edac_dev_instance_show,
32119 .store = edac_dev_instance_store
32120 };
32121@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(struct kobject *kobj,
32122 }
32123
32124 /* edac_dev file operations for a 'block' */
32125-static struct sysfs_ops device_block_ops = {
32126+static const struct sysfs_ops device_block_ops = {
32127 .show = edac_dev_block_show,
32128 .store = edac_dev_block_store
32129 };
32130diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
32131index e1d4ce0..88840e9 100644
32132--- a/drivers/edac/edac_mc_sysfs.c
32133+++ b/drivers/edac/edac_mc_sysfs.c
32134@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kobject *kobj, struct attribute *attr,
32135 return -EIO;
32136 }
32137
32138-static struct sysfs_ops csrowfs_ops = {
32139+static const struct sysfs_ops csrowfs_ops = {
32140 .show = csrowdev_show,
32141 .store = csrowdev_store
32142 };
32143@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobject *kobj, struct attribute *attr,
32144 }
32145
32146 /* Intermediate show/store table */
32147-static struct sysfs_ops mci_ops = {
32148+static const struct sysfs_ops mci_ops = {
32149 .show = mcidev_show,
32150 .store = mcidev_store
32151 };
32152diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
32153index 422728c..d8d9c88 100644
32154--- a/drivers/edac/edac_pci_sysfs.c
32155+++ b/drivers/edac/edac_pci_sysfs.c
32156@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
32157 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
32158 static int edac_pci_poll_msec = 1000; /* one second workq period */
32159
32160-static atomic_t pci_parity_count = ATOMIC_INIT(0);
32161-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
32162+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
32163+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
32164
32165 static struct kobject *edac_pci_top_main_kobj;
32166 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
32167@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(struct kobject *kobj,
32168 }
32169
32170 /* fs_ops table */
32171-static struct sysfs_ops pci_instance_ops = {
32172+static const struct sysfs_ops pci_instance_ops = {
32173 .show = edac_pci_instance_show,
32174 .store = edac_pci_instance_store
32175 };
32176@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct kobject *kobj,
32177 return -EIO;
32178 }
32179
32180-static struct sysfs_ops edac_pci_sysfs_ops = {
32181+static const struct sysfs_ops edac_pci_sysfs_ops = {
32182 .show = edac_pci_dev_show,
32183 .store = edac_pci_dev_store
32184 };
32185@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32186 edac_printk(KERN_CRIT, EDAC_PCI,
32187 "Signaled System Error on %s\n",
32188 pci_name(dev));
32189- atomic_inc(&pci_nonparity_count);
32190+ atomic_inc_unchecked(&pci_nonparity_count);
32191 }
32192
32193 if (status & (PCI_STATUS_PARITY)) {
32194@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32195 "Master Data Parity Error on %s\n",
32196 pci_name(dev));
32197
32198- atomic_inc(&pci_parity_count);
32199+ atomic_inc_unchecked(&pci_parity_count);
32200 }
32201
32202 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32203@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32204 "Detected Parity Error on %s\n",
32205 pci_name(dev));
32206
32207- atomic_inc(&pci_parity_count);
32208+ atomic_inc_unchecked(&pci_parity_count);
32209 }
32210 }
32211
32212@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32213 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
32214 "Signaled System Error on %s\n",
32215 pci_name(dev));
32216- atomic_inc(&pci_nonparity_count);
32217+ atomic_inc_unchecked(&pci_nonparity_count);
32218 }
32219
32220 if (status & (PCI_STATUS_PARITY)) {
32221@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32222 "Master Data Parity Error on "
32223 "%s\n", pci_name(dev));
32224
32225- atomic_inc(&pci_parity_count);
32226+ atomic_inc_unchecked(&pci_parity_count);
32227 }
32228
32229 if (status & (PCI_STATUS_DETECTED_PARITY)) {
32230@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
32231 "Detected Parity Error on %s\n",
32232 pci_name(dev));
32233
32234- atomic_inc(&pci_parity_count);
32235+ atomic_inc_unchecked(&pci_parity_count);
32236 }
32237 }
32238 }
32239@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
32240 if (!check_pci_errors)
32241 return;
32242
32243- before_count = atomic_read(&pci_parity_count);
32244+ before_count = atomic_read_unchecked(&pci_parity_count);
32245
32246 /* scan all PCI devices looking for a Parity Error on devices and
32247 * bridges.
32248@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
32249 /* Only if operator has selected panic on PCI Error */
32250 if (edac_pci_get_panic_on_pe()) {
32251 /* If the count is different 'after' from 'before' */
32252- if (before_count != atomic_read(&pci_parity_count))
32253+ if (before_count != atomic_read_unchecked(&pci_parity_count))
32254 panic("EDAC: PCI Parity Error");
32255 }
32256 }
32257diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
32258index 6c9a0f2..9c1cf7e 100644
32259--- a/drivers/edac/i3000_edac.c
32260+++ b/drivers/edac/i3000_edac.c
32261@@ -471,7 +471,7 @@ static void __devexit i3000_remove_one(struct pci_dev *pdev)
32262 edac_mc_free(mci);
32263 }
32264
32265-static const struct pci_device_id i3000_pci_tbl[] __devinitdata = {
32266+static const struct pci_device_id i3000_pci_tbl[] __devinitconst = {
32267 {
32268 PCI_VEND_DEV(INTEL, 3000_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32269 I3000},
32270diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
32271index fde4db9..fe108f9 100644
32272--- a/drivers/edac/i3200_edac.c
32273+++ b/drivers/edac/i3200_edac.c
32274@@ -444,7 +444,7 @@ static void __devexit i3200_remove_one(struct pci_dev *pdev)
32275 edac_mc_free(mci);
32276 }
32277
32278-static const struct pci_device_id i3200_pci_tbl[] __devinitdata = {
32279+static const struct pci_device_id i3200_pci_tbl[] __devinitconst = {
32280 {
32281 PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32282 I3200},
32283diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
32284index adc10a2..57d4ccf 100644
32285--- a/drivers/edac/i5000_edac.c
32286+++ b/drivers/edac/i5000_edac.c
32287@@ -1516,7 +1516,7 @@ static void __devexit i5000_remove_one(struct pci_dev *pdev)
32288 *
32289 * The "E500P" device is the first device supported.
32290 */
32291-static const struct pci_device_id i5000_pci_tbl[] __devinitdata = {
32292+static const struct pci_device_id i5000_pci_tbl[] __devinitconst = {
32293 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_I5000_DEV16),
32294 .driver_data = I5000P},
32295
32296diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
32297index 22db05a..b2b5503 100644
32298--- a/drivers/edac/i5100_edac.c
32299+++ b/drivers/edac/i5100_edac.c
32300@@ -944,7 +944,7 @@ static void __devexit i5100_remove_one(struct pci_dev *pdev)
32301 edac_mc_free(mci);
32302 }
32303
32304-static const struct pci_device_id i5100_pci_tbl[] __devinitdata = {
32305+static const struct pci_device_id i5100_pci_tbl[] __devinitconst = {
32306 /* Device 16, Function 0, Channel 0 Memory Map, Error Flag/Mask, ... */
32307 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5100_16) },
32308 { 0, }
32309diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
32310index f99d106..f050710 100644
32311--- a/drivers/edac/i5400_edac.c
32312+++ b/drivers/edac/i5400_edac.c
32313@@ -1383,7 +1383,7 @@ static void __devexit i5400_remove_one(struct pci_dev *pdev)
32314 *
32315 * The "E500P" device is the first device supported.
32316 */
32317-static const struct pci_device_id i5400_pci_tbl[] __devinitdata = {
32318+static const struct pci_device_id i5400_pci_tbl[] __devinitconst = {
32319 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5400_ERR)},
32320 {0,} /* 0 terminated list. */
32321 };
32322diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
32323index 577760a..9ce16ce 100644
32324--- a/drivers/edac/i82443bxgx_edac.c
32325+++ b/drivers/edac/i82443bxgx_edac.c
32326@@ -381,7 +381,7 @@ static void __devexit i82443bxgx_edacmc_remove_one(struct pci_dev *pdev)
32327
32328 EXPORT_SYMBOL_GPL(i82443bxgx_edacmc_remove_one);
32329
32330-static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitdata = {
32331+static const struct pci_device_id i82443bxgx_pci_tbl[] __devinitconst = {
32332 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_0)},
32333 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443BX_2)},
32334 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0)},
32335diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
32336index c0088ba..64a7b98 100644
32337--- a/drivers/edac/i82860_edac.c
32338+++ b/drivers/edac/i82860_edac.c
32339@@ -271,7 +271,7 @@ static void __devexit i82860_remove_one(struct pci_dev *pdev)
32340 edac_mc_free(mci);
32341 }
32342
32343-static const struct pci_device_id i82860_pci_tbl[] __devinitdata = {
32344+static const struct pci_device_id i82860_pci_tbl[] __devinitconst = {
32345 {
32346 PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32347 I82860},
32348diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
32349index b2d83b9..a34357b 100644
32350--- a/drivers/edac/i82875p_edac.c
32351+++ b/drivers/edac/i82875p_edac.c
32352@@ -512,7 +512,7 @@ static void __devexit i82875p_remove_one(struct pci_dev *pdev)
32353 edac_mc_free(mci);
32354 }
32355
32356-static const struct pci_device_id i82875p_pci_tbl[] __devinitdata = {
32357+static const struct pci_device_id i82875p_pci_tbl[] __devinitconst = {
32358 {
32359 PCI_VEND_DEV(INTEL, 82875_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32360 I82875P},
32361diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
32362index 2eed3ea..87bbbd1 100644
32363--- a/drivers/edac/i82975x_edac.c
32364+++ b/drivers/edac/i82975x_edac.c
32365@@ -586,7 +586,7 @@ static void __devexit i82975x_remove_one(struct pci_dev *pdev)
32366 edac_mc_free(mci);
32367 }
32368
32369-static const struct pci_device_id i82975x_pci_tbl[] __devinitdata = {
32370+static const struct pci_device_id i82975x_pci_tbl[] __devinitconst = {
32371 {
32372 PCI_VEND_DEV(INTEL, 82975_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32373 I82975X
32374diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
32375index 9900675..78ac2b6 100644
32376--- a/drivers/edac/r82600_edac.c
32377+++ b/drivers/edac/r82600_edac.c
32378@@ -374,7 +374,7 @@ static void __devexit r82600_remove_one(struct pci_dev *pdev)
32379 edac_mc_free(mci);
32380 }
32381
32382-static const struct pci_device_id r82600_pci_tbl[] __devinitdata = {
32383+static const struct pci_device_id r82600_pci_tbl[] __devinitconst = {
32384 {
32385 PCI_DEVICE(PCI_VENDOR_ID_RADISYS, R82600_BRIDGE_ID)
32386 },
32387diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
32388index d4ec605..4cfec4e 100644
32389--- a/drivers/edac/x38_edac.c
32390+++ b/drivers/edac/x38_edac.c
32391@@ -441,7 +441,7 @@ static void __devexit x38_remove_one(struct pci_dev *pdev)
32392 edac_mc_free(mci);
32393 }
32394
32395-static const struct pci_device_id x38_pci_tbl[] __devinitdata = {
32396+static const struct pci_device_id x38_pci_tbl[] __devinitconst = {
32397 {
32398 PCI_VEND_DEV(INTEL, X38_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0,
32399 X38},
32400diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
32401index 3fc2ceb..daf098f 100644
32402--- a/drivers/firewire/core-card.c
32403+++ b/drivers/firewire/core-card.c
32404@@ -558,7 +558,7 @@ void fw_card_release(struct kref *kref)
32405
32406 void fw_core_remove_card(struct fw_card *card)
32407 {
32408- struct fw_card_driver dummy_driver = dummy_driver_template;
32409+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
32410
32411 card->driver->update_phy_reg(card, 4,
32412 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
32413diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
32414index 4560d8f..36db24a 100644
32415--- a/drivers/firewire/core-cdev.c
32416+++ b/drivers/firewire/core-cdev.c
32417@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct client *client,
32418 int ret;
32419
32420 if ((request->channels == 0 && request->bandwidth == 0) ||
32421- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
32422- request->bandwidth < 0)
32423+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
32424 return -EINVAL;
32425
32426 r = kmalloc(sizeof(*r), GFP_KERNEL);
32427diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
32428index da628c7..cf54a2c 100644
32429--- a/drivers/firewire/core-transaction.c
32430+++ b/drivers/firewire/core-transaction.c
32431@@ -36,6 +36,7 @@
32432 #include <linux/string.h>
32433 #include <linux/timer.h>
32434 #include <linux/types.h>
32435+#include <linux/sched.h>
32436
32437 #include <asm/byteorder.h>
32438
32439@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
32440 struct transaction_callback_data d;
32441 struct fw_transaction t;
32442
32443+ pax_track_stack();
32444+
32445 init_completion(&d.done);
32446 d.payload = payload;
32447 fw_send_request(card, &t, tcode, destination_id, generation, speed,
32448diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
32449index 7ff6e75..a2965d9 100644
32450--- a/drivers/firewire/core.h
32451+++ b/drivers/firewire/core.h
32452@@ -86,6 +86,7 @@ struct fw_card_driver {
32453
32454 int (*stop_iso)(struct fw_iso_context *ctx);
32455 };
32456+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
32457
32458 void fw_card_initialize(struct fw_card *card,
32459 const struct fw_card_driver *driver, struct device *device);
32460diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
32461index 3a2ccb0..82fd7c4 100644
32462--- a/drivers/firmware/dmi_scan.c
32463+++ b/drivers/firmware/dmi_scan.c
32464@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
32465 }
32466 }
32467 else {
32468- /*
32469- * no iounmap() for that ioremap(); it would be a no-op, but
32470- * it's so early in setup that sucker gets confused into doing
32471- * what it shouldn't if we actually call it.
32472- */
32473 p = dmi_ioremap(0xF0000, 0x10000);
32474 if (p == NULL)
32475 goto error;
32476@@ -667,7 +662,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
32477 if (buf == NULL)
32478 return -1;
32479
32480- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
32481+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
32482
32483 iounmap(buf);
32484 return 0;
32485diff --git a/drivers/firmware/edd.c b/drivers/firmware/edd.c
32486index 9e4f59d..110e24e 100644
32487--- a/drivers/firmware/edd.c
32488+++ b/drivers/firmware/edd.c
32489@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, struct attribute *attr, char *buf)
32490 return ret;
32491 }
32492
32493-static struct sysfs_ops edd_attr_ops = {
32494+static const struct sysfs_ops edd_attr_ops = {
32495 .show = edd_attr_show,
32496 };
32497
32498diff --git a/drivers/firmware/efivars.c b/drivers/firmware/efivars.c
32499index f4f709d..082f06e 100644
32500--- a/drivers/firmware/efivars.c
32501+++ b/drivers/firmware/efivars.c
32502@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct kobject *kobj, struct attribute *attr,
32503 return ret;
32504 }
32505
32506-static struct sysfs_ops efivar_attr_ops = {
32507+static const struct sysfs_ops efivar_attr_ops = {
32508 .show = efivar_attr_show,
32509 .store = efivar_attr_store,
32510 };
32511diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
32512index 051d1eb..0a5d4e7 100644
32513--- a/drivers/firmware/iscsi_ibft.c
32514+++ b/drivers/firmware/iscsi_ibft.c
32515@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struct kobject *kobj,
32516 return ret;
32517 }
32518
32519-static struct sysfs_ops ibft_attr_ops = {
32520+static const struct sysfs_ops ibft_attr_ops = {
32521 .show = ibft_show_attribute,
32522 };
32523
32524diff --git a/drivers/firmware/memmap.c b/drivers/firmware/memmap.c
32525index 56f9234..8c58c7b 100644
32526--- a/drivers/firmware/memmap.c
32527+++ b/drivers/firmware/memmap.c
32528@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
32529 NULL
32530 };
32531
32532-static struct sysfs_ops memmap_attr_ops = {
32533+static const struct sysfs_ops memmap_attr_ops = {
32534 .show = memmap_attr_show,
32535 };
32536
32537diff --git a/drivers/gpio/vr41xx_giu.c b/drivers/gpio/vr41xx_giu.c
32538index b16c9a8..2af7d3f 100644
32539--- a/drivers/gpio/vr41xx_giu.c
32540+++ b/drivers/gpio/vr41xx_giu.c
32541@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
32542 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
32543 maskl, pendl, maskh, pendh);
32544
32545- atomic_inc(&irq_err_count);
32546+ atomic_inc_unchecked(&irq_err_count);
32547
32548 return -EINVAL;
32549 }
32550diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
32551index bea6efc..3dc0f42 100644
32552--- a/drivers/gpu/drm/drm_crtc.c
32553+++ b/drivers/gpu/drm/drm_crtc.c
32554@@ -1323,7 +1323,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32555 */
32556 if ((out_resp->count_modes >= mode_count) && mode_count) {
32557 copied = 0;
32558- mode_ptr = (struct drm_mode_modeinfo *)(unsigned long)out_resp->modes_ptr;
32559+ mode_ptr = (struct drm_mode_modeinfo __user *)(unsigned long)out_resp->modes_ptr;
32560 list_for_each_entry(mode, &connector->modes, head) {
32561 drm_crtc_convert_to_umode(&u_mode, mode);
32562 if (copy_to_user(mode_ptr + copied,
32563@@ -1338,8 +1338,8 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32564
32565 if ((out_resp->count_props >= props_count) && props_count) {
32566 copied = 0;
32567- prop_ptr = (uint32_t *)(unsigned long)(out_resp->props_ptr);
32568- prop_values = (uint64_t *)(unsigned long)(out_resp->prop_values_ptr);
32569+ prop_ptr = (uint32_t __user *)(unsigned long)(out_resp->props_ptr);
32570+ prop_values = (uint64_t __user *)(unsigned long)(out_resp->prop_values_ptr);
32571 for (i = 0; i < DRM_CONNECTOR_MAX_PROPERTY; i++) {
32572 if (connector->property_ids[i] != 0) {
32573 if (put_user(connector->property_ids[i],
32574@@ -1361,7 +1361,7 @@ int drm_mode_getconnector(struct drm_device *dev, void *data,
32575
32576 if ((out_resp->count_encoders >= encoders_count) && encoders_count) {
32577 copied = 0;
32578- encoder_ptr = (uint32_t *)(unsigned long)(out_resp->encoders_ptr);
32579+ encoder_ptr = (uint32_t __user *)(unsigned long)(out_resp->encoders_ptr);
32580 for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
32581 if (connector->encoder_ids[i] != 0) {
32582 if (put_user(connector->encoder_ids[i],
32583@@ -1513,7 +1513,7 @@ int drm_mode_setcrtc(struct drm_device *dev, void *data,
32584 }
32585
32586 for (i = 0; i < crtc_req->count_connectors; i++) {
32587- set_connectors_ptr = (uint32_t *)(unsigned long)crtc_req->set_connectors_ptr;
32588+ set_connectors_ptr = (uint32_t __user *)(unsigned long)crtc_req->set_connectors_ptr;
32589 if (get_user(out_id, &set_connectors_ptr[i])) {
32590 ret = -EFAULT;
32591 goto out;
32592@@ -2118,7 +2118,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32593 out_resp->flags = property->flags;
32594
32595 if ((out_resp->count_values >= value_count) && value_count) {
32596- values_ptr = (uint64_t *)(unsigned long)out_resp->values_ptr;
32597+ values_ptr = (uint64_t __user *)(unsigned long)out_resp->values_ptr;
32598 for (i = 0; i < value_count; i++) {
32599 if (copy_to_user(values_ptr + i, &property->values[i], sizeof(uint64_t))) {
32600 ret = -EFAULT;
32601@@ -2131,7 +2131,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32602 if (property->flags & DRM_MODE_PROP_ENUM) {
32603 if ((out_resp->count_enum_blobs >= enum_count) && enum_count) {
32604 copied = 0;
32605- enum_ptr = (struct drm_mode_property_enum *)(unsigned long)out_resp->enum_blob_ptr;
32606+ enum_ptr = (struct drm_mode_property_enum __user *)(unsigned long)out_resp->enum_blob_ptr;
32607 list_for_each_entry(prop_enum, &property->enum_blob_list, head) {
32608
32609 if (copy_to_user(&enum_ptr[copied].value, &prop_enum->value, sizeof(uint64_t))) {
32610@@ -2154,7 +2154,7 @@ int drm_mode_getproperty_ioctl(struct drm_device *dev,
32611 if ((out_resp->count_enum_blobs >= blob_count) && blob_count) {
32612 copied = 0;
32613 blob_id_ptr = (uint32_t *)(unsigned long)out_resp->enum_blob_ptr;
32614- blob_length_ptr = (uint32_t *)(unsigned long)out_resp->values_ptr;
32615+ blob_length_ptr = (uint32_t __user *)(unsigned long)out_resp->values_ptr;
32616
32617 list_for_each_entry(prop_blob, &property->enum_blob_list, head) {
32618 if (put_user(prop_blob->base.id, blob_id_ptr + copied)) {
32619@@ -2226,7 +2226,7 @@ int drm_mode_getblob_ioctl(struct drm_device *dev,
32620 blob = obj_to_blob(obj);
32621
32622 if (out_resp->length == blob->length) {
32623- blob_ptr = (void *)(unsigned long)out_resp->data;
32624+ blob_ptr = (void __user *)(unsigned long)out_resp->data;
32625 if (copy_to_user(blob_ptr, blob->data, blob->length)){
32626 ret = -EFAULT;
32627 goto done;
32628diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
32629index 1b8745d..92fdbf6 100644
32630--- a/drivers/gpu/drm/drm_crtc_helper.c
32631+++ b/drivers/gpu/drm/drm_crtc_helper.c
32632@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
32633 struct drm_crtc *tmp;
32634 int crtc_mask = 1;
32635
32636- WARN(!crtc, "checking null crtc?");
32637+ BUG_ON(!crtc);
32638
32639 dev = crtc->dev;
32640
32641@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
32642
32643 adjusted_mode = drm_mode_duplicate(dev, mode);
32644
32645+ pax_track_stack();
32646+
32647 crtc->enabled = drm_helper_crtc_in_use(crtc);
32648
32649 if (!crtc->enabled)
32650diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
32651index 0e27d98..dec8768 100644
32652--- a/drivers/gpu/drm/drm_drv.c
32653+++ b/drivers/gpu/drm/drm_drv.c
32654@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
32655 char *kdata = NULL;
32656
32657 atomic_inc(&dev->ioctl_count);
32658- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
32659+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
32660 ++file_priv->ioctl_count;
32661
32662 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
32663diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
32664index 519161e..98c840c 100644
32665--- a/drivers/gpu/drm/drm_fops.c
32666+++ b/drivers/gpu/drm/drm_fops.c
32667@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device * dev)
32668 }
32669
32670 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
32671- atomic_set(&dev->counts[i], 0);
32672+ atomic_set_unchecked(&dev->counts[i], 0);
32673
32674 dev->sigdata.lock = NULL;
32675
32676@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct file *filp)
32677
32678 retcode = drm_open_helper(inode, filp, dev);
32679 if (!retcode) {
32680- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
32681+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
32682 spin_lock(&dev->count_lock);
32683- if (!dev->open_count++) {
32684+ if (local_inc_return(&dev->open_count) == 1) {
32685 spin_unlock(&dev->count_lock);
32686 retcode = drm_setup(dev);
32687 goto out;
32688@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, struct file *filp)
32689
32690 lock_kernel();
32691
32692- DRM_DEBUG("open_count = %d\n", dev->open_count);
32693+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
32694
32695 if (dev->driver->preclose)
32696 dev->driver->preclose(dev, file_priv);
32697@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, struct file *filp)
32698 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
32699 task_pid_nr(current),
32700 (long)old_encode_dev(file_priv->minor->device),
32701- dev->open_count);
32702+ local_read(&dev->open_count));
32703
32704 /* Release any auth tokens that might point to this file_priv,
32705 (do that under the drm_global_mutex) */
32706@@ -529,9 +529,9 @@ int drm_release(struct inode *inode, struct file *filp)
32707 * End inline drm_release
32708 */
32709
32710- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
32711+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
32712 spin_lock(&dev->count_lock);
32713- if (!--dev->open_count) {
32714+ if (local_dec_and_test(&dev->open_count)) {
32715 if (atomic_read(&dev->ioctl_count)) {
32716 DRM_ERROR("Device busy: %d\n",
32717 atomic_read(&dev->ioctl_count));
32718diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
32719index 8bf3770..79422805 100644
32720--- a/drivers/gpu/drm/drm_gem.c
32721+++ b/drivers/gpu/drm/drm_gem.c
32722@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
32723 spin_lock_init(&dev->object_name_lock);
32724 idr_init(&dev->object_name_idr);
32725 atomic_set(&dev->object_count, 0);
32726- atomic_set(&dev->object_memory, 0);
32727+ atomic_set_unchecked(&dev->object_memory, 0);
32728 atomic_set(&dev->pin_count, 0);
32729- atomic_set(&dev->pin_memory, 0);
32730+ atomic_set_unchecked(&dev->pin_memory, 0);
32731 atomic_set(&dev->gtt_count, 0);
32732- atomic_set(&dev->gtt_memory, 0);
32733+ atomic_set_unchecked(&dev->gtt_memory, 0);
32734
32735 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
32736 if (!mm) {
32737@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
32738 goto fput;
32739 }
32740 atomic_inc(&dev->object_count);
32741- atomic_add(obj->size, &dev->object_memory);
32742+ atomic_add_unchecked(obj->size, &dev->object_memory);
32743 return obj;
32744 fput:
32745 fput(obj->filp);
32746@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
32747
32748 fput(obj->filp);
32749 atomic_dec(&dev->object_count);
32750- atomic_sub(obj->size, &dev->object_memory);
32751+ atomic_sub_unchecked(obj->size, &dev->object_memory);
32752 kfree(obj);
32753 }
32754 EXPORT_SYMBOL(drm_gem_object_free);
32755diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
32756index f0f6c6b..34af322 100644
32757--- a/drivers/gpu/drm/drm_info.c
32758+++ b/drivers/gpu/drm/drm_info.c
32759@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
32760 struct drm_local_map *map;
32761 struct drm_map_list *r_list;
32762
32763- /* Hardcoded from _DRM_FRAME_BUFFER,
32764- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
32765- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
32766- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
32767+ static const char * const types[] = {
32768+ [_DRM_FRAME_BUFFER] = "FB",
32769+ [_DRM_REGISTERS] = "REG",
32770+ [_DRM_SHM] = "SHM",
32771+ [_DRM_AGP] = "AGP",
32772+ [_DRM_SCATTER_GATHER] = "SG",
32773+ [_DRM_CONSISTENT] = "PCI",
32774+ [_DRM_GEM] = "GEM" };
32775 const char *type;
32776 int i;
32777
32778@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
32779 map = r_list->map;
32780 if (!map)
32781 continue;
32782- if (map->type < 0 || map->type > 5)
32783+ if (map->type >= ARRAY_SIZE(types))
32784 type = "??";
32785 else
32786 type = types[map->type];
32787@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file *m, void* data)
32788 struct drm_device *dev = node->minor->dev;
32789
32790 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
32791- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
32792+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
32793 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
32794- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
32795- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
32796+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
32797+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
32798 seq_printf(m, "%d gtt total\n", dev->gtt_total);
32799 return 0;
32800 }
32801@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, void *data)
32802 mutex_lock(&dev->struct_mutex);
32803 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
32804 atomic_read(&dev->vma_count),
32805+#ifdef CONFIG_GRKERNSEC_HIDESYM
32806+ NULL, 0);
32807+#else
32808 high_memory, (u64)virt_to_phys(high_memory));
32809+#endif
32810
32811 list_for_each_entry(pt, &dev->vmalist, head) {
32812 vma = pt->vma;
32813@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, void *data)
32814 continue;
32815 seq_printf(m,
32816 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
32817- pt->pid, vma->vm_start, vma->vm_end,
32818+ pt->pid,
32819+#ifdef CONFIG_GRKERNSEC_HIDESYM
32820+ 0, 0,
32821+#else
32822+ vma->vm_start, vma->vm_end,
32823+#endif
32824 vma->vm_flags & VM_READ ? 'r' : '-',
32825 vma->vm_flags & VM_WRITE ? 'w' : '-',
32826 vma->vm_flags & VM_EXEC ? 'x' : '-',
32827 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
32828 vma->vm_flags & VM_LOCKED ? 'l' : '-',
32829 vma->vm_flags & VM_IO ? 'i' : '-',
32830+#ifdef CONFIG_GRKERNSEC_HIDESYM
32831+ 0);
32832+#else
32833 vma->vm_pgoff);
32834+#endif
32835
32836 #if defined(__i386__)
32837 pgprot = pgprot_val(vma->vm_page_prot);
32838diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
32839index 282d9fd..71e5f11 100644
32840--- a/drivers/gpu/drm/drm_ioc32.c
32841+++ b/drivers/gpu/drm/drm_ioc32.c
32842@@ -463,7 +463,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
32843 request = compat_alloc_user_space(nbytes);
32844 if (!access_ok(VERIFY_WRITE, request, nbytes))
32845 return -EFAULT;
32846- list = (struct drm_buf_desc *) (request + 1);
32847+ list = (struct drm_buf_desc __user *) (request + 1);
32848
32849 if (__put_user(count, &request->count)
32850 || __put_user(list, &request->list))
32851@@ -525,7 +525,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
32852 request = compat_alloc_user_space(nbytes);
32853 if (!access_ok(VERIFY_WRITE, request, nbytes))
32854 return -EFAULT;
32855- list = (struct drm_buf_pub *) (request + 1);
32856+ list = (struct drm_buf_pub __user *) (request + 1);
32857
32858 if (__put_user(count, &request->count)
32859 || __put_user(list, &request->list))
32860diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
32861index 9b9ff46..4ea724c 100644
32862--- a/drivers/gpu/drm/drm_ioctl.c
32863+++ b/drivers/gpu/drm/drm_ioctl.c
32864@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, void *data,
32865 stats->data[i].value =
32866 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
32867 else
32868- stats->data[i].value = atomic_read(&dev->counts[i]);
32869+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
32870 stats->data[i].type = dev->types[i];
32871 }
32872
32873diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
32874index e2f70a5..c703e86 100644
32875--- a/drivers/gpu/drm/drm_lock.c
32876+++ b/drivers/gpu/drm/drm_lock.c
32877@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32878 if (drm_lock_take(&master->lock, lock->context)) {
32879 master->lock.file_priv = file_priv;
32880 master->lock.lock_time = jiffies;
32881- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
32882+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
32883 break; /* Got lock */
32884 }
32885
32886@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
32887 return -EINVAL;
32888 }
32889
32890- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
32891+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
32892
32893 /* kernel_context_switch isn't used by any of the x86 drm
32894 * modules but is required by the Sparc driver.
32895diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
32896index 7d1d88c..b9131b2 100644
32897--- a/drivers/gpu/drm/i810/i810_dma.c
32898+++ b/drivers/gpu/drm/i810/i810_dma.c
32899@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
32900 dma->buflist[vertex->idx],
32901 vertex->discard, vertex->used);
32902
32903- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32904- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32905+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
32906+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32907 sarea_priv->last_enqueue = dev_priv->counter - 1;
32908 sarea_priv->last_dispatch = (int)hw_status[5];
32909
32910@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
32911 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
32912 mc->last_render);
32913
32914- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32915- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
32916+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
32917+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
32918 sarea_priv->last_enqueue = dev_priv->counter - 1;
32919 sarea_priv->last_dispatch = (int)hw_status[5];
32920
32921diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
32922index 21e2691..7321edd 100644
32923--- a/drivers/gpu/drm/i810/i810_drv.h
32924+++ b/drivers/gpu/drm/i810/i810_drv.h
32925@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
32926 int page_flipping;
32927
32928 wait_queue_head_t irq_queue;
32929- atomic_t irq_received;
32930- atomic_t irq_emitted;
32931+ atomic_unchecked_t irq_received;
32932+ atomic_unchecked_t irq_emitted;
32933
32934 int front_offset;
32935 } drm_i810_private_t;
32936diff --git a/drivers/gpu/drm/i830/i830_drv.h b/drivers/gpu/drm/i830/i830_drv.h
32937index da82afe..48a45de 100644
32938--- a/drivers/gpu/drm/i830/i830_drv.h
32939+++ b/drivers/gpu/drm/i830/i830_drv.h
32940@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
32941 int page_flipping;
32942
32943 wait_queue_head_t irq_queue;
32944- atomic_t irq_received;
32945- atomic_t irq_emitted;
32946+ atomic_unchecked_t irq_received;
32947+ atomic_unchecked_t irq_emitted;
32948
32949 int use_mi_batchbuffer_start;
32950
32951diff --git a/drivers/gpu/drm/i830/i830_irq.c b/drivers/gpu/drm/i830/i830_irq.c
32952index 91ec2bb..6f21fab 100644
32953--- a/drivers/gpu/drm/i830/i830_irq.c
32954+++ b/drivers/gpu/drm/i830/i830_irq.c
32955@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_IRQ_ARGS)
32956
32957 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
32958
32959- atomic_inc(&dev_priv->irq_received);
32960+ atomic_inc_unchecked(&dev_priv->irq_received);
32961 wake_up_interruptible(&dev_priv->irq_queue);
32962
32963 return IRQ_HANDLED;
32964@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_device * dev)
32965
32966 DRM_DEBUG("%s\n", __func__);
32967
32968- atomic_inc(&dev_priv->irq_emitted);
32969+ atomic_inc_unchecked(&dev_priv->irq_emitted);
32970
32971 BEGIN_LP_RING(2);
32972 OUT_RING(0);
32973 OUT_RING(GFX_OP_USER_INTERRUPT);
32974 ADVANCE_LP_RING();
32975
32976- return atomic_read(&dev_priv->irq_emitted);
32977+ return atomic_read_unchecked(&dev_priv->irq_emitted);
32978 }
32979
32980 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32981@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32982
32983 DRM_DEBUG("%s\n", __func__);
32984
32985- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32986+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32987 return 0;
32988
32989 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
32990@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_device * dev, int irq_nr)
32991
32992 for (;;) {
32993 __set_current_state(TASK_INTERRUPTIBLE);
32994- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
32995+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
32996 break;
32997 if ((signed)(end - jiffies) <= 0) {
32998 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
32999@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct drm_device * dev)
33000 I830_WRITE16(I830REG_HWSTAM, 0xffff);
33001 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
33002 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
33003- atomic_set(&dev_priv->irq_received, 0);
33004- atomic_set(&dev_priv->irq_emitted, 0);
33005+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33006+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
33007 init_waitqueue_head(&dev_priv->irq_queue);
33008 }
33009
33010diff --git a/drivers/gpu/drm/i915/dvo.h b/drivers/gpu/drm/i915/dvo.h
33011index 288fc50..c6092055 100644
33012--- a/drivers/gpu/drm/i915/dvo.h
33013+++ b/drivers/gpu/drm/i915/dvo.h
33014@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
33015 *
33016 * \return singly-linked list of modes or NULL if no modes found.
33017 */
33018- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
33019+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
33020
33021 /**
33022 * Clean up driver-specific bits of the output
33023 */
33024- void (*destroy) (struct intel_dvo_device *dvo);
33025+ void (* const destroy) (struct intel_dvo_device *dvo);
33026
33027 /**
33028 * Debugging hook to dump device registers to log file
33029 */
33030- void (*dump_regs)(struct intel_dvo_device *dvo);
33031+ void (* const dump_regs)(struct intel_dvo_device *dvo);
33032 };
33033
33034-extern struct intel_dvo_dev_ops sil164_ops;
33035-extern struct intel_dvo_dev_ops ch7xxx_ops;
33036-extern struct intel_dvo_dev_ops ivch_ops;
33037-extern struct intel_dvo_dev_ops tfp410_ops;
33038-extern struct intel_dvo_dev_ops ch7017_ops;
33039+extern const struct intel_dvo_dev_ops sil164_ops;
33040+extern const struct intel_dvo_dev_ops ch7xxx_ops;
33041+extern const struct intel_dvo_dev_ops ivch_ops;
33042+extern const struct intel_dvo_dev_ops tfp410_ops;
33043+extern const struct intel_dvo_dev_ops ch7017_ops;
33044
33045 #endif /* _INTEL_DVO_H */
33046diff --git a/drivers/gpu/drm/i915/dvo_ch7017.c b/drivers/gpu/drm/i915/dvo_ch7017.c
33047index 621815b..499d82e 100644
33048--- a/drivers/gpu/drm/i915/dvo_ch7017.c
33049+++ b/drivers/gpu/drm/i915/dvo_ch7017.c
33050@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_dvo_device *dvo)
33051 }
33052 }
33053
33054-struct intel_dvo_dev_ops ch7017_ops = {
33055+const struct intel_dvo_dev_ops ch7017_ops = {
33056 .init = ch7017_init,
33057 .detect = ch7017_detect,
33058 .mode_valid = ch7017_mode_valid,
33059diff --git a/drivers/gpu/drm/i915/dvo_ch7xxx.c b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33060index a9b8962..ac769ba 100644
33061--- a/drivers/gpu/drm/i915/dvo_ch7xxx.c
33062+++ b/drivers/gpu/drm/i915/dvo_ch7xxx.c
33063@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_dvo_device *dvo)
33064 }
33065 }
33066
33067-struct intel_dvo_dev_ops ch7xxx_ops = {
33068+const struct intel_dvo_dev_ops ch7xxx_ops = {
33069 .init = ch7xxx_init,
33070 .detect = ch7xxx_detect,
33071 .mode_valid = ch7xxx_mode_valid,
33072diff --git a/drivers/gpu/drm/i915/dvo_ivch.c b/drivers/gpu/drm/i915/dvo_ivch.c
33073index aa176f9..ed2930c 100644
33074--- a/drivers/gpu/drm/i915/dvo_ivch.c
33075+++ b/drivers/gpu/drm/i915/dvo_ivch.c
33076@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
33077 }
33078 }
33079
33080-struct intel_dvo_dev_ops ivch_ops= {
33081+const struct intel_dvo_dev_ops ivch_ops= {
33082 .init = ivch_init,
33083 .dpms = ivch_dpms,
33084 .save = ivch_save,
33085diff --git a/drivers/gpu/drm/i915/dvo_sil164.c b/drivers/gpu/drm/i915/dvo_sil164.c
33086index e1c1f73..7dbebcf 100644
33087--- a/drivers/gpu/drm/i915/dvo_sil164.c
33088+++ b/drivers/gpu/drm/i915/dvo_sil164.c
33089@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_dvo_device *dvo)
33090 }
33091 }
33092
33093-struct intel_dvo_dev_ops sil164_ops = {
33094+const struct intel_dvo_dev_ops sil164_ops = {
33095 .init = sil164_init,
33096 .detect = sil164_detect,
33097 .mode_valid = sil164_mode_valid,
33098diff --git a/drivers/gpu/drm/i915/dvo_tfp410.c b/drivers/gpu/drm/i915/dvo_tfp410.c
33099index 16dce84..7e1b6f8 100644
33100--- a/drivers/gpu/drm/i915/dvo_tfp410.c
33101+++ b/drivers/gpu/drm/i915/dvo_tfp410.c
33102@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_dvo_device *dvo)
33103 }
33104 }
33105
33106-struct intel_dvo_dev_ops tfp410_ops = {
33107+const struct intel_dvo_dev_ops tfp410_ops = {
33108 .init = tfp410_init,
33109 .detect = tfp410_detect,
33110 .mode_valid = tfp410_mode_valid,
33111diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
33112index 7e859d6..7d1cf2b 100644
33113--- a/drivers/gpu/drm/i915/i915_debugfs.c
33114+++ b/drivers/gpu/drm/i915/i915_debugfs.c
33115@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
33116 I915_READ(GTIMR));
33117 }
33118 seq_printf(m, "Interrupts received: %d\n",
33119- atomic_read(&dev_priv->irq_received));
33120+ atomic_read_unchecked(&dev_priv->irq_received));
33121 if (dev_priv->hw_status_page != NULL) {
33122 seq_printf(m, "Current sequence: %d\n",
33123 i915_get_gem_seqno(dev));
33124diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
33125index 5449239..7e4f68d 100644
33126--- a/drivers/gpu/drm/i915/i915_drv.c
33127+++ b/drivers/gpu/drm/i915/i915_drv.c
33128@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
33129 return i915_resume(dev);
33130 }
33131
33132-static struct vm_operations_struct i915_gem_vm_ops = {
33133+static const struct vm_operations_struct i915_gem_vm_ops = {
33134 .fault = i915_gem_fault,
33135 .open = drm_gem_vm_open,
33136 .close = drm_gem_vm_close,
33137diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
33138index 97163f7..c24c7c7 100644
33139--- a/drivers/gpu/drm/i915/i915_drv.h
33140+++ b/drivers/gpu/drm/i915/i915_drv.h
33141@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
33142 /* display clock increase/decrease */
33143 /* pll clock increase/decrease */
33144 /* clock gating init */
33145-};
33146+} __no_const;
33147
33148 typedef struct drm_i915_private {
33149 struct drm_device *dev;
33150@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
33151 int page_flipping;
33152
33153 wait_queue_head_t irq_queue;
33154- atomic_t irq_received;
33155+ atomic_unchecked_t irq_received;
33156 /** Protects user_irq_refcount and irq_mask_reg */
33157 spinlock_t user_irq_lock;
33158 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
33159diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
33160index 27a3074..eb3f959 100644
33161--- a/drivers/gpu/drm/i915/i915_gem.c
33162+++ b/drivers/gpu/drm/i915/i915_gem.c
33163@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
33164
33165 args->aper_size = dev->gtt_total;
33166 args->aper_available_size = (args->aper_size -
33167- atomic_read(&dev->pin_memory));
33168+ atomic_read_unchecked(&dev->pin_memory));
33169
33170 return 0;
33171 }
33172@@ -2058,7 +2058,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
33173
33174 if (obj_priv->gtt_space) {
33175 atomic_dec(&dev->gtt_count);
33176- atomic_sub(obj->size, &dev->gtt_memory);
33177+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
33178
33179 drm_mm_put_block(obj_priv->gtt_space);
33180 obj_priv->gtt_space = NULL;
33181@@ -2701,7 +2701,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
33182 goto search_free;
33183 }
33184 atomic_inc(&dev->gtt_count);
33185- atomic_add(obj->size, &dev->gtt_memory);
33186+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
33187
33188 /* Assert that the object is not currently in any GPU domain. As it
33189 * wasn't in the GTT, there shouldn't be any way it could have been in
33190@@ -3755,9 +3755,9 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
33191 "%d/%d gtt bytes\n",
33192 atomic_read(&dev->object_count),
33193 atomic_read(&dev->pin_count),
33194- atomic_read(&dev->object_memory),
33195- atomic_read(&dev->pin_memory),
33196- atomic_read(&dev->gtt_memory),
33197+ atomic_read_unchecked(&dev->object_memory),
33198+ atomic_read_unchecked(&dev->pin_memory),
33199+ atomic_read_unchecked(&dev->gtt_memory),
33200 dev->gtt_total);
33201 }
33202 goto err;
33203@@ -3989,7 +3989,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
33204 */
33205 if (obj_priv->pin_count == 1) {
33206 atomic_inc(&dev->pin_count);
33207- atomic_add(obj->size, &dev->pin_memory);
33208+ atomic_add_unchecked(obj->size, &dev->pin_memory);
33209 if (!obj_priv->active &&
33210 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
33211 !list_empty(&obj_priv->list))
33212@@ -4022,7 +4022,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
33213 list_move_tail(&obj_priv->list,
33214 &dev_priv->mm.inactive_list);
33215 atomic_dec(&dev->pin_count);
33216- atomic_sub(obj->size, &dev->pin_memory);
33217+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
33218 }
33219 i915_verify_inactive(dev, __FILE__, __LINE__);
33220 }
33221diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
33222index 63f28ad..f5469da 100644
33223--- a/drivers/gpu/drm/i915/i915_irq.c
33224+++ b/drivers/gpu/drm/i915/i915_irq.c
33225@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
33226 int irq_received;
33227 int ret = IRQ_NONE;
33228
33229- atomic_inc(&dev_priv->irq_received);
33230+ atomic_inc_unchecked(&dev_priv->irq_received);
33231
33232 if (IS_IGDNG(dev))
33233 return igdng_irq_handler(dev);
33234@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
33235 {
33236 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
33237
33238- atomic_set(&dev_priv->irq_received, 0);
33239+ atomic_set_unchecked(&dev_priv->irq_received, 0);
33240
33241 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
33242 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
33243diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
33244index 5d9c6a7..d1b0e29 100644
33245--- a/drivers/gpu/drm/i915/intel_sdvo.c
33246+++ b/drivers/gpu/drm/i915/intel_sdvo.c
33247@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
33248 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
33249
33250 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
33251- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33252+ pax_open_kernel();
33253+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
33254+ pax_close_kernel();
33255
33256 /* Read the regs to test if we can talk to the device */
33257 for (i = 0; i < 0x40; i++) {
33258diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
33259index be6c6b9..8615d9c 100644
33260--- a/drivers/gpu/drm/mga/mga_drv.h
33261+++ b/drivers/gpu/drm/mga/mga_drv.h
33262@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
33263 u32 clear_cmd;
33264 u32 maccess;
33265
33266- atomic_t vbl_received; /**< Number of vblanks received. */
33267+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
33268 wait_queue_head_t fence_queue;
33269- atomic_t last_fence_retired;
33270+ atomic_unchecked_t last_fence_retired;
33271 u32 next_fence_to_post;
33272
33273 unsigned int fb_cpp;
33274diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
33275index daa6041..a28a5da 100644
33276--- a/drivers/gpu/drm/mga/mga_irq.c
33277+++ b/drivers/gpu/drm/mga/mga_irq.c
33278@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
33279 if (crtc != 0)
33280 return 0;
33281
33282- return atomic_read(&dev_priv->vbl_received);
33283+ return atomic_read_unchecked(&dev_priv->vbl_received);
33284 }
33285
33286
33287@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33288 /* VBLANK interrupt */
33289 if (status & MGA_VLINEPEN) {
33290 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
33291- atomic_inc(&dev_priv->vbl_received);
33292+ atomic_inc_unchecked(&dev_priv->vbl_received);
33293 drm_handle_vblank(dev, 0);
33294 handled = 1;
33295 }
33296@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
33297 MGA_WRITE(MGA_PRIMEND, prim_end);
33298 }
33299
33300- atomic_inc(&dev_priv->last_fence_retired);
33301+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
33302 DRM_WAKEUP(&dev_priv->fence_queue);
33303 handled = 1;
33304 }
33305@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
33306 * using fences.
33307 */
33308 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
33309- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
33310+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
33311 - *sequence) <= (1 << 23)));
33312
33313 *sequence = cur_fence;
33314diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
33315index 4c39a40..b22a9ea 100644
33316--- a/drivers/gpu/drm/r128/r128_cce.c
33317+++ b/drivers/gpu/drm/r128/r128_cce.c
33318@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device * dev, drm_r128_init_t * init)
33319
33320 /* GH: Simple idle check.
33321 */
33322- atomic_set(&dev_priv->idle_count, 0);
33323+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33324
33325 /* We don't support anything other than bus-mastering ring mode,
33326 * but the ring can be in either AGP or PCI space for the ring
33327diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
33328index 3c60829..4faf484 100644
33329--- a/drivers/gpu/drm/r128/r128_drv.h
33330+++ b/drivers/gpu/drm/r128/r128_drv.h
33331@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
33332 int is_pci;
33333 unsigned long cce_buffers_offset;
33334
33335- atomic_t idle_count;
33336+ atomic_unchecked_t idle_count;
33337
33338 int page_flipping;
33339 int current_page;
33340 u32 crtc_offset;
33341 u32 crtc_offset_cntl;
33342
33343- atomic_t vbl_received;
33344+ atomic_unchecked_t vbl_received;
33345
33346 u32 color_fmt;
33347 unsigned int front_offset;
33348diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
33349index 69810fb..97bf17a 100644
33350--- a/drivers/gpu/drm/r128/r128_irq.c
33351+++ b/drivers/gpu/drm/r128/r128_irq.c
33352@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
33353 if (crtc != 0)
33354 return 0;
33355
33356- return atomic_read(&dev_priv->vbl_received);
33357+ return atomic_read_unchecked(&dev_priv->vbl_received);
33358 }
33359
33360 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33361@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
33362 /* VBLANK interrupt */
33363 if (status & R128_CRTC_VBLANK_INT) {
33364 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
33365- atomic_inc(&dev_priv->vbl_received);
33366+ atomic_inc_unchecked(&dev_priv->vbl_received);
33367 drm_handle_vblank(dev, 0);
33368 return IRQ_HANDLED;
33369 }
33370diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
33371index af2665c..51922d2 100644
33372--- a/drivers/gpu/drm/r128/r128_state.c
33373+++ b/drivers/gpu/drm/r128/r128_state.c
33374@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_private_t * dev_priv,
33375
33376 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
33377 {
33378- if (atomic_read(&dev_priv->idle_count) == 0) {
33379+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
33380 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
33381 } else {
33382- atomic_set(&dev_priv->idle_count, 0);
33383+ atomic_set_unchecked(&dev_priv->idle_count, 0);
33384 }
33385 }
33386
33387diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
33388index dd72b91..8644b3c 100644
33389--- a/drivers/gpu/drm/radeon/atom.c
33390+++ b/drivers/gpu/drm/radeon/atom.c
33391@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct card_info *card, void *bios)
33392 char name[512];
33393 int i;
33394
33395+ pax_track_stack();
33396+
33397 ctx->card = card;
33398 ctx->bios = bios;
33399
33400diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
33401index 0d79577..efaa7a5 100644
33402--- a/drivers/gpu/drm/radeon/mkregtable.c
33403+++ b/drivers/gpu/drm/radeon/mkregtable.c
33404@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
33405 regex_t mask_rex;
33406 regmatch_t match[4];
33407 char buf[1024];
33408- size_t end;
33409+ long end;
33410 int len;
33411 int done = 0;
33412 int r;
33413 unsigned o;
33414 struct offset *offset;
33415 char last_reg_s[10];
33416- int last_reg;
33417+ unsigned long last_reg;
33418
33419 if (regcomp
33420 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
33421diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
33422index 6735213..38c2c67 100644
33423--- a/drivers/gpu/drm/radeon/radeon.h
33424+++ b/drivers/gpu/drm/radeon/radeon.h
33425@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device *rdev);
33426 */
33427 struct radeon_fence_driver {
33428 uint32_t scratch_reg;
33429- atomic_t seq;
33430+ atomic_unchecked_t seq;
33431 uint32_t last_seq;
33432 unsigned long count_timeout;
33433 wait_queue_head_t queue;
33434@@ -640,7 +640,7 @@ struct radeon_asic {
33435 uint32_t offset, uint32_t obj_size);
33436 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
33437 void (*bandwidth_update)(struct radeon_device *rdev);
33438-};
33439+} __no_const;
33440
33441 /*
33442 * Asic structures
33443diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
33444index 4e928b9..d8b6008 100644
33445--- a/drivers/gpu/drm/radeon/radeon_atombios.c
33446+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
33447@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
33448 bool linkb;
33449 struct radeon_i2c_bus_rec ddc_bus;
33450
33451+ pax_track_stack();
33452+
33453 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33454
33455 if (data_offset == 0)
33456@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
33457 }
33458 }
33459
33460-struct bios_connector {
33461+static struct bios_connector {
33462 bool valid;
33463 uint16_t line_mux;
33464 uint16_t devices;
33465 int connector_type;
33466 struct radeon_i2c_bus_rec ddc_bus;
33467-};
33468+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33469
33470 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33471 drm_device
33472@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
33473 uint8_t dac;
33474 union atom_supported_devices *supported_devices;
33475 int i, j;
33476- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
33477
33478 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
33479
33480diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
33481index 083a181..ccccae0 100644
33482--- a/drivers/gpu/drm/radeon/radeon_display.c
33483+++ b/drivers/gpu/drm/radeon/radeon_display.c
33484@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pll *pll,
33485
33486 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
33487 error = freq - current_freq;
33488- error = error < 0 ? 0xffffffff : error;
33489+ error = (int32_t)error < 0 ? 0xffffffff : error;
33490 } else
33491 error = abs(current_freq - freq);
33492 vco_diff = abs(vco - best_vco);
33493diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
33494index 76e4070..193fa7f 100644
33495--- a/drivers/gpu/drm/radeon/radeon_drv.h
33496+++ b/drivers/gpu/drm/radeon/radeon_drv.h
33497@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
33498
33499 /* SW interrupt */
33500 wait_queue_head_t swi_queue;
33501- atomic_t swi_emitted;
33502+ atomic_unchecked_t swi_emitted;
33503 int vblank_crtc;
33504 uint32_t irq_enable_reg;
33505 uint32_t r500_disp_irq_reg;
33506diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
33507index 3beb26d..6ce9c4a 100644
33508--- a/drivers/gpu/drm/radeon/radeon_fence.c
33509+++ b/drivers/gpu/drm/radeon/radeon_fence.c
33510@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_device *rdev, struct radeon_fence *fence)
33511 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
33512 return 0;
33513 }
33514- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
33515+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
33516 if (!rdev->cp.ready) {
33517 /* FIXME: cp is not running assume everythings is done right
33518 * away
33519@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct radeon_device *rdev)
33520 return r;
33521 }
33522 WREG32(rdev->fence_drv.scratch_reg, 0);
33523- atomic_set(&rdev->fence_drv.seq, 0);
33524+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
33525 INIT_LIST_HEAD(&rdev->fence_drv.created);
33526 INIT_LIST_HEAD(&rdev->fence_drv.emited);
33527 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
33528diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
33529index a1bf11d..4a123c0 100644
33530--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
33531+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
33532@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
33533 request = compat_alloc_user_space(sizeof(*request));
33534 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
33535 || __put_user(req32.param, &request->param)
33536- || __put_user((void __user *)(unsigned long)req32.value,
33537+ || __put_user((unsigned long)req32.value,
33538 &request->value))
33539 return -EFAULT;
33540
33541diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
33542index b79ecc4..8dab92d 100644
33543--- a/drivers/gpu/drm/radeon/radeon_irq.c
33544+++ b/drivers/gpu/drm/radeon/radeon_irq.c
33545@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_device * dev)
33546 unsigned int ret;
33547 RING_LOCALS;
33548
33549- atomic_inc(&dev_priv->swi_emitted);
33550- ret = atomic_read(&dev_priv->swi_emitted);
33551+ atomic_inc_unchecked(&dev_priv->swi_emitted);
33552+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
33553
33554 BEGIN_RING(4);
33555 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
33556@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
33557 drm_radeon_private_t *dev_priv =
33558 (drm_radeon_private_t *) dev->dev_private;
33559
33560- atomic_set(&dev_priv->swi_emitted, 0);
33561+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
33562 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
33563
33564 dev->max_vblank_count = 0x001fffff;
33565diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
33566index 4747910..48ca4b3 100644
33567--- a/drivers/gpu/drm/radeon/radeon_state.c
33568+++ b/drivers/gpu/drm/radeon/radeon_state.c
33569@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
33570 {
33571 drm_radeon_private_t *dev_priv = dev->dev_private;
33572 drm_radeon_getparam_t *param = data;
33573- int value;
33574+ int value = 0;
33575
33576 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
33577
33578diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
33579index 1381e06..0e53b17 100644
33580--- a/drivers/gpu/drm/radeon/radeon_ttm.c
33581+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
33582@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_device *rdev)
33583 DRM_INFO("radeon: ttm finalized\n");
33584 }
33585
33586-static struct vm_operations_struct radeon_ttm_vm_ops;
33587-static const struct vm_operations_struct *ttm_vm_ops = NULL;
33588-
33589-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33590-{
33591- struct ttm_buffer_object *bo;
33592- int r;
33593-
33594- bo = (struct ttm_buffer_object *)vma->vm_private_data;
33595- if (bo == NULL) {
33596- return VM_FAULT_NOPAGE;
33597- }
33598- r = ttm_vm_ops->fault(vma, vmf);
33599- return r;
33600-}
33601-
33602 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33603 {
33604 struct drm_file *file_priv;
33605 struct radeon_device *rdev;
33606- int r;
33607
33608 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
33609 return drm_mmap(filp, vma);
33610@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
33611
33612 file_priv = (struct drm_file *)filp->private_data;
33613 rdev = file_priv->minor->dev->dev_private;
33614- if (rdev == NULL) {
33615+ if (!rdev)
33616 return -EINVAL;
33617- }
33618- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33619- if (unlikely(r != 0)) {
33620- return r;
33621- }
33622- if (unlikely(ttm_vm_ops == NULL)) {
33623- ttm_vm_ops = vma->vm_ops;
33624- radeon_ttm_vm_ops = *ttm_vm_ops;
33625- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
33626- }
33627- vma->vm_ops = &radeon_ttm_vm_ops;
33628- return 0;
33629+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
33630 }
33631
33632
33633diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
33634index b12ff76..0bd0c6e 100644
33635--- a/drivers/gpu/drm/radeon/rs690.c
33636+++ b/drivers/gpu/drm/radeon/rs690.c
33637@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
33638 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
33639 rdev->pm.sideport_bandwidth.full)
33640 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
33641- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
33642+ read_delay_latency.full = rfixed_const(800 * 1000);
33643 read_delay_latency.full = rfixed_div(read_delay_latency,
33644 rdev->pm.igp_sideport_mclk);
33645+ a.full = rfixed_const(370);
33646+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
33647 } else {
33648 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
33649 rdev->pm.k8_bandwidth.full)
33650diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
33651index 0ed436e..e6e7ce3 100644
33652--- a/drivers/gpu/drm/ttm/ttm_bo.c
33653+++ b/drivers/gpu/drm/ttm/ttm_bo.c
33654@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_attrs[] = {
33655 NULL
33656 };
33657
33658-static struct sysfs_ops ttm_bo_global_ops = {
33659+static const struct sysfs_ops ttm_bo_global_ops = {
33660 .show = &ttm_bo_global_show
33661 };
33662
33663diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33664index 1c040d0..f9e4af8 100644
33665--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
33666+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
33667@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33668 {
33669 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
33670 vma->vm_private_data;
33671- struct ttm_bo_device *bdev = bo->bdev;
33672+ struct ttm_bo_device *bdev;
33673 unsigned long bus_base;
33674 unsigned long bus_offset;
33675 unsigned long bus_size;
33676@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
33677 unsigned long address = (unsigned long)vmf->virtual_address;
33678 int retval = VM_FAULT_NOPAGE;
33679
33680+ if (!bo)
33681+ return VM_FAULT_NOPAGE;
33682+ bdev = bo->bdev;
33683+
33684 /*
33685 * Work around locking order reversal in fault / nopfn
33686 * between mmap_sem and bo_reserve: Perform a trylock operation
33687diff --git a/drivers/gpu/drm/ttm/ttm_global.c b/drivers/gpu/drm/ttm/ttm_global.c
33688index b170071..28ae90e 100644
33689--- a/drivers/gpu/drm/ttm/ttm_global.c
33690+++ b/drivers/gpu/drm/ttm/ttm_global.c
33691@@ -36,7 +36,7 @@
33692 struct ttm_global_item {
33693 struct mutex mutex;
33694 void *object;
33695- int refcount;
33696+ atomic_t refcount;
33697 };
33698
33699 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
33700@@ -49,7 +49,7 @@ void ttm_global_init(void)
33701 struct ttm_global_item *item = &glob[i];
33702 mutex_init(&item->mutex);
33703 item->object = NULL;
33704- item->refcount = 0;
33705+ atomic_set(&item->refcount, 0);
33706 }
33707 }
33708
33709@@ -59,7 +59,7 @@ void ttm_global_release(void)
33710 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
33711 struct ttm_global_item *item = &glob[i];
33712 BUG_ON(item->object != NULL);
33713- BUG_ON(item->refcount != 0);
33714+ BUG_ON(atomic_read(&item->refcount) != 0);
33715 }
33716 }
33717
33718@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33719 void *object;
33720
33721 mutex_lock(&item->mutex);
33722- if (item->refcount == 0) {
33723+ if (atomic_read(&item->refcount) == 0) {
33724 item->object = kzalloc(ref->size, GFP_KERNEL);
33725 if (unlikely(item->object == NULL)) {
33726 ret = -ENOMEM;
33727@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_global_reference *ref)
33728 goto out_err;
33729
33730 }
33731- ++item->refcount;
33732+ atomic_inc(&item->refcount);
33733 ref->object = item->object;
33734 object = item->object;
33735 mutex_unlock(&item->mutex);
33736@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_global_reference *ref)
33737 struct ttm_global_item *item = &glob[ref->global_type];
33738
33739 mutex_lock(&item->mutex);
33740- BUG_ON(item->refcount == 0);
33741+ BUG_ON(atomic_read(&item->refcount) == 0);
33742 BUG_ON(ref->object != item->object);
33743- if (--item->refcount == 0) {
33744+ if (atomic_dec_and_test(&item->refcount)) {
33745 ref->release(ref);
33746 item->object = NULL;
33747 }
33748diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
33749index 072c281..d8ef483 100644
33750--- a/drivers/gpu/drm/ttm/ttm_memory.c
33751+++ b/drivers/gpu/drm/ttm/ttm_memory.c
33752@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_attrs[] = {
33753 NULL
33754 };
33755
33756-static struct sysfs_ops ttm_mem_zone_ops = {
33757+static const struct sysfs_ops ttm_mem_zone_ops = {
33758 .show = &ttm_mem_zone_show,
33759 .store = &ttm_mem_zone_store
33760 };
33761diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
33762index cafcb84..b8e66cc 100644
33763--- a/drivers/gpu/drm/via/via_drv.h
33764+++ b/drivers/gpu/drm/via/via_drv.h
33765@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
33766 typedef uint32_t maskarray_t[5];
33767
33768 typedef struct drm_via_irq {
33769- atomic_t irq_received;
33770+ atomic_unchecked_t irq_received;
33771 uint32_t pending_mask;
33772 uint32_t enable_mask;
33773 wait_queue_head_t irq_queue;
33774@@ -75,7 +75,7 @@ typedef struct drm_via_private {
33775 struct timeval last_vblank;
33776 int last_vblank_valid;
33777 unsigned usec_per_vblank;
33778- atomic_t vbl_received;
33779+ atomic_unchecked_t vbl_received;
33780 drm_via_state_t hc_state;
33781 char pci_buf[VIA_PCI_BUF_SIZE];
33782 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
33783diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
33784index 5935b88..127a8a6 100644
33785--- a/drivers/gpu/drm/via/via_irq.c
33786+++ b/drivers/gpu/drm/via/via_irq.c
33787@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
33788 if (crtc != 0)
33789 return 0;
33790
33791- return atomic_read(&dev_priv->vbl_received);
33792+ return atomic_read_unchecked(&dev_priv->vbl_received);
33793 }
33794
33795 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33796@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33797
33798 status = VIA_READ(VIA_REG_INTERRUPT);
33799 if (status & VIA_IRQ_VBLANK_PENDING) {
33800- atomic_inc(&dev_priv->vbl_received);
33801- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
33802+ atomic_inc_unchecked(&dev_priv->vbl_received);
33803+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
33804 do_gettimeofday(&cur_vblank);
33805 if (dev_priv->last_vblank_valid) {
33806 dev_priv->usec_per_vblank =
33807@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33808 dev_priv->last_vblank = cur_vblank;
33809 dev_priv->last_vblank_valid = 1;
33810 }
33811- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
33812+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
33813 DRM_DEBUG("US per vblank is: %u\n",
33814 dev_priv->usec_per_vblank);
33815 }
33816@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
33817
33818 for (i = 0; i < dev_priv->num_irqs; ++i) {
33819 if (status & cur_irq->pending_mask) {
33820- atomic_inc(&cur_irq->irq_received);
33821+ atomic_inc_unchecked(&cur_irq->irq_received);
33822 DRM_WAKEUP(&cur_irq->irq_queue);
33823 handled = 1;
33824 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
33825@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
33826 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33827 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
33828 masks[irq][4]));
33829- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
33830+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
33831 } else {
33832 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
33833 (((cur_irq_sequence =
33834- atomic_read(&cur_irq->irq_received)) -
33835+ atomic_read_unchecked(&cur_irq->irq_received)) -
33836 *sequence) <= (1 << 23)));
33837 }
33838 *sequence = cur_irq_sequence;
33839@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct drm_device * dev)
33840 }
33841
33842 for (i = 0; i < dev_priv->num_irqs; ++i) {
33843- atomic_set(&cur_irq->irq_received, 0);
33844+ atomic_set_unchecked(&cur_irq->irq_received, 0);
33845 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
33846 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
33847 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
33848@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
33849 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
33850 case VIA_IRQ_RELATIVE:
33851 irqwait->request.sequence +=
33852- atomic_read(&cur_irq->irq_received);
33853+ atomic_read_unchecked(&cur_irq->irq_received);
33854 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
33855 case VIA_IRQ_ABSOLUTE:
33856 break;
33857diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
33858index aa8688d..6a0140c 100644
33859--- a/drivers/gpu/vga/vgaarb.c
33860+++ b/drivers/gpu/vga/vgaarb.c
33861@@ -894,14 +894,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
33862 uc = &priv->cards[i];
33863 }
33864
33865- if (!uc)
33866- return -EINVAL;
33867+ if (!uc) {
33868+ ret_val = -EINVAL;
33869+ goto done;
33870+ }
33871
33872- if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
33873- return -EINVAL;
33874+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
33875+ ret_val = -EINVAL;
33876+ goto done;
33877+ }
33878
33879- if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
33880- return -EINVAL;
33881+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
33882+ ret_val = -EINVAL;
33883+ goto done;
33884+ }
33885
33886 vga_put(pdev, io_state);
33887
33888diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
33889index 11f8069..4783396 100644
33890--- a/drivers/hid/hid-core.c
33891+++ b/drivers/hid/hid-core.c
33892@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device *hdev)
33893
33894 int hid_add_device(struct hid_device *hdev)
33895 {
33896- static atomic_t id = ATOMIC_INIT(0);
33897+ static atomic_unchecked_t id = ATOMIC_INIT(0);
33898 int ret;
33899
33900 if (WARN_ON(hdev->status & HID_STAT_ADDED))
33901@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hdev)
33902 /* XXX hack, any other cleaner solution after the driver core
33903 * is converted to allow more than 20 bytes as the device name? */
33904 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
33905- hdev->vendor, hdev->product, atomic_inc_return(&id));
33906+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
33907
33908 ret = device_add(&hdev->dev);
33909 if (!ret)
33910diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
33911index 8b6ee24..70f657d 100644
33912--- a/drivers/hid/usbhid/hiddev.c
33913+++ b/drivers/hid/usbhid/hiddev.c
33914@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
33915 return put_user(HID_VERSION, (int __user *)arg);
33916
33917 case HIDIOCAPPLICATION:
33918- if (arg < 0 || arg >= hid->maxapplication)
33919+ if (arg >= hid->maxapplication)
33920 return -EINVAL;
33921
33922 for (i = 0; i < hid->maxcollection; i++)
33923diff --git a/drivers/hwmon/lis3lv02d.c b/drivers/hwmon/lis3lv02d.c
33924index 5d5ed69..f40533e 100644
33925--- a/drivers/hwmon/lis3lv02d.c
33926+++ b/drivers/hwmon/lis3lv02d.c
33927@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *dummy)
33928 * the lid is closed. This leads to interrupts as soon as a little move
33929 * is done.
33930 */
33931- atomic_inc(&lis3_dev.count);
33932+ atomic_inc_unchecked(&lis3_dev.count);
33933
33934 wake_up_interruptible(&lis3_dev.misc_wait);
33935 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
33936@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
33937 if (test_and_set_bit(0, &lis3_dev.misc_opened))
33938 return -EBUSY; /* already open */
33939
33940- atomic_set(&lis3_dev.count, 0);
33941+ atomic_set_unchecked(&lis3_dev.count, 0);
33942
33943 /*
33944 * The sensor can generate interrupts for free-fall and direction
33945@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
33946 add_wait_queue(&lis3_dev.misc_wait, &wait);
33947 while (true) {
33948 set_current_state(TASK_INTERRUPTIBLE);
33949- data = atomic_xchg(&lis3_dev.count, 0);
33950+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
33951 if (data)
33952 break;
33953
33954@@ -244,7 +244,7 @@ out:
33955 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
33956 {
33957 poll_wait(file, &lis3_dev.misc_wait, wait);
33958- if (atomic_read(&lis3_dev.count))
33959+ if (atomic_read_unchecked(&lis3_dev.count))
33960 return POLLIN | POLLRDNORM;
33961 return 0;
33962 }
33963diff --git a/drivers/hwmon/lis3lv02d.h b/drivers/hwmon/lis3lv02d.h
33964index 7cdd76f..fe0efdf 100644
33965--- a/drivers/hwmon/lis3lv02d.h
33966+++ b/drivers/hwmon/lis3lv02d.h
33967@@ -201,7 +201,7 @@ struct lis3lv02d {
33968
33969 struct input_polled_dev *idev; /* input device */
33970 struct platform_device *pdev; /* platform device */
33971- atomic_t count; /* interrupt count after last read */
33972+ atomic_unchecked_t count; /* interrupt count after last read */
33973 int xcalib; /* calibrated null value for x */
33974 int ycalib; /* calibrated null value for y */
33975 int zcalib; /* calibrated null value for z */
33976diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
33977index 740785e..5a5c6c6 100644
33978--- a/drivers/hwmon/sht15.c
33979+++ b/drivers/hwmon/sht15.c
33980@@ -112,7 +112,7 @@ struct sht15_data {
33981 int supply_uV;
33982 int supply_uV_valid;
33983 struct work_struct update_supply_work;
33984- atomic_t interrupt_handled;
33985+ atomic_unchecked_t interrupt_handled;
33986 };
33987
33988 /**
33989@@ -245,13 +245,13 @@ static inline int sht15_update_single_val(struct sht15_data *data,
33990 return ret;
33991
33992 gpio_direction_input(data->pdata->gpio_data);
33993- atomic_set(&data->interrupt_handled, 0);
33994+ atomic_set_unchecked(&data->interrupt_handled, 0);
33995
33996 enable_irq(gpio_to_irq(data->pdata->gpio_data));
33997 if (gpio_get_value(data->pdata->gpio_data) == 0) {
33998 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
33999 /* Only relevant if the interrupt hasn't occured. */
34000- if (!atomic_read(&data->interrupt_handled))
34001+ if (!atomic_read_unchecked(&data->interrupt_handled))
34002 schedule_work(&data->read_work);
34003 }
34004 ret = wait_event_timeout(data->wait_queue,
34005@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
34006 struct sht15_data *data = d;
34007 /* First disable the interrupt */
34008 disable_irq_nosync(irq);
34009- atomic_inc(&data->interrupt_handled);
34010+ atomic_inc_unchecked(&data->interrupt_handled);
34011 /* Then schedule a reading work struct */
34012 if (data->flag != SHT15_READING_NOTHING)
34013 schedule_work(&data->read_work);
34014@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
34015 here as could have gone low in meantime so verify
34016 it hasn't!
34017 */
34018- atomic_set(&data->interrupt_handled, 0);
34019+ atomic_set_unchecked(&data->interrupt_handled, 0);
34020 enable_irq(gpio_to_irq(data->pdata->gpio_data));
34021 /* If still not occured or another handler has been scheduled */
34022 if (gpio_get_value(data->pdata->gpio_data)
34023- || atomic_read(&data->interrupt_handled))
34024+ || atomic_read_unchecked(&data->interrupt_handled))
34025 return;
34026 }
34027 /* Read the data back from the device */
34028diff --git a/drivers/hwmon/w83791d.c b/drivers/hwmon/w83791d.c
34029index 97851c5..cb40626 100644
34030--- a/drivers/hwmon/w83791d.c
34031+++ b/drivers/hwmon/w83791d.c
34032@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_client *client, int kind,
34033 struct i2c_board_info *info);
34034 static int w83791d_remove(struct i2c_client *client);
34035
34036-static int w83791d_read(struct i2c_client *client, u8 register);
34037-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
34038+static int w83791d_read(struct i2c_client *client, u8 reg);
34039+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
34040 static struct w83791d_data *w83791d_update_device(struct device *dev);
34041
34042 #ifdef DEBUG
34043diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
34044index 378fcb5..5e91fa8 100644
34045--- a/drivers/i2c/busses/i2c-amd756-s4882.c
34046+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
34047@@ -43,7 +43,7 @@
34048 extern struct i2c_adapter amd756_smbus;
34049
34050 static struct i2c_adapter *s4882_adapter;
34051-static struct i2c_algorithm *s4882_algo;
34052+static i2c_algorithm_no_const *s4882_algo;
34053
34054 /* Wrapper access functions for multiplexed SMBus */
34055 static DEFINE_MUTEX(amd756_lock);
34056diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
34057index 29015eb..af2d8e9 100644
34058--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
34059+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
34060@@ -41,7 +41,7 @@
34061 extern struct i2c_adapter *nforce2_smbus;
34062
34063 static struct i2c_adapter *s4985_adapter;
34064-static struct i2c_algorithm *s4985_algo;
34065+static i2c_algorithm_no_const *s4985_algo;
34066
34067 /* Wrapper access functions for multiplexed SMBus */
34068 static DEFINE_MUTEX(nforce2_lock);
34069diff --git a/drivers/ide/aec62xx.c b/drivers/ide/aec62xx.c
34070index 878f8ec..12376fc 100644
34071--- a/drivers/ide/aec62xx.c
34072+++ b/drivers/ide/aec62xx.c
34073@@ -180,7 +180,7 @@ static const struct ide_port_ops atp86x_port_ops = {
34074 .cable_detect = atp86x_cable_detect,
34075 };
34076
34077-static const struct ide_port_info aec62xx_chipsets[] __devinitdata = {
34078+static const struct ide_port_info aec62xx_chipsets[] __devinitconst = {
34079 { /* 0: AEC6210 */
34080 .name = DRV_NAME,
34081 .init_chipset = init_chipset_aec62xx,
34082diff --git a/drivers/ide/alim15x3.c b/drivers/ide/alim15x3.c
34083index e59b6de..4b4fc65 100644
34084--- a/drivers/ide/alim15x3.c
34085+++ b/drivers/ide/alim15x3.c
34086@@ -509,7 +509,7 @@ static const struct ide_dma_ops ali_dma_ops = {
34087 .dma_sff_read_status = ide_dma_sff_read_status,
34088 };
34089
34090-static const struct ide_port_info ali15x3_chipset __devinitdata = {
34091+static const struct ide_port_info ali15x3_chipset __devinitconst = {
34092 .name = DRV_NAME,
34093 .init_chipset = init_chipset_ali15x3,
34094 .init_hwif = init_hwif_ali15x3,
34095diff --git a/drivers/ide/amd74xx.c b/drivers/ide/amd74xx.c
34096index 628cd2e..087a414 100644
34097--- a/drivers/ide/amd74xx.c
34098+++ b/drivers/ide/amd74xx.c
34099@@ -221,7 +221,7 @@ static const struct ide_port_ops amd_port_ops = {
34100 .udma_mask = udma, \
34101 }
34102
34103-static const struct ide_port_info amd74xx_chipsets[] __devinitdata = {
34104+static const struct ide_port_info amd74xx_chipsets[] __devinitconst = {
34105 /* 0: AMD7401 */ DECLARE_AMD_DEV(0x00, ATA_UDMA2),
34106 /* 1: AMD7409 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA4),
34107 /* 2: AMD7411/7441 */ DECLARE_AMD_DEV(ATA_SWDMA2, ATA_UDMA5),
34108diff --git a/drivers/ide/atiixp.c b/drivers/ide/atiixp.c
34109index 837322b..837fd71 100644
34110--- a/drivers/ide/atiixp.c
34111+++ b/drivers/ide/atiixp.c
34112@@ -137,7 +137,7 @@ static const struct ide_port_ops atiixp_port_ops = {
34113 .cable_detect = atiixp_cable_detect,
34114 };
34115
34116-static const struct ide_port_info atiixp_pci_info[] __devinitdata = {
34117+static const struct ide_port_info atiixp_pci_info[] __devinitconst = {
34118 { /* 0: IXP200/300/400/700 */
34119 .name = DRV_NAME,
34120 .enablebits = {{0x48,0x01,0x00}, {0x48,0x08,0x00}},
34121diff --git a/drivers/ide/cmd64x.c b/drivers/ide/cmd64x.c
34122index ca0c46f..d55318a 100644
34123--- a/drivers/ide/cmd64x.c
34124+++ b/drivers/ide/cmd64x.c
34125@@ -372,7 +372,7 @@ static const struct ide_dma_ops cmd646_rev1_dma_ops = {
34126 .dma_sff_read_status = ide_dma_sff_read_status,
34127 };
34128
34129-static const struct ide_port_info cmd64x_chipsets[] __devinitdata = {
34130+static const struct ide_port_info cmd64x_chipsets[] __devinitconst = {
34131 { /* 0: CMD643 */
34132 .name = DRV_NAME,
34133 .init_chipset = init_chipset_cmd64x,
34134diff --git a/drivers/ide/cs5520.c b/drivers/ide/cs5520.c
34135index 09f98ed..cebc5bc 100644
34136--- a/drivers/ide/cs5520.c
34137+++ b/drivers/ide/cs5520.c
34138@@ -93,7 +93,7 @@ static const struct ide_port_ops cs5520_port_ops = {
34139 .set_dma_mode = cs5520_set_dma_mode,
34140 };
34141
34142-static const struct ide_port_info cyrix_chipset __devinitdata = {
34143+static const struct ide_port_info cyrix_chipset __devinitconst = {
34144 .name = DRV_NAME,
34145 .enablebits = { { 0x60, 0x01, 0x01 }, { 0x60, 0x02, 0x02 } },
34146 .port_ops = &cs5520_port_ops,
34147diff --git a/drivers/ide/cs5530.c b/drivers/ide/cs5530.c
34148index 40bf05e..7d58ca0 100644
34149--- a/drivers/ide/cs5530.c
34150+++ b/drivers/ide/cs5530.c
34151@@ -244,7 +244,7 @@ static const struct ide_port_ops cs5530_port_ops = {
34152 .udma_filter = cs5530_udma_filter,
34153 };
34154
34155-static const struct ide_port_info cs5530_chipset __devinitdata = {
34156+static const struct ide_port_info cs5530_chipset __devinitconst = {
34157 .name = DRV_NAME,
34158 .init_chipset = init_chipset_cs5530,
34159 .init_hwif = init_hwif_cs5530,
34160diff --git a/drivers/ide/cs5535.c b/drivers/ide/cs5535.c
34161index 983d957..53e6172 100644
34162--- a/drivers/ide/cs5535.c
34163+++ b/drivers/ide/cs5535.c
34164@@ -170,7 +170,7 @@ static const struct ide_port_ops cs5535_port_ops = {
34165 .cable_detect = cs5535_cable_detect,
34166 };
34167
34168-static const struct ide_port_info cs5535_chipset __devinitdata = {
34169+static const struct ide_port_info cs5535_chipset __devinitconst = {
34170 .name = DRV_NAME,
34171 .port_ops = &cs5535_port_ops,
34172 .host_flags = IDE_HFLAG_SINGLE | IDE_HFLAG_POST_SET_MODE,
34173diff --git a/drivers/ide/cy82c693.c b/drivers/ide/cy82c693.c
34174index 74fc540..8e933d8 100644
34175--- a/drivers/ide/cy82c693.c
34176+++ b/drivers/ide/cy82c693.c
34177@@ -288,7 +288,7 @@ static const struct ide_port_ops cy82c693_port_ops = {
34178 .set_dma_mode = cy82c693_set_dma_mode,
34179 };
34180
34181-static const struct ide_port_info cy82c693_chipset __devinitdata = {
34182+static const struct ide_port_info cy82c693_chipset __devinitconst = {
34183 .name = DRV_NAME,
34184 .init_iops = init_iops_cy82c693,
34185 .port_ops = &cy82c693_port_ops,
34186diff --git a/drivers/ide/hpt366.c b/drivers/ide/hpt366.c
34187index 7ce68ef..e78197d 100644
34188--- a/drivers/ide/hpt366.c
34189+++ b/drivers/ide/hpt366.c
34190@@ -507,7 +507,7 @@ static struct hpt_timings hpt37x_timings = {
34191 }
34192 };
34193
34194-static const struct hpt_info hpt36x __devinitdata = {
34195+static const struct hpt_info hpt36x __devinitconst = {
34196 .chip_name = "HPT36x",
34197 .chip_type = HPT36x,
34198 .udma_mask = HPT366_ALLOW_ATA66_3 ? (HPT366_ALLOW_ATA66_4 ? ATA_UDMA4 : ATA_UDMA3) : ATA_UDMA2,
34199@@ -515,7 +515,7 @@ static const struct hpt_info hpt36x __devinitdata = {
34200 .timings = &hpt36x_timings
34201 };
34202
34203-static const struct hpt_info hpt370 __devinitdata = {
34204+static const struct hpt_info hpt370 __devinitconst = {
34205 .chip_name = "HPT370",
34206 .chip_type = HPT370,
34207 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34208@@ -523,7 +523,7 @@ static const struct hpt_info hpt370 __devinitdata = {
34209 .timings = &hpt37x_timings
34210 };
34211
34212-static const struct hpt_info hpt370a __devinitdata = {
34213+static const struct hpt_info hpt370a __devinitconst = {
34214 .chip_name = "HPT370A",
34215 .chip_type = HPT370A,
34216 .udma_mask = HPT370_ALLOW_ATA100_5 ? ATA_UDMA5 : ATA_UDMA4,
34217@@ -531,7 +531,7 @@ static const struct hpt_info hpt370a __devinitdata = {
34218 .timings = &hpt37x_timings
34219 };
34220
34221-static const struct hpt_info hpt374 __devinitdata = {
34222+static const struct hpt_info hpt374 __devinitconst = {
34223 .chip_name = "HPT374",
34224 .chip_type = HPT374,
34225 .udma_mask = ATA_UDMA5,
34226@@ -539,7 +539,7 @@ static const struct hpt_info hpt374 __devinitdata = {
34227 .timings = &hpt37x_timings
34228 };
34229
34230-static const struct hpt_info hpt372 __devinitdata = {
34231+static const struct hpt_info hpt372 __devinitconst = {
34232 .chip_name = "HPT372",
34233 .chip_type = HPT372,
34234 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34235@@ -547,7 +547,7 @@ static const struct hpt_info hpt372 __devinitdata = {
34236 .timings = &hpt37x_timings
34237 };
34238
34239-static const struct hpt_info hpt372a __devinitdata = {
34240+static const struct hpt_info hpt372a __devinitconst = {
34241 .chip_name = "HPT372A",
34242 .chip_type = HPT372A,
34243 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34244@@ -555,7 +555,7 @@ static const struct hpt_info hpt372a __devinitdata = {
34245 .timings = &hpt37x_timings
34246 };
34247
34248-static const struct hpt_info hpt302 __devinitdata = {
34249+static const struct hpt_info hpt302 __devinitconst = {
34250 .chip_name = "HPT302",
34251 .chip_type = HPT302,
34252 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34253@@ -563,7 +563,7 @@ static const struct hpt_info hpt302 __devinitdata = {
34254 .timings = &hpt37x_timings
34255 };
34256
34257-static const struct hpt_info hpt371 __devinitdata = {
34258+static const struct hpt_info hpt371 __devinitconst = {
34259 .chip_name = "HPT371",
34260 .chip_type = HPT371,
34261 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34262@@ -571,7 +571,7 @@ static const struct hpt_info hpt371 __devinitdata = {
34263 .timings = &hpt37x_timings
34264 };
34265
34266-static const struct hpt_info hpt372n __devinitdata = {
34267+static const struct hpt_info hpt372n __devinitconst = {
34268 .chip_name = "HPT372N",
34269 .chip_type = HPT372N,
34270 .udma_mask = HPT372_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34271@@ -579,7 +579,7 @@ static const struct hpt_info hpt372n __devinitdata = {
34272 .timings = &hpt37x_timings
34273 };
34274
34275-static const struct hpt_info hpt302n __devinitdata = {
34276+static const struct hpt_info hpt302n __devinitconst = {
34277 .chip_name = "HPT302N",
34278 .chip_type = HPT302N,
34279 .udma_mask = HPT302_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34280@@ -587,7 +587,7 @@ static const struct hpt_info hpt302n __devinitdata = {
34281 .timings = &hpt37x_timings
34282 };
34283
34284-static const struct hpt_info hpt371n __devinitdata = {
34285+static const struct hpt_info hpt371n __devinitconst = {
34286 .chip_name = "HPT371N",
34287 .chip_type = HPT371N,
34288 .udma_mask = HPT371_ALLOW_ATA133_6 ? ATA_UDMA6 : ATA_UDMA5,
34289@@ -1422,7 +1422,7 @@ static const struct ide_dma_ops hpt36x_dma_ops = {
34290 .dma_sff_read_status = ide_dma_sff_read_status,
34291 };
34292
34293-static const struct ide_port_info hpt366_chipsets[] __devinitdata = {
34294+static const struct ide_port_info hpt366_chipsets[] __devinitconst = {
34295 { /* 0: HPT36x */
34296 .name = DRV_NAME,
34297 .init_chipset = init_chipset_hpt366,
34298diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
34299index 2de76cc..74186a1 100644
34300--- a/drivers/ide/ide-cd.c
34301+++ b/drivers/ide/ide-cd.c
34302@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
34303 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
34304 if ((unsigned long)buf & alignment
34305 || blk_rq_bytes(rq) & q->dma_pad_mask
34306- || object_is_on_stack(buf))
34307+ || object_starts_on_stack(buf))
34308 drive->dma = 0;
34309 }
34310 }
34311diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
34312index fefbdfc..62ff465 100644
34313--- a/drivers/ide/ide-floppy.c
34314+++ b/drivers/ide/ide-floppy.c
34315@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_drive_t *drive)
34316 u8 pc_buf[256], header_len, desc_cnt;
34317 int i, rc = 1, blocks, length;
34318
34319+ pax_track_stack();
34320+
34321 ide_debug_log(IDE_DBG_FUNC, "enter");
34322
34323 drive->bios_cyl = 0;
34324diff --git a/drivers/ide/ide-pci-generic.c b/drivers/ide/ide-pci-generic.c
34325index 39d4e01..11538ce 100644
34326--- a/drivers/ide/ide-pci-generic.c
34327+++ b/drivers/ide/ide-pci-generic.c
34328@@ -53,7 +53,7 @@ static const struct ide_port_ops netcell_port_ops = {
34329 .udma_mask = ATA_UDMA6, \
34330 }
34331
34332-static const struct ide_port_info generic_chipsets[] __devinitdata = {
34333+static const struct ide_port_info generic_chipsets[] __devinitconst = {
34334 /* 0: Unknown */
34335 DECLARE_GENERIC_PCI_DEV(0),
34336
34337diff --git a/drivers/ide/it8172.c b/drivers/ide/it8172.c
34338index 0d266a5..aaca790 100644
34339--- a/drivers/ide/it8172.c
34340+++ b/drivers/ide/it8172.c
34341@@ -115,7 +115,7 @@ static const struct ide_port_ops it8172_port_ops = {
34342 .set_dma_mode = it8172_set_dma_mode,
34343 };
34344
34345-static const struct ide_port_info it8172_port_info __devinitdata = {
34346+static const struct ide_port_info it8172_port_info __devinitconst = {
34347 .name = DRV_NAME,
34348 .port_ops = &it8172_port_ops,
34349 .enablebits = { {0x41, 0x80, 0x80}, {0x00, 0x00, 0x00} },
34350diff --git a/drivers/ide/it8213.c b/drivers/ide/it8213.c
34351index 4797616..4be488a 100644
34352--- a/drivers/ide/it8213.c
34353+++ b/drivers/ide/it8213.c
34354@@ -156,7 +156,7 @@ static const struct ide_port_ops it8213_port_ops = {
34355 .cable_detect = it8213_cable_detect,
34356 };
34357
34358-static const struct ide_port_info it8213_chipset __devinitdata = {
34359+static const struct ide_port_info it8213_chipset __devinitconst = {
34360 .name = DRV_NAME,
34361 .enablebits = { {0x41, 0x80, 0x80} },
34362 .port_ops = &it8213_port_ops,
34363diff --git a/drivers/ide/it821x.c b/drivers/ide/it821x.c
34364index 51aa745..146ee60 100644
34365--- a/drivers/ide/it821x.c
34366+++ b/drivers/ide/it821x.c
34367@@ -627,7 +627,7 @@ static const struct ide_port_ops it821x_port_ops = {
34368 .cable_detect = it821x_cable_detect,
34369 };
34370
34371-static const struct ide_port_info it821x_chipset __devinitdata = {
34372+static const struct ide_port_info it821x_chipset __devinitconst = {
34373 .name = DRV_NAME,
34374 .init_chipset = init_chipset_it821x,
34375 .init_hwif = init_hwif_it821x,
34376diff --git a/drivers/ide/jmicron.c b/drivers/ide/jmicron.c
34377index bf2be64..9270098 100644
34378--- a/drivers/ide/jmicron.c
34379+++ b/drivers/ide/jmicron.c
34380@@ -102,7 +102,7 @@ static const struct ide_port_ops jmicron_port_ops = {
34381 .cable_detect = jmicron_cable_detect,
34382 };
34383
34384-static const struct ide_port_info jmicron_chipset __devinitdata = {
34385+static const struct ide_port_info jmicron_chipset __devinitconst = {
34386 .name = DRV_NAME,
34387 .enablebits = { { 0x40, 0x01, 0x01 }, { 0x40, 0x10, 0x10 } },
34388 .port_ops = &jmicron_port_ops,
34389diff --git a/drivers/ide/ns87415.c b/drivers/ide/ns87415.c
34390index 95327a2..73f78d8 100644
34391--- a/drivers/ide/ns87415.c
34392+++ b/drivers/ide/ns87415.c
34393@@ -293,7 +293,7 @@ static const struct ide_dma_ops ns87415_dma_ops = {
34394 .dma_sff_read_status = superio_dma_sff_read_status,
34395 };
34396
34397-static const struct ide_port_info ns87415_chipset __devinitdata = {
34398+static const struct ide_port_info ns87415_chipset __devinitconst = {
34399 .name = DRV_NAME,
34400 .init_hwif = init_hwif_ns87415,
34401 .tp_ops = &ns87415_tp_ops,
34402diff --git a/drivers/ide/opti621.c b/drivers/ide/opti621.c
34403index f1d70d6..e1de05b 100644
34404--- a/drivers/ide/opti621.c
34405+++ b/drivers/ide/opti621.c
34406@@ -202,7 +202,7 @@ static const struct ide_port_ops opti621_port_ops = {
34407 .set_pio_mode = opti621_set_pio_mode,
34408 };
34409
34410-static const struct ide_port_info opti621_chipset __devinitdata = {
34411+static const struct ide_port_info opti621_chipset __devinitconst = {
34412 .name = DRV_NAME,
34413 .enablebits = { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
34414 .port_ops = &opti621_port_ops,
34415diff --git a/drivers/ide/pdc202xx_new.c b/drivers/ide/pdc202xx_new.c
34416index 65ba823..7311f4d 100644
34417--- a/drivers/ide/pdc202xx_new.c
34418+++ b/drivers/ide/pdc202xx_new.c
34419@@ -465,7 +465,7 @@ static const struct ide_port_ops pdcnew_port_ops = {
34420 .udma_mask = udma, \
34421 }
34422
34423-static const struct ide_port_info pdcnew_chipsets[] __devinitdata = {
34424+static const struct ide_port_info pdcnew_chipsets[] __devinitconst = {
34425 /* 0: PDC202{68,70} */ DECLARE_PDCNEW_DEV(ATA_UDMA5),
34426 /* 1: PDC202{69,71,75,76,77} */ DECLARE_PDCNEW_DEV(ATA_UDMA6),
34427 };
34428diff --git a/drivers/ide/pdc202xx_old.c b/drivers/ide/pdc202xx_old.c
34429index cb812f3..af816ef 100644
34430--- a/drivers/ide/pdc202xx_old.c
34431+++ b/drivers/ide/pdc202xx_old.c
34432@@ -285,7 +285,7 @@ static const struct ide_dma_ops pdc2026x_dma_ops = {
34433 .max_sectors = sectors, \
34434 }
34435
34436-static const struct ide_port_info pdc202xx_chipsets[] __devinitdata = {
34437+static const struct ide_port_info pdc202xx_chipsets[] __devinitconst = {
34438 { /* 0: PDC20246 */
34439 .name = DRV_NAME,
34440 .init_chipset = init_chipset_pdc202xx,
34441diff --git a/drivers/ide/piix.c b/drivers/ide/piix.c
34442index bf14f39..15c4b98 100644
34443--- a/drivers/ide/piix.c
34444+++ b/drivers/ide/piix.c
34445@@ -344,7 +344,7 @@ static const struct ide_port_ops ich_port_ops = {
34446 .udma_mask = udma, \
34447 }
34448
34449-static const struct ide_port_info piix_pci_info[] __devinitdata = {
34450+static const struct ide_port_info piix_pci_info[] __devinitconst = {
34451 /* 0: MPIIX */
34452 { /*
34453 * MPIIX actually has only a single IDE channel mapped to
34454diff --git a/drivers/ide/rz1000.c b/drivers/ide/rz1000.c
34455index a6414a8..c04173e 100644
34456--- a/drivers/ide/rz1000.c
34457+++ b/drivers/ide/rz1000.c
34458@@ -38,7 +38,7 @@ static int __devinit rz1000_disable_readahead(struct pci_dev *dev)
34459 }
34460 }
34461
34462-static const struct ide_port_info rz1000_chipset __devinitdata = {
34463+static const struct ide_port_info rz1000_chipset __devinitconst = {
34464 .name = DRV_NAME,
34465 .host_flags = IDE_HFLAG_NO_DMA,
34466 };
34467diff --git a/drivers/ide/sc1200.c b/drivers/ide/sc1200.c
34468index d467478..9203942 100644
34469--- a/drivers/ide/sc1200.c
34470+++ b/drivers/ide/sc1200.c
34471@@ -290,7 +290,7 @@ static const struct ide_dma_ops sc1200_dma_ops = {
34472 .dma_sff_read_status = ide_dma_sff_read_status,
34473 };
34474
34475-static const struct ide_port_info sc1200_chipset __devinitdata = {
34476+static const struct ide_port_info sc1200_chipset __devinitconst = {
34477 .name = DRV_NAME,
34478 .port_ops = &sc1200_port_ops,
34479 .dma_ops = &sc1200_dma_ops,
34480diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
34481index 1104bb3..59c5194 100644
34482--- a/drivers/ide/scc_pata.c
34483+++ b/drivers/ide/scc_pata.c
34484@@ -811,7 +811,7 @@ static const struct ide_dma_ops scc_dma_ops = {
34485 .dma_sff_read_status = scc_dma_sff_read_status,
34486 };
34487
34488-static const struct ide_port_info scc_chipset __devinitdata = {
34489+static const struct ide_port_info scc_chipset __devinitconst = {
34490 .name = "sccIDE",
34491 .init_iops = init_iops_scc,
34492 .init_dma = scc_init_dma,
34493diff --git a/drivers/ide/serverworks.c b/drivers/ide/serverworks.c
34494index b6554ef..6cc2cc3 100644
34495--- a/drivers/ide/serverworks.c
34496+++ b/drivers/ide/serverworks.c
34497@@ -353,7 +353,7 @@ static const struct ide_port_ops svwks_port_ops = {
34498 .cable_detect = svwks_cable_detect,
34499 };
34500
34501-static const struct ide_port_info serverworks_chipsets[] __devinitdata = {
34502+static const struct ide_port_info serverworks_chipsets[] __devinitconst = {
34503 { /* 0: OSB4 */
34504 .name = DRV_NAME,
34505 .init_chipset = init_chipset_svwks,
34506diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
34507index ab3db61..afed580 100644
34508--- a/drivers/ide/setup-pci.c
34509+++ b/drivers/ide/setup-pci.c
34510@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev1, struct pci_dev *dev2,
34511 int ret, i, n_ports = dev2 ? 4 : 2;
34512 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
34513
34514+ pax_track_stack();
34515+
34516 for (i = 0; i < n_ports / 2; i++) {
34517 ret = ide_setup_pci_controller(pdev[i], d, !i);
34518 if (ret < 0)
34519diff --git a/drivers/ide/siimage.c b/drivers/ide/siimage.c
34520index d95df52..0b03a39 100644
34521--- a/drivers/ide/siimage.c
34522+++ b/drivers/ide/siimage.c
34523@@ -719,7 +719,7 @@ static const struct ide_dma_ops sil_dma_ops = {
34524 .udma_mask = ATA_UDMA6, \
34525 }
34526
34527-static const struct ide_port_info siimage_chipsets[] __devinitdata = {
34528+static const struct ide_port_info siimage_chipsets[] __devinitconst = {
34529 /* 0: SiI680 */ DECLARE_SII_DEV(&sil_pata_port_ops),
34530 /* 1: SiI3112 */ DECLARE_SII_DEV(&sil_sata_port_ops)
34531 };
34532diff --git a/drivers/ide/sis5513.c b/drivers/ide/sis5513.c
34533index 3b88eba..ca8699d 100644
34534--- a/drivers/ide/sis5513.c
34535+++ b/drivers/ide/sis5513.c
34536@@ -561,7 +561,7 @@ static const struct ide_port_ops sis_ata133_port_ops = {
34537 .cable_detect = sis_cable_detect,
34538 };
34539
34540-static const struct ide_port_info sis5513_chipset __devinitdata = {
34541+static const struct ide_port_info sis5513_chipset __devinitconst = {
34542 .name = DRV_NAME,
34543 .init_chipset = init_chipset_sis5513,
34544 .enablebits = { {0x4a, 0x02, 0x02}, {0x4a, 0x04, 0x04} },
34545diff --git a/drivers/ide/sl82c105.c b/drivers/ide/sl82c105.c
34546index d698da4..fca42a4 100644
34547--- a/drivers/ide/sl82c105.c
34548+++ b/drivers/ide/sl82c105.c
34549@@ -319,7 +319,7 @@ static const struct ide_dma_ops sl82c105_dma_ops = {
34550 .dma_sff_read_status = ide_dma_sff_read_status,
34551 };
34552
34553-static const struct ide_port_info sl82c105_chipset __devinitdata = {
34554+static const struct ide_port_info sl82c105_chipset __devinitconst = {
34555 .name = DRV_NAME,
34556 .init_chipset = init_chipset_sl82c105,
34557 .enablebits = {{0x40,0x01,0x01}, {0x40,0x10,0x10}},
34558diff --git a/drivers/ide/slc90e66.c b/drivers/ide/slc90e66.c
34559index 1ccfb40..83d5779 100644
34560--- a/drivers/ide/slc90e66.c
34561+++ b/drivers/ide/slc90e66.c
34562@@ -131,7 +131,7 @@ static const struct ide_port_ops slc90e66_port_ops = {
34563 .cable_detect = slc90e66_cable_detect,
34564 };
34565
34566-static const struct ide_port_info slc90e66_chipset __devinitdata = {
34567+static const struct ide_port_info slc90e66_chipset __devinitconst = {
34568 .name = DRV_NAME,
34569 .enablebits = { {0x41, 0x80, 0x80}, {0x43, 0x80, 0x80} },
34570 .port_ops = &slc90e66_port_ops,
34571diff --git a/drivers/ide/tc86c001.c b/drivers/ide/tc86c001.c
34572index 05a93d6..5f9e325 100644
34573--- a/drivers/ide/tc86c001.c
34574+++ b/drivers/ide/tc86c001.c
34575@@ -190,7 +190,7 @@ static const struct ide_dma_ops tc86c001_dma_ops = {
34576 .dma_sff_read_status = ide_dma_sff_read_status,
34577 };
34578
34579-static const struct ide_port_info tc86c001_chipset __devinitdata = {
34580+static const struct ide_port_info tc86c001_chipset __devinitconst = {
34581 .name = DRV_NAME,
34582 .init_hwif = init_hwif_tc86c001,
34583 .port_ops = &tc86c001_port_ops,
34584diff --git a/drivers/ide/triflex.c b/drivers/ide/triflex.c
34585index 8773c3b..7907d6c 100644
34586--- a/drivers/ide/triflex.c
34587+++ b/drivers/ide/triflex.c
34588@@ -92,7 +92,7 @@ static const struct ide_port_ops triflex_port_ops = {
34589 .set_dma_mode = triflex_set_mode,
34590 };
34591
34592-static const struct ide_port_info triflex_device __devinitdata = {
34593+static const struct ide_port_info triflex_device __devinitconst = {
34594 .name = DRV_NAME,
34595 .enablebits = {{0x80, 0x01, 0x01}, {0x80, 0x02, 0x02}},
34596 .port_ops = &triflex_port_ops,
34597diff --git a/drivers/ide/trm290.c b/drivers/ide/trm290.c
34598index 4b42ca0..e494a98 100644
34599--- a/drivers/ide/trm290.c
34600+++ b/drivers/ide/trm290.c
34601@@ -324,7 +324,7 @@ static struct ide_dma_ops trm290_dma_ops = {
34602 .dma_check = trm290_dma_check,
34603 };
34604
34605-static const struct ide_port_info trm290_chipset __devinitdata = {
34606+static const struct ide_port_info trm290_chipset __devinitconst = {
34607 .name = DRV_NAME,
34608 .init_hwif = init_hwif_trm290,
34609 .tp_ops = &trm290_tp_ops,
34610diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
34611index 028de26..520d5d5 100644
34612--- a/drivers/ide/via82cxxx.c
34613+++ b/drivers/ide/via82cxxx.c
34614@@ -374,7 +374,7 @@ static const struct ide_port_ops via_port_ops = {
34615 .cable_detect = via82cxxx_cable_detect,
34616 };
34617
34618-static const struct ide_port_info via82cxxx_chipset __devinitdata = {
34619+static const struct ide_port_info via82cxxx_chipset __devinitconst = {
34620 .name = DRV_NAME,
34621 .init_chipset = init_chipset_via82cxxx,
34622 .enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
34623diff --git a/drivers/ieee1394/dv1394.c b/drivers/ieee1394/dv1394.c
34624index 2cd00b5..14de699 100644
34625--- a/drivers/ieee1394/dv1394.c
34626+++ b/drivers/ieee1394/dv1394.c
34627@@ -739,7 +739,7 @@ static void frame_prepare(struct video_card *video, unsigned int this_frame)
34628 based upon DIF section and sequence
34629 */
34630
34631-static void inline
34632+static inline void
34633 frame_put_packet (struct frame *f, struct packet *p)
34634 {
34635 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
34636diff --git a/drivers/ieee1394/hosts.c b/drivers/ieee1394/hosts.c
34637index e947d8f..6a966b9 100644
34638--- a/drivers/ieee1394/hosts.c
34639+++ b/drivers/ieee1394/hosts.c
34640@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso *iso, enum isoctl_cmd command,
34641 }
34642
34643 static struct hpsb_host_driver dummy_driver = {
34644+ .name = "dummy",
34645 .transmit_packet = dummy_transmit_packet,
34646 .devctl = dummy_devctl,
34647 .isoctl = dummy_isoctl
34648diff --git a/drivers/ieee1394/init_ohci1394_dma.c b/drivers/ieee1394/init_ohci1394_dma.c
34649index ddaab6e..8d37435 100644
34650--- a/drivers/ieee1394/init_ohci1394_dma.c
34651+++ b/drivers/ieee1394/init_ohci1394_dma.c
34652@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_controllers(void)
34653 for (func = 0; func < 8; func++) {
34654 u32 class = read_pci_config(num,slot,func,
34655 PCI_CLASS_REVISION);
34656- if ((class == 0xffffffff))
34657+ if (class == 0xffffffff)
34658 continue; /* No device at this func */
34659
34660 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
34661diff --git a/drivers/ieee1394/ohci1394.c b/drivers/ieee1394/ohci1394.c
34662index 65c1429..5d8c11f 100644
34663--- a/drivers/ieee1394/ohci1394.c
34664+++ b/drivers/ieee1394/ohci1394.c
34665@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_DRIVER_NAME , ## args)
34666 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
34667
34668 /* Module Parameters */
34669-static int phys_dma = 1;
34670+static int phys_dma;
34671 module_param(phys_dma, int, 0444);
34672-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
34673+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
34674
34675 static void dma_trm_tasklet(unsigned long data);
34676 static void dma_trm_reset(struct dma_trm_ctx *d);
34677diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
34678index f199896..78c9fc8 100644
34679--- a/drivers/ieee1394/sbp2.c
34680+++ b/drivers/ieee1394/sbp2.c
34681@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 protocol driver");
34682 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
34683 MODULE_LICENSE("GPL");
34684
34685-static int sbp2_module_init(void)
34686+static int __init sbp2_module_init(void)
34687 {
34688 int ret;
34689
34690diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
34691index a5dea6b..0cefe8f 100644
34692--- a/drivers/infiniband/core/cm.c
34693+++ b/drivers/infiniband/core/cm.c
34694@@ -112,7 +112,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
34695
34696 struct cm_counter_group {
34697 struct kobject obj;
34698- atomic_long_t counter[CM_ATTR_COUNT];
34699+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
34700 };
34701
34702 struct cm_counter_attribute {
34703@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm_work *work,
34704 struct ib_mad_send_buf *msg = NULL;
34705 int ret;
34706
34707- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34708+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34709 counter[CM_REQ_COUNTER]);
34710
34711 /* Quick state check to discard duplicate REQs. */
34712@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
34713 if (!cm_id_priv)
34714 return;
34715
34716- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34717+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34718 counter[CM_REP_COUNTER]);
34719 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
34720 if (ret)
34721@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work *work)
34722 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
34723 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
34724 spin_unlock_irq(&cm_id_priv->lock);
34725- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34726+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34727 counter[CM_RTU_COUNTER]);
34728 goto out;
34729 }
34730@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_work *work)
34731 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
34732 dreq_msg->local_comm_id);
34733 if (!cm_id_priv) {
34734- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34735+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34736 counter[CM_DREQ_COUNTER]);
34737 cm_issue_drep(work->port, work->mad_recv_wc);
34738 return -EINVAL;
34739@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_work *work)
34740 case IB_CM_MRA_REP_RCVD:
34741 break;
34742 case IB_CM_TIMEWAIT:
34743- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34744+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34745 counter[CM_DREQ_COUNTER]);
34746 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34747 goto unlock;
34748@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_work *work)
34749 cm_free_msg(msg);
34750 goto deref;
34751 case IB_CM_DREQ_RCVD:
34752- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34753+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34754 counter[CM_DREQ_COUNTER]);
34755 goto unlock;
34756 default:
34757@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work *work)
34758 ib_modify_mad(cm_id_priv->av.port->mad_agent,
34759 cm_id_priv->msg, timeout)) {
34760 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
34761- atomic_long_inc(&work->port->
34762+ atomic_long_inc_unchecked(&work->port->
34763 counter_group[CM_RECV_DUPLICATES].
34764 counter[CM_MRA_COUNTER]);
34765 goto out;
34766@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work *work)
34767 break;
34768 case IB_CM_MRA_REQ_RCVD:
34769 case IB_CM_MRA_REP_RCVD:
34770- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34771+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34772 counter[CM_MRA_COUNTER]);
34773 /* fall through */
34774 default:
34775@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work *work)
34776 case IB_CM_LAP_IDLE:
34777 break;
34778 case IB_CM_MRA_LAP_SENT:
34779- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34780+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34781 counter[CM_LAP_COUNTER]);
34782 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
34783 goto unlock;
34784@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work *work)
34785 cm_free_msg(msg);
34786 goto deref;
34787 case IB_CM_LAP_RCVD:
34788- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34789+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34790 counter[CM_LAP_COUNTER]);
34791 goto unlock;
34792 default:
34793@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
34794 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
34795 if (cur_cm_id_priv) {
34796 spin_unlock_irq(&cm.lock);
34797- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
34798+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
34799 counter[CM_SIDR_REQ_COUNTER]);
34800 goto out; /* Duplicate message. */
34801 }
34802@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
34803 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
34804 msg->retries = 1;
34805
34806- atomic_long_add(1 + msg->retries,
34807+ atomic_long_add_unchecked(1 + msg->retries,
34808 &port->counter_group[CM_XMIT].counter[attr_index]);
34809 if (msg->retries)
34810- atomic_long_add(msg->retries,
34811+ atomic_long_add_unchecked(msg->retries,
34812 &port->counter_group[CM_XMIT_RETRIES].
34813 counter[attr_index]);
34814
34815@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
34816 }
34817
34818 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
34819- atomic_long_inc(&port->counter_group[CM_RECV].
34820+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
34821 counter[attr_id - CM_ATTR_ID_OFFSET]);
34822
34823 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
34824@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
34825 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
34826
34827 return sprintf(buf, "%ld\n",
34828- atomic_long_read(&group->counter[cm_attr->index]));
34829+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
34830 }
34831
34832-static struct sysfs_ops cm_counter_ops = {
34833+static const struct sysfs_ops cm_counter_ops = {
34834 .show = cm_show_counter
34835 };
34836
34837diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
34838index 8fd3a6f..61d8075 100644
34839--- a/drivers/infiniband/core/cma.c
34840+++ b/drivers/infiniband/core/cma.c
34841@@ -2267,6 +2267,9 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv,
34842
34843 req.private_data_len = sizeof(struct cma_hdr) +
34844 conn_param->private_data_len;
34845+ if (req.private_data_len < conn_param->private_data_len)
34846+ return -EINVAL;
34847+
34848 req.private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34849 if (!req.private_data)
34850 return -ENOMEM;
34851@@ -2314,6 +2317,9 @@ static int cma_connect_ib(struct rdma_id_private *id_priv,
34852 memset(&req, 0, sizeof req);
34853 offset = cma_user_data_offset(id_priv->id.ps);
34854 req.private_data_len = offset + conn_param->private_data_len;
34855+ if (req.private_data_len < conn_param->private_data_len)
34856+ return -EINVAL;
34857+
34858 private_data = kzalloc(req.private_data_len, GFP_ATOMIC);
34859 if (!private_data)
34860 return -ENOMEM;
34861diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
34862index 4507043..14ad522 100644
34863--- a/drivers/infiniband/core/fmr_pool.c
34864+++ b/drivers/infiniband/core/fmr_pool.c
34865@@ -97,8 +97,8 @@ struct ib_fmr_pool {
34866
34867 struct task_struct *thread;
34868
34869- atomic_t req_ser;
34870- atomic_t flush_ser;
34871+ atomic_unchecked_t req_ser;
34872+ atomic_unchecked_t flush_ser;
34873
34874 wait_queue_head_t force_wait;
34875 };
34876@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34877 struct ib_fmr_pool *pool = pool_ptr;
34878
34879 do {
34880- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
34881+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
34882 ib_fmr_batch_release(pool);
34883
34884- atomic_inc(&pool->flush_ser);
34885+ atomic_inc_unchecked(&pool->flush_ser);
34886 wake_up_interruptible(&pool->force_wait);
34887
34888 if (pool->flush_function)
34889@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
34890 }
34891
34892 set_current_state(TASK_INTERRUPTIBLE);
34893- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
34894+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
34895 !kthread_should_stop())
34896 schedule();
34897 __set_current_state(TASK_RUNNING);
34898@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
34899 pool->dirty_watermark = params->dirty_watermark;
34900 pool->dirty_len = 0;
34901 spin_lock_init(&pool->pool_lock);
34902- atomic_set(&pool->req_ser, 0);
34903- atomic_set(&pool->flush_ser, 0);
34904+ atomic_set_unchecked(&pool->req_ser, 0);
34905+ atomic_set_unchecked(&pool->flush_ser, 0);
34906 init_waitqueue_head(&pool->force_wait);
34907
34908 pool->thread = kthread_run(ib_fmr_cleanup_thread,
34909@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
34910 }
34911 spin_unlock_irq(&pool->pool_lock);
34912
34913- serial = atomic_inc_return(&pool->req_ser);
34914+ serial = atomic_inc_return_unchecked(&pool->req_ser);
34915 wake_up_process(pool->thread);
34916
34917 if (wait_event_interruptible(pool->force_wait,
34918- atomic_read(&pool->flush_ser) - serial >= 0))
34919+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
34920 return -EINTR;
34921
34922 return 0;
34923@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
34924 } else {
34925 list_add_tail(&fmr->list, &pool->dirty_list);
34926 if (++pool->dirty_len >= pool->dirty_watermark) {
34927- atomic_inc(&pool->req_ser);
34928+ atomic_inc_unchecked(&pool->req_ser);
34929 wake_up_process(pool->thread);
34930 }
34931 }
34932diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
34933index 158a214..1558bb7 100644
34934--- a/drivers/infiniband/core/sysfs.c
34935+++ b/drivers/infiniband/core/sysfs.c
34936@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kobject *kobj,
34937 return port_attr->show(p, port_attr, buf);
34938 }
34939
34940-static struct sysfs_ops port_sysfs_ops = {
34941+static const struct sysfs_ops port_sysfs_ops = {
34942 .show = port_attr_show
34943 };
34944
34945diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c
34946index 5440da0..1194ecb 100644
34947--- a/drivers/infiniband/core/uverbs_marshall.c
34948+++ b/drivers/infiniband/core/uverbs_marshall.c
34949@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
34950 dst->grh.sgid_index = src->grh.sgid_index;
34951 dst->grh.hop_limit = src->grh.hop_limit;
34952 dst->grh.traffic_class = src->grh.traffic_class;
34953+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
34954 dst->dlid = src->dlid;
34955 dst->sl = src->sl;
34956 dst->src_path_bits = src->src_path_bits;
34957 dst->static_rate = src->static_rate;
34958 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
34959 dst->port_num = src->port_num;
34960+ dst->reserved = 0;
34961 }
34962 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
34963
34964 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34965 struct ib_qp_attr *src)
34966 {
34967+ dst->qp_state = src->qp_state;
34968 dst->cur_qp_state = src->cur_qp_state;
34969 dst->path_mtu = src->path_mtu;
34970 dst->path_mig_state = src->path_mig_state;
34971@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
34972 dst->rnr_retry = src->rnr_retry;
34973 dst->alt_port_num = src->alt_port_num;
34974 dst->alt_timeout = src->alt_timeout;
34975+ memset(dst->reserved, 0, sizeof(dst->reserved));
34976 }
34977 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
34978
34979diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c
34980index 100da85..62e6b88 100644
34981--- a/drivers/infiniband/hw/ipath/ipath_fs.c
34982+++ b/drivers/infiniband/hw/ipath/ipath_fs.c
34983@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,
34984 struct infinipath_counters counters;
34985 struct ipath_devdata *dd;
34986
34987+ pax_track_stack();
34988+
34989 dd = file->f_path.dentry->d_inode->i_private;
34990 dd->ipath_f_read_counters(dd, &counters);
34991
34992diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
34993index cbde0cf..afaf55c 100644
34994--- a/drivers/infiniband/hw/nes/nes.c
34995+++ b/drivers/infiniband/hw/nes/nes.c
34996@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
34997 LIST_HEAD(nes_adapter_list);
34998 static LIST_HEAD(nes_dev_list);
34999
35000-atomic_t qps_destroyed;
35001+atomic_unchecked_t qps_destroyed;
35002
35003 static unsigned int ee_flsh_adapter;
35004 static unsigned int sysfs_nonidx_addr;
35005@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
35006 struct nes_adapter *nesadapter = nesdev->nesadapter;
35007 u32 qp_id;
35008
35009- atomic_inc(&qps_destroyed);
35010+ atomic_inc_unchecked(&qps_destroyed);
35011
35012 /* Free the control structures */
35013
35014diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
35015index bcc6abc..9c76b2f 100644
35016--- a/drivers/infiniband/hw/nes/nes.h
35017+++ b/drivers/infiniband/hw/nes/nes.h
35018@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
35019 extern unsigned int wqm_quanta;
35020 extern struct list_head nes_adapter_list;
35021
35022-extern atomic_t cm_connects;
35023-extern atomic_t cm_accepts;
35024-extern atomic_t cm_disconnects;
35025-extern atomic_t cm_closes;
35026-extern atomic_t cm_connecteds;
35027-extern atomic_t cm_connect_reqs;
35028-extern atomic_t cm_rejects;
35029-extern atomic_t mod_qp_timouts;
35030-extern atomic_t qps_created;
35031-extern atomic_t qps_destroyed;
35032-extern atomic_t sw_qps_destroyed;
35033+extern atomic_unchecked_t cm_connects;
35034+extern atomic_unchecked_t cm_accepts;
35035+extern atomic_unchecked_t cm_disconnects;
35036+extern atomic_unchecked_t cm_closes;
35037+extern atomic_unchecked_t cm_connecteds;
35038+extern atomic_unchecked_t cm_connect_reqs;
35039+extern atomic_unchecked_t cm_rejects;
35040+extern atomic_unchecked_t mod_qp_timouts;
35041+extern atomic_unchecked_t qps_created;
35042+extern atomic_unchecked_t qps_destroyed;
35043+extern atomic_unchecked_t sw_qps_destroyed;
35044 extern u32 mh_detected;
35045 extern u32 mh_pauses_sent;
35046 extern u32 cm_packets_sent;
35047@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
35048 extern u32 cm_listens_created;
35049 extern u32 cm_listens_destroyed;
35050 extern u32 cm_backlog_drops;
35051-extern atomic_t cm_loopbacks;
35052-extern atomic_t cm_nodes_created;
35053-extern atomic_t cm_nodes_destroyed;
35054-extern atomic_t cm_accel_dropped_pkts;
35055-extern atomic_t cm_resets_recvd;
35056+extern atomic_unchecked_t cm_loopbacks;
35057+extern atomic_unchecked_t cm_nodes_created;
35058+extern atomic_unchecked_t cm_nodes_destroyed;
35059+extern atomic_unchecked_t cm_accel_dropped_pkts;
35060+extern atomic_unchecked_t cm_resets_recvd;
35061
35062 extern u32 int_mod_timer_init;
35063 extern u32 int_mod_cq_depth_256;
35064diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
35065index 73473db..5ed06e8 100644
35066--- a/drivers/infiniband/hw/nes/nes_cm.c
35067+++ b/drivers/infiniband/hw/nes/nes_cm.c
35068@@ -69,11 +69,11 @@ u32 cm_packets_received;
35069 u32 cm_listens_created;
35070 u32 cm_listens_destroyed;
35071 u32 cm_backlog_drops;
35072-atomic_t cm_loopbacks;
35073-atomic_t cm_nodes_created;
35074-atomic_t cm_nodes_destroyed;
35075-atomic_t cm_accel_dropped_pkts;
35076-atomic_t cm_resets_recvd;
35077+atomic_unchecked_t cm_loopbacks;
35078+atomic_unchecked_t cm_nodes_created;
35079+atomic_unchecked_t cm_nodes_destroyed;
35080+atomic_unchecked_t cm_accel_dropped_pkts;
35081+atomic_unchecked_t cm_resets_recvd;
35082
35083 static inline int mini_cm_accelerated(struct nes_cm_core *,
35084 struct nes_cm_node *);
35085@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
35086
35087 static struct nes_cm_core *g_cm_core;
35088
35089-atomic_t cm_connects;
35090-atomic_t cm_accepts;
35091-atomic_t cm_disconnects;
35092-atomic_t cm_closes;
35093-atomic_t cm_connecteds;
35094-atomic_t cm_connect_reqs;
35095-atomic_t cm_rejects;
35096+atomic_unchecked_t cm_connects;
35097+atomic_unchecked_t cm_accepts;
35098+atomic_unchecked_t cm_disconnects;
35099+atomic_unchecked_t cm_closes;
35100+atomic_unchecked_t cm_connecteds;
35101+atomic_unchecked_t cm_connect_reqs;
35102+atomic_unchecked_t cm_rejects;
35103
35104
35105 /**
35106@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
35107 cm_node->rem_mac);
35108
35109 add_hte_node(cm_core, cm_node);
35110- atomic_inc(&cm_nodes_created);
35111+ atomic_inc_unchecked(&cm_nodes_created);
35112
35113 return cm_node;
35114 }
35115@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
35116 }
35117
35118 atomic_dec(&cm_core->node_cnt);
35119- atomic_inc(&cm_nodes_destroyed);
35120+ atomic_inc_unchecked(&cm_nodes_destroyed);
35121 nesqp = cm_node->nesqp;
35122 if (nesqp) {
35123 nesqp->cm_node = NULL;
35124@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
35125
35126 static void drop_packet(struct sk_buff *skb)
35127 {
35128- atomic_inc(&cm_accel_dropped_pkts);
35129+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35130 dev_kfree_skb_any(skb);
35131 }
35132
35133@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
35134
35135 int reset = 0; /* whether to send reset in case of err.. */
35136 int passive_state;
35137- atomic_inc(&cm_resets_recvd);
35138+ atomic_inc_unchecked(&cm_resets_recvd);
35139 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
35140 " refcnt=%d\n", cm_node, cm_node->state,
35141 atomic_read(&cm_node->ref_count));
35142@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
35143 rem_ref_cm_node(cm_node->cm_core, cm_node);
35144 return NULL;
35145 }
35146- atomic_inc(&cm_loopbacks);
35147+ atomic_inc_unchecked(&cm_loopbacks);
35148 loopbackremotenode->loopbackpartner = cm_node;
35149 loopbackremotenode->tcp_cntxt.rcv_wscale =
35150 NES_CM_DEFAULT_RCV_WND_SCALE;
35151@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
35152 add_ref_cm_node(cm_node);
35153 } else if (cm_node->state == NES_CM_STATE_TSA) {
35154 rem_ref_cm_node(cm_core, cm_node);
35155- atomic_inc(&cm_accel_dropped_pkts);
35156+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
35157 dev_kfree_skb_any(skb);
35158 break;
35159 }
35160@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35161
35162 if ((cm_id) && (cm_id->event_handler)) {
35163 if (issue_disconn) {
35164- atomic_inc(&cm_disconnects);
35165+ atomic_inc_unchecked(&cm_disconnects);
35166 cm_event.event = IW_CM_EVENT_DISCONNECT;
35167 cm_event.status = disconn_status;
35168 cm_event.local_addr = cm_id->local_addr;
35169@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
35170 }
35171
35172 if (issue_close) {
35173- atomic_inc(&cm_closes);
35174+ atomic_inc_unchecked(&cm_closes);
35175 nes_disconnect(nesqp, 1);
35176
35177 cm_id->provider_data = nesqp;
35178@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35179
35180 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
35181 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
35182- atomic_inc(&cm_accepts);
35183+ atomic_inc_unchecked(&cm_accepts);
35184
35185 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
35186 atomic_read(&nesvnic->netdev->refcnt));
35187@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
35188
35189 struct nes_cm_core *cm_core;
35190
35191- atomic_inc(&cm_rejects);
35192+ atomic_inc_unchecked(&cm_rejects);
35193 cm_node = (struct nes_cm_node *) cm_id->provider_data;
35194 loopback = cm_node->loopbackpartner;
35195 cm_core = cm_node->cm_core;
35196@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
35197 ntohl(cm_id->local_addr.sin_addr.s_addr),
35198 ntohs(cm_id->local_addr.sin_port));
35199
35200- atomic_inc(&cm_connects);
35201+ atomic_inc_unchecked(&cm_connects);
35202 nesqp->active_conn = 1;
35203
35204 /* cache the cm_id in the qp */
35205@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct nes_cm_event *event)
35206 if (nesqp->destroyed) {
35207 return;
35208 }
35209- atomic_inc(&cm_connecteds);
35210+ atomic_inc_unchecked(&cm_connecteds);
35211 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
35212 " local port 0x%04X. jiffies = %lu.\n",
35213 nesqp->hwqp.qp_id,
35214@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm_event *event)
35215
35216 ret = cm_id->event_handler(cm_id, &cm_event);
35217 cm_id->add_ref(cm_id);
35218- atomic_inc(&cm_closes);
35219+ atomic_inc_unchecked(&cm_closes);
35220 cm_event.event = IW_CM_EVENT_CLOSE;
35221 cm_event.status = IW_CM_EVENT_STATUS_OK;
35222 cm_event.provider_data = cm_id->provider_data;
35223@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
35224 return;
35225 cm_id = cm_node->cm_id;
35226
35227- atomic_inc(&cm_connect_reqs);
35228+ atomic_inc_unchecked(&cm_connect_reqs);
35229 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35230 cm_node, cm_id, jiffies);
35231
35232@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
35233 return;
35234 cm_id = cm_node->cm_id;
35235
35236- atomic_inc(&cm_connect_reqs);
35237+ atomic_inc_unchecked(&cm_connect_reqs);
35238 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
35239 cm_node, cm_id, jiffies);
35240
35241diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
35242index e593af3..870694a 100644
35243--- a/drivers/infiniband/hw/nes/nes_nic.c
35244+++ b/drivers/infiniband/hw/nes/nes_nic.c
35245@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35246 target_stat_values[++index] = mh_detected;
35247 target_stat_values[++index] = mh_pauses_sent;
35248 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
35249- target_stat_values[++index] = atomic_read(&cm_connects);
35250- target_stat_values[++index] = atomic_read(&cm_accepts);
35251- target_stat_values[++index] = atomic_read(&cm_disconnects);
35252- target_stat_values[++index] = atomic_read(&cm_connecteds);
35253- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
35254- target_stat_values[++index] = atomic_read(&cm_rejects);
35255- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
35256- target_stat_values[++index] = atomic_read(&qps_created);
35257- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
35258- target_stat_values[++index] = atomic_read(&qps_destroyed);
35259- target_stat_values[++index] = atomic_read(&cm_closes);
35260+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
35261+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
35262+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
35263+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
35264+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
35265+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
35266+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
35267+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
35268+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
35269+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
35270+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
35271 target_stat_values[++index] = cm_packets_sent;
35272 target_stat_values[++index] = cm_packets_bounced;
35273 target_stat_values[++index] = cm_packets_created;
35274@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
35275 target_stat_values[++index] = cm_listens_created;
35276 target_stat_values[++index] = cm_listens_destroyed;
35277 target_stat_values[++index] = cm_backlog_drops;
35278- target_stat_values[++index] = atomic_read(&cm_loopbacks);
35279- target_stat_values[++index] = atomic_read(&cm_nodes_created);
35280- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
35281- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
35282- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
35283+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
35284+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
35285+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
35286+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
35287+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
35288 target_stat_values[++index] = int_mod_timer_init;
35289 target_stat_values[++index] = int_mod_cq_depth_1;
35290 target_stat_values[++index] = int_mod_cq_depth_4;
35291diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
35292index a680c42..f914deb 100644
35293--- a/drivers/infiniband/hw/nes/nes_verbs.c
35294+++ b/drivers/infiniband/hw/nes/nes_verbs.c
35295@@ -45,9 +45,9 @@
35296
35297 #include <rdma/ib_umem.h>
35298
35299-atomic_t mod_qp_timouts;
35300-atomic_t qps_created;
35301-atomic_t sw_qps_destroyed;
35302+atomic_unchecked_t mod_qp_timouts;
35303+atomic_unchecked_t qps_created;
35304+atomic_unchecked_t sw_qps_destroyed;
35305
35306 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
35307
35308@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
35309 if (init_attr->create_flags)
35310 return ERR_PTR(-EINVAL);
35311
35312- atomic_inc(&qps_created);
35313+ atomic_inc_unchecked(&qps_created);
35314 switch (init_attr->qp_type) {
35315 case IB_QPT_RC:
35316 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
35317@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
35318 struct iw_cm_event cm_event;
35319 int ret;
35320
35321- atomic_inc(&sw_qps_destroyed);
35322+ atomic_inc_unchecked(&sw_qps_destroyed);
35323 nesqp->destroyed = 1;
35324
35325 /* Blow away the connection if it exists. */
35326diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
35327index ac11be0..3883c04 100644
35328--- a/drivers/input/gameport/gameport.c
35329+++ b/drivers/input/gameport/gameport.c
35330@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
35331 */
35332 static void gameport_init_port(struct gameport *gameport)
35333 {
35334- static atomic_t gameport_no = ATOMIC_INIT(0);
35335+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
35336
35337 __module_get(THIS_MODULE);
35338
35339 mutex_init(&gameport->drv_mutex);
35340 device_initialize(&gameport->dev);
35341- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
35342+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
35343 gameport->dev.bus = &gameport_bus;
35344 gameport->dev.release = gameport_release_port;
35345 if (gameport->parent)
35346diff --git a/drivers/input/input.c b/drivers/input/input.c
35347index c82ae82..8cfb9cb 100644
35348--- a/drivers/input/input.c
35349+++ b/drivers/input/input.c
35350@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
35351 */
35352 int input_register_device(struct input_dev *dev)
35353 {
35354- static atomic_t input_no = ATOMIC_INIT(0);
35355+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
35356 struct input_handler *handler;
35357 const char *path;
35358 int error;
35359@@ -1585,7 +1585,7 @@ int input_register_device(struct input_dev *dev)
35360 dev->setkeycode = input_default_setkeycode;
35361
35362 dev_set_name(&dev->dev, "input%ld",
35363- (unsigned long) atomic_inc_return(&input_no) - 1);
35364+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
35365
35366 error = device_add(&dev->dev);
35367 if (error)
35368diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
35369index ca13a6b..b032b0c 100644
35370--- a/drivers/input/joystick/sidewinder.c
35371+++ b/drivers/input/joystick/sidewinder.c
35372@@ -30,6 +30,7 @@
35373 #include <linux/kernel.h>
35374 #include <linux/module.h>
35375 #include <linux/slab.h>
35376+#include <linux/sched.h>
35377 #include <linux/init.h>
35378 #include <linux/input.h>
35379 #include <linux/gameport.h>
35380@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
35381 unsigned char buf[SW_LENGTH];
35382 int i;
35383
35384+ pax_track_stack();
35385+
35386 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
35387
35388 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
35389diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
35390index 79e3edc..01412b9 100644
35391--- a/drivers/input/joystick/xpad.c
35392+++ b/drivers/input/joystick/xpad.c
35393@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
35394
35395 static int xpad_led_probe(struct usb_xpad *xpad)
35396 {
35397- static atomic_t led_seq = ATOMIC_INIT(0);
35398+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
35399 long led_no;
35400 struct xpad_led *led;
35401 struct led_classdev *led_cdev;
35402@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
35403 if (!led)
35404 return -ENOMEM;
35405
35406- led_no = (long)atomic_inc_return(&led_seq) - 1;
35407+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
35408
35409 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
35410 led->xpad = xpad;
35411diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
35412index 0236f0d..c7327f1 100644
35413--- a/drivers/input/serio/serio.c
35414+++ b/drivers/input/serio/serio.c
35415@@ -527,7 +527,7 @@ static void serio_release_port(struct device *dev)
35416 */
35417 static void serio_init_port(struct serio *serio)
35418 {
35419- static atomic_t serio_no = ATOMIC_INIT(0);
35420+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
35421
35422 __module_get(THIS_MODULE);
35423
35424@@ -536,7 +536,7 @@ static void serio_init_port(struct serio *serio)
35425 mutex_init(&serio->drv_mutex);
35426 device_initialize(&serio->dev);
35427 dev_set_name(&serio->dev, "serio%ld",
35428- (long)atomic_inc_return(&serio_no) - 1);
35429+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
35430 serio->dev.bus = &serio_bus;
35431 serio->dev.release = serio_release_port;
35432 if (serio->parent) {
35433diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
35434index 33dcd8d..2783d25 100644
35435--- a/drivers/isdn/gigaset/common.c
35436+++ b/drivers/isdn/gigaset/common.c
35437@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
35438 cs->commands_pending = 0;
35439 cs->cur_at_seq = 0;
35440 cs->gotfwver = -1;
35441- cs->open_count = 0;
35442+ local_set(&cs->open_count, 0);
35443 cs->dev = NULL;
35444 cs->tty = NULL;
35445 cs->tty_dev = NULL;
35446diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
35447index a2f6125..6a70677 100644
35448--- a/drivers/isdn/gigaset/gigaset.h
35449+++ b/drivers/isdn/gigaset/gigaset.h
35450@@ -34,6 +34,7 @@
35451 #include <linux/tty_driver.h>
35452 #include <linux/list.h>
35453 #include <asm/atomic.h>
35454+#include <asm/local.h>
35455
35456 #define GIG_VERSION {0,5,0,0}
35457 #define GIG_COMPAT {0,4,0,0}
35458@@ -446,7 +447,7 @@ struct cardstate {
35459 spinlock_t cmdlock;
35460 unsigned curlen, cmdbytes;
35461
35462- unsigned open_count;
35463+ local_t open_count;
35464 struct tty_struct *tty;
35465 struct tasklet_struct if_wake_tasklet;
35466 unsigned control_state;
35467diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
35468index b3065b8..c7e8cc9 100644
35469--- a/drivers/isdn/gigaset/interface.c
35470+++ b/drivers/isdn/gigaset/interface.c
35471@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
35472 return -ERESTARTSYS; // FIXME -EINTR?
35473 tty->driver_data = cs;
35474
35475- ++cs->open_count;
35476-
35477- if (cs->open_count == 1) {
35478+ if (local_inc_return(&cs->open_count) == 1) {
35479 spin_lock_irqsave(&cs->lock, flags);
35480 cs->tty = tty;
35481 spin_unlock_irqrestore(&cs->lock, flags);
35482@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *tty, struct file *filp)
35483
35484 if (!cs->connected)
35485 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35486- else if (!cs->open_count)
35487+ else if (!local_read(&cs->open_count))
35488 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35489 else {
35490- if (!--cs->open_count) {
35491+ if (!local_dec_return(&cs->open_count)) {
35492 spin_lock_irqsave(&cs->lock, flags);
35493 cs->tty = NULL;
35494 spin_unlock_irqrestore(&cs->lock, flags);
35495@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
35496 if (!cs->connected) {
35497 gig_dbg(DEBUG_IF, "not connected");
35498 retval = -ENODEV;
35499- } else if (!cs->open_count)
35500+ } else if (!local_read(&cs->open_count))
35501 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35502 else {
35503 retval = 0;
35504@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
35505 if (!cs->connected) {
35506 gig_dbg(DEBUG_IF, "not connected");
35507 retval = -ENODEV;
35508- } else if (!cs->open_count)
35509+ } else if (!local_read(&cs->open_count))
35510 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35511 else if (cs->mstate != MS_LOCKED) {
35512 dev_warn(cs->dev, "can't write to unlocked device\n");
35513@@ -395,7 +393,7 @@ static int if_write_room(struct tty_struct *tty)
35514 if (!cs->connected) {
35515 gig_dbg(DEBUG_IF, "not connected");
35516 retval = -ENODEV;
35517- } else if (!cs->open_count)
35518+ } else if (!local_read(&cs->open_count))
35519 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35520 else if (cs->mstate != MS_LOCKED) {
35521 dev_warn(cs->dev, "can't write to unlocked device\n");
35522@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty_struct *tty)
35523
35524 if (!cs->connected)
35525 gig_dbg(DEBUG_IF, "not connected");
35526- else if (!cs->open_count)
35527+ else if (!local_read(&cs->open_count))
35528 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35529 else if (cs->mstate != MS_LOCKED)
35530 dev_warn(cs->dev, "can't write to unlocked device\n");
35531@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struct *tty)
35532
35533 if (!cs->connected)
35534 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35535- else if (!cs->open_count)
35536+ else if (!local_read(&cs->open_count))
35537 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35538 else {
35539 //FIXME
35540@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_struct *tty)
35541
35542 if (!cs->connected)
35543 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
35544- else if (!cs->open_count)
35545+ else if (!local_read(&cs->open_count))
35546 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35547 else {
35548 //FIXME
35549@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
35550 goto out;
35551 }
35552
35553- if (!cs->open_count) {
35554+ if (!local_read(&cs->open_count)) {
35555 dev_warn(cs->dev, "%s: device not opened\n", __func__);
35556 goto out;
35557 }
35558diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
35559index a7c0083..62a7cb6 100644
35560--- a/drivers/isdn/hardware/avm/b1.c
35561+++ b/drivers/isdn/hardware/avm/b1.c
35562@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart * t4file)
35563 }
35564 if (left) {
35565 if (t4file->user) {
35566- if (copy_from_user(buf, dp, left))
35567+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35568 return -EFAULT;
35569 } else {
35570 memcpy(buf, dp, left);
35571@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capiloaddatapart * config)
35572 }
35573 if (left) {
35574 if (config->user) {
35575- if (copy_from_user(buf, dp, left))
35576+ if (left > sizeof buf || copy_from_user(buf, dp, left))
35577 return -EFAULT;
35578 } else {
35579 memcpy(buf, dp, left);
35580diff --git a/drivers/isdn/hardware/eicon/capidtmf.c b/drivers/isdn/hardware/eicon/capidtmf.c
35581index f130724..c373c68 100644
35582--- a/drivers/isdn/hardware/eicon/capidtmf.c
35583+++ b/drivers/isdn/hardware/eicon/capidtmf.c
35584@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_state *p_state, byte *buffer, word leng
35585 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
35586 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
35587
35588+ pax_track_stack();
35589
35590 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
35591 {
35592diff --git a/drivers/isdn/hardware/eicon/capifunc.c b/drivers/isdn/hardware/eicon/capifunc.c
35593index 4d425c6..a9be6c4 100644
35594--- a/drivers/isdn/hardware/eicon/capifunc.c
35595+++ b/drivers/isdn/hardware/eicon/capifunc.c
35596@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
35597 IDI_SYNC_REQ req;
35598 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35599
35600+ pax_track_stack();
35601+
35602 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35603
35604 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35605diff --git a/drivers/isdn/hardware/eicon/diddfunc.c b/drivers/isdn/hardware/eicon/diddfunc.c
35606index 3029234..ef0d9e2 100644
35607--- a/drivers/isdn/hardware/eicon/diddfunc.c
35608+++ b/drivers/isdn/hardware/eicon/diddfunc.c
35609@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35610 IDI_SYNC_REQ req;
35611 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35612
35613+ pax_track_stack();
35614+
35615 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35616
35617 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35618diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c
35619index d36a4c0..11e7d1a 100644
35620--- a/drivers/isdn/hardware/eicon/divasfunc.c
35621+++ b/drivers/isdn/hardware/eicon/divasfunc.c
35622@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35623 IDI_SYNC_REQ req;
35624 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35625
35626+ pax_track_stack();
35627+
35628 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35629
35630 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35631diff --git a/drivers/isdn/hardware/eicon/divasync.h b/drivers/isdn/hardware/eicon/divasync.h
35632index 85784a7..a19ca98 100644
35633--- a/drivers/isdn/hardware/eicon/divasync.h
35634+++ b/drivers/isdn/hardware/eicon/divasync.h
35635@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
35636 } diva_didd_add_adapter_t;
35637 typedef struct _diva_didd_remove_adapter {
35638 IDI_CALL p_request;
35639-} diva_didd_remove_adapter_t;
35640+} __no_const diva_didd_remove_adapter_t;
35641 typedef struct _diva_didd_read_adapter_array {
35642 void * buffer;
35643 dword length;
35644diff --git a/drivers/isdn/hardware/eicon/idifunc.c b/drivers/isdn/hardware/eicon/idifunc.c
35645index db87d51..7d09acf 100644
35646--- a/drivers/isdn/hardware/eicon/idifunc.c
35647+++ b/drivers/isdn/hardware/eicon/idifunc.c
35648@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35649 IDI_SYNC_REQ req;
35650 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35651
35652+ pax_track_stack();
35653+
35654 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35655
35656 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35657diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
35658index ae89fb8..0fab299 100644
35659--- a/drivers/isdn/hardware/eicon/message.c
35660+++ b/drivers/isdn/hardware/eicon/message.c
35661@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
35662 dword d;
35663 word w;
35664
35665+ pax_track_stack();
35666+
35667 a = plci->adapter;
35668 Id = ((word)plci->Id<<8)|a->Id;
35669 PUT_WORD(&SS_Ind[4],0x0000);
35670@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE *bp, word b_channel_info,
35671 word j, n, w;
35672 dword d;
35673
35674+ pax_track_stack();
35675+
35676
35677 for(i=0;i<8;i++) bp_parms[i].length = 0;
35678 for(i=0;i<2;i++) global_config[i].length = 0;
35679@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARSE *bp)
35680 const byte llc3[] = {4,3,2,2,6,6,0};
35681 const byte header[] = {0,2,3,3,0,0,0};
35682
35683+ pax_track_stack();
35684+
35685 for(i=0;i<8;i++) bp_parms[i].length = 0;
35686 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
35687 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
35688@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI_ADAPTER * a, PLCI * plci)
35689 word appl_number_group_type[MAX_APPL];
35690 PLCI *auxplci;
35691
35692+ pax_track_stack();
35693+
35694 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
35695
35696 if(!a->group_optimization_enabled)
35697diff --git a/drivers/isdn/hardware/eicon/mntfunc.c b/drivers/isdn/hardware/eicon/mntfunc.c
35698index a564b75..f3cf8b5 100644
35699--- a/drivers/isdn/hardware/eicon/mntfunc.c
35700+++ b/drivers/isdn/hardware/eicon/mntfunc.c
35701@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_didd(void)
35702 IDI_SYNC_REQ req;
35703 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
35704
35705+ pax_track_stack();
35706+
35707 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
35708
35709 for (x = 0; x < MAX_DESCRIPTORS; x++) {
35710diff --git a/drivers/isdn/hardware/eicon/xdi_adapter.h b/drivers/isdn/hardware/eicon/xdi_adapter.h
35711index a3bd163..8956575 100644
35712--- a/drivers/isdn/hardware/eicon/xdi_adapter.h
35713+++ b/drivers/isdn/hardware/eicon/xdi_adapter.h
35714@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
35715 typedef struct _diva_os_idi_adapter_interface {
35716 diva_init_card_proc_t cleanup_adapter_proc;
35717 diva_cmd_card_proc_t cmd_proc;
35718-} diva_os_idi_adapter_interface_t;
35719+} __no_const diva_os_idi_adapter_interface_t;
35720
35721 typedef struct _diva_os_xdi_adapter {
35722 struct list_head link;
35723diff --git a/drivers/isdn/i4l/isdn_common.c b/drivers/isdn/i4l/isdn_common.c
35724index adb1e8c..21b590b 100644
35725--- a/drivers/isdn/i4l/isdn_common.c
35726+++ b/drivers/isdn/i4l/isdn_common.c
35727@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
35728 } iocpar;
35729 void __user *argp = (void __user *)arg;
35730
35731+ pax_track_stack();
35732+
35733 #define name iocpar.name
35734 #define bname iocpar.bname
35735 #define iocts iocpar.iocts
35736diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
35737index bf7997a..cf091db 100644
35738--- a/drivers/isdn/icn/icn.c
35739+++ b/drivers/isdn/icn/icn.c
35740@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len, int user, icn_card * card)
35741 if (count > len)
35742 count = len;
35743 if (user) {
35744- if (copy_from_user(msg, buf, count))
35745+ if (count > sizeof msg || copy_from_user(msg, buf, count))
35746 return -EFAULT;
35747 } else
35748 memcpy(msg, buf, count);
35749diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
35750index feb0fa4..f76f830 100644
35751--- a/drivers/isdn/mISDN/socket.c
35752+++ b/drivers/isdn/mISDN/socket.c
35753@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35754 if (dev) {
35755 struct mISDN_devinfo di;
35756
35757+ memset(&di, 0, sizeof(di));
35758 di.id = dev->id;
35759 di.Dprotocols = dev->Dprotocols;
35760 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35761@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
35762 if (dev) {
35763 struct mISDN_devinfo di;
35764
35765+ memset(&di, 0, sizeof(di));
35766 di.id = dev->id;
35767 di.Dprotocols = dev->Dprotocols;
35768 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
35769diff --git a/drivers/isdn/sc/interrupt.c b/drivers/isdn/sc/interrupt.c
35770index 485be8b..f0225bc 100644
35771--- a/drivers/isdn/sc/interrupt.c
35772+++ b/drivers/isdn/sc/interrupt.c
35773@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35774 }
35775 else if(callid>=0x0000 && callid<=0x7FFF)
35776 {
35777+ int len;
35778+
35779 pr_debug("%s: Got Incoming Call\n",
35780 sc_adapter[card]->devicename);
35781- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
35782- strcpy(setup.eazmsn,
35783- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
35784+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
35785+ sizeof(setup.phone));
35786+ if (len >= sizeof(setup.phone))
35787+ continue;
35788+ len = strlcpy(setup.eazmsn,
35789+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35790+ sizeof(setup.eazmsn));
35791+ if (len >= sizeof(setup.eazmsn))
35792+ continue;
35793 setup.si1 = 7;
35794 setup.si2 = 0;
35795 setup.plan = 0;
35796@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy, void *card_inst)
35797 * Handle a GetMyNumber Rsp
35798 */
35799 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
35800- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
35801+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
35802+ rcvmsg.msg_data.byte_array,
35803+ sizeof(rcvmsg.msg_data.byte_array));
35804 continue;
35805 }
35806
35807diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
35808index 8744d24..d1f9a9a 100644
35809--- a/drivers/lguest/core.c
35810+++ b/drivers/lguest/core.c
35811@@ -91,9 +91,17 @@ static __init int map_switcher(void)
35812 * it's worked so far. The end address needs +1 because __get_vm_area
35813 * allocates an extra guard page, so we need space for that.
35814 */
35815+
35816+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
35817+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35818+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
35819+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35820+#else
35821 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
35822 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
35823 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
35824+#endif
35825+
35826 if (!switcher_vma) {
35827 err = -ENOMEM;
35828 printk("lguest: could not map switcher pages high\n");
35829@@ -118,7 +126,7 @@ static __init int map_switcher(void)
35830 * Now the Switcher is mapped at the right address, we can't fail!
35831 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
35832 */
35833- memcpy(switcher_vma->addr, start_switcher_text,
35834+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
35835 end_switcher_text - start_switcher_text);
35836
35837 printk(KERN_INFO "lguest: mapped switcher at %p\n",
35838diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
35839index 6ae3888..8b38145 100644
35840--- a/drivers/lguest/x86/core.c
35841+++ b/drivers/lguest/x86/core.c
35842@@ -59,7 +59,7 @@ static struct {
35843 /* Offset from where switcher.S was compiled to where we've copied it */
35844 static unsigned long switcher_offset(void)
35845 {
35846- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
35847+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
35848 }
35849
35850 /* This cpu's struct lguest_pages. */
35851@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
35852 * These copies are pretty cheap, so we do them unconditionally: */
35853 /* Save the current Host top-level page directory.
35854 */
35855+
35856+#ifdef CONFIG_PAX_PER_CPU_PGD
35857+ pages->state.host_cr3 = read_cr3();
35858+#else
35859 pages->state.host_cr3 = __pa(current->mm->pgd);
35860+#endif
35861+
35862 /*
35863 * Set up the Guest's page tables to see this CPU's pages (and no
35864 * other CPU's pages).
35865@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
35866 * compiled-in switcher code and the high-mapped copy we just made.
35867 */
35868 for (i = 0; i < IDT_ENTRIES; i++)
35869- default_idt_entries[i] += switcher_offset();
35870+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
35871
35872 /*
35873 * Set up the Switcher's per-cpu areas.
35874@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
35875 * it will be undisturbed when we switch. To change %cs and jump we
35876 * need this structure to feed to Intel's "lcall" instruction.
35877 */
35878- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
35879+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
35880 lguest_entry.segment = LGUEST_CS;
35881
35882 /*
35883diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
35884index 40634b0..4f5855e 100644
35885--- a/drivers/lguest/x86/switcher_32.S
35886+++ b/drivers/lguest/x86/switcher_32.S
35887@@ -87,6 +87,7 @@
35888 #include <asm/page.h>
35889 #include <asm/segment.h>
35890 #include <asm/lguest.h>
35891+#include <asm/processor-flags.h>
35892
35893 // We mark the start of the code to copy
35894 // It's placed in .text tho it's never run here
35895@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
35896 // Changes type when we load it: damn Intel!
35897 // For after we switch over our page tables
35898 // That entry will be read-only: we'd crash.
35899+
35900+#ifdef CONFIG_PAX_KERNEXEC
35901+ mov %cr0, %edx
35902+ xor $X86_CR0_WP, %edx
35903+ mov %edx, %cr0
35904+#endif
35905+
35906 movl $(GDT_ENTRY_TSS*8), %edx
35907 ltr %dx
35908
35909@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
35910 // Let's clear it again for our return.
35911 // The GDT descriptor of the Host
35912 // Points to the table after two "size" bytes
35913- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
35914+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
35915 // Clear "used" from type field (byte 5, bit 2)
35916- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
35917+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
35918+
35919+#ifdef CONFIG_PAX_KERNEXEC
35920+ mov %cr0, %eax
35921+ xor $X86_CR0_WP, %eax
35922+ mov %eax, %cr0
35923+#endif
35924
35925 // Once our page table's switched, the Guest is live!
35926 // The Host fades as we run this final step.
35927@@ -295,13 +309,12 @@ deliver_to_host:
35928 // I consulted gcc, and it gave
35929 // These instructions, which I gladly credit:
35930 leal (%edx,%ebx,8), %eax
35931- movzwl (%eax),%edx
35932- movl 4(%eax), %eax
35933- xorw %ax, %ax
35934- orl %eax, %edx
35935+ movl 4(%eax), %edx
35936+ movw (%eax), %dx
35937 // Now the address of the handler's in %edx
35938 // We call it now: its "iret" drops us home.
35939- jmp *%edx
35940+ ljmp $__KERNEL_CS, $1f
35941+1: jmp *%edx
35942
35943 // Every interrupt can come to us here
35944 // But we must truly tell each apart.
35945diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c
35946index 588a5b0..b71db89 100644
35947--- a/drivers/macintosh/macio_asic.c
35948+++ b/drivers/macintosh/macio_asic.c
35949@@ -701,7 +701,7 @@ static void __devexit macio_pci_remove(struct pci_dev* pdev)
35950 * MacIO is matched against any Apple ID, it's probe() function
35951 * will then decide wether it applies or not
35952 */
35953-static const struct pci_device_id __devinitdata pci_ids [] = { {
35954+static const struct pci_device_id __devinitconst pci_ids [] = { {
35955 .vendor = PCI_VENDOR_ID_APPLE,
35956 .device = PCI_ANY_ID,
35957 .subvendor = PCI_ANY_ID,
35958diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
35959index a348bb0..ecd9b3f 100644
35960--- a/drivers/macintosh/via-pmu-backlight.c
35961+++ b/drivers/macintosh/via-pmu-backlight.c
35962@@ -15,7 +15,7 @@
35963
35964 #define MAX_PMU_LEVEL 0xFF
35965
35966-static struct backlight_ops pmu_backlight_data;
35967+static const struct backlight_ops pmu_backlight_data;
35968 static DEFINE_SPINLOCK(pmu_backlight_lock);
35969 static int sleeping, uses_pmu_bl;
35970 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
35971@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(struct backlight_device *bd)
35972 return bd->props.brightness;
35973 }
35974
35975-static struct backlight_ops pmu_backlight_data = {
35976+static const struct backlight_ops pmu_backlight_data = {
35977 .get_brightness = pmu_backlight_get_brightness,
35978 .update_status = pmu_backlight_update_status,
35979
35980diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c
35981index 6f308a4..b5f7ff7 100644
35982--- a/drivers/macintosh/via-pmu.c
35983+++ b/drivers/macintosh/via-pmu.c
35984@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state_t state)
35985 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
35986 }
35987
35988-static struct platform_suspend_ops pmu_pm_ops = {
35989+static const struct platform_suspend_ops pmu_pm_ops = {
35990 .enter = powerbook_sleep,
35991 .valid = pmu_sleep_valid,
35992 };
35993diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
35994index 818b617..4656e38 100644
35995--- a/drivers/md/dm-ioctl.c
35996+++ b/drivers/md/dm-ioctl.c
35997@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
35998 cmd == DM_LIST_VERSIONS_CMD)
35999 return 0;
36000
36001- if ((cmd == DM_DEV_CREATE_CMD)) {
36002+ if (cmd == DM_DEV_CREATE_CMD) {
36003 if (!*param->name) {
36004 DMWARN("name not supplied when creating device");
36005 return -EINVAL;
36006diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
36007index 6021d0a..a878643 100644
36008--- a/drivers/md/dm-raid1.c
36009+++ b/drivers/md/dm-raid1.c
36010@@ -41,7 +41,7 @@ enum dm_raid1_error {
36011
36012 struct mirror {
36013 struct mirror_set *ms;
36014- atomic_t error_count;
36015+ atomic_unchecked_t error_count;
36016 unsigned long error_type;
36017 struct dm_dev *dev;
36018 sector_t offset;
36019@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36020 * simple way to tell if a device has encountered
36021 * errors.
36022 */
36023- atomic_inc(&m->error_count);
36024+ atomic_inc_unchecked(&m->error_count);
36025
36026 if (test_and_set_bit(error_type, &m->error_type))
36027 return;
36028@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
36029 }
36030
36031 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
36032- if (!atomic_read(&new->error_count)) {
36033+ if (!atomic_read_unchecked(&new->error_count)) {
36034 set_default_mirror(new);
36035 break;
36036 }
36037@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
36038 struct mirror *m = get_default_mirror(ms);
36039
36040 do {
36041- if (likely(!atomic_read(&m->error_count)))
36042+ if (likely(!atomic_read_unchecked(&m->error_count)))
36043 return m;
36044
36045 if (m-- == ms->mirror)
36046@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
36047 {
36048 struct mirror *default_mirror = get_default_mirror(m->ms);
36049
36050- return !atomic_read(&default_mirror->error_count);
36051+ return !atomic_read_unchecked(&default_mirror->error_count);
36052 }
36053
36054 static int mirror_available(struct mirror_set *ms, struct bio *bio)
36055@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
36056 */
36057 if (likely(region_in_sync(ms, region, 1)))
36058 m = choose_mirror(ms, bio->bi_sector);
36059- else if (m && atomic_read(&m->error_count))
36060+ else if (m && atomic_read_unchecked(&m->error_count))
36061 m = NULL;
36062
36063 if (likely(m))
36064@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
36065 }
36066
36067 ms->mirror[mirror].ms = ms;
36068- atomic_set(&(ms->mirror[mirror].error_count), 0);
36069+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
36070 ms->mirror[mirror].error_type = 0;
36071 ms->mirror[mirror].offset = offset;
36072
36073@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_target *ti)
36074 */
36075 static char device_status_char(struct mirror *m)
36076 {
36077- if (!atomic_read(&(m->error_count)))
36078+ if (!atomic_read_unchecked(&(m->error_count)))
36079 return 'A';
36080
36081 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
36082diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
36083index bd58703..9f26571 100644
36084--- a/drivers/md/dm-stripe.c
36085+++ b/drivers/md/dm-stripe.c
36086@@ -20,7 +20,7 @@ struct stripe {
36087 struct dm_dev *dev;
36088 sector_t physical_start;
36089
36090- atomic_t error_count;
36091+ atomic_unchecked_t error_count;
36092 };
36093
36094 struct stripe_c {
36095@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
36096 kfree(sc);
36097 return r;
36098 }
36099- atomic_set(&(sc->stripe[i].error_count), 0);
36100+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
36101 }
36102
36103 ti->private = sc;
36104@@ -257,7 +257,7 @@ static int stripe_status(struct dm_target *ti,
36105 DMEMIT("%d ", sc->stripes);
36106 for (i = 0; i < sc->stripes; i++) {
36107 DMEMIT("%s ", sc->stripe[i].dev->name);
36108- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
36109+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
36110 'D' : 'A';
36111 }
36112 buffer[i] = '\0';
36113@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio,
36114 */
36115 for (i = 0; i < sc->stripes; i++)
36116 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
36117- atomic_inc(&(sc->stripe[i].error_count));
36118- if (atomic_read(&(sc->stripe[i].error_count)) <
36119+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
36120+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
36121 DM_IO_ERROR_THRESHOLD)
36122 queue_work(kstriped, &sc->kstriped_ws);
36123 }
36124diff --git a/drivers/md/dm-sysfs.c b/drivers/md/dm-sysfs.c
36125index 4b04590..13a77b2 100644
36126--- a/drivers/md/dm-sysfs.c
36127+++ b/drivers/md/dm-sysfs.c
36128@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
36129 NULL,
36130 };
36131
36132-static struct sysfs_ops dm_sysfs_ops = {
36133+static const struct sysfs_ops dm_sysfs_ops = {
36134 .show = dm_attr_show,
36135 };
36136
36137diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
36138index 03345bb..332250d 100644
36139--- a/drivers/md/dm-table.c
36140+++ b/drivers/md/dm-table.c
36141@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
36142 if (!dev_size)
36143 return 0;
36144
36145- if ((start >= dev_size) || (start + len > dev_size)) {
36146+ if ((start >= dev_size) || (len > dev_size - start)) {
36147 DMWARN("%s: %s too small for target: "
36148 "start=%llu, len=%llu, dev_size=%llu",
36149 dm_device_name(ti->table->md), bdevname(bdev, b),
36150diff --git a/drivers/md/dm.c b/drivers/md/dm.c
36151index c988ac2..c418141 100644
36152--- a/drivers/md/dm.c
36153+++ b/drivers/md/dm.c
36154@@ -165,9 +165,9 @@ struct mapped_device {
36155 /*
36156 * Event handling.
36157 */
36158- atomic_t event_nr;
36159+ atomic_unchecked_t event_nr;
36160 wait_queue_head_t eventq;
36161- atomic_t uevent_seq;
36162+ atomic_unchecked_t uevent_seq;
36163 struct list_head uevent_list;
36164 spinlock_t uevent_lock; /* Protect access to uevent_list */
36165
36166@@ -1776,8 +1776,8 @@ static struct mapped_device *alloc_dev(int minor)
36167 rwlock_init(&md->map_lock);
36168 atomic_set(&md->holders, 1);
36169 atomic_set(&md->open_count, 0);
36170- atomic_set(&md->event_nr, 0);
36171- atomic_set(&md->uevent_seq, 0);
36172+ atomic_set_unchecked(&md->event_nr, 0);
36173+ atomic_set_unchecked(&md->uevent_seq, 0);
36174 INIT_LIST_HEAD(&md->uevent_list);
36175 spin_lock_init(&md->uevent_lock);
36176
36177@@ -1927,7 +1927,7 @@ static void event_callback(void *context)
36178
36179 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
36180
36181- atomic_inc(&md->event_nr);
36182+ atomic_inc_unchecked(&md->event_nr);
36183 wake_up(&md->eventq);
36184 }
36185
36186@@ -2562,18 +2562,18 @@ void dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
36187
36188 uint32_t dm_next_uevent_seq(struct mapped_device *md)
36189 {
36190- return atomic_add_return(1, &md->uevent_seq);
36191+ return atomic_add_return_unchecked(1, &md->uevent_seq);
36192 }
36193
36194 uint32_t dm_get_event_nr(struct mapped_device *md)
36195 {
36196- return atomic_read(&md->event_nr);
36197+ return atomic_read_unchecked(&md->event_nr);
36198 }
36199
36200 int dm_wait_event(struct mapped_device *md, int event_nr)
36201 {
36202 return wait_event_interruptible(md->eventq,
36203- (event_nr != atomic_read(&md->event_nr)));
36204+ (event_nr != atomic_read_unchecked(&md->event_nr)));
36205 }
36206
36207 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
36208diff --git a/drivers/md/md.c b/drivers/md/md.c
36209index 4ce6e2f..7a9530a 100644
36210--- a/drivers/md/md.c
36211+++ b/drivers/md/md.c
36212@@ -153,10 +153,10 @@ static int start_readonly;
36213 * start build, activate spare
36214 */
36215 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
36216-static atomic_t md_event_count;
36217+static atomic_unchecked_t md_event_count;
36218 void md_new_event(mddev_t *mddev)
36219 {
36220- atomic_inc(&md_event_count);
36221+ atomic_inc_unchecked(&md_event_count);
36222 wake_up(&md_event_waiters);
36223 }
36224 EXPORT_SYMBOL_GPL(md_new_event);
36225@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
36226 */
36227 static void md_new_event_inintr(mddev_t *mddev)
36228 {
36229- atomic_inc(&md_event_count);
36230+ atomic_inc_unchecked(&md_event_count);
36231 wake_up(&md_event_waiters);
36232 }
36233
36234@@ -1226,7 +1226,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
36235
36236 rdev->preferred_minor = 0xffff;
36237 rdev->data_offset = le64_to_cpu(sb->data_offset);
36238- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36239+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
36240
36241 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
36242 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
36243@@ -1400,7 +1400,7 @@ static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
36244 else
36245 sb->resync_offset = cpu_to_le64(0);
36246
36247- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
36248+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
36249
36250 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
36251 sb->size = cpu_to_le64(mddev->dev_sectors);
36252@@ -2222,7 +2222,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
36253 static ssize_t
36254 errors_show(mdk_rdev_t *rdev, char *page)
36255 {
36256- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
36257+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
36258 }
36259
36260 static ssize_t
36261@@ -2231,7 +2231,7 @@ errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
36262 char *e;
36263 unsigned long n = simple_strtoul(buf, &e, 10);
36264 if (*buf && (*e == 0 || *e == '\n')) {
36265- atomic_set(&rdev->corrected_errors, n);
36266+ atomic_set_unchecked(&rdev->corrected_errors, n);
36267 return len;
36268 }
36269 return -EINVAL;
36270@@ -2525,7 +2525,7 @@ static void rdev_free(struct kobject *ko)
36271 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
36272 kfree(rdev);
36273 }
36274-static struct sysfs_ops rdev_sysfs_ops = {
36275+static const struct sysfs_ops rdev_sysfs_ops = {
36276 .show = rdev_attr_show,
36277 .store = rdev_attr_store,
36278 };
36279@@ -2574,8 +2574,8 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_mi
36280 rdev->data_offset = 0;
36281 rdev->sb_events = 0;
36282 atomic_set(&rdev->nr_pending, 0);
36283- atomic_set(&rdev->read_errors, 0);
36284- atomic_set(&rdev->corrected_errors, 0);
36285+ atomic_set_unchecked(&rdev->read_errors, 0);
36286+ atomic_set_unchecked(&rdev->corrected_errors, 0);
36287
36288 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
36289 if (!size) {
36290@@ -3895,7 +3895,7 @@ static void md_free(struct kobject *ko)
36291 kfree(mddev);
36292 }
36293
36294-static struct sysfs_ops md_sysfs_ops = {
36295+static const struct sysfs_ops md_sysfs_ops = {
36296 .show = md_attr_show,
36297 .store = md_attr_store,
36298 };
36299@@ -4482,7 +4482,8 @@ out:
36300 err = 0;
36301 blk_integrity_unregister(disk);
36302 md_new_event(mddev);
36303- sysfs_notify_dirent(mddev->sysfs_state);
36304+ if (mddev->sysfs_state)
36305+ sysfs_notify_dirent(mddev->sysfs_state);
36306 return err;
36307 }
36308
36309@@ -5962,7 +5963,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36310
36311 spin_unlock(&pers_lock);
36312 seq_printf(seq, "\n");
36313- mi->event = atomic_read(&md_event_count);
36314+ mi->event = atomic_read_unchecked(&md_event_count);
36315 return 0;
36316 }
36317 if (v == (void*)2) {
36318@@ -6051,7 +6052,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
36319 chunk_kb ? "KB" : "B");
36320 if (bitmap->file) {
36321 seq_printf(seq, ", file: ");
36322- seq_path(seq, &bitmap->file->f_path, " \t\n");
36323+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
36324 }
36325
36326 seq_printf(seq, "\n");
36327@@ -6085,7 +6086,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
36328 else {
36329 struct seq_file *p = file->private_data;
36330 p->private = mi;
36331- mi->event = atomic_read(&md_event_count);
36332+ mi->event = atomic_read_unchecked(&md_event_count);
36333 }
36334 return error;
36335 }
36336@@ -6101,7 +6102,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
36337 /* always allow read */
36338 mask = POLLIN | POLLRDNORM;
36339
36340- if (mi->event != atomic_read(&md_event_count))
36341+ if (mi->event != atomic_read_unchecked(&md_event_count))
36342 mask |= POLLERR | POLLPRI;
36343 return mask;
36344 }
36345@@ -6145,7 +6146,7 @@ static int is_mddev_idle(mddev_t *mddev, int init)
36346 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
36347 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
36348 (int)part_stat_read(&disk->part0, sectors[1]) -
36349- atomic_read(&disk->sync_io);
36350+ atomic_read_unchecked(&disk->sync_io);
36351 /* sync IO will cause sync_io to increase before the disk_stats
36352 * as sync_io is counted when a request starts, and
36353 * disk_stats is counted when it completes.
36354diff --git a/drivers/md/md.h b/drivers/md/md.h
36355index 87430fe..0024a4c 100644
36356--- a/drivers/md/md.h
36357+++ b/drivers/md/md.h
36358@@ -94,10 +94,10 @@ struct mdk_rdev_s
36359 * only maintained for arrays that
36360 * support hot removal
36361 */
36362- atomic_t read_errors; /* number of consecutive read errors that
36363+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
36364 * we have tried to ignore.
36365 */
36366- atomic_t corrected_errors; /* number of corrected read errors,
36367+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
36368 * for reporting to userspace and storing
36369 * in superblock.
36370 */
36371@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_rdev_t *rdev, mddev_t *mddev)
36372
36373 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
36374 {
36375- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36376+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
36377 }
36378
36379 struct mdk_personality
36380diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
36381index 968cb14..f0ad2e4 100644
36382--- a/drivers/md/raid1.c
36383+++ b/drivers/md/raid1.c
36384@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
36385 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
36386 continue;
36387 rdev = conf->mirrors[d].rdev;
36388- atomic_add(s, &rdev->corrected_errors);
36389+ atomic_add_unchecked(s, &rdev->corrected_errors);
36390 if (sync_page_io(rdev->bdev,
36391 sect + rdev->data_offset,
36392 s<<9,
36393@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf, int read_disk,
36394 /* Well, this device is dead */
36395 md_error(mddev, rdev);
36396 else {
36397- atomic_add(s, &rdev->corrected_errors);
36398+ atomic_add_unchecked(s, &rdev->corrected_errors);
36399 printk(KERN_INFO
36400 "raid1:%s: read error corrected "
36401 "(%d sectors at %llu on %s)\n",
36402diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
36403index 1b4e232..cf0f534 100644
36404--- a/drivers/md/raid10.c
36405+++ b/drivers/md/raid10.c
36406@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bio, int error)
36407 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
36408 set_bit(R10BIO_Uptodate, &r10_bio->state);
36409 else {
36410- atomic_add(r10_bio->sectors,
36411+ atomic_add_unchecked(r10_bio->sectors,
36412 &conf->mirrors[d].rdev->corrected_errors);
36413 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
36414 md_error(r10_bio->mddev,
36415@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf, mddev_t *mddev, r10bio_t *r10_bio)
36416 test_bit(In_sync, &rdev->flags)) {
36417 atomic_inc(&rdev->nr_pending);
36418 rcu_read_unlock();
36419- atomic_add(s, &rdev->corrected_errors);
36420+ atomic_add_unchecked(s, &rdev->corrected_errors);
36421 if (sync_page_io(rdev->bdev,
36422 r10_bio->devs[sl].addr +
36423 sect + rdev->data_offset,
36424diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
36425index 883215d..675bf47 100644
36426--- a/drivers/md/raid5.c
36427+++ b/drivers/md/raid5.c
36428@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
36429 bi->bi_next = NULL;
36430 if ((rw & WRITE) &&
36431 test_bit(R5_ReWrite, &sh->dev[i].flags))
36432- atomic_add(STRIPE_SECTORS,
36433+ atomic_add_unchecked(STRIPE_SECTORS,
36434 &rdev->corrected_errors);
36435 generic_make_request(bi);
36436 } else {
36437@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struct bio * bi, int error)
36438 clear_bit(R5_ReadError, &sh->dev[i].flags);
36439 clear_bit(R5_ReWrite, &sh->dev[i].flags);
36440 }
36441- if (atomic_read(&conf->disks[i].rdev->read_errors))
36442- atomic_set(&conf->disks[i].rdev->read_errors, 0);
36443+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
36444+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
36445 } else {
36446 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
36447 int retry = 0;
36448 rdev = conf->disks[i].rdev;
36449
36450 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
36451- atomic_inc(&rdev->read_errors);
36452+ atomic_inc_unchecked(&rdev->read_errors);
36453 if (conf->mddev->degraded >= conf->max_degraded)
36454 printk_rl(KERN_WARNING
36455 "raid5:%s: read error not correctable "
36456@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
36457 (unsigned long long)(sh->sector
36458 + rdev->data_offset),
36459 bdn);
36460- else if (atomic_read(&rdev->read_errors)
36461+ else if (atomic_read_unchecked(&rdev->read_errors)
36462 > conf->max_nr_stripes)
36463 printk(KERN_WARNING
36464 "raid5:%s: Too many read errors, failing device %s.\n",
36465@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
36466 sector_t r_sector;
36467 struct stripe_head sh2;
36468
36469+ pax_track_stack();
36470
36471 chunk_offset = sector_div(new_sector, sectors_per_chunk);
36472 stripe = new_sector;
36473diff --git a/drivers/media/common/saa7146_hlp.c b/drivers/media/common/saa7146_hlp.c
36474index 05bde9c..2f31d40 100644
36475--- a/drivers/media/common/saa7146_hlp.c
36476+++ b/drivers/media/common/saa7146_hlp.c
36477@@ -353,6 +353,8 @@ static void calculate_clipping_registers_rect(struct saa7146_dev *dev, struct sa
36478
36479 int x[32], y[32], w[32], h[32];
36480
36481+ pax_track_stack();
36482+
36483 /* clear out memory */
36484 memset(&line_list[0], 0x00, sizeof(u32)*32);
36485 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
36486diff --git a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36487index cb22da5..82b686e 100644
36488--- a/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36489+++ b/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
36490@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot, u8 * eb
36491 u8 buf[HOST_LINK_BUF_SIZE];
36492 int i;
36493
36494+ pax_track_stack();
36495+
36496 dprintk("%s\n", __func__);
36497
36498 /* check if we have space for a link buf in the rx_buffer */
36499@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(struct file *file,
36500 unsigned long timeout;
36501 int written;
36502
36503+ pax_track_stack();
36504+
36505 dprintk("%s\n", __func__);
36506
36507 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
36508diff --git a/drivers/media/dvb/dvb-core/dvb_demux.h b/drivers/media/dvb/dvb-core/dvb_demux.h
36509index 2fe05d0..a3289c4 100644
36510--- a/drivers/media/dvb/dvb-core/dvb_demux.h
36511+++ b/drivers/media/dvb/dvb-core/dvb_demux.h
36512@@ -71,7 +71,7 @@ struct dvb_demux_feed {
36513 union {
36514 dmx_ts_cb ts;
36515 dmx_section_cb sec;
36516- } cb;
36517+ } __no_const cb;
36518
36519 struct dvb_demux *demux;
36520 void *priv;
36521diff --git a/drivers/media/dvb/dvb-core/dvbdev.c b/drivers/media/dvb/dvb-core/dvbdev.c
36522index 94159b9..376bd8e 100644
36523--- a/drivers/media/dvb/dvb-core/dvbdev.c
36524+++ b/drivers/media/dvb/dvb-core/dvbdev.c
36525@@ -191,7 +191,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
36526 const struct dvb_device *template, void *priv, int type)
36527 {
36528 struct dvb_device *dvbdev;
36529- struct file_operations *dvbdevfops;
36530+ file_operations_no_const *dvbdevfops;
36531 struct device *clsdev;
36532 int minor;
36533 int id;
36534diff --git a/drivers/media/dvb/dvb-usb/cxusb.c b/drivers/media/dvb/dvb-usb/cxusb.c
36535index 2a53dd0..db8c07a 100644
36536--- a/drivers/media/dvb/dvb-usb/cxusb.c
36537+++ b/drivers/media/dvb/dvb-usb/cxusb.c
36538@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
36539 struct dib0700_adapter_state {
36540 int (*set_param_save) (struct dvb_frontend *,
36541 struct dvb_frontend_parameters *);
36542-};
36543+} __no_const;
36544
36545 static int dib7070_set_param_override(struct dvb_frontend *fe,
36546 struct dvb_frontend_parameters *fep)
36547diff --git a/drivers/media/dvb/dvb-usb/dib0700_core.c b/drivers/media/dvb/dvb-usb/dib0700_core.c
36548index db7f7f7..f55e96f 100644
36549--- a/drivers/media/dvb/dvb-usb/dib0700_core.c
36550+++ b/drivers/media/dvb/dvb-usb/dib0700_core.c
36551@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb_device *udev, const struct firmware *fw
36552
36553 u8 buf[260];
36554
36555+ pax_track_stack();
36556+
36557 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
36558 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
36559
36560diff --git a/drivers/media/dvb/dvb-usb/dib0700_devices.c b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36561index 524acf5..5ffc403 100644
36562--- a/drivers/media/dvb/dvb-usb/dib0700_devices.c
36563+++ b/drivers/media/dvb/dvb-usb/dib0700_devices.c
36564@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "force the activation of Low-Noise-Amplif
36565
36566 struct dib0700_adapter_state {
36567 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
36568-};
36569+} __no_const;
36570
36571 /* Hauppauge Nova-T 500 (aka Bristol)
36572 * has a LNA on GPIO0 which is enabled by setting 1 */
36573diff --git a/drivers/media/dvb/frontends/dib3000.h b/drivers/media/dvb/frontends/dib3000.h
36574index ba91735..4261d84 100644
36575--- a/drivers/media/dvb/frontends/dib3000.h
36576+++ b/drivers/media/dvb/frontends/dib3000.h
36577@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
36578 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
36579 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
36580 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
36581-};
36582+} __no_const;
36583
36584 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
36585 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
36586diff --git a/drivers/media/dvb/frontends/or51211.c b/drivers/media/dvb/frontends/or51211.c
36587index c709ce6..b3fe620 100644
36588--- a/drivers/media/dvb/frontends/or51211.c
36589+++ b/drivers/media/dvb/frontends/or51211.c
36590@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct dvb_frontend* fe,
36591 u8 tudata[585];
36592 int i;
36593
36594+ pax_track_stack();
36595+
36596 dprintk("Firmware is %zd bytes\n",fw->size);
36597
36598 /* Get eprom data */
36599diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
36600index 482d0f3..ee1e202 100644
36601--- a/drivers/media/radio/radio-cadet.c
36602+++ b/drivers/media/radio/radio-cadet.c
36603@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
36604 while (i < count && dev->rdsin != dev->rdsout)
36605 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
36606
36607- if (copy_to_user(data, readbuf, i))
36608+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
36609 return -EFAULT;
36610 return i;
36611 }
36612diff --git a/drivers/media/video/cx18/cx18-driver.c b/drivers/media/video/cx18/cx18-driver.c
36613index 6dd51e2..0359b92 100644
36614--- a/drivers/media/video/cx18/cx18-driver.c
36615+++ b/drivers/media/video/cx18/cx18-driver.c
36616@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl[] __devinitdata = {
36617
36618 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
36619
36620-static atomic_t cx18_instance = ATOMIC_INIT(0);
36621+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
36622
36623 /* Parameter declarations */
36624 static int cardtype[CX18_MAX_CARDS];
36625@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, struct tveeprom *tv)
36626 struct i2c_client c;
36627 u8 eedata[256];
36628
36629+ pax_track_stack();
36630+
36631 memset(&c, 0, sizeof(c));
36632 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
36633 c.adapter = &cx->i2c_adap[0];
36634@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct pci_dev *pci_dev,
36635 struct cx18 *cx;
36636
36637 /* FIXME - module parameter arrays constrain max instances */
36638- i = atomic_inc_return(&cx18_instance) - 1;
36639+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
36640 if (i >= CX18_MAX_CARDS) {
36641 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
36642 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
36643diff --git a/drivers/media/video/ivtv/ivtv-driver.c b/drivers/media/video/ivtv/ivtv-driver.c
36644index 463ec34..2f4625a 100644
36645--- a/drivers/media/video/ivtv/ivtv-driver.c
36646+++ b/drivers/media/video/ivtv/ivtv-driver.c
36647@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl[] __devinitdata = {
36648 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
36649
36650 /* ivtv instance counter */
36651-static atomic_t ivtv_instance = ATOMIC_INIT(0);
36652+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
36653
36654 /* Parameter declarations */
36655 static int cardtype[IVTV_MAX_CARDS];
36656diff --git a/drivers/media/video/omap24xxcam.c b/drivers/media/video/omap24xxcam.c
36657index 5fc4ac0..652a54a 100644
36658--- a/drivers/media/video/omap24xxcam.c
36659+++ b/drivers/media/video/omap24xxcam.c
36660@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
36661 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
36662
36663 do_gettimeofday(&vb->ts);
36664- vb->field_count = atomic_add_return(2, &fh->field_count);
36665+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
36666 if (csr & csr_error) {
36667 vb->state = VIDEOBUF_ERROR;
36668 if (!atomic_read(&fh->cam->in_reset)) {
36669diff --git a/drivers/media/video/omap24xxcam.h b/drivers/media/video/omap24xxcam.h
36670index 2ce67f5..cf26a5b 100644
36671--- a/drivers/media/video/omap24xxcam.h
36672+++ b/drivers/media/video/omap24xxcam.h
36673@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
36674 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
36675 struct videobuf_queue vbq;
36676 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
36677- atomic_t field_count; /* field counter for videobuf_buffer */
36678+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
36679 /* accessing cam here doesn't need serialisation: it's constant */
36680 struct omap24xxcam_device *cam;
36681 };
36682diff --git a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36683index 299afa4..eb47459 100644
36684--- a/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36685+++ b/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
36686@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw *hdw)
36687 u8 *eeprom;
36688 struct tveeprom tvdata;
36689
36690+ pax_track_stack();
36691+
36692 memset(&tvdata,0,sizeof(tvdata));
36693
36694 eeprom = pvr2_eeprom_fetch(hdw);
36695diff --git a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36696index 5b152ff..3320638 100644
36697--- a/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36698+++ b/drivers/media/video/pvrusb2/pvrusb2-hdw-internal.h
36699@@ -195,7 +195,7 @@ struct pvr2_hdw {
36700
36701 /* I2C stuff */
36702 struct i2c_adapter i2c_adap;
36703- struct i2c_algorithm i2c_algo;
36704+ i2c_algorithm_no_const i2c_algo;
36705 pvr2_i2c_func i2c_func[PVR2_I2C_FUNC_CNT];
36706 int i2c_cx25840_hack_state;
36707 int i2c_linked;
36708diff --git a/drivers/media/video/saa7134/saa6752hs.c b/drivers/media/video/saa7134/saa6752hs.c
36709index 1eabff6..8e2313a 100644
36710--- a/drivers/media/video/saa7134/saa6752hs.c
36711+++ b/drivers/media/video/saa7134/saa6752hs.c
36712@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_subdev *sd, u32 leading_null_bytes)
36713 unsigned char localPAT[256];
36714 unsigned char localPMT[256];
36715
36716+ pax_track_stack();
36717+
36718 /* Set video format - must be done first as it resets other settings */
36719 set_reg8(client, 0x41, h->video_format);
36720
36721diff --git a/drivers/media/video/saa7164/saa7164-cmd.c b/drivers/media/video/saa7164/saa7164-cmd.c
36722index 9c1d3ac..b1b49e9 100644
36723--- a/drivers/media/video/saa7164/saa7164-cmd.c
36724+++ b/drivers/media/video/saa7164/saa7164-cmd.c
36725@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_dev *dev)
36726 wait_queue_head_t *q = 0;
36727 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36728
36729+ pax_track_stack();
36730+
36731 /* While any outstand message on the bus exists... */
36732 do {
36733
36734@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_dev *dev)
36735 u8 tmp[512];
36736 dprintk(DBGLVL_CMD, "%s()\n", __func__);
36737
36738+ pax_track_stack();
36739+
36740 while (loop) {
36741
36742 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
36743diff --git a/drivers/media/video/usbvideo/ibmcam.c b/drivers/media/video/usbvideo/ibmcam.c
36744index b085496..cde0270 100644
36745--- a/drivers/media/video/usbvideo/ibmcam.c
36746+++ b/drivers/media/video/usbvideo/ibmcam.c
36747@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] = {
36748 static int __init ibmcam_init(void)
36749 {
36750 struct usbvideo_cb cbTbl;
36751- memset(&cbTbl, 0, sizeof(cbTbl));
36752- cbTbl.probe = ibmcam_probe;
36753- cbTbl.setupOnOpen = ibmcam_setup_on_open;
36754- cbTbl.videoStart = ibmcam_video_start;
36755- cbTbl.videoStop = ibmcam_video_stop;
36756- cbTbl.processData = ibmcam_ProcessIsocData;
36757- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36758- cbTbl.adjustPicture = ibmcam_adjust_picture;
36759- cbTbl.getFPS = ibmcam_calculate_fps;
36760+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
36761+ *(void **)&cbTbl.probe = ibmcam_probe;
36762+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
36763+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
36764+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
36765+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
36766+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36767+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
36768+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
36769 return usbvideo_register(
36770 &cams,
36771 MAX_IBMCAM,
36772diff --git a/drivers/media/video/usbvideo/konicawc.c b/drivers/media/video/usbvideo/konicawc.c
36773index 31d57f2..600b735 100644
36774--- a/drivers/media/video/usbvideo/konicawc.c
36775+++ b/drivers/media/video/usbvideo/konicawc.c
36776@@ -225,7 +225,7 @@ static void konicawc_register_input(struct konicawc *cam, struct usb_device *dev
36777 int error;
36778
36779 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36780- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36781+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36782
36783 cam->input = input_dev = input_allocate_device();
36784 if (!input_dev) {
36785@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
36786 struct usbvideo_cb cbTbl;
36787 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
36788 DRIVER_DESC "\n");
36789- memset(&cbTbl, 0, sizeof(cbTbl));
36790- cbTbl.probe = konicawc_probe;
36791- cbTbl.setupOnOpen = konicawc_setup_on_open;
36792- cbTbl.processData = konicawc_process_isoc;
36793- cbTbl.getFPS = konicawc_calculate_fps;
36794- cbTbl.setVideoMode = konicawc_set_video_mode;
36795- cbTbl.startDataPump = konicawc_start_data;
36796- cbTbl.stopDataPump = konicawc_stop_data;
36797- cbTbl.adjustPicture = konicawc_adjust_picture;
36798- cbTbl.userFree = konicawc_free_uvd;
36799+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
36800+ *(void **)&cbTbl.probe = konicawc_probe;
36801+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
36802+ *(void **)&cbTbl.processData = konicawc_process_isoc;
36803+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
36804+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
36805+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
36806+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
36807+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
36808+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
36809 return usbvideo_register(
36810 &cams,
36811 MAX_CAMERAS,
36812diff --git a/drivers/media/video/usbvideo/quickcam_messenger.c b/drivers/media/video/usbvideo/quickcam_messenger.c
36813index 803d3e4..c4d1b96 100644
36814--- a/drivers/media/video/usbvideo/quickcam_messenger.c
36815+++ b/drivers/media/video/usbvideo/quickcam_messenger.c
36816@@ -89,7 +89,7 @@ static void qcm_register_input(struct qcm *cam, struct usb_device *dev)
36817 int error;
36818
36819 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
36820- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36821+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
36822
36823 cam->input = input_dev = input_allocate_device();
36824 if (!input_dev) {
36825diff --git a/drivers/media/video/usbvideo/ultracam.c b/drivers/media/video/usbvideo/ultracam.c
36826index fbd1b63..292f9f0 100644
36827--- a/drivers/media/video/usbvideo/ultracam.c
36828+++ b/drivers/media/video/usbvideo/ultracam.c
36829@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
36830 {
36831 struct usbvideo_cb cbTbl;
36832 memset(&cbTbl, 0, sizeof(cbTbl));
36833- cbTbl.probe = ultracam_probe;
36834- cbTbl.setupOnOpen = ultracam_setup_on_open;
36835- cbTbl.videoStart = ultracam_video_start;
36836- cbTbl.videoStop = ultracam_video_stop;
36837- cbTbl.processData = ultracam_ProcessIsocData;
36838- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36839- cbTbl.adjustPicture = ultracam_adjust_picture;
36840- cbTbl.getFPS = ultracam_calculate_fps;
36841+ *(void **)&cbTbl.probe = ultracam_probe;
36842+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
36843+ *(void **)&cbTbl.videoStart = ultracam_video_start;
36844+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
36845+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
36846+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
36847+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
36848+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
36849 return usbvideo_register(
36850 &cams,
36851 MAX_CAMERAS,
36852diff --git a/drivers/media/video/usbvideo/usbvideo.c b/drivers/media/video/usbvideo/usbvideo.c
36853index dea8b32..34f6878 100644
36854--- a/drivers/media/video/usbvideo/usbvideo.c
36855+++ b/drivers/media/video/usbvideo/usbvideo.c
36856@@ -697,15 +697,15 @@ int usbvideo_register(
36857 __func__, cams, base_size, num_cams);
36858
36859 /* Copy callbacks, apply defaults for those that are not set */
36860- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
36861+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
36862 if (cams->cb.getFrame == NULL)
36863- cams->cb.getFrame = usbvideo_GetFrame;
36864+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
36865 if (cams->cb.disconnect == NULL)
36866- cams->cb.disconnect = usbvideo_Disconnect;
36867+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
36868 if (cams->cb.startDataPump == NULL)
36869- cams->cb.startDataPump = usbvideo_StartDataPump;
36870+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
36871 if (cams->cb.stopDataPump == NULL)
36872- cams->cb.stopDataPump = usbvideo_StopDataPump;
36873+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
36874
36875 cams->num_cameras = num_cams;
36876 cams->cam = (struct uvd *) &cams[1];
36877diff --git a/drivers/media/video/usbvideo/usbvideo.h b/drivers/media/video/usbvideo/usbvideo.h
36878index c66985b..7fa143a 100644
36879--- a/drivers/media/video/usbvideo/usbvideo.h
36880+++ b/drivers/media/video/usbvideo/usbvideo.h
36881@@ -268,7 +268,7 @@ struct usbvideo_cb {
36882 int (*startDataPump)(struct uvd *uvd);
36883 void (*stopDataPump)(struct uvd *uvd);
36884 int (*setVideoMode)(struct uvd *uvd, struct video_window *vw);
36885-};
36886+} __no_const;
36887
36888 struct usbvideo {
36889 int num_cameras; /* As allocated */
36890diff --git a/drivers/media/video/usbvision/usbvision-core.c b/drivers/media/video/usbvision/usbvision-core.c
36891index e0f91e4..37554ea 100644
36892--- a/drivers/media/video/usbvision/usbvision-core.c
36893+++ b/drivers/media/video/usbvision/usbvision-core.c
36894@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_compress(struct usb_usbvision *usbvision,
36895 unsigned char rv, gv, bv;
36896 static unsigned char *Y, *U, *V;
36897
36898+ pax_track_stack();
36899+
36900 frame = usbvision->curFrame;
36901 imageSize = frame->frmwidth * frame->frmheight;
36902 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
36903diff --git a/drivers/media/video/v4l2-device.c b/drivers/media/video/v4l2-device.c
36904index 0d06e7c..3d17d24 100644
36905--- a/drivers/media/video/v4l2-device.c
36906+++ b/drivers/media/video/v4l2-device.c
36907@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *dev, struct v4l2_device *v4l2_dev)
36908 EXPORT_SYMBOL_GPL(v4l2_device_register);
36909
36910 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
36911- atomic_t *instance)
36912+ atomic_unchecked_t *instance)
36913 {
36914- int num = atomic_inc_return(instance) - 1;
36915+ int num = atomic_inc_return_unchecked(instance) - 1;
36916 int len = strlen(basename);
36917
36918 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
36919diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
36920index 032ebae..6a3532c 100644
36921--- a/drivers/media/video/videobuf-dma-sg.c
36922+++ b/drivers/media/video/videobuf-dma-sg.c
36923@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
36924 {
36925 struct videobuf_queue q;
36926
36927+ pax_track_stack();
36928+
36929 /* Required to make generic handler to call __videobuf_alloc */
36930 q.int_ops = &sg_ops;
36931
36932diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
36933index b6992b7..9fa7547 100644
36934--- a/drivers/message/fusion/mptbase.c
36935+++ b/drivers/message/fusion/mptbase.c
36936@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eo
36937 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
36938 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
36939
36940+#ifdef CONFIG_GRKERNSEC_HIDESYM
36941+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36942+ NULL, NULL);
36943+#else
36944 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
36945 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
36946+#endif
36947+
36948 /*
36949 * Rounding UP to nearest 4-kB boundary here...
36950 */
36951diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
36952index 83873e3..e360e9a 100644
36953--- a/drivers/message/fusion/mptsas.c
36954+++ b/drivers/message/fusion/mptsas.c
36955@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
36956 return 0;
36957 }
36958
36959+static inline void
36960+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36961+{
36962+ if (phy_info->port_details) {
36963+ phy_info->port_details->rphy = rphy;
36964+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36965+ ioc->name, rphy));
36966+ }
36967+
36968+ if (rphy) {
36969+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36970+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36971+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36972+ ioc->name, rphy, rphy->dev.release));
36973+ }
36974+}
36975+
36976 /* no mutex */
36977 static void
36978 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
36979@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
36980 return NULL;
36981 }
36982
36983-static inline void
36984-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
36985-{
36986- if (phy_info->port_details) {
36987- phy_info->port_details->rphy = rphy;
36988- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
36989- ioc->name, rphy));
36990- }
36991-
36992- if (rphy) {
36993- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
36994- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
36995- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
36996- ioc->name, rphy, rphy->dev.release));
36997- }
36998-}
36999-
37000 static inline struct sas_port *
37001 mptsas_get_port(struct mptsas_phyinfo *phy_info)
37002 {
37003diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
37004index bd096ca..332cf76 100644
37005--- a/drivers/message/fusion/mptscsih.c
37006+++ b/drivers/message/fusion/mptscsih.c
37007@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
37008
37009 h = shost_priv(SChost);
37010
37011- if (h) {
37012- if (h->info_kbuf == NULL)
37013- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37014- return h->info_kbuf;
37015- h->info_kbuf[0] = '\0';
37016+ if (!h)
37017+ return NULL;
37018
37019- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37020- h->info_kbuf[size-1] = '\0';
37021- }
37022+ if (h->info_kbuf == NULL)
37023+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
37024+ return h->info_kbuf;
37025+ h->info_kbuf[0] = '\0';
37026+
37027+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
37028+ h->info_kbuf[size-1] = '\0';
37029
37030 return h->info_kbuf;
37031 }
37032diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
37033index efba702..59b2c0f 100644
37034--- a/drivers/message/i2o/i2o_config.c
37035+++ b/drivers/message/i2o/i2o_config.c
37036@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned long arg)
37037 struct i2o_message *msg;
37038 unsigned int iop;
37039
37040+ pax_track_stack();
37041+
37042 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
37043 return -EFAULT;
37044
37045diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
37046index 7045c45..c07b170 100644
37047--- a/drivers/message/i2o/i2o_proc.c
37048+++ b/drivers/message/i2o/i2o_proc.c
37049@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
37050 "Array Controller Device"
37051 };
37052
37053-static char *chtostr(u8 * chars, int n)
37054-{
37055- char tmp[256];
37056- tmp[0] = 0;
37057- return strncat(tmp, (char *)chars, n);
37058-}
37059-
37060 static int i2o_report_query_status(struct seq_file *seq, int block_status,
37061 char *group)
37062 {
37063@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
37064
37065 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
37066 seq_printf(seq, "%-#8x", ddm_table.module_id);
37067- seq_printf(seq, "%-29s",
37068- chtostr(ddm_table.module_name_version, 28));
37069+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
37070 seq_printf(seq, "%9d ", ddm_table.data_size);
37071 seq_printf(seq, "%8d", ddm_table.code_size);
37072
37073@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
37074
37075 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
37076 seq_printf(seq, "%-#8x", dst->module_id);
37077- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
37078- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
37079+ seq_printf(seq, "%-.28s", dst->module_name_version);
37080+ seq_printf(seq, "%-.8s", dst->date);
37081 seq_printf(seq, "%8d ", dst->module_size);
37082 seq_printf(seq, "%8d ", dst->mpb_size);
37083 seq_printf(seq, "0x%04x", dst->module_flags);
37084@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
37085 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
37086 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
37087 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
37088- seq_printf(seq, "Vendor info : %s\n",
37089- chtostr((u8 *) (work32 + 2), 16));
37090- seq_printf(seq, "Product info : %s\n",
37091- chtostr((u8 *) (work32 + 6), 16));
37092- seq_printf(seq, "Description : %s\n",
37093- chtostr((u8 *) (work32 + 10), 16));
37094- seq_printf(seq, "Product rev. : %s\n",
37095- chtostr((u8 *) (work32 + 14), 8));
37096+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
37097+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
37098+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
37099+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
37100
37101 seq_printf(seq, "Serial number : ");
37102 print_serial_number(seq, (u8 *) (work32 + 16),
37103@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
37104 }
37105
37106 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
37107- seq_printf(seq, "Module name : %s\n",
37108- chtostr(result.module_name, 24));
37109- seq_printf(seq, "Module revision : %s\n",
37110- chtostr(result.module_rev, 8));
37111+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
37112+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
37113
37114 seq_printf(seq, "Serial number : ");
37115 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
37116@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
37117 return 0;
37118 }
37119
37120- seq_printf(seq, "Device name : %s\n",
37121- chtostr(result.device_name, 64));
37122- seq_printf(seq, "Service name : %s\n",
37123- chtostr(result.service_name, 64));
37124- seq_printf(seq, "Physical name : %s\n",
37125- chtostr(result.physical_location, 64));
37126- seq_printf(seq, "Instance number : %s\n",
37127- chtostr(result.instance_number, 4));
37128+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
37129+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
37130+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
37131+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
37132
37133 return 0;
37134 }
37135diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
37136index 27cf4af..b1205b8 100644
37137--- a/drivers/message/i2o/iop.c
37138+++ b/drivers/message/i2o/iop.c
37139@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
37140
37141 spin_lock_irqsave(&c->context_list_lock, flags);
37142
37143- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
37144- atomic_inc(&c->context_list_counter);
37145+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
37146+ atomic_inc_unchecked(&c->context_list_counter);
37147
37148- entry->context = atomic_read(&c->context_list_counter);
37149+ entry->context = atomic_read_unchecked(&c->context_list_counter);
37150
37151 list_add(&entry->list, &c->context_list);
37152
37153@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(void)
37154
37155 #if BITS_PER_LONG == 64
37156 spin_lock_init(&c->context_list_lock);
37157- atomic_set(&c->context_list_counter, 0);
37158+ atomic_set_unchecked(&c->context_list_counter, 0);
37159 INIT_LIST_HEAD(&c->context_list);
37160 #endif
37161
37162diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
37163index 78e3e85..66c9a0d 100644
37164--- a/drivers/mfd/ab3100-core.c
37165+++ b/drivers/mfd/ab3100-core.c
37166@@ -777,7 +777,7 @@ struct ab_family_id {
37167 char *name;
37168 };
37169
37170-static const struct ab_family_id ids[] __initdata = {
37171+static const struct ab_family_id ids[] __initconst = {
37172 /* AB3100 */
37173 {
37174 .id = 0xc0,
37175diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
37176index 8d8c932..8104515 100644
37177--- a/drivers/mfd/wm8350-i2c.c
37178+++ b/drivers/mfd/wm8350-i2c.c
37179@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
37180 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
37181 int ret;
37182
37183+ pax_track_stack();
37184+
37185 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
37186 return -EINVAL;
37187
37188diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
37189index e4ff50b..4cc3f04 100644
37190--- a/drivers/misc/kgdbts.c
37191+++ b/drivers/misc/kgdbts.c
37192@@ -118,7 +118,7 @@
37193 } while (0)
37194 #define MAX_CONFIG_LEN 40
37195
37196-static struct kgdb_io kgdbts_io_ops;
37197+static const struct kgdb_io kgdbts_io_ops;
37198 static char get_buf[BUFMAX];
37199 static int get_buf_cnt;
37200 static char put_buf[BUFMAX];
37201@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void)
37202 module_put(THIS_MODULE);
37203 }
37204
37205-static struct kgdb_io kgdbts_io_ops = {
37206+static const struct kgdb_io kgdbts_io_ops = {
37207 .name = "kgdbts",
37208 .read_char = kgdbts_get_char,
37209 .write_char = kgdbts_put_char,
37210diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
37211index 37e7cfc..67cfb76 100644
37212--- a/drivers/misc/sgi-gru/gruhandles.c
37213+++ b/drivers/misc/sgi-gru/gruhandles.c
37214@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37215
37216 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
37217 {
37218- atomic_long_inc(&mcs_op_statistics[op].count);
37219- atomic_long_add(clks, &mcs_op_statistics[op].total);
37220+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
37221+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
37222 if (mcs_op_statistics[op].max < clks)
37223 mcs_op_statistics[op].max = clks;
37224 }
37225diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
37226index 3f2375c..467c6e6 100644
37227--- a/drivers/misc/sgi-gru/gruprocfs.c
37228+++ b/drivers/misc/sgi-gru/gruprocfs.c
37229@@ -32,9 +32,9 @@
37230
37231 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
37232
37233-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
37234+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
37235 {
37236- unsigned long val = atomic_long_read(v);
37237+ unsigned long val = atomic_long_read_unchecked(v);
37238
37239 if (val)
37240 seq_printf(s, "%16lu %s\n", val, id);
37241@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
37242 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
37243
37244 for (op = 0; op < mcsop_last; op++) {
37245- count = atomic_long_read(&mcs_op_statistics[op].count);
37246- total = atomic_long_read(&mcs_op_statistics[op].total);
37247+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
37248+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
37249 max = mcs_op_statistics[op].max;
37250 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
37251 count ? total / count : 0, max);
37252diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
37253index 46990bc..4a251b5 100644
37254--- a/drivers/misc/sgi-gru/grutables.h
37255+++ b/drivers/misc/sgi-gru/grutables.h
37256@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
37257 * GRU statistics.
37258 */
37259 struct gru_stats_s {
37260- atomic_long_t vdata_alloc;
37261- atomic_long_t vdata_free;
37262- atomic_long_t gts_alloc;
37263- atomic_long_t gts_free;
37264- atomic_long_t vdata_double_alloc;
37265- atomic_long_t gts_double_allocate;
37266- atomic_long_t assign_context;
37267- atomic_long_t assign_context_failed;
37268- atomic_long_t free_context;
37269- atomic_long_t load_user_context;
37270- atomic_long_t load_kernel_context;
37271- atomic_long_t lock_kernel_context;
37272- atomic_long_t unlock_kernel_context;
37273- atomic_long_t steal_user_context;
37274- atomic_long_t steal_kernel_context;
37275- atomic_long_t steal_context_failed;
37276- atomic_long_t nopfn;
37277- atomic_long_t break_cow;
37278- atomic_long_t asid_new;
37279- atomic_long_t asid_next;
37280- atomic_long_t asid_wrap;
37281- atomic_long_t asid_reuse;
37282- atomic_long_t intr;
37283- atomic_long_t intr_mm_lock_failed;
37284- atomic_long_t call_os;
37285- atomic_long_t call_os_offnode_reference;
37286- atomic_long_t call_os_check_for_bug;
37287- atomic_long_t call_os_wait_queue;
37288- atomic_long_t user_flush_tlb;
37289- atomic_long_t user_unload_context;
37290- atomic_long_t user_exception;
37291- atomic_long_t set_context_option;
37292- atomic_long_t migrate_check;
37293- atomic_long_t migrated_retarget;
37294- atomic_long_t migrated_unload;
37295- atomic_long_t migrated_unload_delay;
37296- atomic_long_t migrated_nopfn_retarget;
37297- atomic_long_t migrated_nopfn_unload;
37298- atomic_long_t tlb_dropin;
37299- atomic_long_t tlb_dropin_fail_no_asid;
37300- atomic_long_t tlb_dropin_fail_upm;
37301- atomic_long_t tlb_dropin_fail_invalid;
37302- atomic_long_t tlb_dropin_fail_range_active;
37303- atomic_long_t tlb_dropin_fail_idle;
37304- atomic_long_t tlb_dropin_fail_fmm;
37305- atomic_long_t tlb_dropin_fail_no_exception;
37306- atomic_long_t tlb_dropin_fail_no_exception_war;
37307- atomic_long_t tfh_stale_on_fault;
37308- atomic_long_t mmu_invalidate_range;
37309- atomic_long_t mmu_invalidate_page;
37310- atomic_long_t mmu_clear_flush_young;
37311- atomic_long_t flush_tlb;
37312- atomic_long_t flush_tlb_gru;
37313- atomic_long_t flush_tlb_gru_tgh;
37314- atomic_long_t flush_tlb_gru_zero_asid;
37315+ atomic_long_unchecked_t vdata_alloc;
37316+ atomic_long_unchecked_t vdata_free;
37317+ atomic_long_unchecked_t gts_alloc;
37318+ atomic_long_unchecked_t gts_free;
37319+ atomic_long_unchecked_t vdata_double_alloc;
37320+ atomic_long_unchecked_t gts_double_allocate;
37321+ atomic_long_unchecked_t assign_context;
37322+ atomic_long_unchecked_t assign_context_failed;
37323+ atomic_long_unchecked_t free_context;
37324+ atomic_long_unchecked_t load_user_context;
37325+ atomic_long_unchecked_t load_kernel_context;
37326+ atomic_long_unchecked_t lock_kernel_context;
37327+ atomic_long_unchecked_t unlock_kernel_context;
37328+ atomic_long_unchecked_t steal_user_context;
37329+ atomic_long_unchecked_t steal_kernel_context;
37330+ atomic_long_unchecked_t steal_context_failed;
37331+ atomic_long_unchecked_t nopfn;
37332+ atomic_long_unchecked_t break_cow;
37333+ atomic_long_unchecked_t asid_new;
37334+ atomic_long_unchecked_t asid_next;
37335+ atomic_long_unchecked_t asid_wrap;
37336+ atomic_long_unchecked_t asid_reuse;
37337+ atomic_long_unchecked_t intr;
37338+ atomic_long_unchecked_t intr_mm_lock_failed;
37339+ atomic_long_unchecked_t call_os;
37340+ atomic_long_unchecked_t call_os_offnode_reference;
37341+ atomic_long_unchecked_t call_os_check_for_bug;
37342+ atomic_long_unchecked_t call_os_wait_queue;
37343+ atomic_long_unchecked_t user_flush_tlb;
37344+ atomic_long_unchecked_t user_unload_context;
37345+ atomic_long_unchecked_t user_exception;
37346+ atomic_long_unchecked_t set_context_option;
37347+ atomic_long_unchecked_t migrate_check;
37348+ atomic_long_unchecked_t migrated_retarget;
37349+ atomic_long_unchecked_t migrated_unload;
37350+ atomic_long_unchecked_t migrated_unload_delay;
37351+ atomic_long_unchecked_t migrated_nopfn_retarget;
37352+ atomic_long_unchecked_t migrated_nopfn_unload;
37353+ atomic_long_unchecked_t tlb_dropin;
37354+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
37355+ atomic_long_unchecked_t tlb_dropin_fail_upm;
37356+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
37357+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
37358+ atomic_long_unchecked_t tlb_dropin_fail_idle;
37359+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
37360+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
37361+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
37362+ atomic_long_unchecked_t tfh_stale_on_fault;
37363+ atomic_long_unchecked_t mmu_invalidate_range;
37364+ atomic_long_unchecked_t mmu_invalidate_page;
37365+ atomic_long_unchecked_t mmu_clear_flush_young;
37366+ atomic_long_unchecked_t flush_tlb;
37367+ atomic_long_unchecked_t flush_tlb_gru;
37368+ atomic_long_unchecked_t flush_tlb_gru_tgh;
37369+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
37370
37371- atomic_long_t copy_gpa;
37372+ atomic_long_unchecked_t copy_gpa;
37373
37374- atomic_long_t mesq_receive;
37375- atomic_long_t mesq_receive_none;
37376- atomic_long_t mesq_send;
37377- atomic_long_t mesq_send_failed;
37378- atomic_long_t mesq_noop;
37379- atomic_long_t mesq_send_unexpected_error;
37380- atomic_long_t mesq_send_lb_overflow;
37381- atomic_long_t mesq_send_qlimit_reached;
37382- atomic_long_t mesq_send_amo_nacked;
37383- atomic_long_t mesq_send_put_nacked;
37384- atomic_long_t mesq_qf_not_full;
37385- atomic_long_t mesq_qf_locked;
37386- atomic_long_t mesq_qf_noop_not_full;
37387- atomic_long_t mesq_qf_switch_head_failed;
37388- atomic_long_t mesq_qf_unexpected_error;
37389- atomic_long_t mesq_noop_unexpected_error;
37390- atomic_long_t mesq_noop_lb_overflow;
37391- atomic_long_t mesq_noop_qlimit_reached;
37392- atomic_long_t mesq_noop_amo_nacked;
37393- atomic_long_t mesq_noop_put_nacked;
37394+ atomic_long_unchecked_t mesq_receive;
37395+ atomic_long_unchecked_t mesq_receive_none;
37396+ atomic_long_unchecked_t mesq_send;
37397+ atomic_long_unchecked_t mesq_send_failed;
37398+ atomic_long_unchecked_t mesq_noop;
37399+ atomic_long_unchecked_t mesq_send_unexpected_error;
37400+ atomic_long_unchecked_t mesq_send_lb_overflow;
37401+ atomic_long_unchecked_t mesq_send_qlimit_reached;
37402+ atomic_long_unchecked_t mesq_send_amo_nacked;
37403+ atomic_long_unchecked_t mesq_send_put_nacked;
37404+ atomic_long_unchecked_t mesq_qf_not_full;
37405+ atomic_long_unchecked_t mesq_qf_locked;
37406+ atomic_long_unchecked_t mesq_qf_noop_not_full;
37407+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
37408+ atomic_long_unchecked_t mesq_qf_unexpected_error;
37409+ atomic_long_unchecked_t mesq_noop_unexpected_error;
37410+ atomic_long_unchecked_t mesq_noop_lb_overflow;
37411+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
37412+ atomic_long_unchecked_t mesq_noop_amo_nacked;
37413+ atomic_long_unchecked_t mesq_noop_put_nacked;
37414
37415 };
37416
37417@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
37418 cchop_deallocate, tghop_invalidate, mcsop_last};
37419
37420 struct mcs_op_statistic {
37421- atomic_long_t count;
37422- atomic_long_t total;
37423+ atomic_long_unchecked_t count;
37424+ atomic_long_unchecked_t total;
37425 unsigned long max;
37426 };
37427
37428@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
37429
37430 #define STAT(id) do { \
37431 if (gru_options & OPT_STATS) \
37432- atomic_long_inc(&gru_stats.id); \
37433+ atomic_long_inc_unchecked(&gru_stats.id); \
37434 } while (0)
37435
37436 #ifdef CONFIG_SGI_GRU_DEBUG
37437diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
37438index 2275126..12a9dbfb 100644
37439--- a/drivers/misc/sgi-xp/xp.h
37440+++ b/drivers/misc/sgi-xp/xp.h
37441@@ -289,7 +289,7 @@ struct xpc_interface {
37442 xpc_notify_func, void *);
37443 void (*received) (short, int, void *);
37444 enum xp_retval (*partid_to_nasids) (short, void *);
37445-};
37446+} __no_const;
37447
37448 extern struct xpc_interface xpc_interface;
37449
37450diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
37451index b94d5f7..7f494c5 100644
37452--- a/drivers/misc/sgi-xp/xpc.h
37453+++ b/drivers/misc/sgi-xp/xpc.h
37454@@ -835,6 +835,7 @@ struct xpc_arch_operations {
37455 void (*received_payload) (struct xpc_channel *, void *);
37456 void (*notify_senders_of_disconnect) (struct xpc_channel *);
37457 };
37458+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
37459
37460 /* struct xpc_partition act_state values (for XPC HB) */
37461
37462@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
37463 /* found in xpc_main.c */
37464 extern struct device *xpc_part;
37465 extern struct device *xpc_chan;
37466-extern struct xpc_arch_operations xpc_arch_ops;
37467+extern xpc_arch_operations_no_const xpc_arch_ops;
37468 extern int xpc_disengage_timelimit;
37469 extern int xpc_disengage_timedout;
37470 extern int xpc_activate_IRQ_rcvd;
37471diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
37472index fd3688a..7e211a4 100644
37473--- a/drivers/misc/sgi-xp/xpc_main.c
37474+++ b/drivers/misc/sgi-xp/xpc_main.c
37475@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_notifier = {
37476 .notifier_call = xpc_system_die,
37477 };
37478
37479-struct xpc_arch_operations xpc_arch_ops;
37480+xpc_arch_operations_no_const xpc_arch_ops;
37481
37482 /*
37483 * Timer function to enforce the timelimit on the partition disengage.
37484diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
37485index 8b70e03..700bda6 100644
37486--- a/drivers/misc/sgi-xp/xpc_sn2.c
37487+++ b/drivers/misc/sgi-xp/xpc_sn2.c
37488@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_channel *ch, void *payload)
37489 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
37490 }
37491
37492-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
37493+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
37494 .setup_partitions = xpc_setup_partitions_sn2,
37495 .teardown_partitions = xpc_teardown_partitions_sn2,
37496 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
37497@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
37498 int ret;
37499 size_t buf_size;
37500
37501- xpc_arch_ops = xpc_arch_ops_sn2;
37502+ pax_open_kernel();
37503+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
37504+ pax_close_kernel();
37505
37506 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
37507 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
37508diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
37509index 8e08d71..7cb8c9b 100644
37510--- a/drivers/misc/sgi-xp/xpc_uv.c
37511+++ b/drivers/misc/sgi-xp/xpc_uv.c
37512@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_channel *ch, void *payload)
37513 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
37514 }
37515
37516-static struct xpc_arch_operations xpc_arch_ops_uv = {
37517+static const struct xpc_arch_operations xpc_arch_ops_uv = {
37518 .setup_partitions = xpc_setup_partitions_uv,
37519 .teardown_partitions = xpc_teardown_partitions_uv,
37520 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
37521@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_arch_ops_uv = {
37522 int
37523 xpc_init_uv(void)
37524 {
37525- xpc_arch_ops = xpc_arch_ops_uv;
37526+ pax_open_kernel();
37527+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
37528+ pax_close_kernel();
37529
37530 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
37531 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
37532diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
37533index 6fd20b42..650efe3 100644
37534--- a/drivers/mmc/host/sdhci-pci.c
37535+++ b/drivers/mmc/host/sdhci-pci.c
37536@@ -297,7 +297,7 @@ static const struct sdhci_pci_fixes sdhci_via = {
37537 .probe = via_probe,
37538 };
37539
37540-static const struct pci_device_id pci_ids[] __devinitdata = {
37541+static const struct pci_device_id pci_ids[] __devinitconst = {
37542 {
37543 .vendor = PCI_VENDOR_ID_RICOH,
37544 .device = PCI_DEVICE_ID_RICOH_R5C822,
37545diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
37546index e7563a9..5f90ce5 100644
37547--- a/drivers/mtd/chips/cfi_cmdset_0001.c
37548+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
37549@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long
37550 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
37551 unsigned long timeo = jiffies + HZ;
37552
37553+ pax_track_stack();
37554+
37555 /* Prevent setting state FL_SYNCING for chip in suspended state. */
37556 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
37557 goto sleep;
37558@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
37559 unsigned long initial_adr;
37560 int initial_len = len;
37561
37562+ pax_track_stack();
37563+
37564 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
37565 adr += chip->start;
37566 initial_adr = adr;
37567@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
37568 int retries = 3;
37569 int ret;
37570
37571+ pax_track_stack();
37572+
37573 adr += chip->start;
37574
37575 retry:
37576diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
37577index 0667a67..3ab97ed 100644
37578--- a/drivers/mtd/chips/cfi_cmdset_0020.c
37579+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
37580@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct map_info *map, struct flchip *chip, lof
37581 unsigned long cmd_addr;
37582 struct cfi_private *cfi = map->fldrv_priv;
37583
37584+ pax_track_stack();
37585+
37586 adr += chip->start;
37587
37588 /* Ensure cmd read/writes are aligned. */
37589@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct map_info *map, struct flchip *chip,
37590 DECLARE_WAITQUEUE(wait, current);
37591 int wbufsize, z;
37592
37593+ pax_track_stack();
37594+
37595 /* M58LW064A requires bus alignment for buffer wriets -- saw */
37596 if (adr & (map_bankwidth(map)-1))
37597 return -EINVAL;
37598@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(struct map_info *map, struct flchip *chip, u
37599 DECLARE_WAITQUEUE(wait, current);
37600 int ret = 0;
37601
37602+ pax_track_stack();
37603+
37604 adr += chip->start;
37605
37606 /* Let's determine this according to the interleave only once */
37607@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struct map_info *map, struct flchip *chip, un
37608 unsigned long timeo = jiffies + HZ;
37609 DECLARE_WAITQUEUE(wait, current);
37610
37611+ pax_track_stack();
37612+
37613 adr += chip->start;
37614
37615 /* Let's determine this according to the interleave only once */
37616@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(struct map_info *map, struct flchip *chip,
37617 unsigned long timeo = jiffies + HZ;
37618 DECLARE_WAITQUEUE(wait, current);
37619
37620+ pax_track_stack();
37621+
37622 adr += chip->start;
37623
37624 /* Let's determine this according to the interleave only once */
37625diff --git a/drivers/mtd/devices/doc2000.c b/drivers/mtd/devices/doc2000.c
37626index 5bf5f46..c5de373 100644
37627--- a/drivers/mtd/devices/doc2000.c
37628+++ b/drivers/mtd/devices/doc2000.c
37629@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mtd, loff_t to, size_t len,
37630
37631 /* The ECC will not be calculated correctly if less than 512 is written */
37632 /* DBB-
37633- if (len != 0x200 && eccbuf)
37634+ if (len != 0x200)
37635 printk(KERN_WARNING
37636 "ECC needs a full sector write (adr: %lx size %lx)\n",
37637 (long) to, (long) len);
37638diff --git a/drivers/mtd/devices/doc2001.c b/drivers/mtd/devices/doc2001.c
37639index 0990f78..bb4e8a4 100644
37640--- a/drivers/mtd/devices/doc2001.c
37641+++ b/drivers/mtd/devices/doc2001.c
37642@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mtd, loff_t from, size_t len,
37643 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
37644
37645 /* Don't allow read past end of device */
37646- if (from >= this->totlen)
37647+ if (from >= this->totlen || !len)
37648 return -EINVAL;
37649
37650 /* Don't allow a single read to cross a 512-byte block boundary */
37651diff --git a/drivers/mtd/ftl.c b/drivers/mtd/ftl.c
37652index e56d6b4..f07e6cf 100644
37653--- a/drivers/mtd/ftl.c
37654+++ b/drivers/mtd/ftl.c
37655@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *part, uint16_t srcunit,
37656 loff_t offset;
37657 uint16_t srcunitswap = cpu_to_le16(srcunit);
37658
37659+ pax_track_stack();
37660+
37661 eun = &part->EUNInfo[srcunit];
37662 xfer = &part->XferInfo[xferunit];
37663 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
37664diff --git a/drivers/mtd/inftlcore.c b/drivers/mtd/inftlcore.c
37665index 8aca552..146446e 100755
37666--- a/drivers/mtd/inftlcore.c
37667+++ b/drivers/mtd/inftlcore.c
37668@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLrecord *inftl, unsigned thisVUC, unsigned
37669 struct inftl_oob oob;
37670 size_t retlen;
37671
37672+ pax_track_stack();
37673+
37674 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
37675 "pending=%d)\n", inftl, thisVUC, pendingblock);
37676
37677diff --git a/drivers/mtd/inftlmount.c b/drivers/mtd/inftlmount.c
37678index 32e82ae..ed50953 100644
37679--- a/drivers/mtd/inftlmount.c
37680+++ b/drivers/mtd/inftlmount.c
37681@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTLrecord *inftl)
37682 struct INFTLPartition *ip;
37683 size_t retlen;
37684
37685+ pax_track_stack();
37686+
37687 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
37688
37689 /*
37690diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
37691index 79bf40f..fe5f8fd 100644
37692--- a/drivers/mtd/lpddr/qinfo_probe.c
37693+++ b/drivers/mtd/lpddr/qinfo_probe.c
37694@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map_info *map, struct lpddr_private *lpddr)
37695 {
37696 map_word pfow_val[4];
37697
37698+ pax_track_stack();
37699+
37700 /* Check identification string */
37701 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
37702 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
37703diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c
37704index 726a1b8..f46b460 100644
37705--- a/drivers/mtd/mtdchar.c
37706+++ b/drivers/mtd/mtdchar.c
37707@@ -461,6 +461,8 @@ static int mtd_ioctl(struct inode *inode, struct file *file,
37708 u_long size;
37709 struct mtd_info_user info;
37710
37711+ pax_track_stack();
37712+
37713 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
37714
37715 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
37716diff --git a/drivers/mtd/nftlcore.c b/drivers/mtd/nftlcore.c
37717index 1002e18..26d82d5 100644
37718--- a/drivers/mtd/nftlcore.c
37719+++ b/drivers/mtd/nftlcore.c
37720@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLrecord *nftl, unsigned thisVUC, unsigned p
37721 int inplace = 1;
37722 size_t retlen;
37723
37724+ pax_track_stack();
37725+
37726 memset(BlockMap, 0xff, sizeof(BlockMap));
37727 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
37728
37729diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
37730index 8b22b18..6fada85 100644
37731--- a/drivers/mtd/nftlmount.c
37732+++ b/drivers/mtd/nftlmount.c
37733@@ -23,6 +23,7 @@
37734 #include <asm/errno.h>
37735 #include <linux/delay.h>
37736 #include <linux/slab.h>
37737+#include <linux/sched.h>
37738 #include <linux/mtd/mtd.h>
37739 #include <linux/mtd/nand.h>
37740 #include <linux/mtd/nftl.h>
37741@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLrecord *nftl)
37742 struct mtd_info *mtd = nftl->mbd.mtd;
37743 unsigned int i;
37744
37745+ pax_track_stack();
37746+
37747 /* Assume logical EraseSize == physical erasesize for starting the scan.
37748 We'll sort it out later if we find a MediaHeader which says otherwise */
37749 /* Actually, we won't. The new DiskOnChip driver has already scanned
37750diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
37751index 14cec04..d775b87 100644
37752--- a/drivers/mtd/ubi/build.c
37753+++ b/drivers/mtd/ubi/build.c
37754@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
37755 static int __init bytes_str_to_int(const char *str)
37756 {
37757 char *endp;
37758- unsigned long result;
37759+ unsigned long result, scale = 1;
37760
37761 result = simple_strtoul(str, &endp, 0);
37762 if (str == endp || result >= INT_MAX) {
37763@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const char *str)
37764
37765 switch (*endp) {
37766 case 'G':
37767- result *= 1024;
37768+ scale *= 1024;
37769 case 'M':
37770- result *= 1024;
37771+ scale *= 1024;
37772 case 'K':
37773- result *= 1024;
37774+ scale *= 1024;
37775 if (endp[1] == 'i' && endp[2] == 'B')
37776 endp += 2;
37777 case '\0':
37778@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const char *str)
37779 return -EINVAL;
37780 }
37781
37782- return result;
37783+ if ((intoverflow_t)result*scale >= INT_MAX) {
37784+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
37785+ str);
37786+ return -EINVAL;
37787+ }
37788+
37789+ return result*scale;
37790 }
37791
37792 /**
37793diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
37794index ab68886..ca405e8 100644
37795--- a/drivers/net/atlx/atl2.c
37796+++ b/drivers/net/atlx/atl2.c
37797@@ -2845,7 +2845,7 @@ static void atl2_force_ps(struct atl2_hw *hw)
37798 */
37799
37800 #define ATL2_PARAM(X, desc) \
37801- static const int __devinitdata X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37802+ static const int __devinitconst X[ATL2_MAX_NIC + 1] = ATL2_PARAM_INIT; \
37803 MODULE_PARM(X, "1-" __MODULE_STRING(ATL2_MAX_NIC) "i"); \
37804 MODULE_PARM_DESC(X, desc);
37805 #else
37806diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
37807index 4874b2b..67f8526 100644
37808--- a/drivers/net/bnx2.c
37809+++ b/drivers/net/bnx2.c
37810@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
37811 int rc = 0;
37812 u32 magic, csum;
37813
37814+ pax_track_stack();
37815+
37816 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
37817 goto test_nvram_done;
37818
37819diff --git a/drivers/net/cxgb3/l2t.h b/drivers/net/cxgb3/l2t.h
37820index fd3eb07..8a6978d 100644
37821--- a/drivers/net/cxgb3/l2t.h
37822+++ b/drivers/net/cxgb3/l2t.h
37823@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
37824 */
37825 struct l2t_skb_cb {
37826 arp_failure_handler_func arp_failure_handler;
37827-};
37828+} __no_const;
37829
37830 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
37831
37832diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
37833index 032cfe0..411af379 100644
37834--- a/drivers/net/cxgb3/t3_hw.c
37835+++ b/drivers/net/cxgb3/t3_hw.c
37836@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
37837 int i, addr, ret;
37838 struct t3_vpd vpd;
37839
37840+ pax_track_stack();
37841+
37842 /*
37843 * Card information is normally at VPD_BASE but some early cards had
37844 * it at 0.
37845diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
37846index d1e0563..b9e129c 100644
37847--- a/drivers/net/e1000e/82571.c
37848+++ b/drivers/net/e1000e/82571.c
37849@@ -212,7 +212,7 @@ static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
37850 {
37851 struct e1000_hw *hw = &adapter->hw;
37852 struct e1000_mac_info *mac = &hw->mac;
37853- struct e1000_mac_operations *func = &mac->ops;
37854+ e1000_mac_operations_no_const *func = &mac->ops;
37855 u32 swsm = 0;
37856 u32 swsm2 = 0;
37857 bool force_clear_smbi = false;
37858@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
37859 temp = er32(ICRXDMTC);
37860 }
37861
37862-static struct e1000_mac_operations e82571_mac_ops = {
37863+static const struct e1000_mac_operations e82571_mac_ops = {
37864 /* .check_mng_mode: mac type dependent */
37865 /* .check_for_link: media type dependent */
37866 .id_led_init = e1000e_id_led_init,
37867@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e82571_mac_ops = {
37868 .setup_led = e1000e_setup_led_generic,
37869 };
37870
37871-static struct e1000_phy_operations e82_phy_ops_igp = {
37872+static const struct e1000_phy_operations e82_phy_ops_igp = {
37873 .acquire_phy = e1000_get_hw_semaphore_82571,
37874 .check_reset_block = e1000e_check_reset_block_generic,
37875 .commit_phy = NULL,
37876@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_phy_ops_igp = {
37877 .cfg_on_link_up = NULL,
37878 };
37879
37880-static struct e1000_phy_operations e82_phy_ops_m88 = {
37881+static const struct e1000_phy_operations e82_phy_ops_m88 = {
37882 .acquire_phy = e1000_get_hw_semaphore_82571,
37883 .check_reset_block = e1000e_check_reset_block_generic,
37884 .commit_phy = e1000e_phy_sw_reset,
37885@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_phy_ops_m88 = {
37886 .cfg_on_link_up = NULL,
37887 };
37888
37889-static struct e1000_phy_operations e82_phy_ops_bm = {
37890+static const struct e1000_phy_operations e82_phy_ops_bm = {
37891 .acquire_phy = e1000_get_hw_semaphore_82571,
37892 .check_reset_block = e1000e_check_reset_block_generic,
37893 .commit_phy = e1000e_phy_sw_reset,
37894@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_phy_ops_bm = {
37895 .cfg_on_link_up = NULL,
37896 };
37897
37898-static struct e1000_nvm_operations e82571_nvm_ops = {
37899+static const struct e1000_nvm_operations e82571_nvm_ops = {
37900 .acquire_nvm = e1000_acquire_nvm_82571,
37901 .read_nvm = e1000e_read_nvm_eerd,
37902 .release_nvm = e1000_release_nvm_82571,
37903diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
37904index 47db9bd..fa58ccd 100644
37905--- a/drivers/net/e1000e/e1000.h
37906+++ b/drivers/net/e1000e/e1000.h
37907@@ -375,9 +375,9 @@ struct e1000_info {
37908 u32 pba;
37909 u32 max_hw_frame_size;
37910 s32 (*get_variants)(struct e1000_adapter *);
37911- struct e1000_mac_operations *mac_ops;
37912- struct e1000_phy_operations *phy_ops;
37913- struct e1000_nvm_operations *nvm_ops;
37914+ const struct e1000_mac_operations *mac_ops;
37915+ const struct e1000_phy_operations *phy_ops;
37916+ const struct e1000_nvm_operations *nvm_ops;
37917 };
37918
37919 /* hardware capability, feature, and workaround flags */
37920diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
37921index ae5d736..e9a93a1 100644
37922--- a/drivers/net/e1000e/es2lan.c
37923+++ b/drivers/net/e1000e/es2lan.c
37924@@ -207,7 +207,7 @@ static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
37925 {
37926 struct e1000_hw *hw = &adapter->hw;
37927 struct e1000_mac_info *mac = &hw->mac;
37928- struct e1000_mac_operations *func = &mac->ops;
37929+ e1000_mac_operations_no_const *func = &mac->ops;
37930
37931 /* Set media type */
37932 switch (adapter->pdev->device) {
37933@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
37934 temp = er32(ICRXDMTC);
37935 }
37936
37937-static struct e1000_mac_operations es2_mac_ops = {
37938+static const struct e1000_mac_operations es2_mac_ops = {
37939 .id_led_init = e1000e_id_led_init,
37940 .check_mng_mode = e1000e_check_mng_mode_generic,
37941 /* check_for_link dependent on media type */
37942@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_mac_ops = {
37943 .setup_led = e1000e_setup_led_generic,
37944 };
37945
37946-static struct e1000_phy_operations es2_phy_ops = {
37947+static const struct e1000_phy_operations es2_phy_ops = {
37948 .acquire_phy = e1000_acquire_phy_80003es2lan,
37949 .check_reset_block = e1000e_check_reset_block_generic,
37950 .commit_phy = e1000e_phy_sw_reset,
37951@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_phy_ops = {
37952 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
37953 };
37954
37955-static struct e1000_nvm_operations es2_nvm_ops = {
37956+static const struct e1000_nvm_operations es2_nvm_ops = {
37957 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
37958 .read_nvm = e1000e_read_nvm_eerd,
37959 .release_nvm = e1000_release_nvm_80003es2lan,
37960diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
37961index 11f3b7c..6381887 100644
37962--- a/drivers/net/e1000e/hw.h
37963+++ b/drivers/net/e1000e/hw.h
37964@@ -753,6 +753,7 @@ struct e1000_mac_operations {
37965 s32 (*setup_physical_interface)(struct e1000_hw *);
37966 s32 (*setup_led)(struct e1000_hw *);
37967 };
37968+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
37969
37970 /* Function pointers for the PHY. */
37971 struct e1000_phy_operations {
37972@@ -774,6 +775,7 @@ struct e1000_phy_operations {
37973 s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
37974 s32 (*cfg_on_link_up)(struct e1000_hw *);
37975 };
37976+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
37977
37978 /* Function pointers for the NVM. */
37979 struct e1000_nvm_operations {
37980@@ -785,9 +787,10 @@ struct e1000_nvm_operations {
37981 s32 (*validate_nvm)(struct e1000_hw *);
37982 s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
37983 };
37984+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
37985
37986 struct e1000_mac_info {
37987- struct e1000_mac_operations ops;
37988+ e1000_mac_operations_no_const ops;
37989
37990 u8 addr[6];
37991 u8 perm_addr[6];
37992@@ -823,7 +826,7 @@ struct e1000_mac_info {
37993 };
37994
37995 struct e1000_phy_info {
37996- struct e1000_phy_operations ops;
37997+ e1000_phy_operations_no_const ops;
37998
37999 enum e1000_phy_type type;
38000
38001@@ -857,7 +860,7 @@ struct e1000_phy_info {
38002 };
38003
38004 struct e1000_nvm_info {
38005- struct e1000_nvm_operations ops;
38006+ e1000_nvm_operations_no_const ops;
38007
38008 enum e1000_nvm_type type;
38009 enum e1000_nvm_override override;
38010diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
38011index de39f9a..e28d3e0 100644
38012--- a/drivers/net/e1000e/ich8lan.c
38013+++ b/drivers/net/e1000e/ich8lan.c
38014@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
38015 }
38016 }
38017
38018-static struct e1000_mac_operations ich8_mac_ops = {
38019+static const struct e1000_mac_operations ich8_mac_ops = {
38020 .id_led_init = e1000e_id_led_init,
38021 .check_mng_mode = e1000_check_mng_mode_ich8lan,
38022 .check_for_link = e1000_check_for_copper_link_ich8lan,
38023@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_mac_ops = {
38024 /* id_led_init dependent on mac type */
38025 };
38026
38027-static struct e1000_phy_operations ich8_phy_ops = {
38028+static const struct e1000_phy_operations ich8_phy_ops = {
38029 .acquire_phy = e1000_acquire_swflag_ich8lan,
38030 .check_reset_block = e1000_check_reset_block_ich8lan,
38031 .commit_phy = NULL,
38032@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_phy_ops = {
38033 .write_phy_reg = e1000e_write_phy_reg_igp,
38034 };
38035
38036-static struct e1000_nvm_operations ich8_nvm_ops = {
38037+static const struct e1000_nvm_operations ich8_nvm_ops = {
38038 .acquire_nvm = e1000_acquire_nvm_ich8lan,
38039 .read_nvm = e1000_read_nvm_ich8lan,
38040 .release_nvm = e1000_release_nvm_ich8lan,
38041diff --git a/drivers/net/fealnx.c b/drivers/net/fealnx.c
38042index 18d5fbb..542d96d 100644
38043--- a/drivers/net/fealnx.c
38044+++ b/drivers/net/fealnx.c
38045@@ -151,7 +151,7 @@ struct chip_info {
38046 int flags;
38047 };
38048
38049-static const struct chip_info skel_netdrv_tbl[] __devinitdata = {
38050+static const struct chip_info skel_netdrv_tbl[] __devinitconst = {
38051 { "100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38052 { "100/10M Ethernet PCI Adapter", HAS_CHIP_XCVR },
38053 { "1000/100/10M Ethernet PCI Adapter", HAS_MII_XCVR },
38054diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
38055index 0e5b54b..b503f82 100644
38056--- a/drivers/net/hamradio/6pack.c
38057+++ b/drivers/net/hamradio/6pack.c
38058@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct tty_struct *tty,
38059 unsigned char buf[512];
38060 int count1;
38061
38062+ pax_track_stack();
38063+
38064 if (!count)
38065 return;
38066
38067diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
38068index 5862282..7cce8cb 100644
38069--- a/drivers/net/ibmveth.c
38070+++ b/drivers/net/ibmveth.c
38071@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attrs[] = {
38072 NULL,
38073 };
38074
38075-static struct sysfs_ops veth_pool_ops = {
38076+static const struct sysfs_ops veth_pool_ops = {
38077 .show = veth_pool_show,
38078 .store = veth_pool_store,
38079 };
38080diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
38081index d617f2d..57b5309 100644
38082--- a/drivers/net/igb/e1000_82575.c
38083+++ b/drivers/net/igb/e1000_82575.c
38084@@ -1411,7 +1411,7 @@ void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
38085 wr32(E1000_VT_CTL, vt_ctl);
38086 }
38087
38088-static struct e1000_mac_operations e1000_mac_ops_82575 = {
38089+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
38090 .reset_hw = igb_reset_hw_82575,
38091 .init_hw = igb_init_hw_82575,
38092 .check_for_link = igb_check_for_link_82575,
38093@@ -1420,13 +1420,13 @@ static struct e1000_mac_operations e1000_mac_ops_82575 = {
38094 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
38095 };
38096
38097-static struct e1000_phy_operations e1000_phy_ops_82575 = {
38098+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
38099 .acquire = igb_acquire_phy_82575,
38100 .get_cfg_done = igb_get_cfg_done_82575,
38101 .release = igb_release_phy_82575,
38102 };
38103
38104-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38105+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
38106 .acquire = igb_acquire_nvm_82575,
38107 .read = igb_read_nvm_eerd,
38108 .release = igb_release_nvm_82575,
38109diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
38110index 72081df..d855cf5 100644
38111--- a/drivers/net/igb/e1000_hw.h
38112+++ b/drivers/net/igb/e1000_hw.h
38113@@ -288,6 +288,7 @@ struct e1000_mac_operations {
38114 s32 (*read_mac_addr)(struct e1000_hw *);
38115 s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
38116 };
38117+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38118
38119 struct e1000_phy_operations {
38120 s32 (*acquire)(struct e1000_hw *);
38121@@ -303,6 +304,7 @@ struct e1000_phy_operations {
38122 s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
38123 s32 (*write_reg)(struct e1000_hw *, u32, u16);
38124 };
38125+typedef struct e1000_phy_operations __no_const e1000_phy_operations_no_const;
38126
38127 struct e1000_nvm_operations {
38128 s32 (*acquire)(struct e1000_hw *);
38129@@ -310,6 +312,7 @@ struct e1000_nvm_operations {
38130 void (*release)(struct e1000_hw *);
38131 s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
38132 };
38133+typedef struct e1000_nvm_operations __no_const e1000_nvm_operations_no_const;
38134
38135 struct e1000_info {
38136 s32 (*get_invariants)(struct e1000_hw *);
38137@@ -321,7 +324,7 @@ struct e1000_info {
38138 extern const struct e1000_info e1000_82575_info;
38139
38140 struct e1000_mac_info {
38141- struct e1000_mac_operations ops;
38142+ e1000_mac_operations_no_const ops;
38143
38144 u8 addr[6];
38145 u8 perm_addr[6];
38146@@ -365,7 +368,7 @@ struct e1000_mac_info {
38147 };
38148
38149 struct e1000_phy_info {
38150- struct e1000_phy_operations ops;
38151+ e1000_phy_operations_no_const ops;
38152
38153 enum e1000_phy_type type;
38154
38155@@ -400,7 +403,7 @@ struct e1000_phy_info {
38156 };
38157
38158 struct e1000_nvm_info {
38159- struct e1000_nvm_operations ops;
38160+ e1000_nvm_operations_no_const ops;
38161
38162 enum e1000_nvm_type type;
38163 enum e1000_nvm_override override;
38164@@ -446,6 +449,7 @@ struct e1000_mbx_operations {
38165 s32 (*check_for_ack)(struct e1000_hw *, u16);
38166 s32 (*check_for_rst)(struct e1000_hw *, u16);
38167 };
38168+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38169
38170 struct e1000_mbx_stats {
38171 u32 msgs_tx;
38172@@ -457,7 +461,7 @@ struct e1000_mbx_stats {
38173 };
38174
38175 struct e1000_mbx_info {
38176- struct e1000_mbx_operations ops;
38177+ e1000_mbx_operations_no_const ops;
38178 struct e1000_mbx_stats stats;
38179 u32 timeout;
38180 u32 usec_delay;
38181diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
38182index 1e8ce37..549c453 100644
38183--- a/drivers/net/igbvf/vf.h
38184+++ b/drivers/net/igbvf/vf.h
38185@@ -187,9 +187,10 @@ struct e1000_mac_operations {
38186 s32 (*read_mac_addr)(struct e1000_hw *);
38187 s32 (*set_vfta)(struct e1000_hw *, u16, bool);
38188 };
38189+typedef struct e1000_mac_operations __no_const e1000_mac_operations_no_const;
38190
38191 struct e1000_mac_info {
38192- struct e1000_mac_operations ops;
38193+ e1000_mac_operations_no_const ops;
38194 u8 addr[6];
38195 u8 perm_addr[6];
38196
38197@@ -211,6 +212,7 @@ struct e1000_mbx_operations {
38198 s32 (*check_for_ack)(struct e1000_hw *);
38199 s32 (*check_for_rst)(struct e1000_hw *);
38200 };
38201+typedef struct e1000_mbx_operations __no_const e1000_mbx_operations_no_const;
38202
38203 struct e1000_mbx_stats {
38204 u32 msgs_tx;
38205@@ -222,7 +224,7 @@ struct e1000_mbx_stats {
38206 };
38207
38208 struct e1000_mbx_info {
38209- struct e1000_mbx_operations ops;
38210+ e1000_mbx_operations_no_const ops;
38211 struct e1000_mbx_stats stats;
38212 u32 timeout;
38213 u32 usec_delay;
38214diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
38215index aa7286b..a61394f 100644
38216--- a/drivers/net/iseries_veth.c
38217+++ b/drivers/net/iseries_veth.c
38218@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_default_attrs[] = {
38219 NULL
38220 };
38221
38222-static struct sysfs_ops veth_cnx_sysfs_ops = {
38223+static const struct sysfs_ops veth_cnx_sysfs_ops = {
38224 .show = veth_cnx_attribute_show
38225 };
38226
38227@@ -441,7 +441,7 @@ static struct attribute *veth_port_default_attrs[] = {
38228 NULL
38229 };
38230
38231-static struct sysfs_ops veth_port_sysfs_ops = {
38232+static const struct sysfs_ops veth_port_sysfs_ops = {
38233 .show = veth_port_attribute_show
38234 };
38235
38236diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
38237index 8aa44dc..fa1e797 100644
38238--- a/drivers/net/ixgb/ixgb_main.c
38239+++ b/drivers/net/ixgb/ixgb_main.c
38240@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev)
38241 u32 rctl;
38242 int i;
38243
38244+ pax_track_stack();
38245+
38246 /* Check for Promiscuous and All Multicast modes */
38247
38248 rctl = IXGB_READ_REG(hw, RCTL);
38249diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
38250index af35e1d..8781785 100644
38251--- a/drivers/net/ixgb/ixgb_param.c
38252+++ b/drivers/net/ixgb/ixgb_param.c
38253@@ -260,6 +260,9 @@ void __devinit
38254 ixgb_check_options(struct ixgb_adapter *adapter)
38255 {
38256 int bd = adapter->bd_number;
38257+
38258+ pax_track_stack();
38259+
38260 if (bd >= IXGB_MAX_NIC) {
38261 printk(KERN_NOTICE
38262 "Warning: no configuration for board #%i\n", bd);
38263diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
38264index b17aa73..ed74540 100644
38265--- a/drivers/net/ixgbe/ixgbe_type.h
38266+++ b/drivers/net/ixgbe/ixgbe_type.h
38267@@ -2327,6 +2327,7 @@ struct ixgbe_eeprom_operations {
38268 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
38269 s32 (*update_checksum)(struct ixgbe_hw *);
38270 };
38271+typedef struct ixgbe_eeprom_operations __no_const ixgbe_eeprom_operations_no_const;
38272
38273 struct ixgbe_mac_operations {
38274 s32 (*init_hw)(struct ixgbe_hw *);
38275@@ -2376,6 +2377,7 @@ struct ixgbe_mac_operations {
38276 /* Flow Control */
38277 s32 (*fc_enable)(struct ixgbe_hw *, s32);
38278 };
38279+typedef struct ixgbe_mac_operations __no_const ixgbe_mac_operations_no_const;
38280
38281 struct ixgbe_phy_operations {
38282 s32 (*identify)(struct ixgbe_hw *);
38283@@ -2394,9 +2396,10 @@ struct ixgbe_phy_operations {
38284 s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *);
38285 s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8);
38286 };
38287+typedef struct ixgbe_phy_operations __no_const ixgbe_phy_operations_no_const;
38288
38289 struct ixgbe_eeprom_info {
38290- struct ixgbe_eeprom_operations ops;
38291+ ixgbe_eeprom_operations_no_const ops;
38292 enum ixgbe_eeprom_type type;
38293 u32 semaphore_delay;
38294 u16 word_size;
38295@@ -2404,7 +2407,7 @@ struct ixgbe_eeprom_info {
38296 };
38297
38298 struct ixgbe_mac_info {
38299- struct ixgbe_mac_operations ops;
38300+ ixgbe_mac_operations_no_const ops;
38301 enum ixgbe_mac_type type;
38302 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38303 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
38304@@ -2423,7 +2426,7 @@ struct ixgbe_mac_info {
38305 };
38306
38307 struct ixgbe_phy_info {
38308- struct ixgbe_phy_operations ops;
38309+ ixgbe_phy_operations_no_const ops;
38310 struct mdio_if_info mdio;
38311 enum ixgbe_phy_type type;
38312 u32 id;
38313diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
38314index 291a505..2543756 100644
38315--- a/drivers/net/mlx4/main.c
38316+++ b/drivers/net/mlx4/main.c
38317@@ -38,6 +38,7 @@
38318 #include <linux/errno.h>
38319 #include <linux/pci.h>
38320 #include <linux/dma-mapping.h>
38321+#include <linux/sched.h>
38322
38323 #include <linux/mlx4/device.h>
38324 #include <linux/mlx4/doorbell.h>
38325@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
38326 u64 icm_size;
38327 int err;
38328
38329+ pax_track_stack();
38330+
38331 err = mlx4_QUERY_FW(dev);
38332 if (err) {
38333 if (err == -EACCES)
38334diff --git a/drivers/net/niu.c b/drivers/net/niu.c
38335index 2dce134..fa5ce75 100644
38336--- a/drivers/net/niu.c
38337+++ b/drivers/net/niu.c
38338@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struct niu *np, u8 *ldg_num_map)
38339 int i, num_irqs, err;
38340 u8 first_ldg;
38341
38342+ pax_track_stack();
38343+
38344 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
38345 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
38346 ldg_num_map[i] = first_ldg + i;
38347diff --git a/drivers/net/pcnet32.c b/drivers/net/pcnet32.c
38348index c1b3f09..97cd8c4 100644
38349--- a/drivers/net/pcnet32.c
38350+++ b/drivers/net/pcnet32.c
38351@@ -79,7 +79,7 @@ static int cards_found;
38352 /*
38353 * VLB I/O addresses
38354 */
38355-static unsigned int pcnet32_portlist[] __initdata =
38356+static unsigned int pcnet32_portlist[] __devinitdata =
38357 { 0x300, 0x320, 0x340, 0x360, 0 };
38358
38359 static int pcnet32_debug = 0;
38360@@ -267,7 +267,7 @@ struct pcnet32_private {
38361 struct sk_buff **rx_skbuff;
38362 dma_addr_t *tx_dma_addr;
38363 dma_addr_t *rx_dma_addr;
38364- struct pcnet32_access a;
38365+ struct pcnet32_access *a;
38366 spinlock_t lock; /* Guard lock */
38367 unsigned int cur_rx, cur_tx; /* The next free ring entry */
38368 unsigned int rx_ring_size; /* current rx ring size */
38369@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct net_device *dev)
38370 u16 val;
38371
38372 netif_wake_queue(dev);
38373- val = lp->a.read_csr(ioaddr, CSR3);
38374+ val = lp->a->read_csr(ioaddr, CSR3);
38375 val &= 0x00ff;
38376- lp->a.write_csr(ioaddr, CSR3, val);
38377+ lp->a->write_csr(ioaddr, CSR3, val);
38378 napi_enable(&lp->napi);
38379 }
38380
38381@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_device *dev)
38382 r = mii_link_ok(&lp->mii_if);
38383 } else if (lp->chip_version >= PCNET32_79C970A) {
38384 ulong ioaddr = dev->base_addr; /* card base I/O address */
38385- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38386+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38387 } else { /* can not detect link on really old chips */
38388 r = 1;
38389 }
38390@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct net_device *dev,
38391 pcnet32_netif_stop(dev);
38392
38393 spin_lock_irqsave(&lp->lock, flags);
38394- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38395+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38396
38397 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
38398
38399@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct net_device *dev,
38400 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38401 {
38402 struct pcnet32_private *lp = netdev_priv(dev);
38403- struct pcnet32_access *a = &lp->a; /* access to registers */
38404+ struct pcnet32_access *a = lp->a; /* access to registers */
38405 ulong ioaddr = dev->base_addr; /* card base I/O address */
38406 struct sk_buff *skb; /* sk buff */
38407 int x, i; /* counters */
38408@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38409 pcnet32_netif_stop(dev);
38410
38411 spin_lock_irqsave(&lp->lock, flags);
38412- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38413+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
38414
38415 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
38416
38417 /* Reset the PCNET32 */
38418- lp->a.reset(ioaddr);
38419- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38420+ lp->a->reset(ioaddr);
38421+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38422
38423 /* switch pcnet32 to 32bit mode */
38424- lp->a.write_bcr(ioaddr, 20, 2);
38425+ lp->a->write_bcr(ioaddr, 20, 2);
38426
38427 /* purge & init rings but don't actually restart */
38428 pcnet32_restart(dev, 0x0000);
38429
38430- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38431+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38432
38433 /* Initialize Transmit buffers. */
38434 size = data_len + 15;
38435@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38436
38437 /* set int loopback in CSR15 */
38438 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
38439- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
38440+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
38441
38442 teststatus = cpu_to_le16(0x8000);
38443- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38444+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
38445
38446 /* Check status of descriptors */
38447 for (x = 0; x < numbuffs; x++) {
38448@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38449 }
38450 }
38451
38452- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38453+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
38454 wmb();
38455 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
38456 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
38457@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38458 pcnet32_restart(dev, CSR0_NORMAL);
38459 } else {
38460 pcnet32_purge_rx_ring(dev);
38461- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38462+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
38463 }
38464 spin_unlock_irqrestore(&lp->lock, flags);
38465
38466@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
38467 static void pcnet32_led_blink_callback(struct net_device *dev)
38468 {
38469 struct pcnet32_private *lp = netdev_priv(dev);
38470- struct pcnet32_access *a = &lp->a;
38471+ struct pcnet32_access *a = lp->a;
38472 ulong ioaddr = dev->base_addr;
38473 unsigned long flags;
38474 int i;
38475@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(struct net_device *dev)
38476 static int pcnet32_phys_id(struct net_device *dev, u32 data)
38477 {
38478 struct pcnet32_private *lp = netdev_priv(dev);
38479- struct pcnet32_access *a = &lp->a;
38480+ struct pcnet32_access *a = lp->a;
38481 ulong ioaddr = dev->base_addr;
38482 unsigned long flags;
38483 int i, regs[4];
38484@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_device *dev, unsigned long *flags,
38485 {
38486 int csr5;
38487 struct pcnet32_private *lp = netdev_priv(dev);
38488- struct pcnet32_access *a = &lp->a;
38489+ struct pcnet32_access *a = lp->a;
38490 ulong ioaddr = dev->base_addr;
38491 int ticks;
38492
38493@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38494 spin_lock_irqsave(&lp->lock, flags);
38495 if (pcnet32_tx(dev)) {
38496 /* reset the chip to clear the error condition, then restart */
38497- lp->a.reset(ioaddr);
38498- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38499+ lp->a->reset(ioaddr);
38500+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38501 pcnet32_restart(dev, CSR0_START);
38502 netif_wake_queue(dev);
38503 }
38504@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_struct *napi, int budget)
38505 __napi_complete(napi);
38506
38507 /* clear interrupt masks */
38508- val = lp->a.read_csr(ioaddr, CSR3);
38509+ val = lp->a->read_csr(ioaddr, CSR3);
38510 val &= 0x00ff;
38511- lp->a.write_csr(ioaddr, CSR3, val);
38512+ lp->a->write_csr(ioaddr, CSR3, val);
38513
38514 /* Set interrupt enable. */
38515- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
38516+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
38517
38518 spin_unlock_irqrestore(&lp->lock, flags);
38519 }
38520@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38521 int i, csr0;
38522 u16 *buff = ptr;
38523 struct pcnet32_private *lp = netdev_priv(dev);
38524- struct pcnet32_access *a = &lp->a;
38525+ struct pcnet32_access *a = lp->a;
38526 ulong ioaddr = dev->base_addr;
38527 unsigned long flags;
38528
38529@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_device *dev, struct ethtool_regs *regs,
38530 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
38531 if (lp->phymask & (1 << j)) {
38532 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
38533- lp->a.write_bcr(ioaddr, 33,
38534+ lp->a->write_bcr(ioaddr, 33,
38535 (j << 5) | i);
38536- *buff++ = lp->a.read_bcr(ioaddr, 34);
38537+ *buff++ = lp->a->read_bcr(ioaddr, 34);
38538 }
38539 }
38540 }
38541@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38542 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
38543 lp->options |= PCNET32_PORT_FD;
38544
38545- lp->a = *a;
38546+ lp->a = a;
38547
38548 /* prior to register_netdev, dev->name is not yet correct */
38549 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
38550@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38551 if (lp->mii) {
38552 /* lp->phycount and lp->phymask are set to 0 by memset above */
38553
38554- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38555+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
38556 /* scan for PHYs */
38557 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38558 unsigned short id1, id2;
38559@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int shared, struct pci_dev *pdev)
38560 "Found PHY %04x:%04x at address %d.\n",
38561 id1, id2, i);
38562 }
38563- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38564+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
38565 if (lp->phycount > 1) {
38566 lp->options |= PCNET32_PORT_MII;
38567 }
38568@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_device *dev)
38569 }
38570
38571 /* Reset the PCNET32 */
38572- lp->a.reset(ioaddr);
38573+ lp->a->reset(ioaddr);
38574
38575 /* switch pcnet32 to 32bit mode */
38576- lp->a.write_bcr(ioaddr, 20, 2);
38577+ lp->a->write_bcr(ioaddr, 20, 2);
38578
38579 if (netif_msg_ifup(lp))
38580 printk(KERN_DEBUG
38581@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_device *dev)
38582 (u32) (lp->init_dma_addr));
38583
38584 /* set/reset autoselect bit */
38585- val = lp->a.read_bcr(ioaddr, 2) & ~2;
38586+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
38587 if (lp->options & PCNET32_PORT_ASEL)
38588 val |= 2;
38589- lp->a.write_bcr(ioaddr, 2, val);
38590+ lp->a->write_bcr(ioaddr, 2, val);
38591
38592 /* handle full duplex setting */
38593 if (lp->mii_if.full_duplex) {
38594- val = lp->a.read_bcr(ioaddr, 9) & ~3;
38595+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
38596 if (lp->options & PCNET32_PORT_FD) {
38597 val |= 1;
38598 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
38599@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_device *dev)
38600 if (lp->chip_version == 0x2627)
38601 val |= 3;
38602 }
38603- lp->a.write_bcr(ioaddr, 9, val);
38604+ lp->a->write_bcr(ioaddr, 9, val);
38605 }
38606
38607 /* set/reset GPSI bit in test register */
38608- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
38609+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
38610 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
38611 val |= 0x10;
38612- lp->a.write_csr(ioaddr, 124, val);
38613+ lp->a->write_csr(ioaddr, 124, val);
38614
38615 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
38616 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
38617@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_device *dev)
38618 * duplex, and/or enable auto negotiation, and clear DANAS
38619 */
38620 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
38621- lp->a.write_bcr(ioaddr, 32,
38622- lp->a.read_bcr(ioaddr, 32) | 0x0080);
38623+ lp->a->write_bcr(ioaddr, 32,
38624+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
38625 /* disable Auto Negotiation, set 10Mpbs, HD */
38626- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
38627+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
38628 if (lp->options & PCNET32_PORT_FD)
38629 val |= 0x10;
38630 if (lp->options & PCNET32_PORT_100)
38631 val |= 0x08;
38632- lp->a.write_bcr(ioaddr, 32, val);
38633+ lp->a->write_bcr(ioaddr, 32, val);
38634 } else {
38635 if (lp->options & PCNET32_PORT_ASEL) {
38636- lp->a.write_bcr(ioaddr, 32,
38637- lp->a.read_bcr(ioaddr,
38638+ lp->a->write_bcr(ioaddr, 32,
38639+ lp->a->read_bcr(ioaddr,
38640 32) | 0x0080);
38641 /* enable auto negotiate, setup, disable fd */
38642- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
38643+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
38644 val |= 0x20;
38645- lp->a.write_bcr(ioaddr, 32, val);
38646+ lp->a->write_bcr(ioaddr, 32, val);
38647 }
38648 }
38649 } else {
38650@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_device *dev)
38651 * There is really no good other way to handle multiple PHYs
38652 * other than turning off all automatics
38653 */
38654- val = lp->a.read_bcr(ioaddr, 2);
38655- lp->a.write_bcr(ioaddr, 2, val & ~2);
38656- val = lp->a.read_bcr(ioaddr, 32);
38657- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38658+ val = lp->a->read_bcr(ioaddr, 2);
38659+ lp->a->write_bcr(ioaddr, 2, val & ~2);
38660+ val = lp->a->read_bcr(ioaddr, 32);
38661+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
38662
38663 if (!(lp->options & PCNET32_PORT_ASEL)) {
38664 /* setup ecmd */
38665@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_device *dev)
38666 ecmd.speed =
38667 lp->
38668 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
38669- bcr9 = lp->a.read_bcr(ioaddr, 9);
38670+ bcr9 = lp->a->read_bcr(ioaddr, 9);
38671
38672 if (lp->options & PCNET32_PORT_FD) {
38673 ecmd.duplex = DUPLEX_FULL;
38674@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_device *dev)
38675 ecmd.duplex = DUPLEX_HALF;
38676 bcr9 |= ~(1 << 0);
38677 }
38678- lp->a.write_bcr(ioaddr, 9, bcr9);
38679+ lp->a->write_bcr(ioaddr, 9, bcr9);
38680 }
38681
38682 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
38683@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_device *dev)
38684
38685 #ifdef DO_DXSUFLO
38686 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
38687- val = lp->a.read_csr(ioaddr, CSR3);
38688+ val = lp->a->read_csr(ioaddr, CSR3);
38689 val |= 0x40;
38690- lp->a.write_csr(ioaddr, CSR3, val);
38691+ lp->a->write_csr(ioaddr, CSR3, val);
38692 }
38693 #endif
38694
38695@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_device *dev)
38696 napi_enable(&lp->napi);
38697
38698 /* Re-initialize the PCNET32, and start it when done. */
38699- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38700- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38701+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
38702+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
38703
38704- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38705- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38706+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
38707+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38708
38709 netif_start_queue(dev);
38710
38711@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_device *dev)
38712
38713 i = 0;
38714 while (i++ < 100)
38715- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38716+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38717 break;
38718 /*
38719 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
38720 * reports that doing so triggers a bug in the '974.
38721 */
38722- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
38723+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
38724
38725 if (netif_msg_ifup(lp))
38726 printk(KERN_DEBUG
38727 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
38728 dev->name, i,
38729 (u32) (lp->init_dma_addr),
38730- lp->a.read_csr(ioaddr, CSR0));
38731+ lp->a->read_csr(ioaddr, CSR0));
38732
38733 spin_unlock_irqrestore(&lp->lock, flags);
38734
38735@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_device *dev)
38736 * Switch back to 16bit mode to avoid problems with dumb
38737 * DOS packet driver after a warm reboot
38738 */
38739- lp->a.write_bcr(ioaddr, 20, 4);
38740+ lp->a->write_bcr(ioaddr, 20, 4);
38741
38742 err_free_irq:
38743 spin_unlock_irqrestore(&lp->lock, flags);
38744@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38745
38746 /* wait for stop */
38747 for (i = 0; i < 100; i++)
38748- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
38749+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
38750 break;
38751
38752 if (i >= 100 && netif_msg_drv(lp))
38753@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_device *dev, unsigned int csr0_bits)
38754 return;
38755
38756 /* ReInit Ring */
38757- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
38758+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
38759 i = 0;
38760 while (i++ < 1000)
38761- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
38762+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
38763 break;
38764
38765- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
38766+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
38767 }
38768
38769 static void pcnet32_tx_timeout(struct net_device *dev)
38770@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct net_device *dev)
38771 if (pcnet32_debug & NETIF_MSG_DRV)
38772 printk(KERN_ERR
38773 "%s: transmit timed out, status %4.4x, resetting.\n",
38774- dev->name, lp->a.read_csr(ioaddr, CSR0));
38775- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38776+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38777+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38778 dev->stats.tx_errors++;
38779 if (netif_msg_tx_err(lp)) {
38780 int i;
38781@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38782 if (netif_msg_tx_queued(lp)) {
38783 printk(KERN_DEBUG
38784 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
38785- dev->name, lp->a.read_csr(ioaddr, CSR0));
38786+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38787 }
38788
38789 /* Default status -- will not enable Successful-TxDone
38790@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(struct sk_buff *skb,
38791 dev->stats.tx_bytes += skb->len;
38792
38793 /* Trigger an immediate send poll. */
38794- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38795+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
38796
38797 dev->trans_start = jiffies;
38798
38799@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
38800
38801 spin_lock(&lp->lock);
38802
38803- csr0 = lp->a.read_csr(ioaddr, CSR0);
38804+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38805 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
38806 if (csr0 == 0xffff) {
38807 break; /* PCMCIA remove happened */
38808 }
38809 /* Acknowledge all of the current interrupt sources ASAP. */
38810- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38811+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
38812
38813 if (netif_msg_intr(lp))
38814 printk(KERN_DEBUG
38815 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
38816- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
38817+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
38818
38819 /* Log misc errors. */
38820 if (csr0 & 0x4000)
38821@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
38822 if (napi_schedule_prep(&lp->napi)) {
38823 u16 val;
38824 /* set interrupt masks */
38825- val = lp->a.read_csr(ioaddr, CSR3);
38826+ val = lp->a->read_csr(ioaddr, CSR3);
38827 val |= 0x5f00;
38828- lp->a.write_csr(ioaddr, CSR3, val);
38829+ lp->a->write_csr(ioaddr, CSR3, val);
38830
38831 __napi_schedule(&lp->napi);
38832 break;
38833 }
38834- csr0 = lp->a.read_csr(ioaddr, CSR0);
38835+ csr0 = lp->a->read_csr(ioaddr, CSR0);
38836 }
38837
38838 if (netif_msg_intr(lp))
38839 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
38840- dev->name, lp->a.read_csr(ioaddr, CSR0));
38841+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38842
38843 spin_unlock(&lp->lock);
38844
38845@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_device *dev)
38846
38847 spin_lock_irqsave(&lp->lock, flags);
38848
38849- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38850+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38851
38852 if (netif_msg_ifdown(lp))
38853 printk(KERN_DEBUG
38854 "%s: Shutting down ethercard, status was %2.2x.\n",
38855- dev->name, lp->a.read_csr(ioaddr, CSR0));
38856+ dev->name, lp->a->read_csr(ioaddr, CSR0));
38857
38858 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
38859- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38860+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38861
38862 /*
38863 * Switch back to 16bit mode to avoid problems with dumb
38864 * DOS packet driver after a warm reboot
38865 */
38866- lp->a.write_bcr(ioaddr, 20, 4);
38867+ lp->a->write_bcr(ioaddr, 20, 4);
38868
38869 spin_unlock_irqrestore(&lp->lock, flags);
38870
38871@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_get_stats(struct net_device *dev)
38872 unsigned long flags;
38873
38874 spin_lock_irqsave(&lp->lock, flags);
38875- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
38876+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
38877 spin_unlock_irqrestore(&lp->lock, flags);
38878
38879 return &dev->stats;
38880@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struct net_device *dev)
38881 if (dev->flags & IFF_ALLMULTI) {
38882 ib->filter[0] = cpu_to_le32(~0U);
38883 ib->filter[1] = cpu_to_le32(~0U);
38884- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38885- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38886- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38887- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38888+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
38889+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
38890+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
38891+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
38892 return;
38893 }
38894 /* clear the multicast filter */
38895@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struct net_device *dev)
38896 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
38897 }
38898 for (i = 0; i < 4; i++)
38899- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
38900+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
38901 le16_to_cpu(mcast_table[i]));
38902 return;
38903 }
38904@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38905
38906 spin_lock_irqsave(&lp->lock, flags);
38907 suspended = pcnet32_suspend(dev, &flags, 0);
38908- csr15 = lp->a.read_csr(ioaddr, CSR15);
38909+ csr15 = lp->a->read_csr(ioaddr, CSR15);
38910 if (dev->flags & IFF_PROMISC) {
38911 /* Log any net taps. */
38912 if (netif_msg_hw(lp))
38913@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(struct net_device *dev)
38914 lp->init_block->mode =
38915 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
38916 7);
38917- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
38918+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
38919 } else {
38920 lp->init_block->mode =
38921 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
38922- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38923+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
38924 pcnet32_load_multicast(dev);
38925 }
38926
38927 if (suspended) {
38928 int csr5;
38929 /* clear SUSPEND (SPND) - CSR5 bit 0 */
38930- csr5 = lp->a.read_csr(ioaddr, CSR5);
38931- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38932+ csr5 = lp->a->read_csr(ioaddr, CSR5);
38933+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
38934 } else {
38935- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
38936+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
38937 pcnet32_restart(dev, CSR0_NORMAL);
38938 netif_wake_queue(dev);
38939 }
38940@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *dev, int phy_id, int reg_num)
38941 if (!lp->mii)
38942 return 0;
38943
38944- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38945- val_out = lp->a.read_bcr(ioaddr, 34);
38946+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38947+ val_out = lp->a->read_bcr(ioaddr, 34);
38948
38949 return val_out;
38950 }
38951@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val)
38952 if (!lp->mii)
38953 return;
38954
38955- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38956- lp->a.write_bcr(ioaddr, 34, val);
38957+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
38958+ lp->a->write_bcr(ioaddr, 34, val);
38959 }
38960
38961 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
38962@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38963 curr_link = mii_link_ok(&lp->mii_if);
38964 } else {
38965 ulong ioaddr = dev->base_addr; /* card base I/O address */
38966- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
38967+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
38968 }
38969 if (!curr_link) {
38970 if (prev_link || verbose) {
38971@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct net_device *dev, int verbose)
38972 (ecmd.duplex ==
38973 DUPLEX_FULL) ? "full" : "half");
38974 }
38975- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
38976+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
38977 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
38978 if (lp->mii_if.full_duplex)
38979 bcr9 |= (1 << 0);
38980 else
38981 bcr9 &= ~(1 << 0);
38982- lp->a.write_bcr(dev->base_addr, 9, bcr9);
38983+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
38984 }
38985 } else {
38986 if (netif_msg_link(lp))
38987diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
38988index 7cc9898..6eb50d3 100644
38989--- a/drivers/net/sis190.c
38990+++ b/drivers/net/sis190.c
38991@@ -1598,7 +1598,7 @@ static int __devinit sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev,
38992 static int __devinit sis190_get_mac_addr_from_apc(struct pci_dev *pdev,
38993 struct net_device *dev)
38994 {
38995- static const u16 __devinitdata ids[] = { 0x0965, 0x0966, 0x0968 };
38996+ static const u16 __devinitconst ids[] = { 0x0965, 0x0966, 0x0968 };
38997 struct sis190_private *tp = netdev_priv(dev);
38998 struct pci_dev *isa_bridge;
38999 u8 reg, tmp8;
39000diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
39001index e13685a..60c948c 100644
39002--- a/drivers/net/sundance.c
39003+++ b/drivers/net/sundance.c
39004@@ -225,7 +225,7 @@ enum {
39005 struct pci_id_info {
39006 const char *name;
39007 };
39008-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39009+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39010 {"D-Link DFE-550TX FAST Ethernet Adapter"},
39011 {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"},
39012 {"D-Link DFE-580TX 4 port Server Adapter"},
39013diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
39014index 529f55a..cccaa18 100644
39015--- a/drivers/net/tg3.h
39016+++ b/drivers/net/tg3.h
39017@@ -95,6 +95,7 @@
39018 #define CHIPREV_ID_5750_A0 0x4000
39019 #define CHIPREV_ID_5750_A1 0x4001
39020 #define CHIPREV_ID_5750_A3 0x4003
39021+#define CHIPREV_ID_5750_C1 0x4201
39022 #define CHIPREV_ID_5750_C2 0x4202
39023 #define CHIPREV_ID_5752_A0_HW 0x5000
39024 #define CHIPREV_ID_5752_A0 0x6000
39025diff --git a/drivers/net/tokenring/abyss.c b/drivers/net/tokenring/abyss.c
39026index b9db1b5..720f9ce 100644
39027--- a/drivers/net/tokenring/abyss.c
39028+++ b/drivers/net/tokenring/abyss.c
39029@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver = {
39030
39031 static int __init abyss_init (void)
39032 {
39033- abyss_netdev_ops = tms380tr_netdev_ops;
39034+ pax_open_kernel();
39035+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39036
39037- abyss_netdev_ops.ndo_open = abyss_open;
39038- abyss_netdev_ops.ndo_stop = abyss_close;
39039+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
39040+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
39041+ pax_close_kernel();
39042
39043 return pci_register_driver(&abyss_driver);
39044 }
39045diff --git a/drivers/net/tokenring/madgemc.c b/drivers/net/tokenring/madgemc.c
39046index 456f8bf..373e56d 100644
39047--- a/drivers/net/tokenring/madgemc.c
39048+++ b/drivers/net/tokenring/madgemc.c
39049@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver = {
39050
39051 static int __init madgemc_init (void)
39052 {
39053- madgemc_netdev_ops = tms380tr_netdev_ops;
39054- madgemc_netdev_ops.ndo_open = madgemc_open;
39055- madgemc_netdev_ops.ndo_stop = madgemc_close;
39056+ pax_open_kernel();
39057+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39058+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
39059+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
39060+ pax_close_kernel();
39061
39062 return mca_register_driver (&madgemc_driver);
39063 }
39064diff --git a/drivers/net/tokenring/proteon.c b/drivers/net/tokenring/proteon.c
39065index 16e8783..925bd49 100644
39066--- a/drivers/net/tokenring/proteon.c
39067+++ b/drivers/net/tokenring/proteon.c
39068@@ -353,9 +353,11 @@ static int __init proteon_init(void)
39069 struct platform_device *pdev;
39070 int i, num = 0, err = 0;
39071
39072- proteon_netdev_ops = tms380tr_netdev_ops;
39073- proteon_netdev_ops.ndo_open = proteon_open;
39074- proteon_netdev_ops.ndo_stop = tms380tr_close;
39075+ pax_open_kernel();
39076+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39077+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
39078+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
39079+ pax_close_kernel();
39080
39081 err = platform_driver_register(&proteon_driver);
39082 if (err)
39083diff --git a/drivers/net/tokenring/skisa.c b/drivers/net/tokenring/skisa.c
39084index 46db5c5..37c1536 100644
39085--- a/drivers/net/tokenring/skisa.c
39086+++ b/drivers/net/tokenring/skisa.c
39087@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
39088 struct platform_device *pdev;
39089 int i, num = 0, err = 0;
39090
39091- sk_isa_netdev_ops = tms380tr_netdev_ops;
39092- sk_isa_netdev_ops.ndo_open = sk_isa_open;
39093- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39094+ pax_open_kernel();
39095+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
39096+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
39097+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
39098+ pax_close_kernel();
39099
39100 err = platform_driver_register(&sk_isa_driver);
39101 if (err)
39102diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
39103index 74e5ba4..5cf6bc9 100644
39104--- a/drivers/net/tulip/de2104x.c
39105+++ b/drivers/net/tulip/de2104x.c
39106@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_info (struct de_private *de)
39107 struct de_srom_info_leaf *il;
39108 void *bufp;
39109
39110+ pax_track_stack();
39111+
39112 /* download entire eeprom */
39113 for (i = 0; i < DE_EEPROM_WORDS; i++)
39114 ((__le16 *)ee_data)[i] =
39115diff --git a/drivers/net/tulip/de4x5.c b/drivers/net/tulip/de4x5.c
39116index a8349b7..90f9dfe 100644
39117--- a/drivers/net/tulip/de4x5.c
39118+++ b/drivers/net/tulip/de4x5.c
39119@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39120 for (i=0; i<ETH_ALEN; i++) {
39121 tmp.addr[i] = dev->dev_addr[i];
39122 }
39123- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39124+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
39125 break;
39126
39127 case DE4X5_SET_HWADDR: /* Set the hardware address */
39128@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
39129 spin_lock_irqsave(&lp->lock, flags);
39130 memcpy(&statbuf, &lp->pktStats, ioc->len);
39131 spin_unlock_irqrestore(&lp->lock, flags);
39132- if (copy_to_user(ioc->data, &statbuf, ioc->len))
39133+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
39134 return -EFAULT;
39135 break;
39136 }
39137diff --git a/drivers/net/tulip/eeprom.c b/drivers/net/tulip/eeprom.c
39138index 391acd3..56d11cd 100644
39139--- a/drivers/net/tulip/eeprom.c
39140+++ b/drivers/net/tulip/eeprom.c
39141@@ -80,7 +80,7 @@ static struct eeprom_fixup eeprom_fixups[] __devinitdata = {
39142 {NULL}};
39143
39144
39145-static const char *block_name[] __devinitdata = {
39146+static const char *block_name[] __devinitconst = {
39147 "21140 non-MII",
39148 "21140 MII PHY",
39149 "21142 Serial PHY",
39150diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
39151index b38d3b7..b1cff23 100644
39152--- a/drivers/net/tulip/winbond-840.c
39153+++ b/drivers/net/tulip/winbond-840.c
39154@@ -235,7 +235,7 @@ struct pci_id_info {
39155 int drv_flags; /* Driver use, intended as capability flags. */
39156 };
39157
39158-static const struct pci_id_info pci_id_tbl[] __devinitdata = {
39159+static const struct pci_id_info pci_id_tbl[] __devinitconst = {
39160 { /* Sometime a Level-One switch card. */
39161 "Winbond W89c840", CanHaveMII | HasBrokenTx | FDXOnNoMII},
39162 { "Winbond W89c840", CanHaveMII | HasBrokenTx},
39163diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
39164index f450bc9..2b747c8 100644
39165--- a/drivers/net/usb/hso.c
39166+++ b/drivers/net/usb/hso.c
39167@@ -71,7 +71,7 @@
39168 #include <asm/byteorder.h>
39169 #include <linux/serial_core.h>
39170 #include <linux/serial.h>
39171-
39172+#include <asm/local.h>
39173
39174 #define DRIVER_VERSION "1.2"
39175 #define MOD_AUTHOR "Option Wireless"
39176@@ -258,7 +258,7 @@ struct hso_serial {
39177
39178 /* from usb_serial_port */
39179 struct tty_struct *tty;
39180- int open_count;
39181+ local_t open_count;
39182 spinlock_t serial_lock;
39183
39184 int (*write_data) (struct hso_serial *serial);
39185@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
39186 struct urb *urb;
39187
39188 urb = serial->rx_urb[0];
39189- if (serial->open_count > 0) {
39190+ if (local_read(&serial->open_count) > 0) {
39191 count = put_rxbuf_data(urb, serial);
39192 if (count == -1)
39193 return;
39194@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
39195 DUMP1(urb->transfer_buffer, urb->actual_length);
39196
39197 /* Anyone listening? */
39198- if (serial->open_count == 0)
39199+ if (local_read(&serial->open_count) == 0)
39200 return;
39201
39202 if (status == 0) {
39203@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39204 spin_unlock_irq(&serial->serial_lock);
39205
39206 /* check for port already opened, if not set the termios */
39207- serial->open_count++;
39208- if (serial->open_count == 1) {
39209+ if (local_inc_return(&serial->open_count) == 1) {
39210 tty->low_latency = 1;
39211 serial->rx_state = RX_IDLE;
39212 /* Force default termio settings */
39213@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
39214 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
39215 if (result) {
39216 hso_stop_serial_device(serial->parent);
39217- serial->open_count--;
39218+ local_dec(&serial->open_count);
39219 kref_put(&serial->parent->ref, hso_serial_ref_free);
39220 }
39221 } else {
39222@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
39223
39224 /* reset the rts and dtr */
39225 /* do the actual close */
39226- serial->open_count--;
39227+ local_dec(&serial->open_count);
39228
39229- if (serial->open_count <= 0) {
39230- serial->open_count = 0;
39231+ if (local_read(&serial->open_count) <= 0) {
39232+ local_set(&serial->open_count, 0);
39233 spin_lock_irq(&serial->serial_lock);
39234 if (serial->tty == tty) {
39235 serial->tty->driver_data = NULL;
39236@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
39237
39238 /* the actual setup */
39239 spin_lock_irqsave(&serial->serial_lock, flags);
39240- if (serial->open_count)
39241+ if (local_read(&serial->open_count))
39242 _hso_serial_set_termios(tty, old);
39243 else
39244 tty->termios = old;
39245@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interface *iface)
39246 /* Start all serial ports */
39247 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
39248 if (serial_table[i] && (serial_table[i]->interface == iface)) {
39249- if (dev2ser(serial_table[i])->open_count) {
39250+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
39251 result =
39252 hso_start_serial_device(serial_table[i], GFP_NOIO);
39253 hso_kick_transmit(dev2ser(serial_table[i]));
39254diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
39255index 3e94f0c..ffdd926 100644
39256--- a/drivers/net/vxge/vxge-config.h
39257+++ b/drivers/net/vxge/vxge-config.h
39258@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
39259 void (*link_down)(struct __vxge_hw_device *devh);
39260 void (*crit_err)(struct __vxge_hw_device *devh,
39261 enum vxge_hw_event type, u64 ext_data);
39262-};
39263+} __no_const;
39264
39265 /*
39266 * struct __vxge_hw_blockpool_entry - Block private data structure
39267diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
39268index 068d7a9..35293de 100644
39269--- a/drivers/net/vxge/vxge-main.c
39270+++ b/drivers/net/vxge/vxge-main.c
39271@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_TX(struct vxge_fifo *fifo)
39272 struct sk_buff *completed[NR_SKB_COMPLETED];
39273 int more;
39274
39275+ pax_track_stack();
39276+
39277 do {
39278 more = 0;
39279 skb_ptr = completed;
39280@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
39281 u8 mtable[256] = {0}; /* CPU to vpath mapping */
39282 int index;
39283
39284+ pax_track_stack();
39285+
39286 /*
39287 * Filling
39288 * - itable with bucket numbers
39289diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
39290index 461742b..81be42e 100644
39291--- a/drivers/net/vxge/vxge-traffic.h
39292+++ b/drivers/net/vxge/vxge-traffic.h
39293@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
39294 struct vxge_hw_mempool_dma *dma_object,
39295 u32 index,
39296 u32 is_last);
39297-};
39298+} __no_const;
39299
39300 void
39301 __vxge_hw_mempool_destroy(
39302diff --git a/drivers/net/wan/cycx_x25.c b/drivers/net/wan/cycx_x25.c
39303index cd8cb95..4153b79 100644
39304--- a/drivers/net/wan/cycx_x25.c
39305+++ b/drivers/net/wan/cycx_x25.c
39306@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned char *p, int len)
39307 unsigned char hex[1024],
39308 * phex = hex;
39309
39310+ pax_track_stack();
39311+
39312 if (len >= (sizeof(hex) / 2))
39313 len = (sizeof(hex) / 2) - 1;
39314
39315diff --git a/drivers/net/wan/hdlc_x25.c b/drivers/net/wan/hdlc_x25.c
39316index aa9248f..a4e3c3b 100644
39317--- a/drivers/net/wan/hdlc_x25.c
39318+++ b/drivers/net/wan/hdlc_x25.c
39319@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
39320
39321 static int x25_open(struct net_device *dev)
39322 {
39323- struct lapb_register_struct cb;
39324+ static struct lapb_register_struct cb = {
39325+ .connect_confirmation = x25_connected,
39326+ .connect_indication = x25_connected,
39327+ .disconnect_confirmation = x25_disconnected,
39328+ .disconnect_indication = x25_disconnected,
39329+ .data_indication = x25_data_indication,
39330+ .data_transmit = x25_data_transmit
39331+ };
39332 int result;
39333
39334- cb.connect_confirmation = x25_connected;
39335- cb.connect_indication = x25_connected;
39336- cb.disconnect_confirmation = x25_disconnected;
39337- cb.disconnect_indication = x25_disconnected;
39338- cb.data_indication = x25_data_indication;
39339- cb.data_transmit = x25_data_transmit;
39340-
39341 result = lapb_register(dev, &cb);
39342 if (result != LAPB_OK)
39343 return result;
39344diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
39345index 5ad287c..783b020 100644
39346--- a/drivers/net/wimax/i2400m/usb-fw.c
39347+++ b/drivers/net/wimax/i2400m/usb-fw.c
39348@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(struct i2400m *i2400m,
39349 int do_autopm = 1;
39350 DECLARE_COMPLETION_ONSTACK(notif_completion);
39351
39352+ pax_track_stack();
39353+
39354 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
39355 i2400m, ack, ack_size);
39356 BUG_ON(_ack == i2400m->bm_ack_buf);
39357diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
39358index 6c26840..62c97c3 100644
39359--- a/drivers/net/wireless/airo.c
39360+++ b/drivers/net/wireless/airo.c
39361@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (struct airo_info *ai) {
39362 BSSListElement * loop_net;
39363 BSSListElement * tmp_net;
39364
39365+ pax_track_stack();
39366+
39367 /* Blow away current list of scan results */
39368 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
39369 list_move_tail (&loop_net->list, &ai->network_free_list);
39370@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *ai, u8 *mac, int lock)
39371 WepKeyRid wkr;
39372 int rc;
39373
39374+ pax_track_stack();
39375+
39376 memset( &mySsid, 0, sizeof( mySsid ) );
39377 kfree (ai->flash);
39378 ai->flash = NULL;
39379@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct inode *inode,
39380 __le32 *vals = stats.vals;
39381 int len;
39382
39383+ pax_track_stack();
39384+
39385 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39386 return -ENOMEM;
39387 data = (struct proc_data *)file->private_data;
39388@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct inode *inode, struct file *file ) {
39389 /* If doLoseSync is not 1, we won't do a Lose Sync */
39390 int doLoseSync = -1;
39391
39392+ pax_track_stack();
39393+
39394 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
39395 return -ENOMEM;
39396 data = (struct proc_data *)file->private_data;
39397@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_device *dev,
39398 int i;
39399 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
39400
39401+ pax_track_stack();
39402+
39403 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
39404 if (!qual)
39405 return -ENOMEM;
39406@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(struct airo_info *local)
39407 CapabilityRid cap_rid;
39408 __le32 *vals = stats_rid.vals;
39409
39410+ pax_track_stack();
39411+
39412 /* Get stats out of the card */
39413 clear_bit(JOB_WSTATS, &local->jobs);
39414 if (local->power.event) {
39415diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
39416index 747508c..82e965d 100644
39417--- a/drivers/net/wireless/ath/ath5k/debug.c
39418+++ b/drivers/net/wireless/ath/ath5k/debug.c
39419@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
39420 unsigned int v;
39421 u64 tsf;
39422
39423+ pax_track_stack();
39424+
39425 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
39426 len += snprintf(buf+len, sizeof(buf)-len,
39427 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
39428@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
39429 unsigned int len = 0;
39430 unsigned int i;
39431
39432+ pax_track_stack();
39433+
39434 len += snprintf(buf+len, sizeof(buf)-len,
39435 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
39436
39437diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
39438index 2be4c22..593b1eb 100644
39439--- a/drivers/net/wireless/ath/ath9k/debug.c
39440+++ b/drivers/net/wireless/ath/ath9k/debug.c
39441@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struct file *file, char __user *user_buf,
39442 char buf[512];
39443 unsigned int len = 0;
39444
39445+ pax_track_stack();
39446+
39447 len += snprintf(buf + len, sizeof(buf) - len,
39448 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
39449 len += snprintf(buf + len, sizeof(buf) - len,
39450@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
39451 int i;
39452 u8 addr[ETH_ALEN];
39453
39454+ pax_track_stack();
39455+
39456 len += snprintf(buf + len, sizeof(buf) - len,
39457 "primary: %s (%s chan=%d ht=%d)\n",
39458 wiphy_name(sc->pri_wiphy->hw->wiphy),
39459diff --git a/drivers/net/wireless/b43/debugfs.c b/drivers/net/wireless/b43/debugfs.c
39460index 80b19a4..dab3a45 100644
39461--- a/drivers/net/wireless/b43/debugfs.c
39462+++ b/drivers/net/wireless/b43/debugfs.c
39463@@ -43,7 +43,7 @@ static struct dentry *rootdir;
39464 struct b43_debugfs_fops {
39465 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
39466 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
39467- struct file_operations fops;
39468+ const struct file_operations fops;
39469 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
39470 size_t file_struct_offset;
39471 };
39472diff --git a/drivers/net/wireless/b43legacy/debugfs.c b/drivers/net/wireless/b43legacy/debugfs.c
39473index 1f85ac5..c99b4b4 100644
39474--- a/drivers/net/wireless/b43legacy/debugfs.c
39475+++ b/drivers/net/wireless/b43legacy/debugfs.c
39476@@ -44,7 +44,7 @@ static struct dentry *rootdir;
39477 struct b43legacy_debugfs_fops {
39478 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
39479 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
39480- struct file_operations fops;
39481+ const struct file_operations fops;
39482 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
39483 size_t file_struct_offset;
39484 /* Take wl->irq_lock before calling read/write? */
39485diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
39486index 43102bf..3b569c3 100644
39487--- a/drivers/net/wireless/ipw2x00/ipw2100.c
39488+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
39489@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid,
39490 int err;
39491 DECLARE_SSID_BUF(ssid);
39492
39493+ pax_track_stack();
39494+
39495 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
39496
39497 if (ssid_len)
39498@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw2100_priv *priv,
39499 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
39500 int err;
39501
39502+ pax_track_stack();
39503+
39504 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
39505 idx, keylen, len);
39506
39507diff --git a/drivers/net/wireless/ipw2x00/libipw_rx.c b/drivers/net/wireless/ipw2x00/libipw_rx.c
39508index 282b1f7..169f0cf 100644
39509--- a/drivers/net/wireless/ipw2x00/libipw_rx.c
39510+++ b/drivers/net/wireless/ipw2x00/libipw_rx.c
39511@@ -1566,6 +1566,8 @@ static void libipw_process_probe_response(struct libipw_device
39512 unsigned long flags;
39513 DECLARE_SSID_BUF(ssid);
39514
39515+ pax_track_stack();
39516+
39517 LIBIPW_DEBUG_SCAN("'%s' (%pM"
39518 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
39519 print_ssid(ssid, info_element->data, info_element->len),
39520diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
39521index 950267a..80d5fd2 100644
39522--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
39523+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
39524@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib = {
39525 },
39526 };
39527
39528-static struct iwl_ops iwl1000_ops = {
39529+static const struct iwl_ops iwl1000_ops = {
39530 .ucode = &iwl5000_ucode,
39531 .lib = &iwl1000_lib,
39532 .hcmd = &iwl5000_hcmd,
39533diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
39534index 56bfcc3..b348020 100644
39535--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
39536+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
39537@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
39538 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
39539 };
39540
39541-static struct iwl_ops iwl3945_ops = {
39542+static const struct iwl_ops iwl3945_ops = {
39543 .ucode = &iwl3945_ucode,
39544 .lib = &iwl3945_lib,
39545 .hcmd = &iwl3945_hcmd,
39546diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
39547index 585b8d4..e142963 100644
39548--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
39549+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
39550@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib = {
39551 },
39552 };
39553
39554-static struct iwl_ops iwl4965_ops = {
39555+static const struct iwl_ops iwl4965_ops = {
39556 .ucode = &iwl4965_ucode,
39557 .lib = &iwl4965_lib,
39558 .hcmd = &iwl4965_hcmd,
39559diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
39560index 1f423f2..e37c192 100644
39561--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
39562+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
39563@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib = {
39564 },
39565 };
39566
39567-struct iwl_ops iwl5000_ops = {
39568+const struct iwl_ops iwl5000_ops = {
39569 .ucode = &iwl5000_ucode,
39570 .lib = &iwl5000_lib,
39571 .hcmd = &iwl5000_hcmd,
39572 .utils = &iwl5000_hcmd_utils,
39573 };
39574
39575-static struct iwl_ops iwl5150_ops = {
39576+static const struct iwl_ops iwl5150_ops = {
39577 .ucode = &iwl5000_ucode,
39578 .lib = &iwl5150_lib,
39579 .hcmd = &iwl5000_hcmd,
39580diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
39581index 1473452..f07d5e1 100644
39582--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
39583+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
39584@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = {
39585 .calc_rssi = iwl5000_calc_rssi,
39586 };
39587
39588-static struct iwl_ops iwl6000_ops = {
39589+static const struct iwl_ops iwl6000_ops = {
39590 .ucode = &iwl5000_ucode,
39591 .lib = &iwl6000_lib,
39592 .hcmd = &iwl5000_hcmd,
39593diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39594index 1a3dfa2..b3e0a61 100644
39595--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39596+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
39597@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
39598 u8 active_index = 0;
39599 s32 tpt = 0;
39600
39601+ pax_track_stack();
39602+
39603 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
39604
39605 if (!ieee80211_is_data(hdr->frame_control) ||
39606@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
39607 u8 valid_tx_ant = 0;
39608 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
39609
39610+ pax_track_stack();
39611+
39612 /* Override starting rate (index 0) if needed for debug purposes */
39613 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
39614
39615diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
39616index 0e56d78..6a3c107 100644
39617--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
39618+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
39619@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
39620 if (iwl_debug_level & IWL_DL_INFO)
39621 dev_printk(KERN_DEBUG, &(pdev->dev),
39622 "Disabling hw_scan\n");
39623- iwl_hw_ops.hw_scan = NULL;
39624+ pax_open_kernel();
39625+ *(void **)&iwl_hw_ops.hw_scan = NULL;
39626+ pax_close_kernel();
39627 }
39628
39629 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
39630diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
39631index cbc6290..eb323d7 100644
39632--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
39633+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
39634@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv);
39635 #endif
39636
39637 #else
39638-#define IWL_DEBUG(__priv, level, fmt, args...)
39639-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
39640+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
39641+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
39642 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
39643 void *p, u32 len)
39644 {}
39645diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39646index a198bcf..8e68233 100644
39647--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39648+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
39649@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
39650 int pos = 0;
39651 const size_t bufsz = sizeof(buf);
39652
39653+ pax_track_stack();
39654+
39655 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
39656 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
39657 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
39658@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
39659 const size_t bufsz = sizeof(buf);
39660 ssize_t ret;
39661
39662+ pax_track_stack();
39663+
39664 for (i = 0; i < AC_NUM; i++) {
39665 pos += scnprintf(buf + pos, bufsz - pos,
39666 "\tcw_min\tcw_max\taifsn\ttxop\n");
39667diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
39668index 3539ea4..b174bfa 100644
39669--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
39670+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
39671@@ -68,7 +68,7 @@ struct iwl_tx_queue;
39672
39673 /* shared structures from iwl-5000.c */
39674 extern struct iwl_mod_params iwl50_mod_params;
39675-extern struct iwl_ops iwl5000_ops;
39676+extern const struct iwl_ops iwl5000_ops;
39677 extern struct iwl_ucode_ops iwl5000_ucode;
39678 extern struct iwl_lib_ops iwl5000_lib;
39679 extern struct iwl_hcmd_ops iwl5000_hcmd;
39680diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39681index 619590d..69235ee 100644
39682--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
39683+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
39684@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
39685 */
39686 if (iwl3945_mod_params.disable_hw_scan) {
39687 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
39688- iwl3945_hw_ops.hw_scan = NULL;
39689+ pax_open_kernel();
39690+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
39691+ pax_close_kernel();
39692 }
39693
39694
39695diff --git a/drivers/net/wireless/iwmc3200wifi/debugfs.c b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39696index 1465379..fe4d78b 100644
39697--- a/drivers/net/wireless/iwmc3200wifi/debugfs.c
39698+++ b/drivers/net/wireless/iwmc3200wifi/debugfs.c
39699@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(struct file *filp,
39700 int buf_len = 512;
39701 size_t len = 0;
39702
39703+ pax_track_stack();
39704+
39705 if (*ppos != 0)
39706 return 0;
39707 if (count < sizeof(buf))
39708diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
39709index 893a55c..7f66a50 100644
39710--- a/drivers/net/wireless/libertas/debugfs.c
39711+++ b/drivers/net/wireless/libertas/debugfs.c
39712@@ -708,7 +708,7 @@ out_unlock:
39713 struct lbs_debugfs_files {
39714 const char *name;
39715 int perm;
39716- struct file_operations fops;
39717+ const struct file_operations fops;
39718 };
39719
39720 static const struct lbs_debugfs_files debugfs_files[] = {
39721diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
39722index 2ecbedb..42704f0 100644
39723--- a/drivers/net/wireless/rndis_wlan.c
39724+++ b/drivers/net/wireless/rndis_wlan.c
39725@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
39726
39727 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
39728
39729- if (rts_threshold < 0 || rts_threshold > 2347)
39730+ if (rts_threshold > 2347)
39731 rts_threshold = 2347;
39732
39733 tmp = cpu_to_le32(rts_threshold);
39734diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
39735index 334ccd6..47f8944 100644
39736--- a/drivers/oprofile/buffer_sync.c
39737+++ b/drivers/oprofile/buffer_sync.c
39738@@ -342,7 +342,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
39739 if (cookie == NO_COOKIE)
39740 offset = pc;
39741 if (cookie == INVALID_COOKIE) {
39742- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39743+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39744 offset = pc;
39745 }
39746 if (cookie != last_cookie) {
39747@@ -386,14 +386,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
39748 /* add userspace sample */
39749
39750 if (!mm) {
39751- atomic_inc(&oprofile_stats.sample_lost_no_mm);
39752+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
39753 return 0;
39754 }
39755
39756 cookie = lookup_dcookie(mm, s->eip, &offset);
39757
39758 if (cookie == INVALID_COOKIE) {
39759- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
39760+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
39761 return 0;
39762 }
39763
39764@@ -562,7 +562,7 @@ void sync_buffer(int cpu)
39765 /* ignore backtraces if failed to add a sample */
39766 if (state == sb_bt_start) {
39767 state = sb_bt_ignore;
39768- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
39769+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
39770 }
39771 }
39772 release_mm(mm);
39773diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
39774index 5df60a6..72f5c1c 100644
39775--- a/drivers/oprofile/event_buffer.c
39776+++ b/drivers/oprofile/event_buffer.c
39777@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
39778 }
39779
39780 if (buffer_pos == buffer_size) {
39781- atomic_inc(&oprofile_stats.event_lost_overflow);
39782+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
39783 return;
39784 }
39785
39786diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
39787index dc8a042..fe5f315 100644
39788--- a/drivers/oprofile/oprof.c
39789+++ b/drivers/oprofile/oprof.c
39790@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
39791 if (oprofile_ops.switch_events())
39792 return;
39793
39794- atomic_inc(&oprofile_stats.multiplex_counter);
39795+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
39796 start_switch_worker();
39797 }
39798
39799diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
39800index 61689e8..387f7f8 100644
39801--- a/drivers/oprofile/oprofile_stats.c
39802+++ b/drivers/oprofile/oprofile_stats.c
39803@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
39804 cpu_buf->sample_invalid_eip = 0;
39805 }
39806
39807- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
39808- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
39809- atomic_set(&oprofile_stats.event_lost_overflow, 0);
39810- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
39811- atomic_set(&oprofile_stats.multiplex_counter, 0);
39812+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
39813+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
39814+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
39815+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
39816+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
39817 }
39818
39819
39820diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
39821index 0b54e46..a37c527 100644
39822--- a/drivers/oprofile/oprofile_stats.h
39823+++ b/drivers/oprofile/oprofile_stats.h
39824@@ -13,11 +13,11 @@
39825 #include <asm/atomic.h>
39826
39827 struct oprofile_stat_struct {
39828- atomic_t sample_lost_no_mm;
39829- atomic_t sample_lost_no_mapping;
39830- atomic_t bt_lost_no_mapping;
39831- atomic_t event_lost_overflow;
39832- atomic_t multiplex_counter;
39833+ atomic_unchecked_t sample_lost_no_mm;
39834+ atomic_unchecked_t sample_lost_no_mapping;
39835+ atomic_unchecked_t bt_lost_no_mapping;
39836+ atomic_unchecked_t event_lost_overflow;
39837+ atomic_unchecked_t multiplex_counter;
39838 };
39839
39840 extern struct oprofile_stat_struct oprofile_stats;
39841diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
39842index 2766a6d..80c77e2 100644
39843--- a/drivers/oprofile/oprofilefs.c
39844+++ b/drivers/oprofile/oprofilefs.c
39845@@ -187,7 +187,7 @@ static const struct file_operations atomic_ro_fops = {
39846
39847
39848 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
39849- char const *name, atomic_t *val)
39850+ char const *name, atomic_unchecked_t *val)
39851 {
39852 struct dentry *d = __oprofilefs_create_file(sb, root, name,
39853 &atomic_ro_fops, 0444);
39854diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c
39855index 13a64bc..ad62835 100644
39856--- a/drivers/parisc/pdc_stable.c
39857+++ b/drivers/parisc/pdc_stable.c
39858@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj, struct attribute *attr,
39859 return ret;
39860 }
39861
39862-static struct sysfs_ops pdcspath_attr_ops = {
39863+static const struct sysfs_ops pdcspath_attr_ops = {
39864 .show = pdcspath_attr_show,
39865 .store = pdcspath_attr_store,
39866 };
39867diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
39868index 8eefe56..40751a7 100644
39869--- a/drivers/parport/procfs.c
39870+++ b/drivers/parport/procfs.c
39871@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
39872
39873 *ppos += len;
39874
39875- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
39876+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
39877 }
39878
39879 #ifdef CONFIG_PARPORT_1284
39880@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
39881
39882 *ppos += len;
39883
39884- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
39885+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
39886 }
39887 #endif /* IEEE1284.3 support. */
39888
39889diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
39890index 73e7d8e..c80f3d2 100644
39891--- a/drivers/pci/hotplug/acpiphp_glue.c
39892+++ b/drivers/pci/hotplug/acpiphp_glue.c
39893@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notifier_block *nb, unsigned long val,
39894 }
39895
39896
39897-static struct acpi_dock_ops acpiphp_dock_ops = {
39898+static const struct acpi_dock_ops acpiphp_dock_ops = {
39899 .handler = handle_hotplug_event_func,
39900 };
39901
39902diff --git a/drivers/pci/hotplug/cpci_hotplug.h b/drivers/pci/hotplug/cpci_hotplug.h
39903index 9fff878..ad0ad53 100644
39904--- a/drivers/pci/hotplug/cpci_hotplug.h
39905+++ b/drivers/pci/hotplug/cpci_hotplug.h
39906@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
39907 int (*hardware_test) (struct slot* slot, u32 value);
39908 u8 (*get_power) (struct slot* slot);
39909 int (*set_power) (struct slot* slot, int value);
39910-};
39911+} __no_const;
39912
39913 struct cpci_hp_controller {
39914 unsigned int irq;
39915diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
39916index 76ba8a1..20ca857 100644
39917--- a/drivers/pci/hotplug/cpqphp_nvram.c
39918+++ b/drivers/pci/hotplug/cpqphp_nvram.c
39919@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
39920
39921 void compaq_nvram_init (void __iomem *rom_start)
39922 {
39923+
39924+#ifndef CONFIG_PAX_KERNEXEC
39925 if (rom_start) {
39926 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
39927 }
39928+#endif
39929+
39930 dbg("int15 entry = %p\n", compaq_int15_entry_point);
39931
39932 /* initialize our int15 lock */
39933diff --git a/drivers/pci/hotplug/fakephp.c b/drivers/pci/hotplug/fakephp.c
39934index 6151389..0a894ef 100644
39935--- a/drivers/pci/hotplug/fakephp.c
39936+++ b/drivers/pci/hotplug/fakephp.c
39937@@ -73,7 +73,7 @@ static void legacy_release(struct kobject *kobj)
39938 }
39939
39940 static struct kobj_type legacy_ktype = {
39941- .sysfs_ops = &(struct sysfs_ops){
39942+ .sysfs_ops = &(const struct sysfs_ops){
39943 .store = legacy_store, .show = legacy_show
39944 },
39945 .release = &legacy_release,
39946diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
39947index 5b680df..fe05b7e 100644
39948--- a/drivers/pci/intel-iommu.c
39949+++ b/drivers/pci/intel-iommu.c
39950@@ -2643,7 +2643,7 @@ error:
39951 return 0;
39952 }
39953
39954-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
39955+dma_addr_t intel_map_page(struct device *dev, struct page *page,
39956 unsigned long offset, size_t size,
39957 enum dma_data_direction dir,
39958 struct dma_attrs *attrs)
39959@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain *dom, struct iova *iova)
39960 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
39961 }
39962
39963-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39964+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39965 size_t size, enum dma_data_direction dir,
39966 struct dma_attrs *attrs)
39967 {
39968@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
39969 }
39970 }
39971
39972-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39973+void *intel_alloc_coherent(struct device *hwdev, size_t size,
39974 dma_addr_t *dma_handle, gfp_t flags)
39975 {
39976 void *vaddr;
39977@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct device *hwdev, size_t size,
39978 return NULL;
39979 }
39980
39981-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39982+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39983 dma_addr_t dma_handle)
39984 {
39985 int order;
39986@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
39987 free_pages((unsigned long)vaddr, order);
39988 }
39989
39990-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39991+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
39992 int nelems, enum dma_data_direction dir,
39993 struct dma_attrs *attrs)
39994 {
39995@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
39996 return nelems;
39997 }
39998
39999-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40000+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
40001 enum dma_data_direction dir, struct dma_attrs *attrs)
40002 {
40003 int i;
40004@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
40005 return nelems;
40006 }
40007
40008-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40009+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
40010 {
40011 return !dma_addr;
40012 }
40013
40014-struct dma_map_ops intel_dma_ops = {
40015+const struct dma_map_ops intel_dma_ops = {
40016 .alloc_coherent = intel_alloc_coherent,
40017 .free_coherent = intel_free_coherent,
40018 .map_sg = intel_map_sg,
40019diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
40020index 5b7056c..607bc94 100644
40021--- a/drivers/pci/pcie/aspm.c
40022+++ b/drivers/pci/pcie/aspm.c
40023@@ -27,9 +27,9 @@
40024 #define MODULE_PARAM_PREFIX "pcie_aspm."
40025
40026 /* Note: those are not register definitions */
40027-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
40028-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
40029-#define ASPM_STATE_L1 (4) /* L1 state */
40030+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
40031+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
40032+#define ASPM_STATE_L1 (4U) /* L1 state */
40033 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
40034 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
40035
40036diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
40037index 8105e32..ca10419 100644
40038--- a/drivers/pci/probe.c
40039+++ b/drivers/pci/probe.c
40040@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
40041 return ret;
40042 }
40043
40044-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
40045+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
40046 struct device_attribute *attr,
40047 char *buf)
40048 {
40049 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
40050 }
40051
40052-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
40053+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
40054 struct device_attribute *attr,
40055 char *buf)
40056 {
40057diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
40058index a03ad8c..024b0da 100644
40059--- a/drivers/pci/proc.c
40060+++ b/drivers/pci/proc.c
40061@@ -480,7 +480,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
40062 static int __init pci_proc_init(void)
40063 {
40064 struct pci_dev *dev = NULL;
40065+
40066+#ifdef CONFIG_GRKERNSEC_PROC_ADD
40067+#ifdef CONFIG_GRKERNSEC_PROC_USER
40068+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
40069+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
40070+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
40071+#endif
40072+#else
40073 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
40074+#endif
40075 proc_create("devices", 0, proc_bus_pci_dir,
40076 &proc_bus_pci_dev_operations);
40077 proc_initialized = 1;
40078diff --git a/drivers/pci/slot.c b/drivers/pci/slot.c
40079index 8c02b6c..5584d8e 100644
40080--- a/drivers/pci/slot.c
40081+++ b/drivers/pci/slot.c
40082@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struct kobject *kobj,
40083 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
40084 }
40085
40086-static struct sysfs_ops pci_slot_sysfs_ops = {
40087+static const struct sysfs_ops pci_slot_sysfs_ops = {
40088 .show = pci_slot_attr_show,
40089 .store = pci_slot_attr_store,
40090 };
40091diff --git a/drivers/pcmcia/pcmcia_ioctl.c b/drivers/pcmcia/pcmcia_ioctl.c
40092index 30cf71d2..50938f1 100644
40093--- a/drivers/pcmcia/pcmcia_ioctl.c
40094+++ b/drivers/pcmcia/pcmcia_ioctl.c
40095@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode, struct file * file,
40096 return -EFAULT;
40097 }
40098 }
40099- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40100+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
40101 if (!buf)
40102 return -ENOMEM;
40103
40104diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
40105index 52183c4..b224c69 100644
40106--- a/drivers/platform/x86/acer-wmi.c
40107+++ b/drivers/platform/x86/acer-wmi.c
40108@@ -918,7 +918,7 @@ static int update_bl_status(struct backlight_device *bd)
40109 return 0;
40110 }
40111
40112-static struct backlight_ops acer_bl_ops = {
40113+static const struct backlight_ops acer_bl_ops = {
40114 .get_brightness = read_brightness,
40115 .update_status = update_bl_status,
40116 };
40117diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
40118index 767cb61..a87380b 100644
40119--- a/drivers/platform/x86/asus-laptop.c
40120+++ b/drivers/platform/x86/asus-laptop.c
40121@@ -250,7 +250,7 @@ static struct backlight_device *asus_backlight_device;
40122 */
40123 static int read_brightness(struct backlight_device *bd);
40124 static int update_bl_status(struct backlight_device *bd);
40125-static struct backlight_ops asusbl_ops = {
40126+static const struct backlight_ops asusbl_ops = {
40127 .get_brightness = read_brightness,
40128 .update_status = update_bl_status,
40129 };
40130diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
40131index d66c07a..a4abaac 100644
40132--- a/drivers/platform/x86/asus_acpi.c
40133+++ b/drivers/platform/x86/asus_acpi.c
40134@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_device *device, int type)
40135 return 0;
40136 }
40137
40138-static struct backlight_ops asus_backlight_data = {
40139+static const struct backlight_ops asus_backlight_data = {
40140 .get_brightness = read_brightness,
40141 .update_status = set_brightness_status,
40142 };
40143diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
40144index 11003bb..550ff1b 100644
40145--- a/drivers/platform/x86/compal-laptop.c
40146+++ b/drivers/platform/x86/compal-laptop.c
40147@@ -163,7 +163,7 @@ static int bl_update_status(struct backlight_device *b)
40148 return set_lcd_level(b->props.brightness);
40149 }
40150
40151-static struct backlight_ops compalbl_ops = {
40152+static const struct backlight_ops compalbl_ops = {
40153 .get_brightness = bl_get_brightness,
40154 .update_status = bl_update_status,
40155 };
40156diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
40157index 07a74da..9dc99fa 100644
40158--- a/drivers/platform/x86/dell-laptop.c
40159+++ b/drivers/platform/x86/dell-laptop.c
40160@@ -318,7 +318,7 @@ static int dell_get_intensity(struct backlight_device *bd)
40161 return buffer.output[1];
40162 }
40163
40164-static struct backlight_ops dell_ops = {
40165+static const struct backlight_ops dell_ops = {
40166 .get_brightness = dell_get_intensity,
40167 .update_status = dell_send_intensity,
40168 };
40169diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
40170index c533b1c..5c81f22 100644
40171--- a/drivers/platform/x86/eeepc-laptop.c
40172+++ b/drivers/platform/x86/eeepc-laptop.c
40173@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device;
40174 */
40175 static int read_brightness(struct backlight_device *bd);
40176 static int update_bl_status(struct backlight_device *bd);
40177-static struct backlight_ops eeepcbl_ops = {
40178+static const struct backlight_ops eeepcbl_ops = {
40179 .get_brightness = read_brightness,
40180 .update_status = update_bl_status,
40181 };
40182diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
40183index bcd4ba8..a249b35 100644
40184--- a/drivers/platform/x86/fujitsu-laptop.c
40185+++ b/drivers/platform/x86/fujitsu-laptop.c
40186@@ -436,7 +436,7 @@ static int bl_update_status(struct backlight_device *b)
40187 return ret;
40188 }
40189
40190-static struct backlight_ops fujitsubl_ops = {
40191+static const struct backlight_ops fujitsubl_ops = {
40192 .get_brightness = bl_get_brightness,
40193 .update_status = bl_update_status,
40194 };
40195diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
40196index 759763d..1093ba2 100644
40197--- a/drivers/platform/x86/msi-laptop.c
40198+++ b/drivers/platform/x86/msi-laptop.c
40199@@ -161,7 +161,7 @@ static int bl_update_status(struct backlight_device *b)
40200 return set_lcd_level(b->props.brightness);
40201 }
40202
40203-static struct backlight_ops msibl_ops = {
40204+static const struct backlight_ops msibl_ops = {
40205 .get_brightness = bl_get_brightness,
40206 .update_status = bl_update_status,
40207 };
40208diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
40209index fe7cf01..9012d8d 100644
40210--- a/drivers/platform/x86/panasonic-laptop.c
40211+++ b/drivers/platform/x86/panasonic-laptop.c
40212@@ -352,7 +352,7 @@ static int bl_set_status(struct backlight_device *bd)
40213 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
40214 }
40215
40216-static struct backlight_ops pcc_backlight_ops = {
40217+static const struct backlight_ops pcc_backlight_ops = {
40218 .get_brightness = bl_get,
40219 .update_status = bl_set_status,
40220 };
40221diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
40222index a2a742c..b37e25e 100644
40223--- a/drivers/platform/x86/sony-laptop.c
40224+++ b/drivers/platform/x86/sony-laptop.c
40225@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness(struct backlight_device *bd)
40226 }
40227
40228 static struct backlight_device *sony_backlight_device;
40229-static struct backlight_ops sony_backlight_ops = {
40230+static const struct backlight_ops sony_backlight_ops = {
40231 .update_status = sony_backlight_update_status,
40232 .get_brightness = sony_backlight_get_brightness,
40233 };
40234diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
40235index 68271ae..5e8fb10 100644
40236--- a/drivers/platform/x86/thinkpad_acpi.c
40237+++ b/drivers/platform/x86/thinkpad_acpi.c
40238@@ -2139,7 +2139,7 @@ static int hotkey_mask_get(void)
40239 return 0;
40240 }
40241
40242-void static hotkey_mask_warn_incomplete_mask(void)
40243+static void hotkey_mask_warn_incomplete_mask(void)
40244 {
40245 /* log only what the user can fix... */
40246 const u32 wantedmask = hotkey_driver_mask &
40247@@ -6125,7 +6125,7 @@ static void tpacpi_brightness_notify_change(void)
40248 BACKLIGHT_UPDATE_HOTKEY);
40249 }
40250
40251-static struct backlight_ops ibm_backlight_data = {
40252+static const struct backlight_ops ibm_backlight_data = {
40253 .get_brightness = brightness_get,
40254 .update_status = brightness_update_status,
40255 };
40256diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
40257index 51c0a8b..0786629 100644
40258--- a/drivers/platform/x86/toshiba_acpi.c
40259+++ b/drivers/platform/x86/toshiba_acpi.c
40260@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
40261 return AE_OK;
40262 }
40263
40264-static struct backlight_ops toshiba_backlight_data = {
40265+static const struct backlight_ops toshiba_backlight_data = {
40266 .get_brightness = get_lcd,
40267 .update_status = set_lcd_status,
40268 };
40269diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
40270index fc83783c..cf370d7 100644
40271--- a/drivers/pnp/pnpbios/bioscalls.c
40272+++ b/drivers/pnp/pnpbios/bioscalls.c
40273@@ -60,7 +60,7 @@ do { \
40274 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
40275 } while(0)
40276
40277-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
40278+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
40279 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
40280
40281 /*
40282@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40283
40284 cpu = get_cpu();
40285 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
40286+
40287+ pax_open_kernel();
40288 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
40289+ pax_close_kernel();
40290
40291 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
40292 spin_lock_irqsave(&pnp_bios_lock, flags);
40293@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
40294 :"memory");
40295 spin_unlock_irqrestore(&pnp_bios_lock, flags);
40296
40297+ pax_open_kernel();
40298 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
40299+ pax_close_kernel();
40300+
40301 put_cpu();
40302
40303 /* If we get here and this is set then the PnP BIOS faulted on us. */
40304@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
40305 return status;
40306 }
40307
40308-void pnpbios_calls_init(union pnp_bios_install_struct *header)
40309+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
40310 {
40311 int i;
40312
40313@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40314 pnp_bios_callpoint.offset = header->fields.pm16offset;
40315 pnp_bios_callpoint.segment = PNP_CS16;
40316
40317+ pax_open_kernel();
40318+
40319 for_each_possible_cpu(i) {
40320 struct desc_struct *gdt = get_cpu_gdt_table(i);
40321 if (!gdt)
40322@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
40323 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
40324 (unsigned long)__va(header->fields.pm16dseg));
40325 }
40326+
40327+ pax_close_kernel();
40328 }
40329diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
40330index ba97654..66b99d4 100644
40331--- a/drivers/pnp/resource.c
40332+++ b/drivers/pnp/resource.c
40333@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
40334 return 1;
40335
40336 /* check if the resource is valid */
40337- if (*irq < 0 || *irq > 15)
40338+ if (*irq > 15)
40339 return 0;
40340
40341 /* check if the resource is reserved */
40342@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
40343 return 1;
40344
40345 /* check if the resource is valid */
40346- if (*dma < 0 || *dma == 4 || *dma > 7)
40347+ if (*dma == 4 || *dma > 7)
40348 return 0;
40349
40350 /* check if the resource is reserved */
40351diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c
40352index 62bb981..24a2dc9 100644
40353--- a/drivers/power/bq27x00_battery.c
40354+++ b/drivers/power/bq27x00_battery.c
40355@@ -44,7 +44,7 @@ struct bq27x00_device_info;
40356 struct bq27x00_access_methods {
40357 int (*read)(u8 reg, int *rt_value, int b_single,
40358 struct bq27x00_device_info *di);
40359-};
40360+} __no_const;
40361
40362 struct bq27x00_device_info {
40363 struct device *dev;
40364diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
40365index 62227cd..b5b538b 100644
40366--- a/drivers/rtc/rtc-dev.c
40367+++ b/drivers/rtc/rtc-dev.c
40368@@ -14,6 +14,7 @@
40369 #include <linux/module.h>
40370 #include <linux/rtc.h>
40371 #include <linux/sched.h>
40372+#include <linux/grsecurity.h>
40373 #include "rtc-core.h"
40374
40375 static dev_t rtc_devt;
40376@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *file,
40377 if (copy_from_user(&tm, uarg, sizeof(tm)))
40378 return -EFAULT;
40379
40380+ gr_log_timechange();
40381+
40382 return rtc_set_time(rtc, &tm);
40383
40384 case RTC_PIE_ON:
40385diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
40386index 968e3c7..fbc637a 100644
40387--- a/drivers/s390/cio/qdio_perf.c
40388+++ b/drivers/s390/cio/qdio_perf.c
40389@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_pde;
40390 static int qdio_perf_proc_show(struct seq_file *m, void *v)
40391 {
40392 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
40393- (long)atomic_long_read(&perf_stats.qdio_int));
40394+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
40395 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
40396- (long)atomic_long_read(&perf_stats.pci_int));
40397+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
40398 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
40399- (long)atomic_long_read(&perf_stats.thin_int));
40400+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
40401 seq_printf(m, "\n");
40402 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
40403- (long)atomic_long_read(&perf_stats.tasklet_inbound));
40404+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
40405 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
40406- (long)atomic_long_read(&perf_stats.tasklet_outbound));
40407+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
40408 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
40409- (long)atomic_long_read(&perf_stats.tasklet_thinint),
40410- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
40411+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
40412+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
40413 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
40414- (long)atomic_long_read(&perf_stats.thinint_inbound),
40415- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
40416+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
40417+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
40418 seq_printf(m, "\n");
40419 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
40420- (long)atomic_long_read(&perf_stats.siga_in));
40421+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
40422 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
40423- (long)atomic_long_read(&perf_stats.siga_out));
40424+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
40425 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
40426- (long)atomic_long_read(&perf_stats.siga_sync));
40427+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
40428 seq_printf(m, "\n");
40429 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
40430- (long)atomic_long_read(&perf_stats.inbound_handler));
40431+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
40432 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
40433- (long)atomic_long_read(&perf_stats.outbound_handler));
40434+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
40435 seq_printf(m, "\n");
40436 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
40437- (long)atomic_long_read(&perf_stats.fast_requeue));
40438+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
40439 seq_printf(m, "Number of outbound target full condition\t: %li\n",
40440- (long)atomic_long_read(&perf_stats.outbound_target_full));
40441+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
40442 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
40443- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
40444+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
40445 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
40446- (long)atomic_long_read(&perf_stats.debug_stop_polling));
40447+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
40448 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
40449- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
40450+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
40451 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
40452- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
40453- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
40454+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
40455+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
40456 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
40457- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
40458- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
40459+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
40460+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
40461 seq_printf(m, "\n");
40462 return 0;
40463 }
40464diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h
40465index ff4504c..b3604c3 100644
40466--- a/drivers/s390/cio/qdio_perf.h
40467+++ b/drivers/s390/cio/qdio_perf.h
40468@@ -13,46 +13,46 @@
40469
40470 struct qdio_perf_stats {
40471 /* interrupt handler calls */
40472- atomic_long_t qdio_int;
40473- atomic_long_t pci_int;
40474- atomic_long_t thin_int;
40475+ atomic_long_unchecked_t qdio_int;
40476+ atomic_long_unchecked_t pci_int;
40477+ atomic_long_unchecked_t thin_int;
40478
40479 /* tasklet runs */
40480- atomic_long_t tasklet_inbound;
40481- atomic_long_t tasklet_outbound;
40482- atomic_long_t tasklet_thinint;
40483- atomic_long_t tasklet_thinint_loop;
40484- atomic_long_t thinint_inbound;
40485- atomic_long_t thinint_inbound_loop;
40486- atomic_long_t thinint_inbound_loop2;
40487+ atomic_long_unchecked_t tasklet_inbound;
40488+ atomic_long_unchecked_t tasklet_outbound;
40489+ atomic_long_unchecked_t tasklet_thinint;
40490+ atomic_long_unchecked_t tasklet_thinint_loop;
40491+ atomic_long_unchecked_t thinint_inbound;
40492+ atomic_long_unchecked_t thinint_inbound_loop;
40493+ atomic_long_unchecked_t thinint_inbound_loop2;
40494
40495 /* signal adapter calls */
40496- atomic_long_t siga_out;
40497- atomic_long_t siga_in;
40498- atomic_long_t siga_sync;
40499+ atomic_long_unchecked_t siga_out;
40500+ atomic_long_unchecked_t siga_in;
40501+ atomic_long_unchecked_t siga_sync;
40502
40503 /* misc */
40504- atomic_long_t inbound_handler;
40505- atomic_long_t outbound_handler;
40506- atomic_long_t fast_requeue;
40507- atomic_long_t outbound_target_full;
40508+ atomic_long_unchecked_t inbound_handler;
40509+ atomic_long_unchecked_t outbound_handler;
40510+ atomic_long_unchecked_t fast_requeue;
40511+ atomic_long_unchecked_t outbound_target_full;
40512
40513 /* for debugging */
40514- atomic_long_t debug_tl_out_timer;
40515- atomic_long_t debug_stop_polling;
40516- atomic_long_t debug_eqbs_all;
40517- atomic_long_t debug_eqbs_incomplete;
40518- atomic_long_t debug_sqbs_all;
40519- atomic_long_t debug_sqbs_incomplete;
40520+ atomic_long_unchecked_t debug_tl_out_timer;
40521+ atomic_long_unchecked_t debug_stop_polling;
40522+ atomic_long_unchecked_t debug_eqbs_all;
40523+ atomic_long_unchecked_t debug_eqbs_incomplete;
40524+ atomic_long_unchecked_t debug_sqbs_all;
40525+ atomic_long_unchecked_t debug_sqbs_incomplete;
40526 };
40527
40528 extern struct qdio_perf_stats perf_stats;
40529 extern int qdio_performance_stats;
40530
40531-static inline void qdio_perf_stat_inc(atomic_long_t *count)
40532+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
40533 {
40534 if (qdio_performance_stats)
40535- atomic_long_inc(count);
40536+ atomic_long_inc_unchecked(count);
40537 }
40538
40539 int qdio_setup_perf_stats(void);
40540diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
40541index 1ddcf40..a85f062 100644
40542--- a/drivers/scsi/BusLogic.c
40543+++ b/drivers/scsi/BusLogic.c
40544@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFlashPointProbeInfo(struct BusLogic_HostAda
40545 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
40546 *PrototypeHostAdapter)
40547 {
40548+ pax_track_stack();
40549+
40550 /*
40551 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
40552 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
40553diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
40554index cdbdec9..b7d560b 100644
40555--- a/drivers/scsi/aacraid/aacraid.h
40556+++ b/drivers/scsi/aacraid/aacraid.h
40557@@ -471,7 +471,7 @@ struct adapter_ops
40558 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
40559 /* Administrative operations */
40560 int (*adapter_comm)(struct aac_dev * dev, int comm);
40561-};
40562+} __no_const;
40563
40564 /*
40565 * Define which interrupt handler needs to be installed
40566diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
40567index a5b8e7b..a6a0e43 100644
40568--- a/drivers/scsi/aacraid/commctrl.c
40569+++ b/drivers/scsi/aacraid/commctrl.c
40570@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_dev* dev, void __user * arg)
40571 u32 actual_fibsize64, actual_fibsize = 0;
40572 int i;
40573
40574+ pax_track_stack();
40575
40576 if (dev->in_reset) {
40577 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
40578diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
40579index 9b97c3e..f099725 100644
40580--- a/drivers/scsi/aacraid/linit.c
40581+++ b/drivers/scsi/aacraid/linit.c
40582@@ -91,7 +91,7 @@ static DECLARE_PCI_DEVICE_TABLE(aac_pci_tbl) = {
40583 #elif defined(__devinitconst)
40584 static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40585 #else
40586-static const struct pci_device_id aac_pci_tbl[] __devinitdata = {
40587+static const struct pci_device_id aac_pci_tbl[] __devinitconst = {
40588 #endif
40589 { 0x1028, 0x0001, 0x1028, 0x0001, 0, 0, 0 }, /* PERC 2/Si (Iguana/PERC2Si) */
40590 { 0x1028, 0x0002, 0x1028, 0x0002, 0, 0, 1 }, /* PERC 3/Di (Opal/PERC3Di) */
40591diff --git a/drivers/scsi/aic94xx/aic94xx_init.c b/drivers/scsi/aic94xx/aic94xx_init.c
40592index 996f722..9127845 100644
40593--- a/drivers/scsi/aic94xx/aic94xx_init.c
40594+++ b/drivers/scsi/aic94xx/aic94xx_init.c
40595@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(struct device *dev,
40596 flash_error_table[i].reason);
40597 }
40598
40599-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
40600+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
40601 asd_show_update_bios, asd_store_update_bios);
40602
40603 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
40604@@ -1011,7 +1011,7 @@ static struct sas_domain_function_template aic94xx_transport_functions = {
40605 .lldd_control_phy = asd_control_phy,
40606 };
40607
40608-static const struct pci_device_id aic94xx_pci_table[] __devinitdata = {
40609+static const struct pci_device_id aic94xx_pci_table[] __devinitconst = {
40610 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x410),0, 0, 1},
40611 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x412),0, 0, 1},
40612 {PCI_DEVICE(PCI_VENDOR_ID_ADAPTEC2, 0x416),0, 0, 1},
40613diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
40614index 58efd4b..cb48dc7 100644
40615--- a/drivers/scsi/bfa/bfa_ioc.h
40616+++ b/drivers/scsi/bfa/bfa_ioc.h
40617@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
40618 bfa_ioc_disable_cbfn_t disable_cbfn;
40619 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
40620 bfa_ioc_reset_cbfn_t reset_cbfn;
40621-};
40622+} __no_const;
40623
40624 /**
40625 * Heartbeat failure notification queue element.
40626diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
40627index 7ad177e..5503586 100644
40628--- a/drivers/scsi/bfa/bfa_iocfc.h
40629+++ b/drivers/scsi/bfa/bfa_iocfc.h
40630@@ -61,7 +61,7 @@ struct bfa_hwif_s {
40631 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
40632 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
40633 u32 *nvecs, u32 *maxvec);
40634-};
40635+} __no_const;
40636 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
40637
40638 struct bfa_iocfc_s {
40639diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
40640index 4967643..cbec06b 100644
40641--- a/drivers/scsi/dpt_i2o.c
40642+++ b/drivers/scsi/dpt_i2o.c
40643@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
40644 dma_addr_t addr;
40645 ulong flags = 0;
40646
40647+ pax_track_stack();
40648+
40649 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
40650 // get user msg size in u32s
40651 if(get_user(size, &user_msg[0])){
40652@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_d
40653 s32 rcode;
40654 dma_addr_t addr;
40655
40656+ pax_track_stack();
40657+
40658 memset(msg, 0 , sizeof(msg));
40659 len = scsi_bufflen(cmd);
40660 direction = 0x00000000;
40661diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
40662index c7076ce..e20c67c 100644
40663--- a/drivers/scsi/eata.c
40664+++ b/drivers/scsi/eata.c
40665@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long port_base, unsigned int j,
40666 struct hostdata *ha;
40667 char name[16];
40668
40669+ pax_track_stack();
40670+
40671 sprintf(name, "%s%d", driver_name, j);
40672
40673 if (!request_region(port_base, REGION_SIZE, driver_name)) {
40674diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
40675index 11ae5c9..891daec 100644
40676--- a/drivers/scsi/fcoe/libfcoe.c
40677+++ b/drivers/scsi/fcoe/libfcoe.c
40678@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fcoe_ctlr *fip, struct sk_buff *skb)
40679 size_t rlen;
40680 size_t dlen;
40681
40682+ pax_track_stack();
40683+
40684 fiph = (struct fip_header *)skb->data;
40685 sub = fiph->fip_subcode;
40686 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
40687diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
40688index 71c7bbe..e93088a 100644
40689--- a/drivers/scsi/fnic/fnic_main.c
40690+++ b/drivers/scsi/fnic/fnic_main.c
40691@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
40692 /* Start local port initiatialization */
40693
40694 lp->link_up = 0;
40695- lp->tt = fnic_transport_template;
40696+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
40697
40698 lp->max_retry_count = fnic->config.flogi_retries;
40699 lp->max_rport_retry_count = fnic->config.plogi_retries;
40700diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
40701index bb96d74..9ec3ce4 100644
40702--- a/drivers/scsi/gdth.c
40703+++ b/drivers/scsi/gdth.c
40704@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
40705 ulong flags;
40706 gdth_ha_str *ha;
40707
40708+ pax_track_stack();
40709+
40710 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
40711 return -EFAULT;
40712 ha = gdth_find_ha(ldrv.ionode);
40713@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg, char *cmnd)
40714 gdth_ha_str *ha;
40715 int rval;
40716
40717+ pax_track_stack();
40718+
40719 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
40720 res.number >= MAX_HDRIVES)
40721 return -EFAULT;
40722@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg, char *cmnd)
40723 gdth_ha_str *ha;
40724 int rval;
40725
40726+ pax_track_stack();
40727+
40728 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
40729 return -EFAULT;
40730 ha = gdth_find_ha(gen.ionode);
40731@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
40732 int i;
40733 gdth_cmd_str gdtcmd;
40734 char cmnd[MAX_COMMAND_SIZE];
40735+
40736+ pax_track_stack();
40737+
40738 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
40739
40740 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
40741diff --git a/drivers/scsi/gdth_proc.c b/drivers/scsi/gdth_proc.c
40742index 1258da3..20d8ae6 100644
40743--- a/drivers/scsi/gdth_proc.c
40744+++ b/drivers/scsi/gdth_proc.c
40745@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi_Host *host, char *buffer,
40746 ulong64 paddr;
40747
40748 char cmnd[MAX_COMMAND_SIZE];
40749+
40750+ pax_track_stack();
40751+
40752 memset(cmnd, 0xff, 12);
40753 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
40754
40755@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
40756 gdth_hget_str *phg;
40757 char cmnd[MAX_COMMAND_SIZE];
40758
40759+ pax_track_stack();
40760+
40761 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
40762 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
40763 if (!gdtcmd || !estr)
40764diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
40765index d03a926..f324286 100644
40766--- a/drivers/scsi/hosts.c
40767+++ b/drivers/scsi/hosts.c
40768@@ -40,7 +40,7 @@
40769 #include "scsi_logging.h"
40770
40771
40772-static atomic_t scsi_host_next_hn; /* host_no for next new host */
40773+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
40774
40775
40776 static void scsi_host_cls_release(struct device *dev)
40777@@ -347,7 +347,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
40778 * subtract one because we increment first then return, but we need to
40779 * know what the next host number was before increment
40780 */
40781- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
40782+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
40783 shost->dma_channel = 0xff;
40784
40785 /* These three are default values which can be overridden */
40786diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
40787index a601159..55e19d2 100644
40788--- a/drivers/scsi/ipr.c
40789+++ b/drivers/scsi/ipr.c
40790@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_queued_cmd *qc)
40791 return true;
40792 }
40793
40794-static struct ata_port_operations ipr_sata_ops = {
40795+static const struct ata_port_operations ipr_sata_ops = {
40796 .phy_reset = ipr_ata_phy_reset,
40797 .hardreset = ipr_sata_reset,
40798 .post_internal_cmd = ipr_ata_post_internal,
40799diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
40800index 4e49fbc..97907ff 100644
40801--- a/drivers/scsi/ips.h
40802+++ b/drivers/scsi/ips.h
40803@@ -1027,7 +1027,7 @@ typedef struct {
40804 int (*intr)(struct ips_ha *);
40805 void (*enableint)(struct ips_ha *);
40806 uint32_t (*statupd)(struct ips_ha *);
40807-} ips_hw_func_t;
40808+} __no_const ips_hw_func_t;
40809
40810 typedef struct ips_ha {
40811 uint8_t ha_id[IPS_MAX_CHANNELS+1];
40812diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
40813index c1c1574..a9c9348 100644
40814--- a/drivers/scsi/libfc/fc_exch.c
40815+++ b/drivers/scsi/libfc/fc_exch.c
40816@@ -86,12 +86,12 @@ struct fc_exch_mgr {
40817 * all together if not used XXX
40818 */
40819 struct {
40820- atomic_t no_free_exch;
40821- atomic_t no_free_exch_xid;
40822- atomic_t xid_not_found;
40823- atomic_t xid_busy;
40824- atomic_t seq_not_found;
40825- atomic_t non_bls_resp;
40826+ atomic_unchecked_t no_free_exch;
40827+ atomic_unchecked_t no_free_exch_xid;
40828+ atomic_unchecked_t xid_not_found;
40829+ atomic_unchecked_t xid_busy;
40830+ atomic_unchecked_t seq_not_found;
40831+ atomic_unchecked_t non_bls_resp;
40832 } stats;
40833 };
40834 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
40835@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
40836 /* allocate memory for exchange */
40837 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
40838 if (!ep) {
40839- atomic_inc(&mp->stats.no_free_exch);
40840+ atomic_inc_unchecked(&mp->stats.no_free_exch);
40841 goto out;
40842 }
40843 memset(ep, 0, sizeof(*ep));
40844@@ -557,7 +557,7 @@ out:
40845 return ep;
40846 err:
40847 spin_unlock_bh(&pool->lock);
40848- atomic_inc(&mp->stats.no_free_exch_xid);
40849+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
40850 mempool_free(ep, mp->ep_pool);
40851 return NULL;
40852 }
40853@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40854 xid = ntohs(fh->fh_ox_id); /* we originated exch */
40855 ep = fc_exch_find(mp, xid);
40856 if (!ep) {
40857- atomic_inc(&mp->stats.xid_not_found);
40858+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40859 reject = FC_RJT_OX_ID;
40860 goto out;
40861 }
40862@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40863 ep = fc_exch_find(mp, xid);
40864 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
40865 if (ep) {
40866- atomic_inc(&mp->stats.xid_busy);
40867+ atomic_inc_unchecked(&mp->stats.xid_busy);
40868 reject = FC_RJT_RX_ID;
40869 goto rel;
40870 }
40871@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40872 }
40873 xid = ep->xid; /* get our XID */
40874 } else if (!ep) {
40875- atomic_inc(&mp->stats.xid_not_found);
40876+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40877 reject = FC_RJT_RX_ID; /* XID not found */
40878 goto out;
40879 }
40880@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
40881 } else {
40882 sp = &ep->seq;
40883 if (sp->id != fh->fh_seq_id) {
40884- atomic_inc(&mp->stats.seq_not_found);
40885+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40886 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
40887 goto rel;
40888 }
40889@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40890
40891 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
40892 if (!ep) {
40893- atomic_inc(&mp->stats.xid_not_found);
40894+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40895 goto out;
40896 }
40897 if (ep->esb_stat & ESB_ST_COMPLETE) {
40898- atomic_inc(&mp->stats.xid_not_found);
40899+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40900 goto out;
40901 }
40902 if (ep->rxid == FC_XID_UNKNOWN)
40903 ep->rxid = ntohs(fh->fh_rx_id);
40904 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
40905- atomic_inc(&mp->stats.xid_not_found);
40906+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40907 goto rel;
40908 }
40909 if (ep->did != ntoh24(fh->fh_s_id) &&
40910 ep->did != FC_FID_FLOGI) {
40911- atomic_inc(&mp->stats.xid_not_found);
40912+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40913 goto rel;
40914 }
40915 sof = fr_sof(fp);
40916@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40917 } else {
40918 sp = &ep->seq;
40919 if (sp->id != fh->fh_seq_id) {
40920- atomic_inc(&mp->stats.seq_not_found);
40921+ atomic_inc_unchecked(&mp->stats.seq_not_found);
40922 goto rel;
40923 }
40924 }
40925@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
40926 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
40927
40928 if (!sp)
40929- atomic_inc(&mp->stats.xid_not_found);
40930+ atomic_inc_unchecked(&mp->stats.xid_not_found);
40931 else
40932- atomic_inc(&mp->stats.non_bls_resp);
40933+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
40934
40935 fc_frame_free(fp);
40936 }
40937diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
40938index 0ee989f..a582241 100644
40939--- a/drivers/scsi/libsas/sas_ata.c
40940+++ b/drivers/scsi/libsas/sas_ata.c
40941@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_link *link, unsigned int sc_reg_in,
40942 }
40943 }
40944
40945-static struct ata_port_operations sas_sata_ops = {
40946+static const struct ata_port_operations sas_sata_ops = {
40947 .phy_reset = sas_ata_phy_reset,
40948 .post_internal_cmd = sas_ata_post_internal,
40949 .qc_defer = ata_std_qc_defer,
40950diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
40951index aa10f79..5cc79e4 100644
40952--- a/drivers/scsi/lpfc/lpfc.h
40953+++ b/drivers/scsi/lpfc/lpfc.h
40954@@ -400,7 +400,7 @@ struct lpfc_vport {
40955 struct dentry *debug_nodelist;
40956 struct dentry *vport_debugfs_root;
40957 struct lpfc_debugfs_trc *disc_trc;
40958- atomic_t disc_trc_cnt;
40959+ atomic_unchecked_t disc_trc_cnt;
40960 #endif
40961 uint8_t stat_data_enabled;
40962 uint8_t stat_data_blocked;
40963@@ -725,8 +725,8 @@ struct lpfc_hba {
40964 struct timer_list fabric_block_timer;
40965 unsigned long bit_flags;
40966 #define FABRIC_COMANDS_BLOCKED 0
40967- atomic_t num_rsrc_err;
40968- atomic_t num_cmd_success;
40969+ atomic_unchecked_t num_rsrc_err;
40970+ atomic_unchecked_t num_cmd_success;
40971 unsigned long last_rsrc_error_time;
40972 unsigned long last_ramp_down_time;
40973 unsigned long last_ramp_up_time;
40974@@ -740,7 +740,7 @@ struct lpfc_hba {
40975 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
40976 struct dentry *debug_slow_ring_trc;
40977 struct lpfc_debugfs_trc *slow_ring_trc;
40978- atomic_t slow_ring_trc_cnt;
40979+ atomic_unchecked_t slow_ring_trc_cnt;
40980 #endif
40981
40982 /* Used for deferred freeing of ELS data buffers */
40983diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
40984index 8d0f0de..7c77a62 100644
40985--- a/drivers/scsi/lpfc/lpfc_debugfs.c
40986+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
40987@@ -124,7 +124,7 @@ struct lpfc_debug {
40988 int len;
40989 };
40990
40991-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40992+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
40993 static unsigned long lpfc_debugfs_start_time = 0L;
40994
40995 /**
40996@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
40997 lpfc_debugfs_enable = 0;
40998
40999 len = 0;
41000- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
41001+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
41002 (lpfc_debugfs_max_disc_trc - 1);
41003 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
41004 dtp = vport->disc_trc + i;
41005@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
41006 lpfc_debugfs_enable = 0;
41007
41008 len = 0;
41009- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
41010+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
41011 (lpfc_debugfs_max_slow_ring_trc - 1);
41012 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
41013 dtp = phba->slow_ring_trc + i;
41014@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpfc_hba *phba, char *buf, int size)
41015 uint32_t *ptr;
41016 char buffer[1024];
41017
41018+ pax_track_stack();
41019+
41020 off = 0;
41021 spin_lock_irq(&phba->hbalock);
41022
41023@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
41024 !vport || !vport->disc_trc)
41025 return;
41026
41027- index = atomic_inc_return(&vport->disc_trc_cnt) &
41028+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
41029 (lpfc_debugfs_max_disc_trc - 1);
41030 dtp = vport->disc_trc + index;
41031 dtp->fmt = fmt;
41032 dtp->data1 = data1;
41033 dtp->data2 = data2;
41034 dtp->data3 = data3;
41035- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41036+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41037 dtp->jif = jiffies;
41038 #endif
41039 return;
41040@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
41041 !phba || !phba->slow_ring_trc)
41042 return;
41043
41044- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
41045+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
41046 (lpfc_debugfs_max_slow_ring_trc - 1);
41047 dtp = phba->slow_ring_trc + index;
41048 dtp->fmt = fmt;
41049 dtp->data1 = data1;
41050 dtp->data2 = data2;
41051 dtp->data3 = data3;
41052- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
41053+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
41054 dtp->jif = jiffies;
41055 #endif
41056 return;
41057@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41058 "slow_ring buffer\n");
41059 goto debug_failed;
41060 }
41061- atomic_set(&phba->slow_ring_trc_cnt, 0);
41062+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
41063 memset(phba->slow_ring_trc, 0,
41064 (sizeof(struct lpfc_debugfs_trc) *
41065 lpfc_debugfs_max_slow_ring_trc));
41066@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
41067 "buffer\n");
41068 goto debug_failed;
41069 }
41070- atomic_set(&vport->disc_trc_cnt, 0);
41071+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
41072
41073 snprintf(name, sizeof(name), "discovery_trace");
41074 vport->debug_disc_trc =
41075diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
41076index 549bc7d..8189dbb 100644
41077--- a/drivers/scsi/lpfc/lpfc_init.c
41078+++ b/drivers/scsi/lpfc/lpfc_init.c
41079@@ -8021,8 +8021,10 @@ lpfc_init(void)
41080 printk(LPFC_COPYRIGHT "\n");
41081
41082 if (lpfc_enable_npiv) {
41083- lpfc_transport_functions.vport_create = lpfc_vport_create;
41084- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41085+ pax_open_kernel();
41086+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
41087+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
41088+ pax_close_kernel();
41089 }
41090 lpfc_transport_template =
41091 fc_attach_transport(&lpfc_transport_functions);
41092diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
41093index c88f59f..ff2a42f 100644
41094--- a/drivers/scsi/lpfc/lpfc_scsi.c
41095+++ b/drivers/scsi/lpfc/lpfc_scsi.c
41096@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
41097 uint32_t evt_posted;
41098
41099 spin_lock_irqsave(&phba->hbalock, flags);
41100- atomic_inc(&phba->num_rsrc_err);
41101+ atomic_inc_unchecked(&phba->num_rsrc_err);
41102 phba->last_rsrc_error_time = jiffies;
41103
41104 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
41105@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
41106 unsigned long flags;
41107 struct lpfc_hba *phba = vport->phba;
41108 uint32_t evt_posted;
41109- atomic_inc(&phba->num_cmd_success);
41110+ atomic_inc_unchecked(&phba->num_cmd_success);
41111
41112 if (vport->cfg_lun_queue_depth <= queue_depth)
41113 return;
41114@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41115 int i;
41116 struct lpfc_rport_data *rdata;
41117
41118- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
41119- num_cmd_success = atomic_read(&phba->num_cmd_success);
41120+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
41121+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
41122
41123 vports = lpfc_create_vport_work_array(phba);
41124 if (vports != NULL)
41125@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
41126 }
41127 }
41128 lpfc_destroy_vport_work_array(phba, vports);
41129- atomic_set(&phba->num_rsrc_err, 0);
41130- atomic_set(&phba->num_cmd_success, 0);
41131+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41132+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41133 }
41134
41135 /**
41136@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
41137 }
41138 }
41139 lpfc_destroy_vport_work_array(phba, vports);
41140- atomic_set(&phba->num_rsrc_err, 0);
41141- atomic_set(&phba->num_cmd_success, 0);
41142+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
41143+ atomic_set_unchecked(&phba->num_cmd_success, 0);
41144 }
41145
41146 /**
41147diff --git a/drivers/scsi/megaraid/megaraid_mbox.c b/drivers/scsi/megaraid/megaraid_mbox.c
41148index 234f0b7..3020aea 100644
41149--- a/drivers/scsi/megaraid/megaraid_mbox.c
41150+++ b/drivers/scsi/megaraid/megaraid_mbox.c
41151@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter)
41152 int rval;
41153 int i;
41154
41155+ pax_track_stack();
41156+
41157 // Allocate memory for the base list of scb for management module.
41158 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
41159
41160diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
41161index 7a117c1..ee01e9e 100644
41162--- a/drivers/scsi/osd/osd_initiator.c
41163+++ b/drivers/scsi/osd/osd_initiator.c
41164@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
41165 int nelem = ARRAY_SIZE(get_attrs), a = 0;
41166 int ret;
41167
41168+ pax_track_stack();
41169+
41170 or = osd_start_request(od, GFP_KERNEL);
41171 if (!or)
41172 return -ENOMEM;
41173diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
41174index 9ab8c86..9425ad3 100644
41175--- a/drivers/scsi/pmcraid.c
41176+++ b/drivers/scsi/pmcraid.c
41177@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
41178 res->scsi_dev = scsi_dev;
41179 scsi_dev->hostdata = res;
41180 res->change_detected = 0;
41181- atomic_set(&res->read_failures, 0);
41182- atomic_set(&res->write_failures, 0);
41183+ atomic_set_unchecked(&res->read_failures, 0);
41184+ atomic_set_unchecked(&res->write_failures, 0);
41185 rc = 0;
41186 }
41187 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
41188@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
41189
41190 /* If this was a SCSI read/write command keep count of errors */
41191 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
41192- atomic_inc(&res->read_failures);
41193+ atomic_inc_unchecked(&res->read_failures);
41194 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
41195- atomic_inc(&res->write_failures);
41196+ atomic_inc_unchecked(&res->write_failures);
41197
41198 if (!RES_IS_GSCSI(res->cfg_entry) &&
41199 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
41200@@ -4116,7 +4116,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
41201
41202 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
41203 /* add resources only after host is added into system */
41204- if (!atomic_read(&pinstance->expose_resources))
41205+ if (!atomic_read_unchecked(&pinstance->expose_resources))
41206 return;
41207
41208 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
41209@@ -4850,7 +4850,7 @@ static int __devinit pmcraid_init_instance(
41210 init_waitqueue_head(&pinstance->reset_wait_q);
41211
41212 atomic_set(&pinstance->outstanding_cmds, 0);
41213- atomic_set(&pinstance->expose_resources, 0);
41214+ atomic_set_unchecked(&pinstance->expose_resources, 0);
41215
41216 INIT_LIST_HEAD(&pinstance->free_res_q);
41217 INIT_LIST_HEAD(&pinstance->used_res_q);
41218@@ -5502,7 +5502,7 @@ static int __devinit pmcraid_probe(
41219 /* Schedule worker thread to handle CCN and take care of adding and
41220 * removing devices to OS
41221 */
41222- atomic_set(&pinstance->expose_resources, 1);
41223+ atomic_set_unchecked(&pinstance->expose_resources, 1);
41224 schedule_work(&pinstance->worker_q);
41225 return rc;
41226
41227diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
41228index 3441b3f..6cbe8f7 100644
41229--- a/drivers/scsi/pmcraid.h
41230+++ b/drivers/scsi/pmcraid.h
41231@@ -690,7 +690,7 @@ struct pmcraid_instance {
41232 atomic_t outstanding_cmds;
41233
41234 /* should add/delete resources to mid-layer now ?*/
41235- atomic_t expose_resources;
41236+ atomic_unchecked_t expose_resources;
41237
41238 /* Tasklet to handle deferred processing */
41239 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
41240@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
41241 struct list_head queue; /* link to "to be exposed" resources */
41242 struct pmcraid_config_table_entry cfg_entry;
41243 struct scsi_device *scsi_dev; /* Link scsi_device structure */
41244- atomic_t read_failures; /* count of failed READ commands */
41245- atomic_t write_failures; /* count of failed WRITE commands */
41246+ atomic_unchecked_t read_failures; /* count of failed READ commands */
41247+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
41248
41249 /* To indicate add/delete/modify during CCN */
41250 u8 change_detected;
41251diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
41252index 2150618..7034215 100644
41253--- a/drivers/scsi/qla2xxx/qla_def.h
41254+++ b/drivers/scsi/qla2xxx/qla_def.h
41255@@ -2089,7 +2089,7 @@ struct isp_operations {
41256
41257 int (*get_flash_version) (struct scsi_qla_host *, void *);
41258 int (*start_scsi) (srb_t *);
41259-};
41260+} __no_const;
41261
41262 /* MSI-X Support *************************************************************/
41263
41264diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
41265index 81b5f29..2ae1fad 100644
41266--- a/drivers/scsi/qla4xxx/ql4_def.h
41267+++ b/drivers/scsi/qla4xxx/ql4_def.h
41268@@ -240,7 +240,7 @@ struct ddb_entry {
41269 atomic_t retry_relogin_timer; /* Min Time between relogins
41270 * (4000 only) */
41271 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
41272- atomic_t relogin_retry_count; /* Num of times relogin has been
41273+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
41274 * retried */
41275
41276 uint16_t port;
41277diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
41278index af8c323..515dd51 100644
41279--- a/drivers/scsi/qla4xxx/ql4_init.c
41280+++ b/drivers/scsi/qla4xxx/ql4_init.c
41281@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
41282 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
41283 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
41284 atomic_set(&ddb_entry->relogin_timer, 0);
41285- atomic_set(&ddb_entry->relogin_retry_count, 0);
41286+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41287 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41288 list_add_tail(&ddb_entry->list, &ha->ddb_list);
41289 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
41290@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha,
41291 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
41292 atomic_set(&ddb_entry->port_down_timer,
41293 ha->port_down_retry_count);
41294- atomic_set(&ddb_entry->relogin_retry_count, 0);
41295+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
41296 atomic_set(&ddb_entry->relogin_timer, 0);
41297 clear_bit(DF_RELOGIN, &ddb_entry->flags);
41298 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
41299diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
41300index 83c8b5e..a82b348 100644
41301--- a/drivers/scsi/qla4xxx/ql4_os.c
41302+++ b/drivers/scsi/qla4xxx/ql4_os.c
41303@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_qla_host *ha)
41304 ddb_entry->fw_ddb_device_state ==
41305 DDB_DS_SESSION_FAILED) {
41306 /* Reset retry relogin timer */
41307- atomic_inc(&ddb_entry->relogin_retry_count);
41308+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
41309 DEBUG2(printk("scsi%ld: index[%d] relogin"
41310 " timed out-retrying"
41311 " relogin (%d)\n",
41312 ha->host_no,
41313 ddb_entry->fw_ddb_index,
41314- atomic_read(&ddb_entry->
41315+ atomic_read_unchecked(&ddb_entry->
41316 relogin_retry_count))
41317 );
41318 start_dpc++;
41319diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
41320index dd098ca..686ce01 100644
41321--- a/drivers/scsi/scsi.c
41322+++ b/drivers/scsi/scsi.c
41323@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
41324 unsigned long timeout;
41325 int rtn = 0;
41326
41327- atomic_inc(&cmd->device->iorequest_cnt);
41328+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41329
41330 /* check if the device is still usable */
41331 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
41332diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
41333index bc3e363..e1a8e50 100644
41334--- a/drivers/scsi/scsi_debug.c
41335+++ b/drivers/scsi/scsi_debug.c
41336@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_cmnd * scp, int mselect6,
41337 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
41338 unsigned char *cmd = (unsigned char *)scp->cmnd;
41339
41340+ pax_track_stack();
41341+
41342 if ((errsts = check_readiness(scp, 1, devip)))
41343 return errsts;
41344 memset(arr, 0, sizeof(arr));
41345@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cmnd * scp,
41346 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
41347 unsigned char *cmd = (unsigned char *)scp->cmnd;
41348
41349+ pax_track_stack();
41350+
41351 if ((errsts = check_readiness(scp, 1, devip)))
41352 return errsts;
41353 memset(arr, 0, sizeof(arr));
41354diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
41355index 8df12522..c4c1472 100644
41356--- a/drivers/scsi/scsi_lib.c
41357+++ b/drivers/scsi/scsi_lib.c
41358@@ -1389,7 +1389,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
41359 shost = sdev->host;
41360 scsi_init_cmd_errh(cmd);
41361 cmd->result = DID_NO_CONNECT << 16;
41362- atomic_inc(&cmd->device->iorequest_cnt);
41363+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
41364
41365 /*
41366 * SCSI request completion path will do scsi_device_unbusy(),
41367@@ -1420,9 +1420,9 @@ static void scsi_softirq_done(struct request *rq)
41368 */
41369 cmd->serial_number = 0;
41370
41371- atomic_inc(&cmd->device->iodone_cnt);
41372+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
41373 if (cmd->result)
41374- atomic_inc(&cmd->device->ioerr_cnt);
41375+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
41376
41377 disposition = scsi_decide_disposition(cmd);
41378 if (disposition != SUCCESS &&
41379diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
41380index 91a93e0..eae0fe3 100644
41381--- a/drivers/scsi/scsi_sysfs.c
41382+++ b/drivers/scsi/scsi_sysfs.c
41383@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
41384 char *buf) \
41385 { \
41386 struct scsi_device *sdev = to_scsi_device(dev); \
41387- unsigned long long count = atomic_read(&sdev->field); \
41388+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
41389 return snprintf(buf, 20, "0x%llx\n", count); \
41390 } \
41391 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
41392diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
41393index 1030327..f91fd30 100644
41394--- a/drivers/scsi/scsi_tgt_lib.c
41395+++ b/drivers/scsi/scsi_tgt_lib.c
41396@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
41397 int err;
41398
41399 dprintk("%lx %u\n", uaddr, len);
41400- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
41401+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
41402 if (err) {
41403 /*
41404 * TODO: need to fixup sg_tablesize, max_segment_size,
41405diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
41406index db02e31..1b42ea9 100644
41407--- a/drivers/scsi/scsi_transport_fc.c
41408+++ b/drivers/scsi/scsi_transport_fc.c
41409@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
41410 * Netlink Infrastructure
41411 */
41412
41413-static atomic_t fc_event_seq;
41414+static atomic_unchecked_t fc_event_seq;
41415
41416 /**
41417 * fc_get_event_number - Obtain the next sequential FC event number
41418@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
41419 u32
41420 fc_get_event_number(void)
41421 {
41422- return atomic_add_return(1, &fc_event_seq);
41423+ return atomic_add_return_unchecked(1, &fc_event_seq);
41424 }
41425 EXPORT_SYMBOL(fc_get_event_number);
41426
41427@@ -641,7 +641,7 @@ static __init int fc_transport_init(void)
41428 {
41429 int error;
41430
41431- atomic_set(&fc_event_seq, 0);
41432+ atomic_set_unchecked(&fc_event_seq, 0);
41433
41434 error = transport_class_register(&fc_host_class);
41435 if (error)
41436diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
41437index de2f8c4..63c5278 100644
41438--- a/drivers/scsi/scsi_transport_iscsi.c
41439+++ b/drivers/scsi/scsi_transport_iscsi.c
41440@@ -81,7 +81,7 @@ struct iscsi_internal {
41441 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
41442 };
41443
41444-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
41445+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
41446 static struct workqueue_struct *iscsi_eh_timer_workq;
41447
41448 /*
41449@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
41450 int err;
41451
41452 ihost = shost->shost_data;
41453- session->sid = atomic_add_return(1, &iscsi_session_nr);
41454+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
41455
41456 if (id == ISCSI_MAX_TARGET) {
41457 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
41458@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(void)
41459 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
41460 ISCSI_TRANSPORT_VERSION);
41461
41462- atomic_set(&iscsi_session_nr, 0);
41463+ atomic_set_unchecked(&iscsi_session_nr, 0);
41464
41465 err = class_register(&iscsi_transport_class);
41466 if (err)
41467diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
41468index 21a045e..ec89e03 100644
41469--- a/drivers/scsi/scsi_transport_srp.c
41470+++ b/drivers/scsi/scsi_transport_srp.c
41471@@ -33,7 +33,7 @@
41472 #include "scsi_transport_srp_internal.h"
41473
41474 struct srp_host_attrs {
41475- atomic_t next_port_id;
41476+ atomic_unchecked_t next_port_id;
41477 };
41478 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
41479
41480@@ -62,7 +62,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
41481 struct Scsi_Host *shost = dev_to_shost(dev);
41482 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
41483
41484- atomic_set(&srp_host->next_port_id, 0);
41485+ atomic_set_unchecked(&srp_host->next_port_id, 0);
41486 return 0;
41487 }
41488
41489@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
41490 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
41491 rport->roles = ids->roles;
41492
41493- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
41494+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
41495 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
41496
41497 transport_setup_device(&rport->dev);
41498diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
41499index 040f751..98a5ed2 100644
41500--- a/drivers/scsi/sg.c
41501+++ b/drivers/scsi/sg.c
41502@@ -1064,7 +1064,7 @@ sg_ioctl(struct inode *inode, struct file *filp,
41503 sdp->disk->disk_name,
41504 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
41505 NULL,
41506- (char *)arg);
41507+ (char __user *)arg);
41508 case BLKTRACESTART:
41509 return blk_trace_startstop(sdp->device->request_queue, 1);
41510 case BLKTRACESTOP:
41511@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
41512 const struct file_operations * fops;
41513 };
41514
41515-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
41516+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
41517 {"allow_dio", &adio_fops},
41518 {"debug", &debug_fops},
41519 {"def_reserved_size", &dressz_fops},
41520@@ -2307,7 +2307,7 @@ sg_proc_init(void)
41521 {
41522 int k, mask;
41523 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
41524- struct sg_proc_leaf * leaf;
41525+ const struct sg_proc_leaf * leaf;
41526
41527 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
41528 if (!sg_proc_sgp)
41529diff --git a/drivers/scsi/sym53c8xx_2/sym_glue.c b/drivers/scsi/sym53c8xx_2/sym_glue.c
41530index c19ca5e..3eb5959 100644
41531--- a/drivers/scsi/sym53c8xx_2/sym_glue.c
41532+++ b/drivers/scsi/sym53c8xx_2/sym_glue.c
41533@@ -1758,6 +1758,8 @@ static int __devinit sym2_probe(struct pci_dev *pdev,
41534 int do_iounmap = 0;
41535 int do_disable_device = 1;
41536
41537+ pax_track_stack();
41538+
41539 memset(&sym_dev, 0, sizeof(sym_dev));
41540 memset(&nvram, 0, sizeof(nvram));
41541 sym_dev.pdev = pdev;
41542diff --git a/drivers/serial/kgdboc.c b/drivers/serial/kgdboc.c
41543index eadc1ab..2d81457 100644
41544--- a/drivers/serial/kgdboc.c
41545+++ b/drivers/serial/kgdboc.c
41546@@ -18,7 +18,7 @@
41547
41548 #define MAX_CONFIG_LEN 40
41549
41550-static struct kgdb_io kgdboc_io_ops;
41551+static const struct kgdb_io kgdboc_io_ops;
41552
41553 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
41554 static int configured = -1;
41555@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void)
41556 module_put(THIS_MODULE);
41557 }
41558
41559-static struct kgdb_io kgdboc_io_ops = {
41560+static const struct kgdb_io kgdboc_io_ops = {
41561 .name = "kgdboc",
41562 .read_char = kgdboc_get_char,
41563 .write_char = kgdboc_put_char,
41564diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
41565index b76f246..7f41af7 100644
41566--- a/drivers/spi/spi.c
41567+++ b/drivers/spi/spi.c
41568@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, struct spi_message *message)
41569 EXPORT_SYMBOL_GPL(spi_sync);
41570
41571 /* portable code must never pass more than 32 bytes */
41572-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
41573+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
41574
41575 static u8 *buf;
41576
41577diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
41578index 99010d4..6bad87b 100644
41579--- a/drivers/staging/android/binder.c
41580+++ b/drivers/staging/android/binder.c
41581@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
41582 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
41583 }
41584
41585-static struct vm_operations_struct binder_vm_ops = {
41586+static const struct vm_operations_struct binder_vm_ops = {
41587 .open = binder_vma_open,
41588 .close = binder_vma_close,
41589 };
41590diff --git a/drivers/staging/b3dfg/b3dfg.c b/drivers/staging/b3dfg/b3dfg.c
41591index cda26bb..39fed3f 100644
41592--- a/drivers/staging/b3dfg/b3dfg.c
41593+++ b/drivers/staging/b3dfg/b3dfg.c
41594@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_area_struct *vma,
41595 return VM_FAULT_NOPAGE;
41596 }
41597
41598-static struct vm_operations_struct b3dfg_vm_ops = {
41599+static const struct vm_operations_struct b3dfg_vm_ops = {
41600 .fault = b3dfg_vma_fault,
41601 };
41602
41603@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp, struct vm_area_struct *vma)
41604 return r;
41605 }
41606
41607-static struct file_operations b3dfg_fops = {
41608+static const struct file_operations b3dfg_fops = {
41609 .owner = THIS_MODULE,
41610 .open = b3dfg_open,
41611 .release = b3dfg_release,
41612diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
41613index 908f25a..c9a579b 100644
41614--- a/drivers/staging/comedi/comedi_fops.c
41615+++ b/drivers/staging/comedi/comedi_fops.c
41616@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct *area)
41617 mutex_unlock(&dev->mutex);
41618 }
41619
41620-static struct vm_operations_struct comedi_vm_ops = {
41621+static const struct vm_operations_struct comedi_vm_ops = {
41622 .close = comedi_unmap,
41623 };
41624
41625diff --git a/drivers/staging/dream/qdsp5/adsp_driver.c b/drivers/staging/dream/qdsp5/adsp_driver.c
41626index e55a0db..577b776 100644
41627--- a/drivers/staging/dream/qdsp5/adsp_driver.c
41628+++ b/drivers/staging/dream/qdsp5/adsp_driver.c
41629@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_device(struct inode *inode)
41630 static dev_t adsp_devno;
41631 static struct class *adsp_class;
41632
41633-static struct file_operations adsp_fops = {
41634+static const struct file_operations adsp_fops = {
41635 .owner = THIS_MODULE,
41636 .open = adsp_open,
41637 .unlocked_ioctl = adsp_ioctl,
41638diff --git a/drivers/staging/dream/qdsp5/audio_aac.c b/drivers/staging/dream/qdsp5/audio_aac.c
41639index ad2390f..4116ee8 100644
41640--- a/drivers/staging/dream/qdsp5/audio_aac.c
41641+++ b/drivers/staging/dream/qdsp5/audio_aac.c
41642@@ -1022,7 +1022,7 @@ done:
41643 return rc;
41644 }
41645
41646-static struct file_operations audio_aac_fops = {
41647+static const struct file_operations audio_aac_fops = {
41648 .owner = THIS_MODULE,
41649 .open = audio_open,
41650 .release = audio_release,
41651diff --git a/drivers/staging/dream/qdsp5/audio_amrnb.c b/drivers/staging/dream/qdsp5/audio_amrnb.c
41652index cd818a5..870b37b 100644
41653--- a/drivers/staging/dream/qdsp5/audio_amrnb.c
41654+++ b/drivers/staging/dream/qdsp5/audio_amrnb.c
41655@@ -833,7 +833,7 @@ done:
41656 return rc;
41657 }
41658
41659-static struct file_operations audio_amrnb_fops = {
41660+static const struct file_operations audio_amrnb_fops = {
41661 .owner = THIS_MODULE,
41662 .open = audamrnb_open,
41663 .release = audamrnb_release,
41664diff --git a/drivers/staging/dream/qdsp5/audio_evrc.c b/drivers/staging/dream/qdsp5/audio_evrc.c
41665index 4b43e18..cedafda 100644
41666--- a/drivers/staging/dream/qdsp5/audio_evrc.c
41667+++ b/drivers/staging/dream/qdsp5/audio_evrc.c
41668@@ -805,7 +805,7 @@ dma_fail:
41669 return rc;
41670 }
41671
41672-static struct file_operations audio_evrc_fops = {
41673+static const struct file_operations audio_evrc_fops = {
41674 .owner = THIS_MODULE,
41675 .open = audevrc_open,
41676 .release = audevrc_release,
41677diff --git a/drivers/staging/dream/qdsp5/audio_in.c b/drivers/staging/dream/qdsp5/audio_in.c
41678index 3d950a2..9431118 100644
41679--- a/drivers/staging/dream/qdsp5/audio_in.c
41680+++ b/drivers/staging/dream/qdsp5/audio_in.c
41681@@ -913,7 +913,7 @@ static int audpre_open(struct inode *inode, struct file *file)
41682 return 0;
41683 }
41684
41685-static struct file_operations audio_fops = {
41686+static const struct file_operations audio_fops = {
41687 .owner = THIS_MODULE,
41688 .open = audio_in_open,
41689 .release = audio_in_release,
41690@@ -922,7 +922,7 @@ static struct file_operations audio_fops = {
41691 .unlocked_ioctl = audio_in_ioctl,
41692 };
41693
41694-static struct file_operations audpre_fops = {
41695+static const struct file_operations audpre_fops = {
41696 .owner = THIS_MODULE,
41697 .open = audpre_open,
41698 .unlocked_ioctl = audpre_ioctl,
41699diff --git a/drivers/staging/dream/qdsp5/audio_mp3.c b/drivers/staging/dream/qdsp5/audio_mp3.c
41700index b95574f..286c2f4 100644
41701--- a/drivers/staging/dream/qdsp5/audio_mp3.c
41702+++ b/drivers/staging/dream/qdsp5/audio_mp3.c
41703@@ -941,7 +941,7 @@ done:
41704 return rc;
41705 }
41706
41707-static struct file_operations audio_mp3_fops = {
41708+static const struct file_operations audio_mp3_fops = {
41709 .owner = THIS_MODULE,
41710 .open = audio_open,
41711 .release = audio_release,
41712diff --git a/drivers/staging/dream/qdsp5/audio_out.c b/drivers/staging/dream/qdsp5/audio_out.c
41713index d1adcf6..f8f9833 100644
41714--- a/drivers/staging/dream/qdsp5/audio_out.c
41715+++ b/drivers/staging/dream/qdsp5/audio_out.c
41716@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inode, struct file *file)
41717 return 0;
41718 }
41719
41720-static struct file_operations audio_fops = {
41721+static const struct file_operations audio_fops = {
41722 .owner = THIS_MODULE,
41723 .open = audio_open,
41724 .release = audio_release,
41725@@ -819,7 +819,7 @@ static struct file_operations audio_fops = {
41726 .unlocked_ioctl = audio_ioctl,
41727 };
41728
41729-static struct file_operations audpp_fops = {
41730+static const struct file_operations audpp_fops = {
41731 .owner = THIS_MODULE,
41732 .open = audpp_open,
41733 .unlocked_ioctl = audpp_ioctl,
41734diff --git a/drivers/staging/dream/qdsp5/audio_qcelp.c b/drivers/staging/dream/qdsp5/audio_qcelp.c
41735index f0f50e3..f6b9dbc 100644
41736--- a/drivers/staging/dream/qdsp5/audio_qcelp.c
41737+++ b/drivers/staging/dream/qdsp5/audio_qcelp.c
41738@@ -816,7 +816,7 @@ err:
41739 return rc;
41740 }
41741
41742-static struct file_operations audio_qcelp_fops = {
41743+static const struct file_operations audio_qcelp_fops = {
41744 .owner = THIS_MODULE,
41745 .open = audqcelp_open,
41746 .release = audqcelp_release,
41747diff --git a/drivers/staging/dream/qdsp5/snd.c b/drivers/staging/dream/qdsp5/snd.c
41748index 037d7ff..5469ec3 100644
41749--- a/drivers/staging/dream/qdsp5/snd.c
41750+++ b/drivers/staging/dream/qdsp5/snd.c
41751@@ -242,7 +242,7 @@ err:
41752 return rc;
41753 }
41754
41755-static struct file_operations snd_fops = {
41756+static const struct file_operations snd_fops = {
41757 .owner = THIS_MODULE,
41758 .open = snd_open,
41759 .release = snd_release,
41760diff --git a/drivers/staging/dream/smd/smd_qmi.c b/drivers/staging/dream/smd/smd_qmi.c
41761index d4e7d88..0ea632a 100644
41762--- a/drivers/staging/dream/smd/smd_qmi.c
41763+++ b/drivers/staging/dream/smd/smd_qmi.c
41764@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip, struct file *fp)
41765 return 0;
41766 }
41767
41768-static struct file_operations qmi_fops = {
41769+static const struct file_operations qmi_fops = {
41770 .owner = THIS_MODULE,
41771 .read = qmi_read,
41772 .write = qmi_write,
41773diff --git a/drivers/staging/dream/smd/smd_rpcrouter_device.c b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41774index cd3910b..ff053d3 100644
41775--- a/drivers/staging/dream/smd/smd_rpcrouter_device.c
41776+++ b/drivers/staging/dream/smd/smd_rpcrouter_device.c
41777@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file *filp, unsigned int cmd,
41778 return rc;
41779 }
41780
41781-static struct file_operations rpcrouter_server_fops = {
41782+static const struct file_operations rpcrouter_server_fops = {
41783 .owner = THIS_MODULE,
41784 .open = rpcrouter_open,
41785 .release = rpcrouter_release,
41786@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_server_fops = {
41787 .unlocked_ioctl = rpcrouter_ioctl,
41788 };
41789
41790-static struct file_operations rpcrouter_router_fops = {
41791+static const struct file_operations rpcrouter_router_fops = {
41792 .owner = THIS_MODULE,
41793 .open = rpcrouter_open,
41794 .release = rpcrouter_release,
41795diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
41796index c24e4e0..07665be 100644
41797--- a/drivers/staging/dst/dcore.c
41798+++ b/drivers/staging/dst/dcore.c
41799@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendisk *disk, fmode_t mode)
41800 return 0;
41801 }
41802
41803-static struct block_device_operations dst_blk_ops = {
41804+static const struct block_device_operations dst_blk_ops = {
41805 .open = dst_bdev_open,
41806 .release = dst_bdev_release,
41807 .owner = THIS_MODULE,
41808@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(struct dst_ctl *ctl,
41809 n->size = ctl->size;
41810
41811 atomic_set(&n->refcnt, 1);
41812- atomic_long_set(&n->gen, 0);
41813+ atomic_long_set_unchecked(&n->gen, 0);
41814 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
41815
41816 err = dst_node_sysfs_init(n);
41817diff --git a/drivers/staging/dst/trans.c b/drivers/staging/dst/trans.c
41818index 557d372..8d84422 100644
41819--- a/drivers/staging/dst/trans.c
41820+++ b/drivers/staging/dst/trans.c
41821@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n, struct bio *bio)
41822 t->error = 0;
41823 t->retries = 0;
41824 atomic_set(&t->refcnt, 1);
41825- t->gen = atomic_long_inc_return(&n->gen);
41826+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
41827
41828 t->enc = bio_data_dir(bio);
41829 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
41830diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
41831index 94f7752..d051514 100644
41832--- a/drivers/staging/et131x/et1310_tx.c
41833+++ b/drivers/staging/et131x/et1310_tx.c
41834@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
41835 struct net_device_stats *stats = &etdev->net_stats;
41836
41837 if (pMpTcb->Flags & fMP_DEST_BROAD)
41838- atomic_inc(&etdev->Stats.brdcstxmt);
41839+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
41840 else if (pMpTcb->Flags & fMP_DEST_MULTI)
41841- atomic_inc(&etdev->Stats.multixmt);
41842+ atomic_inc_unchecked(&etdev->Stats.multixmt);
41843 else
41844- atomic_inc(&etdev->Stats.unixmt);
41845+ atomic_inc_unchecked(&etdev->Stats.unixmt);
41846
41847 if (pMpTcb->Packet) {
41848 stats->tx_bytes += pMpTcb->Packet->len;
41849diff --git a/drivers/staging/et131x/et131x_adapter.h b/drivers/staging/et131x/et131x_adapter.h
41850index 1dfe06f..f469b4d 100644
41851--- a/drivers/staging/et131x/et131x_adapter.h
41852+++ b/drivers/staging/et131x/et131x_adapter.h
41853@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
41854 * operations
41855 */
41856 u32 unircv; /* # multicast packets received */
41857- atomic_t unixmt; /* # multicast packets for Tx */
41858+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
41859 u32 multircv; /* # multicast packets received */
41860- atomic_t multixmt; /* # multicast packets for Tx */
41861+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
41862 u32 brdcstrcv; /* # broadcast packets received */
41863- atomic_t brdcstxmt; /* # broadcast packets for Tx */
41864+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
41865 u32 norcvbuf; /* # Rx packets discarded */
41866 u32 noxmtbuf; /* # Tx packets discarded */
41867
41868diff --git a/drivers/staging/go7007/go7007-v4l2.c b/drivers/staging/go7007/go7007-v4l2.c
41869index 4bd353a..e28f455 100644
41870--- a/drivers/staging/go7007/go7007-v4l2.c
41871+++ b/drivers/staging/go7007/go7007-v4l2.c
41872@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
41873 return 0;
41874 }
41875
41876-static struct vm_operations_struct go7007_vm_ops = {
41877+static const struct vm_operations_struct go7007_vm_ops = {
41878 .open = go7007_vm_open,
41879 .close = go7007_vm_close,
41880 .fault = go7007_vm_fault,
41881diff --git a/drivers/staging/hv/Channel.c b/drivers/staging/hv/Channel.c
41882index 366dc95..b974d87 100644
41883--- a/drivers/staging/hv/Channel.c
41884+++ b/drivers/staging/hv/Channel.c
41885@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vmbus_channel *Channel, void *Kbuffer,
41886
41887 DPRINT_ENTER(VMBUS);
41888
41889- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
41890- atomic_inc(&gVmbusConnection.NextGpadlHandle);
41891+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
41892+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
41893
41894 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
41895 ASSERT(msgInfo != NULL);
41896diff --git a/drivers/staging/hv/Hv.c b/drivers/staging/hv/Hv.c
41897index b12237f..01ae28a 100644
41898--- a/drivers/staging/hv/Hv.c
41899+++ b/drivers/staging/hv/Hv.c
41900@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, void *Input, void *Output)
41901 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
41902 u32 outputAddressHi = outputAddress >> 32;
41903 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
41904- volatile void *hypercallPage = gHvContext.HypercallPage;
41905+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
41906
41907 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
41908 Control, Input, Output);
41909diff --git a/drivers/staging/hv/VmbusApi.h b/drivers/staging/hv/VmbusApi.h
41910index d089bb1..2ebc158 100644
41911--- a/drivers/staging/hv/VmbusApi.h
41912+++ b/drivers/staging/hv/VmbusApi.h
41913@@ -109,7 +109,7 @@ struct vmbus_channel_interface {
41914 u32 *GpadlHandle);
41915 int (*TeardownGpadl)(struct hv_device *device, u32 GpadlHandle);
41916 void (*GetInfo)(struct hv_device *dev, struct hv_device_info *devinfo);
41917-};
41918+} __no_const;
41919
41920 /* Base driver object */
41921 struct hv_driver {
41922diff --git a/drivers/staging/hv/VmbusPrivate.h b/drivers/staging/hv/VmbusPrivate.h
41923index 5a37cce..6ecc88c 100644
41924--- a/drivers/staging/hv/VmbusPrivate.h
41925+++ b/drivers/staging/hv/VmbusPrivate.h
41926@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
41927 struct VMBUS_CONNECTION {
41928 enum VMBUS_CONNECT_STATE ConnectState;
41929
41930- atomic_t NextGpadlHandle;
41931+ atomic_unchecked_t NextGpadlHandle;
41932
41933 /*
41934 * Represents channel interrupts. Each bit position represents a
41935diff --git a/drivers/staging/hv/blkvsc_drv.c b/drivers/staging/hv/blkvsc_drv.c
41936index 871a202..ca50ddf 100644
41937--- a/drivers/staging/hv/blkvsc_drv.c
41938+++ b/drivers/staging/hv/blkvsc_drv.c
41939@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;
41940 /* The one and only one */
41941 static struct blkvsc_driver_context g_blkvsc_drv;
41942
41943-static struct block_device_operations block_ops = {
41944+static const struct block_device_operations block_ops = {
41945 .owner = THIS_MODULE,
41946 .open = blkvsc_open,
41947 .release = blkvsc_release,
41948diff --git a/drivers/staging/hv/vmbus_drv.c b/drivers/staging/hv/vmbus_drv.c
41949index 6acc49a..fbc8d46 100644
41950--- a/drivers/staging/hv/vmbus_drv.c
41951+++ b/drivers/staging/hv/vmbus_drv.c
41952@@ -532,7 +532,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41953 to_device_context(root_device_obj);
41954 struct device_context *child_device_ctx =
41955 to_device_context(child_device_obj);
41956- static atomic_t device_num = ATOMIC_INIT(0);
41957+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
41958
41959 DPRINT_ENTER(VMBUS_DRV);
41960
41961@@ -541,7 +541,7 @@ static int vmbus_child_device_register(struct hv_device *root_device_obj,
41962
41963 /* Set the device name. Otherwise, device_register() will fail. */
41964 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
41965- atomic_inc_return(&device_num));
41966+ atomic_inc_return_unchecked(&device_num));
41967
41968 /* The new device belongs to this bus */
41969 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
41970diff --git a/drivers/staging/iio/ring_generic.h b/drivers/staging/iio/ring_generic.h
41971index d926189..17b19fd 100644
41972--- a/drivers/staging/iio/ring_generic.h
41973+++ b/drivers/staging/iio/ring_generic.h
41974@@ -87,7 +87,7 @@ struct iio_ring_access_funcs {
41975
41976 int (*is_enabled)(struct iio_ring_buffer *ring);
41977 int (*enable)(struct iio_ring_buffer *ring);
41978-};
41979+} __no_const;
41980
41981 /**
41982 * struct iio_ring_buffer - general ring buffer structure
41983diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
41984index 1b237b7..88c624e 100644
41985--- a/drivers/staging/octeon/ethernet-rx.c
41986+++ b/drivers/staging/octeon/ethernet-rx.c
41987@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long unused)
41988 /* Increment RX stats for virtual ports */
41989 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
41990 #ifdef CONFIG_64BIT
41991- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
41992- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
41993+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
41994+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
41995 #else
41996- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
41997- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
41998+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
41999+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
42000 #endif
42001 }
42002 netif_receive_skb(skb);
42003@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long unused)
42004 dev->name);
42005 */
42006 #ifdef CONFIG_64BIT
42007- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
42008+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
42009 #else
42010- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
42011+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
42012 #endif
42013 dev_kfree_skb_irq(skb);
42014 }
42015diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
42016index 492c502..d9909f1 100644
42017--- a/drivers/staging/octeon/ethernet.c
42018+++ b/drivers/staging/octeon/ethernet.c
42019@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
42020 * since the RX tasklet also increments it.
42021 */
42022 #ifdef CONFIG_64BIT
42023- atomic64_add(rx_status.dropped_packets,
42024- (atomic64_t *)&priv->stats.rx_dropped);
42025+ atomic64_add_unchecked(rx_status.dropped_packets,
42026+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
42027 #else
42028- atomic_add(rx_status.dropped_packets,
42029- (atomic_t *)&priv->stats.rx_dropped);
42030+ atomic_add_unchecked(rx_status.dropped_packets,
42031+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
42032 #endif
42033 }
42034
42035diff --git a/drivers/staging/otus/80211core/pub_zfi.h b/drivers/staging/otus/80211core/pub_zfi.h
42036index a35bd5d..28fff45 100644
42037--- a/drivers/staging/otus/80211core/pub_zfi.h
42038+++ b/drivers/staging/otus/80211core/pub_zfi.h
42039@@ -531,7 +531,7 @@ struct zsCbFuncTbl
42040 u8_t (*zfcbClassifyTxPacket)(zdev_t* dev, zbuf_t* buf);
42041
42042 void (*zfcbHwWatchDogNotify)(zdev_t* dev);
42043-};
42044+} __no_const;
42045
42046 extern void zfZeroMemory(u8_t* va, u16_t length);
42047 #define ZM_INIT_CB_FUNC_TABLE(p) zfZeroMemory((u8_t *)p, sizeof(struct zsCbFuncTbl));
42048diff --git a/drivers/staging/panel/panel.c b/drivers/staging/panel/panel.c
42049index c39a25f..696f5aa 100644
42050--- a/drivers/staging/panel/panel.c
42051+++ b/drivers/staging/panel/panel.c
42052@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *inode, struct file *file)
42053 return 0;
42054 }
42055
42056-static struct file_operations lcd_fops = {
42057+static const struct file_operations lcd_fops = {
42058 .write = lcd_write,
42059 .open = lcd_open,
42060 .release = lcd_release,
42061@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *inode, struct file *file)
42062 return 0;
42063 }
42064
42065-static struct file_operations keypad_fops = {
42066+static const struct file_operations keypad_fops = {
42067 .read = keypad_read, /* read */
42068 .open = keypad_open, /* open */
42069 .release = keypad_release, /* close */
42070diff --git a/drivers/staging/phison/phison.c b/drivers/staging/phison/phison.c
42071index 270ebcb..37e46af 100644
42072--- a/drivers/staging/phison/phison.c
42073+++ b/drivers/staging/phison/phison.c
42074@@ -43,7 +43,7 @@ static struct scsi_host_template phison_sht = {
42075 ATA_BMDMA_SHT(DRV_NAME),
42076 };
42077
42078-static struct ata_port_operations phison_ops = {
42079+static const struct ata_port_operations phison_ops = {
42080 .inherits = &ata_bmdma_port_ops,
42081 .prereset = phison_pre_reset,
42082 };
42083diff --git a/drivers/staging/poch/poch.c b/drivers/staging/poch/poch.c
42084index 2eb8e3d..57616a7 100644
42085--- a/drivers/staging/poch/poch.c
42086+++ b/drivers/staging/poch/poch.c
42087@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inode, struct file *filp,
42088 return 0;
42089 }
42090
42091-static struct file_operations poch_fops = {
42092+static const struct file_operations poch_fops = {
42093 .owner = THIS_MODULE,
42094 .open = poch_open,
42095 .release = poch_release,
42096diff --git a/drivers/staging/pohmelfs/inode.c b/drivers/staging/pohmelfs/inode.c
42097index c94de31..19402bc 100644
42098--- a/drivers/staging/pohmelfs/inode.c
42099+++ b/drivers/staging/pohmelfs/inode.c
42100@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42101 mutex_init(&psb->mcache_lock);
42102 psb->mcache_root = RB_ROOT;
42103 psb->mcache_timeout = msecs_to_jiffies(5000);
42104- atomic_long_set(&psb->mcache_gen, 0);
42105+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
42106
42107 psb->trans_max_pages = 100;
42108
42109@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct super_block *sb, void *data, int silent)
42110 INIT_LIST_HEAD(&psb->crypto_ready_list);
42111 INIT_LIST_HEAD(&psb->crypto_active_list);
42112
42113- atomic_set(&psb->trans_gen, 1);
42114+ atomic_set_unchecked(&psb->trans_gen, 1);
42115 atomic_long_set(&psb->total_inodes, 0);
42116
42117 mutex_init(&psb->state_lock);
42118diff --git a/drivers/staging/pohmelfs/mcache.c b/drivers/staging/pohmelfs/mcache.c
42119index e22665c..a2a9390 100644
42120--- a/drivers/staging/pohmelfs/mcache.c
42121+++ b/drivers/staging/pohmelfs/mcache.c
42122@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_alloc(struct pohmelfs_sb *psb, u64 start
42123 m->data = data;
42124 m->start = start;
42125 m->size = size;
42126- m->gen = atomic_long_inc_return(&psb->mcache_gen);
42127+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
42128
42129 mutex_lock(&psb->mcache_lock);
42130 err = pohmelfs_mcache_insert(psb, m);
42131diff --git a/drivers/staging/pohmelfs/netfs.h b/drivers/staging/pohmelfs/netfs.h
42132index 623a07d..4035c19 100644
42133--- a/drivers/staging/pohmelfs/netfs.h
42134+++ b/drivers/staging/pohmelfs/netfs.h
42135@@ -570,14 +570,14 @@ struct pohmelfs_config;
42136 struct pohmelfs_sb {
42137 struct rb_root mcache_root;
42138 struct mutex mcache_lock;
42139- atomic_long_t mcache_gen;
42140+ atomic_long_unchecked_t mcache_gen;
42141 unsigned long mcache_timeout;
42142
42143 unsigned int idx;
42144
42145 unsigned int trans_retries;
42146
42147- atomic_t trans_gen;
42148+ atomic_unchecked_t trans_gen;
42149
42150 unsigned int crypto_attached_size;
42151 unsigned int crypto_align_size;
42152diff --git a/drivers/staging/pohmelfs/trans.c b/drivers/staging/pohmelfs/trans.c
42153index 36a2535..0591bf4 100644
42154--- a/drivers/staging/pohmelfs/trans.c
42155+++ b/drivers/staging/pohmelfs/trans.c
42156@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_trans *t, struct pohmelfs_sb *psb)
42157 int err;
42158 struct netfs_cmd *cmd = t->iovec.iov_base;
42159
42160- t->gen = atomic_inc_return(&psb->trans_gen);
42161+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
42162
42163 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
42164 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
42165diff --git a/drivers/staging/sep/sep_driver.c b/drivers/staging/sep/sep_driver.c
42166index f890a16..509ece8 100644
42167--- a/drivers/staging/sep/sep_driver.c
42168+++ b/drivers/staging/sep/sep_driver.c
42169@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver = {
42170 static dev_t sep_devno;
42171
42172 /* the files operations structure of the driver */
42173-static struct file_operations sep_file_operations = {
42174+static const struct file_operations sep_file_operations = {
42175 .owner = THIS_MODULE,
42176 .ioctl = sep_ioctl,
42177 .poll = sep_poll,
42178diff --git a/drivers/staging/usbip/usbip_common.h b/drivers/staging/usbip/usbip_common.h
42179index 5e16bc3..7655b10 100644
42180--- a/drivers/staging/usbip/usbip_common.h
42181+++ b/drivers/staging/usbip/usbip_common.h
42182@@ -374,7 +374,7 @@ struct usbip_device {
42183 void (*shutdown)(struct usbip_device *);
42184 void (*reset)(struct usbip_device *);
42185 void (*unusable)(struct usbip_device *);
42186- } eh_ops;
42187+ } __no_const eh_ops;
42188 };
42189
42190
42191diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
42192index 57f7946..d9df23d 100644
42193--- a/drivers/staging/usbip/vhci.h
42194+++ b/drivers/staging/usbip/vhci.h
42195@@ -92,7 +92,7 @@ struct vhci_hcd {
42196 unsigned resuming:1;
42197 unsigned long re_timeout;
42198
42199- atomic_t seqnum;
42200+ atomic_unchecked_t seqnum;
42201
42202 /*
42203 * NOTE:
42204diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
42205index 20cd7db..c2693ff 100644
42206--- a/drivers/staging/usbip/vhci_hcd.c
42207+++ b/drivers/staging/usbip/vhci_hcd.c
42208@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
42209 return;
42210 }
42211
42212- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
42213+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42214 if (priv->seqnum == 0xffff)
42215 usbip_uinfo("seqnum max\n");
42216
42217@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
42218 return -ENOMEM;
42219 }
42220
42221- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
42222+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
42223 if (unlink->seqnum == 0xffff)
42224 usbip_uinfo("seqnum max\n");
42225
42226@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hcd)
42227 vdev->rhport = rhport;
42228 }
42229
42230- atomic_set(&vhci->seqnum, 0);
42231+ atomic_set_unchecked(&vhci->seqnum, 0);
42232 spin_lock_init(&vhci->lock);
42233
42234
42235diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
42236index 7fd76fe..673695a 100644
42237--- a/drivers/staging/usbip/vhci_rx.c
42238+++ b/drivers/staging/usbip/vhci_rx.c
42239@@ -79,7 +79,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
42240 usbip_uerr("cannot find a urb of seqnum %u\n",
42241 pdu->base.seqnum);
42242 usbip_uinfo("max seqnum %d\n",
42243- atomic_read(&the_controller->seqnum));
42244+ atomic_read_unchecked(&the_controller->seqnum));
42245 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
42246 return;
42247 }
42248diff --git a/drivers/staging/vme/devices/vme_user.c b/drivers/staging/vme/devices/vme_user.c
42249index 7891288..8e31300 100644
42250--- a/drivers/staging/vme/devices/vme_user.c
42251+++ b/drivers/staging/vme/devices/vme_user.c
42252@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *, struct file *, unsigned int,
42253 static int __init vme_user_probe(struct device *, int, int);
42254 static int __exit vme_user_remove(struct device *, int, int);
42255
42256-static struct file_operations vme_user_fops = {
42257+static const struct file_operations vme_user_fops = {
42258 .open = vme_user_open,
42259 .release = vme_user_release,
42260 .read = vme_user_read,
42261diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
42262index 58abf44..00c1fc8 100644
42263--- a/drivers/staging/vt6655/hostap.c
42264+++ b/drivers/staging/vt6655/hostap.c
42265@@ -84,7 +84,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42266 PSDevice apdev_priv;
42267 struct net_device *dev = pDevice->dev;
42268 int ret;
42269- const struct net_device_ops apdev_netdev_ops = {
42270+ net_device_ops_no_const apdev_netdev_ops = {
42271 .ndo_start_xmit = pDevice->tx_80211,
42272 };
42273
42274diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
42275index 0c8267a..db1f363 100644
42276--- a/drivers/staging/vt6656/hostap.c
42277+++ b/drivers/staging/vt6656/hostap.c
42278@@ -86,7 +86,7 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
42279 PSDevice apdev_priv;
42280 struct net_device *dev = pDevice->dev;
42281 int ret;
42282- const struct net_device_ops apdev_netdev_ops = {
42283+ net_device_ops_no_const apdev_netdev_ops = {
42284 .ndo_start_xmit = pDevice->tx_80211,
42285 };
42286
42287diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
42288index 925678b..da7f5ed 100644
42289--- a/drivers/staging/wlan-ng/hfa384x_usb.c
42290+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
42291@@ -205,7 +205,7 @@ static void unlocked_usbctlx_complete(hfa384x_t *hw, hfa384x_usbctlx_t *ctlx);
42292
42293 struct usbctlx_completor {
42294 int (*complete) (struct usbctlx_completor *);
42295-};
42296+} __no_const;
42297 typedef struct usbctlx_completor usbctlx_completor_t;
42298
42299 static int
42300diff --git a/drivers/telephony/ixj.c b/drivers/telephony/ixj.c
42301index 40de151..924f268 100644
42302--- a/drivers/telephony/ixj.c
42303+++ b/drivers/telephony/ixj.c
42304@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
42305 bool mContinue;
42306 char *pIn, *pOut;
42307
42308+ pax_track_stack();
42309+
42310 if (!SCI_Prepare(j))
42311 return 0;
42312
42313diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
42314index e941367..b631f5a 100644
42315--- a/drivers/uio/uio.c
42316+++ b/drivers/uio/uio.c
42317@@ -23,6 +23,7 @@
42318 #include <linux/string.h>
42319 #include <linux/kobject.h>
42320 #include <linux/uio_driver.h>
42321+#include <asm/local.h>
42322
42323 #define UIO_MAX_DEVICES 255
42324
42325@@ -30,10 +31,10 @@ struct uio_device {
42326 struct module *owner;
42327 struct device *dev;
42328 int minor;
42329- atomic_t event;
42330+ atomic_unchecked_t event;
42331 struct fasync_struct *async_queue;
42332 wait_queue_head_t wait;
42333- int vma_count;
42334+ local_t vma_count;
42335 struct uio_info *info;
42336 struct kobject *map_dir;
42337 struct kobject *portio_dir;
42338@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr,
42339 return entry->show(mem, buf);
42340 }
42341
42342-static struct sysfs_ops map_sysfs_ops = {
42343+static const struct sysfs_ops map_sysfs_ops = {
42344 .show = map_type_show,
42345 };
42346
42347@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr,
42348 return entry->show(port, buf);
42349 }
42350
42351-static struct sysfs_ops portio_sysfs_ops = {
42352+static const struct sysfs_ops portio_sysfs_ops = {
42353 .show = portio_type_show,
42354 };
42355
42356@@ -255,7 +256,7 @@ static ssize_t show_event(struct device *dev,
42357 struct uio_device *idev = dev_get_drvdata(dev);
42358 if (idev)
42359 return sprintf(buf, "%u\n",
42360- (unsigned int)atomic_read(&idev->event));
42361+ (unsigned int)atomic_read_unchecked(&idev->event));
42362 else
42363 return -ENODEV;
42364 }
42365@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *info)
42366 {
42367 struct uio_device *idev = info->uio_dev;
42368
42369- atomic_inc(&idev->event);
42370+ atomic_inc_unchecked(&idev->event);
42371 wake_up_interruptible(&idev->wait);
42372 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
42373 }
42374@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode, struct file *filep)
42375 }
42376
42377 listener->dev = idev;
42378- listener->event_count = atomic_read(&idev->event);
42379+ listener->event_count = atomic_read_unchecked(&idev->event);
42380 filep->private_data = listener;
42381
42382 if (idev->info->open) {
42383@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
42384 return -EIO;
42385
42386 poll_wait(filep, &idev->wait, wait);
42387- if (listener->event_count != atomic_read(&idev->event))
42388+ if (listener->event_count != atomic_read_unchecked(&idev->event))
42389 return POLLIN | POLLRDNORM;
42390 return 0;
42391 }
42392@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
42393 do {
42394 set_current_state(TASK_INTERRUPTIBLE);
42395
42396- event_count = atomic_read(&idev->event);
42397+ event_count = atomic_read_unchecked(&idev->event);
42398 if (event_count != listener->event_count) {
42399 if (copy_to_user(buf, &event_count, count))
42400 retval = -EFAULT;
42401@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
42402 static void uio_vma_open(struct vm_area_struct *vma)
42403 {
42404 struct uio_device *idev = vma->vm_private_data;
42405- idev->vma_count++;
42406+ local_inc(&idev->vma_count);
42407 }
42408
42409 static void uio_vma_close(struct vm_area_struct *vma)
42410 {
42411 struct uio_device *idev = vma->vm_private_data;
42412- idev->vma_count--;
42413+ local_dec(&idev->vma_count);
42414 }
42415
42416 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
42417@@ -840,7 +841,7 @@ int __uio_register_device(struct module *owner,
42418 idev->owner = owner;
42419 idev->info = info;
42420 init_waitqueue_head(&idev->wait);
42421- atomic_set(&idev->event, 0);
42422+ atomic_set_unchecked(&idev->event, 0);
42423
42424 ret = uio_get_minor(idev);
42425 if (ret)
42426diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
42427index fbea856..06efea6 100644
42428--- a/drivers/usb/atm/usbatm.c
42429+++ b/drivers/usb/atm/usbatm.c
42430@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42431 if (printk_ratelimit())
42432 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
42433 __func__, vpi, vci);
42434- atomic_inc(&vcc->stats->rx_err);
42435+ atomic_inc_unchecked(&vcc->stats->rx_err);
42436 return;
42437 }
42438
42439@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42440 if (length > ATM_MAX_AAL5_PDU) {
42441 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
42442 __func__, length, vcc);
42443- atomic_inc(&vcc->stats->rx_err);
42444+ atomic_inc_unchecked(&vcc->stats->rx_err);
42445 goto out;
42446 }
42447
42448@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42449 if (sarb->len < pdu_length) {
42450 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
42451 __func__, pdu_length, sarb->len, vcc);
42452- atomic_inc(&vcc->stats->rx_err);
42453+ atomic_inc_unchecked(&vcc->stats->rx_err);
42454 goto out;
42455 }
42456
42457 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
42458 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
42459 __func__, vcc);
42460- atomic_inc(&vcc->stats->rx_err);
42461+ atomic_inc_unchecked(&vcc->stats->rx_err);
42462 goto out;
42463 }
42464
42465@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42466 if (printk_ratelimit())
42467 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
42468 __func__, length);
42469- atomic_inc(&vcc->stats->rx_drop);
42470+ atomic_inc_unchecked(&vcc->stats->rx_drop);
42471 goto out;
42472 }
42473
42474@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
42475
42476 vcc->push(vcc, skb);
42477
42478- atomic_inc(&vcc->stats->rx);
42479+ atomic_inc_unchecked(&vcc->stats->rx);
42480 out:
42481 skb_trim(sarb, 0);
42482 }
42483@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned long data)
42484 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
42485
42486 usbatm_pop(vcc, skb);
42487- atomic_inc(&vcc->stats->tx);
42488+ atomic_inc_unchecked(&vcc->stats->tx);
42489
42490 skb = skb_dequeue(&instance->sndqueue);
42491 }
42492@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
42493 if (!left--)
42494 return sprintf(page,
42495 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
42496- atomic_read(&atm_dev->stats.aal5.tx),
42497- atomic_read(&atm_dev->stats.aal5.tx_err),
42498- atomic_read(&atm_dev->stats.aal5.rx),
42499- atomic_read(&atm_dev->stats.aal5.rx_err),
42500- atomic_read(&atm_dev->stats.aal5.rx_drop));
42501+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
42502+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
42503+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
42504+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
42505+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
42506
42507 if (!left--) {
42508 if (instance->disconnected)
42509diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
42510index 24e6205..fe5a5d4 100644
42511--- a/drivers/usb/core/hcd.c
42512+++ b/drivers/usb/core/hcd.c
42513@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutdown);
42514
42515 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42516
42517-struct usb_mon_operations *mon_ops;
42518+const struct usb_mon_operations *mon_ops;
42519
42520 /*
42521 * The registration is unlocked.
42522@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
42523 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
42524 */
42525
42526-int usb_mon_register (struct usb_mon_operations *ops)
42527+int usb_mon_register (const struct usb_mon_operations *ops)
42528 {
42529
42530 if (mon_ops)
42531diff --git a/drivers/usb/core/hcd.h b/drivers/usb/core/hcd.h
42532index bcbe104..9cfd1c6 100644
42533--- a/drivers/usb/core/hcd.h
42534+++ b/drivers/usb/core/hcd.h
42535@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) { }
42536 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
42537
42538 struct usb_mon_operations {
42539- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
42540- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42541- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42542+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
42543+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
42544+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
42545 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
42546 };
42547
42548-extern struct usb_mon_operations *mon_ops;
42549+extern const struct usb_mon_operations *mon_ops;
42550
42551 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
42552 {
42553@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(struct usb_bus *bus, struct urb *urb,
42554 (*mon_ops->urb_complete)(bus, urb, status);
42555 }
42556
42557-int usb_mon_register(struct usb_mon_operations *ops);
42558+int usb_mon_register(const struct usb_mon_operations *ops);
42559 void usb_mon_deregister(void);
42560
42561 #else
42562diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
42563index 409cc94..a673bad 100644
42564--- a/drivers/usb/core/message.c
42565+++ b/drivers/usb/core/message.c
42566@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device *udev, int index)
42567 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
42568 if (buf) {
42569 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
42570- if (len > 0) {
42571- smallbuf = kmalloc(++len, GFP_NOIO);
42572+ if (len++ > 0) {
42573+ smallbuf = kmalloc(len, GFP_NOIO);
42574 if (!smallbuf)
42575 return buf;
42576 memcpy(smallbuf, buf, len);
42577diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
42578index 62ff5e7..530b74e 100644
42579--- a/drivers/usb/misc/appledisplay.c
42580+++ b/drivers/usb/misc/appledisplay.c
42581@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightness(struct backlight_device *bd)
42582 return pdata->msgdata[1];
42583 }
42584
42585-static struct backlight_ops appledisplay_bl_data = {
42586+static const struct backlight_ops appledisplay_bl_data = {
42587 .get_brightness = appledisplay_bl_get_brightness,
42588 .update_status = appledisplay_bl_update_status,
42589 };
42590diff --git a/drivers/usb/mon/mon_main.c b/drivers/usb/mon/mon_main.c
42591index e0c2db3..bd8cb66 100644
42592--- a/drivers/usb/mon/mon_main.c
42593+++ b/drivers/usb/mon/mon_main.c
42594@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
42595 /*
42596 * Ops
42597 */
42598-static struct usb_mon_operations mon_ops_0 = {
42599+static const struct usb_mon_operations mon_ops_0 = {
42600 .urb_submit = mon_submit,
42601 .urb_submit_error = mon_submit_error,
42602 .urb_complete = mon_complete,
42603diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
42604index d6bea3e..60b250e 100644
42605--- a/drivers/usb/wusbcore/wa-hc.h
42606+++ b/drivers/usb/wusbcore/wa-hc.h
42607@@ -192,7 +192,7 @@ struct wahc {
42608 struct list_head xfer_delayed_list;
42609 spinlock_t xfer_list_lock;
42610 struct work_struct xfer_work;
42611- atomic_t xfer_id_count;
42612+ atomic_unchecked_t xfer_id_count;
42613 };
42614
42615
42616@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
42617 INIT_LIST_HEAD(&wa->xfer_delayed_list);
42618 spin_lock_init(&wa->xfer_list_lock);
42619 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
42620- atomic_set(&wa->xfer_id_count, 1);
42621+ atomic_set_unchecked(&wa->xfer_id_count, 1);
42622 }
42623
42624 /**
42625diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
42626index 613a5fc..3174865 100644
42627--- a/drivers/usb/wusbcore/wa-xfer.c
42628+++ b/drivers/usb/wusbcore/wa-xfer.c
42629@@ -293,7 +293,7 @@ out:
42630 */
42631 static void wa_xfer_id_init(struct wa_xfer *xfer)
42632 {
42633- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
42634+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
42635 }
42636
42637 /*
42638diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
42639index aa42fce..f8a828c 100644
42640--- a/drivers/uwb/wlp/messages.c
42641+++ b/drivers/uwb/wlp/messages.c
42642@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct sk_buff *skb)
42643 size_t len = skb->len;
42644 size_t used;
42645 ssize_t result;
42646- struct wlp_nonce enonce, rnonce;
42647+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
42648 enum wlp_assc_error assc_err;
42649 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
42650 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
42651diff --git a/drivers/uwb/wlp/sysfs.c b/drivers/uwb/wlp/sysfs.c
42652index 0370399..6627c94 100644
42653--- a/drivers/uwb/wlp/sysfs.c
42654+++ b/drivers/uwb/wlp/sysfs.c
42655@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobject *kobj, struct attribute *attr,
42656 return ret;
42657 }
42658
42659-static
42660-struct sysfs_ops wss_sysfs_ops = {
42661+static const struct sysfs_ops wss_sysfs_ops = {
42662 .show = wlp_wss_attr_show,
42663 .store = wlp_wss_attr_store,
42664 };
42665diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
42666index d5e8010..5687b56 100644
42667--- a/drivers/video/atmel_lcdfb.c
42668+++ b/drivers/video/atmel_lcdfb.c
42669@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struct backlight_device *bl)
42670 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
42671 }
42672
42673-static struct backlight_ops atmel_lcdc_bl_ops = {
42674+static const struct backlight_ops atmel_lcdc_bl_ops = {
42675 .update_status = atmel_bl_update_status,
42676 .get_brightness = atmel_bl_get_brightness,
42677 };
42678diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
42679index e4e4d43..66bcbcc 100644
42680--- a/drivers/video/aty/aty128fb.c
42681+++ b/drivers/video/aty/aty128fb.c
42682@@ -149,7 +149,7 @@ enum {
42683 };
42684
42685 /* Must match above enum */
42686-static const char *r128_family[] __devinitdata = {
42687+static const char *r128_family[] __devinitconst = {
42688 "AGP",
42689 "PCI",
42690 "PRO AGP",
42691@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(struct backlight_device *bd)
42692 return bd->props.brightness;
42693 }
42694
42695-static struct backlight_ops aty128_bl_data = {
42696+static const struct backlight_ops aty128_bl_data = {
42697 .get_brightness = aty128_bl_get_brightness,
42698 .update_status = aty128_bl_update_status,
42699 };
42700diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
42701index 913b4a4..9295a38 100644
42702--- a/drivers/video/aty/atyfb_base.c
42703+++ b/drivers/video/aty/atyfb_base.c
42704@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct backlight_device *bd)
42705 return bd->props.brightness;
42706 }
42707
42708-static struct backlight_ops aty_bl_data = {
42709+static const struct backlight_ops aty_bl_data = {
42710 .get_brightness = aty_bl_get_brightness,
42711 .update_status = aty_bl_update_status,
42712 };
42713diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
42714index 1a056ad..221bd6a 100644
42715--- a/drivers/video/aty/radeon_backlight.c
42716+++ b/drivers/video/aty/radeon_backlight.c
42717@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(struct backlight_device *bd)
42718 return bd->props.brightness;
42719 }
42720
42721-static struct backlight_ops radeon_bl_data = {
42722+static const struct backlight_ops radeon_bl_data = {
42723 .get_brightness = radeon_bl_get_brightness,
42724 .update_status = radeon_bl_update_status,
42725 };
42726diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
42727index ad05da5..3cb2cb9 100644
42728--- a/drivers/video/backlight/adp5520_bl.c
42729+++ b/drivers/video/backlight/adp5520_bl.c
42730@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(struct backlight_device *bl)
42731 return error ? data->current_brightness : reg_val;
42732 }
42733
42734-static struct backlight_ops adp5520_bl_ops = {
42735+static const struct backlight_ops adp5520_bl_ops = {
42736 .update_status = adp5520_bl_update_status,
42737 .get_brightness = adp5520_bl_get_brightness,
42738 };
42739diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
42740index 2c3bdfc..d769b0b 100644
42741--- a/drivers/video/backlight/adx_bl.c
42742+++ b/drivers/video/backlight/adx_bl.c
42743@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct fb_info *fb)
42744 return 1;
42745 }
42746
42747-static struct backlight_ops adx_backlight_ops = {
42748+static const struct backlight_ops adx_backlight_ops = {
42749 .options = 0,
42750 .update_status = adx_backlight_update_status,
42751 .get_brightness = adx_backlight_get_brightness,
42752diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
42753index 505c082..6b6b3cc 100644
42754--- a/drivers/video/backlight/atmel-pwm-bl.c
42755+++ b/drivers/video/backlight/atmel-pwm-bl.c
42756@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
42757 return pwm_channel_enable(&pwmbl->pwmc);
42758 }
42759
42760-static struct backlight_ops atmel_pwm_bl_ops = {
42761+static const struct backlight_ops atmel_pwm_bl_ops = {
42762 .get_brightness = atmel_pwm_bl_get_intensity,
42763 .update_status = atmel_pwm_bl_set_intensity,
42764 };
42765diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
42766index 5e20e6e..89025e6 100644
42767--- a/drivers/video/backlight/backlight.c
42768+++ b/drivers/video/backlight/backlight.c
42769@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
42770 * ERR_PTR() or a pointer to the newly allocated device.
42771 */
42772 struct backlight_device *backlight_device_register(const char *name,
42773- struct device *parent, void *devdata, struct backlight_ops *ops)
42774+ struct device *parent, void *devdata, const struct backlight_ops *ops)
42775 {
42776 struct backlight_device *new_bd;
42777 int rc;
42778diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
42779index 9677494..b4bcf80 100644
42780--- a/drivers/video/backlight/corgi_lcd.c
42781+++ b/drivers/video/backlight/corgi_lcd.c
42782@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit)
42783 }
42784 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
42785
42786-static struct backlight_ops corgi_bl_ops = {
42787+static const struct backlight_ops corgi_bl_ops = {
42788 .get_brightness = corgi_bl_get_intensity,
42789 .update_status = corgi_bl_update_status,
42790 };
42791diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
42792index b9fe62b..2914bf1 100644
42793--- a/drivers/video/backlight/cr_bllcd.c
42794+++ b/drivers/video/backlight/cr_bllcd.c
42795@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(struct backlight_device *bd)
42796 return intensity;
42797 }
42798
42799-static struct backlight_ops cr_backlight_ops = {
42800+static const struct backlight_ops cr_backlight_ops = {
42801 .get_brightness = cr_backlight_get_intensity,
42802 .update_status = cr_backlight_set_intensity,
42803 };
42804diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
42805index 701a108..feacfd5 100644
42806--- a/drivers/video/backlight/da903x_bl.c
42807+++ b/drivers/video/backlight/da903x_bl.c
42808@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightness(struct backlight_device *bl)
42809 return data->current_brightness;
42810 }
42811
42812-static struct backlight_ops da903x_backlight_ops = {
42813+static const struct backlight_ops da903x_backlight_ops = {
42814 .update_status = da903x_backlight_update_status,
42815 .get_brightness = da903x_backlight_get_brightness,
42816 };
42817diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
42818index 6d27f62..e6d348e 100644
42819--- a/drivers/video/backlight/generic_bl.c
42820+++ b/drivers/video/backlight/generic_bl.c
42821@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
42822 }
42823 EXPORT_SYMBOL(corgibl_limit_intensity);
42824
42825-static struct backlight_ops genericbl_ops = {
42826+static const struct backlight_ops genericbl_ops = {
42827 .options = BL_CORE_SUSPENDRESUME,
42828 .get_brightness = genericbl_get_intensity,
42829 .update_status = genericbl_send_intensity,
42830diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
42831index 7fb4eef..f7cc528 100644
42832--- a/drivers/video/backlight/hp680_bl.c
42833+++ b/drivers/video/backlight/hp680_bl.c
42834@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct backlight_device *bd)
42835 return current_intensity;
42836 }
42837
42838-static struct backlight_ops hp680bl_ops = {
42839+static const struct backlight_ops hp680bl_ops = {
42840 .get_brightness = hp680bl_get_intensity,
42841 .update_status = hp680bl_set_intensity,
42842 };
42843diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
42844index 7aed256..db9071f 100644
42845--- a/drivers/video/backlight/jornada720_bl.c
42846+++ b/drivers/video/backlight/jornada720_bl.c
42847@@ -93,7 +93,7 @@ out:
42848 return ret;
42849 }
42850
42851-static struct backlight_ops jornada_bl_ops = {
42852+static const struct backlight_ops jornada_bl_ops = {
42853 .get_brightness = jornada_bl_get_brightness,
42854 .update_status = jornada_bl_update_status,
42855 .options = BL_CORE_SUSPENDRESUME,
42856diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
42857index a38fda1..939e7b8 100644
42858--- a/drivers/video/backlight/kb3886_bl.c
42859+++ b/drivers/video/backlight/kb3886_bl.c
42860@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct backlight_device *bd)
42861 return kb3886bl_intensity;
42862 }
42863
42864-static struct backlight_ops kb3886bl_ops = {
42865+static const struct backlight_ops kb3886bl_ops = {
42866 .get_brightness = kb3886bl_get_intensity,
42867 .update_status = kb3886bl_send_intensity,
42868 };
42869diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
42870index 6b488b8..00a9591 100644
42871--- a/drivers/video/backlight/locomolcd.c
42872+++ b/drivers/video/backlight/locomolcd.c
42873@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struct backlight_device *bd)
42874 return current_intensity;
42875 }
42876
42877-static struct backlight_ops locomobl_data = {
42878+static const struct backlight_ops locomobl_data = {
42879 .get_brightness = locomolcd_get_intensity,
42880 .update_status = locomolcd_set_intensity,
42881 };
42882diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
42883index 99bdfa8..3dac448 100644
42884--- a/drivers/video/backlight/mbp_nvidia_bl.c
42885+++ b/drivers/video/backlight/mbp_nvidia_bl.c
42886@@ -33,7 +33,7 @@ struct dmi_match_data {
42887 unsigned long iostart;
42888 unsigned long iolen;
42889 /* Backlight operations structure. */
42890- struct backlight_ops backlight_ops;
42891+ const struct backlight_ops backlight_ops;
42892 };
42893
42894 /* Module parameters. */
42895diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
42896index cbad67e..3cf900e 100644
42897--- a/drivers/video/backlight/omap1_bl.c
42898+++ b/drivers/video/backlight/omap1_bl.c
42899@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct backlight_device *dev)
42900 return bl->current_intensity;
42901 }
42902
42903-static struct backlight_ops omapbl_ops = {
42904+static const struct backlight_ops omapbl_ops = {
42905 .get_brightness = omapbl_get_intensity,
42906 .update_status = omapbl_update_status,
42907 };
42908diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
42909index 9edaf24..075786e 100644
42910--- a/drivers/video/backlight/progear_bl.c
42911+++ b/drivers/video/backlight/progear_bl.c
42912@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struct backlight_device *bd)
42913 return intensity - HW_LEVEL_MIN;
42914 }
42915
42916-static struct backlight_ops progearbl_ops = {
42917+static const struct backlight_ops progearbl_ops = {
42918 .get_brightness = progearbl_get_intensity,
42919 .update_status = progearbl_set_intensity,
42920 };
42921diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
42922index 8871662..df9e0b3 100644
42923--- a/drivers/video/backlight/pwm_bl.c
42924+++ b/drivers/video/backlight/pwm_bl.c
42925@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(struct backlight_device *bl)
42926 return bl->props.brightness;
42927 }
42928
42929-static struct backlight_ops pwm_backlight_ops = {
42930+static const struct backlight_ops pwm_backlight_ops = {
42931 .update_status = pwm_backlight_update_status,
42932 .get_brightness = pwm_backlight_get_brightness,
42933 };
42934diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
42935index 43edbad..e14ce4d 100644
42936--- a/drivers/video/backlight/tosa_bl.c
42937+++ b/drivers/video/backlight/tosa_bl.c
42938@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct backlight_device *dev)
42939 return props->brightness;
42940 }
42941
42942-static struct backlight_ops bl_ops = {
42943+static const struct backlight_ops bl_ops = {
42944 .get_brightness = tosa_bl_get_brightness,
42945 .update_status = tosa_bl_update_status,
42946 };
42947diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
42948index 467bdb7..e32add3 100644
42949--- a/drivers/video/backlight/wm831x_bl.c
42950+++ b/drivers/video/backlight/wm831x_bl.c
42951@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightness(struct backlight_device *bl)
42952 return data->current_brightness;
42953 }
42954
42955-static struct backlight_ops wm831x_backlight_ops = {
42956+static const struct backlight_ops wm831x_backlight_ops = {
42957 .options = BL_CORE_SUSPENDRESUME,
42958 .update_status = wm831x_backlight_update_status,
42959 .get_brightness = wm831x_backlight_get_brightness,
42960diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
42961index e49ae5e..db4e6f7 100644
42962--- a/drivers/video/bf54x-lq043fb.c
42963+++ b/drivers/video/bf54x-lq043fb.c
42964@@ -463,7 +463,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42965 return 0;
42966 }
42967
42968-static struct backlight_ops bfin_lq043fb_bl_ops = {
42969+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42970 .get_brightness = bl_get_brightness,
42971 };
42972
42973diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
42974index 2c72a7c..d523e52 100644
42975--- a/drivers/video/bfin-t350mcqb-fb.c
42976+++ b/drivers/video/bfin-t350mcqb-fb.c
42977@@ -381,7 +381,7 @@ static int bl_get_brightness(struct backlight_device *bd)
42978 return 0;
42979 }
42980
42981-static struct backlight_ops bfin_lq043fb_bl_ops = {
42982+static const struct backlight_ops bfin_lq043fb_bl_ops = {
42983 .get_brightness = bl_get_brightness,
42984 };
42985
42986diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
42987index f53b9f1..958bf4e 100644
42988--- a/drivers/video/fbcmap.c
42989+++ b/drivers/video/fbcmap.c
42990@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
42991 rc = -ENODEV;
42992 goto out;
42993 }
42994- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
42995- !info->fbops->fb_setcmap)) {
42996+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
42997 rc = -EINVAL;
42998 goto out1;
42999 }
43000diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
43001index 99bbd28..ad3829e 100644
43002--- a/drivers/video/fbmem.c
43003+++ b/drivers/video/fbmem.c
43004@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43005 image->dx += image->width + 8;
43006 }
43007 } else if (rotate == FB_ROTATE_UD) {
43008- for (x = 0; x < num && image->dx >= 0; x++) {
43009+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
43010 info->fbops->fb_imageblit(info, image);
43011 image->dx -= image->width + 8;
43012 }
43013@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
43014 image->dy += image->height + 8;
43015 }
43016 } else if (rotate == FB_ROTATE_CCW) {
43017- for (x = 0; x < num && image->dy >= 0; x++) {
43018+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
43019 info->fbops->fb_imageblit(info, image);
43020 image->dy -= image->height + 8;
43021 }
43022@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
43023 int flags = info->flags;
43024 int ret = 0;
43025
43026+ pax_track_stack();
43027+
43028 if (var->activate & FB_ACTIVATE_INV_MODE) {
43029 struct fb_videomode mode1, mode2;
43030
43031@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43032 void __user *argp = (void __user *)arg;
43033 long ret = 0;
43034
43035+ pax_track_stack();
43036+
43037 switch (cmd) {
43038 case FBIOGET_VSCREENINFO:
43039 if (!lock_fb_info(info))
43040@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
43041 return -EFAULT;
43042 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
43043 return -EINVAL;
43044- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
43045+ if (con2fb.framebuffer >= FB_MAX)
43046 return -EINVAL;
43047 if (!registered_fb[con2fb.framebuffer])
43048 request_module("fb%d", con2fb.framebuffer);
43049diff --git a/drivers/video/geode/gx1fb_core.c b/drivers/video/geode/gx1fb_core.c
43050index f20eff8..3e4f622 100644
43051--- a/drivers/video/geode/gx1fb_core.c
43052+++ b/drivers/video/geode/gx1fb_core.c
43053@@ -30,7 +30,7 @@ static int crt_option = 1;
43054 static char panel_option[32] = "";
43055
43056 /* Modes relevant to the GX1 (taken from modedb.c) */
43057-static const struct fb_videomode __initdata gx1_modedb[] = {
43058+static const struct fb_videomode __initconst gx1_modedb[] = {
43059 /* 640x480-60 VESA */
43060 { NULL, 60, 640, 480, 39682, 48, 16, 33, 10, 96, 2,
43061 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_VESA },
43062diff --git a/drivers/video/gxt4500.c b/drivers/video/gxt4500.c
43063index 896e53d..4d87d0b 100644
43064--- a/drivers/video/gxt4500.c
43065+++ b/drivers/video/gxt4500.c
43066@@ -156,7 +156,7 @@ struct gxt4500_par {
43067 static char *mode_option;
43068
43069 /* default mode: 1280x1024 @ 60 Hz, 8 bpp */
43070-static const struct fb_videomode defaultmode __devinitdata = {
43071+static const struct fb_videomode defaultmode __devinitconst = {
43072 .refresh = 60,
43073 .xres = 1280,
43074 .yres = 1024,
43075@@ -581,7 +581,7 @@ static int gxt4500_blank(int blank, struct fb_info *info)
43076 return 0;
43077 }
43078
43079-static const struct fb_fix_screeninfo gxt4500_fix __devinitdata = {
43080+static const struct fb_fix_screeninfo gxt4500_fix __devinitconst = {
43081 .id = "IBM GXT4500P",
43082 .type = FB_TYPE_PACKED_PIXELS,
43083 .visual = FB_VISUAL_PSEUDOCOLOR,
43084diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
43085index f5bedee..28c6028 100644
43086--- a/drivers/video/i810/i810_accel.c
43087+++ b/drivers/video/i810/i810_accel.c
43088@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
43089 }
43090 }
43091 printk("ringbuffer lockup!!!\n");
43092+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
43093 i810_report_error(mmio);
43094 par->dev_flags |= LOCKUP;
43095 info->pixmap.scan_align = 1;
43096diff --git a/drivers/video/i810/i810_main.c b/drivers/video/i810/i810_main.c
43097index 5743ea2..457f82c 100644
43098--- a/drivers/video/i810/i810_main.c
43099+++ b/drivers/video/i810/i810_main.c
43100@@ -97,7 +97,7 @@ static int i810fb_blank (int blank_mode, struct fb_info *info);
43101 static void i810fb_release_resource (struct fb_info *info, struct i810fb_par *par);
43102
43103 /* PCI */
43104-static const char *i810_pci_list[] __devinitdata = {
43105+static const char *i810_pci_list[] __devinitconst = {
43106 "Intel(R) 810 Framebuffer Device" ,
43107 "Intel(R) 810-DC100 Framebuffer Device" ,
43108 "Intel(R) 810E Framebuffer Device" ,
43109diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
43110index 3c14e43..eafa544 100644
43111--- a/drivers/video/logo/logo_linux_clut224.ppm
43112+++ b/drivers/video/logo/logo_linux_clut224.ppm
43113@@ -1,1604 +1,1123 @@
43114 P3
43115-# Standard 224-color Linux logo
43116 80 80
43117 255
43118- 0 0 0 0 0 0 0 0 0 0 0 0
43119- 0 0 0 0 0 0 0 0 0 0 0 0
43120- 0 0 0 0 0 0 0 0 0 0 0 0
43121- 0 0 0 0 0 0 0 0 0 0 0 0
43122- 0 0 0 0 0 0 0 0 0 0 0 0
43123- 0 0 0 0 0 0 0 0 0 0 0 0
43124- 0 0 0 0 0 0 0 0 0 0 0 0
43125- 0 0 0 0 0 0 0 0 0 0 0 0
43126- 0 0 0 0 0 0 0 0 0 0 0 0
43127- 6 6 6 6 6 6 10 10 10 10 10 10
43128- 10 10 10 6 6 6 6 6 6 6 6 6
43129- 0 0 0 0 0 0 0 0 0 0 0 0
43130- 0 0 0 0 0 0 0 0 0 0 0 0
43131- 0 0 0 0 0 0 0 0 0 0 0 0
43132- 0 0 0 0 0 0 0 0 0 0 0 0
43133- 0 0 0 0 0 0 0 0 0 0 0 0
43134- 0 0 0 0 0 0 0 0 0 0 0 0
43135- 0 0 0 0 0 0 0 0 0 0 0 0
43136- 0 0 0 0 0 0 0 0 0 0 0 0
43137- 0 0 0 0 0 0 0 0 0 0 0 0
43138- 0 0 0 0 0 0 0 0 0 0 0 0
43139- 0 0 0 0 0 0 0 0 0 0 0 0
43140- 0 0 0 0 0 0 0 0 0 0 0 0
43141- 0 0 0 0 0 0 0 0 0 0 0 0
43142- 0 0 0 0 0 0 0 0 0 0 0 0
43143- 0 0 0 0 0 0 0 0 0 0 0 0
43144- 0 0 0 0 0 0 0 0 0 0 0 0
43145- 0 0 0 0 0 0 0 0 0 0 0 0
43146- 0 0 0 6 6 6 10 10 10 14 14 14
43147- 22 22 22 26 26 26 30 30 30 34 34 34
43148- 30 30 30 30 30 30 26 26 26 18 18 18
43149- 14 14 14 10 10 10 6 6 6 0 0 0
43150- 0 0 0 0 0 0 0 0 0 0 0 0
43151- 0 0 0 0 0 0 0 0 0 0 0 0
43152- 0 0 0 0 0 0 0 0 0 0 0 0
43153- 0 0 0 0 0 0 0 0 0 0 0 0
43154- 0 0 0 0 0 0 0 0 0 0 0 0
43155- 0 0 0 0 0 0 0 0 0 0 0 0
43156- 0 0 0 0 0 0 0 0 0 0 0 0
43157- 0 0 0 0 0 0 0 0 0 0 0 0
43158- 0 0 0 0 0 0 0 0 0 0 0 0
43159- 0 0 0 0 0 1 0 0 1 0 0 0
43160- 0 0 0 0 0 0 0 0 0 0 0 0
43161- 0 0 0 0 0 0 0 0 0 0 0 0
43162- 0 0 0 0 0 0 0 0 0 0 0 0
43163- 0 0 0 0 0 0 0 0 0 0 0 0
43164- 0 0 0 0 0 0 0 0 0 0 0 0
43165- 0 0 0 0 0 0 0 0 0 0 0 0
43166- 6 6 6 14 14 14 26 26 26 42 42 42
43167- 54 54 54 66 66 66 78 78 78 78 78 78
43168- 78 78 78 74 74 74 66 66 66 54 54 54
43169- 42 42 42 26 26 26 18 18 18 10 10 10
43170- 6 6 6 0 0 0 0 0 0 0 0 0
43171- 0 0 0 0 0 0 0 0 0 0 0 0
43172- 0 0 0 0 0 0 0 0 0 0 0 0
43173- 0 0 0 0 0 0 0 0 0 0 0 0
43174- 0 0 0 0 0 0 0 0 0 0 0 0
43175- 0 0 0 0 0 0 0 0 0 0 0 0
43176- 0 0 0 0 0 0 0 0 0 0 0 0
43177- 0 0 0 0 0 0 0 0 0 0 0 0
43178- 0 0 0 0 0 0 0 0 0 0 0 0
43179- 0 0 1 0 0 0 0 0 0 0 0 0
43180- 0 0 0 0 0 0 0 0 0 0 0 0
43181- 0 0 0 0 0 0 0 0 0 0 0 0
43182- 0 0 0 0 0 0 0 0 0 0 0 0
43183- 0 0 0 0 0 0 0 0 0 0 0 0
43184- 0 0 0 0 0 0 0 0 0 0 0 0
43185- 0 0 0 0 0 0 0 0 0 10 10 10
43186- 22 22 22 42 42 42 66 66 66 86 86 86
43187- 66 66 66 38 38 38 38 38 38 22 22 22
43188- 26 26 26 34 34 34 54 54 54 66 66 66
43189- 86 86 86 70 70 70 46 46 46 26 26 26
43190- 14 14 14 6 6 6 0 0 0 0 0 0
43191- 0 0 0 0 0 0 0 0 0 0 0 0
43192- 0 0 0 0 0 0 0 0 0 0 0 0
43193- 0 0 0 0 0 0 0 0 0 0 0 0
43194- 0 0 0 0 0 0 0 0 0 0 0 0
43195- 0 0 0 0 0 0 0 0 0 0 0 0
43196- 0 0 0 0 0 0 0 0 0 0 0 0
43197- 0 0 0 0 0 0 0 0 0 0 0 0
43198- 0 0 0 0 0 0 0 0 0 0 0 0
43199- 0 0 1 0 0 1 0 0 1 0 0 0
43200- 0 0 0 0 0 0 0 0 0 0 0 0
43201- 0 0 0 0 0 0 0 0 0 0 0 0
43202- 0 0 0 0 0 0 0 0 0 0 0 0
43203- 0 0 0 0 0 0 0 0 0 0 0 0
43204- 0 0 0 0 0 0 0 0 0 0 0 0
43205- 0 0 0 0 0 0 10 10 10 26 26 26
43206- 50 50 50 82 82 82 58 58 58 6 6 6
43207- 2 2 6 2 2 6 2 2 6 2 2 6
43208- 2 2 6 2 2 6 2 2 6 2 2 6
43209- 6 6 6 54 54 54 86 86 86 66 66 66
43210- 38 38 38 18 18 18 6 6 6 0 0 0
43211- 0 0 0 0 0 0 0 0 0 0 0 0
43212- 0 0 0 0 0 0 0 0 0 0 0 0
43213- 0 0 0 0 0 0 0 0 0 0 0 0
43214- 0 0 0 0 0 0 0 0 0 0 0 0
43215- 0 0 0 0 0 0 0 0 0 0 0 0
43216- 0 0 0 0 0 0 0 0 0 0 0 0
43217- 0 0 0 0 0 0 0 0 0 0 0 0
43218- 0 0 0 0 0 0 0 0 0 0 0 0
43219- 0 0 0 0 0 0 0 0 0 0 0 0
43220- 0 0 0 0 0 0 0 0 0 0 0 0
43221- 0 0 0 0 0 0 0 0 0 0 0 0
43222- 0 0 0 0 0 0 0 0 0 0 0 0
43223- 0 0 0 0 0 0 0 0 0 0 0 0
43224- 0 0 0 0 0 0 0 0 0 0 0 0
43225- 0 0 0 6 6 6 22 22 22 50 50 50
43226- 78 78 78 34 34 34 2 2 6 2 2 6
43227- 2 2 6 2 2 6 2 2 6 2 2 6
43228- 2 2 6 2 2 6 2 2 6 2 2 6
43229- 2 2 6 2 2 6 6 6 6 70 70 70
43230- 78 78 78 46 46 46 22 22 22 6 6 6
43231- 0 0 0 0 0 0 0 0 0 0 0 0
43232- 0 0 0 0 0 0 0 0 0 0 0 0
43233- 0 0 0 0 0 0 0 0 0 0 0 0
43234- 0 0 0 0 0 0 0 0 0 0 0 0
43235- 0 0 0 0 0 0 0 0 0 0 0 0
43236- 0 0 0 0 0 0 0 0 0 0 0 0
43237- 0 0 0 0 0 0 0 0 0 0 0 0
43238- 0 0 0 0 0 0 0 0 0 0 0 0
43239- 0 0 1 0 0 1 0 0 1 0 0 0
43240- 0 0 0 0 0 0 0 0 0 0 0 0
43241- 0 0 0 0 0 0 0 0 0 0 0 0
43242- 0 0 0 0 0 0 0 0 0 0 0 0
43243- 0 0 0 0 0 0 0 0 0 0 0 0
43244- 0 0 0 0 0 0 0 0 0 0 0 0
43245- 6 6 6 18 18 18 42 42 42 82 82 82
43246- 26 26 26 2 2 6 2 2 6 2 2 6
43247- 2 2 6 2 2 6 2 2 6 2 2 6
43248- 2 2 6 2 2 6 2 2 6 14 14 14
43249- 46 46 46 34 34 34 6 6 6 2 2 6
43250- 42 42 42 78 78 78 42 42 42 18 18 18
43251- 6 6 6 0 0 0 0 0 0 0 0 0
43252- 0 0 0 0 0 0 0 0 0 0 0 0
43253- 0 0 0 0 0 0 0 0 0 0 0 0
43254- 0 0 0 0 0 0 0 0 0 0 0 0
43255- 0 0 0 0 0 0 0 0 0 0 0 0
43256- 0 0 0 0 0 0 0 0 0 0 0 0
43257- 0 0 0 0 0 0 0 0 0 0 0 0
43258- 0 0 0 0 0 0 0 0 0 0 0 0
43259- 0 0 1 0 0 0 0 0 1 0 0 0
43260- 0 0 0 0 0 0 0 0 0 0 0 0
43261- 0 0 0 0 0 0 0 0 0 0 0 0
43262- 0 0 0 0 0 0 0 0 0 0 0 0
43263- 0 0 0 0 0 0 0 0 0 0 0 0
43264- 0 0 0 0 0 0 0 0 0 0 0 0
43265- 10 10 10 30 30 30 66 66 66 58 58 58
43266- 2 2 6 2 2 6 2 2 6 2 2 6
43267- 2 2 6 2 2 6 2 2 6 2 2 6
43268- 2 2 6 2 2 6 2 2 6 26 26 26
43269- 86 86 86 101 101 101 46 46 46 10 10 10
43270- 2 2 6 58 58 58 70 70 70 34 34 34
43271- 10 10 10 0 0 0 0 0 0 0 0 0
43272- 0 0 0 0 0 0 0 0 0 0 0 0
43273- 0 0 0 0 0 0 0 0 0 0 0 0
43274- 0 0 0 0 0 0 0 0 0 0 0 0
43275- 0 0 0 0 0 0 0 0 0 0 0 0
43276- 0 0 0 0 0 0 0 0 0 0 0 0
43277- 0 0 0 0 0 0 0 0 0 0 0 0
43278- 0 0 0 0 0 0 0 0 0 0 0 0
43279- 0 0 1 0 0 1 0 0 1 0 0 0
43280- 0 0 0 0 0 0 0 0 0 0 0 0
43281- 0 0 0 0 0 0 0 0 0 0 0 0
43282- 0 0 0 0 0 0 0 0 0 0 0 0
43283- 0 0 0 0 0 0 0 0 0 0 0 0
43284- 0 0 0 0 0 0 0 0 0 0 0 0
43285- 14 14 14 42 42 42 86 86 86 10 10 10
43286- 2 2 6 2 2 6 2 2 6 2 2 6
43287- 2 2 6 2 2 6 2 2 6 2 2 6
43288- 2 2 6 2 2 6 2 2 6 30 30 30
43289- 94 94 94 94 94 94 58 58 58 26 26 26
43290- 2 2 6 6 6 6 78 78 78 54 54 54
43291- 22 22 22 6 6 6 0 0 0 0 0 0
43292- 0 0 0 0 0 0 0 0 0 0 0 0
43293- 0 0 0 0 0 0 0 0 0 0 0 0
43294- 0 0 0 0 0 0 0 0 0 0 0 0
43295- 0 0 0 0 0 0 0 0 0 0 0 0
43296- 0 0 0 0 0 0 0 0 0 0 0 0
43297- 0 0 0 0 0 0 0 0 0 0 0 0
43298- 0 0 0 0 0 0 0 0 0 0 0 0
43299- 0 0 0 0 0 0 0 0 0 0 0 0
43300- 0 0 0 0 0 0 0 0 0 0 0 0
43301- 0 0 0 0 0 0 0 0 0 0 0 0
43302- 0 0 0 0 0 0 0 0 0 0 0 0
43303- 0 0 0 0 0 0 0 0 0 0 0 0
43304- 0 0 0 0 0 0 0 0 0 6 6 6
43305- 22 22 22 62 62 62 62 62 62 2 2 6
43306- 2 2 6 2 2 6 2 2 6 2 2 6
43307- 2 2 6 2 2 6 2 2 6 2 2 6
43308- 2 2 6 2 2 6 2 2 6 26 26 26
43309- 54 54 54 38 38 38 18 18 18 10 10 10
43310- 2 2 6 2 2 6 34 34 34 82 82 82
43311- 38 38 38 14 14 14 0 0 0 0 0 0
43312- 0 0 0 0 0 0 0 0 0 0 0 0
43313- 0 0 0 0 0 0 0 0 0 0 0 0
43314- 0 0 0 0 0 0 0 0 0 0 0 0
43315- 0 0 0 0 0 0 0 0 0 0 0 0
43316- 0 0 0 0 0 0 0 0 0 0 0 0
43317- 0 0 0 0 0 0 0 0 0 0 0 0
43318- 0 0 0 0 0 0 0 0 0 0 0 0
43319- 0 0 0 0 0 1 0 0 1 0 0 0
43320- 0 0 0 0 0 0 0 0 0 0 0 0
43321- 0 0 0 0 0 0 0 0 0 0 0 0
43322- 0 0 0 0 0 0 0 0 0 0 0 0
43323- 0 0 0 0 0 0 0 0 0 0 0 0
43324- 0 0 0 0 0 0 0 0 0 6 6 6
43325- 30 30 30 78 78 78 30 30 30 2 2 6
43326- 2 2 6 2 2 6 2 2 6 2 2 6
43327- 2 2 6 2 2 6 2 2 6 2 2 6
43328- 2 2 6 2 2 6 2 2 6 10 10 10
43329- 10 10 10 2 2 6 2 2 6 2 2 6
43330- 2 2 6 2 2 6 2 2 6 78 78 78
43331- 50 50 50 18 18 18 6 6 6 0 0 0
43332- 0 0 0 0 0 0 0 0 0 0 0 0
43333- 0 0 0 0 0 0 0 0 0 0 0 0
43334- 0 0 0 0 0 0 0 0 0 0 0 0
43335- 0 0 0 0 0 0 0 0 0 0 0 0
43336- 0 0 0 0 0 0 0 0 0 0 0 0
43337- 0 0 0 0 0 0 0 0 0 0 0 0
43338- 0 0 0 0 0 0 0 0 0 0 0 0
43339- 0 0 1 0 0 0 0 0 0 0 0 0
43340- 0 0 0 0 0 0 0 0 0 0 0 0
43341- 0 0 0 0 0 0 0 0 0 0 0 0
43342- 0 0 0 0 0 0 0 0 0 0 0 0
43343- 0 0 0 0 0 0 0 0 0 0 0 0
43344- 0 0 0 0 0 0 0 0 0 10 10 10
43345- 38 38 38 86 86 86 14 14 14 2 2 6
43346- 2 2 6 2 2 6 2 2 6 2 2 6
43347- 2 2 6 2 2 6 2 2 6 2 2 6
43348- 2 2 6 2 2 6 2 2 6 2 2 6
43349- 2 2 6 2 2 6 2 2 6 2 2 6
43350- 2 2 6 2 2 6 2 2 6 54 54 54
43351- 66 66 66 26 26 26 6 6 6 0 0 0
43352- 0 0 0 0 0 0 0 0 0 0 0 0
43353- 0 0 0 0 0 0 0 0 0 0 0 0
43354- 0 0 0 0 0 0 0 0 0 0 0 0
43355- 0 0 0 0 0 0 0 0 0 0 0 0
43356- 0 0 0 0 0 0 0 0 0 0 0 0
43357- 0 0 0 0 0 0 0 0 0 0 0 0
43358- 0 0 0 0 0 0 0 0 0 0 0 0
43359- 0 0 0 0 0 1 0 0 1 0 0 0
43360- 0 0 0 0 0 0 0 0 0 0 0 0
43361- 0 0 0 0 0 0 0 0 0 0 0 0
43362- 0 0 0 0 0 0 0 0 0 0 0 0
43363- 0 0 0 0 0 0 0 0 0 0 0 0
43364- 0 0 0 0 0 0 0 0 0 14 14 14
43365- 42 42 42 82 82 82 2 2 6 2 2 6
43366- 2 2 6 6 6 6 10 10 10 2 2 6
43367- 2 2 6 2 2 6 2 2 6 2 2 6
43368- 2 2 6 2 2 6 2 2 6 6 6 6
43369- 14 14 14 10 10 10 2 2 6 2 2 6
43370- 2 2 6 2 2 6 2 2 6 18 18 18
43371- 82 82 82 34 34 34 10 10 10 0 0 0
43372- 0 0 0 0 0 0 0 0 0 0 0 0
43373- 0 0 0 0 0 0 0 0 0 0 0 0
43374- 0 0 0 0 0 0 0 0 0 0 0 0
43375- 0 0 0 0 0 0 0 0 0 0 0 0
43376- 0 0 0 0 0 0 0 0 0 0 0 0
43377- 0 0 0 0 0 0 0 0 0 0 0 0
43378- 0 0 0 0 0 0 0 0 0 0 0 0
43379- 0 0 1 0 0 0 0 0 0 0 0 0
43380- 0 0 0 0 0 0 0 0 0 0 0 0
43381- 0 0 0 0 0 0 0 0 0 0 0 0
43382- 0 0 0 0 0 0 0 0 0 0 0 0
43383- 0 0 0 0 0 0 0 0 0 0 0 0
43384- 0 0 0 0 0 0 0 0 0 14 14 14
43385- 46 46 46 86 86 86 2 2 6 2 2 6
43386- 6 6 6 6 6 6 22 22 22 34 34 34
43387- 6 6 6 2 2 6 2 2 6 2 2 6
43388- 2 2 6 2 2 6 18 18 18 34 34 34
43389- 10 10 10 50 50 50 22 22 22 2 2 6
43390- 2 2 6 2 2 6 2 2 6 10 10 10
43391- 86 86 86 42 42 42 14 14 14 0 0 0
43392- 0 0 0 0 0 0 0 0 0 0 0 0
43393- 0 0 0 0 0 0 0 0 0 0 0 0
43394- 0 0 0 0 0 0 0 0 0 0 0 0
43395- 0 0 0 0 0 0 0 0 0 0 0 0
43396- 0 0 0 0 0 0 0 0 0 0 0 0
43397- 0 0 0 0 0 0 0 0 0 0 0 0
43398- 0 0 0 0 0 0 0 0 0 0 0 0
43399- 0 0 1 0 0 1 0 0 1 0 0 0
43400- 0 0 0 0 0 0 0 0 0 0 0 0
43401- 0 0 0 0 0 0 0 0 0 0 0 0
43402- 0 0 0 0 0 0 0 0 0 0 0 0
43403- 0 0 0 0 0 0 0 0 0 0 0 0
43404- 0 0 0 0 0 0 0 0 0 14 14 14
43405- 46 46 46 86 86 86 2 2 6 2 2 6
43406- 38 38 38 116 116 116 94 94 94 22 22 22
43407- 22 22 22 2 2 6 2 2 6 2 2 6
43408- 14 14 14 86 86 86 138 138 138 162 162 162
43409-154 154 154 38 38 38 26 26 26 6 6 6
43410- 2 2 6 2 2 6 2 2 6 2 2 6
43411- 86 86 86 46 46 46 14 14 14 0 0 0
43412- 0 0 0 0 0 0 0 0 0 0 0 0
43413- 0 0 0 0 0 0 0 0 0 0 0 0
43414- 0 0 0 0 0 0 0 0 0 0 0 0
43415- 0 0 0 0 0 0 0 0 0 0 0 0
43416- 0 0 0 0 0 0 0 0 0 0 0 0
43417- 0 0 0 0 0 0 0 0 0 0 0 0
43418- 0 0 0 0 0 0 0 0 0 0 0 0
43419- 0 0 0 0 0 0 0 0 0 0 0 0
43420- 0 0 0 0 0 0 0 0 0 0 0 0
43421- 0 0 0 0 0 0 0 0 0 0 0 0
43422- 0 0 0 0 0 0 0 0 0 0 0 0
43423- 0 0 0 0 0 0 0 0 0 0 0 0
43424- 0 0 0 0 0 0 0 0 0 14 14 14
43425- 46 46 46 86 86 86 2 2 6 14 14 14
43426-134 134 134 198 198 198 195 195 195 116 116 116
43427- 10 10 10 2 2 6 2 2 6 6 6 6
43428-101 98 89 187 187 187 210 210 210 218 218 218
43429-214 214 214 134 134 134 14 14 14 6 6 6
43430- 2 2 6 2 2 6 2 2 6 2 2 6
43431- 86 86 86 50 50 50 18 18 18 6 6 6
43432- 0 0 0 0 0 0 0 0 0 0 0 0
43433- 0 0 0 0 0 0 0 0 0 0 0 0
43434- 0 0 0 0 0 0 0 0 0 0 0 0
43435- 0 0 0 0 0 0 0 0 0 0 0 0
43436- 0 0 0 0 0 0 0 0 0 0 0 0
43437- 0 0 0 0 0 0 0 0 0 0 0 0
43438- 0 0 0 0 0 0 0 0 1 0 0 0
43439- 0 0 1 0 0 1 0 0 1 0 0 0
43440- 0 0 0 0 0 0 0 0 0 0 0 0
43441- 0 0 0 0 0 0 0 0 0 0 0 0
43442- 0 0 0 0 0 0 0 0 0 0 0 0
43443- 0 0 0 0 0 0 0 0 0 0 0 0
43444- 0 0 0 0 0 0 0 0 0 14 14 14
43445- 46 46 46 86 86 86 2 2 6 54 54 54
43446-218 218 218 195 195 195 226 226 226 246 246 246
43447- 58 58 58 2 2 6 2 2 6 30 30 30
43448-210 210 210 253 253 253 174 174 174 123 123 123
43449-221 221 221 234 234 234 74 74 74 2 2 6
43450- 2 2 6 2 2 6 2 2 6 2 2 6
43451- 70 70 70 58 58 58 22 22 22 6 6 6
43452- 0 0 0 0 0 0 0 0 0 0 0 0
43453- 0 0 0 0 0 0 0 0 0 0 0 0
43454- 0 0 0 0 0 0 0 0 0 0 0 0
43455- 0 0 0 0 0 0 0 0 0 0 0 0
43456- 0 0 0 0 0 0 0 0 0 0 0 0
43457- 0 0 0 0 0 0 0 0 0 0 0 0
43458- 0 0 0 0 0 0 0 0 0 0 0 0
43459- 0 0 0 0 0 0 0 0 0 0 0 0
43460- 0 0 0 0 0 0 0 0 0 0 0 0
43461- 0 0 0 0 0 0 0 0 0 0 0 0
43462- 0 0 0 0 0 0 0 0 0 0 0 0
43463- 0 0 0 0 0 0 0 0 0 0 0 0
43464- 0 0 0 0 0 0 0 0 0 14 14 14
43465- 46 46 46 82 82 82 2 2 6 106 106 106
43466-170 170 170 26 26 26 86 86 86 226 226 226
43467-123 123 123 10 10 10 14 14 14 46 46 46
43468-231 231 231 190 190 190 6 6 6 70 70 70
43469- 90 90 90 238 238 238 158 158 158 2 2 6
43470- 2 2 6 2 2 6 2 2 6 2 2 6
43471- 70 70 70 58 58 58 22 22 22 6 6 6
43472- 0 0 0 0 0 0 0 0 0 0 0 0
43473- 0 0 0 0 0 0 0 0 0 0 0 0
43474- 0 0 0 0 0 0 0 0 0 0 0 0
43475- 0 0 0 0 0 0 0 0 0 0 0 0
43476- 0 0 0 0 0 0 0 0 0 0 0 0
43477- 0 0 0 0 0 0 0 0 0 0 0 0
43478- 0 0 0 0 0 0 0 0 1 0 0 0
43479- 0 0 1 0 0 1 0 0 1 0 0 0
43480- 0 0 0 0 0 0 0 0 0 0 0 0
43481- 0 0 0 0 0 0 0 0 0 0 0 0
43482- 0 0 0 0 0 0 0 0 0 0 0 0
43483- 0 0 0 0 0 0 0 0 0 0 0 0
43484- 0 0 0 0 0 0 0 0 0 14 14 14
43485- 42 42 42 86 86 86 6 6 6 116 116 116
43486-106 106 106 6 6 6 70 70 70 149 149 149
43487-128 128 128 18 18 18 38 38 38 54 54 54
43488-221 221 221 106 106 106 2 2 6 14 14 14
43489- 46 46 46 190 190 190 198 198 198 2 2 6
43490- 2 2 6 2 2 6 2 2 6 2 2 6
43491- 74 74 74 62 62 62 22 22 22 6 6 6
43492- 0 0 0 0 0 0 0 0 0 0 0 0
43493- 0 0 0 0 0 0 0 0 0 0 0 0
43494- 0 0 0 0 0 0 0 0 0 0 0 0
43495- 0 0 0 0 0 0 0 0 0 0 0 0
43496- 0 0 0 0 0 0 0 0 0 0 0 0
43497- 0 0 0 0 0 0 0 0 0 0 0 0
43498- 0 0 0 0 0 0 0 0 1 0 0 0
43499- 0 0 1 0 0 0 0 0 1 0 0 0
43500- 0 0 0 0 0 0 0 0 0 0 0 0
43501- 0 0 0 0 0 0 0 0 0 0 0 0
43502- 0 0 0 0 0 0 0 0 0 0 0 0
43503- 0 0 0 0 0 0 0 0 0 0 0 0
43504- 0 0 0 0 0 0 0 0 0 14 14 14
43505- 42 42 42 94 94 94 14 14 14 101 101 101
43506-128 128 128 2 2 6 18 18 18 116 116 116
43507-118 98 46 121 92 8 121 92 8 98 78 10
43508-162 162 162 106 106 106 2 2 6 2 2 6
43509- 2 2 6 195 195 195 195 195 195 6 6 6
43510- 2 2 6 2 2 6 2 2 6 2 2 6
43511- 74 74 74 62 62 62 22 22 22 6 6 6
43512- 0 0 0 0 0 0 0 0 0 0 0 0
43513- 0 0 0 0 0 0 0 0 0 0 0 0
43514- 0 0 0 0 0 0 0 0 0 0 0 0
43515- 0 0 0 0 0 0 0 0 0 0 0 0
43516- 0 0 0 0 0 0 0 0 0 0 0 0
43517- 0 0 0 0 0 0 0 0 0 0 0 0
43518- 0 0 0 0 0 0 0 0 1 0 0 1
43519- 0 0 1 0 0 0 0 0 1 0 0 0
43520- 0 0 0 0 0 0 0 0 0 0 0 0
43521- 0 0 0 0 0 0 0 0 0 0 0 0
43522- 0 0 0 0 0 0 0 0 0 0 0 0
43523- 0 0 0 0 0 0 0 0 0 0 0 0
43524- 0 0 0 0 0 0 0 0 0 10 10 10
43525- 38 38 38 90 90 90 14 14 14 58 58 58
43526-210 210 210 26 26 26 54 38 6 154 114 10
43527-226 170 11 236 186 11 225 175 15 184 144 12
43528-215 174 15 175 146 61 37 26 9 2 2 6
43529- 70 70 70 246 246 246 138 138 138 2 2 6
43530- 2 2 6 2 2 6 2 2 6 2 2 6
43531- 70 70 70 66 66 66 26 26 26 6 6 6
43532- 0 0 0 0 0 0 0 0 0 0 0 0
43533- 0 0 0 0 0 0 0 0 0 0 0 0
43534- 0 0 0 0 0 0 0 0 0 0 0 0
43535- 0 0 0 0 0 0 0 0 0 0 0 0
43536- 0 0 0 0 0 0 0 0 0 0 0 0
43537- 0 0 0 0 0 0 0 0 0 0 0 0
43538- 0 0 0 0 0 0 0 0 0 0 0 0
43539- 0 0 0 0 0 0 0 0 0 0 0 0
43540- 0 0 0 0 0 0 0 0 0 0 0 0
43541- 0 0 0 0 0 0 0 0 0 0 0 0
43542- 0 0 0 0 0 0 0 0 0 0 0 0
43543- 0 0 0 0 0 0 0 0 0 0 0 0
43544- 0 0 0 0 0 0 0 0 0 10 10 10
43545- 38 38 38 86 86 86 14 14 14 10 10 10
43546-195 195 195 188 164 115 192 133 9 225 175 15
43547-239 182 13 234 190 10 232 195 16 232 200 30
43548-245 207 45 241 208 19 232 195 16 184 144 12
43549-218 194 134 211 206 186 42 42 42 2 2 6
43550- 2 2 6 2 2 6 2 2 6 2 2 6
43551- 50 50 50 74 74 74 30 30 30 6 6 6
43552- 0 0 0 0 0 0 0 0 0 0 0 0
43553- 0 0 0 0 0 0 0 0 0 0 0 0
43554- 0 0 0 0 0 0 0 0 0 0 0 0
43555- 0 0 0 0 0 0 0 0 0 0 0 0
43556- 0 0 0 0 0 0 0 0 0 0 0 0
43557- 0 0 0 0 0 0 0 0 0 0 0 0
43558- 0 0 0 0 0 0 0 0 0 0 0 0
43559- 0 0 0 0 0 0 0 0 0 0 0 0
43560- 0 0 0 0 0 0 0 0 0 0 0 0
43561- 0 0 0 0 0 0 0 0 0 0 0 0
43562- 0 0 0 0 0 0 0 0 0 0 0 0
43563- 0 0 0 0 0 0 0 0 0 0 0 0
43564- 0 0 0 0 0 0 0 0 0 10 10 10
43565- 34 34 34 86 86 86 14 14 14 2 2 6
43566-121 87 25 192 133 9 219 162 10 239 182 13
43567-236 186 11 232 195 16 241 208 19 244 214 54
43568-246 218 60 246 218 38 246 215 20 241 208 19
43569-241 208 19 226 184 13 121 87 25 2 2 6
43570- 2 2 6 2 2 6 2 2 6 2 2 6
43571- 50 50 50 82 82 82 34 34 34 10 10 10
43572- 0 0 0 0 0 0 0 0 0 0 0 0
43573- 0 0 0 0 0 0 0 0 0 0 0 0
43574- 0 0 0 0 0 0 0 0 0 0 0 0
43575- 0 0 0 0 0 0 0 0 0 0 0 0
43576- 0 0 0 0 0 0 0 0 0 0 0 0
43577- 0 0 0 0 0 0 0 0 0 0 0 0
43578- 0 0 0 0 0 0 0 0 0 0 0 0
43579- 0 0 0 0 0 0 0 0 0 0 0 0
43580- 0 0 0 0 0 0 0 0 0 0 0 0
43581- 0 0 0 0 0 0 0 0 0 0 0 0
43582- 0 0 0 0 0 0 0 0 0 0 0 0
43583- 0 0 0 0 0 0 0 0 0 0 0 0
43584- 0 0 0 0 0 0 0 0 0 10 10 10
43585- 34 34 34 82 82 82 30 30 30 61 42 6
43586-180 123 7 206 145 10 230 174 11 239 182 13
43587-234 190 10 238 202 15 241 208 19 246 218 74
43588-246 218 38 246 215 20 246 215 20 246 215 20
43589-226 184 13 215 174 15 184 144 12 6 6 6
43590- 2 2 6 2 2 6 2 2 6 2 2 6
43591- 26 26 26 94 94 94 42 42 42 14 14 14
43592- 0 0 0 0 0 0 0 0 0 0 0 0
43593- 0 0 0 0 0 0 0 0 0 0 0 0
43594- 0 0 0 0 0 0 0 0 0 0 0 0
43595- 0 0 0 0 0 0 0 0 0 0 0 0
43596- 0 0 0 0 0 0 0 0 0 0 0 0
43597- 0 0 0 0 0 0 0 0 0 0 0 0
43598- 0 0 0 0 0 0 0 0 0 0 0 0
43599- 0 0 0 0 0 0 0 0 0 0 0 0
43600- 0 0 0 0 0 0 0 0 0 0 0 0
43601- 0 0 0 0 0 0 0 0 0 0 0 0
43602- 0 0 0 0 0 0 0 0 0 0 0 0
43603- 0 0 0 0 0 0 0 0 0 0 0 0
43604- 0 0 0 0 0 0 0 0 0 10 10 10
43605- 30 30 30 78 78 78 50 50 50 104 69 6
43606-192 133 9 216 158 10 236 178 12 236 186 11
43607-232 195 16 241 208 19 244 214 54 245 215 43
43608-246 215 20 246 215 20 241 208 19 198 155 10
43609-200 144 11 216 158 10 156 118 10 2 2 6
43610- 2 2 6 2 2 6 2 2 6 2 2 6
43611- 6 6 6 90 90 90 54 54 54 18 18 18
43612- 6 6 6 0 0 0 0 0 0 0 0 0
43613- 0 0 0 0 0 0 0 0 0 0 0 0
43614- 0 0 0 0 0 0 0 0 0 0 0 0
43615- 0 0 0 0 0 0 0 0 0 0 0 0
43616- 0 0 0 0 0 0 0 0 0 0 0 0
43617- 0 0 0 0 0 0 0 0 0 0 0 0
43618- 0 0 0 0 0 0 0 0 0 0 0 0
43619- 0 0 0 0 0 0 0 0 0 0 0 0
43620- 0 0 0 0 0 0 0 0 0 0 0 0
43621- 0 0 0 0 0 0 0 0 0 0 0 0
43622- 0 0 0 0 0 0 0 0 0 0 0 0
43623- 0 0 0 0 0 0 0 0 0 0 0 0
43624- 0 0 0 0 0 0 0 0 0 10 10 10
43625- 30 30 30 78 78 78 46 46 46 22 22 22
43626-137 92 6 210 162 10 239 182 13 238 190 10
43627-238 202 15 241 208 19 246 215 20 246 215 20
43628-241 208 19 203 166 17 185 133 11 210 150 10
43629-216 158 10 210 150 10 102 78 10 2 2 6
43630- 6 6 6 54 54 54 14 14 14 2 2 6
43631- 2 2 6 62 62 62 74 74 74 30 30 30
43632- 10 10 10 0 0 0 0 0 0 0 0 0
43633- 0 0 0 0 0 0 0 0 0 0 0 0
43634- 0 0 0 0 0 0 0 0 0 0 0 0
43635- 0 0 0 0 0 0 0 0 0 0 0 0
43636- 0 0 0 0 0 0 0 0 0 0 0 0
43637- 0 0 0 0 0 0 0 0 0 0 0 0
43638- 0 0 0 0 0 0 0 0 0 0 0 0
43639- 0 0 0 0 0 0 0 0 0 0 0 0
43640- 0 0 0 0 0 0 0 0 0 0 0 0
43641- 0 0 0 0 0 0 0 0 0 0 0 0
43642- 0 0 0 0 0 0 0 0 0 0 0 0
43643- 0 0 0 0 0 0 0 0 0 0 0 0
43644- 0 0 0 0 0 0 0 0 0 10 10 10
43645- 34 34 34 78 78 78 50 50 50 6 6 6
43646- 94 70 30 139 102 15 190 146 13 226 184 13
43647-232 200 30 232 195 16 215 174 15 190 146 13
43648-168 122 10 192 133 9 210 150 10 213 154 11
43649-202 150 34 182 157 106 101 98 89 2 2 6
43650- 2 2 6 78 78 78 116 116 116 58 58 58
43651- 2 2 6 22 22 22 90 90 90 46 46 46
43652- 18 18 18 6 6 6 0 0 0 0 0 0
43653- 0 0 0 0 0 0 0 0 0 0 0 0
43654- 0 0 0 0 0 0 0 0 0 0 0 0
43655- 0 0 0 0 0 0 0 0 0 0 0 0
43656- 0 0 0 0 0 0 0 0 0 0 0 0
43657- 0 0 0 0 0 0 0 0 0 0 0 0
43658- 0 0 0 0 0 0 0 0 0 0 0 0
43659- 0 0 0 0 0 0 0 0 0 0 0 0
43660- 0 0 0 0 0 0 0 0 0 0 0 0
43661- 0 0 0 0 0 0 0 0 0 0 0 0
43662- 0 0 0 0 0 0 0 0 0 0 0 0
43663- 0 0 0 0 0 0 0 0 0 0 0 0
43664- 0 0 0 0 0 0 0 0 0 10 10 10
43665- 38 38 38 86 86 86 50 50 50 6 6 6
43666-128 128 128 174 154 114 156 107 11 168 122 10
43667-198 155 10 184 144 12 197 138 11 200 144 11
43668-206 145 10 206 145 10 197 138 11 188 164 115
43669-195 195 195 198 198 198 174 174 174 14 14 14
43670- 2 2 6 22 22 22 116 116 116 116 116 116
43671- 22 22 22 2 2 6 74 74 74 70 70 70
43672- 30 30 30 10 10 10 0 0 0 0 0 0
43673- 0 0 0 0 0 0 0 0 0 0 0 0
43674- 0 0 0 0 0 0 0 0 0 0 0 0
43675- 0 0 0 0 0 0 0 0 0 0 0 0
43676- 0 0 0 0 0 0 0 0 0 0 0 0
43677- 0 0 0 0 0 0 0 0 0 0 0 0
43678- 0 0 0 0 0 0 0 0 0 0 0 0
43679- 0 0 0 0 0 0 0 0 0 0 0 0
43680- 0 0 0 0 0 0 0 0 0 0 0 0
43681- 0 0 0 0 0 0 0 0 0 0 0 0
43682- 0 0 0 0 0 0 0 0 0 0 0 0
43683- 0 0 0 0 0 0 0 0 0 0 0 0
43684- 0 0 0 0 0 0 6 6 6 18 18 18
43685- 50 50 50 101 101 101 26 26 26 10 10 10
43686-138 138 138 190 190 190 174 154 114 156 107 11
43687-197 138 11 200 144 11 197 138 11 192 133 9
43688-180 123 7 190 142 34 190 178 144 187 187 187
43689-202 202 202 221 221 221 214 214 214 66 66 66
43690- 2 2 6 2 2 6 50 50 50 62 62 62
43691- 6 6 6 2 2 6 10 10 10 90 90 90
43692- 50 50 50 18 18 18 6 6 6 0 0 0
43693- 0 0 0 0 0 0 0 0 0 0 0 0
43694- 0 0 0 0 0 0 0 0 0 0 0 0
43695- 0 0 0 0 0 0 0 0 0 0 0 0
43696- 0 0 0 0 0 0 0 0 0 0 0 0
43697- 0 0 0 0 0 0 0 0 0 0 0 0
43698- 0 0 0 0 0 0 0 0 0 0 0 0
43699- 0 0 0 0 0 0 0 0 0 0 0 0
43700- 0 0 0 0 0 0 0 0 0 0 0 0
43701- 0 0 0 0 0 0 0 0 0 0 0 0
43702- 0 0 0 0 0 0 0 0 0 0 0 0
43703- 0 0 0 0 0 0 0 0 0 0 0 0
43704- 0 0 0 0 0 0 10 10 10 34 34 34
43705- 74 74 74 74 74 74 2 2 6 6 6 6
43706-144 144 144 198 198 198 190 190 190 178 166 146
43707-154 121 60 156 107 11 156 107 11 168 124 44
43708-174 154 114 187 187 187 190 190 190 210 210 210
43709-246 246 246 253 253 253 253 253 253 182 182 182
43710- 6 6 6 2 2 6 2 2 6 2 2 6
43711- 2 2 6 2 2 6 2 2 6 62 62 62
43712- 74 74 74 34 34 34 14 14 14 0 0 0
43713- 0 0 0 0 0 0 0 0 0 0 0 0
43714- 0 0 0 0 0 0 0 0 0 0 0 0
43715- 0 0 0 0 0 0 0 0 0 0 0 0
43716- 0 0 0 0 0 0 0 0 0 0 0 0
43717- 0 0 0 0 0 0 0 0 0 0 0 0
43718- 0 0 0 0 0 0 0 0 0 0 0 0
43719- 0 0 0 0 0 0 0 0 0 0 0 0
43720- 0 0 0 0 0 0 0 0 0 0 0 0
43721- 0 0 0 0 0 0 0 0 0 0 0 0
43722- 0 0 0 0 0 0 0 0 0 0 0 0
43723- 0 0 0 0 0 0 0 0 0 0 0 0
43724- 0 0 0 10 10 10 22 22 22 54 54 54
43725- 94 94 94 18 18 18 2 2 6 46 46 46
43726-234 234 234 221 221 221 190 190 190 190 190 190
43727-190 190 190 187 187 187 187 187 187 190 190 190
43728-190 190 190 195 195 195 214 214 214 242 242 242
43729-253 253 253 253 253 253 253 253 253 253 253 253
43730- 82 82 82 2 2 6 2 2 6 2 2 6
43731- 2 2 6 2 2 6 2 2 6 14 14 14
43732- 86 86 86 54 54 54 22 22 22 6 6 6
43733- 0 0 0 0 0 0 0 0 0 0 0 0
43734- 0 0 0 0 0 0 0 0 0 0 0 0
43735- 0 0 0 0 0 0 0 0 0 0 0 0
43736- 0 0 0 0 0 0 0 0 0 0 0 0
43737- 0 0 0 0 0 0 0 0 0 0 0 0
43738- 0 0 0 0 0 0 0 0 0 0 0 0
43739- 0 0 0 0 0 0 0 0 0 0 0 0
43740- 0 0 0 0 0 0 0 0 0 0 0 0
43741- 0 0 0 0 0 0 0 0 0 0 0 0
43742- 0 0 0 0 0 0 0 0 0 0 0 0
43743- 0 0 0 0 0 0 0 0 0 0 0 0
43744- 6 6 6 18 18 18 46 46 46 90 90 90
43745- 46 46 46 18 18 18 6 6 6 182 182 182
43746-253 253 253 246 246 246 206 206 206 190 190 190
43747-190 190 190 190 190 190 190 190 190 190 190 190
43748-206 206 206 231 231 231 250 250 250 253 253 253
43749-253 253 253 253 253 253 253 253 253 253 253 253
43750-202 202 202 14 14 14 2 2 6 2 2 6
43751- 2 2 6 2 2 6 2 2 6 2 2 6
43752- 42 42 42 86 86 86 42 42 42 18 18 18
43753- 6 6 6 0 0 0 0 0 0 0 0 0
43754- 0 0 0 0 0 0 0 0 0 0 0 0
43755- 0 0 0 0 0 0 0 0 0 0 0 0
43756- 0 0 0 0 0 0 0 0 0 0 0 0
43757- 0 0 0 0 0 0 0 0 0 0 0 0
43758- 0 0 0 0 0 0 0 0 0 0 0 0
43759- 0 0 0 0 0 0 0 0 0 0 0 0
43760- 0 0 0 0 0 0 0 0 0 0 0 0
43761- 0 0 0 0 0 0 0 0 0 0 0 0
43762- 0 0 0 0 0 0 0 0 0 0 0 0
43763- 0 0 0 0 0 0 0 0 0 6 6 6
43764- 14 14 14 38 38 38 74 74 74 66 66 66
43765- 2 2 6 6 6 6 90 90 90 250 250 250
43766-253 253 253 253 253 253 238 238 238 198 198 198
43767-190 190 190 190 190 190 195 195 195 221 221 221
43768-246 246 246 253 253 253 253 253 253 253 253 253
43769-253 253 253 253 253 253 253 253 253 253 253 253
43770-253 253 253 82 82 82 2 2 6 2 2 6
43771- 2 2 6 2 2 6 2 2 6 2 2 6
43772- 2 2 6 78 78 78 70 70 70 34 34 34
43773- 14 14 14 6 6 6 0 0 0 0 0 0
43774- 0 0 0 0 0 0 0 0 0 0 0 0
43775- 0 0 0 0 0 0 0 0 0 0 0 0
43776- 0 0 0 0 0 0 0 0 0 0 0 0
43777- 0 0 0 0 0 0 0 0 0 0 0 0
43778- 0 0 0 0 0 0 0 0 0 0 0 0
43779- 0 0 0 0 0 0 0 0 0 0 0 0
43780- 0 0 0 0 0 0 0 0 0 0 0 0
43781- 0 0 0 0 0 0 0 0 0 0 0 0
43782- 0 0 0 0 0 0 0 0 0 0 0 0
43783- 0 0 0 0 0 0 0 0 0 14 14 14
43784- 34 34 34 66 66 66 78 78 78 6 6 6
43785- 2 2 6 18 18 18 218 218 218 253 253 253
43786-253 253 253 253 253 253 253 253 253 246 246 246
43787-226 226 226 231 231 231 246 246 246 253 253 253
43788-253 253 253 253 253 253 253 253 253 253 253 253
43789-253 253 253 253 253 253 253 253 253 253 253 253
43790-253 253 253 178 178 178 2 2 6 2 2 6
43791- 2 2 6 2 2 6 2 2 6 2 2 6
43792- 2 2 6 18 18 18 90 90 90 62 62 62
43793- 30 30 30 10 10 10 0 0 0 0 0 0
43794- 0 0 0 0 0 0 0 0 0 0 0 0
43795- 0 0 0 0 0 0 0 0 0 0 0 0
43796- 0 0 0 0 0 0 0 0 0 0 0 0
43797- 0 0 0 0 0 0 0 0 0 0 0 0
43798- 0 0 0 0 0 0 0 0 0 0 0 0
43799- 0 0 0 0 0 0 0 0 0 0 0 0
43800- 0 0 0 0 0 0 0 0 0 0 0 0
43801- 0 0 0 0 0 0 0 0 0 0 0 0
43802- 0 0 0 0 0 0 0 0 0 0 0 0
43803- 0 0 0 0 0 0 10 10 10 26 26 26
43804- 58 58 58 90 90 90 18 18 18 2 2 6
43805- 2 2 6 110 110 110 253 253 253 253 253 253
43806-253 253 253 253 253 253 253 253 253 253 253 253
43807-250 250 250 253 253 253 253 253 253 253 253 253
43808-253 253 253 253 253 253 253 253 253 253 253 253
43809-253 253 253 253 253 253 253 253 253 253 253 253
43810-253 253 253 231 231 231 18 18 18 2 2 6
43811- 2 2 6 2 2 6 2 2 6 2 2 6
43812- 2 2 6 2 2 6 18 18 18 94 94 94
43813- 54 54 54 26 26 26 10 10 10 0 0 0
43814- 0 0 0 0 0 0 0 0 0 0 0 0
43815- 0 0 0 0 0 0 0 0 0 0 0 0
43816- 0 0 0 0 0 0 0 0 0 0 0 0
43817- 0 0 0 0 0 0 0 0 0 0 0 0
43818- 0 0 0 0 0 0 0 0 0 0 0 0
43819- 0 0 0 0 0 0 0 0 0 0 0 0
43820- 0 0 0 0 0 0 0 0 0 0 0 0
43821- 0 0 0 0 0 0 0 0 0 0 0 0
43822- 0 0 0 0 0 0 0 0 0 0 0 0
43823- 0 0 0 6 6 6 22 22 22 50 50 50
43824- 90 90 90 26 26 26 2 2 6 2 2 6
43825- 14 14 14 195 195 195 250 250 250 253 253 253
43826-253 253 253 253 253 253 253 253 253 253 253 253
43827-253 253 253 253 253 253 253 253 253 253 253 253
43828-253 253 253 253 253 253 253 253 253 253 253 253
43829-253 253 253 253 253 253 253 253 253 253 253 253
43830-250 250 250 242 242 242 54 54 54 2 2 6
43831- 2 2 6 2 2 6 2 2 6 2 2 6
43832- 2 2 6 2 2 6 2 2 6 38 38 38
43833- 86 86 86 50 50 50 22 22 22 6 6 6
43834- 0 0 0 0 0 0 0 0 0 0 0 0
43835- 0 0 0 0 0 0 0 0 0 0 0 0
43836- 0 0 0 0 0 0 0 0 0 0 0 0
43837- 0 0 0 0 0 0 0 0 0 0 0 0
43838- 0 0 0 0 0 0 0 0 0 0 0 0
43839- 0 0 0 0 0 0 0 0 0 0 0 0
43840- 0 0 0 0 0 0 0 0 0 0 0 0
43841- 0 0 0 0 0 0 0 0 0 0 0 0
43842- 0 0 0 0 0 0 0 0 0 0 0 0
43843- 6 6 6 14 14 14 38 38 38 82 82 82
43844- 34 34 34 2 2 6 2 2 6 2 2 6
43845- 42 42 42 195 195 195 246 246 246 253 253 253
43846-253 253 253 253 253 253 253 253 253 250 250 250
43847-242 242 242 242 242 242 250 250 250 253 253 253
43848-253 253 253 253 253 253 253 253 253 253 253 253
43849-253 253 253 250 250 250 246 246 246 238 238 238
43850-226 226 226 231 231 231 101 101 101 6 6 6
43851- 2 2 6 2 2 6 2 2 6 2 2 6
43852- 2 2 6 2 2 6 2 2 6 2 2 6
43853- 38 38 38 82 82 82 42 42 42 14 14 14
43854- 6 6 6 0 0 0 0 0 0 0 0 0
43855- 0 0 0 0 0 0 0 0 0 0 0 0
43856- 0 0 0 0 0 0 0 0 0 0 0 0
43857- 0 0 0 0 0 0 0 0 0 0 0 0
43858- 0 0 0 0 0 0 0 0 0 0 0 0
43859- 0 0 0 0 0 0 0 0 0 0 0 0
43860- 0 0 0 0 0 0 0 0 0 0 0 0
43861- 0 0 0 0 0 0 0 0 0 0 0 0
43862- 0 0 0 0 0 0 0 0 0 0 0 0
43863- 10 10 10 26 26 26 62 62 62 66 66 66
43864- 2 2 6 2 2 6 2 2 6 6 6 6
43865- 70 70 70 170 170 170 206 206 206 234 234 234
43866-246 246 246 250 250 250 250 250 250 238 238 238
43867-226 226 226 231 231 231 238 238 238 250 250 250
43868-250 250 250 250 250 250 246 246 246 231 231 231
43869-214 214 214 206 206 206 202 202 202 202 202 202
43870-198 198 198 202 202 202 182 182 182 18 18 18
43871- 2 2 6 2 2 6 2 2 6 2 2 6
43872- 2 2 6 2 2 6 2 2 6 2 2 6
43873- 2 2 6 62 62 62 66 66 66 30 30 30
43874- 10 10 10 0 0 0 0 0 0 0 0 0
43875- 0 0 0 0 0 0 0 0 0 0 0 0
43876- 0 0 0 0 0 0 0 0 0 0 0 0
43877- 0 0 0 0 0 0 0 0 0 0 0 0
43878- 0 0 0 0 0 0 0 0 0 0 0 0
43879- 0 0 0 0 0 0 0 0 0 0 0 0
43880- 0 0 0 0 0 0 0 0 0 0 0 0
43881- 0 0 0 0 0 0 0 0 0 0 0 0
43882- 0 0 0 0 0 0 0 0 0 0 0 0
43883- 14 14 14 42 42 42 82 82 82 18 18 18
43884- 2 2 6 2 2 6 2 2 6 10 10 10
43885- 94 94 94 182 182 182 218 218 218 242 242 242
43886-250 250 250 253 253 253 253 253 253 250 250 250
43887-234 234 234 253 253 253 253 253 253 253 253 253
43888-253 253 253 253 253 253 253 253 253 246 246 246
43889-238 238 238 226 226 226 210 210 210 202 202 202
43890-195 195 195 195 195 195 210 210 210 158 158 158
43891- 6 6 6 14 14 14 50 50 50 14 14 14
43892- 2 2 6 2 2 6 2 2 6 2 2 6
43893- 2 2 6 6 6 6 86 86 86 46 46 46
43894- 18 18 18 6 6 6 0 0 0 0 0 0
43895- 0 0 0 0 0 0 0 0 0 0 0 0
43896- 0 0 0 0 0 0 0 0 0 0 0 0
43897- 0 0 0 0 0 0 0 0 0 0 0 0
43898- 0 0 0 0 0 0 0 0 0 0 0 0
43899- 0 0 0 0 0 0 0 0 0 0 0 0
43900- 0 0 0 0 0 0 0 0 0 0 0 0
43901- 0 0 0 0 0 0 0 0 0 0 0 0
43902- 0 0 0 0 0 0 0 0 0 6 6 6
43903- 22 22 22 54 54 54 70 70 70 2 2 6
43904- 2 2 6 10 10 10 2 2 6 22 22 22
43905-166 166 166 231 231 231 250 250 250 253 253 253
43906-253 253 253 253 253 253 253 253 253 250 250 250
43907-242 242 242 253 253 253 253 253 253 253 253 253
43908-253 253 253 253 253 253 253 253 253 253 253 253
43909-253 253 253 253 253 253 253 253 253 246 246 246
43910-231 231 231 206 206 206 198 198 198 226 226 226
43911- 94 94 94 2 2 6 6 6 6 38 38 38
43912- 30 30 30 2 2 6 2 2 6 2 2 6
43913- 2 2 6 2 2 6 62 62 62 66 66 66
43914- 26 26 26 10 10 10 0 0 0 0 0 0
43915- 0 0 0 0 0 0 0 0 0 0 0 0
43916- 0 0 0 0 0 0 0 0 0 0 0 0
43917- 0 0 0 0 0 0 0 0 0 0 0 0
43918- 0 0 0 0 0 0 0 0 0 0 0 0
43919- 0 0 0 0 0 0 0 0 0 0 0 0
43920- 0 0 0 0 0 0 0 0 0 0 0 0
43921- 0 0 0 0 0 0 0 0 0 0 0 0
43922- 0 0 0 0 0 0 0 0 0 10 10 10
43923- 30 30 30 74 74 74 50 50 50 2 2 6
43924- 26 26 26 26 26 26 2 2 6 106 106 106
43925-238 238 238 253 253 253 253 253 253 253 253 253
43926-253 253 253 253 253 253 253 253 253 253 253 253
43927-253 253 253 253 253 253 253 253 253 253 253 253
43928-253 253 253 253 253 253 253 253 253 253 253 253
43929-253 253 253 253 253 253 253 253 253 253 253 253
43930-253 253 253 246 246 246 218 218 218 202 202 202
43931-210 210 210 14 14 14 2 2 6 2 2 6
43932- 30 30 30 22 22 22 2 2 6 2 2 6
43933- 2 2 6 2 2 6 18 18 18 86 86 86
43934- 42 42 42 14 14 14 0 0 0 0 0 0
43935- 0 0 0 0 0 0 0 0 0 0 0 0
43936- 0 0 0 0 0 0 0 0 0 0 0 0
43937- 0 0 0 0 0 0 0 0 0 0 0 0
43938- 0 0 0 0 0 0 0 0 0 0 0 0
43939- 0 0 0 0 0 0 0 0 0 0 0 0
43940- 0 0 0 0 0 0 0 0 0 0 0 0
43941- 0 0 0 0 0 0 0 0 0 0 0 0
43942- 0 0 0 0 0 0 0 0 0 14 14 14
43943- 42 42 42 90 90 90 22 22 22 2 2 6
43944- 42 42 42 2 2 6 18 18 18 218 218 218
43945-253 253 253 253 253 253 253 253 253 253 253 253
43946-253 253 253 253 253 253 253 253 253 253 253 253
43947-253 253 253 253 253 253 253 253 253 253 253 253
43948-253 253 253 253 253 253 253 253 253 253 253 253
43949-253 253 253 253 253 253 253 253 253 253 253 253
43950-253 253 253 253 253 253 250 250 250 221 221 221
43951-218 218 218 101 101 101 2 2 6 14 14 14
43952- 18 18 18 38 38 38 10 10 10 2 2 6
43953- 2 2 6 2 2 6 2 2 6 78 78 78
43954- 58 58 58 22 22 22 6 6 6 0 0 0
43955- 0 0 0 0 0 0 0 0 0 0 0 0
43956- 0 0 0 0 0 0 0 0 0 0 0 0
43957- 0 0 0 0 0 0 0 0 0 0 0 0
43958- 0 0 0 0 0 0 0 0 0 0 0 0
43959- 0 0 0 0 0 0 0 0 0 0 0 0
43960- 0 0 0 0 0 0 0 0 0 0 0 0
43961- 0 0 0 0 0 0 0 0 0 0 0 0
43962- 0 0 0 0 0 0 6 6 6 18 18 18
43963- 54 54 54 82 82 82 2 2 6 26 26 26
43964- 22 22 22 2 2 6 123 123 123 253 253 253
43965-253 253 253 253 253 253 253 253 253 253 253 253
43966-253 253 253 253 253 253 253 253 253 253 253 253
43967-253 253 253 253 253 253 253 253 253 253 253 253
43968-253 253 253 253 253 253 253 253 253 253 253 253
43969-253 253 253 253 253 253 253 253 253 253 253 253
43970-253 253 253 253 253 253 253 253 253 250 250 250
43971-238 238 238 198 198 198 6 6 6 38 38 38
43972- 58 58 58 26 26 26 38 38 38 2 2 6
43973- 2 2 6 2 2 6 2 2 6 46 46 46
43974- 78 78 78 30 30 30 10 10 10 0 0 0
43975- 0 0 0 0 0 0 0 0 0 0 0 0
43976- 0 0 0 0 0 0 0 0 0 0 0 0
43977- 0 0 0 0 0 0 0 0 0 0 0 0
43978- 0 0 0 0 0 0 0 0 0 0 0 0
43979- 0 0 0 0 0 0 0 0 0 0 0 0
43980- 0 0 0 0 0 0 0 0 0 0 0 0
43981- 0 0 0 0 0 0 0 0 0 0 0 0
43982- 0 0 0 0 0 0 10 10 10 30 30 30
43983- 74 74 74 58 58 58 2 2 6 42 42 42
43984- 2 2 6 22 22 22 231 231 231 253 253 253
43985-253 253 253 253 253 253 253 253 253 253 253 253
43986-253 253 253 253 253 253 253 253 253 250 250 250
43987-253 253 253 253 253 253 253 253 253 253 253 253
43988-253 253 253 253 253 253 253 253 253 253 253 253
43989-253 253 253 253 253 253 253 253 253 253 253 253
43990-253 253 253 253 253 253 253 253 253 253 253 253
43991-253 253 253 246 246 246 46 46 46 38 38 38
43992- 42 42 42 14 14 14 38 38 38 14 14 14
43993- 2 2 6 2 2 6 2 2 6 6 6 6
43994- 86 86 86 46 46 46 14 14 14 0 0 0
43995- 0 0 0 0 0 0 0 0 0 0 0 0
43996- 0 0 0 0 0 0 0 0 0 0 0 0
43997- 0 0 0 0 0 0 0 0 0 0 0 0
43998- 0 0 0 0 0 0 0 0 0 0 0 0
43999- 0 0 0 0 0 0 0 0 0 0 0 0
44000- 0 0 0 0 0 0 0 0 0 0 0 0
44001- 0 0 0 0 0 0 0 0 0 0 0 0
44002- 0 0 0 6 6 6 14 14 14 42 42 42
44003- 90 90 90 18 18 18 18 18 18 26 26 26
44004- 2 2 6 116 116 116 253 253 253 253 253 253
44005-253 253 253 253 253 253 253 253 253 253 253 253
44006-253 253 253 253 253 253 250 250 250 238 238 238
44007-253 253 253 253 253 253 253 253 253 253 253 253
44008-253 253 253 253 253 253 253 253 253 253 253 253
44009-253 253 253 253 253 253 253 253 253 253 253 253
44010-253 253 253 253 253 253 253 253 253 253 253 253
44011-253 253 253 253 253 253 94 94 94 6 6 6
44012- 2 2 6 2 2 6 10 10 10 34 34 34
44013- 2 2 6 2 2 6 2 2 6 2 2 6
44014- 74 74 74 58 58 58 22 22 22 6 6 6
44015- 0 0 0 0 0 0 0 0 0 0 0 0
44016- 0 0 0 0 0 0 0 0 0 0 0 0
44017- 0 0 0 0 0 0 0 0 0 0 0 0
44018- 0 0 0 0 0 0 0 0 0 0 0 0
44019- 0 0 0 0 0 0 0 0 0 0 0 0
44020- 0 0 0 0 0 0 0 0 0 0 0 0
44021- 0 0 0 0 0 0 0 0 0 0 0 0
44022- 0 0 0 10 10 10 26 26 26 66 66 66
44023- 82 82 82 2 2 6 38 38 38 6 6 6
44024- 14 14 14 210 210 210 253 253 253 253 253 253
44025-253 253 253 253 253 253 253 253 253 253 253 253
44026-253 253 253 253 253 253 246 246 246 242 242 242
44027-253 253 253 253 253 253 253 253 253 253 253 253
44028-253 253 253 253 253 253 253 253 253 253 253 253
44029-253 253 253 253 253 253 253 253 253 253 253 253
44030-253 253 253 253 253 253 253 253 253 253 253 253
44031-253 253 253 253 253 253 144 144 144 2 2 6
44032- 2 2 6 2 2 6 2 2 6 46 46 46
44033- 2 2 6 2 2 6 2 2 6 2 2 6
44034- 42 42 42 74 74 74 30 30 30 10 10 10
44035- 0 0 0 0 0 0 0 0 0 0 0 0
44036- 0 0 0 0 0 0 0 0 0 0 0 0
44037- 0 0 0 0 0 0 0 0 0 0 0 0
44038- 0 0 0 0 0 0 0 0 0 0 0 0
44039- 0 0 0 0 0 0 0 0 0 0 0 0
44040- 0 0 0 0 0 0 0 0 0 0 0 0
44041- 0 0 0 0 0 0 0 0 0 0 0 0
44042- 6 6 6 14 14 14 42 42 42 90 90 90
44043- 26 26 26 6 6 6 42 42 42 2 2 6
44044- 74 74 74 250 250 250 253 253 253 253 253 253
44045-253 253 253 253 253 253 253 253 253 253 253 253
44046-253 253 253 253 253 253 242 242 242 242 242 242
44047-253 253 253 253 253 253 253 253 253 253 253 253
44048-253 253 253 253 253 253 253 253 253 253 253 253
44049-253 253 253 253 253 253 253 253 253 253 253 253
44050-253 253 253 253 253 253 253 253 253 253 253 253
44051-253 253 253 253 253 253 182 182 182 2 2 6
44052- 2 2 6 2 2 6 2 2 6 46 46 46
44053- 2 2 6 2 2 6 2 2 6 2 2 6
44054- 10 10 10 86 86 86 38 38 38 10 10 10
44055- 0 0 0 0 0 0 0 0 0 0 0 0
44056- 0 0 0 0 0 0 0 0 0 0 0 0
44057- 0 0 0 0 0 0 0 0 0 0 0 0
44058- 0 0 0 0 0 0 0 0 0 0 0 0
44059- 0 0 0 0 0 0 0 0 0 0 0 0
44060- 0 0 0 0 0 0 0 0 0 0 0 0
44061- 0 0 0 0 0 0 0 0 0 0 0 0
44062- 10 10 10 26 26 26 66 66 66 82 82 82
44063- 2 2 6 22 22 22 18 18 18 2 2 6
44064-149 149 149 253 253 253 253 253 253 253 253 253
44065-253 253 253 253 253 253 253 253 253 253 253 253
44066-253 253 253 253 253 253 234 234 234 242 242 242
44067-253 253 253 253 253 253 253 253 253 253 253 253
44068-253 253 253 253 253 253 253 253 253 253 253 253
44069-253 253 253 253 253 253 253 253 253 253 253 253
44070-253 253 253 253 253 253 253 253 253 253 253 253
44071-253 253 253 253 253 253 206 206 206 2 2 6
44072- 2 2 6 2 2 6 2 2 6 38 38 38
44073- 2 2 6 2 2 6 2 2 6 2 2 6
44074- 6 6 6 86 86 86 46 46 46 14 14 14
44075- 0 0 0 0 0 0 0 0 0 0 0 0
44076- 0 0 0 0 0 0 0 0 0 0 0 0
44077- 0 0 0 0 0 0 0 0 0 0 0 0
44078- 0 0 0 0 0 0 0 0 0 0 0 0
44079- 0 0 0 0 0 0 0 0 0 0 0 0
44080- 0 0 0 0 0 0 0 0 0 0 0 0
44081- 0 0 0 0 0 0 0 0 0 6 6 6
44082- 18 18 18 46 46 46 86 86 86 18 18 18
44083- 2 2 6 34 34 34 10 10 10 6 6 6
44084-210 210 210 253 253 253 253 253 253 253 253 253
44085-253 253 253 253 253 253 253 253 253 253 253 253
44086-253 253 253 253 253 253 234 234 234 242 242 242
44087-253 253 253 253 253 253 253 253 253 253 253 253
44088-253 253 253 253 253 253 253 253 253 253 253 253
44089-253 253 253 253 253 253 253 253 253 253 253 253
44090-253 253 253 253 253 253 253 253 253 253 253 253
44091-253 253 253 253 253 253 221 221 221 6 6 6
44092- 2 2 6 2 2 6 6 6 6 30 30 30
44093- 2 2 6 2 2 6 2 2 6 2 2 6
44094- 2 2 6 82 82 82 54 54 54 18 18 18
44095- 6 6 6 0 0 0 0 0 0 0 0 0
44096- 0 0 0 0 0 0 0 0 0 0 0 0
44097- 0 0 0 0 0 0 0 0 0 0 0 0
44098- 0 0 0 0 0 0 0 0 0 0 0 0
44099- 0 0 0 0 0 0 0 0 0 0 0 0
44100- 0 0 0 0 0 0 0 0 0 0 0 0
44101- 0 0 0 0 0 0 0 0 0 10 10 10
44102- 26 26 26 66 66 66 62 62 62 2 2 6
44103- 2 2 6 38 38 38 10 10 10 26 26 26
44104-238 238 238 253 253 253 253 253 253 253 253 253
44105-253 253 253 253 253 253 253 253 253 253 253 253
44106-253 253 253 253 253 253 231 231 231 238 238 238
44107-253 253 253 253 253 253 253 253 253 253 253 253
44108-253 253 253 253 253 253 253 253 253 253 253 253
44109-253 253 253 253 253 253 253 253 253 253 253 253
44110-253 253 253 253 253 253 253 253 253 253 253 253
44111-253 253 253 253 253 253 231 231 231 6 6 6
44112- 2 2 6 2 2 6 10 10 10 30 30 30
44113- 2 2 6 2 2 6 2 2 6 2 2 6
44114- 2 2 6 66 66 66 58 58 58 22 22 22
44115- 6 6 6 0 0 0 0 0 0 0 0 0
44116- 0 0 0 0 0 0 0 0 0 0 0 0
44117- 0 0 0 0 0 0 0 0 0 0 0 0
44118- 0 0 0 0 0 0 0 0 0 0 0 0
44119- 0 0 0 0 0 0 0 0 0 0 0 0
44120- 0 0 0 0 0 0 0 0 0 0 0 0
44121- 0 0 0 0 0 0 0 0 0 10 10 10
44122- 38 38 38 78 78 78 6 6 6 2 2 6
44123- 2 2 6 46 46 46 14 14 14 42 42 42
44124-246 246 246 253 253 253 253 253 253 253 253 253
44125-253 253 253 253 253 253 253 253 253 253 253 253
44126-253 253 253 253 253 253 231 231 231 242 242 242
44127-253 253 253 253 253 253 253 253 253 253 253 253
44128-253 253 253 253 253 253 253 253 253 253 253 253
44129-253 253 253 253 253 253 253 253 253 253 253 253
44130-253 253 253 253 253 253 253 253 253 253 253 253
44131-253 253 253 253 253 253 234 234 234 10 10 10
44132- 2 2 6 2 2 6 22 22 22 14 14 14
44133- 2 2 6 2 2 6 2 2 6 2 2 6
44134- 2 2 6 66 66 66 62 62 62 22 22 22
44135- 6 6 6 0 0 0 0 0 0 0 0 0
44136- 0 0 0 0 0 0 0 0 0 0 0 0
44137- 0 0 0 0 0 0 0 0 0 0 0 0
44138- 0 0 0 0 0 0 0 0 0 0 0 0
44139- 0 0 0 0 0 0 0 0 0 0 0 0
44140- 0 0 0 0 0 0 0 0 0 0 0 0
44141- 0 0 0 0 0 0 6 6 6 18 18 18
44142- 50 50 50 74 74 74 2 2 6 2 2 6
44143- 14 14 14 70 70 70 34 34 34 62 62 62
44144-250 250 250 253 253 253 253 253 253 253 253 253
44145-253 253 253 253 253 253 253 253 253 253 253 253
44146-253 253 253 253 253 253 231 231 231 246 246 246
44147-253 253 253 253 253 253 253 253 253 253 253 253
44148-253 253 253 253 253 253 253 253 253 253 253 253
44149-253 253 253 253 253 253 253 253 253 253 253 253
44150-253 253 253 253 253 253 253 253 253 253 253 253
44151-253 253 253 253 253 253 234 234 234 14 14 14
44152- 2 2 6 2 2 6 30 30 30 2 2 6
44153- 2 2 6 2 2 6 2 2 6 2 2 6
44154- 2 2 6 66 66 66 62 62 62 22 22 22
44155- 6 6 6 0 0 0 0 0 0 0 0 0
44156- 0 0 0 0 0 0 0 0 0 0 0 0
44157- 0 0 0 0 0 0 0 0 0 0 0 0
44158- 0 0 0 0 0 0 0 0 0 0 0 0
44159- 0 0 0 0 0 0 0 0 0 0 0 0
44160- 0 0 0 0 0 0 0 0 0 0 0 0
44161- 0 0 0 0 0 0 6 6 6 18 18 18
44162- 54 54 54 62 62 62 2 2 6 2 2 6
44163- 2 2 6 30 30 30 46 46 46 70 70 70
44164-250 250 250 253 253 253 253 253 253 253 253 253
44165-253 253 253 253 253 253 253 253 253 253 253 253
44166-253 253 253 253 253 253 231 231 231 246 246 246
44167-253 253 253 253 253 253 253 253 253 253 253 253
44168-253 253 253 253 253 253 253 253 253 253 253 253
44169-253 253 253 253 253 253 253 253 253 253 253 253
44170-253 253 253 253 253 253 253 253 253 253 253 253
44171-253 253 253 253 253 253 226 226 226 10 10 10
44172- 2 2 6 6 6 6 30 30 30 2 2 6
44173- 2 2 6 2 2 6 2 2 6 2 2 6
44174- 2 2 6 66 66 66 58 58 58 22 22 22
44175- 6 6 6 0 0 0 0 0 0 0 0 0
44176- 0 0 0 0 0 0 0 0 0 0 0 0
44177- 0 0 0 0 0 0 0 0 0 0 0 0
44178- 0 0 0 0 0 0 0 0 0 0 0 0
44179- 0 0 0 0 0 0 0 0 0 0 0 0
44180- 0 0 0 0 0 0 0 0 0 0 0 0
44181- 0 0 0 0 0 0 6 6 6 22 22 22
44182- 58 58 58 62 62 62 2 2 6 2 2 6
44183- 2 2 6 2 2 6 30 30 30 78 78 78
44184-250 250 250 253 253 253 253 253 253 253 253 253
44185-253 253 253 253 253 253 253 253 253 253 253 253
44186-253 253 253 253 253 253 231 231 231 246 246 246
44187-253 253 253 253 253 253 253 253 253 253 253 253
44188-253 253 253 253 253 253 253 253 253 253 253 253
44189-253 253 253 253 253 253 253 253 253 253 253 253
44190-253 253 253 253 253 253 253 253 253 253 253 253
44191-253 253 253 253 253 253 206 206 206 2 2 6
44192- 22 22 22 34 34 34 18 14 6 22 22 22
44193- 26 26 26 18 18 18 6 6 6 2 2 6
44194- 2 2 6 82 82 82 54 54 54 18 18 18
44195- 6 6 6 0 0 0 0 0 0 0 0 0
44196- 0 0 0 0 0 0 0 0 0 0 0 0
44197- 0 0 0 0 0 0 0 0 0 0 0 0
44198- 0 0 0 0 0 0 0 0 0 0 0 0
44199- 0 0 0 0 0 0 0 0 0 0 0 0
44200- 0 0 0 0 0 0 0 0 0 0 0 0
44201- 0 0 0 0 0 0 6 6 6 26 26 26
44202- 62 62 62 106 106 106 74 54 14 185 133 11
44203-210 162 10 121 92 8 6 6 6 62 62 62
44204-238 238 238 253 253 253 253 253 253 253 253 253
44205-253 253 253 253 253 253 253 253 253 253 253 253
44206-253 253 253 253 253 253 231 231 231 246 246 246
44207-253 253 253 253 253 253 253 253 253 253 253 253
44208-253 253 253 253 253 253 253 253 253 253 253 253
44209-253 253 253 253 253 253 253 253 253 253 253 253
44210-253 253 253 253 253 253 253 253 253 253 253 253
44211-253 253 253 253 253 253 158 158 158 18 18 18
44212- 14 14 14 2 2 6 2 2 6 2 2 6
44213- 6 6 6 18 18 18 66 66 66 38 38 38
44214- 6 6 6 94 94 94 50 50 50 18 18 18
44215- 6 6 6 0 0 0 0 0 0 0 0 0
44216- 0 0 0 0 0 0 0 0 0 0 0 0
44217- 0 0 0 0 0 0 0 0 0 0 0 0
44218- 0 0 0 0 0 0 0 0 0 0 0 0
44219- 0 0 0 0 0 0 0 0 0 0 0 0
44220- 0 0 0 0 0 0 0 0 0 6 6 6
44221- 10 10 10 10 10 10 18 18 18 38 38 38
44222- 78 78 78 142 134 106 216 158 10 242 186 14
44223-246 190 14 246 190 14 156 118 10 10 10 10
44224- 90 90 90 238 238 238 253 253 253 253 253 253
44225-253 253 253 253 253 253 253 253 253 253 253 253
44226-253 253 253 253 253 253 231 231 231 250 250 250
44227-253 253 253 253 253 253 253 253 253 253 253 253
44228-253 253 253 253 253 253 253 253 253 253 253 253
44229-253 253 253 253 253 253 253 253 253 253 253 253
44230-253 253 253 253 253 253 253 253 253 246 230 190
44231-238 204 91 238 204 91 181 142 44 37 26 9
44232- 2 2 6 2 2 6 2 2 6 2 2 6
44233- 2 2 6 2 2 6 38 38 38 46 46 46
44234- 26 26 26 106 106 106 54 54 54 18 18 18
44235- 6 6 6 0 0 0 0 0 0 0 0 0
44236- 0 0 0 0 0 0 0 0 0 0 0 0
44237- 0 0 0 0 0 0 0 0 0 0 0 0
44238- 0 0 0 0 0 0 0 0 0 0 0 0
44239- 0 0 0 0 0 0 0 0 0 0 0 0
44240- 0 0 0 6 6 6 14 14 14 22 22 22
44241- 30 30 30 38 38 38 50 50 50 70 70 70
44242-106 106 106 190 142 34 226 170 11 242 186 14
44243-246 190 14 246 190 14 246 190 14 154 114 10
44244- 6 6 6 74 74 74 226 226 226 253 253 253
44245-253 253 253 253 253 253 253 253 253 253 253 253
44246-253 253 253 253 253 253 231 231 231 250 250 250
44247-253 253 253 253 253 253 253 253 253 253 253 253
44248-253 253 253 253 253 253 253 253 253 253 253 253
44249-253 253 253 253 253 253 253 253 253 253 253 253
44250-253 253 253 253 253 253 253 253 253 228 184 62
44251-241 196 14 241 208 19 232 195 16 38 30 10
44252- 2 2 6 2 2 6 2 2 6 2 2 6
44253- 2 2 6 6 6 6 30 30 30 26 26 26
44254-203 166 17 154 142 90 66 66 66 26 26 26
44255- 6 6 6 0 0 0 0 0 0 0 0 0
44256- 0 0 0 0 0 0 0 0 0 0 0 0
44257- 0 0 0 0 0 0 0 0 0 0 0 0
44258- 0 0 0 0 0 0 0 0 0 0 0 0
44259- 0 0 0 0 0 0 0 0 0 0 0 0
44260- 6 6 6 18 18 18 38 38 38 58 58 58
44261- 78 78 78 86 86 86 101 101 101 123 123 123
44262-175 146 61 210 150 10 234 174 13 246 186 14
44263-246 190 14 246 190 14 246 190 14 238 190 10
44264-102 78 10 2 2 6 46 46 46 198 198 198
44265-253 253 253 253 253 253 253 253 253 253 253 253
44266-253 253 253 253 253 253 234 234 234 242 242 242
44267-253 253 253 253 253 253 253 253 253 253 253 253
44268-253 253 253 253 253 253 253 253 253 253 253 253
44269-253 253 253 253 253 253 253 253 253 253 253 253
44270-253 253 253 253 253 253 253 253 253 224 178 62
44271-242 186 14 241 196 14 210 166 10 22 18 6
44272- 2 2 6 2 2 6 2 2 6 2 2 6
44273- 2 2 6 2 2 6 6 6 6 121 92 8
44274-238 202 15 232 195 16 82 82 82 34 34 34
44275- 10 10 10 0 0 0 0 0 0 0 0 0
44276- 0 0 0 0 0 0 0 0 0 0 0 0
44277- 0 0 0 0 0 0 0 0 0 0 0 0
44278- 0 0 0 0 0 0 0 0 0 0 0 0
44279- 0 0 0 0 0 0 0 0 0 0 0 0
44280- 14 14 14 38 38 38 70 70 70 154 122 46
44281-190 142 34 200 144 11 197 138 11 197 138 11
44282-213 154 11 226 170 11 242 186 14 246 190 14
44283-246 190 14 246 190 14 246 190 14 246 190 14
44284-225 175 15 46 32 6 2 2 6 22 22 22
44285-158 158 158 250 250 250 253 253 253 253 253 253
44286-253 253 253 253 253 253 253 253 253 253 253 253
44287-253 253 253 253 253 253 253 253 253 253 253 253
44288-253 253 253 253 253 253 253 253 253 253 253 253
44289-253 253 253 253 253 253 253 253 253 253 253 253
44290-253 253 253 250 250 250 242 242 242 224 178 62
44291-239 182 13 236 186 11 213 154 11 46 32 6
44292- 2 2 6 2 2 6 2 2 6 2 2 6
44293- 2 2 6 2 2 6 61 42 6 225 175 15
44294-238 190 10 236 186 11 112 100 78 42 42 42
44295- 14 14 14 0 0 0 0 0 0 0 0 0
44296- 0 0 0 0 0 0 0 0 0 0 0 0
44297- 0 0 0 0 0 0 0 0 0 0 0 0
44298- 0 0 0 0 0 0 0 0 0 0 0 0
44299- 0 0 0 0 0 0 0 0 0 6 6 6
44300- 22 22 22 54 54 54 154 122 46 213 154 11
44301-226 170 11 230 174 11 226 170 11 226 170 11
44302-236 178 12 242 186 14 246 190 14 246 190 14
44303-246 190 14 246 190 14 246 190 14 246 190 14
44304-241 196 14 184 144 12 10 10 10 2 2 6
44305- 6 6 6 116 116 116 242 242 242 253 253 253
44306-253 253 253 253 253 253 253 253 253 253 253 253
44307-253 253 253 253 253 253 253 253 253 253 253 253
44308-253 253 253 253 253 253 253 253 253 253 253 253
44309-253 253 253 253 253 253 253 253 253 253 253 253
44310-253 253 253 231 231 231 198 198 198 214 170 54
44311-236 178 12 236 178 12 210 150 10 137 92 6
44312- 18 14 6 2 2 6 2 2 6 2 2 6
44313- 6 6 6 70 47 6 200 144 11 236 178 12
44314-239 182 13 239 182 13 124 112 88 58 58 58
44315- 22 22 22 6 6 6 0 0 0 0 0 0
44316- 0 0 0 0 0 0 0 0 0 0 0 0
44317- 0 0 0 0 0 0 0 0 0 0 0 0
44318- 0 0 0 0 0 0 0 0 0 0 0 0
44319- 0 0 0 0 0 0 0 0 0 10 10 10
44320- 30 30 30 70 70 70 180 133 36 226 170 11
44321-239 182 13 242 186 14 242 186 14 246 186 14
44322-246 190 14 246 190 14 246 190 14 246 190 14
44323-246 190 14 246 190 14 246 190 14 246 190 14
44324-246 190 14 232 195 16 98 70 6 2 2 6
44325- 2 2 6 2 2 6 66 66 66 221 221 221
44326-253 253 253 253 253 253 253 253 253 253 253 253
44327-253 253 253 253 253 253 253 253 253 253 253 253
44328-253 253 253 253 253 253 253 253 253 253 253 253
44329-253 253 253 253 253 253 253 253 253 253 253 253
44330-253 253 253 206 206 206 198 198 198 214 166 58
44331-230 174 11 230 174 11 216 158 10 192 133 9
44332-163 110 8 116 81 8 102 78 10 116 81 8
44333-167 114 7 197 138 11 226 170 11 239 182 13
44334-242 186 14 242 186 14 162 146 94 78 78 78
44335- 34 34 34 14 14 14 6 6 6 0 0 0
44336- 0 0 0 0 0 0 0 0 0 0 0 0
44337- 0 0 0 0 0 0 0 0 0 0 0 0
44338- 0 0 0 0 0 0 0 0 0 0 0 0
44339- 0 0 0 0 0 0 0 0 0 6 6 6
44340- 30 30 30 78 78 78 190 142 34 226 170 11
44341-239 182 13 246 190 14 246 190 14 246 190 14
44342-246 190 14 246 190 14 246 190 14 246 190 14
44343-246 190 14 246 190 14 246 190 14 246 190 14
44344-246 190 14 241 196 14 203 166 17 22 18 6
44345- 2 2 6 2 2 6 2 2 6 38 38 38
44346-218 218 218 253 253 253 253 253 253 253 253 253
44347-253 253 253 253 253 253 253 253 253 253 253 253
44348-253 253 253 253 253 253 253 253 253 253 253 253
44349-253 253 253 253 253 253 253 253 253 253 253 253
44350-250 250 250 206 206 206 198 198 198 202 162 69
44351-226 170 11 236 178 12 224 166 10 210 150 10
44352-200 144 11 197 138 11 192 133 9 197 138 11
44353-210 150 10 226 170 11 242 186 14 246 190 14
44354-246 190 14 246 186 14 225 175 15 124 112 88
44355- 62 62 62 30 30 30 14 14 14 6 6 6
44356- 0 0 0 0 0 0 0 0 0 0 0 0
44357- 0 0 0 0 0 0 0 0 0 0 0 0
44358- 0 0 0 0 0 0 0 0 0 0 0 0
44359- 0 0 0 0 0 0 0 0 0 10 10 10
44360- 30 30 30 78 78 78 174 135 50 224 166 10
44361-239 182 13 246 190 14 246 190 14 246 190 14
44362-246 190 14 246 190 14 246 190 14 246 190 14
44363-246 190 14 246 190 14 246 190 14 246 190 14
44364-246 190 14 246 190 14 241 196 14 139 102 15
44365- 2 2 6 2 2 6 2 2 6 2 2 6
44366- 78 78 78 250 250 250 253 253 253 253 253 253
44367-253 253 253 253 253 253 253 253 253 253 253 253
44368-253 253 253 253 253 253 253 253 253 253 253 253
44369-253 253 253 253 253 253 253 253 253 253 253 253
44370-250 250 250 214 214 214 198 198 198 190 150 46
44371-219 162 10 236 178 12 234 174 13 224 166 10
44372-216 158 10 213 154 11 213 154 11 216 158 10
44373-226 170 11 239 182 13 246 190 14 246 190 14
44374-246 190 14 246 190 14 242 186 14 206 162 42
44375-101 101 101 58 58 58 30 30 30 14 14 14
44376- 6 6 6 0 0 0 0 0 0 0 0 0
44377- 0 0 0 0 0 0 0 0 0 0 0 0
44378- 0 0 0 0 0 0 0 0 0 0 0 0
44379- 0 0 0 0 0 0 0 0 0 10 10 10
44380- 30 30 30 74 74 74 174 135 50 216 158 10
44381-236 178 12 246 190 14 246 190 14 246 190 14
44382-246 190 14 246 190 14 246 190 14 246 190 14
44383-246 190 14 246 190 14 246 190 14 246 190 14
44384-246 190 14 246 190 14 241 196 14 226 184 13
44385- 61 42 6 2 2 6 2 2 6 2 2 6
44386- 22 22 22 238 238 238 253 253 253 253 253 253
44387-253 253 253 253 253 253 253 253 253 253 253 253
44388-253 253 253 253 253 253 253 253 253 253 253 253
44389-253 253 253 253 253 253 253 253 253 253 253 253
44390-253 253 253 226 226 226 187 187 187 180 133 36
44391-216 158 10 236 178 12 239 182 13 236 178 12
44392-230 174 11 226 170 11 226 170 11 230 174 11
44393-236 178 12 242 186 14 246 190 14 246 190 14
44394-246 190 14 246 190 14 246 186 14 239 182 13
44395-206 162 42 106 106 106 66 66 66 34 34 34
44396- 14 14 14 6 6 6 0 0 0 0 0 0
44397- 0 0 0 0 0 0 0 0 0 0 0 0
44398- 0 0 0 0 0 0 0 0 0 0 0 0
44399- 0 0 0 0 0 0 0 0 0 6 6 6
44400- 26 26 26 70 70 70 163 133 67 213 154 11
44401-236 178 12 246 190 14 246 190 14 246 190 14
44402-246 190 14 246 190 14 246 190 14 246 190 14
44403-246 190 14 246 190 14 246 190 14 246 190 14
44404-246 190 14 246 190 14 246 190 14 241 196 14
44405-190 146 13 18 14 6 2 2 6 2 2 6
44406- 46 46 46 246 246 246 253 253 253 253 253 253
44407-253 253 253 253 253 253 253 253 253 253 253 253
44408-253 253 253 253 253 253 253 253 253 253 253 253
44409-253 253 253 253 253 253 253 253 253 253 253 253
44410-253 253 253 221 221 221 86 86 86 156 107 11
44411-216 158 10 236 178 12 242 186 14 246 186 14
44412-242 186 14 239 182 13 239 182 13 242 186 14
44413-242 186 14 246 186 14 246 190 14 246 190 14
44414-246 190 14 246 190 14 246 190 14 246 190 14
44415-242 186 14 225 175 15 142 122 72 66 66 66
44416- 30 30 30 10 10 10 0 0 0 0 0 0
44417- 0 0 0 0 0 0 0 0 0 0 0 0
44418- 0 0 0 0 0 0 0 0 0 0 0 0
44419- 0 0 0 0 0 0 0 0 0 6 6 6
44420- 26 26 26 70 70 70 163 133 67 210 150 10
44421-236 178 12 246 190 14 246 190 14 246 190 14
44422-246 190 14 246 190 14 246 190 14 246 190 14
44423-246 190 14 246 190 14 246 190 14 246 190 14
44424-246 190 14 246 190 14 246 190 14 246 190 14
44425-232 195 16 121 92 8 34 34 34 106 106 106
44426-221 221 221 253 253 253 253 253 253 253 253 253
44427-253 253 253 253 253 253 253 253 253 253 253 253
44428-253 253 253 253 253 253 253 253 253 253 253 253
44429-253 253 253 253 253 253 253 253 253 253 253 253
44430-242 242 242 82 82 82 18 14 6 163 110 8
44431-216 158 10 236 178 12 242 186 14 246 190 14
44432-246 190 14 246 190 14 246 190 14 246 190 14
44433-246 190 14 246 190 14 246 190 14 246 190 14
44434-246 190 14 246 190 14 246 190 14 246 190 14
44435-246 190 14 246 190 14 242 186 14 163 133 67
44436- 46 46 46 18 18 18 6 6 6 0 0 0
44437- 0 0 0 0 0 0 0 0 0 0 0 0
44438- 0 0 0 0 0 0 0 0 0 0 0 0
44439- 0 0 0 0 0 0 0 0 0 10 10 10
44440- 30 30 30 78 78 78 163 133 67 210 150 10
44441-236 178 12 246 186 14 246 190 14 246 190 14
44442-246 190 14 246 190 14 246 190 14 246 190 14
44443-246 190 14 246 190 14 246 190 14 246 190 14
44444-246 190 14 246 190 14 246 190 14 246 190 14
44445-241 196 14 215 174 15 190 178 144 253 253 253
44446-253 253 253 253 253 253 253 253 253 253 253 253
44447-253 253 253 253 253 253 253 253 253 253 253 253
44448-253 253 253 253 253 253 253 253 253 253 253 253
44449-253 253 253 253 253 253 253 253 253 218 218 218
44450- 58 58 58 2 2 6 22 18 6 167 114 7
44451-216 158 10 236 178 12 246 186 14 246 190 14
44452-246 190 14 246 190 14 246 190 14 246 190 14
44453-246 190 14 246 190 14 246 190 14 246 190 14
44454-246 190 14 246 190 14 246 190 14 246 190 14
44455-246 190 14 246 186 14 242 186 14 190 150 46
44456- 54 54 54 22 22 22 6 6 6 0 0 0
44457- 0 0 0 0 0 0 0 0 0 0 0 0
44458- 0 0 0 0 0 0 0 0 0 0 0 0
44459- 0 0 0 0 0 0 0 0 0 14 14 14
44460- 38 38 38 86 86 86 180 133 36 213 154 11
44461-236 178 12 246 186 14 246 190 14 246 190 14
44462-246 190 14 246 190 14 246 190 14 246 190 14
44463-246 190 14 246 190 14 246 190 14 246 190 14
44464-246 190 14 246 190 14 246 190 14 246 190 14
44465-246 190 14 232 195 16 190 146 13 214 214 214
44466-253 253 253 253 253 253 253 253 253 253 253 253
44467-253 253 253 253 253 253 253 253 253 253 253 253
44468-253 253 253 253 253 253 253 253 253 253 253 253
44469-253 253 253 250 250 250 170 170 170 26 26 26
44470- 2 2 6 2 2 6 37 26 9 163 110 8
44471-219 162 10 239 182 13 246 186 14 246 190 14
44472-246 190 14 246 190 14 246 190 14 246 190 14
44473-246 190 14 246 190 14 246 190 14 246 190 14
44474-246 190 14 246 190 14 246 190 14 246 190 14
44475-246 186 14 236 178 12 224 166 10 142 122 72
44476- 46 46 46 18 18 18 6 6 6 0 0 0
44477- 0 0 0 0 0 0 0 0 0 0 0 0
44478- 0 0 0 0 0 0 0 0 0 0 0 0
44479- 0 0 0 0 0 0 6 6 6 18 18 18
44480- 50 50 50 109 106 95 192 133 9 224 166 10
44481-242 186 14 246 190 14 246 190 14 246 190 14
44482-246 190 14 246 190 14 246 190 14 246 190 14
44483-246 190 14 246 190 14 246 190 14 246 190 14
44484-246 190 14 246 190 14 246 190 14 246 190 14
44485-242 186 14 226 184 13 210 162 10 142 110 46
44486-226 226 226 253 253 253 253 253 253 253 253 253
44487-253 253 253 253 253 253 253 253 253 253 253 253
44488-253 253 253 253 253 253 253 253 253 253 253 253
44489-198 198 198 66 66 66 2 2 6 2 2 6
44490- 2 2 6 2 2 6 50 34 6 156 107 11
44491-219 162 10 239 182 13 246 186 14 246 190 14
44492-246 190 14 246 190 14 246 190 14 246 190 14
44493-246 190 14 246 190 14 246 190 14 246 190 14
44494-246 190 14 246 190 14 246 190 14 242 186 14
44495-234 174 13 213 154 11 154 122 46 66 66 66
44496- 30 30 30 10 10 10 0 0 0 0 0 0
44497- 0 0 0 0 0 0 0 0 0 0 0 0
44498- 0 0 0 0 0 0 0 0 0 0 0 0
44499- 0 0 0 0 0 0 6 6 6 22 22 22
44500- 58 58 58 154 121 60 206 145 10 234 174 13
44501-242 186 14 246 186 14 246 190 14 246 190 14
44502-246 190 14 246 190 14 246 190 14 246 190 14
44503-246 190 14 246 190 14 246 190 14 246 190 14
44504-246 190 14 246 190 14 246 190 14 246 190 14
44505-246 186 14 236 178 12 210 162 10 163 110 8
44506- 61 42 6 138 138 138 218 218 218 250 250 250
44507-253 253 253 253 253 253 253 253 253 250 250 250
44508-242 242 242 210 210 210 144 144 144 66 66 66
44509- 6 6 6 2 2 6 2 2 6 2 2 6
44510- 2 2 6 2 2 6 61 42 6 163 110 8
44511-216 158 10 236 178 12 246 190 14 246 190 14
44512-246 190 14 246 190 14 246 190 14 246 190 14
44513-246 190 14 246 190 14 246 190 14 246 190 14
44514-246 190 14 239 182 13 230 174 11 216 158 10
44515-190 142 34 124 112 88 70 70 70 38 38 38
44516- 18 18 18 6 6 6 0 0 0 0 0 0
44517- 0 0 0 0 0 0 0 0 0 0 0 0
44518- 0 0 0 0 0 0 0 0 0 0 0 0
44519- 0 0 0 0 0 0 6 6 6 22 22 22
44520- 62 62 62 168 124 44 206 145 10 224 166 10
44521-236 178 12 239 182 13 242 186 14 242 186 14
44522-246 186 14 246 190 14 246 190 14 246 190 14
44523-246 190 14 246 190 14 246 190 14 246 190 14
44524-246 190 14 246 190 14 246 190 14 246 190 14
44525-246 190 14 236 178 12 216 158 10 175 118 6
44526- 80 54 7 2 2 6 6 6 6 30 30 30
44527- 54 54 54 62 62 62 50 50 50 38 38 38
44528- 14 14 14 2 2 6 2 2 6 2 2 6
44529- 2 2 6 2 2 6 2 2 6 2 2 6
44530- 2 2 6 6 6 6 80 54 7 167 114 7
44531-213 154 11 236 178 12 246 190 14 246 190 14
44532-246 190 14 246 190 14 246 190 14 246 190 14
44533-246 190 14 242 186 14 239 182 13 239 182 13
44534-230 174 11 210 150 10 174 135 50 124 112 88
44535- 82 82 82 54 54 54 34 34 34 18 18 18
44536- 6 6 6 0 0 0 0 0 0 0 0 0
44537- 0 0 0 0 0 0 0 0 0 0 0 0
44538- 0 0 0 0 0 0 0 0 0 0 0 0
44539- 0 0 0 0 0 0 6 6 6 18 18 18
44540- 50 50 50 158 118 36 192 133 9 200 144 11
44541-216 158 10 219 162 10 224 166 10 226 170 11
44542-230 174 11 236 178 12 239 182 13 239 182 13
44543-242 186 14 246 186 14 246 190 14 246 190 14
44544-246 190 14 246 190 14 246 190 14 246 190 14
44545-246 186 14 230 174 11 210 150 10 163 110 8
44546-104 69 6 10 10 10 2 2 6 2 2 6
44547- 2 2 6 2 2 6 2 2 6 2 2 6
44548- 2 2 6 2 2 6 2 2 6 2 2 6
44549- 2 2 6 2 2 6 2 2 6 2 2 6
44550- 2 2 6 6 6 6 91 60 6 167 114 7
44551-206 145 10 230 174 11 242 186 14 246 190 14
44552-246 190 14 246 190 14 246 186 14 242 186 14
44553-239 182 13 230 174 11 224 166 10 213 154 11
44554-180 133 36 124 112 88 86 86 86 58 58 58
44555- 38 38 38 22 22 22 10 10 10 6 6 6
44556- 0 0 0 0 0 0 0 0 0 0 0 0
44557- 0 0 0 0 0 0 0 0 0 0 0 0
44558- 0 0 0 0 0 0 0 0 0 0 0 0
44559- 0 0 0 0 0 0 0 0 0 14 14 14
44560- 34 34 34 70 70 70 138 110 50 158 118 36
44561-167 114 7 180 123 7 192 133 9 197 138 11
44562-200 144 11 206 145 10 213 154 11 219 162 10
44563-224 166 10 230 174 11 239 182 13 242 186 14
44564-246 186 14 246 186 14 246 186 14 246 186 14
44565-239 182 13 216 158 10 185 133 11 152 99 6
44566-104 69 6 18 14 6 2 2 6 2 2 6
44567- 2 2 6 2 2 6 2 2 6 2 2 6
44568- 2 2 6 2 2 6 2 2 6 2 2 6
44569- 2 2 6 2 2 6 2 2 6 2 2 6
44570- 2 2 6 6 6 6 80 54 7 152 99 6
44571-192 133 9 219 162 10 236 178 12 239 182 13
44572-246 186 14 242 186 14 239 182 13 236 178 12
44573-224 166 10 206 145 10 192 133 9 154 121 60
44574- 94 94 94 62 62 62 42 42 42 22 22 22
44575- 14 14 14 6 6 6 0 0 0 0 0 0
44576- 0 0 0 0 0 0 0 0 0 0 0 0
44577- 0 0 0 0 0 0 0 0 0 0 0 0
44578- 0 0 0 0 0 0 0 0 0 0 0 0
44579- 0 0 0 0 0 0 0 0 0 6 6 6
44580- 18 18 18 34 34 34 58 58 58 78 78 78
44581-101 98 89 124 112 88 142 110 46 156 107 11
44582-163 110 8 167 114 7 175 118 6 180 123 7
44583-185 133 11 197 138 11 210 150 10 219 162 10
44584-226 170 11 236 178 12 236 178 12 234 174 13
44585-219 162 10 197 138 11 163 110 8 130 83 6
44586- 91 60 6 10 10 10 2 2 6 2 2 6
44587- 18 18 18 38 38 38 38 38 38 38 38 38
44588- 38 38 38 38 38 38 38 38 38 38 38 38
44589- 38 38 38 38 38 38 26 26 26 2 2 6
44590- 2 2 6 6 6 6 70 47 6 137 92 6
44591-175 118 6 200 144 11 219 162 10 230 174 11
44592-234 174 13 230 174 11 219 162 10 210 150 10
44593-192 133 9 163 110 8 124 112 88 82 82 82
44594- 50 50 50 30 30 30 14 14 14 6 6 6
44595- 0 0 0 0 0 0 0 0 0 0 0 0
44596- 0 0 0 0 0 0 0 0 0 0 0 0
44597- 0 0 0 0 0 0 0 0 0 0 0 0
44598- 0 0 0 0 0 0 0 0 0 0 0 0
44599- 0 0 0 0 0 0 0 0 0 0 0 0
44600- 6 6 6 14 14 14 22 22 22 34 34 34
44601- 42 42 42 58 58 58 74 74 74 86 86 86
44602-101 98 89 122 102 70 130 98 46 121 87 25
44603-137 92 6 152 99 6 163 110 8 180 123 7
44604-185 133 11 197 138 11 206 145 10 200 144 11
44605-180 123 7 156 107 11 130 83 6 104 69 6
44606- 50 34 6 54 54 54 110 110 110 101 98 89
44607- 86 86 86 82 82 82 78 78 78 78 78 78
44608- 78 78 78 78 78 78 78 78 78 78 78 78
44609- 78 78 78 82 82 82 86 86 86 94 94 94
44610-106 106 106 101 101 101 86 66 34 124 80 6
44611-156 107 11 180 123 7 192 133 9 200 144 11
44612-206 145 10 200 144 11 192 133 9 175 118 6
44613-139 102 15 109 106 95 70 70 70 42 42 42
44614- 22 22 22 10 10 10 0 0 0 0 0 0
44615- 0 0 0 0 0 0 0 0 0 0 0 0
44616- 0 0 0 0 0 0 0 0 0 0 0 0
44617- 0 0 0 0 0 0 0 0 0 0 0 0
44618- 0 0 0 0 0 0 0 0 0 0 0 0
44619- 0 0 0 0 0 0 0 0 0 0 0 0
44620- 0 0 0 0 0 0 6 6 6 10 10 10
44621- 14 14 14 22 22 22 30 30 30 38 38 38
44622- 50 50 50 62 62 62 74 74 74 90 90 90
44623-101 98 89 112 100 78 121 87 25 124 80 6
44624-137 92 6 152 99 6 152 99 6 152 99 6
44625-138 86 6 124 80 6 98 70 6 86 66 30
44626-101 98 89 82 82 82 58 58 58 46 46 46
44627- 38 38 38 34 34 34 34 34 34 34 34 34
44628- 34 34 34 34 34 34 34 34 34 34 34 34
44629- 34 34 34 34 34 34 38 38 38 42 42 42
44630- 54 54 54 82 82 82 94 86 76 91 60 6
44631-134 86 6 156 107 11 167 114 7 175 118 6
44632-175 118 6 167 114 7 152 99 6 121 87 25
44633-101 98 89 62 62 62 34 34 34 18 18 18
44634- 6 6 6 0 0 0 0 0 0 0 0 0
44635- 0 0 0 0 0 0 0 0 0 0 0 0
44636- 0 0 0 0 0 0 0 0 0 0 0 0
44637- 0 0 0 0 0 0 0 0 0 0 0 0
44638- 0 0 0 0 0 0 0 0 0 0 0 0
44639- 0 0 0 0 0 0 0 0 0 0 0 0
44640- 0 0 0 0 0 0 0 0 0 0 0 0
44641- 0 0 0 6 6 6 6 6 6 10 10 10
44642- 18 18 18 22 22 22 30 30 30 42 42 42
44643- 50 50 50 66 66 66 86 86 86 101 98 89
44644-106 86 58 98 70 6 104 69 6 104 69 6
44645-104 69 6 91 60 6 82 62 34 90 90 90
44646- 62 62 62 38 38 38 22 22 22 14 14 14
44647- 10 10 10 10 10 10 10 10 10 10 10 10
44648- 10 10 10 10 10 10 6 6 6 10 10 10
44649- 10 10 10 10 10 10 10 10 10 14 14 14
44650- 22 22 22 42 42 42 70 70 70 89 81 66
44651- 80 54 7 104 69 6 124 80 6 137 92 6
44652-134 86 6 116 81 8 100 82 52 86 86 86
44653- 58 58 58 30 30 30 14 14 14 6 6 6
44654- 0 0 0 0 0 0 0 0 0 0 0 0
44655- 0 0 0 0 0 0 0 0 0 0 0 0
44656- 0 0 0 0 0 0 0 0 0 0 0 0
44657- 0 0 0 0 0 0 0 0 0 0 0 0
44658- 0 0 0 0 0 0 0 0 0 0 0 0
44659- 0 0 0 0 0 0 0 0 0 0 0 0
44660- 0 0 0 0 0 0 0 0 0 0 0 0
44661- 0 0 0 0 0 0 0 0 0 0 0 0
44662- 0 0 0 6 6 6 10 10 10 14 14 14
44663- 18 18 18 26 26 26 38 38 38 54 54 54
44664- 70 70 70 86 86 86 94 86 76 89 81 66
44665- 89 81 66 86 86 86 74 74 74 50 50 50
44666- 30 30 30 14 14 14 6 6 6 0 0 0
44667- 0 0 0 0 0 0 0 0 0 0 0 0
44668- 0 0 0 0 0 0 0 0 0 0 0 0
44669- 0 0 0 0 0 0 0 0 0 0 0 0
44670- 6 6 6 18 18 18 34 34 34 58 58 58
44671- 82 82 82 89 81 66 89 81 66 89 81 66
44672- 94 86 66 94 86 76 74 74 74 50 50 50
44673- 26 26 26 14 14 14 6 6 6 0 0 0
44674- 0 0 0 0 0 0 0 0 0 0 0 0
44675- 0 0 0 0 0 0 0 0 0 0 0 0
44676- 0 0 0 0 0 0 0 0 0 0 0 0
44677- 0 0 0 0 0 0 0 0 0 0 0 0
44678- 0 0 0 0 0 0 0 0 0 0 0 0
44679- 0 0 0 0 0 0 0 0 0 0 0 0
44680- 0 0 0 0 0 0 0 0 0 0 0 0
44681- 0 0 0 0 0 0 0 0 0 0 0 0
44682- 0 0 0 0 0 0 0 0 0 0 0 0
44683- 6 6 6 6 6 6 14 14 14 18 18 18
44684- 30 30 30 38 38 38 46 46 46 54 54 54
44685- 50 50 50 42 42 42 30 30 30 18 18 18
44686- 10 10 10 0 0 0 0 0 0 0 0 0
44687- 0 0 0 0 0 0 0 0 0 0 0 0
44688- 0 0 0 0 0 0 0 0 0 0 0 0
44689- 0 0 0 0 0 0 0 0 0 0 0 0
44690- 0 0 0 6 6 6 14 14 14 26 26 26
44691- 38 38 38 50 50 50 58 58 58 58 58 58
44692- 54 54 54 42 42 42 30 30 30 18 18 18
44693- 10 10 10 0 0 0 0 0 0 0 0 0
44694- 0 0 0 0 0 0 0 0 0 0 0 0
44695- 0 0 0 0 0 0 0 0 0 0 0 0
44696- 0 0 0 0 0 0 0 0 0 0 0 0
44697- 0 0 0 0 0 0 0 0 0 0 0 0
44698- 0 0 0 0 0 0 0 0 0 0 0 0
44699- 0 0 0 0 0 0 0 0 0 0 0 0
44700- 0 0 0 0 0 0 0 0 0 0 0 0
44701- 0 0 0 0 0 0 0 0 0 0 0 0
44702- 0 0 0 0 0 0 0 0 0 0 0 0
44703- 0 0 0 0 0 0 0 0 0 6 6 6
44704- 6 6 6 10 10 10 14 14 14 18 18 18
44705- 18 18 18 14 14 14 10 10 10 6 6 6
44706- 0 0 0 0 0 0 0 0 0 0 0 0
44707- 0 0 0 0 0 0 0 0 0 0 0 0
44708- 0 0 0 0 0 0 0 0 0 0 0 0
44709- 0 0 0 0 0 0 0 0 0 0 0 0
44710- 0 0 0 0 0 0 0 0 0 6 6 6
44711- 14 14 14 18 18 18 22 22 22 22 22 22
44712- 18 18 18 14 14 14 10 10 10 6 6 6
44713- 0 0 0 0 0 0 0 0 0 0 0 0
44714- 0 0 0 0 0 0 0 0 0 0 0 0
44715- 0 0 0 0 0 0 0 0 0 0 0 0
44716- 0 0 0 0 0 0 0 0 0 0 0 0
44717- 0 0 0 0 0 0 0 0 0 0 0 0
44718+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44729+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44731+4 4 4 4 4 4
44732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44743+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44745+4 4 4 4 4 4
44746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44759+4 4 4 4 4 4
44760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44773+4 4 4 4 4 4
44774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44787+4 4 4 4 4 4
44788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44801+4 4 4 4 4 4
44802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44806+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
44807+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
44808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44811+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
44812+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44813+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
44814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44815+4 4 4 4 4 4
44816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44820+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
44821+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
44822+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44825+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
44826+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
44827+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
44828+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44829+4 4 4 4 4 4
44830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44834+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
44835+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
44836+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44839+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
44840+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
44841+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
44842+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
44843+4 4 4 4 4 4
44844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44847+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
44848+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
44849+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
44850+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
44851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44852+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44853+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
44854+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
44855+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
44856+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
44857+4 4 4 4 4 4
44858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44861+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
44862+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
44863+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
44864+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
44865+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
44866+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
44867+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
44868+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
44869+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
44870+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
44871+4 4 4 4 4 4
44872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
44875+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
44876+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
44877+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
44878+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
44879+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
44880+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
44881+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
44882+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
44883+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
44884+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
44885+4 4 4 4 4 4
44886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44888+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
44889+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
44890+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
44891+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
44892+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
44893+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
44894+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
44895+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
44896+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
44897+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
44898+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
44899+4 4 4 4 4 4
44900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44902+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
44903+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
44904+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
44905+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
44906+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
44907+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
44908+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
44909+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
44910+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
44911+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
44912+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
44913+4 4 4 4 4 4
44914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44915+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44916+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
44917+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
44918+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
44919+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
44920+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
44921+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
44922+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
44923+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
44924+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
44925+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
44926+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
44927+4 4 4 4 4 4
44928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44929+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44930+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
44931+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
44932+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
44933+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
44934+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
44935+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
44936+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
44937+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
44938+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
44939+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
44940+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
44941+4 4 4 4 4 4
44942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44943+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
44944+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
44945+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
44946+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
44947+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
44948+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
44949+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
44950+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
44951+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
44952+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
44953+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
44954+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
44955+4 4 4 4 4 4
44956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
44957+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
44958+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
44959+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
44960+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
44961+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
44962+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
44963+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
44964+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
44965+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
44966+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
44967+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
44968+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
44969+0 0 0 4 4 4
44970+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
44971+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
44972+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
44973+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
44974+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
44975+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
44976+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
44977+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
44978+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
44979+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
44980+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
44981+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
44982+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
44983+2 0 0 0 0 0
44984+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
44985+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
44986+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
44987+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
44988+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
44989+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
44990+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
44991+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
44992+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
44993+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
44994+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
44995+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
44996+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
44997+37 38 37 0 0 0
44998+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
44999+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
45000+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
45001+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
45002+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
45003+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
45004+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
45005+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
45006+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
45007+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
45008+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
45009+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
45010+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
45011+85 115 134 4 0 0
45012+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
45013+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
45014+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
45015+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
45016+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
45017+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
45018+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
45019+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
45020+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
45021+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
45022+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
45023+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
45024+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
45025+60 73 81 4 0 0
45026+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
45027+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
45028+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
45029+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
45030+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
45031+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
45032+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
45033+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
45034+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
45035+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
45036+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
45037+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
45038+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
45039+16 19 21 4 0 0
45040+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
45041+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
45042+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
45043+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
45044+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
45045+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
45046+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
45047+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
45048+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
45049+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
45050+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
45051+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
45052+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
45053+4 0 0 4 3 3
45054+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
45055+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
45056+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
45057+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
45058+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
45059+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
45060+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
45061+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
45062+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
45063+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
45064+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
45065+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
45066+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
45067+3 2 2 4 4 4
45068+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
45069+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
45070+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
45071+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
45072+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
45073+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
45074+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
45075+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
45076+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
45077+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
45078+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
45079+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
45080+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
45081+4 4 4 4 4 4
45082+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
45083+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
45084+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
45085+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
45086+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
45087+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
45088+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
45089+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
45090+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
45091+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
45092+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
45093+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
45094+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
45095+4 4 4 4 4 4
45096+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
45097+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
45098+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
45099+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
45100+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
45101+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45102+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
45103+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
45104+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
45105+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
45106+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
45107+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
45108+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
45109+5 5 5 5 5 5
45110+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
45111+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
45112+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
45113+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
45114+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
45115+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45116+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
45117+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
45118+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
45119+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
45120+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
45121+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
45122+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45123+5 5 5 4 4 4
45124+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
45125+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
45126+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
45127+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
45128+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45129+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
45130+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
45131+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
45132+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
45133+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
45134+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
45135+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45136+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45137+4 4 4 4 4 4
45138+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
45139+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
45140+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
45141+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
45142+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
45143+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45144+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45145+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
45146+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
45147+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
45148+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
45149+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
45150+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45151+4 4 4 4 4 4
45152+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
45153+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
45154+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
45155+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
45156+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45157+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
45158+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
45159+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
45160+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
45161+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
45162+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
45163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45164+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45165+4 4 4 4 4 4
45166+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
45167+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
45168+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
45169+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
45170+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45171+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45172+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
45173+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
45174+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
45175+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
45176+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
45177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45178+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45179+4 4 4 4 4 4
45180+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
45181+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
45182+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
45183+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
45184+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45185+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
45186+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
45187+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
45188+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
45189+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
45190+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45192+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45193+4 4 4 4 4 4
45194+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
45195+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
45196+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
45197+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
45198+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
45199+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
45200+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
45201+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
45202+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
45203+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
45204+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
45205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45206+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45207+4 4 4 4 4 4
45208+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
45209+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
45210+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
45211+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
45212+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
45213+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
45214+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
45215+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
45216+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
45217+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
45218+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
45219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45220+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45221+4 4 4 4 4 4
45222+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
45223+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
45224+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
45225+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45226+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
45227+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
45228+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
45229+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
45230+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
45231+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
45232+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45234+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45235+4 4 4 4 4 4
45236+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
45237+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
45238+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
45239+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45240+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45241+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
45242+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
45243+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
45244+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
45245+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
45246+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45248+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45249+4 4 4 4 4 4
45250+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
45251+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
45252+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45253+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
45254+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45255+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
45256+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
45257+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
45258+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
45259+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
45260+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45262+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45263+4 4 4 4 4 4
45264+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
45265+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
45266+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45267+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
45268+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45269+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
45270+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
45271+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
45272+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45273+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45274+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45276+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45277+4 4 4 4 4 4
45278+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45279+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
45280+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
45281+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
45282+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
45283+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
45284+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
45285+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
45286+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45287+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45288+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45290+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45291+4 4 4 4 4 4
45292+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
45293+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
45294+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
45295+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
45296+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45297+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
45298+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
45299+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
45300+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45301+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45302+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45304+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45305+4 4 4 4 4 4
45306+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
45307+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
45308+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45309+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
45310+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
45311+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
45312+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
45313+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
45314+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45315+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45316+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45318+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45319+4 4 4 4 4 4
45320+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
45321+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
45322+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45323+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
45324+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
45325+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
45326+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
45327+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
45328+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
45329+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45330+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45332+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45333+4 4 4 4 4 4
45334+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45335+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
45336+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
45337+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
45338+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
45339+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
45340+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
45341+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
45342+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45343+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45344+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45346+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45347+4 4 4 4 4 4
45348+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
45349+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
45350+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45351+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
45352+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
45353+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
45354+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
45355+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
45356+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
45357+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45358+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45360+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45361+4 4 4 4 4 4
45362+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
45363+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
45364+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
45365+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
45366+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
45367+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
45368+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
45369+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
45370+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45371+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45372+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45374+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45375+4 4 4 4 4 4
45376+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45377+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
45378+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
45379+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
45380+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
45381+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
45382+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
45383+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
45384+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45385+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45386+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45388+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45389+4 4 4 4 4 4
45390+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45391+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
45392+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
45393+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
45394+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
45395+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
45396+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45397+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
45398+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
45399+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45400+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45402+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45403+4 4 4 4 4 4
45404+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45405+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
45406+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
45407+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45408+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
45409+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
45410+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
45411+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
45412+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
45413+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45414+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45416+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45417+4 4 4 4 4 4
45418+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
45419+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
45420+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
45421+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
45422+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
45423+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
45424+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
45425+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
45426+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
45427+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45428+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45430+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45431+4 4 4 4 4 4
45432+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45433+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
45434+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
45435+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
45436+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
45437+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
45438+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
45439+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
45440+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
45441+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45442+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45444+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45445+4 4 4 4 4 4
45446+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
45447+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
45448+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
45449+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
45450+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
45451+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
45452+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
45453+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
45454+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
45455+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45456+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45458+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45459+4 4 4 4 4 4
45460+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
45461+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
45462+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
45463+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
45464+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
45465+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
45466+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
45467+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
45468+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
45469+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
45470+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45472+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45473+4 4 4 4 4 4
45474+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
45475+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
45476+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
45477+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
45478+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
45479+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
45480+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
45481+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
45482+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
45483+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
45484+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45486+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45487+4 4 4 4 4 4
45488+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
45489+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45490+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
45491+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
45492+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
45493+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
45494+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
45495+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
45496+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
45497+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
45498+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45500+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45501+4 4 4 4 4 4
45502+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
45503+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
45504+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
45505+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
45506+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
45507+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
45508+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45509+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
45510+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
45511+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
45512+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45514+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45515+4 4 4 4 4 4
45516+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
45517+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
45518+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
45519+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
45520+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
45521+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
45522+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
45523+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
45524+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
45525+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
45526+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45528+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45529+4 4 4 4 4 4
45530+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
45531+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
45532+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45533+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
45534+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
45535+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
45536+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
45537+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
45538+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
45539+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
45540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45542+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45543+4 4 4 4 4 4
45544+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45545+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
45546+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
45547+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
45548+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
45549+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
45550+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
45551+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
45552+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
45553+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45556+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45557+4 4 4 4 4 4
45558+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
45559+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
45560+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
45561+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
45562+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
45563+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
45564+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
45565+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
45566+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
45567+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
45568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45570+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45571+4 4 4 4 4 4
45572+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
45573+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
45574+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
45575+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
45576+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
45577+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
45578+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
45579+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
45580+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
45581+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45584+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45585+4 4 4 4 4 4
45586+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
45587+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45588+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
45589+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
45590+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
45591+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
45592+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
45593+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
45594+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
45595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45598+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45599+4 4 4 4 4 4
45600+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
45601+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
45602+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
45603+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
45604+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
45605+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
45606+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
45607+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
45608+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
45609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45612+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45613+4 4 4 4 4 4
45614+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45615+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
45616+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
45617+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
45618+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
45619+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
45620+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
45621+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
45622+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45626+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45627+4 4 4 4 4 4
45628+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
45629+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
45630+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45631+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
45632+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
45633+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
45634+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
45635+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
45636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45640+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45641+4 4 4 4 4 4
45642+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45643+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
45644+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
45645+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
45646+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
45647+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
45648+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
45649+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
45650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45654+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45655+4 4 4 4 4 4
45656+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45657+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
45658+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45659+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
45660+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
45661+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
45662+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
45663+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
45664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45668+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45669+4 4 4 4 4 4
45670+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45671+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
45672+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
45673+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
45674+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
45675+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
45676+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
45677+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
45678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45682+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45683+4 4 4 4 4 4
45684+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45685+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
45686+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
45687+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45688+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
45689+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
45690+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
45691+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45696+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45697+4 4 4 4 4 4
45698+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45699+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45700+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45701+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
45702+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
45703+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
45704+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
45705+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
45706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45710+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45711+4 4 4 4 4 4
45712+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45713+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45714+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45715+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45716+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
45717+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
45718+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
45719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45724+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45725+4 4 4 4 4 4
45726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45727+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45728+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45729+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
45730+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
45731+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
45732+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
45733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45738+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45739+4 4 4 4 4 4
45740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45742+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45743+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
45744+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
45745+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
45746+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
45747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45753+4 4 4 4 4 4
45754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45757+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
45758+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
45759+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
45760+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
45761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45767+4 4 4 4 4 4
45768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
45772+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
45773+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
45774+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
45775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45781+4 4 4 4 4 4
45782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45786+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
45787+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
45788+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
45789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45795+4 4 4 4 4 4
45796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45800+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
45801+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
45802+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45809+4 4 4 4 4 4
45810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45814+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
45815+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
45816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45823+4 4 4 4 4 4
45824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45828+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
45829+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
45830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
45837+4 4 4 4 4 4
45838diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
45839index 443e3c8..c443d6a 100644
45840--- a/drivers/video/nvidia/nv_backlight.c
45841+++ b/drivers/video/nvidia/nv_backlight.c
45842@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(struct backlight_device *bd)
45843 return bd->props.brightness;
45844 }
45845
45846-static struct backlight_ops nvidia_bl_ops = {
45847+static const struct backlight_ops nvidia_bl_ops = {
45848 .get_brightness = nvidia_bl_get_brightness,
45849 .update_status = nvidia_bl_update_status,
45850 };
45851diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
45852index d94c57f..912984c 100644
45853--- a/drivers/video/riva/fbdev.c
45854+++ b/drivers/video/riva/fbdev.c
45855@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct backlight_device *bd)
45856 return bd->props.brightness;
45857 }
45858
45859-static struct backlight_ops riva_bl_ops = {
45860+static const struct backlight_ops riva_bl_ops = {
45861 .get_brightness = riva_bl_get_brightness,
45862 .update_status = riva_bl_update_status,
45863 };
45864diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
45865index 54fbb29..2c108fc 100644
45866--- a/drivers/video/uvesafb.c
45867+++ b/drivers/video/uvesafb.c
45868@@ -18,6 +18,7 @@
45869 #include <linux/fb.h>
45870 #include <linux/io.h>
45871 #include <linux/mutex.h>
45872+#include <linux/moduleloader.h>
45873 #include <video/edid.h>
45874 #include <video/uvesafb.h>
45875 #ifdef CONFIG_X86
45876@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
45877 NULL,
45878 };
45879
45880- return call_usermodehelper(v86d_path, argv, envp, 1);
45881+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
45882 }
45883
45884 /*
45885@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
45886 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
45887 par->pmi_setpal = par->ypan = 0;
45888 } else {
45889+
45890+#ifdef CONFIG_PAX_KERNEXEC
45891+#ifdef CONFIG_MODULES
45892+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
45893+#endif
45894+ if (!par->pmi_code) {
45895+ par->pmi_setpal = par->ypan = 0;
45896+ return 0;
45897+ }
45898+#endif
45899+
45900 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
45901 + task->t.regs.edi);
45902+
45903+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45904+ pax_open_kernel();
45905+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
45906+ pax_close_kernel();
45907+
45908+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
45909+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
45910+#else
45911 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
45912 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
45913+#endif
45914+
45915 printk(KERN_INFO "uvesafb: protected mode interface info at "
45916 "%04x:%04x\n",
45917 (u16)task->t.regs.es, (u16)task->t.regs.edi);
45918@@ -1799,6 +1822,11 @@ out:
45919 if (par->vbe_modes)
45920 kfree(par->vbe_modes);
45921
45922+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45923+ if (par->pmi_code)
45924+ module_free_exec(NULL, par->pmi_code);
45925+#endif
45926+
45927 framebuffer_release(info);
45928 return err;
45929 }
45930@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platform_device *dev)
45931 kfree(par->vbe_state_orig);
45932 if (par->vbe_state_saved)
45933 kfree(par->vbe_state_saved);
45934+
45935+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45936+ if (par->pmi_code)
45937+ module_free_exec(NULL, par->pmi_code);
45938+#endif
45939+
45940 }
45941
45942 framebuffer_release(info);
45943diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
45944index bd37ee1..cb827e8 100644
45945--- a/drivers/video/vesafb.c
45946+++ b/drivers/video/vesafb.c
45947@@ -9,6 +9,7 @@
45948 */
45949
45950 #include <linux/module.h>
45951+#include <linux/moduleloader.h>
45952 #include <linux/kernel.h>
45953 #include <linux/errno.h>
45954 #include <linux/string.h>
45955@@ -53,8 +54,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
45956 static int vram_total __initdata; /* Set total amount of memory */
45957 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
45958 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
45959-static void (*pmi_start)(void) __read_mostly;
45960-static void (*pmi_pal) (void) __read_mostly;
45961+static void (*pmi_start)(void) __read_only;
45962+static void (*pmi_pal) (void) __read_only;
45963 static int depth __read_mostly;
45964 static int vga_compat __read_mostly;
45965 /* --------------------------------------------------------------------- */
45966@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
45967 unsigned int size_vmode;
45968 unsigned int size_remap;
45969 unsigned int size_total;
45970+ void *pmi_code = NULL;
45971
45972 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
45973 return -ENODEV;
45974@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
45975 size_remap = size_total;
45976 vesafb_fix.smem_len = size_remap;
45977
45978-#ifndef __i386__
45979- screen_info.vesapm_seg = 0;
45980-#endif
45981-
45982 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
45983 printk(KERN_WARNING
45984 "vesafb: cannot reserve video memory at 0x%lx\n",
45985@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct platform_device *dev)
45986 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
45987 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
45988
45989+#ifdef __i386__
45990+
45991+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
45992+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
45993+ if (!pmi_code)
45994+#elif !defined(CONFIG_PAX_KERNEXEC)
45995+ if (0)
45996+#endif
45997+
45998+#endif
45999+ screen_info.vesapm_seg = 0;
46000+
46001 if (screen_info.vesapm_seg) {
46002- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
46003- screen_info.vesapm_seg,screen_info.vesapm_off);
46004+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
46005+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
46006 }
46007
46008 if (screen_info.vesapm_seg < 0xc000)
46009@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct platform_device *dev)
46010
46011 if (ypan || pmi_setpal) {
46012 unsigned short *pmi_base;
46013+
46014 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
46015- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
46016- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
46017+
46018+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46019+ pax_open_kernel();
46020+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
46021+#else
46022+ pmi_code = pmi_base;
46023+#endif
46024+
46025+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
46026+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
46027+
46028+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46029+ pmi_start = ktva_ktla(pmi_start);
46030+ pmi_pal = ktva_ktla(pmi_pal);
46031+ pax_close_kernel();
46032+#endif
46033+
46034 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
46035 if (pmi_base[3]) {
46036 printk(KERN_INFO "vesafb: pmi: ports = ");
46037@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct platform_device *dev)
46038 info->node, info->fix.id);
46039 return 0;
46040 err:
46041+
46042+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
46043+ module_free_exec(NULL, pmi_code);
46044+#endif
46045+
46046 if (info->screen_base)
46047 iounmap(info->screen_base);
46048 framebuffer_release(info);
46049diff --git a/drivers/xen/sys-hypervisor.c b/drivers/xen/sys-hypervisor.c
46050index 88a60e0..6783cc2 100644
46051--- a/drivers/xen/sys-hypervisor.c
46052+++ b/drivers/xen/sys-hypervisor.c
46053@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct kobject *kobj,
46054 return 0;
46055 }
46056
46057-static struct sysfs_ops hyp_sysfs_ops = {
46058+static const struct sysfs_ops hyp_sysfs_ops = {
46059 .show = hyp_sysfs_show,
46060 .store = hyp_sysfs_store,
46061 };
46062diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
46063index 18f74ec..3227009 100644
46064--- a/fs/9p/vfs_inode.c
46065+++ b/fs/9p/vfs_inode.c
46066@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
46067 static void
46068 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46069 {
46070- char *s = nd_get_link(nd);
46071+ const char *s = nd_get_link(nd);
46072
46073 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
46074 IS_ERR(s) ? "<error>" : s);
46075diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
46076index bb4cc5b..df5eaa0 100644
46077--- a/fs/Kconfig.binfmt
46078+++ b/fs/Kconfig.binfmt
46079@@ -86,7 +86,7 @@ config HAVE_AOUT
46080
46081 config BINFMT_AOUT
46082 tristate "Kernel support for a.out and ECOFF binaries"
46083- depends on HAVE_AOUT
46084+ depends on HAVE_AOUT && BROKEN
46085 ---help---
46086 A.out (Assembler.OUTput) is a set of formats for libraries and
46087 executables used in the earliest versions of UNIX. Linux used
46088diff --git a/fs/aio.c b/fs/aio.c
46089index 22a19ad..d484e5b 100644
46090--- a/fs/aio.c
46091+++ b/fs/aio.c
46092@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx *ctx)
46093 size += sizeof(struct io_event) * nr_events;
46094 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
46095
46096- if (nr_pages < 0)
46097+ if (nr_pages <= 0)
46098 return -EINVAL;
46099
46100 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
46101@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ctx,
46102 struct aio_timeout to;
46103 int retry = 0;
46104
46105+ pax_track_stack();
46106+
46107 /* needed to zero any padding within an entry (there shouldn't be
46108 * any, but C is fun!
46109 */
46110@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *iocb)
46111 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
46112 {
46113 ssize_t ret;
46114+ struct iovec iovstack;
46115
46116 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
46117 kiocb->ki_nbytes, 1,
46118- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
46119+ &iovstack, &kiocb->ki_iovec);
46120 if (ret < 0)
46121 goto out;
46122
46123+ if (kiocb->ki_iovec == &iovstack) {
46124+ kiocb->ki_inline_vec = iovstack;
46125+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
46126+ }
46127 kiocb->ki_nr_segs = kiocb->ki_nbytes;
46128 kiocb->ki_cur_seg = 0;
46129 /* ki_nbytes/left now reflect bytes instead of segs */
46130diff --git a/fs/attr.c b/fs/attr.c
46131index 96d394b..33cf5b4 100644
46132--- a/fs/attr.c
46133+++ b/fs/attr.c
46134@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
46135 unsigned long limit;
46136
46137 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
46138+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
46139 if (limit != RLIM_INFINITY && offset > limit)
46140 goto out_sig;
46141 if (offset > inode->i_sb->s_maxbytes)
46142diff --git a/fs/autofs/root.c b/fs/autofs/root.c
46143index 4a1401c..05eb5ca 100644
46144--- a/fs/autofs/root.c
46145+++ b/fs/autofs/root.c
46146@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct inode *dir, struct dentry *dentry, const c
46147 set_bit(n,sbi->symlink_bitmap);
46148 sl = &sbi->symlink[n];
46149 sl->len = strlen(symname);
46150- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
46151+ slsize = sl->len+1;
46152+ sl->data = kmalloc(slsize, GFP_KERNEL);
46153 if (!sl->data) {
46154 clear_bit(n,sbi->symlink_bitmap);
46155 unlock_kernel();
46156diff --git a/fs/autofs4/symlink.c b/fs/autofs4/symlink.c
46157index b4ea829..e63ef18 100644
46158--- a/fs/autofs4/symlink.c
46159+++ b/fs/autofs4/symlink.c
46160@@ -15,7 +15,7 @@
46161 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
46162 {
46163 struct autofs_info *ino = autofs4_dentry_ino(dentry);
46164- nd_set_link(nd, (char *)ino->u.symlink);
46165+ nd_set_link(nd, ino->u.symlink);
46166 return NULL;
46167 }
46168
46169diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
46170index 2341375..df9d1c2 100644
46171--- a/fs/autofs4/waitq.c
46172+++ b/fs/autofs4/waitq.c
46173@@ -60,7 +60,7 @@ static int autofs4_write(struct file *file, const void *addr, int bytes)
46174 {
46175 unsigned long sigpipe, flags;
46176 mm_segment_t fs;
46177- const char *data = (const char *)addr;
46178+ const char __user *data = (const char __force_user *)addr;
46179 ssize_t wr = 0;
46180
46181 /** WARNING: this is not safe for writing more than PIPE_BUF bytes! **/
46182diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
46183index 9158c07..3f06659 100644
46184--- a/fs/befs/linuxvfs.c
46185+++ b/fs/befs/linuxvfs.c
46186@@ -498,7 +498,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46187 {
46188 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
46189 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
46190- char *link = nd_get_link(nd);
46191+ const char *link = nd_get_link(nd);
46192 if (!IS_ERR(link))
46193 kfree(link);
46194 }
46195diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
46196index 0133b5a..b3baa9f 100644
46197--- a/fs/binfmt_aout.c
46198+++ b/fs/binfmt_aout.c
46199@@ -16,6 +16,7 @@
46200 #include <linux/string.h>
46201 #include <linux/fs.h>
46202 #include <linux/file.h>
46203+#include <linux/security.h>
46204 #include <linux/stat.h>
46205 #include <linux/fcntl.h>
46206 #include <linux/ptrace.h>
46207@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46208 #endif
46209 # define START_STACK(u) (u.start_stack)
46210
46211+ memset(&dump, 0, sizeof(dump));
46212+
46213 fs = get_fs();
46214 set_fs(KERNEL_DS);
46215 has_dumped = 1;
46216@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46217
46218 /* If the size of the dump file exceeds the rlimit, then see what would happen
46219 if we wrote the stack, but not the data area. */
46220+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
46221 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
46222 dump.u_dsize = 0;
46223
46224 /* Make sure we have enough room to write the stack and data areas. */
46225+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
46226 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
46227 dump.u_ssize = 0;
46228
46229@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file, u
46230 dump_size = dump.u_ssize << PAGE_SHIFT;
46231 DUMP_WRITE(dump_start,dump_size);
46232 }
46233-/* Finally dump the task struct. Not be used by gdb, but could be useful */
46234- set_fs(KERNEL_DS);
46235- DUMP_WRITE(current,sizeof(*current));
46236+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
46237 end_coredump:
46238 set_fs(fs);
46239 return has_dumped;
46240@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46241 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
46242 if (rlim >= RLIM_INFINITY)
46243 rlim = ~0;
46244+
46245+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
46246 if (ex.a_data + ex.a_bss > rlim)
46247 return -ENOMEM;
46248
46249@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46250 install_exec_creds(bprm);
46251 current->flags &= ~PF_FORKNOEXEC;
46252
46253+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46254+ current->mm->pax_flags = 0UL;
46255+#endif
46256+
46257+#ifdef CONFIG_PAX_PAGEEXEC
46258+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
46259+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
46260+
46261+#ifdef CONFIG_PAX_EMUTRAMP
46262+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
46263+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
46264+#endif
46265+
46266+#ifdef CONFIG_PAX_MPROTECT
46267+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
46268+ current->mm->pax_flags |= MF_PAX_MPROTECT;
46269+#endif
46270+
46271+ }
46272+#endif
46273+
46274 if (N_MAGIC(ex) == OMAGIC) {
46275 unsigned long text_addr, map_size;
46276 loff_t pos;
46277@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
46278
46279 down_write(&current->mm->mmap_sem);
46280 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
46281- PROT_READ | PROT_WRITE | PROT_EXEC,
46282+ PROT_READ | PROT_WRITE,
46283 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
46284 fd_offset + ex.a_text);
46285 up_write(&current->mm->mmap_sem);
46286diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
46287index 1ed37ba..b9c035f 100644
46288--- a/fs/binfmt_elf.c
46289+++ b/fs/binfmt_elf.c
46290@@ -31,6 +31,7 @@
46291 #include <linux/random.h>
46292 #include <linux/elf.h>
46293 #include <linux/utsname.h>
46294+#include <linux/xattr.h>
46295 #include <asm/uaccess.h>
46296 #include <asm/param.h>
46297 #include <asm/page.h>
46298@@ -50,6 +51,10 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
46299 #define elf_core_dump NULL
46300 #endif
46301
46302+#ifdef CONFIG_PAX_MPROTECT
46303+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
46304+#endif
46305+
46306 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
46307 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
46308 #else
46309@@ -69,6 +74,11 @@ static struct linux_binfmt elf_format = {
46310 .load_binary = load_elf_binary,
46311 .load_shlib = load_elf_library,
46312 .core_dump = elf_core_dump,
46313+
46314+#ifdef CONFIG_PAX_MPROTECT
46315+ .handle_mprotect= elf_handle_mprotect,
46316+#endif
46317+
46318 .min_coredump = ELF_EXEC_PAGESIZE,
46319 .hasvdso = 1
46320 };
46321@@ -77,6 +87,8 @@ static struct linux_binfmt elf_format = {
46322
46323 static int set_brk(unsigned long start, unsigned long end)
46324 {
46325+ unsigned long e = end;
46326+
46327 start = ELF_PAGEALIGN(start);
46328 end = ELF_PAGEALIGN(end);
46329 if (end > start) {
46330@@ -87,7 +99,7 @@ static int set_brk(unsigned long start, unsigned long end)
46331 if (BAD_ADDR(addr))
46332 return addr;
46333 }
46334- current->mm->start_brk = current->mm->brk = end;
46335+ current->mm->start_brk = current->mm->brk = e;
46336 return 0;
46337 }
46338
46339@@ -148,12 +160,15 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46340 elf_addr_t __user *u_rand_bytes;
46341 const char *k_platform = ELF_PLATFORM;
46342 const char *k_base_platform = ELF_BASE_PLATFORM;
46343- unsigned char k_rand_bytes[16];
46344+ u32 k_rand_bytes[4];
46345 int items;
46346 elf_addr_t *elf_info;
46347 int ei_index = 0;
46348 const struct cred *cred = current_cred();
46349 struct vm_area_struct *vma;
46350+ unsigned long saved_auxv[AT_VECTOR_SIZE];
46351+
46352+ pax_track_stack();
46353
46354 /*
46355 * In some cases (e.g. Hyper-Threading), we want to avoid L1
46356@@ -195,8 +210,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46357 * Generate 16 random bytes for userspace PRNG seeding.
46358 */
46359 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
46360- u_rand_bytes = (elf_addr_t __user *)
46361- STACK_ALLOC(p, sizeof(k_rand_bytes));
46362+ srandom32(k_rand_bytes[0] ^ random32());
46363+ srandom32(k_rand_bytes[1] ^ random32());
46364+ srandom32(k_rand_bytes[2] ^ random32());
46365+ srandom32(k_rand_bytes[3] ^ random32());
46366+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
46367+ u_rand_bytes = (elf_addr_t __user *) p;
46368 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
46369 return -EFAULT;
46370
46371@@ -308,9 +327,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
46372 return -EFAULT;
46373 current->mm->env_end = p;
46374
46375+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
46376+
46377 /* Put the elf_info on the stack in the right place. */
46378 sp = (elf_addr_t __user *)envp + 1;
46379- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
46380+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
46381 return -EFAULT;
46382 return 0;
46383 }
46384@@ -385,10 +406,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46385 {
46386 struct elf_phdr *elf_phdata;
46387 struct elf_phdr *eppnt;
46388- unsigned long load_addr = 0;
46389+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
46390 int load_addr_set = 0;
46391 unsigned long last_bss = 0, elf_bss = 0;
46392- unsigned long error = ~0UL;
46393+ unsigned long error = -EINVAL;
46394 unsigned long total_size;
46395 int retval, i, size;
46396
46397@@ -434,6 +455,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46398 goto out_close;
46399 }
46400
46401+#ifdef CONFIG_PAX_SEGMEXEC
46402+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
46403+ pax_task_size = SEGMEXEC_TASK_SIZE;
46404+#endif
46405+
46406 eppnt = elf_phdata;
46407 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
46408 if (eppnt->p_type == PT_LOAD) {
46409@@ -477,8 +503,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
46410 k = load_addr + eppnt->p_vaddr;
46411 if (BAD_ADDR(k) ||
46412 eppnt->p_filesz > eppnt->p_memsz ||
46413- eppnt->p_memsz > TASK_SIZE ||
46414- TASK_SIZE - eppnt->p_memsz < k) {
46415+ eppnt->p_memsz > pax_task_size ||
46416+ pax_task_size - eppnt->p_memsz < k) {
46417 error = -ENOMEM;
46418 goto out_close;
46419 }
46420@@ -532,6 +558,348 @@ out:
46421 return error;
46422 }
46423
46424+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
46425+{
46426+ unsigned long pax_flags = 0UL;
46427+
46428+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46429+
46430+#ifdef CONFIG_PAX_PAGEEXEC
46431+ if (elf_phdata->p_flags & PF_PAGEEXEC)
46432+ pax_flags |= MF_PAX_PAGEEXEC;
46433+#endif
46434+
46435+#ifdef CONFIG_PAX_SEGMEXEC
46436+ if (elf_phdata->p_flags & PF_SEGMEXEC)
46437+ pax_flags |= MF_PAX_SEGMEXEC;
46438+#endif
46439+
46440+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46441+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46442+ if (nx_enabled)
46443+ pax_flags &= ~MF_PAX_SEGMEXEC;
46444+ else
46445+ pax_flags &= ~MF_PAX_PAGEEXEC;
46446+ }
46447+#endif
46448+
46449+#ifdef CONFIG_PAX_EMUTRAMP
46450+ if (elf_phdata->p_flags & PF_EMUTRAMP)
46451+ pax_flags |= MF_PAX_EMUTRAMP;
46452+#endif
46453+
46454+#ifdef CONFIG_PAX_MPROTECT
46455+ if (elf_phdata->p_flags & PF_MPROTECT)
46456+ pax_flags |= MF_PAX_MPROTECT;
46457+#endif
46458+
46459+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46460+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
46461+ pax_flags |= MF_PAX_RANDMMAP;
46462+#endif
46463+
46464+#endif
46465+
46466+ return pax_flags;
46467+}
46468+
46469+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
46470+{
46471+ unsigned long pax_flags = 0UL;
46472+
46473+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46474+
46475+#ifdef CONFIG_PAX_PAGEEXEC
46476+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
46477+ pax_flags |= MF_PAX_PAGEEXEC;
46478+#endif
46479+
46480+#ifdef CONFIG_PAX_SEGMEXEC
46481+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
46482+ pax_flags |= MF_PAX_SEGMEXEC;
46483+#endif
46484+
46485+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46486+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46487+ if (nx_enabled)
46488+ pax_flags &= ~MF_PAX_SEGMEXEC;
46489+ else
46490+ pax_flags &= ~MF_PAX_PAGEEXEC;
46491+ }
46492+#endif
46493+
46494+#ifdef CONFIG_PAX_EMUTRAMP
46495+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
46496+ pax_flags |= MF_PAX_EMUTRAMP;
46497+#endif
46498+
46499+#ifdef CONFIG_PAX_MPROTECT
46500+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
46501+ pax_flags |= MF_PAX_MPROTECT;
46502+#endif
46503+
46504+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46505+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
46506+ pax_flags |= MF_PAX_RANDMMAP;
46507+#endif
46508+
46509+#endif
46510+
46511+ return pax_flags;
46512+}
46513+
46514+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
46515+{
46516+ unsigned long pax_flags = 0UL;
46517+
46518+#ifdef CONFIG_PAX_EI_PAX
46519+
46520+#ifdef CONFIG_PAX_PAGEEXEC
46521+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
46522+ pax_flags |= MF_PAX_PAGEEXEC;
46523+#endif
46524+
46525+#ifdef CONFIG_PAX_SEGMEXEC
46526+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
46527+ pax_flags |= MF_PAX_SEGMEXEC;
46528+#endif
46529+
46530+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46531+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46532+ if (nx_enabled)
46533+ pax_flags &= ~MF_PAX_SEGMEXEC;
46534+ else
46535+ pax_flags &= ~MF_PAX_PAGEEXEC;
46536+ }
46537+#endif
46538+
46539+#ifdef CONFIG_PAX_EMUTRAMP
46540+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
46541+ pax_flags |= MF_PAX_EMUTRAMP;
46542+#endif
46543+
46544+#ifdef CONFIG_PAX_MPROTECT
46545+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
46546+ pax_flags |= MF_PAX_MPROTECT;
46547+#endif
46548+
46549+#ifdef CONFIG_PAX_ASLR
46550+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
46551+ pax_flags |= MF_PAX_RANDMMAP;
46552+#endif
46553+
46554+#else
46555+
46556+#ifdef CONFIG_PAX_PAGEEXEC
46557+ pax_flags |= MF_PAX_PAGEEXEC;
46558+#endif
46559+
46560+#ifdef CONFIG_PAX_MPROTECT
46561+ pax_flags |= MF_PAX_MPROTECT;
46562+#endif
46563+
46564+#ifdef CONFIG_PAX_RANDMMAP
46565+ pax_flags |= MF_PAX_RANDMMAP;
46566+#endif
46567+
46568+#ifdef CONFIG_PAX_SEGMEXEC
46569+ if (!(__supported_pte_mask & _PAGE_NX)) {
46570+ pax_flags &= ~MF_PAX_PAGEEXEC;
46571+ pax_flags |= MF_PAX_SEGMEXEC;
46572+ }
46573+#endif
46574+
46575+#endif
46576+
46577+ return pax_flags;
46578+}
46579+
46580+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
46581+{
46582+
46583+#ifdef CONFIG_PAX_PT_PAX_FLAGS
46584+ unsigned long i;
46585+
46586+ for (i = 0UL; i < elf_ex->e_phnum; i++)
46587+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
46588+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
46589+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
46590+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
46591+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
46592+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
46593+ return ~0UL;
46594+
46595+#ifdef CONFIG_PAX_SOFTMODE
46596+ if (pax_softmode)
46597+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
46598+ else
46599+#endif
46600+
46601+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
46602+ break;
46603+ }
46604+#endif
46605+
46606+ return ~0UL;
46607+}
46608+
46609+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
46610+{
46611+ unsigned long pax_flags = 0UL;
46612+
46613+#ifdef CONFIG_PAX_PAGEEXEC
46614+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
46615+ pax_flags |= MF_PAX_PAGEEXEC;
46616+#endif
46617+
46618+#ifdef CONFIG_PAX_SEGMEXEC
46619+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
46620+ pax_flags |= MF_PAX_SEGMEXEC;
46621+#endif
46622+
46623+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46624+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46625+ if ((__supported_pte_mask & _PAGE_NX))
46626+ pax_flags &= ~MF_PAX_SEGMEXEC;
46627+ else
46628+ pax_flags &= ~MF_PAX_PAGEEXEC;
46629+ }
46630+#endif
46631+
46632+#ifdef CONFIG_PAX_EMUTRAMP
46633+ if (pax_flags_softmode & MF_PAX_EMUTRAMP)
46634+ pax_flags |= MF_PAX_EMUTRAMP;
46635+#endif
46636+
46637+#ifdef CONFIG_PAX_MPROTECT
46638+ if (pax_flags_softmode & MF_PAX_MPROTECT)
46639+ pax_flags |= MF_PAX_MPROTECT;
46640+#endif
46641+
46642+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46643+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
46644+ pax_flags |= MF_PAX_RANDMMAP;
46645+#endif
46646+
46647+ return pax_flags;
46648+}
46649+
46650+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
46651+{
46652+ unsigned long pax_flags = 0UL;
46653+
46654+#ifdef CONFIG_PAX_PAGEEXEC
46655+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
46656+ pax_flags |= MF_PAX_PAGEEXEC;
46657+#endif
46658+
46659+#ifdef CONFIG_PAX_SEGMEXEC
46660+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
46661+ pax_flags |= MF_PAX_SEGMEXEC;
46662+#endif
46663+
46664+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
46665+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46666+ if ((__supported_pte_mask & _PAGE_NX))
46667+ pax_flags &= ~MF_PAX_SEGMEXEC;
46668+ else
46669+ pax_flags &= ~MF_PAX_PAGEEXEC;
46670+ }
46671+#endif
46672+
46673+#ifdef CONFIG_PAX_EMUTRAMP
46674+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
46675+ pax_flags |= MF_PAX_EMUTRAMP;
46676+#endif
46677+
46678+#ifdef CONFIG_PAX_MPROTECT
46679+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
46680+ pax_flags |= MF_PAX_MPROTECT;
46681+#endif
46682+
46683+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
46684+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
46685+ pax_flags |= MF_PAX_RANDMMAP;
46686+#endif
46687+
46688+ return pax_flags;
46689+}
46690+
46691+static unsigned long pax_parse_xattr_pax(struct file * const file)
46692+{
46693+
46694+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
46695+ ssize_t xattr_size, i;
46696+ unsigned char xattr_value[5];
46697+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
46698+
46699+ xattr_size = vfs_getxattr(file->f_path.dentry, XATTR_NAME_PAX_FLAGS, xattr_value, sizeof xattr_value);
46700+ if (xattr_size <= 0)
46701+ return ~0UL;
46702+
46703+ for (i = 0; i < xattr_size; i++)
46704+ switch (xattr_value[i]) {
46705+ default:
46706+ return ~0UL;
46707+
46708+#define parse_flag(option1, option2, flag) \
46709+ case option1: \
46710+ pax_flags_hardmode |= MF_PAX_##flag; \
46711+ break; \
46712+ case option2: \
46713+ pax_flags_softmode |= MF_PAX_##flag; \
46714+ break;
46715+
46716+ parse_flag('p', 'P', PAGEEXEC);
46717+ parse_flag('e', 'E', EMUTRAMP);
46718+ parse_flag('m', 'M', MPROTECT);
46719+ parse_flag('r', 'R', RANDMMAP);
46720+ parse_flag('s', 'S', SEGMEXEC);
46721+
46722+#undef parse_flag
46723+ }
46724+
46725+ if (pax_flags_hardmode & pax_flags_softmode)
46726+ return ~0UL;
46727+
46728+#ifdef CONFIG_PAX_SOFTMODE
46729+ if (pax_softmode)
46730+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
46731+ else
46732+#endif
46733+
46734+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
46735+#else
46736+ return ~0UL;
46737+#endif
46738+}
46739+
46740+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46741+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
46742+{
46743+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
46744+
46745+ pax_flags = pax_parse_ei_pax(elf_ex);
46746+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
46747+ xattr_pax_flags = pax_parse_xattr_pax(file);
46748+
46749+ if (pt_pax_flags == ~0UL)
46750+ pt_pax_flags = xattr_pax_flags;
46751+ else if (xattr_pax_flags == ~0UL)
46752+ xattr_pax_flags = pt_pax_flags;
46753+ if (pt_pax_flags != xattr_pax_flags)
46754+ return -EINVAL;
46755+ if (pt_pax_flags != ~0UL)
46756+ pax_flags = pt_pax_flags;
46757+
46758+ if (0 > pax_check_flags(&pax_flags))
46759+ return -EINVAL;
46760+
46761+ current->mm->pax_flags = pax_flags;
46762+ return 0;
46763+}
46764+#endif
46765+
46766 /*
46767 * These are the functions used to load ELF style executables and shared
46768 * libraries. There is no binary dependent code anywhere else.
46769@@ -548,6 +916,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
46770 {
46771 unsigned int random_variable = 0;
46772
46773+#ifdef CONFIG_PAX_RANDUSTACK
46774+ if (randomize_va_space)
46775+ return stack_top - current->mm->delta_stack;
46776+#endif
46777+
46778 if ((current->flags & PF_RANDOMIZE) &&
46779 !(current->personality & ADDR_NO_RANDOMIZE)) {
46780 random_variable = get_random_int() & STACK_RND_MASK;
46781@@ -566,7 +939,7 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46782 unsigned long load_addr = 0, load_bias = 0;
46783 int load_addr_set = 0;
46784 char * elf_interpreter = NULL;
46785- unsigned long error;
46786+ unsigned long error = 0;
46787 struct elf_phdr *elf_ppnt, *elf_phdata;
46788 unsigned long elf_bss, elf_brk;
46789 int retval, i;
46790@@ -576,11 +949,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46791 unsigned long start_code, end_code, start_data, end_data;
46792 unsigned long reloc_func_desc = 0;
46793 int executable_stack = EXSTACK_DEFAULT;
46794- unsigned long def_flags = 0;
46795 struct {
46796 struct elfhdr elf_ex;
46797 struct elfhdr interp_elf_ex;
46798 } *loc;
46799+ unsigned long pax_task_size = TASK_SIZE;
46800
46801 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
46802 if (!loc) {
46803@@ -718,11 +1091,80 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46804
46805 /* OK, This is the point of no return */
46806 current->flags &= ~PF_FORKNOEXEC;
46807- current->mm->def_flags = def_flags;
46808+
46809+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
46810+ current->mm->pax_flags = 0UL;
46811+#endif
46812+
46813+#ifdef CONFIG_PAX_DLRESOLVE
46814+ current->mm->call_dl_resolve = 0UL;
46815+#endif
46816+
46817+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
46818+ current->mm->call_syscall = 0UL;
46819+#endif
46820+
46821+#ifdef CONFIG_PAX_ASLR
46822+ current->mm->delta_mmap = 0UL;
46823+ current->mm->delta_stack = 0UL;
46824+#endif
46825+
46826+ current->mm->def_flags = 0;
46827+
46828+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_XATTR_PAX_FLAGS)
46829+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
46830+ send_sig(SIGKILL, current, 0);
46831+ goto out_free_dentry;
46832+ }
46833+#endif
46834+
46835+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
46836+ pax_set_initial_flags(bprm);
46837+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
46838+ if (pax_set_initial_flags_func)
46839+ (pax_set_initial_flags_func)(bprm);
46840+#endif
46841+
46842+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46843+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
46844+ current->mm->context.user_cs_limit = PAGE_SIZE;
46845+ current->mm->def_flags |= VM_PAGEEXEC;
46846+ }
46847+#endif
46848+
46849+#ifdef CONFIG_PAX_SEGMEXEC
46850+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
46851+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
46852+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
46853+ pax_task_size = SEGMEXEC_TASK_SIZE;
46854+ }
46855+#endif
46856+
46857+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
46858+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46859+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
46860+ put_cpu();
46861+ }
46862+#endif
46863
46864 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
46865 may depend on the personality. */
46866 SET_PERSONALITY(loc->elf_ex);
46867+
46868+#ifdef CONFIG_PAX_ASLR
46869+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
46870+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
46871+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
46872+ }
46873+#endif
46874+
46875+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
46876+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
46877+ executable_stack = EXSTACK_DISABLE_X;
46878+ current->personality &= ~READ_IMPLIES_EXEC;
46879+ } else
46880+#endif
46881+
46882 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
46883 current->personality |= READ_IMPLIES_EXEC;
46884
46885@@ -800,10 +1242,27 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46886 * might try to exec. This is because the brk will
46887 * follow the loader, and is not movable. */
46888 #ifdef CONFIG_X86
46889- load_bias = 0;
46890+ if (current->flags & PF_RANDOMIZE)
46891+ load_bias = 0;
46892+ else
46893+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46894 #else
46895 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
46896 #endif
46897+
46898+#ifdef CONFIG_PAX_RANDMMAP
46899+ /* PaX: randomize base address at the default exe base if requested */
46900+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
46901+#ifdef CONFIG_SPARC64
46902+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
46903+#else
46904+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
46905+#endif
46906+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
46907+ elf_flags |= MAP_FIXED;
46908+ }
46909+#endif
46910+
46911 }
46912
46913 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
46914@@ -836,9 +1295,9 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46915 * allowed task size. Note that p_filesz must always be
46916 * <= p_memsz so it is only necessary to check p_memsz.
46917 */
46918- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46919- elf_ppnt->p_memsz > TASK_SIZE ||
46920- TASK_SIZE - elf_ppnt->p_memsz < k) {
46921+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
46922+ elf_ppnt->p_memsz > pax_task_size ||
46923+ pax_task_size - elf_ppnt->p_memsz < k) {
46924 /* set_brk can never work. Avoid overflows. */
46925 send_sig(SIGKILL, current, 0);
46926 retval = -EINVAL;
46927@@ -866,6 +1325,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46928 start_data += load_bias;
46929 end_data += load_bias;
46930
46931+#ifdef CONFIG_PAX_RANDMMAP
46932+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
46933+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
46934+#endif
46935+
46936 /* Calling set_brk effectively mmaps the pages that we need
46937 * for the bss and break sections. We must do this before
46938 * mapping in the interpreter, to make sure it doesn't wind
46939@@ -877,9 +1341,11 @@ static int load_elf_binary(struct linux_binprm *bprm, struct pt_regs *regs)
46940 goto out_free_dentry;
46941 }
46942 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
46943- send_sig(SIGSEGV, current, 0);
46944- retval = -EFAULT; /* Nobody gets to see this, but.. */
46945- goto out_free_dentry;
46946+ /*
46947+ * This bss-zeroing can fail if the ELF
46948+ * file specifies odd protections. So
46949+ * we don't check the return value
46950+ */
46951 }
46952
46953 if (elf_interpreter) {
46954@@ -1112,8 +1578,10 @@ static int dump_seek(struct file *file, loff_t off)
46955 unsigned long n = off;
46956 if (n > PAGE_SIZE)
46957 n = PAGE_SIZE;
46958- if (!dump_write(file, buf, n))
46959+ if (!dump_write(file, buf, n)) {
46960+ free_page((unsigned long)buf);
46961 return 0;
46962+ }
46963 off -= n;
46964 }
46965 free_page((unsigned long)buf);
46966@@ -1125,7 +1593,7 @@ static int dump_seek(struct file *file, loff_t off)
46967 * Decide what to dump of a segment, part, all or none.
46968 */
46969 static unsigned long vma_dump_size(struct vm_area_struct *vma,
46970- unsigned long mm_flags)
46971+ unsigned long mm_flags, long signr)
46972 {
46973 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
46974
46975@@ -1159,7 +1627,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
46976 if (vma->vm_file == NULL)
46977 return 0;
46978
46979- if (FILTER(MAPPED_PRIVATE))
46980+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
46981 goto whole;
46982
46983 /*
46984@@ -1255,8 +1723,11 @@ static int writenote(struct memelfnote *men, struct file *file,
46985 #undef DUMP_WRITE
46986
46987 #define DUMP_WRITE(addr, nr) \
46988+ do { \
46989+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
46990 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
46991- goto end_coredump;
46992+ goto end_coredump; \
46993+ } while (0);
46994
46995 static void fill_elf_header(struct elfhdr *elf, int segs,
46996 u16 machine, u32 flags, u8 osabi)
46997@@ -1385,9 +1856,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
46998 {
46999 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
47000 int i = 0;
47001- do
47002+ do {
47003 i += 2;
47004- while (auxv[i - 2] != AT_NULL);
47005+ } while (auxv[i - 2] != AT_NULL);
47006 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
47007 }
47008
47009@@ -1973,7 +2444,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47010 phdr.p_offset = offset;
47011 phdr.p_vaddr = vma->vm_start;
47012 phdr.p_paddr = 0;
47013- phdr.p_filesz = vma_dump_size(vma, mm_flags);
47014+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
47015 phdr.p_memsz = vma->vm_end - vma->vm_start;
47016 offset += phdr.p_filesz;
47017 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
47018@@ -2006,7 +2477,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47019 unsigned long addr;
47020 unsigned long end;
47021
47022- end = vma->vm_start + vma_dump_size(vma, mm_flags);
47023+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
47024
47025 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
47026 struct page *page;
47027@@ -2015,6 +2486,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
47028 page = get_dump_page(addr);
47029 if (page) {
47030 void *kaddr = kmap(page);
47031+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
47032 stop = ((size += PAGE_SIZE) > limit) ||
47033 !dump_write(file, kaddr, PAGE_SIZE);
47034 kunmap(page);
47035@@ -2042,6 +2514,97 @@ out:
47036
47037 #endif /* USE_ELF_CORE_DUMP */
47038
47039+#ifdef CONFIG_PAX_MPROTECT
47040+/* PaX: non-PIC ELF libraries need relocations on their executable segments
47041+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
47042+ * we'll remove VM_MAYWRITE for good on RELRO segments.
47043+ *
47044+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
47045+ * basis because we want to allow the common case and not the special ones.
47046+ */
47047+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
47048+{
47049+ struct elfhdr elf_h;
47050+ struct elf_phdr elf_p;
47051+ unsigned long i;
47052+ unsigned long oldflags;
47053+ bool is_textrel_rw, is_textrel_rx, is_relro;
47054+
47055+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
47056+ return;
47057+
47058+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
47059+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
47060+
47061+#ifdef CONFIG_PAX_ELFRELOCS
47062+ /* possible TEXTREL */
47063+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
47064+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
47065+#else
47066+ is_textrel_rw = false;
47067+ is_textrel_rx = false;
47068+#endif
47069+
47070+ /* possible RELRO */
47071+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
47072+
47073+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
47074+ return;
47075+
47076+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
47077+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
47078+
47079+#ifdef CONFIG_PAX_ETEXECRELOCS
47080+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47081+#else
47082+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
47083+#endif
47084+
47085+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
47086+ !elf_check_arch(&elf_h) ||
47087+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
47088+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
47089+ return;
47090+
47091+ for (i = 0UL; i < elf_h.e_phnum; i++) {
47092+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
47093+ return;
47094+ switch (elf_p.p_type) {
47095+ case PT_DYNAMIC:
47096+ if (!is_textrel_rw && !is_textrel_rx)
47097+ continue;
47098+ i = 0UL;
47099+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
47100+ elf_dyn dyn;
47101+
47102+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
47103+ return;
47104+ if (dyn.d_tag == DT_NULL)
47105+ return;
47106+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
47107+ gr_log_textrel(vma);
47108+ if (is_textrel_rw)
47109+ vma->vm_flags |= VM_MAYWRITE;
47110+ else
47111+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
47112+ vma->vm_flags &= ~VM_MAYWRITE;
47113+ return;
47114+ }
47115+ i++;
47116+ }
47117+ return;
47118+
47119+ case PT_GNU_RELRO:
47120+ if (!is_relro)
47121+ continue;
47122+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
47123+ vma->vm_flags &= ~VM_MAYWRITE;
47124+ return;
47125+ }
47126+ }
47127+}
47128+#endif
47129+
47130 static int __init init_elf_binfmt(void)
47131 {
47132 return register_binfmt(&elf_format);
47133diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
47134index ca88c46..f155a60 100644
47135--- a/fs/binfmt_flat.c
47136+++ b/fs/binfmt_flat.c
47137@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_binprm * bprm,
47138 realdatastart = (unsigned long) -ENOMEM;
47139 printk("Unable to allocate RAM for process data, errno %d\n",
47140 (int)-realdatastart);
47141+ down_write(&current->mm->mmap_sem);
47142 do_munmap(current->mm, textpos, text_len);
47143+ up_write(&current->mm->mmap_sem);
47144 ret = realdatastart;
47145 goto err;
47146 }
47147@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47148 }
47149 if (IS_ERR_VALUE(result)) {
47150 printk("Unable to read data+bss, errno %d\n", (int)-result);
47151+ down_write(&current->mm->mmap_sem);
47152 do_munmap(current->mm, textpos, text_len);
47153 do_munmap(current->mm, realdatastart, data_len + extra);
47154+ up_write(&current->mm->mmap_sem);
47155 ret = result;
47156 goto err;
47157 }
47158@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_binprm * bprm,
47159 }
47160 if (IS_ERR_VALUE(result)) {
47161 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
47162+ down_write(&current->mm->mmap_sem);
47163 do_munmap(current->mm, textpos, text_len + data_len + extra +
47164 MAX_SHARED_LIBS * sizeof(unsigned long));
47165+ up_write(&current->mm->mmap_sem);
47166 ret = result;
47167 goto err;
47168 }
47169diff --git a/fs/bio.c b/fs/bio.c
47170index e696713..83de133 100644
47171--- a/fs/bio.c
47172+++ b/fs/bio.c
47173@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
47174
47175 i = 0;
47176 while (i < bio_slab_nr) {
47177- struct bio_slab *bslab = &bio_slabs[i];
47178+ bslab = &bio_slabs[i];
47179
47180 if (!bslab->slab && entry == -1)
47181 entry = i;
47182@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
47183 const int read = bio_data_dir(bio) == READ;
47184 struct bio_map_data *bmd = bio->bi_private;
47185 int i;
47186- char *p = bmd->sgvecs[0].iov_base;
47187+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
47188
47189 __bio_for_each_segment(bvec, bio, i, 0) {
47190 char *addr = page_address(bvec->bv_page);
47191diff --git a/fs/block_dev.c b/fs/block_dev.c
47192index e65efa2..04fae57 100644
47193--- a/fs/block_dev.c
47194+++ b/fs/block_dev.c
47195@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev, void *holder)
47196 else if (bdev->bd_contains == bdev)
47197 res = 0; /* is a whole device which isn't held */
47198
47199- else if (bdev->bd_contains->bd_holder == bd_claim)
47200+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
47201 res = 0; /* is a partition of a device that is being partitioned */
47202 else if (bdev->bd_contains->bd_holder != NULL)
47203 res = -EBUSY; /* is a partition of a held device */
47204diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
47205index c4bc570..42acd8d 100644
47206--- a/fs/btrfs/ctree.c
47207+++ b/fs/btrfs/ctree.c
47208@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
47209 free_extent_buffer(buf);
47210 add_root_to_dirty_list(root);
47211 } else {
47212- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
47213- parent_start = parent->start;
47214- else
47215+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
47216+ if (parent)
47217+ parent_start = parent->start;
47218+ else
47219+ parent_start = 0;
47220+ } else
47221 parent_start = 0;
47222
47223 WARN_ON(trans->transid != btrfs_header_generation(parent));
47224@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans,
47225
47226 ret = 0;
47227 if (slot == 0) {
47228- struct btrfs_disk_key disk_key;
47229 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
47230 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
47231 }
47232diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
47233index f447188..59c17c5 100644
47234--- a/fs/btrfs/disk-io.c
47235+++ b/fs/btrfs/disk-io.c
47236@@ -39,7 +39,7 @@
47237 #include "tree-log.h"
47238 #include "free-space-cache.h"
47239
47240-static struct extent_io_ops btree_extent_io_ops;
47241+static const struct extent_io_ops btree_extent_io_ops;
47242 static void end_workqueue_fn(struct btrfs_work *work);
47243 static void free_fs_root(struct btrfs_root *root);
47244
47245@@ -2607,7 +2607,7 @@ out:
47246 return 0;
47247 }
47248
47249-static struct extent_io_ops btree_extent_io_ops = {
47250+static const struct extent_io_ops btree_extent_io_ops = {
47251 .write_cache_pages_lock_hook = btree_lock_page_hook,
47252 .readpage_end_io_hook = btree_readpage_end_io_hook,
47253 .submit_bio_hook = btree_submit_bio_hook,
47254diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
47255index 559f724..a026171 100644
47256--- a/fs/btrfs/extent-tree.c
47257+++ b/fs/btrfs/extent-tree.c
47258@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(struct btrfs_root *extent_root,
47259 u64 group_start = group->key.objectid;
47260 new_extents = kmalloc(sizeof(*new_extents),
47261 GFP_NOFS);
47262+ if (!new_extents) {
47263+ ret = -ENOMEM;
47264+ goto out;
47265+ }
47266 nr_extents = 1;
47267 ret = get_new_locations(reloc_inode,
47268 extent_key,
47269diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
47270index 36de250..7ec75c7 100644
47271--- a/fs/btrfs/extent_io.h
47272+++ b/fs/btrfs/extent_io.h
47273@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(struct inode *inode, int rw,
47274 struct bio *bio, int mirror_num,
47275 unsigned long bio_flags);
47276 struct extent_io_ops {
47277- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
47278+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
47279 u64 start, u64 end, int *page_started,
47280 unsigned long *nr_written);
47281- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
47282- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
47283+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
47284+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
47285 extent_submit_bio_hook_t *submit_bio_hook;
47286- int (*merge_bio_hook)(struct page *page, unsigned long offset,
47287+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
47288 size_t size, struct bio *bio,
47289 unsigned long bio_flags);
47290- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
47291- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
47292+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
47293+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
47294 u64 start, u64 end,
47295 struct extent_state *state);
47296- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
47297+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
47298 u64 start, u64 end,
47299 struct extent_state *state);
47300- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47301+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
47302 struct extent_state *state);
47303- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47304+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
47305 struct extent_state *state, int uptodate);
47306- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
47307+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
47308 unsigned long old, unsigned long bits);
47309- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
47310+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
47311 unsigned long bits);
47312- int (*merge_extent_hook)(struct inode *inode,
47313+ int (* const merge_extent_hook)(struct inode *inode,
47314 struct extent_state *new,
47315 struct extent_state *other);
47316- int (*split_extent_hook)(struct inode *inode,
47317+ int (* const split_extent_hook)(struct inode *inode,
47318 struct extent_state *orig, u64 split);
47319- int (*write_cache_pages_lock_hook)(struct page *page);
47320+ int (* const write_cache_pages_lock_hook)(struct page *page);
47321 };
47322
47323 struct extent_io_tree {
47324@@ -88,7 +88,7 @@ struct extent_io_tree {
47325 u64 dirty_bytes;
47326 spinlock_t lock;
47327 spinlock_t buffer_lock;
47328- struct extent_io_ops *ops;
47329+ const struct extent_io_ops *ops;
47330 };
47331
47332 struct extent_state {
47333diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
47334index cb2849f..3718fb4 100644
47335--- a/fs/btrfs/free-space-cache.c
47336+++ b/fs/btrfs/free-space-cache.c
47337@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
47338
47339 while(1) {
47340 if (entry->bytes < bytes || entry->offset < min_start) {
47341- struct rb_node *node;
47342-
47343 node = rb_next(&entry->offset_index);
47344 if (!node)
47345 break;
47346@@ -1226,7 +1224,7 @@ again:
47347 */
47348 while (entry->bitmap || found_bitmap ||
47349 (!entry->bitmap && entry->bytes < min_bytes)) {
47350- struct rb_node *node = rb_next(&entry->offset_index);
47351+ node = rb_next(&entry->offset_index);
47352
47353 if (entry->bitmap && entry->bytes > bytes + empty_size) {
47354 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
47355diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
47356index e03a836..323837e 100644
47357--- a/fs/btrfs/inode.c
47358+++ b/fs/btrfs/inode.c
47359@@ -63,7 +63,7 @@ static const struct inode_operations btrfs_file_inode_operations;
47360 static const struct address_space_operations btrfs_aops;
47361 static const struct address_space_operations btrfs_symlink_aops;
47362 static const struct file_operations btrfs_dir_file_operations;
47363-static struct extent_io_ops btrfs_extent_io_ops;
47364+static const struct extent_io_ops btrfs_extent_io_ops;
47365
47366 static struct kmem_cache *btrfs_inode_cachep;
47367 struct kmem_cache *btrfs_trans_handle_cachep;
47368@@ -925,6 +925,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
47369 1, 0, NULL, GFP_NOFS);
47370 while (start < end) {
47371 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
47372+ BUG_ON(!async_cow);
47373 async_cow->inode = inode;
47374 async_cow->root = root;
47375 async_cow->locked_page = locked_page;
47376@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(struct btrfs_path *path,
47377 inline_size = btrfs_file_extent_inline_item_len(leaf,
47378 btrfs_item_nr(leaf, path->slots[0]));
47379 tmp = kmalloc(inline_size, GFP_NOFS);
47380+ if (!tmp)
47381+ return -ENOMEM;
47382 ptr = btrfs_file_extent_inline_start(item);
47383
47384 read_extent_buffer(leaf, tmp, ptr, inline_size);
47385@@ -5410,7 +5413,7 @@ fail:
47386 return -ENOMEM;
47387 }
47388
47389-static int btrfs_getattr(struct vfsmount *mnt,
47390+int btrfs_getattr(struct vfsmount *mnt,
47391 struct dentry *dentry, struct kstat *stat)
47392 {
47393 struct inode *inode = dentry->d_inode;
47394@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount *mnt,
47395 return 0;
47396 }
47397
47398+EXPORT_SYMBOL(btrfs_getattr);
47399+
47400+dev_t get_btrfs_dev_from_inode(struct inode *inode)
47401+{
47402+ return BTRFS_I(inode)->root->anon_super.s_dev;
47403+}
47404+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
47405+
47406 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
47407 struct inode *new_dir, struct dentry *new_dentry)
47408 {
47409@@ -5972,7 +5983,7 @@ static const struct file_operations btrfs_dir_file_operations = {
47410 .fsync = btrfs_sync_file,
47411 };
47412
47413-static struct extent_io_ops btrfs_extent_io_ops = {
47414+static const struct extent_io_ops btrfs_extent_io_ops = {
47415 .fill_delalloc = run_delalloc_range,
47416 .submit_bio_hook = btrfs_submit_bio_hook,
47417 .merge_bio_hook = btrfs_merge_bio_hook,
47418diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
47419index ab7ab53..94e0781 100644
47420--- a/fs/btrfs/relocation.c
47421+++ b/fs/btrfs/relocation.c
47422@@ -884,7 +884,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
47423 }
47424 spin_unlock(&rc->reloc_root_tree.lock);
47425
47426- BUG_ON((struct btrfs_root *)node->data != root);
47427+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
47428
47429 if (!del) {
47430 spin_lock(&rc->reloc_root_tree.lock);
47431diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
47432index a240b6f..4ce16ef 100644
47433--- a/fs/btrfs/sysfs.c
47434+++ b/fs/btrfs/sysfs.c
47435@@ -164,12 +164,12 @@ static void btrfs_root_release(struct kobject *kobj)
47436 complete(&root->kobj_unregister);
47437 }
47438
47439-static struct sysfs_ops btrfs_super_attr_ops = {
47440+static const struct sysfs_ops btrfs_super_attr_ops = {
47441 .show = btrfs_super_attr_show,
47442 .store = btrfs_super_attr_store,
47443 };
47444
47445-static struct sysfs_ops btrfs_root_attr_ops = {
47446+static const struct sysfs_ops btrfs_root_attr_ops = {
47447 .show = btrfs_root_attr_show,
47448 .store = btrfs_root_attr_store,
47449 };
47450diff --git a/fs/buffer.c b/fs/buffer.c
47451index 6fa5302..395d9f6 100644
47452--- a/fs/buffer.c
47453+++ b/fs/buffer.c
47454@@ -25,6 +25,7 @@
47455 #include <linux/percpu.h>
47456 #include <linux/slab.h>
47457 #include <linux/capability.h>
47458+#include <linux/security.h>
47459 #include <linux/blkdev.h>
47460 #include <linux/file.h>
47461 #include <linux/quotaops.h>
47462diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
47463index 3797e00..ce776f6 100644
47464--- a/fs/cachefiles/bind.c
47465+++ b/fs/cachefiles/bind.c
47466@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
47467 args);
47468
47469 /* start by checking things over */
47470- ASSERT(cache->fstop_percent >= 0 &&
47471- cache->fstop_percent < cache->fcull_percent &&
47472+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
47473 cache->fcull_percent < cache->frun_percent &&
47474 cache->frun_percent < 100);
47475
47476- ASSERT(cache->bstop_percent >= 0 &&
47477- cache->bstop_percent < cache->bcull_percent &&
47478+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
47479 cache->bcull_percent < cache->brun_percent &&
47480 cache->brun_percent < 100);
47481
47482diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
47483index 4618516..bb30d01 100644
47484--- a/fs/cachefiles/daemon.c
47485+++ b/fs/cachefiles/daemon.c
47486@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
47487 if (test_bit(CACHEFILES_DEAD, &cache->flags))
47488 return -EIO;
47489
47490- if (datalen < 0 || datalen > PAGE_SIZE - 1)
47491+ if (datalen > PAGE_SIZE - 1)
47492 return -EOPNOTSUPP;
47493
47494 /* drag the command string into the kernel so we can parse it */
47495@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
47496 if (args[0] != '%' || args[1] != '\0')
47497 return -EINVAL;
47498
47499- if (fstop < 0 || fstop >= cache->fcull_percent)
47500+ if (fstop >= cache->fcull_percent)
47501 return cachefiles_daemon_range_error(cache, args);
47502
47503 cache->fstop_percent = fstop;
47504@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
47505 if (args[0] != '%' || args[1] != '\0')
47506 return -EINVAL;
47507
47508- if (bstop < 0 || bstop >= cache->bcull_percent)
47509+ if (bstop >= cache->bcull_percent)
47510 return cachefiles_daemon_range_error(cache, args);
47511
47512 cache->bstop_percent = bstop;
47513diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
47514index f7c255f..fcd61de 100644
47515--- a/fs/cachefiles/internal.h
47516+++ b/fs/cachefiles/internal.h
47517@@ -56,7 +56,7 @@ struct cachefiles_cache {
47518 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
47519 struct rb_root active_nodes; /* active nodes (can't be culled) */
47520 rwlock_t active_lock; /* lock for active_nodes */
47521- atomic_t gravecounter; /* graveyard uniquifier */
47522+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
47523 unsigned frun_percent; /* when to stop culling (% files) */
47524 unsigned fcull_percent; /* when to start culling (% files) */
47525 unsigned fstop_percent; /* when to stop allocating (% files) */
47526@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
47527 * proc.c
47528 */
47529 #ifdef CONFIG_CACHEFILES_HISTOGRAM
47530-extern atomic_t cachefiles_lookup_histogram[HZ];
47531-extern atomic_t cachefiles_mkdir_histogram[HZ];
47532-extern atomic_t cachefiles_create_histogram[HZ];
47533+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47534+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47535+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
47536
47537 extern int __init cachefiles_proc_init(void);
47538 extern void cachefiles_proc_cleanup(void);
47539 static inline
47540-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
47541+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
47542 {
47543 unsigned long jif = jiffies - start_jif;
47544 if (jif >= HZ)
47545 jif = HZ - 1;
47546- atomic_inc(&histogram[jif]);
47547+ atomic_inc_unchecked(&histogram[jif]);
47548 }
47549
47550 #else
47551diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
47552index 14ac480..a62766c 100644
47553--- a/fs/cachefiles/namei.c
47554+++ b/fs/cachefiles/namei.c
47555@@ -250,7 +250,7 @@ try_again:
47556 /* first step is to make up a grave dentry in the graveyard */
47557 sprintf(nbuffer, "%08x%08x",
47558 (uint32_t) get_seconds(),
47559- (uint32_t) atomic_inc_return(&cache->gravecounter));
47560+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
47561
47562 /* do the multiway lock magic */
47563 trap = lock_rename(cache->graveyard, dir);
47564diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
47565index eccd339..4c1d995 100644
47566--- a/fs/cachefiles/proc.c
47567+++ b/fs/cachefiles/proc.c
47568@@ -14,9 +14,9 @@
47569 #include <linux/seq_file.h>
47570 #include "internal.h"
47571
47572-atomic_t cachefiles_lookup_histogram[HZ];
47573-atomic_t cachefiles_mkdir_histogram[HZ];
47574-atomic_t cachefiles_create_histogram[HZ];
47575+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
47576+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
47577+atomic_unchecked_t cachefiles_create_histogram[HZ];
47578
47579 /*
47580 * display the latency histogram
47581@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
47582 return 0;
47583 default:
47584 index = (unsigned long) v - 3;
47585- x = atomic_read(&cachefiles_lookup_histogram[index]);
47586- y = atomic_read(&cachefiles_mkdir_histogram[index]);
47587- z = atomic_read(&cachefiles_create_histogram[index]);
47588+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
47589+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
47590+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
47591 if (x == 0 && y == 0 && z == 0)
47592 return 0;
47593
47594diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
47595index a6c8c6f..5cf8517 100644
47596--- a/fs/cachefiles/rdwr.c
47597+++ b/fs/cachefiles/rdwr.c
47598@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
47599 old_fs = get_fs();
47600 set_fs(KERNEL_DS);
47601 ret = file->f_op->write(
47602- file, (const void __user *) data, len, &pos);
47603+ file, (const void __force_user *) data, len, &pos);
47604 set_fs(old_fs);
47605 kunmap(page);
47606 if (ret != len)
47607diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
47608index 42cec2a..2aba466 100644
47609--- a/fs/cifs/cifs_debug.c
47610+++ b/fs/cifs/cifs_debug.c
47611@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(struct file *file,
47612 tcon = list_entry(tmp3,
47613 struct cifsTconInfo,
47614 tcon_list);
47615- atomic_set(&tcon->num_smbs_sent, 0);
47616- atomic_set(&tcon->num_writes, 0);
47617- atomic_set(&tcon->num_reads, 0);
47618- atomic_set(&tcon->num_oplock_brks, 0);
47619- atomic_set(&tcon->num_opens, 0);
47620- atomic_set(&tcon->num_posixopens, 0);
47621- atomic_set(&tcon->num_posixmkdirs, 0);
47622- atomic_set(&tcon->num_closes, 0);
47623- atomic_set(&tcon->num_deletes, 0);
47624- atomic_set(&tcon->num_mkdirs, 0);
47625- atomic_set(&tcon->num_rmdirs, 0);
47626- atomic_set(&tcon->num_renames, 0);
47627- atomic_set(&tcon->num_t2renames, 0);
47628- atomic_set(&tcon->num_ffirst, 0);
47629- atomic_set(&tcon->num_fnext, 0);
47630- atomic_set(&tcon->num_fclose, 0);
47631- atomic_set(&tcon->num_hardlinks, 0);
47632- atomic_set(&tcon->num_symlinks, 0);
47633- atomic_set(&tcon->num_locks, 0);
47634+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
47635+ atomic_set_unchecked(&tcon->num_writes, 0);
47636+ atomic_set_unchecked(&tcon->num_reads, 0);
47637+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
47638+ atomic_set_unchecked(&tcon->num_opens, 0);
47639+ atomic_set_unchecked(&tcon->num_posixopens, 0);
47640+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
47641+ atomic_set_unchecked(&tcon->num_closes, 0);
47642+ atomic_set_unchecked(&tcon->num_deletes, 0);
47643+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
47644+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
47645+ atomic_set_unchecked(&tcon->num_renames, 0);
47646+ atomic_set_unchecked(&tcon->num_t2renames, 0);
47647+ atomic_set_unchecked(&tcon->num_ffirst, 0);
47648+ atomic_set_unchecked(&tcon->num_fnext, 0);
47649+ atomic_set_unchecked(&tcon->num_fclose, 0);
47650+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
47651+ atomic_set_unchecked(&tcon->num_symlinks, 0);
47652+ atomic_set_unchecked(&tcon->num_locks, 0);
47653 }
47654 }
47655 }
47656@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
47657 if (tcon->need_reconnect)
47658 seq_puts(m, "\tDISCONNECTED ");
47659 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
47660- atomic_read(&tcon->num_smbs_sent),
47661- atomic_read(&tcon->num_oplock_brks));
47662+ atomic_read_unchecked(&tcon->num_smbs_sent),
47663+ atomic_read_unchecked(&tcon->num_oplock_brks));
47664 seq_printf(m, "\nReads: %d Bytes: %lld",
47665- atomic_read(&tcon->num_reads),
47666+ atomic_read_unchecked(&tcon->num_reads),
47667 (long long)(tcon->bytes_read));
47668 seq_printf(m, "\nWrites: %d Bytes: %lld",
47669- atomic_read(&tcon->num_writes),
47670+ atomic_read_unchecked(&tcon->num_writes),
47671 (long long)(tcon->bytes_written));
47672 seq_printf(m, "\nFlushes: %d",
47673- atomic_read(&tcon->num_flushes));
47674+ atomic_read_unchecked(&tcon->num_flushes));
47675 seq_printf(m, "\nLocks: %d HardLinks: %d "
47676 "Symlinks: %d",
47677- atomic_read(&tcon->num_locks),
47678- atomic_read(&tcon->num_hardlinks),
47679- atomic_read(&tcon->num_symlinks));
47680+ atomic_read_unchecked(&tcon->num_locks),
47681+ atomic_read_unchecked(&tcon->num_hardlinks),
47682+ atomic_read_unchecked(&tcon->num_symlinks));
47683 seq_printf(m, "\nOpens: %d Closes: %d "
47684 "Deletes: %d",
47685- atomic_read(&tcon->num_opens),
47686- atomic_read(&tcon->num_closes),
47687- atomic_read(&tcon->num_deletes));
47688+ atomic_read_unchecked(&tcon->num_opens),
47689+ atomic_read_unchecked(&tcon->num_closes),
47690+ atomic_read_unchecked(&tcon->num_deletes));
47691 seq_printf(m, "\nPosix Opens: %d "
47692 "Posix Mkdirs: %d",
47693- atomic_read(&tcon->num_posixopens),
47694- atomic_read(&tcon->num_posixmkdirs));
47695+ atomic_read_unchecked(&tcon->num_posixopens),
47696+ atomic_read_unchecked(&tcon->num_posixmkdirs));
47697 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
47698- atomic_read(&tcon->num_mkdirs),
47699- atomic_read(&tcon->num_rmdirs));
47700+ atomic_read_unchecked(&tcon->num_mkdirs),
47701+ atomic_read_unchecked(&tcon->num_rmdirs));
47702 seq_printf(m, "\nRenames: %d T2 Renames %d",
47703- atomic_read(&tcon->num_renames),
47704- atomic_read(&tcon->num_t2renames));
47705+ atomic_read_unchecked(&tcon->num_renames),
47706+ atomic_read_unchecked(&tcon->num_t2renames));
47707 seq_printf(m, "\nFindFirst: %d FNext %d "
47708 "FClose %d",
47709- atomic_read(&tcon->num_ffirst),
47710- atomic_read(&tcon->num_fnext),
47711- atomic_read(&tcon->num_fclose));
47712+ atomic_read_unchecked(&tcon->num_ffirst),
47713+ atomic_read_unchecked(&tcon->num_fnext),
47714+ atomic_read_unchecked(&tcon->num_fclose));
47715 }
47716 }
47717 }
47718diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
47719index 1445407..68cb0dc 100644
47720--- a/fs/cifs/cifsfs.c
47721+++ b/fs/cifs/cifsfs.c
47722@@ -869,7 +869,7 @@ cifs_init_request_bufs(void)
47723 cifs_req_cachep = kmem_cache_create("cifs_request",
47724 CIFSMaxBufSize +
47725 MAX_CIFS_HDR_SIZE, 0,
47726- SLAB_HWCACHE_ALIGN, NULL);
47727+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
47728 if (cifs_req_cachep == NULL)
47729 return -ENOMEM;
47730
47731@@ -896,7 +896,7 @@ cifs_init_request_bufs(void)
47732 efficient to alloc 1 per page off the slab compared to 17K (5page)
47733 alloc of large cifs buffers even when page debugging is on */
47734 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
47735- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
47736+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
47737 NULL);
47738 if (cifs_sm_req_cachep == NULL) {
47739 mempool_destroy(cifs_req_poolp);
47740@@ -991,8 +991,8 @@ init_cifs(void)
47741 atomic_set(&bufAllocCount, 0);
47742 atomic_set(&smBufAllocCount, 0);
47743 #ifdef CONFIG_CIFS_STATS2
47744- atomic_set(&totBufAllocCount, 0);
47745- atomic_set(&totSmBufAllocCount, 0);
47746+ atomic_set_unchecked(&totBufAllocCount, 0);
47747+ atomic_set_unchecked(&totSmBufAllocCount, 0);
47748 #endif /* CONFIG_CIFS_STATS2 */
47749
47750 atomic_set(&midCount, 0);
47751diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
47752index e29581e..1c22bab 100644
47753--- a/fs/cifs/cifsglob.h
47754+++ b/fs/cifs/cifsglob.h
47755@@ -252,28 +252,28 @@ struct cifsTconInfo {
47756 __u16 Flags; /* optional support bits */
47757 enum statusEnum tidStatus;
47758 #ifdef CONFIG_CIFS_STATS
47759- atomic_t num_smbs_sent;
47760- atomic_t num_writes;
47761- atomic_t num_reads;
47762- atomic_t num_flushes;
47763- atomic_t num_oplock_brks;
47764- atomic_t num_opens;
47765- atomic_t num_closes;
47766- atomic_t num_deletes;
47767- atomic_t num_mkdirs;
47768- atomic_t num_posixopens;
47769- atomic_t num_posixmkdirs;
47770- atomic_t num_rmdirs;
47771- atomic_t num_renames;
47772- atomic_t num_t2renames;
47773- atomic_t num_ffirst;
47774- atomic_t num_fnext;
47775- atomic_t num_fclose;
47776- atomic_t num_hardlinks;
47777- atomic_t num_symlinks;
47778- atomic_t num_locks;
47779- atomic_t num_acl_get;
47780- atomic_t num_acl_set;
47781+ atomic_unchecked_t num_smbs_sent;
47782+ atomic_unchecked_t num_writes;
47783+ atomic_unchecked_t num_reads;
47784+ atomic_unchecked_t num_flushes;
47785+ atomic_unchecked_t num_oplock_brks;
47786+ atomic_unchecked_t num_opens;
47787+ atomic_unchecked_t num_closes;
47788+ atomic_unchecked_t num_deletes;
47789+ atomic_unchecked_t num_mkdirs;
47790+ atomic_unchecked_t num_posixopens;
47791+ atomic_unchecked_t num_posixmkdirs;
47792+ atomic_unchecked_t num_rmdirs;
47793+ atomic_unchecked_t num_renames;
47794+ atomic_unchecked_t num_t2renames;
47795+ atomic_unchecked_t num_ffirst;
47796+ atomic_unchecked_t num_fnext;
47797+ atomic_unchecked_t num_fclose;
47798+ atomic_unchecked_t num_hardlinks;
47799+ atomic_unchecked_t num_symlinks;
47800+ atomic_unchecked_t num_locks;
47801+ atomic_unchecked_t num_acl_get;
47802+ atomic_unchecked_t num_acl_set;
47803 #ifdef CONFIG_CIFS_STATS2
47804 unsigned long long time_writes;
47805 unsigned long long time_reads;
47806@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb)
47807 }
47808
47809 #ifdef CONFIG_CIFS_STATS
47810-#define cifs_stats_inc atomic_inc
47811+#define cifs_stats_inc atomic_inc_unchecked
47812
47813 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
47814 unsigned int bytes)
47815@@ -701,8 +701,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
47816 /* Various Debug counters */
47817 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
47818 #ifdef CONFIG_CIFS_STATS2
47819-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
47820-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
47821+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
47822+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
47823 #endif
47824 GLOBAL_EXTERN atomic_t smBufAllocCount;
47825 GLOBAL_EXTERN atomic_t midCount;
47826diff --git a/fs/cifs/link.c b/fs/cifs/link.c
47827index fc1e048..28b3441 100644
47828--- a/fs/cifs/link.c
47829+++ b/fs/cifs/link.c
47830@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
47831
47832 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
47833 {
47834- char *p = nd_get_link(nd);
47835+ const char *p = nd_get_link(nd);
47836 if (!IS_ERR(p))
47837 kfree(p);
47838 }
47839diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
47840index 95b82e8..12a538d 100644
47841--- a/fs/cifs/misc.c
47842+++ b/fs/cifs/misc.c
47843@@ -155,7 +155,7 @@ cifs_buf_get(void)
47844 memset(ret_buf, 0, sizeof(struct smb_hdr) + 3);
47845 atomic_inc(&bufAllocCount);
47846 #ifdef CONFIG_CIFS_STATS2
47847- atomic_inc(&totBufAllocCount);
47848+ atomic_inc_unchecked(&totBufAllocCount);
47849 #endif /* CONFIG_CIFS_STATS2 */
47850 }
47851
47852@@ -190,7 +190,7 @@ cifs_small_buf_get(void)
47853 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
47854 atomic_inc(&smBufAllocCount);
47855 #ifdef CONFIG_CIFS_STATS2
47856- atomic_inc(&totSmBufAllocCount);
47857+ atomic_inc_unchecked(&totSmBufAllocCount);
47858 #endif /* CONFIG_CIFS_STATS2 */
47859
47860 }
47861diff --git a/fs/coda/cache.c b/fs/coda/cache.c
47862index a5bf577..6d19845 100644
47863--- a/fs/coda/cache.c
47864+++ b/fs/coda/cache.c
47865@@ -24,14 +24,14 @@
47866 #include <linux/coda_fs_i.h>
47867 #include <linux/coda_cache.h>
47868
47869-static atomic_t permission_epoch = ATOMIC_INIT(0);
47870+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
47871
47872 /* replace or extend an acl cache hit */
47873 void coda_cache_enter(struct inode *inode, int mask)
47874 {
47875 struct coda_inode_info *cii = ITOC(inode);
47876
47877- cii->c_cached_epoch = atomic_read(&permission_epoch);
47878+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
47879 if (cii->c_uid != current_fsuid()) {
47880 cii->c_uid = current_fsuid();
47881 cii->c_cached_perm = mask;
47882@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inode, int mask)
47883 void coda_cache_clear_inode(struct inode *inode)
47884 {
47885 struct coda_inode_info *cii = ITOC(inode);
47886- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
47887+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
47888 }
47889
47890 /* remove all acl caches */
47891 void coda_cache_clear_all(struct super_block *sb)
47892 {
47893- atomic_inc(&permission_epoch);
47894+ atomic_inc_unchecked(&permission_epoch);
47895 }
47896
47897
47898@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode, int mask)
47899
47900 hit = (mask & cii->c_cached_perm) == mask &&
47901 cii->c_uid == current_fsuid() &&
47902- cii->c_cached_epoch == atomic_read(&permission_epoch);
47903+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
47904
47905 return hit;
47906 }
47907diff --git a/fs/compat.c b/fs/compat.c
47908index d1e2411..b1eda5d 100644
47909--- a/fs/compat.c
47910+++ b/fs/compat.c
47911@@ -133,8 +133,8 @@ asmlinkage long compat_sys_utimes(char __user *filename, struct compat_timeval _
47912 static int cp_compat_stat(struct kstat *stat, struct compat_stat __user *ubuf)
47913 {
47914 compat_ino_t ino = stat->ino;
47915- typeof(ubuf->st_uid) uid = 0;
47916- typeof(ubuf->st_gid) gid = 0;
47917+ typeof(((struct compat_stat *)0)->st_uid) uid = 0;
47918+ typeof(((struct compat_stat *)0)->st_gid) gid = 0;
47919 int err;
47920
47921 SET_UID(uid, stat->uid);
47922@@ -533,7 +533,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
47923
47924 set_fs(KERNEL_DS);
47925 /* The __user pointer cast is valid because of the set_fs() */
47926- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
47927+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
47928 set_fs(oldfs);
47929 /* truncating is ok because it's a user address */
47930 if (!ret)
47931@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
47932
47933 struct compat_readdir_callback {
47934 struct compat_old_linux_dirent __user *dirent;
47935+ struct file * file;
47936 int result;
47937 };
47938
47939@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
47940 buf->result = -EOVERFLOW;
47941 return -EOVERFLOW;
47942 }
47943+
47944+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47945+ return 0;
47946+
47947 buf->result++;
47948 dirent = buf->dirent;
47949 if (!access_ok(VERIFY_WRITE, dirent,
47950@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
47951
47952 buf.result = 0;
47953 buf.dirent = dirent;
47954+ buf.file = file;
47955
47956 error = vfs_readdir(file, compat_fillonedir, &buf);
47957 if (buf.result)
47958@@ -899,6 +905,7 @@ struct compat_linux_dirent {
47959 struct compat_getdents_callback {
47960 struct compat_linux_dirent __user *current_dir;
47961 struct compat_linux_dirent __user *previous;
47962+ struct file * file;
47963 int count;
47964 int error;
47965 };
47966@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
47967 buf->error = -EOVERFLOW;
47968 return -EOVERFLOW;
47969 }
47970+
47971+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47972+ return 0;
47973+
47974 dirent = buf->previous;
47975 if (dirent) {
47976 if (__put_user(offset, &dirent->d_off))
47977@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
47978 buf.previous = NULL;
47979 buf.count = count;
47980 buf.error = 0;
47981+ buf.file = file;
47982
47983 error = vfs_readdir(file, compat_filldir, &buf);
47984 if (error >= 0)
47985@@ -987,6 +999,7 @@ out:
47986 struct compat_getdents_callback64 {
47987 struct linux_dirent64 __user *current_dir;
47988 struct linux_dirent64 __user *previous;
47989+ struct file * file;
47990 int count;
47991 int error;
47992 };
47993@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
47994 buf->error = -EINVAL; /* only used if we fail.. */
47995 if (reclen > buf->count)
47996 return -EINVAL;
47997+
47998+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
47999+ return 0;
48000+
48001 dirent = buf->previous;
48002
48003 if (dirent) {
48004@@ -1054,13 +1071,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
48005 buf.previous = NULL;
48006 buf.count = count;
48007 buf.error = 0;
48008+ buf.file = file;
48009
48010 error = vfs_readdir(file, compat_filldir64, &buf);
48011 if (error >= 0)
48012 error = buf.error;
48013 lastdirent = buf.previous;
48014 if (lastdirent) {
48015- typeof(lastdirent->d_off) d_off = file->f_pos;
48016+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
48017 if (__put_user_unaligned(d_off, &lastdirent->d_off))
48018 error = -EFAULT;
48019 else
48020@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
48021 * verify all the pointers
48022 */
48023 ret = -EINVAL;
48024- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
48025+ if (nr_segs > UIO_MAXIOV)
48026 goto out;
48027 if (!file->f_op)
48028 goto out;
48029@@ -1463,11 +1481,35 @@ int compat_do_execve(char * filename,
48030 compat_uptr_t __user *envp,
48031 struct pt_regs * regs)
48032 {
48033+#ifdef CONFIG_GRKERNSEC
48034+ struct file *old_exec_file;
48035+ struct acl_subject_label *old_acl;
48036+ struct rlimit old_rlim[RLIM_NLIMITS];
48037+#endif
48038 struct linux_binprm *bprm;
48039 struct file *file;
48040 struct files_struct *displaced;
48041 bool clear_in_exec;
48042 int retval;
48043+ const struct cred *cred = current_cred();
48044+
48045+ /*
48046+ * We move the actual failure in case of RLIMIT_NPROC excess from
48047+ * set*uid() to execve() because too many poorly written programs
48048+ * don't check setuid() return code. Here we additionally recheck
48049+ * whether NPROC limit is still exceeded.
48050+ */
48051+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48052+
48053+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48054+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48055+ retval = -EAGAIN;
48056+ goto out_ret;
48057+ }
48058+
48059+ /* We're below the limit (still or again), so we don't want to make
48060+ * further execve() calls fail. */
48061+ current->flags &= ~PF_NPROC_EXCEEDED;
48062
48063 retval = unshare_files(&displaced);
48064 if (retval)
48065@@ -1493,12 +1535,26 @@ int compat_do_execve(char * filename,
48066 if (IS_ERR(file))
48067 goto out_unmark;
48068
48069+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
48070+ retval = -EPERM;
48071+ goto out_file;
48072+ }
48073+
48074 sched_exec();
48075
48076 bprm->file = file;
48077 bprm->filename = filename;
48078 bprm->interp = filename;
48079
48080+ if (gr_process_user_ban()) {
48081+ retval = -EPERM;
48082+ goto out_file;
48083+ }
48084+
48085+ retval = -EACCES;
48086+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
48087+ goto out_file;
48088+
48089 retval = bprm_mm_init(bprm);
48090 if (retval)
48091 goto out_file;
48092@@ -1528,9 +1584,40 @@ int compat_do_execve(char * filename,
48093 if (retval < 0)
48094 goto out;
48095
48096+ if (!gr_tpe_allow(file)) {
48097+ retval = -EACCES;
48098+ goto out;
48099+ }
48100+
48101+ if (gr_check_crash_exec(file)) {
48102+ retval = -EACCES;
48103+ goto out;
48104+ }
48105+
48106+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48107+
48108+ gr_handle_exec_args_compat(bprm, argv);
48109+
48110+#ifdef CONFIG_GRKERNSEC
48111+ old_acl = current->acl;
48112+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48113+ old_exec_file = current->exec_file;
48114+ get_file(file);
48115+ current->exec_file = file;
48116+#endif
48117+
48118+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48119+ bprm->unsafe);
48120+ if (retval < 0)
48121+ goto out_fail;
48122+
48123 retval = search_binary_handler(bprm, regs);
48124 if (retval < 0)
48125- goto out;
48126+ goto out_fail;
48127+#ifdef CONFIG_GRKERNSEC
48128+ if (old_exec_file)
48129+ fput(old_exec_file);
48130+#endif
48131
48132 /* execve succeeded */
48133 current->fs->in_exec = 0;
48134@@ -1541,6 +1628,14 @@ int compat_do_execve(char * filename,
48135 put_files_struct(displaced);
48136 return retval;
48137
48138+out_fail:
48139+#ifdef CONFIG_GRKERNSEC
48140+ current->acl = old_acl;
48141+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48142+ fput(current->exec_file);
48143+ current->exec_file = old_exec_file;
48144+#endif
48145+
48146 out:
48147 if (bprm->mm) {
48148 acct_arg_size(bprm, 0);
48149@@ -1711,6 +1806,8 @@ int compat_core_sys_select(int n, compat_ulong_t __user *inp,
48150 struct fdtable *fdt;
48151 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
48152
48153+ pax_track_stack();
48154+
48155 if (n < 0)
48156 goto out_nofds;
48157
48158@@ -2151,7 +2248,7 @@ asmlinkage long compat_sys_nfsservctl(int cmd,
48159 oldfs = get_fs();
48160 set_fs(KERNEL_DS);
48161 /* The __user pointer casts are valid because of the set_fs() */
48162- err = sys_nfsservctl(cmd, (void __user *) karg, (void __user *) kres);
48163+ err = sys_nfsservctl(cmd, (void __force_user *) karg, (void __force_user *) kres);
48164 set_fs(oldfs);
48165
48166 if (err)
48167diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
48168index 0adced2..bbb1b0d 100644
48169--- a/fs/compat_binfmt_elf.c
48170+++ b/fs/compat_binfmt_elf.c
48171@@ -29,10 +29,12 @@
48172 #undef elfhdr
48173 #undef elf_phdr
48174 #undef elf_note
48175+#undef elf_dyn
48176 #undef elf_addr_t
48177 #define elfhdr elf32_hdr
48178 #define elf_phdr elf32_phdr
48179 #define elf_note elf32_note
48180+#define elf_dyn Elf32_Dyn
48181 #define elf_addr_t Elf32_Addr
48182
48183 /*
48184diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
48185index d84e705..d8c364c 100644
48186--- a/fs/compat_ioctl.c
48187+++ b/fs/compat_ioctl.c
48188@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, unsigned
48189 up = (struct compat_video_spu_palette __user *) arg;
48190 err = get_user(palp, &up->palette);
48191 err |= get_user(length, &up->length);
48192+ if (err)
48193+ return -EFAULT;
48194
48195 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
48196 err = put_user(compat_ptr(palp), &up_native->palette);
48197@@ -1513,7 +1515,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd, unsigned long arg)
48198 return -EFAULT;
48199 if (__get_user(udata, &ss32->iomem_base))
48200 return -EFAULT;
48201- ss.iomem_base = compat_ptr(udata);
48202+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
48203 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
48204 __get_user(ss.port_high, &ss32->port_high))
48205 return -EFAULT;
48206@@ -1809,7 +1811,7 @@ static int compat_ioctl_preallocate(struct file *file, unsigned long arg)
48207 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
48208 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
48209 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
48210- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48211+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
48212 return -EFAULT;
48213
48214 return ioctl_preallocate(file, p);
48215diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
48216index 8e48b52..f01ed91 100644
48217--- a/fs/configfs/dir.c
48218+++ b/fs/configfs/dir.c
48219@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48220 }
48221 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
48222 struct configfs_dirent *next;
48223- const char * name;
48224+ const unsigned char * name;
48225+ char d_name[sizeof(next->s_dentry->d_iname)];
48226 int len;
48227
48228 next = list_entry(p, struct configfs_dirent,
48229@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
48230 continue;
48231
48232 name = configfs_get_name(next);
48233- len = strlen(name);
48234+ if (next->s_dentry && name == next->s_dentry->d_iname) {
48235+ len = next->s_dentry->d_name.len;
48236+ memcpy(d_name, name, len);
48237+ name = d_name;
48238+ } else
48239+ len = strlen(name);
48240 if (next->s_dentry)
48241 ino = next->s_dentry->d_inode->i_ino;
48242 else
48243diff --git a/fs/dcache.c b/fs/dcache.c
48244index 44c0aea..2529092 100644
48245--- a/fs/dcache.c
48246+++ b/fs/dcache.c
48247@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
48248
48249 static struct kmem_cache *dentry_cache __read_mostly;
48250
48251-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
48252-
48253 /*
48254 * This is the single most critical data structure when it comes
48255 * to the dcache: the hashtable for lookups. Somebody should try
48256@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned long mempages)
48257 mempages -= reserve;
48258
48259 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
48260- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
48261+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
48262
48263 dcache_init();
48264 inode_init();
48265diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
48266index 39c6ee8..dcee0f1 100644
48267--- a/fs/debugfs/inode.c
48268+++ b/fs/debugfs/inode.c
48269@@ -269,7 +269,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
48270 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
48271 {
48272 return debugfs_create_file(name,
48273+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
48274+ S_IFDIR | S_IRWXU,
48275+#else
48276 S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
48277+#endif
48278 parent, NULL, NULL);
48279 }
48280 EXPORT_SYMBOL_GPL(debugfs_create_dir);
48281diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
48282index c010ecf..a8d8c59 100644
48283--- a/fs/dlm/lockspace.c
48284+++ b/fs/dlm/lockspace.c
48285@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struct kobject *k)
48286 kfree(ls);
48287 }
48288
48289-static struct sysfs_ops dlm_attr_ops = {
48290+static const struct sysfs_ops dlm_attr_ops = {
48291 .show = dlm_attr_show,
48292 .store = dlm_attr_store,
48293 };
48294diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
48295index 7a5f1ac..205b034 100644
48296--- a/fs/ecryptfs/crypto.c
48297+++ b/fs/ecryptfs/crypto.c
48298@@ -418,17 +418,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48299 rc);
48300 goto out;
48301 }
48302- if (unlikely(ecryptfs_verbosity > 0)) {
48303- ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
48304- "with iv:\n");
48305- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48306- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48307- "encryption:\n");
48308- ecryptfs_dump_hex((char *)
48309- (page_address(page)
48310- + (extent_offset * crypt_stat->extent_size)),
48311- 8);
48312- }
48313 rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
48314 page, (extent_offset
48315 * crypt_stat->extent_size),
48316@@ -441,14 +430,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
48317 goto out;
48318 }
48319 rc = 0;
48320- if (unlikely(ecryptfs_verbosity > 0)) {
48321- ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16x]; "
48322- "rc = [%d]\n", (extent_base + extent_offset),
48323- rc);
48324- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48325- "encryption:\n");
48326- ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
48327- }
48328 out:
48329 return rc;
48330 }
48331@@ -545,17 +526,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48332 rc);
48333 goto out;
48334 }
48335- if (unlikely(ecryptfs_verbosity > 0)) {
48336- ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
48337- "with iv:\n");
48338- ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
48339- ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
48340- "decryption:\n");
48341- ecryptfs_dump_hex((char *)
48342- (page_address(enc_extent_page)
48343- + (extent_offset * crypt_stat->extent_size)),
48344- 8);
48345- }
48346 rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
48347 (extent_offset
48348 * crypt_stat->extent_size),
48349@@ -569,16 +539,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
48350 goto out;
48351 }
48352 rc = 0;
48353- if (unlikely(ecryptfs_verbosity > 0)) {
48354- ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16x]; "
48355- "rc = [%d]\n", (extent_base + extent_offset),
48356- rc);
48357- ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
48358- "decryption:\n");
48359- ecryptfs_dump_hex((char *)(page_address(page)
48360- + (extent_offset
48361- * crypt_stat->extent_size)), 8);
48362- }
48363 out:
48364 return rc;
48365 }
48366diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
48367index 88ba4d4..073f003 100644
48368--- a/fs/ecryptfs/inode.c
48369+++ b/fs/ecryptfs/inode.c
48370@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
48371 old_fs = get_fs();
48372 set_fs(get_ds());
48373 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
48374- (char __user *)lower_buf,
48375+ (char __force_user *)lower_buf,
48376 lower_bufsiz);
48377 set_fs(old_fs);
48378 if (rc < 0)
48379@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48380 }
48381 old_fs = get_fs();
48382 set_fs(get_ds());
48383- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
48384+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
48385 set_fs(old_fs);
48386 if (rc < 0)
48387 goto out_free;
48388diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
48389index 6b78546..7ba3260 100644
48390--- a/fs/ecryptfs/read_write.c
48391+++ b/fs/ecryptfs/read_write.c
48392@@ -134,7 +134,12 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
48393 pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
48394 size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
48395 size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
48396- size_t total_remaining_bytes = ((offset + size) - pos);
48397+ loff_t total_remaining_bytes = ((offset + size) - pos);
48398+
48399+ if (fatal_signal_pending(current)) {
48400+ rc = -EINTR;
48401+ break;
48402+ }
48403
48404 if (fatal_signal_pending(current)) {
48405 rc = -EINTR;
48406@@ -145,7 +150,7 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
48407 num_bytes = total_remaining_bytes;
48408 if (pos < offset) {
48409 /* remaining zeros to write, up to destination offset */
48410- size_t total_remaining_zeros = (offset - pos);
48411+ loff_t total_remaining_zeros = (offset - pos);
48412
48413 if (num_bytes > total_remaining_zeros)
48414 num_bytes = total_remaining_zeros;
48415diff --git a/fs/exec.c b/fs/exec.c
48416index 86fafc6..5033350 100644
48417--- a/fs/exec.c
48418+++ b/fs/exec.c
48419@@ -56,12 +56,28 @@
48420 #include <linux/fsnotify.h>
48421 #include <linux/fs_struct.h>
48422 #include <linux/pipe_fs_i.h>
48423+#include <linux/random.h>
48424+#include <linux/seq_file.h>
48425+
48426+#ifdef CONFIG_PAX_REFCOUNT
48427+#include <linux/kallsyms.h>
48428+#include <linux/kdebug.h>
48429+#endif
48430
48431 #include <asm/uaccess.h>
48432 #include <asm/mmu_context.h>
48433 #include <asm/tlb.h>
48434 #include "internal.h"
48435
48436+#ifndef CONFIG_PAX_HAVE_ACL_FLAGS
48437+void __weak pax_set_initial_flags(struct linux_binprm *bprm) {}
48438+#endif
48439+
48440+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
48441+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
48442+EXPORT_SYMBOL(pax_set_initial_flags_func);
48443+#endif
48444+
48445 int core_uses_pid;
48446 char core_pattern[CORENAME_MAX_SIZE] = "core";
48447 unsigned int core_pipe_limit;
48448@@ -178,18 +194,10 @@ struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
48449 int write)
48450 {
48451 struct page *page;
48452- int ret;
48453
48454-#ifdef CONFIG_STACK_GROWSUP
48455- if (write) {
48456- ret = expand_stack_downwards(bprm->vma, pos);
48457- if (ret < 0)
48458- return NULL;
48459- }
48460-#endif
48461- ret = get_user_pages(current, bprm->mm, pos,
48462- 1, write, 1, &page, NULL);
48463- if (ret <= 0)
48464+ if (0 > expand_stack_downwards(bprm->vma, pos))
48465+ return NULL;
48466+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
48467 return NULL;
48468
48469 if (write) {
48470@@ -263,6 +271,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48471 vma->vm_end = STACK_TOP_MAX;
48472 vma->vm_start = vma->vm_end - PAGE_SIZE;
48473 vma->vm_flags = VM_STACK_FLAGS;
48474+
48475+#ifdef CONFIG_PAX_SEGMEXEC
48476+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
48477+#endif
48478+
48479 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
48480
48481 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
48482@@ -276,6 +289,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
48483 mm->stack_vm = mm->total_vm = 1;
48484 up_write(&mm->mmap_sem);
48485 bprm->p = vma->vm_end - sizeof(void *);
48486+
48487+#ifdef CONFIG_PAX_RANDUSTACK
48488+ if (randomize_va_space)
48489+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
48490+#endif
48491+
48492 return 0;
48493 err:
48494 up_write(&mm->mmap_sem);
48495@@ -510,7 +529,7 @@ int copy_strings_kernel(int argc,char ** argv, struct linux_binprm *bprm)
48496 int r;
48497 mm_segment_t oldfs = get_fs();
48498 set_fs(KERNEL_DS);
48499- r = copy_strings(argc, (char __user * __user *)argv, bprm);
48500+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
48501 set_fs(oldfs);
48502 return r;
48503 }
48504@@ -540,7 +559,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48505 unsigned long new_end = old_end - shift;
48506 struct mmu_gather *tlb;
48507
48508- BUG_ON(new_start > new_end);
48509+ if (new_start >= new_end || new_start < mmap_min_addr)
48510+ return -ENOMEM;
48511
48512 /*
48513 * ensure there are no vmas between where we want to go
48514@@ -549,6 +569,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
48515 if (vma != find_vma(mm, new_start))
48516 return -EFAULT;
48517
48518+#ifdef CONFIG_PAX_SEGMEXEC
48519+ BUG_ON(pax_find_mirror_vma(vma));
48520+#endif
48521+
48522 /*
48523 * cover the whole range: [new_start, old_end)
48524 */
48525@@ -630,10 +654,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
48526 stack_top = arch_align_stack(stack_top);
48527 stack_top = PAGE_ALIGN(stack_top);
48528
48529- if (unlikely(stack_top < mmap_min_addr) ||
48530- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
48531- return -ENOMEM;
48532-
48533 stack_shift = vma->vm_end - stack_top;
48534
48535 bprm->p -= stack_shift;
48536@@ -645,6 +665,14 @@ int setup_arg_pages(struct linux_binprm *bprm,
48537 bprm->exec -= stack_shift;
48538
48539 down_write(&mm->mmap_sem);
48540+
48541+ /* Move stack pages down in memory. */
48542+ if (stack_shift) {
48543+ ret = shift_arg_pages(vma, stack_shift);
48544+ if (ret)
48545+ goto out_unlock;
48546+ }
48547+
48548 vm_flags = VM_STACK_FLAGS;
48549
48550 /*
48551@@ -658,19 +686,24 @@ int setup_arg_pages(struct linux_binprm *bprm,
48552 vm_flags &= ~VM_EXEC;
48553 vm_flags |= mm->def_flags;
48554
48555+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48556+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
48557+ vm_flags &= ~VM_EXEC;
48558+
48559+#ifdef CONFIG_PAX_MPROTECT
48560+ if (mm->pax_flags & MF_PAX_MPROTECT)
48561+ vm_flags &= ~VM_MAYEXEC;
48562+#endif
48563+
48564+ }
48565+#endif
48566+
48567 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
48568 vm_flags);
48569 if (ret)
48570 goto out_unlock;
48571 BUG_ON(prev != vma);
48572
48573- /* Move stack pages down in memory. */
48574- if (stack_shift) {
48575- ret = shift_arg_pages(vma, stack_shift);
48576- if (ret)
48577- goto out_unlock;
48578- }
48579-
48580 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
48581 stack_size = vma->vm_end - vma->vm_start;
48582 /*
48583@@ -744,7 +777,7 @@ int kernel_read(struct file *file, loff_t offset,
48584 old_fs = get_fs();
48585 set_fs(get_ds());
48586 /* The cast to a user pointer is valid due to the set_fs() */
48587- result = vfs_read(file, (void __user *)addr, count, &pos);
48588+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
48589 set_fs(old_fs);
48590 return result;
48591 }
48592@@ -1152,7 +1185,7 @@ int check_unsafe_exec(struct linux_binprm *bprm)
48593 }
48594 rcu_read_unlock();
48595
48596- if (p->fs->users > n_fs) {
48597+ if (atomic_read(&p->fs->users) > n_fs) {
48598 bprm->unsafe |= LSM_UNSAFE_SHARE;
48599 } else {
48600 res = -EAGAIN;
48601@@ -1347,11 +1380,35 @@ int do_execve(char * filename,
48602 char __user *__user *envp,
48603 struct pt_regs * regs)
48604 {
48605+#ifdef CONFIG_GRKERNSEC
48606+ struct file *old_exec_file;
48607+ struct acl_subject_label *old_acl;
48608+ struct rlimit old_rlim[RLIM_NLIMITS];
48609+#endif
48610 struct linux_binprm *bprm;
48611 struct file *file;
48612 struct files_struct *displaced;
48613 bool clear_in_exec;
48614 int retval;
48615+ const struct cred *cred = current_cred();
48616+
48617+ /*
48618+ * We move the actual failure in case of RLIMIT_NPROC excess from
48619+ * set*uid() to execve() because too many poorly written programs
48620+ * don't check setuid() return code. Here we additionally recheck
48621+ * whether NPROC limit is still exceeded.
48622+ */
48623+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
48624+
48625+ if ((current->flags & PF_NPROC_EXCEEDED) &&
48626+ atomic_read(&cred->user->processes) > current->signal->rlim[RLIMIT_NPROC].rlim_cur) {
48627+ retval = -EAGAIN;
48628+ goto out_ret;
48629+ }
48630+
48631+ /* We're below the limit (still or again), so we don't want to make
48632+ * further execve() calls fail. */
48633+ current->flags &= ~PF_NPROC_EXCEEDED;
48634
48635 retval = unshare_files(&displaced);
48636 if (retval)
48637@@ -1377,12 +1434,27 @@ int do_execve(char * filename,
48638 if (IS_ERR(file))
48639 goto out_unmark;
48640
48641+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
48642+ retval = -EPERM;
48643+ goto out_file;
48644+ }
48645+
48646 sched_exec();
48647
48648 bprm->file = file;
48649 bprm->filename = filename;
48650 bprm->interp = filename;
48651
48652+ if (gr_process_user_ban()) {
48653+ retval = -EPERM;
48654+ goto out_file;
48655+ }
48656+
48657+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
48658+ retval = -EACCES;
48659+ goto out_file;
48660+ }
48661+
48662 retval = bprm_mm_init(bprm);
48663 if (retval)
48664 goto out_file;
48665@@ -1412,10 +1484,41 @@ int do_execve(char * filename,
48666 if (retval < 0)
48667 goto out;
48668
48669+ if (!gr_tpe_allow(file)) {
48670+ retval = -EACCES;
48671+ goto out;
48672+ }
48673+
48674+ if (gr_check_crash_exec(file)) {
48675+ retval = -EACCES;
48676+ goto out;
48677+ }
48678+
48679+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
48680+
48681+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
48682+
48683+#ifdef CONFIG_GRKERNSEC
48684+ old_acl = current->acl;
48685+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
48686+ old_exec_file = current->exec_file;
48687+ get_file(file);
48688+ current->exec_file = file;
48689+#endif
48690+
48691+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
48692+ bprm->unsafe);
48693+ if (retval < 0)
48694+ goto out_fail;
48695+
48696 current->flags &= ~PF_KTHREAD;
48697 retval = search_binary_handler(bprm,regs);
48698 if (retval < 0)
48699- goto out;
48700+ goto out_fail;
48701+#ifdef CONFIG_GRKERNSEC
48702+ if (old_exec_file)
48703+ fput(old_exec_file);
48704+#endif
48705
48706 /* execve succeeded */
48707 current->fs->in_exec = 0;
48708@@ -1426,6 +1529,14 @@ int do_execve(char * filename,
48709 put_files_struct(displaced);
48710 return retval;
48711
48712+out_fail:
48713+#ifdef CONFIG_GRKERNSEC
48714+ current->acl = old_acl;
48715+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
48716+ fput(current->exec_file);
48717+ current->exec_file = old_exec_file;
48718+#endif
48719+
48720 out:
48721 if (bprm->mm) {
48722 acct_arg_size(bprm, 0);
48723@@ -1591,6 +1702,220 @@ out:
48724 return ispipe;
48725 }
48726
48727+int pax_check_flags(unsigned long *flags)
48728+{
48729+ int retval = 0;
48730+
48731+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
48732+ if (*flags & MF_PAX_SEGMEXEC)
48733+ {
48734+ *flags &= ~MF_PAX_SEGMEXEC;
48735+ retval = -EINVAL;
48736+ }
48737+#endif
48738+
48739+ if ((*flags & MF_PAX_PAGEEXEC)
48740+
48741+#ifdef CONFIG_PAX_PAGEEXEC
48742+ && (*flags & MF_PAX_SEGMEXEC)
48743+#endif
48744+
48745+ )
48746+ {
48747+ *flags &= ~MF_PAX_PAGEEXEC;
48748+ retval = -EINVAL;
48749+ }
48750+
48751+ if ((*flags & MF_PAX_MPROTECT)
48752+
48753+#ifdef CONFIG_PAX_MPROTECT
48754+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48755+#endif
48756+
48757+ )
48758+ {
48759+ *flags &= ~MF_PAX_MPROTECT;
48760+ retval = -EINVAL;
48761+ }
48762+
48763+ if ((*flags & MF_PAX_EMUTRAMP)
48764+
48765+#ifdef CONFIG_PAX_EMUTRAMP
48766+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
48767+#endif
48768+
48769+ )
48770+ {
48771+ *flags &= ~MF_PAX_EMUTRAMP;
48772+ retval = -EINVAL;
48773+ }
48774+
48775+ return retval;
48776+}
48777+
48778+EXPORT_SYMBOL(pax_check_flags);
48779+
48780+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
48781+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
48782+{
48783+ struct task_struct *tsk = current;
48784+ struct mm_struct *mm = current->mm;
48785+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
48786+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
48787+ char *path_exec = NULL;
48788+ char *path_fault = NULL;
48789+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
48790+
48791+ if (buffer_exec && buffer_fault) {
48792+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
48793+
48794+ down_read(&mm->mmap_sem);
48795+ vma = mm->mmap;
48796+ while (vma && (!vma_exec || !vma_fault)) {
48797+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
48798+ vma_exec = vma;
48799+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
48800+ vma_fault = vma;
48801+ vma = vma->vm_next;
48802+ }
48803+ if (vma_exec) {
48804+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
48805+ if (IS_ERR(path_exec))
48806+ path_exec = "<path too long>";
48807+ else {
48808+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
48809+ if (path_exec) {
48810+ *path_exec = 0;
48811+ path_exec = buffer_exec;
48812+ } else
48813+ path_exec = "<path too long>";
48814+ }
48815+ }
48816+ if (vma_fault) {
48817+ start = vma_fault->vm_start;
48818+ end = vma_fault->vm_end;
48819+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
48820+ if (vma_fault->vm_file) {
48821+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
48822+ if (IS_ERR(path_fault))
48823+ path_fault = "<path too long>";
48824+ else {
48825+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
48826+ if (path_fault) {
48827+ *path_fault = 0;
48828+ path_fault = buffer_fault;
48829+ } else
48830+ path_fault = "<path too long>";
48831+ }
48832+ } else
48833+ path_fault = "<anonymous mapping>";
48834+ }
48835+ up_read(&mm->mmap_sem);
48836+ }
48837+ if (tsk->signal->curr_ip)
48838+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
48839+ else
48840+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
48841+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
48842+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
48843+ task_uid(tsk), task_euid(tsk), pc, sp);
48844+ free_page((unsigned long)buffer_exec);
48845+ free_page((unsigned long)buffer_fault);
48846+ pax_report_insns(regs, pc, sp);
48847+ do_coredump(SIGKILL, SIGKILL, regs);
48848+}
48849+#endif
48850+
48851+#ifdef CONFIG_PAX_REFCOUNT
48852+void pax_report_refcount_overflow(struct pt_regs *regs)
48853+{
48854+ if (current->signal->curr_ip)
48855+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48856+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
48857+ else
48858+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
48859+ current->comm, task_pid_nr(current), current_uid(), current_euid());
48860+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
48861+ show_regs(regs);
48862+ force_sig_specific(SIGKILL, current);
48863+}
48864+#endif
48865+
48866+#ifdef CONFIG_PAX_USERCOPY
48867+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
48868+int object_is_on_stack(const void *obj, unsigned long len)
48869+{
48870+ const void * const stack = task_stack_page(current);
48871+ const void * const stackend = stack + THREAD_SIZE;
48872+
48873+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48874+ const void *frame = NULL;
48875+ const void *oldframe;
48876+#endif
48877+
48878+ if (obj + len < obj)
48879+ return -1;
48880+
48881+ if (obj + len <= stack || stackend <= obj)
48882+ return 0;
48883+
48884+ if (obj < stack || stackend < obj + len)
48885+ return -1;
48886+
48887+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
48888+ oldframe = __builtin_frame_address(1);
48889+ if (oldframe)
48890+ frame = __builtin_frame_address(2);
48891+ /*
48892+ low ----------------------------------------------> high
48893+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
48894+ ^----------------^
48895+ allow copies only within here
48896+ */
48897+ while (stack <= frame && frame < stackend) {
48898+ /* if obj + len extends past the last frame, this
48899+ check won't pass and the next frame will be 0,
48900+ causing us to bail out and correctly report
48901+ the copy as invalid
48902+ */
48903+ if (obj + len <= frame)
48904+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
48905+ oldframe = frame;
48906+ frame = *(const void * const *)frame;
48907+ }
48908+ return -1;
48909+#else
48910+ return 1;
48911+#endif
48912+}
48913+
48914+
48915+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
48916+{
48917+ if (current->signal->curr_ip)
48918+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48919+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48920+ else
48921+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
48922+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
48923+
48924+ dump_stack();
48925+ gr_handle_kernel_exploit();
48926+ do_group_exit(SIGKILL);
48927+}
48928+#endif
48929+
48930+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
48931+void pax_track_stack(void)
48932+{
48933+ unsigned long sp = (unsigned long)&sp;
48934+ if (sp < current_thread_info()->lowest_stack &&
48935+ sp > (unsigned long)task_stack_page(current))
48936+ current_thread_info()->lowest_stack = sp;
48937+}
48938+EXPORT_SYMBOL(pax_track_stack);
48939+#endif
48940+
48941 static int zap_process(struct task_struct *start)
48942 {
48943 struct task_struct *t;
48944@@ -1793,17 +2118,17 @@ static void wait_for_dump_helpers(struct file *file)
48945 pipe = file->f_path.dentry->d_inode->i_pipe;
48946
48947 pipe_lock(pipe);
48948- pipe->readers++;
48949- pipe->writers--;
48950+ atomic_inc(&pipe->readers);
48951+ atomic_dec(&pipe->writers);
48952
48953- while ((pipe->readers > 1) && (!signal_pending(current))) {
48954+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
48955 wake_up_interruptible_sync(&pipe->wait);
48956 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
48957 pipe_wait(pipe);
48958 }
48959
48960- pipe->readers--;
48961- pipe->writers++;
48962+ atomic_dec(&pipe->readers);
48963+ atomic_inc(&pipe->writers);
48964 pipe_unlock(pipe);
48965
48966 }
48967@@ -1826,10 +2151,13 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48968 char **helper_argv = NULL;
48969 int helper_argc = 0;
48970 int dump_count = 0;
48971- static atomic_t core_dump_count = ATOMIC_INIT(0);
48972+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
48973
48974 audit_core_dumps(signr);
48975
48976+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
48977+ gr_handle_brute_attach(current, mm->flags);
48978+
48979 binfmt = mm->binfmt;
48980 if (!binfmt || !binfmt->core_dump)
48981 goto fail;
48982@@ -1874,6 +2202,8 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48983 */
48984 clear_thread_flag(TIF_SIGPENDING);
48985
48986+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
48987+
48988 /*
48989 * lock_kernel() because format_corename() is controlled by sysctl, which
48990 * uses lock_kernel()
48991@@ -1908,7 +2238,7 @@ void do_coredump(long signr, int exit_code, struct pt_regs *regs)
48992 goto fail_unlock;
48993 }
48994
48995- dump_count = atomic_inc_return(&core_dump_count);
48996+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
48997 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
48998 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
48999 task_tgid_vnr(current), current->comm);
49000@@ -1972,7 +2302,7 @@ close_fail:
49001 filp_close(file, NULL);
49002 fail_dropcount:
49003 if (dump_count)
49004- atomic_dec(&core_dump_count);
49005+ atomic_dec_unchecked(&core_dump_count);
49006 fail_unlock:
49007 if (helper_argv)
49008 argv_free(helper_argv);
49009diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
49010index 7f8d2e5..a1abdbb 100644
49011--- a/fs/ext2/balloc.c
49012+++ b/fs/ext2/balloc.c
49013@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
49014
49015 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49016 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49017- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49018+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49019 sbi->s_resuid != current_fsuid() &&
49020 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49021 return 0;
49022diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
49023index 27967f9..9f2a5fb 100644
49024--- a/fs/ext3/balloc.c
49025+++ b/fs/ext3/balloc.c
49026@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
49027
49028 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
49029 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
49030- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
49031+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
49032 sbi->s_resuid != current_fsuid() &&
49033 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
49034 return 0;
49035diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
49036index e85b63c..80398e6 100644
49037--- a/fs/ext4/balloc.c
49038+++ b/fs/ext4/balloc.c
49039@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks)
49040 /* Hm, nope. Are (enough) root reserved blocks available? */
49041 if (sbi->s_resuid == current_fsuid() ||
49042 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
49043- capable(CAP_SYS_RESOURCE)) {
49044+ capable_nolog(CAP_SYS_RESOURCE)) {
49045 if (free_blocks >= (nblocks + dirty_blocks))
49046 return 1;
49047 }
49048diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
49049index 67c46ed..1f237e5 100644
49050--- a/fs/ext4/ext4.h
49051+++ b/fs/ext4/ext4.h
49052@@ -1077,19 +1077,19 @@ struct ext4_sb_info {
49053
49054 /* stats for buddy allocator */
49055 spinlock_t s_mb_pa_lock;
49056- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
49057- atomic_t s_bal_success; /* we found long enough chunks */
49058- atomic_t s_bal_allocated; /* in blocks */
49059- atomic_t s_bal_ex_scanned; /* total extents scanned */
49060- atomic_t s_bal_goals; /* goal hits */
49061- atomic_t s_bal_breaks; /* too long searches */
49062- atomic_t s_bal_2orders; /* 2^order hits */
49063+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
49064+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
49065+ atomic_unchecked_t s_bal_allocated; /* in blocks */
49066+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
49067+ atomic_unchecked_t s_bal_goals; /* goal hits */
49068+ atomic_unchecked_t s_bal_breaks; /* too long searches */
49069+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
49070 spinlock_t s_bal_lock;
49071 unsigned long s_mb_buddies_generated;
49072 unsigned long long s_mb_generation_time;
49073- atomic_t s_mb_lost_chunks;
49074- atomic_t s_mb_preallocated;
49075- atomic_t s_mb_discarded;
49076+ atomic_unchecked_t s_mb_lost_chunks;
49077+ atomic_unchecked_t s_mb_preallocated;
49078+ atomic_unchecked_t s_mb_discarded;
49079 atomic_t s_lock_busy;
49080
49081 /* locality groups */
49082diff --git a/fs/ext4/file.c b/fs/ext4/file.c
49083index 2a60541..7439d61 100644
49084--- a/fs/ext4/file.c
49085+++ b/fs/ext4/file.c
49086@@ -122,8 +122,8 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
49087 cp = d_path(&path, buf, sizeof(buf));
49088 path_put(&path);
49089 if (!IS_ERR(cp)) {
49090- memcpy(sbi->s_es->s_last_mounted, cp,
49091- sizeof(sbi->s_es->s_last_mounted));
49092+ strlcpy(sbi->s_es->s_last_mounted, cp,
49093+ sizeof(sbi->s_es->s_last_mounted));
49094 sb->s_dirt = 1;
49095 }
49096 }
49097diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
49098index 42bac1b..0aab9d8 100644
49099--- a/fs/ext4/mballoc.c
49100+++ b/fs/ext4/mballoc.c
49101@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
49102 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
49103
49104 if (EXT4_SB(sb)->s_mb_stats)
49105- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
49106+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
49107
49108 break;
49109 }
49110@@ -2131,7 +2131,7 @@ repeat:
49111 ac->ac_status = AC_STATUS_CONTINUE;
49112 ac->ac_flags |= EXT4_MB_HINT_FIRST;
49113 cr = 3;
49114- atomic_inc(&sbi->s_mb_lost_chunks);
49115+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
49116 goto repeat;
49117 }
49118 }
49119@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v)
49120 ext4_grpblk_t counters[16];
49121 } sg;
49122
49123+ pax_track_stack();
49124+
49125 group--;
49126 if (group == 0)
49127 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
49128@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *sb)
49129 if (sbi->s_mb_stats) {
49130 printk(KERN_INFO
49131 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
49132- atomic_read(&sbi->s_bal_allocated),
49133- atomic_read(&sbi->s_bal_reqs),
49134- atomic_read(&sbi->s_bal_success));
49135+ atomic_read_unchecked(&sbi->s_bal_allocated),
49136+ atomic_read_unchecked(&sbi->s_bal_reqs),
49137+ atomic_read_unchecked(&sbi->s_bal_success));
49138 printk(KERN_INFO
49139 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
49140 "%u 2^N hits, %u breaks, %u lost\n",
49141- atomic_read(&sbi->s_bal_ex_scanned),
49142- atomic_read(&sbi->s_bal_goals),
49143- atomic_read(&sbi->s_bal_2orders),
49144- atomic_read(&sbi->s_bal_breaks),
49145- atomic_read(&sbi->s_mb_lost_chunks));
49146+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
49147+ atomic_read_unchecked(&sbi->s_bal_goals),
49148+ atomic_read_unchecked(&sbi->s_bal_2orders),
49149+ atomic_read_unchecked(&sbi->s_bal_breaks),
49150+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
49151 printk(KERN_INFO
49152 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
49153 sbi->s_mb_buddies_generated++,
49154 sbi->s_mb_generation_time);
49155 printk(KERN_INFO
49156 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
49157- atomic_read(&sbi->s_mb_preallocated),
49158- atomic_read(&sbi->s_mb_discarded));
49159+ atomic_read_unchecked(&sbi->s_mb_preallocated),
49160+ atomic_read_unchecked(&sbi->s_mb_discarded));
49161 }
49162
49163 free_percpu(sbi->s_locality_groups);
49164@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
49165 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
49166
49167 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
49168- atomic_inc(&sbi->s_bal_reqs);
49169- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49170+ atomic_inc_unchecked(&sbi->s_bal_reqs);
49171+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
49172 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
49173- atomic_inc(&sbi->s_bal_success);
49174- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
49175+ atomic_inc_unchecked(&sbi->s_bal_success);
49176+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
49177 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
49178 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
49179- atomic_inc(&sbi->s_bal_goals);
49180+ atomic_inc_unchecked(&sbi->s_bal_goals);
49181 if (ac->ac_found > sbi->s_mb_max_to_scan)
49182- atomic_inc(&sbi->s_bal_breaks);
49183+ atomic_inc_unchecked(&sbi->s_bal_breaks);
49184 }
49185
49186 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
49187@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
49188 trace_ext4_mb_new_inode_pa(ac, pa);
49189
49190 ext4_mb_use_inode_pa(ac, pa);
49191- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49192+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49193
49194 ei = EXT4_I(ac->ac_inode);
49195 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49196@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
49197 trace_ext4_mb_new_group_pa(ac, pa);
49198
49199 ext4_mb_use_group_pa(ac, pa);
49200- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49201+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
49202
49203 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
49204 lg = ac->ac_lg;
49205@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
49206 * from the bitmap and continue.
49207 */
49208 }
49209- atomic_add(free, &sbi->s_mb_discarded);
49210+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
49211
49212 return err;
49213 }
49214@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
49215 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
49216 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
49217 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
49218- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49219+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
49220
49221 if (ac) {
49222 ac->ac_sb = sb;
49223diff --git a/fs/ext4/super.c b/fs/ext4/super.c
49224index f1e7077..edd86b2 100644
49225--- a/fs/ext4/super.c
49226+++ b/fs/ext4/super.c
49227@@ -2286,7 +2286,7 @@ static void ext4_sb_release(struct kobject *kobj)
49228 }
49229
49230
49231-static struct sysfs_ops ext4_attr_ops = {
49232+static const struct sysfs_ops ext4_attr_ops = {
49233 .show = ext4_attr_show,
49234 .store = ext4_attr_store,
49235 };
49236diff --git a/fs/fcntl.c b/fs/fcntl.c
49237index 97e01dc..e9aab2d 100644
49238--- a/fs/fcntl.c
49239+++ b/fs/fcntl.c
49240@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
49241 if (err)
49242 return err;
49243
49244+ if (gr_handle_chroot_fowner(pid, type))
49245+ return -ENOENT;
49246+ if (gr_check_protected_task_fowner(pid, type))
49247+ return -EACCES;
49248+
49249 f_modown(filp, pid, type, force);
49250 return 0;
49251 }
49252@@ -265,7 +270,7 @@ pid_t f_getown(struct file *filp)
49253
49254 static int f_setown_ex(struct file *filp, unsigned long arg)
49255 {
49256- struct f_owner_ex * __user owner_p = (void * __user)arg;
49257+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49258 struct f_owner_ex owner;
49259 struct pid *pid;
49260 int type;
49261@@ -305,7 +310,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
49262
49263 static int f_getown_ex(struct file *filp, unsigned long arg)
49264 {
49265- struct f_owner_ex * __user owner_p = (void * __user)arg;
49266+ struct f_owner_ex __user *owner_p = (void __user *)arg;
49267 struct f_owner_ex owner;
49268 int ret = 0;
49269
49270@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned int cmd, unsigned long arg,
49271 switch (cmd) {
49272 case F_DUPFD:
49273 case F_DUPFD_CLOEXEC:
49274+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
49275 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49276 break;
49277 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
49278diff --git a/fs/fifo.c b/fs/fifo.c
49279index f8f97b8..b1f2259 100644
49280--- a/fs/fifo.c
49281+++ b/fs/fifo.c
49282@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
49283 */
49284 filp->f_op = &read_pipefifo_fops;
49285 pipe->r_counter++;
49286- if (pipe->readers++ == 0)
49287+ if (atomic_inc_return(&pipe->readers) == 1)
49288 wake_up_partner(inode);
49289
49290- if (!pipe->writers) {
49291+ if (!atomic_read(&pipe->writers)) {
49292 if ((filp->f_flags & O_NONBLOCK)) {
49293 /* suppress POLLHUP until we have
49294 * seen a writer */
49295@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode, struct file *filp)
49296 * errno=ENXIO when there is no process reading the FIFO.
49297 */
49298 ret = -ENXIO;
49299- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
49300+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
49301 goto err;
49302
49303 filp->f_op = &write_pipefifo_fops;
49304 pipe->w_counter++;
49305- if (!pipe->writers++)
49306+ if (atomic_inc_return(&pipe->writers) == 1)
49307 wake_up_partner(inode);
49308
49309- if (!pipe->readers) {
49310+ if (!atomic_read(&pipe->readers)) {
49311 wait_for_partner(inode, &pipe->r_counter);
49312 if (signal_pending(current))
49313 goto err_wr;
49314@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
49315 */
49316 filp->f_op = &rdwr_pipefifo_fops;
49317
49318- pipe->readers++;
49319- pipe->writers++;
49320+ atomic_inc(&pipe->readers);
49321+ atomic_inc(&pipe->writers);
49322 pipe->r_counter++;
49323 pipe->w_counter++;
49324- if (pipe->readers == 1 || pipe->writers == 1)
49325+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
49326 wake_up_partner(inode);
49327 break;
49328
49329@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode, struct file *filp)
49330 return 0;
49331
49332 err_rd:
49333- if (!--pipe->readers)
49334+ if (atomic_dec_and_test(&pipe->readers))
49335 wake_up_interruptible(&pipe->wait);
49336 ret = -ERESTARTSYS;
49337 goto err;
49338
49339 err_wr:
49340- if (!--pipe->writers)
49341+ if (atomic_dec_and_test(&pipe->writers))
49342 wake_up_interruptible(&pipe->wait);
49343 ret = -ERESTARTSYS;
49344 goto err;
49345
49346 err:
49347- if (!pipe->readers && !pipe->writers)
49348+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
49349 free_pipe_info(inode);
49350
49351 err_nocleanup:
49352diff --git a/fs/file.c b/fs/file.c
49353index 87e1290..a930cc4 100644
49354--- a/fs/file.c
49355+++ b/fs/file.c
49356@@ -14,6 +14,7 @@
49357 #include <linux/slab.h>
49358 #include <linux/vmalloc.h>
49359 #include <linux/file.h>
49360+#include <linux/security.h>
49361 #include <linux/fdtable.h>
49362 #include <linux/bitops.h>
49363 #include <linux/interrupt.h>
49364@@ -257,6 +258,8 @@ int expand_files(struct files_struct *files, int nr)
49365 * N.B. For clone tasks sharing a files structure, this test
49366 * will limit the total number of files that can be opened.
49367 */
49368+
49369+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
49370 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
49371 return -EMFILE;
49372
49373diff --git a/fs/filesystems.c b/fs/filesystems.c
49374index a24c58e..53f91ee 100644
49375--- a/fs/filesystems.c
49376+++ b/fs/filesystems.c
49377@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(const char *name)
49378 int len = dot ? dot - name : strlen(name);
49379
49380 fs = __get_fs_type(name, len);
49381+
49382+#ifdef CONFIG_GRKERNSEC_MODHARDEN
49383+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
49384+#else
49385 if (!fs && (request_module("%.*s", len, name) == 0))
49386+#endif
49387 fs = __get_fs_type(name, len);
49388
49389 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
49390diff --git a/fs/fs_struct.c b/fs/fs_struct.c
49391index eee0590..ef5bc0e 100644
49392--- a/fs/fs_struct.c
49393+++ b/fs/fs_struct.c
49394@@ -4,6 +4,7 @@
49395 #include <linux/path.h>
49396 #include <linux/slab.h>
49397 #include <linux/fs_struct.h>
49398+#include <linux/grsecurity.h>
49399
49400 /*
49401 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
49402@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, struct path *path)
49403 old_root = fs->root;
49404 fs->root = *path;
49405 path_get(path);
49406+ gr_set_chroot_entries(current, path);
49407 write_unlock(&fs->lock);
49408 if (old_root.dentry)
49409 path_put(&old_root);
49410@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_root, struct path *new_root)
49411 && fs->root.mnt == old_root->mnt) {
49412 path_get(new_root);
49413 fs->root = *new_root;
49414+ gr_set_chroot_entries(p, new_root);
49415 count++;
49416 }
49417 if (fs->pwd.dentry == old_root->dentry
49418@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
49419 task_lock(tsk);
49420 write_lock(&fs->lock);
49421 tsk->fs = NULL;
49422- kill = !--fs->users;
49423+ gr_clear_chroot_entries(tsk);
49424+ kill = !atomic_dec_return(&fs->users);
49425 write_unlock(&fs->lock);
49426 task_unlock(tsk);
49427 if (kill)
49428@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
49429 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
49430 /* We don't need to lock fs - think why ;-) */
49431 if (fs) {
49432- fs->users = 1;
49433+ atomic_set(&fs->users, 1);
49434 fs->in_exec = 0;
49435 rwlock_init(&fs->lock);
49436 fs->umask = old->umask;
49437@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
49438
49439 task_lock(current);
49440 write_lock(&fs->lock);
49441- kill = !--fs->users;
49442+ kill = !atomic_dec_return(&fs->users);
49443 current->fs = new_fs;
49444+ gr_set_chroot_entries(current, &new_fs->root);
49445 write_unlock(&fs->lock);
49446 task_unlock(current);
49447
49448@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
49449
49450 /* to be mentioned only in INIT_TASK */
49451 struct fs_struct init_fs = {
49452- .users = 1,
49453+ .users = ATOMIC_INIT(1),
49454 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
49455 .umask = 0022,
49456 };
49457@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
49458 task_lock(current);
49459
49460 write_lock(&init_fs.lock);
49461- init_fs.users++;
49462+ atomic_inc(&init_fs.users);
49463 write_unlock(&init_fs.lock);
49464
49465 write_lock(&fs->lock);
49466 current->fs = &init_fs;
49467- kill = !--fs->users;
49468+ gr_set_chroot_entries(current, &current->fs->root);
49469+ kill = !atomic_dec_return(&fs->users);
49470 write_unlock(&fs->lock);
49471
49472 task_unlock(current);
49473diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
49474index 9905350..02eaec4 100644
49475--- a/fs/fscache/cookie.c
49476+++ b/fs/fscache/cookie.c
49477@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
49478 parent ? (char *) parent->def->name : "<no-parent>",
49479 def->name, netfs_data);
49480
49481- fscache_stat(&fscache_n_acquires);
49482+ fscache_stat_unchecked(&fscache_n_acquires);
49483
49484 /* if there's no parent cookie, then we don't create one here either */
49485 if (!parent) {
49486- fscache_stat(&fscache_n_acquires_null);
49487+ fscache_stat_unchecked(&fscache_n_acquires_null);
49488 _leave(" [no parent]");
49489 return NULL;
49490 }
49491@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
49492 /* allocate and initialise a cookie */
49493 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
49494 if (!cookie) {
49495- fscache_stat(&fscache_n_acquires_oom);
49496+ fscache_stat_unchecked(&fscache_n_acquires_oom);
49497 _leave(" [ENOMEM]");
49498 return NULL;
49499 }
49500@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49501
49502 switch (cookie->def->type) {
49503 case FSCACHE_COOKIE_TYPE_INDEX:
49504- fscache_stat(&fscache_n_cookie_index);
49505+ fscache_stat_unchecked(&fscache_n_cookie_index);
49506 break;
49507 case FSCACHE_COOKIE_TYPE_DATAFILE:
49508- fscache_stat(&fscache_n_cookie_data);
49509+ fscache_stat_unchecked(&fscache_n_cookie_data);
49510 break;
49511 default:
49512- fscache_stat(&fscache_n_cookie_special);
49513+ fscache_stat_unchecked(&fscache_n_cookie_special);
49514 break;
49515 }
49516
49517@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
49518 if (fscache_acquire_non_index_cookie(cookie) < 0) {
49519 atomic_dec(&parent->n_children);
49520 __fscache_cookie_put(cookie);
49521- fscache_stat(&fscache_n_acquires_nobufs);
49522+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
49523 _leave(" = NULL");
49524 return NULL;
49525 }
49526 }
49527
49528- fscache_stat(&fscache_n_acquires_ok);
49529+ fscache_stat_unchecked(&fscache_n_acquires_ok);
49530 _leave(" = %p", cookie);
49531 return cookie;
49532 }
49533@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
49534 cache = fscache_select_cache_for_object(cookie->parent);
49535 if (!cache) {
49536 up_read(&fscache_addremove_sem);
49537- fscache_stat(&fscache_n_acquires_no_cache);
49538+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
49539 _leave(" = -ENOMEDIUM [no cache]");
49540 return -ENOMEDIUM;
49541 }
49542@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
49543 object = cache->ops->alloc_object(cache, cookie);
49544 fscache_stat_d(&fscache_n_cop_alloc_object);
49545 if (IS_ERR(object)) {
49546- fscache_stat(&fscache_n_object_no_alloc);
49547+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
49548 ret = PTR_ERR(object);
49549 goto error;
49550 }
49551
49552- fscache_stat(&fscache_n_object_alloc);
49553+ fscache_stat_unchecked(&fscache_n_object_alloc);
49554
49555 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
49556
49557@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
49558 struct fscache_object *object;
49559 struct hlist_node *_p;
49560
49561- fscache_stat(&fscache_n_updates);
49562+ fscache_stat_unchecked(&fscache_n_updates);
49563
49564 if (!cookie) {
49565- fscache_stat(&fscache_n_updates_null);
49566+ fscache_stat_unchecked(&fscache_n_updates_null);
49567 _leave(" [no cookie]");
49568 return;
49569 }
49570@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49571 struct fscache_object *object;
49572 unsigned long event;
49573
49574- fscache_stat(&fscache_n_relinquishes);
49575+ fscache_stat_unchecked(&fscache_n_relinquishes);
49576 if (retire)
49577- fscache_stat(&fscache_n_relinquishes_retire);
49578+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
49579
49580 if (!cookie) {
49581- fscache_stat(&fscache_n_relinquishes_null);
49582+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
49583 _leave(" [no cookie]");
49584 return;
49585 }
49586@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
49587
49588 /* wait for the cookie to finish being instantiated (or to fail) */
49589 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
49590- fscache_stat(&fscache_n_relinquishes_waitcrt);
49591+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
49592 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
49593 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
49594 }
49595diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
49596index edd7434..0725e66 100644
49597--- a/fs/fscache/internal.h
49598+++ b/fs/fscache/internal.h
49599@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
49600 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
49601 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
49602
49603-extern atomic_t fscache_n_op_pend;
49604-extern atomic_t fscache_n_op_run;
49605-extern atomic_t fscache_n_op_enqueue;
49606-extern atomic_t fscache_n_op_deferred_release;
49607-extern atomic_t fscache_n_op_release;
49608-extern atomic_t fscache_n_op_gc;
49609-extern atomic_t fscache_n_op_cancelled;
49610-extern atomic_t fscache_n_op_rejected;
49611+extern atomic_unchecked_t fscache_n_op_pend;
49612+extern atomic_unchecked_t fscache_n_op_run;
49613+extern atomic_unchecked_t fscache_n_op_enqueue;
49614+extern atomic_unchecked_t fscache_n_op_deferred_release;
49615+extern atomic_unchecked_t fscache_n_op_release;
49616+extern atomic_unchecked_t fscache_n_op_gc;
49617+extern atomic_unchecked_t fscache_n_op_cancelled;
49618+extern atomic_unchecked_t fscache_n_op_rejected;
49619
49620-extern atomic_t fscache_n_attr_changed;
49621-extern atomic_t fscache_n_attr_changed_ok;
49622-extern atomic_t fscache_n_attr_changed_nobufs;
49623-extern atomic_t fscache_n_attr_changed_nomem;
49624-extern atomic_t fscache_n_attr_changed_calls;
49625+extern atomic_unchecked_t fscache_n_attr_changed;
49626+extern atomic_unchecked_t fscache_n_attr_changed_ok;
49627+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
49628+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
49629+extern atomic_unchecked_t fscache_n_attr_changed_calls;
49630
49631-extern atomic_t fscache_n_allocs;
49632-extern atomic_t fscache_n_allocs_ok;
49633-extern atomic_t fscache_n_allocs_wait;
49634-extern atomic_t fscache_n_allocs_nobufs;
49635-extern atomic_t fscache_n_allocs_intr;
49636-extern atomic_t fscache_n_allocs_object_dead;
49637-extern atomic_t fscache_n_alloc_ops;
49638-extern atomic_t fscache_n_alloc_op_waits;
49639+extern atomic_unchecked_t fscache_n_allocs;
49640+extern atomic_unchecked_t fscache_n_allocs_ok;
49641+extern atomic_unchecked_t fscache_n_allocs_wait;
49642+extern atomic_unchecked_t fscache_n_allocs_nobufs;
49643+extern atomic_unchecked_t fscache_n_allocs_intr;
49644+extern atomic_unchecked_t fscache_n_allocs_object_dead;
49645+extern atomic_unchecked_t fscache_n_alloc_ops;
49646+extern atomic_unchecked_t fscache_n_alloc_op_waits;
49647
49648-extern atomic_t fscache_n_retrievals;
49649-extern atomic_t fscache_n_retrievals_ok;
49650-extern atomic_t fscache_n_retrievals_wait;
49651-extern atomic_t fscache_n_retrievals_nodata;
49652-extern atomic_t fscache_n_retrievals_nobufs;
49653-extern atomic_t fscache_n_retrievals_intr;
49654-extern atomic_t fscache_n_retrievals_nomem;
49655-extern atomic_t fscache_n_retrievals_object_dead;
49656-extern atomic_t fscache_n_retrieval_ops;
49657-extern atomic_t fscache_n_retrieval_op_waits;
49658+extern atomic_unchecked_t fscache_n_retrievals;
49659+extern atomic_unchecked_t fscache_n_retrievals_ok;
49660+extern atomic_unchecked_t fscache_n_retrievals_wait;
49661+extern atomic_unchecked_t fscache_n_retrievals_nodata;
49662+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
49663+extern atomic_unchecked_t fscache_n_retrievals_intr;
49664+extern atomic_unchecked_t fscache_n_retrievals_nomem;
49665+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
49666+extern atomic_unchecked_t fscache_n_retrieval_ops;
49667+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
49668
49669-extern atomic_t fscache_n_stores;
49670-extern atomic_t fscache_n_stores_ok;
49671-extern atomic_t fscache_n_stores_again;
49672-extern atomic_t fscache_n_stores_nobufs;
49673-extern atomic_t fscache_n_stores_oom;
49674-extern atomic_t fscache_n_store_ops;
49675-extern atomic_t fscache_n_store_calls;
49676-extern atomic_t fscache_n_store_pages;
49677-extern atomic_t fscache_n_store_radix_deletes;
49678-extern atomic_t fscache_n_store_pages_over_limit;
49679+extern atomic_unchecked_t fscache_n_stores;
49680+extern atomic_unchecked_t fscache_n_stores_ok;
49681+extern atomic_unchecked_t fscache_n_stores_again;
49682+extern atomic_unchecked_t fscache_n_stores_nobufs;
49683+extern atomic_unchecked_t fscache_n_stores_oom;
49684+extern atomic_unchecked_t fscache_n_store_ops;
49685+extern atomic_unchecked_t fscache_n_store_calls;
49686+extern atomic_unchecked_t fscache_n_store_pages;
49687+extern atomic_unchecked_t fscache_n_store_radix_deletes;
49688+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
49689
49690-extern atomic_t fscache_n_store_vmscan_not_storing;
49691-extern atomic_t fscache_n_store_vmscan_gone;
49692-extern atomic_t fscache_n_store_vmscan_busy;
49693-extern atomic_t fscache_n_store_vmscan_cancelled;
49694+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
49695+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
49696+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
49697+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
49698
49699-extern atomic_t fscache_n_marks;
49700-extern atomic_t fscache_n_uncaches;
49701+extern atomic_unchecked_t fscache_n_marks;
49702+extern atomic_unchecked_t fscache_n_uncaches;
49703
49704-extern atomic_t fscache_n_acquires;
49705-extern atomic_t fscache_n_acquires_null;
49706-extern atomic_t fscache_n_acquires_no_cache;
49707-extern atomic_t fscache_n_acquires_ok;
49708-extern atomic_t fscache_n_acquires_nobufs;
49709-extern atomic_t fscache_n_acquires_oom;
49710+extern atomic_unchecked_t fscache_n_acquires;
49711+extern atomic_unchecked_t fscache_n_acquires_null;
49712+extern atomic_unchecked_t fscache_n_acquires_no_cache;
49713+extern atomic_unchecked_t fscache_n_acquires_ok;
49714+extern atomic_unchecked_t fscache_n_acquires_nobufs;
49715+extern atomic_unchecked_t fscache_n_acquires_oom;
49716
49717-extern atomic_t fscache_n_updates;
49718-extern atomic_t fscache_n_updates_null;
49719-extern atomic_t fscache_n_updates_run;
49720+extern atomic_unchecked_t fscache_n_updates;
49721+extern atomic_unchecked_t fscache_n_updates_null;
49722+extern atomic_unchecked_t fscache_n_updates_run;
49723
49724-extern atomic_t fscache_n_relinquishes;
49725-extern atomic_t fscache_n_relinquishes_null;
49726-extern atomic_t fscache_n_relinquishes_waitcrt;
49727-extern atomic_t fscache_n_relinquishes_retire;
49728+extern atomic_unchecked_t fscache_n_relinquishes;
49729+extern atomic_unchecked_t fscache_n_relinquishes_null;
49730+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
49731+extern atomic_unchecked_t fscache_n_relinquishes_retire;
49732
49733-extern atomic_t fscache_n_cookie_index;
49734-extern atomic_t fscache_n_cookie_data;
49735-extern atomic_t fscache_n_cookie_special;
49736+extern atomic_unchecked_t fscache_n_cookie_index;
49737+extern atomic_unchecked_t fscache_n_cookie_data;
49738+extern atomic_unchecked_t fscache_n_cookie_special;
49739
49740-extern atomic_t fscache_n_object_alloc;
49741-extern atomic_t fscache_n_object_no_alloc;
49742-extern atomic_t fscache_n_object_lookups;
49743-extern atomic_t fscache_n_object_lookups_negative;
49744-extern atomic_t fscache_n_object_lookups_positive;
49745-extern atomic_t fscache_n_object_lookups_timed_out;
49746-extern atomic_t fscache_n_object_created;
49747-extern atomic_t fscache_n_object_avail;
49748-extern atomic_t fscache_n_object_dead;
49749+extern atomic_unchecked_t fscache_n_object_alloc;
49750+extern atomic_unchecked_t fscache_n_object_no_alloc;
49751+extern atomic_unchecked_t fscache_n_object_lookups;
49752+extern atomic_unchecked_t fscache_n_object_lookups_negative;
49753+extern atomic_unchecked_t fscache_n_object_lookups_positive;
49754+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
49755+extern atomic_unchecked_t fscache_n_object_created;
49756+extern atomic_unchecked_t fscache_n_object_avail;
49757+extern atomic_unchecked_t fscache_n_object_dead;
49758
49759-extern atomic_t fscache_n_checkaux_none;
49760-extern atomic_t fscache_n_checkaux_okay;
49761-extern atomic_t fscache_n_checkaux_update;
49762-extern atomic_t fscache_n_checkaux_obsolete;
49763+extern atomic_unchecked_t fscache_n_checkaux_none;
49764+extern atomic_unchecked_t fscache_n_checkaux_okay;
49765+extern atomic_unchecked_t fscache_n_checkaux_update;
49766+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
49767
49768 extern atomic_t fscache_n_cop_alloc_object;
49769 extern atomic_t fscache_n_cop_lookup_object;
49770@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t *stat)
49771 atomic_inc(stat);
49772 }
49773
49774+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
49775+{
49776+ atomic_inc_unchecked(stat);
49777+}
49778+
49779 static inline void fscache_stat_d(atomic_t *stat)
49780 {
49781 atomic_dec(stat);
49782@@ -259,6 +264,7 @@ extern const struct file_operations fscache_stats_fops;
49783
49784 #define __fscache_stat(stat) (NULL)
49785 #define fscache_stat(stat) do {} while (0)
49786+#define fscache_stat_unchecked(stat) do {} while (0)
49787 #define fscache_stat_d(stat) do {} while (0)
49788 #endif
49789
49790diff --git a/fs/fscache/object.c b/fs/fscache/object.c
49791index e513ac5..e888d34 100644
49792--- a/fs/fscache/object.c
49793+++ b/fs/fscache/object.c
49794@@ -144,7 +144,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49795 /* update the object metadata on disk */
49796 case FSCACHE_OBJECT_UPDATING:
49797 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
49798- fscache_stat(&fscache_n_updates_run);
49799+ fscache_stat_unchecked(&fscache_n_updates_run);
49800 fscache_stat(&fscache_n_cop_update_object);
49801 object->cache->ops->update_object(object);
49802 fscache_stat_d(&fscache_n_cop_update_object);
49803@@ -233,7 +233,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49804 spin_lock(&object->lock);
49805 object->state = FSCACHE_OBJECT_DEAD;
49806 spin_unlock(&object->lock);
49807- fscache_stat(&fscache_n_object_dead);
49808+ fscache_stat_unchecked(&fscache_n_object_dead);
49809 goto terminal_transit;
49810
49811 /* handle the parent cache of this object being withdrawn from
49812@@ -248,7 +248,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
49813 spin_lock(&object->lock);
49814 object->state = FSCACHE_OBJECT_DEAD;
49815 spin_unlock(&object->lock);
49816- fscache_stat(&fscache_n_object_dead);
49817+ fscache_stat_unchecked(&fscache_n_object_dead);
49818 goto terminal_transit;
49819
49820 /* complain about the object being woken up once it is
49821@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49822 parent->cookie->def->name, cookie->def->name,
49823 object->cache->tag->name);
49824
49825- fscache_stat(&fscache_n_object_lookups);
49826+ fscache_stat_unchecked(&fscache_n_object_lookups);
49827 fscache_stat(&fscache_n_cop_lookup_object);
49828 ret = object->cache->ops->lookup_object(object);
49829 fscache_stat_d(&fscache_n_cop_lookup_object);
49830@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct fscache_object *object)
49831 if (ret == -ETIMEDOUT) {
49832 /* probably stuck behind another object, so move this one to
49833 * the back of the queue */
49834- fscache_stat(&fscache_n_object_lookups_timed_out);
49835+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
49836 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49837 }
49838
49839@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
49840
49841 spin_lock(&object->lock);
49842 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49843- fscache_stat(&fscache_n_object_lookups_negative);
49844+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
49845
49846 /* transit here to allow write requests to begin stacking up
49847 * and read requests to begin returning ENODATA */
49848@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fscache_object *object)
49849 * result, in which case there may be data available */
49850 spin_lock(&object->lock);
49851 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
49852- fscache_stat(&fscache_n_object_lookups_positive);
49853+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
49854
49855 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
49856
49857@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fscache_object *object)
49858 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
49859 } else {
49860 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
49861- fscache_stat(&fscache_n_object_created);
49862+ fscache_stat_unchecked(&fscache_n_object_created);
49863
49864 object->state = FSCACHE_OBJECT_AVAILABLE;
49865 spin_unlock(&object->lock);
49866@@ -633,7 +633,7 @@ static void fscache_object_available(struct fscache_object *object)
49867 fscache_enqueue_dependents(object);
49868
49869 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
49870- fscache_stat(&fscache_n_object_avail);
49871+ fscache_stat_unchecked(&fscache_n_object_avail);
49872
49873 _leave("");
49874 }
49875@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49876 enum fscache_checkaux result;
49877
49878 if (!object->cookie->def->check_aux) {
49879- fscache_stat(&fscache_n_checkaux_none);
49880+ fscache_stat_unchecked(&fscache_n_checkaux_none);
49881 return FSCACHE_CHECKAUX_OKAY;
49882 }
49883
49884@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
49885 switch (result) {
49886 /* entry okay as is */
49887 case FSCACHE_CHECKAUX_OKAY:
49888- fscache_stat(&fscache_n_checkaux_okay);
49889+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
49890 break;
49891
49892 /* entry requires update */
49893 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
49894- fscache_stat(&fscache_n_checkaux_update);
49895+ fscache_stat_unchecked(&fscache_n_checkaux_update);
49896 break;
49897
49898 /* entry requires deletion */
49899 case FSCACHE_CHECKAUX_OBSOLETE:
49900- fscache_stat(&fscache_n_checkaux_obsolete);
49901+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
49902 break;
49903
49904 default:
49905diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
49906index 313e79a..775240f 100644
49907--- a/fs/fscache/operation.c
49908+++ b/fs/fscache/operation.c
49909@@ -16,7 +16,7 @@
49910 #include <linux/seq_file.h>
49911 #include "internal.h"
49912
49913-atomic_t fscache_op_debug_id;
49914+atomic_unchecked_t fscache_op_debug_id;
49915 EXPORT_SYMBOL(fscache_op_debug_id);
49916
49917 /**
49918@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
49919 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
49920 ASSERTCMP(atomic_read(&op->usage), >, 0);
49921
49922- fscache_stat(&fscache_n_op_enqueue);
49923+ fscache_stat_unchecked(&fscache_n_op_enqueue);
49924 switch (op->flags & FSCACHE_OP_TYPE) {
49925 case FSCACHE_OP_FAST:
49926 _debug("queue fast");
49927@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscache_object *object,
49928 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
49929 if (op->processor)
49930 fscache_enqueue_operation(op);
49931- fscache_stat(&fscache_n_op_run);
49932+ fscache_stat_unchecked(&fscache_n_op_run);
49933 }
49934
49935 /*
49936@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49937 if (object->n_ops > 0) {
49938 atomic_inc(&op->usage);
49939 list_add_tail(&op->pend_link, &object->pending_ops);
49940- fscache_stat(&fscache_n_op_pend);
49941+ fscache_stat_unchecked(&fscache_n_op_pend);
49942 } else if (!list_empty(&object->pending_ops)) {
49943 atomic_inc(&op->usage);
49944 list_add_tail(&op->pend_link, &object->pending_ops);
49945- fscache_stat(&fscache_n_op_pend);
49946+ fscache_stat_unchecked(&fscache_n_op_pend);
49947 fscache_start_operations(object);
49948 } else {
49949 ASSERTCMP(object->n_in_progress, ==, 0);
49950@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
49951 object->n_exclusive++; /* reads and writes must wait */
49952 atomic_inc(&op->usage);
49953 list_add_tail(&op->pend_link, &object->pending_ops);
49954- fscache_stat(&fscache_n_op_pend);
49955+ fscache_stat_unchecked(&fscache_n_op_pend);
49956 ret = 0;
49957 } else {
49958 /* not allowed to submit ops in any other state */
49959@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_object *object,
49960 if (object->n_exclusive > 0) {
49961 atomic_inc(&op->usage);
49962 list_add_tail(&op->pend_link, &object->pending_ops);
49963- fscache_stat(&fscache_n_op_pend);
49964+ fscache_stat_unchecked(&fscache_n_op_pend);
49965 } else if (!list_empty(&object->pending_ops)) {
49966 atomic_inc(&op->usage);
49967 list_add_tail(&op->pend_link, &object->pending_ops);
49968- fscache_stat(&fscache_n_op_pend);
49969+ fscache_stat_unchecked(&fscache_n_op_pend);
49970 fscache_start_operations(object);
49971 } else {
49972 ASSERTCMP(object->n_exclusive, ==, 0);
49973@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_object *object,
49974 object->n_ops++;
49975 atomic_inc(&op->usage);
49976 list_add_tail(&op->pend_link, &object->pending_ops);
49977- fscache_stat(&fscache_n_op_pend);
49978+ fscache_stat_unchecked(&fscache_n_op_pend);
49979 ret = 0;
49980 } else if (object->state == FSCACHE_OBJECT_DYING ||
49981 object->state == FSCACHE_OBJECT_LC_DYING ||
49982 object->state == FSCACHE_OBJECT_WITHDRAWING) {
49983- fscache_stat(&fscache_n_op_rejected);
49984+ fscache_stat_unchecked(&fscache_n_op_rejected);
49985 ret = -ENOBUFS;
49986 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
49987 fscache_report_unexpected_submission(object, op, ostate);
49988@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_operation *op)
49989
49990 ret = -EBUSY;
49991 if (!list_empty(&op->pend_link)) {
49992- fscache_stat(&fscache_n_op_cancelled);
49993+ fscache_stat_unchecked(&fscache_n_op_cancelled);
49994 list_del_init(&op->pend_link);
49995 object->n_ops--;
49996 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
49997@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscache_operation *op)
49998 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
49999 BUG();
50000
50001- fscache_stat(&fscache_n_op_release);
50002+ fscache_stat_unchecked(&fscache_n_op_release);
50003
50004 if (op->release) {
50005 op->release(op);
50006@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscache_operation *op)
50007 * lock, and defer it otherwise */
50008 if (!spin_trylock(&object->lock)) {
50009 _debug("defer put");
50010- fscache_stat(&fscache_n_op_deferred_release);
50011+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
50012
50013 cache = object->cache;
50014 spin_lock(&cache->op_gc_list_lock);
50015@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_struct *work)
50016
50017 _debug("GC DEFERRED REL OBJ%x OP%x",
50018 object->debug_id, op->debug_id);
50019- fscache_stat(&fscache_n_op_gc);
50020+ fscache_stat_unchecked(&fscache_n_op_gc);
50021
50022 ASSERTCMP(atomic_read(&op->usage), ==, 0);
50023
50024diff --git a/fs/fscache/page.c b/fs/fscache/page.c
50025index c598ea4..6aac13e 100644
50026--- a/fs/fscache/page.c
50027+++ b/fs/fscache/page.c
50028@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50029 val = radix_tree_lookup(&cookie->stores, page->index);
50030 if (!val) {
50031 rcu_read_unlock();
50032- fscache_stat(&fscache_n_store_vmscan_not_storing);
50033+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
50034 __fscache_uncache_page(cookie, page);
50035 return true;
50036 }
50037@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
50038 spin_unlock(&cookie->stores_lock);
50039
50040 if (xpage) {
50041- fscache_stat(&fscache_n_store_vmscan_cancelled);
50042- fscache_stat(&fscache_n_store_radix_deletes);
50043+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
50044+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50045 ASSERTCMP(xpage, ==, page);
50046 } else {
50047- fscache_stat(&fscache_n_store_vmscan_gone);
50048+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
50049 }
50050
50051 wake_up_bit(&cookie->flags, 0);
50052@@ -106,7 +106,7 @@ page_busy:
50053 /* we might want to wait here, but that could deadlock the allocator as
50054 * the slow-work threads writing to the cache may all end up sleeping
50055 * on memory allocation */
50056- fscache_stat(&fscache_n_store_vmscan_busy);
50057+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
50058 return false;
50059 }
50060 EXPORT_SYMBOL(__fscache_maybe_release_page);
50061@@ -130,7 +130,7 @@ static void fscache_end_page_write(struct fscache_object *object,
50062 FSCACHE_COOKIE_STORING_TAG);
50063 if (!radix_tree_tag_get(&cookie->stores, page->index,
50064 FSCACHE_COOKIE_PENDING_TAG)) {
50065- fscache_stat(&fscache_n_store_radix_deletes);
50066+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
50067 xpage = radix_tree_delete(&cookie->stores, page->index);
50068 }
50069 spin_unlock(&cookie->stores_lock);
50070@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
50071
50072 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
50073
50074- fscache_stat(&fscache_n_attr_changed_calls);
50075+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
50076
50077 if (fscache_object_is_active(object)) {
50078 fscache_set_op_state(op, "CallFS");
50079@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50080
50081 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50082
50083- fscache_stat(&fscache_n_attr_changed);
50084+ fscache_stat_unchecked(&fscache_n_attr_changed);
50085
50086 op = kzalloc(sizeof(*op), GFP_KERNEL);
50087 if (!op) {
50088- fscache_stat(&fscache_n_attr_changed_nomem);
50089+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
50090 _leave(" = -ENOMEM");
50091 return -ENOMEM;
50092 }
50093@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50094 if (fscache_submit_exclusive_op(object, op) < 0)
50095 goto nobufs;
50096 spin_unlock(&cookie->lock);
50097- fscache_stat(&fscache_n_attr_changed_ok);
50098+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
50099 fscache_put_operation(op);
50100 _leave(" = 0");
50101 return 0;
50102@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
50103 nobufs:
50104 spin_unlock(&cookie->lock);
50105 kfree(op);
50106- fscache_stat(&fscache_n_attr_changed_nobufs);
50107+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
50108 _leave(" = %d", -ENOBUFS);
50109 return -ENOBUFS;
50110 }
50111@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
50112 /* allocate a retrieval operation and attempt to submit it */
50113 op = kzalloc(sizeof(*op), GFP_NOIO);
50114 if (!op) {
50115- fscache_stat(&fscache_n_retrievals_nomem);
50116+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50117 return NULL;
50118 }
50119
50120@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50121 return 0;
50122 }
50123
50124- fscache_stat(&fscache_n_retrievals_wait);
50125+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
50126
50127 jif = jiffies;
50128 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
50129 fscache_wait_bit_interruptible,
50130 TASK_INTERRUPTIBLE) != 0) {
50131- fscache_stat(&fscache_n_retrievals_intr);
50132+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50133 _leave(" = -ERESTARTSYS");
50134 return -ERESTARTSYS;
50135 }
50136@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
50137 */
50138 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50139 struct fscache_retrieval *op,
50140- atomic_t *stat_op_waits,
50141- atomic_t *stat_object_dead)
50142+ atomic_unchecked_t *stat_op_waits,
50143+ atomic_unchecked_t *stat_object_dead)
50144 {
50145 int ret;
50146
50147@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50148 goto check_if_dead;
50149
50150 _debug(">>> WT");
50151- fscache_stat(stat_op_waits);
50152+ fscache_stat_unchecked(stat_op_waits);
50153 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
50154 fscache_wait_bit_interruptible,
50155 TASK_INTERRUPTIBLE) < 0) {
50156@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
50157
50158 check_if_dead:
50159 if (unlikely(fscache_object_is_dead(object))) {
50160- fscache_stat(stat_object_dead);
50161+ fscache_stat_unchecked(stat_object_dead);
50162 return -ENOBUFS;
50163 }
50164 return 0;
50165@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50166
50167 _enter("%p,%p,,,", cookie, page);
50168
50169- fscache_stat(&fscache_n_retrievals);
50170+ fscache_stat_unchecked(&fscache_n_retrievals);
50171
50172 if (hlist_empty(&cookie->backing_objects))
50173 goto nobufs;
50174@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50175 goto nobufs_unlock;
50176 spin_unlock(&cookie->lock);
50177
50178- fscache_stat(&fscache_n_retrieval_ops);
50179+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50180
50181 /* pin the netfs read context in case we need to do the actual netfs
50182 * read because we've encountered a cache read failure */
50183@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
50184
50185 error:
50186 if (ret == -ENOMEM)
50187- fscache_stat(&fscache_n_retrievals_nomem);
50188+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50189 else if (ret == -ERESTARTSYS)
50190- fscache_stat(&fscache_n_retrievals_intr);
50191+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50192 else if (ret == -ENODATA)
50193- fscache_stat(&fscache_n_retrievals_nodata);
50194+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50195 else if (ret < 0)
50196- fscache_stat(&fscache_n_retrievals_nobufs);
50197+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50198 else
50199- fscache_stat(&fscache_n_retrievals_ok);
50200+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50201
50202 fscache_put_retrieval(op);
50203 _leave(" = %d", ret);
50204@@ -453,7 +453,7 @@ nobufs_unlock:
50205 spin_unlock(&cookie->lock);
50206 kfree(op);
50207 nobufs:
50208- fscache_stat(&fscache_n_retrievals_nobufs);
50209+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50210 _leave(" = -ENOBUFS");
50211 return -ENOBUFS;
50212 }
50213@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50214
50215 _enter("%p,,%d,,,", cookie, *nr_pages);
50216
50217- fscache_stat(&fscache_n_retrievals);
50218+ fscache_stat_unchecked(&fscache_n_retrievals);
50219
50220 if (hlist_empty(&cookie->backing_objects))
50221 goto nobufs;
50222@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50223 goto nobufs_unlock;
50224 spin_unlock(&cookie->lock);
50225
50226- fscache_stat(&fscache_n_retrieval_ops);
50227+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
50228
50229 /* pin the netfs read context in case we need to do the actual netfs
50230 * read because we've encountered a cache read failure */
50231@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
50232
50233 error:
50234 if (ret == -ENOMEM)
50235- fscache_stat(&fscache_n_retrievals_nomem);
50236+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
50237 else if (ret == -ERESTARTSYS)
50238- fscache_stat(&fscache_n_retrievals_intr);
50239+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
50240 else if (ret == -ENODATA)
50241- fscache_stat(&fscache_n_retrievals_nodata);
50242+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
50243 else if (ret < 0)
50244- fscache_stat(&fscache_n_retrievals_nobufs);
50245+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50246 else
50247- fscache_stat(&fscache_n_retrievals_ok);
50248+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
50249
50250 fscache_put_retrieval(op);
50251 _leave(" = %d", ret);
50252@@ -570,7 +570,7 @@ nobufs_unlock:
50253 spin_unlock(&cookie->lock);
50254 kfree(op);
50255 nobufs:
50256- fscache_stat(&fscache_n_retrievals_nobufs);
50257+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
50258 _leave(" = -ENOBUFS");
50259 return -ENOBUFS;
50260 }
50261@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50262
50263 _enter("%p,%p,,,", cookie, page);
50264
50265- fscache_stat(&fscache_n_allocs);
50266+ fscache_stat_unchecked(&fscache_n_allocs);
50267
50268 if (hlist_empty(&cookie->backing_objects))
50269 goto nobufs;
50270@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50271 goto nobufs_unlock;
50272 spin_unlock(&cookie->lock);
50273
50274- fscache_stat(&fscache_n_alloc_ops);
50275+ fscache_stat_unchecked(&fscache_n_alloc_ops);
50276
50277 ret = fscache_wait_for_retrieval_activation(
50278 object, op,
50279@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
50280
50281 error:
50282 if (ret == -ERESTARTSYS)
50283- fscache_stat(&fscache_n_allocs_intr);
50284+ fscache_stat_unchecked(&fscache_n_allocs_intr);
50285 else if (ret < 0)
50286- fscache_stat(&fscache_n_allocs_nobufs);
50287+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50288 else
50289- fscache_stat(&fscache_n_allocs_ok);
50290+ fscache_stat_unchecked(&fscache_n_allocs_ok);
50291
50292 fscache_put_retrieval(op);
50293 _leave(" = %d", ret);
50294@@ -651,7 +651,7 @@ nobufs_unlock:
50295 spin_unlock(&cookie->lock);
50296 kfree(op);
50297 nobufs:
50298- fscache_stat(&fscache_n_allocs_nobufs);
50299+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
50300 _leave(" = -ENOBUFS");
50301 return -ENOBUFS;
50302 }
50303@@ -694,7 +694,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50304
50305 spin_lock(&cookie->stores_lock);
50306
50307- fscache_stat(&fscache_n_store_calls);
50308+ fscache_stat_unchecked(&fscache_n_store_calls);
50309
50310 /* find a page to store */
50311 page = NULL;
50312@@ -705,7 +705,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50313 page = results[0];
50314 _debug("gang %d [%lx]", n, page->index);
50315 if (page->index > op->store_limit) {
50316- fscache_stat(&fscache_n_store_pages_over_limit);
50317+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
50318 goto superseded;
50319 }
50320
50321@@ -721,7 +721,7 @@ static void fscache_write_op(struct fscache_operation *_op)
50322
50323 if (page) {
50324 fscache_set_op_state(&op->op, "Store");
50325- fscache_stat(&fscache_n_store_pages);
50326+ fscache_stat_unchecked(&fscache_n_store_pages);
50327 fscache_stat(&fscache_n_cop_write_page);
50328 ret = object->cache->ops->write_page(op, page);
50329 fscache_stat_d(&fscache_n_cop_write_page);
50330@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50331 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50332 ASSERT(PageFsCache(page));
50333
50334- fscache_stat(&fscache_n_stores);
50335+ fscache_stat_unchecked(&fscache_n_stores);
50336
50337 op = kzalloc(sizeof(*op), GFP_NOIO);
50338 if (!op)
50339@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50340 spin_unlock(&cookie->stores_lock);
50341 spin_unlock(&object->lock);
50342
50343- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
50344+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
50345 op->store_limit = object->store_limit;
50346
50347 if (fscache_submit_op(object, &op->op) < 0)
50348@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50349
50350 spin_unlock(&cookie->lock);
50351 radix_tree_preload_end();
50352- fscache_stat(&fscache_n_store_ops);
50353- fscache_stat(&fscache_n_stores_ok);
50354+ fscache_stat_unchecked(&fscache_n_store_ops);
50355+ fscache_stat_unchecked(&fscache_n_stores_ok);
50356
50357 /* the slow work queue now carries its own ref on the object */
50358 fscache_put_operation(&op->op);
50359@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
50360 return 0;
50361
50362 already_queued:
50363- fscache_stat(&fscache_n_stores_again);
50364+ fscache_stat_unchecked(&fscache_n_stores_again);
50365 already_pending:
50366 spin_unlock(&cookie->stores_lock);
50367 spin_unlock(&object->lock);
50368 spin_unlock(&cookie->lock);
50369 radix_tree_preload_end();
50370 kfree(op);
50371- fscache_stat(&fscache_n_stores_ok);
50372+ fscache_stat_unchecked(&fscache_n_stores_ok);
50373 _leave(" = 0");
50374 return 0;
50375
50376@@ -886,14 +886,14 @@ nobufs:
50377 spin_unlock(&cookie->lock);
50378 radix_tree_preload_end();
50379 kfree(op);
50380- fscache_stat(&fscache_n_stores_nobufs);
50381+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
50382 _leave(" = -ENOBUFS");
50383 return -ENOBUFS;
50384
50385 nomem_free:
50386 kfree(op);
50387 nomem:
50388- fscache_stat(&fscache_n_stores_oom);
50389+ fscache_stat_unchecked(&fscache_n_stores_oom);
50390 _leave(" = -ENOMEM");
50391 return -ENOMEM;
50392 }
50393@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
50394 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
50395 ASSERTCMP(page, !=, NULL);
50396
50397- fscache_stat(&fscache_n_uncaches);
50398+ fscache_stat_unchecked(&fscache_n_uncaches);
50399
50400 /* cache withdrawal may beat us to it */
50401 if (!PageFsCache(page))
50402@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fscache_retrieval *op,
50403 unsigned long loop;
50404
50405 #ifdef CONFIG_FSCACHE_STATS
50406- atomic_add(pagevec->nr, &fscache_n_marks);
50407+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
50408 #endif
50409
50410 for (loop = 0; loop < pagevec->nr; loop++) {
50411diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
50412index 46435f3..8cddf18 100644
50413--- a/fs/fscache/stats.c
50414+++ b/fs/fscache/stats.c
50415@@ -18,95 +18,95 @@
50416 /*
50417 * operation counters
50418 */
50419-atomic_t fscache_n_op_pend;
50420-atomic_t fscache_n_op_run;
50421-atomic_t fscache_n_op_enqueue;
50422-atomic_t fscache_n_op_requeue;
50423-atomic_t fscache_n_op_deferred_release;
50424-atomic_t fscache_n_op_release;
50425-atomic_t fscache_n_op_gc;
50426-atomic_t fscache_n_op_cancelled;
50427-atomic_t fscache_n_op_rejected;
50428+atomic_unchecked_t fscache_n_op_pend;
50429+atomic_unchecked_t fscache_n_op_run;
50430+atomic_unchecked_t fscache_n_op_enqueue;
50431+atomic_unchecked_t fscache_n_op_requeue;
50432+atomic_unchecked_t fscache_n_op_deferred_release;
50433+atomic_unchecked_t fscache_n_op_release;
50434+atomic_unchecked_t fscache_n_op_gc;
50435+atomic_unchecked_t fscache_n_op_cancelled;
50436+atomic_unchecked_t fscache_n_op_rejected;
50437
50438-atomic_t fscache_n_attr_changed;
50439-atomic_t fscache_n_attr_changed_ok;
50440-atomic_t fscache_n_attr_changed_nobufs;
50441-atomic_t fscache_n_attr_changed_nomem;
50442-atomic_t fscache_n_attr_changed_calls;
50443+atomic_unchecked_t fscache_n_attr_changed;
50444+atomic_unchecked_t fscache_n_attr_changed_ok;
50445+atomic_unchecked_t fscache_n_attr_changed_nobufs;
50446+atomic_unchecked_t fscache_n_attr_changed_nomem;
50447+atomic_unchecked_t fscache_n_attr_changed_calls;
50448
50449-atomic_t fscache_n_allocs;
50450-atomic_t fscache_n_allocs_ok;
50451-atomic_t fscache_n_allocs_wait;
50452-atomic_t fscache_n_allocs_nobufs;
50453-atomic_t fscache_n_allocs_intr;
50454-atomic_t fscache_n_allocs_object_dead;
50455-atomic_t fscache_n_alloc_ops;
50456-atomic_t fscache_n_alloc_op_waits;
50457+atomic_unchecked_t fscache_n_allocs;
50458+atomic_unchecked_t fscache_n_allocs_ok;
50459+atomic_unchecked_t fscache_n_allocs_wait;
50460+atomic_unchecked_t fscache_n_allocs_nobufs;
50461+atomic_unchecked_t fscache_n_allocs_intr;
50462+atomic_unchecked_t fscache_n_allocs_object_dead;
50463+atomic_unchecked_t fscache_n_alloc_ops;
50464+atomic_unchecked_t fscache_n_alloc_op_waits;
50465
50466-atomic_t fscache_n_retrievals;
50467-atomic_t fscache_n_retrievals_ok;
50468-atomic_t fscache_n_retrievals_wait;
50469-atomic_t fscache_n_retrievals_nodata;
50470-atomic_t fscache_n_retrievals_nobufs;
50471-atomic_t fscache_n_retrievals_intr;
50472-atomic_t fscache_n_retrievals_nomem;
50473-atomic_t fscache_n_retrievals_object_dead;
50474-atomic_t fscache_n_retrieval_ops;
50475-atomic_t fscache_n_retrieval_op_waits;
50476+atomic_unchecked_t fscache_n_retrievals;
50477+atomic_unchecked_t fscache_n_retrievals_ok;
50478+atomic_unchecked_t fscache_n_retrievals_wait;
50479+atomic_unchecked_t fscache_n_retrievals_nodata;
50480+atomic_unchecked_t fscache_n_retrievals_nobufs;
50481+atomic_unchecked_t fscache_n_retrievals_intr;
50482+atomic_unchecked_t fscache_n_retrievals_nomem;
50483+atomic_unchecked_t fscache_n_retrievals_object_dead;
50484+atomic_unchecked_t fscache_n_retrieval_ops;
50485+atomic_unchecked_t fscache_n_retrieval_op_waits;
50486
50487-atomic_t fscache_n_stores;
50488-atomic_t fscache_n_stores_ok;
50489-atomic_t fscache_n_stores_again;
50490-atomic_t fscache_n_stores_nobufs;
50491-atomic_t fscache_n_stores_oom;
50492-atomic_t fscache_n_store_ops;
50493-atomic_t fscache_n_store_calls;
50494-atomic_t fscache_n_store_pages;
50495-atomic_t fscache_n_store_radix_deletes;
50496-atomic_t fscache_n_store_pages_over_limit;
50497+atomic_unchecked_t fscache_n_stores;
50498+atomic_unchecked_t fscache_n_stores_ok;
50499+atomic_unchecked_t fscache_n_stores_again;
50500+atomic_unchecked_t fscache_n_stores_nobufs;
50501+atomic_unchecked_t fscache_n_stores_oom;
50502+atomic_unchecked_t fscache_n_store_ops;
50503+atomic_unchecked_t fscache_n_store_calls;
50504+atomic_unchecked_t fscache_n_store_pages;
50505+atomic_unchecked_t fscache_n_store_radix_deletes;
50506+atomic_unchecked_t fscache_n_store_pages_over_limit;
50507
50508-atomic_t fscache_n_store_vmscan_not_storing;
50509-atomic_t fscache_n_store_vmscan_gone;
50510-atomic_t fscache_n_store_vmscan_busy;
50511-atomic_t fscache_n_store_vmscan_cancelled;
50512+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
50513+atomic_unchecked_t fscache_n_store_vmscan_gone;
50514+atomic_unchecked_t fscache_n_store_vmscan_busy;
50515+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
50516
50517-atomic_t fscache_n_marks;
50518-atomic_t fscache_n_uncaches;
50519+atomic_unchecked_t fscache_n_marks;
50520+atomic_unchecked_t fscache_n_uncaches;
50521
50522-atomic_t fscache_n_acquires;
50523-atomic_t fscache_n_acquires_null;
50524-atomic_t fscache_n_acquires_no_cache;
50525-atomic_t fscache_n_acquires_ok;
50526-atomic_t fscache_n_acquires_nobufs;
50527-atomic_t fscache_n_acquires_oom;
50528+atomic_unchecked_t fscache_n_acquires;
50529+atomic_unchecked_t fscache_n_acquires_null;
50530+atomic_unchecked_t fscache_n_acquires_no_cache;
50531+atomic_unchecked_t fscache_n_acquires_ok;
50532+atomic_unchecked_t fscache_n_acquires_nobufs;
50533+atomic_unchecked_t fscache_n_acquires_oom;
50534
50535-atomic_t fscache_n_updates;
50536-atomic_t fscache_n_updates_null;
50537-atomic_t fscache_n_updates_run;
50538+atomic_unchecked_t fscache_n_updates;
50539+atomic_unchecked_t fscache_n_updates_null;
50540+atomic_unchecked_t fscache_n_updates_run;
50541
50542-atomic_t fscache_n_relinquishes;
50543-atomic_t fscache_n_relinquishes_null;
50544-atomic_t fscache_n_relinquishes_waitcrt;
50545-atomic_t fscache_n_relinquishes_retire;
50546+atomic_unchecked_t fscache_n_relinquishes;
50547+atomic_unchecked_t fscache_n_relinquishes_null;
50548+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
50549+atomic_unchecked_t fscache_n_relinquishes_retire;
50550
50551-atomic_t fscache_n_cookie_index;
50552-atomic_t fscache_n_cookie_data;
50553-atomic_t fscache_n_cookie_special;
50554+atomic_unchecked_t fscache_n_cookie_index;
50555+atomic_unchecked_t fscache_n_cookie_data;
50556+atomic_unchecked_t fscache_n_cookie_special;
50557
50558-atomic_t fscache_n_object_alloc;
50559-atomic_t fscache_n_object_no_alloc;
50560-atomic_t fscache_n_object_lookups;
50561-atomic_t fscache_n_object_lookups_negative;
50562-atomic_t fscache_n_object_lookups_positive;
50563-atomic_t fscache_n_object_lookups_timed_out;
50564-atomic_t fscache_n_object_created;
50565-atomic_t fscache_n_object_avail;
50566-atomic_t fscache_n_object_dead;
50567+atomic_unchecked_t fscache_n_object_alloc;
50568+atomic_unchecked_t fscache_n_object_no_alloc;
50569+atomic_unchecked_t fscache_n_object_lookups;
50570+atomic_unchecked_t fscache_n_object_lookups_negative;
50571+atomic_unchecked_t fscache_n_object_lookups_positive;
50572+atomic_unchecked_t fscache_n_object_lookups_timed_out;
50573+atomic_unchecked_t fscache_n_object_created;
50574+atomic_unchecked_t fscache_n_object_avail;
50575+atomic_unchecked_t fscache_n_object_dead;
50576
50577-atomic_t fscache_n_checkaux_none;
50578-atomic_t fscache_n_checkaux_okay;
50579-atomic_t fscache_n_checkaux_update;
50580-atomic_t fscache_n_checkaux_obsolete;
50581+atomic_unchecked_t fscache_n_checkaux_none;
50582+atomic_unchecked_t fscache_n_checkaux_okay;
50583+atomic_unchecked_t fscache_n_checkaux_update;
50584+atomic_unchecked_t fscache_n_checkaux_obsolete;
50585
50586 atomic_t fscache_n_cop_alloc_object;
50587 atomic_t fscache_n_cop_lookup_object;
50588@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq_file *m, void *v)
50589 seq_puts(m, "FS-Cache statistics\n");
50590
50591 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
50592- atomic_read(&fscache_n_cookie_index),
50593- atomic_read(&fscache_n_cookie_data),
50594- atomic_read(&fscache_n_cookie_special));
50595+ atomic_read_unchecked(&fscache_n_cookie_index),
50596+ atomic_read_unchecked(&fscache_n_cookie_data),
50597+ atomic_read_unchecked(&fscache_n_cookie_special));
50598
50599 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
50600- atomic_read(&fscache_n_object_alloc),
50601- atomic_read(&fscache_n_object_no_alloc),
50602- atomic_read(&fscache_n_object_avail),
50603- atomic_read(&fscache_n_object_dead));
50604+ atomic_read_unchecked(&fscache_n_object_alloc),
50605+ atomic_read_unchecked(&fscache_n_object_no_alloc),
50606+ atomic_read_unchecked(&fscache_n_object_avail),
50607+ atomic_read_unchecked(&fscache_n_object_dead));
50608 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
50609- atomic_read(&fscache_n_checkaux_none),
50610- atomic_read(&fscache_n_checkaux_okay),
50611- atomic_read(&fscache_n_checkaux_update),
50612- atomic_read(&fscache_n_checkaux_obsolete));
50613+ atomic_read_unchecked(&fscache_n_checkaux_none),
50614+ atomic_read_unchecked(&fscache_n_checkaux_okay),
50615+ atomic_read_unchecked(&fscache_n_checkaux_update),
50616+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
50617
50618 seq_printf(m, "Pages : mrk=%u unc=%u\n",
50619- atomic_read(&fscache_n_marks),
50620- atomic_read(&fscache_n_uncaches));
50621+ atomic_read_unchecked(&fscache_n_marks),
50622+ atomic_read_unchecked(&fscache_n_uncaches));
50623
50624 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
50625 " oom=%u\n",
50626- atomic_read(&fscache_n_acquires),
50627- atomic_read(&fscache_n_acquires_null),
50628- atomic_read(&fscache_n_acquires_no_cache),
50629- atomic_read(&fscache_n_acquires_ok),
50630- atomic_read(&fscache_n_acquires_nobufs),
50631- atomic_read(&fscache_n_acquires_oom));
50632+ atomic_read_unchecked(&fscache_n_acquires),
50633+ atomic_read_unchecked(&fscache_n_acquires_null),
50634+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
50635+ atomic_read_unchecked(&fscache_n_acquires_ok),
50636+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
50637+ atomic_read_unchecked(&fscache_n_acquires_oom));
50638
50639 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
50640- atomic_read(&fscache_n_object_lookups),
50641- atomic_read(&fscache_n_object_lookups_negative),
50642- atomic_read(&fscache_n_object_lookups_positive),
50643- atomic_read(&fscache_n_object_lookups_timed_out),
50644- atomic_read(&fscache_n_object_created));
50645+ atomic_read_unchecked(&fscache_n_object_lookups),
50646+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
50647+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
50648+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
50649+ atomic_read_unchecked(&fscache_n_object_created));
50650
50651 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
50652- atomic_read(&fscache_n_updates),
50653- atomic_read(&fscache_n_updates_null),
50654- atomic_read(&fscache_n_updates_run));
50655+ atomic_read_unchecked(&fscache_n_updates),
50656+ atomic_read_unchecked(&fscache_n_updates_null),
50657+ atomic_read_unchecked(&fscache_n_updates_run));
50658
50659 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
50660- atomic_read(&fscache_n_relinquishes),
50661- atomic_read(&fscache_n_relinquishes_null),
50662- atomic_read(&fscache_n_relinquishes_waitcrt),
50663- atomic_read(&fscache_n_relinquishes_retire));
50664+ atomic_read_unchecked(&fscache_n_relinquishes),
50665+ atomic_read_unchecked(&fscache_n_relinquishes_null),
50666+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
50667+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
50668
50669 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
50670- atomic_read(&fscache_n_attr_changed),
50671- atomic_read(&fscache_n_attr_changed_ok),
50672- atomic_read(&fscache_n_attr_changed_nobufs),
50673- atomic_read(&fscache_n_attr_changed_nomem),
50674- atomic_read(&fscache_n_attr_changed_calls));
50675+ atomic_read_unchecked(&fscache_n_attr_changed),
50676+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
50677+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
50678+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
50679+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
50680
50681 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
50682- atomic_read(&fscache_n_allocs),
50683- atomic_read(&fscache_n_allocs_ok),
50684- atomic_read(&fscache_n_allocs_wait),
50685- atomic_read(&fscache_n_allocs_nobufs),
50686- atomic_read(&fscache_n_allocs_intr));
50687+ atomic_read_unchecked(&fscache_n_allocs),
50688+ atomic_read_unchecked(&fscache_n_allocs_ok),
50689+ atomic_read_unchecked(&fscache_n_allocs_wait),
50690+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
50691+ atomic_read_unchecked(&fscache_n_allocs_intr));
50692 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
50693- atomic_read(&fscache_n_alloc_ops),
50694- atomic_read(&fscache_n_alloc_op_waits),
50695- atomic_read(&fscache_n_allocs_object_dead));
50696+ atomic_read_unchecked(&fscache_n_alloc_ops),
50697+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
50698+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
50699
50700 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
50701 " int=%u oom=%u\n",
50702- atomic_read(&fscache_n_retrievals),
50703- atomic_read(&fscache_n_retrievals_ok),
50704- atomic_read(&fscache_n_retrievals_wait),
50705- atomic_read(&fscache_n_retrievals_nodata),
50706- atomic_read(&fscache_n_retrievals_nobufs),
50707- atomic_read(&fscache_n_retrievals_intr),
50708- atomic_read(&fscache_n_retrievals_nomem));
50709+ atomic_read_unchecked(&fscache_n_retrievals),
50710+ atomic_read_unchecked(&fscache_n_retrievals_ok),
50711+ atomic_read_unchecked(&fscache_n_retrievals_wait),
50712+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
50713+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
50714+ atomic_read_unchecked(&fscache_n_retrievals_intr),
50715+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
50716 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
50717- atomic_read(&fscache_n_retrieval_ops),
50718- atomic_read(&fscache_n_retrieval_op_waits),
50719- atomic_read(&fscache_n_retrievals_object_dead));
50720+ atomic_read_unchecked(&fscache_n_retrieval_ops),
50721+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
50722+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
50723
50724 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
50725- atomic_read(&fscache_n_stores),
50726- atomic_read(&fscache_n_stores_ok),
50727- atomic_read(&fscache_n_stores_again),
50728- atomic_read(&fscache_n_stores_nobufs),
50729- atomic_read(&fscache_n_stores_oom));
50730+ atomic_read_unchecked(&fscache_n_stores),
50731+ atomic_read_unchecked(&fscache_n_stores_ok),
50732+ atomic_read_unchecked(&fscache_n_stores_again),
50733+ atomic_read_unchecked(&fscache_n_stores_nobufs),
50734+ atomic_read_unchecked(&fscache_n_stores_oom));
50735 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
50736- atomic_read(&fscache_n_store_ops),
50737- atomic_read(&fscache_n_store_calls),
50738- atomic_read(&fscache_n_store_pages),
50739- atomic_read(&fscache_n_store_radix_deletes),
50740- atomic_read(&fscache_n_store_pages_over_limit));
50741+ atomic_read_unchecked(&fscache_n_store_ops),
50742+ atomic_read_unchecked(&fscache_n_store_calls),
50743+ atomic_read_unchecked(&fscache_n_store_pages),
50744+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
50745+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
50746
50747 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
50748- atomic_read(&fscache_n_store_vmscan_not_storing),
50749- atomic_read(&fscache_n_store_vmscan_gone),
50750- atomic_read(&fscache_n_store_vmscan_busy),
50751- atomic_read(&fscache_n_store_vmscan_cancelled));
50752+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
50753+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
50754+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
50755+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
50756
50757 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
50758- atomic_read(&fscache_n_op_pend),
50759- atomic_read(&fscache_n_op_run),
50760- atomic_read(&fscache_n_op_enqueue),
50761- atomic_read(&fscache_n_op_cancelled),
50762- atomic_read(&fscache_n_op_rejected));
50763+ atomic_read_unchecked(&fscache_n_op_pend),
50764+ atomic_read_unchecked(&fscache_n_op_run),
50765+ atomic_read_unchecked(&fscache_n_op_enqueue),
50766+ atomic_read_unchecked(&fscache_n_op_cancelled),
50767+ atomic_read_unchecked(&fscache_n_op_rejected));
50768 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
50769- atomic_read(&fscache_n_op_deferred_release),
50770- atomic_read(&fscache_n_op_release),
50771- atomic_read(&fscache_n_op_gc));
50772+ atomic_read_unchecked(&fscache_n_op_deferred_release),
50773+ atomic_read_unchecked(&fscache_n_op_release),
50774+ atomic_read_unchecked(&fscache_n_op_gc));
50775
50776 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
50777 atomic_read(&fscache_n_cop_alloc_object),
50778diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
50779index de792dc..448b532 100644
50780--- a/fs/fuse/cuse.c
50781+++ b/fs/fuse/cuse.c
50782@@ -576,10 +576,12 @@ static int __init cuse_init(void)
50783 INIT_LIST_HEAD(&cuse_conntbl[i]);
50784
50785 /* inherit and extend fuse_dev_operations */
50786- cuse_channel_fops = fuse_dev_operations;
50787- cuse_channel_fops.owner = THIS_MODULE;
50788- cuse_channel_fops.open = cuse_channel_open;
50789- cuse_channel_fops.release = cuse_channel_release;
50790+ pax_open_kernel();
50791+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
50792+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
50793+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
50794+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
50795+ pax_close_kernel();
50796
50797 cuse_class = class_create(THIS_MODULE, "cuse");
50798 if (IS_ERR(cuse_class))
50799diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
50800index 1facb39..7f48557 100644
50801--- a/fs/fuse/dev.c
50802+++ b/fs/fuse/dev.c
50803@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50804 {
50805 struct fuse_notify_inval_entry_out outarg;
50806 int err = -EINVAL;
50807- char buf[FUSE_NAME_MAX+1];
50808+ char *buf = NULL;
50809 struct qstr name;
50810
50811 if (size < sizeof(outarg))
50812@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50813 if (outarg.namelen > FUSE_NAME_MAX)
50814 goto err;
50815
50816+ err = -ENOMEM;
50817+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
50818+ if (!buf)
50819+ goto err;
50820+
50821 err = -EINVAL;
50822 if (size != sizeof(outarg) + outarg.namelen + 1)
50823 goto err;
50824@@ -914,17 +919,15 @@ static int fuse_notify_inval_entry(struct fuse_conn *fc, unsigned int size,
50825
50826 down_read(&fc->killsb);
50827 err = -ENOENT;
50828- if (!fc->sb)
50829- goto err_unlock;
50830-
50831- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50832-
50833-err_unlock:
50834+ if (fc->sb)
50835+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
50836 up_read(&fc->killsb);
50837+ kfree(buf);
50838 return err;
50839
50840 err:
50841 fuse_copy_finish(cs);
50842+ kfree(buf);
50843 return err;
50844 }
50845
50846diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
50847index 4787ae6..73efff7 100644
50848--- a/fs/fuse/dir.c
50849+++ b/fs/fuse/dir.c
50850@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *dentry)
50851 return link;
50852 }
50853
50854-static void free_link(char *link)
50855+static void free_link(const char *link)
50856 {
50857 if (!IS_ERR(link))
50858 free_page((unsigned long) link);
50859diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
50860index 247436c..e650ccb 100644
50861--- a/fs/gfs2/ops_inode.c
50862+++ b/fs/gfs2/ops_inode.c
50863@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odir, struct dentry *odentry,
50864 unsigned int x;
50865 int error;
50866
50867+ pax_track_stack();
50868+
50869 if (ndentry->d_inode) {
50870 nip = GFS2_I(ndentry->d_inode);
50871 if (ip == nip)
50872diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
50873index 4463297..4fed53b 100644
50874--- a/fs/gfs2/sys.c
50875+++ b/fs/gfs2/sys.c
50876@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct kobject *kobj, struct attribute *attr,
50877 return a->store ? a->store(sdp, buf, len) : len;
50878 }
50879
50880-static struct sysfs_ops gfs2_attr_ops = {
50881+static const struct sysfs_ops gfs2_attr_ops = {
50882 .show = gfs2_attr_show,
50883 .store = gfs2_attr_store,
50884 };
50885@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset, struct kobject *kobj,
50886 return 0;
50887 }
50888
50889-static struct kset_uevent_ops gfs2_uevent_ops = {
50890+static const struct kset_uevent_ops gfs2_uevent_ops = {
50891 .uevent = gfs2_uevent,
50892 };
50893
50894diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
50895index f6874ac..7cd98a8 100644
50896--- a/fs/hfsplus/catalog.c
50897+++ b/fs/hfsplus/catalog.c
50898@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
50899 int err;
50900 u16 type;
50901
50902+ pax_track_stack();
50903+
50904 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
50905 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
50906 if (err)
50907@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct ino
50908 int entry_size;
50909 int err;
50910
50911+ pax_track_stack();
50912+
50913 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
50914 sb = dir->i_sb;
50915 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
50916@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
50917 int entry_size, type;
50918 int err = 0;
50919
50920+ pax_track_stack();
50921+
50922 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
50923 dst_dir->i_ino, dst_name->name);
50924 sb = src_dir->i_sb;
50925diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c
50926index 5f40236..dac3421 100644
50927--- a/fs/hfsplus/dir.c
50928+++ b/fs/hfsplus/dir.c
50929@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *filp, void *dirent, filldir_t filldir)
50930 struct hfsplus_readdir_data *rd;
50931 u16 type;
50932
50933+ pax_track_stack();
50934+
50935 if (filp->f_pos >= inode->i_size)
50936 return 0;
50937
50938diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
50939index 1bcf597..905a251 100644
50940--- a/fs/hfsplus/inode.c
50941+++ b/fs/hfsplus/inode.c
50942@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
50943 int res = 0;
50944 u16 type;
50945
50946+ pax_track_stack();
50947+
50948 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
50949
50950 HFSPLUS_I(inode).dev = 0;
50951@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode *inode)
50952 struct hfs_find_data fd;
50953 hfsplus_cat_entry entry;
50954
50955+ pax_track_stack();
50956+
50957 if (HFSPLUS_IS_RSRC(inode))
50958 main_inode = HFSPLUS_I(inode).rsrc_inode;
50959
50960diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
50961index f457d2c..7ef4ad5 100644
50962--- a/fs/hfsplus/ioctl.c
50963+++ b/fs/hfsplus/ioctl.c
50964@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dentry, const char *name,
50965 struct hfsplus_cat_file *file;
50966 int res;
50967
50968+ pax_track_stack();
50969+
50970 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50971 return -EOPNOTSUPP;
50972
50973@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name,
50974 struct hfsplus_cat_file *file;
50975 ssize_t res = 0;
50976
50977+ pax_track_stack();
50978+
50979 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
50980 return -EOPNOTSUPP;
50981
50982diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
50983index 43022f3..7298079 100644
50984--- a/fs/hfsplus/super.c
50985+++ b/fs/hfsplus/super.c
50986@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct super_block *sb, void *data, int silent)
50987 struct nls_table *nls = NULL;
50988 int err = -EINVAL;
50989
50990+ pax_track_stack();
50991+
50992 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
50993 if (!sbi)
50994 return -ENOMEM;
50995diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
50996index 87a1258..5694d91 100644
50997--- a/fs/hugetlbfs/inode.c
50998+++ b/fs/hugetlbfs/inode.c
50999@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs_fs_type = {
51000 .kill_sb = kill_litter_super,
51001 };
51002
51003-static struct vfsmount *hugetlbfs_vfsmount;
51004+struct vfsmount *hugetlbfs_vfsmount;
51005
51006 static int can_do_hugetlb_shm(void)
51007 {
51008diff --git a/fs/ioctl.c b/fs/ioctl.c
51009index 6c75110..19d2c3c 100644
51010--- a/fs/ioctl.c
51011+++ b/fs/ioctl.c
51012@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
51013 u64 phys, u64 len, u32 flags)
51014 {
51015 struct fiemap_extent extent;
51016- struct fiemap_extent *dest = fieinfo->fi_extents_start;
51017+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
51018
51019 /* only count the extents */
51020 if (fieinfo->fi_extents_max == 0) {
51021@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51022
51023 fieinfo.fi_flags = fiemap.fm_flags;
51024 fieinfo.fi_extents_max = fiemap.fm_extent_count;
51025- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
51026+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
51027
51028 if (fiemap.fm_extent_count != 0 &&
51029 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
51030@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *filp, unsigned long arg)
51031 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
51032 fiemap.fm_flags = fieinfo.fi_flags;
51033 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
51034- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
51035+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
51036 error = -EFAULT;
51037
51038 return error;
51039diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
51040index b0435dd..81ee0be 100644
51041--- a/fs/jbd/checkpoint.c
51042+++ b/fs/jbd/checkpoint.c
51043@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal)
51044 tid_t this_tid;
51045 int result;
51046
51047+ pax_track_stack();
51048+
51049 jbd_debug(1, "Start checkpoint\n");
51050
51051 /*
51052diff --git a/fs/jffs2/compr_rtime.c b/fs/jffs2/compr_rtime.c
51053index 546d153..736896c 100644
51054--- a/fs/jffs2/compr_rtime.c
51055+++ b/fs/jffs2/compr_rtime.c
51056@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned char *data_in,
51057 int outpos = 0;
51058 int pos=0;
51059
51060+ pax_track_stack();
51061+
51062 memset(positions,0,sizeof(positions));
51063
51064 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
51065@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsigned char *data_in,
51066 int outpos = 0;
51067 int pos=0;
51068
51069+ pax_track_stack();
51070+
51071 memset(positions,0,sizeof(positions));
51072
51073 while (outpos<destlen) {
51074diff --git a/fs/jffs2/compr_rubin.c b/fs/jffs2/compr_rubin.c
51075index 170d289..3254b98 100644
51076--- a/fs/jffs2/compr_rubin.c
51077+++ b/fs/jffs2/compr_rubin.c
51078@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsigned char *data_in,
51079 int ret;
51080 uint32_t mysrclen, mydstlen;
51081
51082+ pax_track_stack();
51083+
51084 mysrclen = *sourcelen;
51085 mydstlen = *dstlen - 8;
51086
51087diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
51088index b47679b..00d65d3 100644
51089--- a/fs/jffs2/erase.c
51090+++ b/fs/jffs2/erase.c
51091@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
51092 struct jffs2_unknown_node marker = {
51093 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
51094 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51095- .totlen = cpu_to_je32(c->cleanmarker_size)
51096+ .totlen = cpu_to_je32(c->cleanmarker_size),
51097+ .hdr_crc = cpu_to_je32(0)
51098 };
51099
51100 jffs2_prealloc_raw_node_refs(c, jeb, 1);
51101diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
51102index 5ef7bac..4fd1e3c 100644
51103--- a/fs/jffs2/wbuf.c
51104+++ b/fs/jffs2/wbuf.c
51105@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
51106 {
51107 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
51108 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
51109- .totlen = constant_cpu_to_je32(8)
51110+ .totlen = constant_cpu_to_je32(8),
51111+ .hdr_crc = constant_cpu_to_je32(0)
51112 };
51113
51114 /*
51115diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
51116index 082e844..52012a1 100644
51117--- a/fs/jffs2/xattr.c
51118+++ b/fs/jffs2/xattr.c
51119@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct jffs2_sb_info *c)
51120
51121 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
51122
51123+ pax_track_stack();
51124+
51125 /* Phase.1 : Merge same xref */
51126 for (i=0; i < XREF_TMPHASH_SIZE; i++)
51127 xref_tmphash[i] = NULL;
51128diff --git a/fs/jfs/super.c b/fs/jfs/super.c
51129index 2234c73..f6e6e6b 100644
51130--- a/fs/jfs/super.c
51131+++ b/fs/jfs/super.c
51132@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
51133
51134 jfs_inode_cachep =
51135 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
51136- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
51137+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
51138 init_once);
51139 if (jfs_inode_cachep == NULL)
51140 return -ENOMEM;
51141diff --git a/fs/libfs.c b/fs/libfs.c
51142index ba36e93..3153fce 100644
51143--- a/fs/libfs.c
51144+++ b/fs/libfs.c
51145@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
51146
51147 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
51148 struct dentry *next;
51149+ char d_name[sizeof(next->d_iname)];
51150+ const unsigned char *name;
51151+
51152 next = list_entry(p, struct dentry, d_u.d_child);
51153 if (d_unhashed(next) || !next->d_inode)
51154 continue;
51155
51156 spin_unlock(&dcache_lock);
51157- if (filldir(dirent, next->d_name.name,
51158+ name = next->d_name.name;
51159+ if (name == next->d_iname) {
51160+ memcpy(d_name, name, next->d_name.len);
51161+ name = d_name;
51162+ }
51163+ if (filldir(dirent, name,
51164 next->d_name.len, filp->f_pos,
51165 next->d_inode->i_ino,
51166 dt_type(next->d_inode)) < 0)
51167diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
51168index c325a83..d15b07b 100644
51169--- a/fs/lockd/clntproc.c
51170+++ b/fs/lockd/clntproc.c
51171@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
51172 /*
51173 * Cookie counter for NLM requests
51174 */
51175-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
51176+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
51177
51178 void nlmclnt_next_cookie(struct nlm_cookie *c)
51179 {
51180- u32 cookie = atomic_inc_return(&nlm_cookie);
51181+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
51182
51183 memcpy(c->data, &cookie, 4);
51184 c->len=4;
51185@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl)
51186 struct nlm_rqst reqst, *req;
51187 int status;
51188
51189+ pax_track_stack();
51190+
51191 req = &reqst;
51192 memset(req, 0, sizeof(*req));
51193 locks_init_lock(&req->a_args.lock.fl);
51194diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
51195index 1a54ae1..6a16c27 100644
51196--- a/fs/lockd/svc.c
51197+++ b/fs/lockd/svc.c
51198@@ -43,7 +43,7 @@
51199
51200 static struct svc_program nlmsvc_program;
51201
51202-struct nlmsvc_binding * nlmsvc_ops;
51203+const struct nlmsvc_binding * nlmsvc_ops;
51204 EXPORT_SYMBOL_GPL(nlmsvc_ops);
51205
51206 static DEFINE_MUTEX(nlmsvc_mutex);
51207diff --git a/fs/locks.c b/fs/locks.c
51208index a8794f2..4041e55 100644
51209--- a/fs/locks.c
51210+++ b/fs/locks.c
51211@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
51212
51213 static struct kmem_cache *filelock_cache __read_mostly;
51214
51215+static void locks_init_lock_always(struct file_lock *fl)
51216+{
51217+ fl->fl_next = NULL;
51218+ fl->fl_fasync = NULL;
51219+ fl->fl_owner = NULL;
51220+ fl->fl_pid = 0;
51221+ fl->fl_nspid = NULL;
51222+ fl->fl_file = NULL;
51223+ fl->fl_flags = 0;
51224+ fl->fl_type = 0;
51225+ fl->fl_start = fl->fl_end = 0;
51226+}
51227+
51228 /* Allocate an empty lock structure. */
51229 static struct file_lock *locks_alloc_lock(void)
51230 {
51231- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51232+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
51233+
51234+ if (fl)
51235+ locks_init_lock_always(fl);
51236+
51237+ return fl;
51238 }
51239
51240 void locks_release_private(struct file_lock *fl)
51241@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *fl)
51242 INIT_LIST_HEAD(&fl->fl_link);
51243 INIT_LIST_HEAD(&fl->fl_block);
51244 init_waitqueue_head(&fl->fl_wait);
51245- fl->fl_next = NULL;
51246- fl->fl_fasync = NULL;
51247- fl->fl_owner = NULL;
51248- fl->fl_pid = 0;
51249- fl->fl_nspid = NULL;
51250- fl->fl_file = NULL;
51251- fl->fl_flags = 0;
51252- fl->fl_type = 0;
51253- fl->fl_start = fl->fl_end = 0;
51254 fl->fl_ops = NULL;
51255 fl->fl_lmops = NULL;
51256+ locks_init_lock_always(fl);
51257 }
51258
51259 EXPORT_SYMBOL(locks_init_lock);
51260@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *filp)
51261 return;
51262
51263 if (filp->f_op && filp->f_op->flock) {
51264- struct file_lock fl = {
51265+ struct file_lock flock = {
51266 .fl_pid = current->tgid,
51267 .fl_file = filp,
51268 .fl_flags = FL_FLOCK,
51269 .fl_type = F_UNLCK,
51270 .fl_end = OFFSET_MAX,
51271 };
51272- filp->f_op->flock(filp, F_SETLKW, &fl);
51273- if (fl.fl_ops && fl.fl_ops->fl_release_private)
51274- fl.fl_ops->fl_release_private(&fl);
51275+ filp->f_op->flock(filp, F_SETLKW, &flock);
51276+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
51277+ flock.fl_ops->fl_release_private(&flock);
51278 }
51279
51280 lock_kernel();
51281diff --git a/fs/mbcache.c b/fs/mbcache.c
51282index ec88ff3..b843a82 100644
51283--- a/fs/mbcache.c
51284+++ b/fs/mbcache.c
51285@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct mb_cache_op *cache_op,
51286 if (!cache)
51287 goto fail;
51288 cache->c_name = name;
51289- cache->c_op.free = NULL;
51290+ *(void **)&cache->c_op.free = NULL;
51291 if (cache_op)
51292- cache->c_op.free = cache_op->free;
51293+ *(void **)&cache->c_op.free = cache_op->free;
51294 atomic_set(&cache->c_entry_count, 0);
51295 cache->c_bucket_bits = bucket_bits;
51296 #ifdef MB_CACHE_INDEXES_COUNT
51297diff --git a/fs/namei.c b/fs/namei.c
51298index b0afbd4..8d065a1 100644
51299--- a/fs/namei.c
51300+++ b/fs/namei.c
51301@@ -224,6 +224,14 @@ int generic_permission(struct inode *inode, int mask,
51302 return ret;
51303
51304 /*
51305+ * Searching includes executable on directories, else just read.
51306+ */
51307+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51308+ if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51309+ if (capable(CAP_DAC_READ_SEARCH))
51310+ return 0;
51311+
51312+ /*
51313 * Read/write DACs are always overridable.
51314 * Executable DACs are overridable if at least one exec bit is set.
51315 */
51316@@ -231,14 +239,6 @@ int generic_permission(struct inode *inode, int mask,
51317 if (capable(CAP_DAC_OVERRIDE))
51318 return 0;
51319
51320- /*
51321- * Searching includes executable on directories, else just read.
51322- */
51323- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
51324- if (mask == MAY_READ || (S_ISDIR(inode->i_mode) && !(mask & MAY_WRITE)))
51325- if (capable(CAP_DAC_READ_SEARCH))
51326- return 0;
51327-
51328 return -EACCES;
51329 }
51330
51331@@ -458,7 +458,8 @@ static int exec_permission_lite(struct inode *inode)
51332 if (!ret)
51333 goto ok;
51334
51335- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
51336+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
51337+ capable(CAP_DAC_OVERRIDE))
51338 goto ok;
51339
51340 return ret;
51341@@ -638,7 +639,7 @@ static __always_inline int __do_follow_link(struct path *path, struct nameidata
51342 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
51343 error = PTR_ERR(cookie);
51344 if (!IS_ERR(cookie)) {
51345- char *s = nd_get_link(nd);
51346+ const char *s = nd_get_link(nd);
51347 error = 0;
51348 if (s)
51349 error = __vfs_follow_link(nd, s);
51350@@ -669,6 +670,13 @@ static inline int do_follow_link(struct path *path, struct nameidata *nd)
51351 err = security_inode_follow_link(path->dentry, nd);
51352 if (err)
51353 goto loop;
51354+
51355+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
51356+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
51357+ err = -EACCES;
51358+ goto loop;
51359+ }
51360+
51361 current->link_count++;
51362 current->total_link_count++;
51363 nd->depth++;
51364@@ -1016,11 +1024,19 @@ return_reval:
51365 break;
51366 }
51367 return_base:
51368+ if (!(nd->flags & (LOOKUP_CONTINUE | LOOKUP_PARENT)) &&
51369+ !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
51370+ path_put(&nd->path);
51371+ return -ENOENT;
51372+ }
51373 return 0;
51374 out_dput:
51375 path_put_conditional(&next, nd);
51376 break;
51377 }
51378+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
51379+ err = -ENOENT;
51380+
51381 path_put(&nd->path);
51382 return_err:
51383 return err;
51384@@ -1091,13 +1107,20 @@ static int do_path_lookup(int dfd, const char *name,
51385 int retval = path_init(dfd, name, flags, nd);
51386 if (!retval)
51387 retval = path_walk(name, nd);
51388- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
51389- nd->path.dentry->d_inode))
51390- audit_inode(name, nd->path.dentry);
51391+
51392+ if (likely(!retval)) {
51393+ if (nd->path.dentry && nd->path.dentry->d_inode) {
51394+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
51395+ retval = -ENOENT;
51396+ if (!audit_dummy_context())
51397+ audit_inode(name, nd->path.dentry);
51398+ }
51399+ }
51400 if (nd->root.mnt) {
51401 path_put(&nd->root);
51402 nd->root.mnt = NULL;
51403 }
51404+
51405 return retval;
51406 }
51407
51408@@ -1576,6 +1599,20 @@ int may_open(struct path *path, int acc_mode, int flag)
51409 if (error)
51410 goto err_out;
51411
51412+
51413+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
51414+ error = -EPERM;
51415+ goto err_out;
51416+ }
51417+ if (gr_handle_rawio(inode)) {
51418+ error = -EPERM;
51419+ goto err_out;
51420+ }
51421+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode)) {
51422+ error = -EACCES;
51423+ goto err_out;
51424+ }
51425+
51426 if (flag & O_TRUNC) {
51427 error = get_write_access(inode);
51428 if (error)
51429@@ -1620,6 +1657,17 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51430 {
51431 int error;
51432 struct dentry *dir = nd->path.dentry;
51433+ int acc_mode = ACC_MODE(flag);
51434+
51435+ if (flag & O_TRUNC)
51436+ acc_mode |= MAY_WRITE;
51437+ if (flag & O_APPEND)
51438+ acc_mode |= MAY_APPEND;
51439+
51440+ if (!gr_acl_handle_creat(path->dentry, dir, nd->path.mnt, flag, acc_mode, mode)) {
51441+ error = -EACCES;
51442+ goto out_unlock;
51443+ }
51444
51445 if (!IS_POSIXACL(dir->d_inode))
51446 mode &= ~current_umask();
51447@@ -1627,6 +1675,8 @@ static int __open_namei_create(struct nameidata *nd, struct path *path,
51448 if (error)
51449 goto out_unlock;
51450 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
51451+ if (!error)
51452+ gr_handle_create(path->dentry, nd->path.mnt);
51453 out_unlock:
51454 mutex_unlock(&dir->d_inode->i_mutex);
51455 dput(nd->path.dentry);
51456@@ -1709,6 +1759,22 @@ struct file *do_filp_open(int dfd, const char *pathname,
51457 &nd, flag);
51458 if (error)
51459 return ERR_PTR(error);
51460+
51461+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
51462+ error = -EPERM;
51463+ goto exit;
51464+ }
51465+
51466+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
51467+ error = -EPERM;
51468+ goto exit;
51469+ }
51470+
51471+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, acc_mode)) {
51472+ error = -EACCES;
51473+ goto exit;
51474+ }
51475+
51476 goto ok;
51477 }
51478
51479@@ -1795,6 +1861,19 @@ do_last:
51480 /*
51481 * It already exists.
51482 */
51483+
51484+ if (!gr_acl_handle_hidden_file(path.dentry, path.mnt)) {
51485+ error = -ENOENT;
51486+ goto exit_mutex_unlock;
51487+ }
51488+
51489+ /* only check if O_CREAT is specified, all other checks need
51490+ to go into may_open */
51491+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
51492+ error = -EACCES;
51493+ goto exit_mutex_unlock;
51494+ }
51495+
51496 mutex_unlock(&dir->d_inode->i_mutex);
51497 audit_inode(pathname, path.dentry);
51498
51499@@ -1887,6 +1966,13 @@ do_link:
51500 error = security_inode_follow_link(path.dentry, &nd);
51501 if (error)
51502 goto exit_dput;
51503+
51504+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
51505+ path.dentry, nd.path.mnt)) {
51506+ error = -EACCES;
51507+ goto exit_dput;
51508+ }
51509+
51510 error = __do_follow_link(&path, &nd);
51511 if (error) {
51512 /* Does someone understand code flow here? Or it is only
51513@@ -1984,6 +2070,10 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
51514 }
51515 return dentry;
51516 eexist:
51517+ if (!gr_acl_handle_hidden_file(dentry, nd->path.mnt)) {
51518+ dput(dentry);
51519+ return ERR_PTR(-ENOENT);
51520+ }
51521 dput(dentry);
51522 dentry = ERR_PTR(-EEXIST);
51523 fail:
51524@@ -2061,6 +2151,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51525 error = may_mknod(mode);
51526 if (error)
51527 goto out_dput;
51528+
51529+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
51530+ error = -EPERM;
51531+ goto out_dput;
51532+ }
51533+
51534+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
51535+ error = -EACCES;
51536+ goto out_dput;
51537+ }
51538+
51539 error = mnt_want_write(nd.path.mnt);
51540 if (error)
51541 goto out_dput;
51542@@ -2081,6 +2182,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const char __user *, filename, int, mode,
51543 }
51544 out_drop_write:
51545 mnt_drop_write(nd.path.mnt);
51546+
51547+ if (!error)
51548+ gr_handle_create(dentry, nd.path.mnt);
51549 out_dput:
51550 dput(dentry);
51551 out_unlock:
51552@@ -2134,6 +2238,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51553 if (IS_ERR(dentry))
51554 goto out_unlock;
51555
51556+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
51557+ error = -EACCES;
51558+ goto out_dput;
51559+ }
51560+
51561 if (!IS_POSIXACL(nd.path.dentry->d_inode))
51562 mode &= ~current_umask();
51563 error = mnt_want_write(nd.path.mnt);
51564@@ -2145,6 +2254,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const char __user *, pathname, int, mode)
51565 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
51566 out_drop_write:
51567 mnt_drop_write(nd.path.mnt);
51568+
51569+ if (!error)
51570+ gr_handle_create(dentry, nd.path.mnt);
51571+
51572 out_dput:
51573 dput(dentry);
51574 out_unlock:
51575@@ -2226,6 +2339,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51576 char * name;
51577 struct dentry *dentry;
51578 struct nameidata nd;
51579+ ino_t saved_ino = 0;
51580+ dev_t saved_dev = 0;
51581
51582 error = user_path_parent(dfd, pathname, &nd, &name);
51583 if (error)
51584@@ -2250,6 +2365,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
51585 error = PTR_ERR(dentry);
51586 if (IS_ERR(dentry))
51587 goto exit2;
51588+
51589+ if (dentry->d_inode != NULL) {
51590+ saved_ino = dentry->d_inode->i_ino;
51591+ saved_dev = gr_get_dev_from_dentry(dentry);
51592+
51593+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
51594+ error = -EACCES;
51595+ goto exit3;
51596+ }
51597+ }
51598+
51599 error = mnt_want_write(nd.path.mnt);
51600 if (error)
51601 goto exit3;
51602@@ -2257,6 +2383,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
51603 if (error)
51604 goto exit4;
51605 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
51606+ if (!error && (saved_dev || saved_ino))
51607+ gr_handle_delete(saved_ino, saved_dev);
51608 exit4:
51609 mnt_drop_write(nd.path.mnt);
51610 exit3:
51611@@ -2318,6 +2446,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51612 struct dentry *dentry;
51613 struct nameidata nd;
51614 struct inode *inode = NULL;
51615+ ino_t saved_ino = 0;
51616+ dev_t saved_dev = 0;
51617
51618 error = user_path_parent(dfd, pathname, &nd, &name);
51619 if (error)
51620@@ -2337,8 +2467,19 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51621 if (nd.last.name[nd.last.len])
51622 goto slashes;
51623 inode = dentry->d_inode;
51624- if (inode)
51625+ if (inode) {
51626+ if (inode->i_nlink <= 1) {
51627+ saved_ino = inode->i_ino;
51628+ saved_dev = gr_get_dev_from_dentry(dentry);
51629+ }
51630+
51631 atomic_inc(&inode->i_count);
51632+
51633+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
51634+ error = -EACCES;
51635+ goto exit2;
51636+ }
51637+ }
51638 error = mnt_want_write(nd.path.mnt);
51639 if (error)
51640 goto exit2;
51641@@ -2346,6 +2487,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
51642 if (error)
51643 goto exit3;
51644 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
51645+ if (!error && (saved_ino || saved_dev))
51646+ gr_handle_delete(saved_ino, saved_dev);
51647 exit3:
51648 mnt_drop_write(nd.path.mnt);
51649 exit2:
51650@@ -2424,6 +2567,11 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51651 if (IS_ERR(dentry))
51652 goto out_unlock;
51653
51654+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
51655+ error = -EACCES;
51656+ goto out_dput;
51657+ }
51658+
51659 error = mnt_want_write(nd.path.mnt);
51660 if (error)
51661 goto out_dput;
51662@@ -2431,6 +2579,8 @@ SYSCALL_DEFINE3(symlinkat, const char __user *, oldname,
51663 if (error)
51664 goto out_drop_write;
51665 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
51666+ if (!error)
51667+ gr_handle_create(dentry, nd.path.mnt);
51668 out_drop_write:
51669 mnt_drop_write(nd.path.mnt);
51670 out_dput:
51671@@ -2524,6 +2674,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51672 error = PTR_ERR(new_dentry);
51673 if (IS_ERR(new_dentry))
51674 goto out_unlock;
51675+
51676+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
51677+ old_path.dentry->d_inode,
51678+ old_path.dentry->d_inode->i_mode, to)) {
51679+ error = -EACCES;
51680+ goto out_dput;
51681+ }
51682+
51683+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
51684+ old_path.dentry, old_path.mnt, to)) {
51685+ error = -EACCES;
51686+ goto out_dput;
51687+ }
51688+
51689 error = mnt_want_write(nd.path.mnt);
51690 if (error)
51691 goto out_dput;
51692@@ -2531,6 +2695,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
51693 if (error)
51694 goto out_drop_write;
51695 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
51696+ if (!error)
51697+ gr_handle_create(new_dentry, nd.path.mnt);
51698 out_drop_write:
51699 mnt_drop_write(nd.path.mnt);
51700 out_dput:
51701@@ -2708,6 +2874,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51702 char *to;
51703 int error;
51704
51705+ pax_track_stack();
51706+
51707 error = user_path_parent(olddfd, oldname, &oldnd, &from);
51708 if (error)
51709 goto exit;
51710@@ -2764,6 +2932,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51711 if (new_dentry == trap)
51712 goto exit5;
51713
51714+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
51715+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
51716+ to);
51717+ if (error)
51718+ goto exit5;
51719+
51720 error = mnt_want_write(oldnd.path.mnt);
51721 if (error)
51722 goto exit5;
51723@@ -2773,6 +2947,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, const char __user *, oldname,
51724 goto exit6;
51725 error = vfs_rename(old_dir->d_inode, old_dentry,
51726 new_dir->d_inode, new_dentry);
51727+ if (!error)
51728+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
51729+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
51730 exit6:
51731 mnt_drop_write(oldnd.path.mnt);
51732 exit5:
51733@@ -2798,6 +2975,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
51734
51735 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
51736 {
51737+ char tmpbuf[64];
51738+ const char *newlink;
51739 int len;
51740
51741 len = PTR_ERR(link);
51742@@ -2807,7 +2986,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
51743 len = strlen(link);
51744 if (len > (unsigned) buflen)
51745 len = buflen;
51746- if (copy_to_user(buffer, link, len))
51747+
51748+ if (len < sizeof(tmpbuf)) {
51749+ memcpy(tmpbuf, link, len);
51750+ newlink = tmpbuf;
51751+ } else
51752+ newlink = link;
51753+
51754+ if (copy_to_user(buffer, newlink, len))
51755 len = -EFAULT;
51756 out:
51757 return len;
51758diff --git a/fs/namespace.c b/fs/namespace.c
51759index 2beb0fb..11a95a5 100644
51760--- a/fs/namespace.c
51761+++ b/fs/namespace.c
51762@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51763 if (!(sb->s_flags & MS_RDONLY))
51764 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
51765 up_write(&sb->s_umount);
51766+
51767+ gr_log_remount(mnt->mnt_devname, retval);
51768+
51769 return retval;
51770 }
51771
51772@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mnt, int flags)
51773 security_sb_umount_busy(mnt);
51774 up_write(&namespace_sem);
51775 release_mounts(&umount_list);
51776+
51777+ gr_log_unmount(mnt->mnt_devname, retval);
51778+
51779 return retval;
51780 }
51781
51782@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51783 if (retval)
51784 goto dput_out;
51785
51786+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
51787+ retval = -EPERM;
51788+ goto dput_out;
51789+ }
51790+
51791+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
51792+ retval = -EPERM;
51793+ goto dput_out;
51794+ }
51795+
51796 if (flags & MS_REMOUNT)
51797 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
51798 data_page);
51799@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_name, char *type_page,
51800 dev_name, data_page);
51801 dput_out:
51802 path_put(&path);
51803+
51804+ gr_log_mount(dev_name, dir_name, retval);
51805+
51806 return retval;
51807 }
51808
51809@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
51810 goto out1;
51811 }
51812
51813+ if (gr_handle_chroot_pivot()) {
51814+ error = -EPERM;
51815+ path_put(&old);
51816+ goto out1;
51817+ }
51818+
51819 read_lock(&current->fs->lock);
51820 root = current->fs->root;
51821 path_get(&current->fs->root);
51822diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c
51823index b8b5b30..2bd9ccb 100644
51824--- a/fs/ncpfs/dir.c
51825+++ b/fs/ncpfs/dir.c
51826@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *dentry)
51827 int res, val = 0, len;
51828 __u8 __name[NCP_MAXPATHLEN + 1];
51829
51830+ pax_track_stack();
51831+
51832 parent = dget_parent(dentry);
51833 dir = parent->d_inode;
51834
51835@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct inode *dir, struct dentry *dentry, struc
51836 int error, res, len;
51837 __u8 __name[NCP_MAXPATHLEN + 1];
51838
51839+ pax_track_stack();
51840+
51841 lock_kernel();
51842 error = -EIO;
51843 if (!ncp_conn_valid(server))
51844@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, struct dentry *dentry, int mode,
51845 int error, result, len;
51846 int opmode;
51847 __u8 __name[NCP_MAXPATHLEN + 1];
51848-
51849+
51850 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
51851 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
51852
51853+ pax_track_stack();
51854+
51855 error = -EIO;
51856 lock_kernel();
51857 if (!ncp_conn_valid(server))
51858@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51859 int error, len;
51860 __u8 __name[NCP_MAXPATHLEN + 1];
51861
51862+ pax_track_stack();
51863+
51864 DPRINTK("ncp_mkdir: making %s/%s\n",
51865 dentry->d_parent->d_name.name, dentry->d_name.name);
51866
51867@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir, struct dentry *dentry, int mode)
51868 if (!ncp_conn_valid(server))
51869 goto out;
51870
51871+ pax_track_stack();
51872+
51873 ncp_age_dentry(server, dentry);
51874 len = sizeof(__name);
51875 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
51876@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry,
51877 int old_len, new_len;
51878 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
51879
51880+ pax_track_stack();
51881+
51882 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
51883 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
51884 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
51885diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
51886index cf98da1..da890a9 100644
51887--- a/fs/ncpfs/inode.c
51888+++ b/fs/ncpfs/inode.c
51889@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_block *sb, void *raw_data, int silent)
51890 #endif
51891 struct ncp_entry_info finfo;
51892
51893+ pax_track_stack();
51894+
51895 data.wdog_pid = NULL;
51896 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
51897 if (!server)
51898diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
51899index bfaef7b..e9d03ca 100644
51900--- a/fs/nfs/inode.c
51901+++ b/fs/nfs/inode.c
51902@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
51903 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
51904 nfsi->attrtimeo_timestamp = jiffies;
51905
51906- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
51907+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
51908 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
51909 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
51910 else
51911@@ -973,16 +973,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
51912 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
51913 }
51914
51915-static atomic_long_t nfs_attr_generation_counter;
51916+static atomic_long_unchecked_t nfs_attr_generation_counter;
51917
51918 static unsigned long nfs_read_attr_generation_counter(void)
51919 {
51920- return atomic_long_read(&nfs_attr_generation_counter);
51921+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
51922 }
51923
51924 unsigned long nfs_inc_attr_generation_counter(void)
51925 {
51926- return atomic_long_inc_return(&nfs_attr_generation_counter);
51927+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
51928 }
51929
51930 void nfs_fattr_init(struct nfs_fattr *fattr)
51931diff --git a/fs/nfsd/lockd.c b/fs/nfsd/lockd.c
51932index cc2f505..f6a236f 100644
51933--- a/fs/nfsd/lockd.c
51934+++ b/fs/nfsd/lockd.c
51935@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
51936 fput(filp);
51937 }
51938
51939-static struct nlmsvc_binding nfsd_nlm_ops = {
51940+static const struct nlmsvc_binding nfsd_nlm_ops = {
51941 .fopen = nlm_fopen, /* open file for locking */
51942 .fclose = nlm_fclose, /* close file */
51943 };
51944diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
51945index cfc3391..dcc083a 100644
51946--- a/fs/nfsd/nfs4state.c
51947+++ b/fs/nfsd/nfs4state.c
51948@@ -3459,6 +3459,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
51949 unsigned int cmd;
51950 int err;
51951
51952+ pax_track_stack();
51953+
51954 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
51955 (long long) lock->lk_offset,
51956 (long long) lock->lk_length);
51957diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
51958index 4a82a96..0d5fb49 100644
51959--- a/fs/nfsd/nfs4xdr.c
51960+++ b/fs/nfsd/nfs4xdr.c
51961@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
51962 struct nfsd4_compoundres *resp = rqstp->rq_resp;
51963 u32 minorversion = resp->cstate.minorversion;
51964
51965+ pax_track_stack();
51966+
51967 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
51968 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
51969 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
51970diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
51971index 2e09588..596421d 100644
51972--- a/fs/nfsd/vfs.c
51973+++ b/fs/nfsd/vfs.c
51974@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51975 } else {
51976 oldfs = get_fs();
51977 set_fs(KERNEL_DS);
51978- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
51979+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
51980 set_fs(oldfs);
51981 }
51982
51983@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
51984
51985 /* Write the data. */
51986 oldfs = get_fs(); set_fs(KERNEL_DS);
51987- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
51988+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &offset);
51989 set_fs(oldfs);
51990 if (host_err < 0)
51991 goto out_nfserr;
51992@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
51993 */
51994
51995 oldfs = get_fs(); set_fs(KERNEL_DS);
51996- host_err = inode->i_op->readlink(dentry, buf, *lenp);
51997+ host_err = inode->i_op->readlink(dentry, (char __force_user *)buf, *lenp);
51998 set_fs(oldfs);
51999
52000 if (host_err < 0)
52001diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
52002index f6af760..d0adf34 100644
52003--- a/fs/nilfs2/ioctl.c
52004+++ b/fs/nilfs2/ioctl.c
52005@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52006 unsigned int cmd, void __user *argp)
52007 {
52008 struct nilfs_argv argv[5];
52009- const static size_t argsz[5] = {
52010+ static const size_t argsz[5] = {
52011 sizeof(struct nilfs_vdesc),
52012 sizeof(struct nilfs_period),
52013 sizeof(__u64),
52014@@ -522,6 +522,9 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
52015 if (argv[n].v_nmembs > nsegs * nilfs->ns_blocks_per_segment)
52016 goto out_free;
52017
52018+ if (argv[n].v_nmembs >= UINT_MAX / argv[n].v_size)
52019+ goto out_free;
52020+
52021 len = argv[n].v_size * argv[n].v_nmembs;
52022 base = (void __user *)(unsigned long)argv[n].v_base;
52023 if (len == 0) {
52024diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
52025index 7e54e52..9337248 100644
52026--- a/fs/notify/dnotify/dnotify.c
52027+++ b/fs/notify/dnotify/dnotify.c
52028@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsnotify_mark_entry *entry)
52029 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
52030 }
52031
52032-static struct fsnotify_ops dnotify_fsnotify_ops = {
52033+static const struct fsnotify_ops dnotify_fsnotify_ops = {
52034 .handle_event = dnotify_handle_event,
52035 .should_send_event = dnotify_should_send_event,
52036 .free_group_priv = NULL,
52037diff --git a/fs/notify/notification.c b/fs/notify/notification.c
52038index b8bf53b..c518688 100644
52039--- a/fs/notify/notification.c
52040+++ b/fs/notify/notification.c
52041@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
52042 * get set to 0 so it will never get 'freed'
52043 */
52044 static struct fsnotify_event q_overflow_event;
52045-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52046+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52047
52048 /**
52049 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
52050@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
52051 */
52052 u32 fsnotify_get_cookie(void)
52053 {
52054- return atomic_inc_return(&fsnotify_sync_cookie);
52055+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
52056 }
52057 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
52058
52059diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
52060index 5a9e344..0f8cd28 100644
52061--- a/fs/ntfs/dir.c
52062+++ b/fs/ntfs/dir.c
52063@@ -1328,7 +1328,7 @@ find_next_index_buffer:
52064 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
52065 ~(s64)(ndir->itype.index.block_size - 1)));
52066 /* Bounds checks. */
52067- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52068+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
52069 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
52070 "inode 0x%lx or driver bug.", vdir->i_ino);
52071 goto err_out;
52072diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
52073index 663c0e3..b6868e9 100644
52074--- a/fs/ntfs/file.c
52075+++ b/fs/ntfs/file.c
52076@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_inode_ops = {
52077 #endif /* NTFS_RW */
52078 };
52079
52080-const struct file_operations ntfs_empty_file_ops = {};
52081+const struct file_operations ntfs_empty_file_ops __read_only;
52082
52083-const struct inode_operations ntfs_empty_inode_ops = {};
52084+const struct inode_operations ntfs_empty_inode_ops __read_only;
52085diff --git a/fs/ocfs2/cluster/masklog.c b/fs/ocfs2/cluster/masklog.c
52086index 1cd2934..880b5d2 100644
52087--- a/fs/ocfs2/cluster/masklog.c
52088+++ b/fs/ocfs2/cluster/masklog.c
52089@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject *obj, struct attribute *attr,
52090 return mlog_mask_store(mlog_attr->mask, buf, count);
52091 }
52092
52093-static struct sysfs_ops mlog_attr_ops = {
52094+static const struct sysfs_ops mlog_attr_ops = {
52095 .show = mlog_show,
52096 .store = mlog_store,
52097 };
52098diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
52099index ac10f83..2cd2607 100644
52100--- a/fs/ocfs2/localalloc.c
52101+++ b/fs/ocfs2/localalloc.c
52102@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
52103 goto bail;
52104 }
52105
52106- atomic_inc(&osb->alloc_stats.moves);
52107+ atomic_inc_unchecked(&osb->alloc_stats.moves);
52108
52109 status = 0;
52110 bail:
52111diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
52112index f010b22..9f9ed34 100644
52113--- a/fs/ocfs2/namei.c
52114+++ b/fs/ocfs2/namei.c
52115@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *old_dir,
52116 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
52117 struct ocfs2_dir_lookup_result target_insert = { NULL, };
52118
52119+ pax_track_stack();
52120+
52121 /* At some point it might be nice to break this function up a
52122 * bit. */
52123
52124diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
52125index d963d86..914cfbd 100644
52126--- a/fs/ocfs2/ocfs2.h
52127+++ b/fs/ocfs2/ocfs2.h
52128@@ -217,11 +217,11 @@ enum ocfs2_vol_state
52129
52130 struct ocfs2_alloc_stats
52131 {
52132- atomic_t moves;
52133- atomic_t local_data;
52134- atomic_t bitmap_data;
52135- atomic_t bg_allocs;
52136- atomic_t bg_extends;
52137+ atomic_unchecked_t moves;
52138+ atomic_unchecked_t local_data;
52139+ atomic_unchecked_t bitmap_data;
52140+ atomic_unchecked_t bg_allocs;
52141+ atomic_unchecked_t bg_extends;
52142 };
52143
52144 enum ocfs2_local_alloc_state
52145diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
52146index 79b5dac..d322952 100644
52147--- a/fs/ocfs2/suballoc.c
52148+++ b/fs/ocfs2/suballoc.c
52149@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
52150 mlog_errno(status);
52151 goto bail;
52152 }
52153- atomic_inc(&osb->alloc_stats.bg_extends);
52154+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
52155
52156 /* You should never ask for this much metadata */
52157 BUG_ON(bits_wanted >
52158@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_super *osb,
52159 mlog_errno(status);
52160 goto bail;
52161 }
52162- atomic_inc(&osb->alloc_stats.bg_allocs);
52163+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52164
52165 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
52166 ac->ac_bits_given += (*num_bits);
52167@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_super *osb,
52168 mlog_errno(status);
52169 goto bail;
52170 }
52171- atomic_inc(&osb->alloc_stats.bg_allocs);
52172+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
52173
52174 BUG_ON(num_bits != 1);
52175
52176@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52177 cluster_start,
52178 num_clusters);
52179 if (!status)
52180- atomic_inc(&osb->alloc_stats.local_data);
52181+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
52182 } else {
52183 if (min_clusters > (osb->bitmap_cpg - 1)) {
52184 /* The only paths asking for contiguousness
52185@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_super *osb,
52186 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
52187 bg_blkno,
52188 bg_bit_off);
52189- atomic_inc(&osb->alloc_stats.bitmap_data);
52190+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
52191 }
52192 }
52193 if (status < 0) {
52194diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
52195index 9f55be4..a3f8048 100644
52196--- a/fs/ocfs2/super.c
52197+++ b/fs/ocfs2/super.c
52198@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
52199 "%10s => GlobalAllocs: %d LocalAllocs: %d "
52200 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
52201 "Stats",
52202- atomic_read(&osb->alloc_stats.bitmap_data),
52203- atomic_read(&osb->alloc_stats.local_data),
52204- atomic_read(&osb->alloc_stats.bg_allocs),
52205- atomic_read(&osb->alloc_stats.moves),
52206- atomic_read(&osb->alloc_stats.bg_extends));
52207+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
52208+ atomic_read_unchecked(&osb->alloc_stats.local_data),
52209+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
52210+ atomic_read_unchecked(&osb->alloc_stats.moves),
52211+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
52212
52213 out += snprintf(buf + out, len - out,
52214 "%10s => State: %u Descriptor: %llu Size: %u bits "
52215@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
52216 spin_lock_init(&osb->osb_xattr_lock);
52217 ocfs2_init_inode_steal_slot(osb);
52218
52219- atomic_set(&osb->alloc_stats.moves, 0);
52220- atomic_set(&osb->alloc_stats.local_data, 0);
52221- atomic_set(&osb->alloc_stats.bitmap_data, 0);
52222- atomic_set(&osb->alloc_stats.bg_allocs, 0);
52223- atomic_set(&osb->alloc_stats.bg_extends, 0);
52224+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
52225+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
52226+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
52227+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
52228+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
52229
52230 /* Copy the blockcheck stats from the superblock probe */
52231 osb->osb_ecc_stats = *stats;
52232diff --git a/fs/open.c b/fs/open.c
52233index 4f01e06..091f6c3 100644
52234--- a/fs/open.c
52235+++ b/fs/open.c
52236@@ -275,6 +275,10 @@ static long do_sys_truncate(const char __user *pathname, loff_t length)
52237 error = locks_verify_truncate(inode, NULL, length);
52238 if (!error)
52239 error = security_path_truncate(&path, length, 0);
52240+
52241+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
52242+ error = -EACCES;
52243+
52244 if (!error) {
52245 vfs_dq_init(inode);
52246 error = do_truncate(path.dentry, length, 0, NULL);
52247@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, const char __user *, filename, int, mode)
52248 if (__mnt_is_readonly(path.mnt))
52249 res = -EROFS;
52250
52251+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
52252+ res = -EACCES;
52253+
52254 out_path_release:
52255 path_put(&path);
52256 out:
52257@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user *, filename)
52258 if (error)
52259 goto dput_and_out;
52260
52261+ gr_log_chdir(path.dentry, path.mnt);
52262+
52263 set_fs_pwd(current->fs, &path);
52264
52265 dput_and_out:
52266@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
52267 goto out_putf;
52268
52269 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
52270+
52271+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
52272+ error = -EPERM;
52273+
52274+ if (!error)
52275+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
52276+
52277 if (!error)
52278 set_fs_pwd(current->fs, &file->f_path);
52279 out_putf:
52280@@ -588,7 +604,13 @@ SYSCALL_DEFINE1(chroot, const char __user *, filename)
52281 if (!capable(CAP_SYS_CHROOT))
52282 goto dput_and_out;
52283
52284+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
52285+ goto dput_and_out;
52286+
52287 set_fs_root(current->fs, &path);
52288+
52289+ gr_handle_chroot_chdir(&path);
52290+
52291 error = 0;
52292 dput_and_out:
52293 path_put(&path);
52294@@ -616,12 +638,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd, mode_t, mode)
52295 err = mnt_want_write_file(file);
52296 if (err)
52297 goto out_putf;
52298+
52299 mutex_lock(&inode->i_mutex);
52300+
52301+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
52302+ err = -EACCES;
52303+ goto out_unlock;
52304+ }
52305+
52306 if (mode == (mode_t) -1)
52307 mode = inode->i_mode;
52308+
52309+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
52310+ err = -EPERM;
52311+ goto out_unlock;
52312+ }
52313+
52314 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52315 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52316 err = notify_change(dentry, &newattrs);
52317+
52318+out_unlock:
52319 mutex_unlock(&inode->i_mutex);
52320 mnt_drop_write(file->f_path.mnt);
52321 out_putf:
52322@@ -645,12 +682,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, const char __user *, filename, mode_t, mode)
52323 error = mnt_want_write(path.mnt);
52324 if (error)
52325 goto dput_and_out;
52326+
52327 mutex_lock(&inode->i_mutex);
52328+
52329+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
52330+ error = -EACCES;
52331+ goto out_unlock;
52332+ }
52333+
52334 if (mode == (mode_t) -1)
52335 mode = inode->i_mode;
52336+
52337+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
52338+ error = -EACCES;
52339+ goto out_unlock;
52340+ }
52341+
52342 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
52343 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
52344 error = notify_change(path.dentry, &newattrs);
52345+
52346+out_unlock:
52347 mutex_unlock(&inode->i_mutex);
52348 mnt_drop_write(path.mnt);
52349 dput_and_out:
52350@@ -664,12 +716,15 @@ SYSCALL_DEFINE2(chmod, const char __user *, filename, mode_t, mode)
52351 return sys_fchmodat(AT_FDCWD, filename, mode);
52352 }
52353
52354-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
52355+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
52356 {
52357 struct inode *inode = dentry->d_inode;
52358 int error;
52359 struct iattr newattrs;
52360
52361+ if (!gr_acl_handle_chown(dentry, mnt))
52362+ return -EACCES;
52363+
52364 newattrs.ia_valid = ATTR_CTIME;
52365 if (user != (uid_t) -1) {
52366 newattrs.ia_valid |= ATTR_UID;
52367@@ -700,7 +755,7 @@ SYSCALL_DEFINE3(chown, const char __user *, filename, uid_t, user, gid_t, group)
52368 error = mnt_want_write(path.mnt);
52369 if (error)
52370 goto out_release;
52371- error = chown_common(path.dentry, user, group);
52372+ error = chown_common(path.dentry, user, group, path.mnt);
52373 mnt_drop_write(path.mnt);
52374 out_release:
52375 path_put(&path);
52376@@ -725,7 +780,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, const char __user *, filename, uid_t, user,
52377 error = mnt_want_write(path.mnt);
52378 if (error)
52379 goto out_release;
52380- error = chown_common(path.dentry, user, group);
52381+ error = chown_common(path.dentry, user, group, path.mnt);
52382 mnt_drop_write(path.mnt);
52383 out_release:
52384 path_put(&path);
52385@@ -744,7 +799,7 @@ SYSCALL_DEFINE3(lchown, const char __user *, filename, uid_t, user, gid_t, group
52386 error = mnt_want_write(path.mnt);
52387 if (error)
52388 goto out_release;
52389- error = chown_common(path.dentry, user, group);
52390+ error = chown_common(path.dentry, user, group, path.mnt);
52391 mnt_drop_write(path.mnt);
52392 out_release:
52393 path_put(&path);
52394@@ -767,7 +822,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd, uid_t, user, gid_t, group)
52395 goto out_fput;
52396 dentry = file->f_path.dentry;
52397 audit_inode(NULL, dentry);
52398- error = chown_common(dentry, user, group);
52399+ error = chown_common(dentry, user, group, file->f_path.mnt);
52400 mnt_drop_write(file->f_path.mnt);
52401 out_fput:
52402 fput(file);
52403@@ -1036,7 +1091,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, int mode)
52404 if (!IS_ERR(tmp)) {
52405 fd = get_unused_fd_flags(flags);
52406 if (fd >= 0) {
52407- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52408+ struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
52409 if (IS_ERR(f)) {
52410 put_unused_fd(fd);
52411 fd = PTR_ERR(f);
52412diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c
52413index 6ab70f4..f4103d1 100644
52414--- a/fs/partitions/efi.c
52415+++ b/fs/partitions/efi.c
52416@@ -231,14 +231,14 @@ alloc_read_gpt_entries(struct block_device *bdev, gpt_header *gpt)
52417 if (!bdev || !gpt)
52418 return NULL;
52419
52420+ if (!le32_to_cpu(gpt->num_partition_entries))
52421+ return NULL;
52422+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
52423+ if (!pte)
52424+ return NULL;
52425+
52426 count = le32_to_cpu(gpt->num_partition_entries) *
52427 le32_to_cpu(gpt->sizeof_partition_entry);
52428- if (!count)
52429- return NULL;
52430- pte = kzalloc(count, GFP_KERNEL);
52431- if (!pte)
52432- return NULL;
52433-
52434 if (read_lba(bdev, le64_to_cpu(gpt->partition_entry_lba),
52435 (u8 *) pte,
52436 count) < count) {
52437diff --git a/fs/partitions/ldm.c b/fs/partitions/ldm.c
52438index dd6efdb..3babc6c 100644
52439--- a/fs/partitions/ldm.c
52440+++ b/fs/partitions/ldm.c
52441@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52442 ldm_error ("A VBLK claims to have %d parts.", num);
52443 return false;
52444 }
52445+
52446 if (rec >= num) {
52447 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
52448 return false;
52449@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data, int size, struct list_head *frags)
52450 goto found;
52451 }
52452
52453- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
52454+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
52455 if (!f) {
52456 ldm_crit ("Out of memory.");
52457 return false;
52458diff --git a/fs/partitions/mac.c b/fs/partitions/mac.c
52459index 5765198..7f8e9e0 100644
52460--- a/fs/partitions/mac.c
52461+++ b/fs/partitions/mac.c
52462@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitions *state, struct block_device *bdev)
52463 return 0; /* not a MacOS disk */
52464 }
52465 blocks_in_map = be32_to_cpu(part->map_count);
52466- if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52467- put_dev_sector(sect);
52468- return 0;
52469- }
52470 printk(" [mac]");
52471+ if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
52472+ put_dev_sector(sect);
52473+ return 0;
52474+ }
52475 for (slot = 1; slot <= blocks_in_map; ++slot) {
52476 int pos = slot * secsize;
52477 put_dev_sector(sect);
52478diff --git a/fs/pipe.c b/fs/pipe.c
52479index d0cc080..8a6f211 100644
52480--- a/fs/pipe.c
52481+++ b/fs/pipe.c
52482@@ -401,9 +401,9 @@ redo:
52483 }
52484 if (bufs) /* More to do? */
52485 continue;
52486- if (!pipe->writers)
52487+ if (!atomic_read(&pipe->writers))
52488 break;
52489- if (!pipe->waiting_writers) {
52490+ if (!atomic_read(&pipe->waiting_writers)) {
52491 /* syscall merging: Usually we must not sleep
52492 * if O_NONBLOCK is set, or if we got some data.
52493 * But if a writer sleeps in kernel space, then
52494@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
52495 mutex_lock(&inode->i_mutex);
52496 pipe = inode->i_pipe;
52497
52498- if (!pipe->readers) {
52499+ if (!atomic_read(&pipe->readers)) {
52500 send_sig(SIGPIPE, current, 0);
52501 ret = -EPIPE;
52502 goto out;
52503@@ -511,7 +511,7 @@ redo1:
52504 for (;;) {
52505 int bufs;
52506
52507- if (!pipe->readers) {
52508+ if (!atomic_read(&pipe->readers)) {
52509 send_sig(SIGPIPE, current, 0);
52510 if (!ret)
52511 ret = -EPIPE;
52512@@ -597,9 +597,9 @@ redo2:
52513 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
52514 do_wakeup = 0;
52515 }
52516- pipe->waiting_writers++;
52517+ atomic_inc(&pipe->waiting_writers);
52518 pipe_wait(pipe);
52519- pipe->waiting_writers--;
52520+ atomic_dec(&pipe->waiting_writers);
52521 }
52522 out:
52523 mutex_unlock(&inode->i_mutex);
52524@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52525 mask = 0;
52526 if (filp->f_mode & FMODE_READ) {
52527 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
52528- if (!pipe->writers && filp->f_version != pipe->w_counter)
52529+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
52530 mask |= POLLHUP;
52531 }
52532
52533@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table *wait)
52534 * Most Unices do not set POLLERR for FIFOs but on Linux they
52535 * behave exactly like pipes for poll().
52536 */
52537- if (!pipe->readers)
52538+ if (!atomic_read(&pipe->readers))
52539 mask |= POLLERR;
52540 }
52541
52542@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int decr, int decw)
52543
52544 mutex_lock(&inode->i_mutex);
52545 pipe = inode->i_pipe;
52546- pipe->readers -= decr;
52547- pipe->writers -= decw;
52548+ atomic_sub(decr, &pipe->readers);
52549+ atomic_sub(decw, &pipe->writers);
52550
52551- if (!pipe->readers && !pipe->writers) {
52552+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
52553 free_pipe_info(inode);
52554 } else {
52555 wake_up_interruptible_sync(&pipe->wait);
52556@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, struct file *filp)
52557
52558 if (inode->i_pipe) {
52559 ret = 0;
52560- inode->i_pipe->readers++;
52561+ atomic_inc(&inode->i_pipe->readers);
52562 }
52563
52564 mutex_unlock(&inode->i_mutex);
52565@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, struct file *filp)
52566
52567 if (inode->i_pipe) {
52568 ret = 0;
52569- inode->i_pipe->writers++;
52570+ atomic_inc(&inode->i_pipe->writers);
52571 }
52572
52573 mutex_unlock(&inode->i_mutex);
52574@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, struct file *filp)
52575 if (inode->i_pipe) {
52576 ret = 0;
52577 if (filp->f_mode & FMODE_READ)
52578- inode->i_pipe->readers++;
52579+ atomic_inc(&inode->i_pipe->readers);
52580 if (filp->f_mode & FMODE_WRITE)
52581- inode->i_pipe->writers++;
52582+ atomic_inc(&inode->i_pipe->writers);
52583 }
52584
52585 mutex_unlock(&inode->i_mutex);
52586@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
52587 inode->i_pipe = NULL;
52588 }
52589
52590-static struct vfsmount *pipe_mnt __read_mostly;
52591+struct vfsmount *pipe_mnt __read_mostly;
52592 static int pipefs_delete_dentry(struct dentry *dentry)
52593 {
52594 /*
52595@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(void)
52596 goto fail_iput;
52597 inode->i_pipe = pipe;
52598
52599- pipe->readers = pipe->writers = 1;
52600+ atomic_set(&pipe->readers, 1);
52601+ atomic_set(&pipe->writers, 1);
52602 inode->i_fop = &rdwr_pipefifo_fops;
52603
52604 /*
52605diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
52606index 50f8f06..c5755df 100644
52607--- a/fs/proc/Kconfig
52608+++ b/fs/proc/Kconfig
52609@@ -30,12 +30,12 @@ config PROC_FS
52610
52611 config PROC_KCORE
52612 bool "/proc/kcore support" if !ARM
52613- depends on PROC_FS && MMU
52614+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
52615
52616 config PROC_VMCORE
52617 bool "/proc/vmcore support (EXPERIMENTAL)"
52618- depends on PROC_FS && CRASH_DUMP
52619- default y
52620+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
52621+ default n
52622 help
52623 Exports the dump image of crashed kernel in ELF format.
52624
52625@@ -59,8 +59,8 @@ config PROC_SYSCTL
52626 limited in memory.
52627
52628 config PROC_PAGE_MONITOR
52629- default y
52630- depends on PROC_FS && MMU
52631+ default n
52632+ depends on PROC_FS && MMU && !GRKERNSEC
52633 bool "Enable /proc page monitoring" if EMBEDDED
52634 help
52635 Various /proc files exist to monitor process memory utilization:
52636diff --git a/fs/proc/array.c b/fs/proc/array.c
52637index c5ef152..1363194 100644
52638--- a/fs/proc/array.c
52639+++ b/fs/proc/array.c
52640@@ -60,6 +60,7 @@
52641 #include <linux/tty.h>
52642 #include <linux/string.h>
52643 #include <linux/mman.h>
52644+#include <linux/grsecurity.h>
52645 #include <linux/proc_fs.h>
52646 #include <linux/ioport.h>
52647 #include <linux/uaccess.h>
52648@@ -321,6 +322,21 @@ static inline void task_context_switch_counts(struct seq_file *m,
52649 p->nivcsw);
52650 }
52651
52652+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52653+static inline void task_pax(struct seq_file *m, struct task_struct *p)
52654+{
52655+ if (p->mm)
52656+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
52657+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
52658+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
52659+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
52660+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
52661+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
52662+ else
52663+ seq_printf(m, "PaX:\t-----\n");
52664+}
52665+#endif
52666+
52667 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52668 struct pid *pid, struct task_struct *task)
52669 {
52670@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
52671 task_cap(m, task);
52672 cpuset_task_status_allowed(m, task);
52673 task_context_switch_counts(m, task);
52674+
52675+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
52676+ task_pax(m, task);
52677+#endif
52678+
52679+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
52680+ task_grsec_rbac(m, task);
52681+#endif
52682+
52683 return 0;
52684 }
52685
52686+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52687+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52688+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52689+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52690+#endif
52691+
52692 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52693 struct pid *pid, struct task_struct *task, int whole)
52694 {
52695@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52696 cputime_t cutime, cstime, utime, stime;
52697 cputime_t cgtime, gtime;
52698 unsigned long rsslim = 0;
52699- char tcomm[sizeof(task->comm)];
52700+ char tcomm[sizeof(task->comm)] = { 0 };
52701 unsigned long flags;
52702
52703+ pax_track_stack();
52704+
52705 state = *get_task_state(task);
52706 vsize = eip = esp = 0;
52707 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
52708@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52709 gtime = task_gtime(task);
52710 }
52711
52712+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52713+ if (PAX_RAND_FLAGS(mm)) {
52714+ eip = 0;
52715+ esp = 0;
52716+ wchan = 0;
52717+ }
52718+#endif
52719+#ifdef CONFIG_GRKERNSEC_HIDESYM
52720+ wchan = 0;
52721+ eip =0;
52722+ esp =0;
52723+#endif
52724+
52725 /* scale priority and nice values from timeslices to -20..20 */
52726 /* to make it look like a "normal" Unix priority/nice value */
52727 priority = task_prio(task);
52728@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
52729 vsize,
52730 mm ? get_mm_rss(mm) : 0,
52731 rsslim,
52732+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52733+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
52734+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
52735+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
52736+#else
52737 mm ? (permitted ? mm->start_code : 1) : 0,
52738 mm ? (permitted ? mm->end_code : 1) : 0,
52739 (permitted && mm) ? mm->start_stack : 0,
52740+#endif
52741 esp,
52742 eip,
52743 /* The signal information here is obsolete.
52744@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
52745
52746 return 0;
52747 }
52748+
52749+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
52750+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
52751+{
52752+ u32 curr_ip = 0;
52753+ unsigned long flags;
52754+
52755+ if (lock_task_sighand(task, &flags)) {
52756+ curr_ip = task->signal->curr_ip;
52757+ unlock_task_sighand(task, &flags);
52758+ }
52759+
52760+ return sprintf(buffer, "%pI4\n", &curr_ip);
52761+}
52762+#endif
52763diff --git a/fs/proc/base.c b/fs/proc/base.c
52764index 67f7dc0..67ab883 100644
52765--- a/fs/proc/base.c
52766+++ b/fs/proc/base.c
52767@@ -102,6 +102,22 @@ struct pid_entry {
52768 union proc_op op;
52769 };
52770
52771+struct getdents_callback {
52772+ struct linux_dirent __user * current_dir;
52773+ struct linux_dirent __user * previous;
52774+ struct file * file;
52775+ int count;
52776+ int error;
52777+};
52778+
52779+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
52780+ loff_t offset, u64 ino, unsigned int d_type)
52781+{
52782+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
52783+ buf->error = -EINVAL;
52784+ return 0;
52785+}
52786+
52787 #define NOD(NAME, MODE, IOP, FOP, OP) { \
52788 .name = (NAME), \
52789 .len = sizeof(NAME) - 1, \
52790@@ -213,6 +229,9 @@ static int check_mem_permission(struct task_struct *task)
52791 if (task == current)
52792 return 0;
52793
52794+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
52795+ return -EPERM;
52796+
52797 /*
52798 * If current is actively ptrace'ing, and would also be
52799 * permitted to freshly attach with ptrace now, permit it.
52800@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
52801 if (!mm->arg_end)
52802 goto out_mm; /* Shh! No looking before we're done */
52803
52804+ if (gr_acl_handle_procpidmem(task))
52805+ goto out_mm;
52806+
52807 len = mm->arg_end - mm->arg_start;
52808
52809 if (len > PAGE_SIZE)
52810@@ -287,12 +309,28 @@ out:
52811 return res;
52812 }
52813
52814+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52815+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
52816+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
52817+ _mm->pax_flags & MF_PAX_SEGMEXEC))
52818+#endif
52819+
52820 static int proc_pid_auxv(struct task_struct *task, char *buffer)
52821 {
52822 int res = 0;
52823 struct mm_struct *mm = get_task_mm(task);
52824 if (mm) {
52825 unsigned int nwords = 0;
52826+
52827+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
52828+ /* allow if we're currently ptracing this task */
52829+ if (PAX_RAND_FLAGS(mm) &&
52830+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
52831+ mmput(mm);
52832+ return 0;
52833+ }
52834+#endif
52835+
52836 do {
52837 nwords += 2;
52838 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
52839@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
52840 }
52841
52842
52843-#ifdef CONFIG_KALLSYMS
52844+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52845 /*
52846 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
52847 * Returns the resolved symbol. If that fails, simply return the address.
52848@@ -345,7 +383,7 @@ static void unlock_trace(struct task_struct *task)
52849 mutex_unlock(&task->cred_guard_mutex);
52850 }
52851
52852-#ifdef CONFIG_STACKTRACE
52853+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
52854
52855 #define MAX_STACK_TRACE_DEPTH 64
52856
52857@@ -545,7 +583,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
52858 return count;
52859 }
52860
52861-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
52862+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
52863 static int proc_pid_syscall(struct task_struct *task, char *buffer)
52864 {
52865 long nr;
52866@@ -574,7 +612,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
52867 /************************************************************************/
52868
52869 /* permission checks */
52870-static int proc_fd_access_allowed(struct inode *inode)
52871+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
52872 {
52873 struct task_struct *task;
52874 int allowed = 0;
52875@@ -584,7 +622,10 @@ static int proc_fd_access_allowed(struct inode *inode)
52876 */
52877 task = get_proc_task(inode);
52878 if (task) {
52879- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52880+ if (log)
52881+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
52882+ else
52883+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
52884 put_task_struct(task);
52885 }
52886 return allowed;
52887@@ -809,6 +850,8 @@ static int mem_open(struct inode* inode, struct file* file)
52888 return 0;
52889 }
52890
52891+static int task_dumpable(struct task_struct *task);
52892+
52893 static ssize_t mem_read(struct file * file, char __user * buf,
52894 size_t count, loff_t *ppos)
52895 {
52896@@ -824,6 +867,12 @@ static ssize_t mem_read(struct file * file, char __user * buf,
52897 if (check_mem_permission(task))
52898 goto out;
52899
52900+ // XXX: temporary workaround
52901+ if (!task_dumpable(task) && task == current) {
52902+ ret = -EACCES;
52903+ goto out;
52904+ }
52905+
52906 ret = -ENOMEM;
52907 page = (char *)__get_free_page(GFP_TEMPORARY);
52908 if (!page)
52909@@ -963,6 +1012,9 @@ static ssize_t environ_read(struct file *file, char __user *buf,
52910 if (!task)
52911 goto out_no_task;
52912
52913+ if (gr_acl_handle_procpidmem(task))
52914+ goto out;
52915+
52916 if (!ptrace_may_access(task, PTRACE_MODE_READ))
52917 goto out;
52918
52919@@ -1377,7 +1429,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
52920 path_put(&nd->path);
52921
52922 /* Are we allowed to snoop on the tasks file descriptors? */
52923- if (!proc_fd_access_allowed(inode))
52924+ if (!proc_fd_access_allowed(inode,0))
52925 goto out;
52926
52927 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
52928@@ -1417,8 +1469,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
52929 struct path path;
52930
52931 /* Are we allowed to snoop on the tasks file descriptors? */
52932- if (!proc_fd_access_allowed(inode))
52933- goto out;
52934+ /* logging this is needed for learning on chromium to work properly,
52935+ but we don't want to flood the logs from 'ps' which does a readlink
52936+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
52937+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
52938+ */
52939+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
52940+ if (!proc_fd_access_allowed(inode,0))
52941+ goto out;
52942+ } else {
52943+ if (!proc_fd_access_allowed(inode,1))
52944+ goto out;
52945+ }
52946
52947 error = PROC_I(inode)->op.proc_get_link(inode, &path);
52948 if (error)
52949@@ -1483,7 +1545,11 @@ static struct inode *proc_pid_make_inode(struct super_block * sb, struct task_st
52950 rcu_read_lock();
52951 cred = __task_cred(task);
52952 inode->i_uid = cred->euid;
52953+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52954+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
52955+#else
52956 inode->i_gid = cred->egid;
52957+#endif
52958 rcu_read_unlock();
52959 }
52960 security_task_to_inode(task, inode);
52961@@ -1501,6 +1567,9 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52962 struct inode *inode = dentry->d_inode;
52963 struct task_struct *task;
52964 const struct cred *cred;
52965+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52966+ const struct cred *tmpcred = current_cred();
52967+#endif
52968
52969 generic_fillattr(inode, stat);
52970
52971@@ -1508,13 +1577,41 @@ static int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat
52972 stat->uid = 0;
52973 stat->gid = 0;
52974 task = pid_task(proc_pid(inode), PIDTYPE_PID);
52975+
52976+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
52977+ rcu_read_unlock();
52978+ return -ENOENT;
52979+ }
52980+
52981 if (task) {
52982+ cred = __task_cred(task);
52983+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52984+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
52985+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
52986+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
52987+#endif
52988+ ) {
52989+#endif
52990 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
52991+#ifdef CONFIG_GRKERNSEC_PROC_USER
52992+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
52993+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
52994+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
52995+#endif
52996 task_dumpable(task)) {
52997- cred = __task_cred(task);
52998 stat->uid = cred->euid;
52999+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53000+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
53001+#else
53002 stat->gid = cred->egid;
53003+#endif
53004 }
53005+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53006+ } else {
53007+ rcu_read_unlock();
53008+ return -ENOENT;
53009+ }
53010+#endif
53011 }
53012 rcu_read_unlock();
53013 return 0;
53014@@ -1545,11 +1642,20 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
53015
53016 if (task) {
53017 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
53018+#ifdef CONFIG_GRKERNSEC_PROC_USER
53019+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
53020+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53021+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
53022+#endif
53023 task_dumpable(task)) {
53024 rcu_read_lock();
53025 cred = __task_cred(task);
53026 inode->i_uid = cred->euid;
53027+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53028+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53029+#else
53030 inode->i_gid = cred->egid;
53031+#endif
53032 rcu_read_unlock();
53033 } else {
53034 inode->i_uid = 0;
53035@@ -1670,7 +1776,8 @@ static int proc_fd_info(struct inode *inode, struct path *path, char *info)
53036 int fd = proc_fd(inode);
53037
53038 if (task) {
53039- files = get_files_struct(task);
53040+ if (!gr_acl_handle_procpidmem(task))
53041+ files = get_files_struct(task);
53042 put_task_struct(task);
53043 }
53044 if (files) {
53045@@ -1922,12 +2029,22 @@ static const struct file_operations proc_fd_operations = {
53046 static int proc_fd_permission(struct inode *inode, int mask)
53047 {
53048 int rv;
53049+ struct task_struct *task;
53050
53051 rv = generic_permission(inode, mask, NULL);
53052- if (rv == 0)
53053- return 0;
53054+
53055 if (task_pid(current) == proc_pid(inode))
53056 rv = 0;
53057+
53058+ task = get_proc_task(inode);
53059+ if (task == NULL)
53060+ return rv;
53061+
53062+ if (gr_acl_handle_procpidmem(task))
53063+ rv = -EACCES;
53064+
53065+ put_task_struct(task);
53066+
53067 return rv;
53068 }
53069
53070@@ -2036,6 +2153,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
53071 if (!task)
53072 goto out_no_task;
53073
53074+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53075+ goto out;
53076+
53077 /*
53078 * Yes, it does not scale. And it should not. Don't add
53079 * new entries into /proc/<tgid>/ without very good reasons.
53080@@ -2080,6 +2200,9 @@ static int proc_pident_readdir(struct file *filp,
53081 if (!task)
53082 goto out_no_task;
53083
53084+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53085+ goto out;
53086+
53087 ret = 0;
53088 i = filp->f_pos;
53089 switch (i) {
53090@@ -2347,7 +2470,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
53091 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
53092 void *cookie)
53093 {
53094- char *s = nd_get_link(nd);
53095+ const char *s = nd_get_link(nd);
53096 if (!IS_ERR(s))
53097 __putname(s);
53098 }
53099@@ -2553,7 +2676,7 @@ static const struct pid_entry tgid_base_stuff[] = {
53100 #ifdef CONFIG_SCHED_DEBUG
53101 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53102 #endif
53103-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53104+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53105 INF("syscall", S_IRUGO, proc_pid_syscall),
53106 #endif
53107 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53108@@ -2578,10 +2701,10 @@ static const struct pid_entry tgid_base_stuff[] = {
53109 #ifdef CONFIG_SECURITY
53110 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53111 #endif
53112-#ifdef CONFIG_KALLSYMS
53113+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53114 INF("wchan", S_IRUGO, proc_pid_wchan),
53115 #endif
53116-#ifdef CONFIG_STACKTRACE
53117+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53118 ONE("stack", S_IRUGO, proc_pid_stack),
53119 #endif
53120 #ifdef CONFIG_SCHEDSTATS
53121@@ -2611,6 +2734,9 @@ static const struct pid_entry tgid_base_stuff[] = {
53122 #ifdef CONFIG_TASK_IO_ACCOUNTING
53123 INF("io", S_IRUSR, proc_tgid_io_accounting),
53124 #endif
53125+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53126+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
53127+#endif
53128 };
53129
53130 static int proc_tgid_base_readdir(struct file * filp,
53131@@ -2735,7 +2861,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
53132 if (!inode)
53133 goto out;
53134
53135+#ifdef CONFIG_GRKERNSEC_PROC_USER
53136+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
53137+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53138+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53139+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
53140+#else
53141 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
53142+#endif
53143 inode->i_op = &proc_tgid_base_inode_operations;
53144 inode->i_fop = &proc_tgid_base_operations;
53145 inode->i_flags|=S_IMMUTABLE;
53146@@ -2777,7 +2910,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct
53147 if (!task)
53148 goto out;
53149
53150+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
53151+ goto out_put_task;
53152+
53153 result = proc_pid_instantiate(dir, dentry, task, NULL);
53154+out_put_task:
53155 put_task_struct(task);
53156 out:
53157 return result;
53158@@ -2842,6 +2979,11 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53159 {
53160 unsigned int nr;
53161 struct task_struct *reaper;
53162+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53163+ const struct cred *tmpcred = current_cred();
53164+ const struct cred *itercred;
53165+#endif
53166+ filldir_t __filldir = filldir;
53167 struct tgid_iter iter;
53168 struct pid_namespace *ns;
53169
53170@@ -2865,8 +3007,27 @@ int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir)
53171 for (iter = next_tgid(ns, iter);
53172 iter.task;
53173 iter.tgid += 1, iter = next_tgid(ns, iter)) {
53174+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53175+ rcu_read_lock();
53176+ itercred = __task_cred(iter.task);
53177+#endif
53178+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
53179+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53180+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
53181+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53182+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
53183+#endif
53184+ )
53185+#endif
53186+ )
53187+ __filldir = &gr_fake_filldir;
53188+ else
53189+ __filldir = filldir;
53190+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53191+ rcu_read_unlock();
53192+#endif
53193 filp->f_pos = iter.tgid + TGID_OFFSET;
53194- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
53195+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
53196 put_task_struct(iter.task);
53197 goto out;
53198 }
53199@@ -2892,7 +3053,7 @@ static const struct pid_entry tid_base_stuff[] = {
53200 #ifdef CONFIG_SCHED_DEBUG
53201 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
53202 #endif
53203-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
53204+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
53205 INF("syscall", S_IRUGO, proc_pid_syscall),
53206 #endif
53207 INF("cmdline", S_IRUGO, proc_pid_cmdline),
53208@@ -2916,10 +3077,10 @@ static const struct pid_entry tid_base_stuff[] = {
53209 #ifdef CONFIG_SECURITY
53210 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
53211 #endif
53212-#ifdef CONFIG_KALLSYMS
53213+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53214 INF("wchan", S_IRUGO, proc_pid_wchan),
53215 #endif
53216-#ifdef CONFIG_STACKTRACE
53217+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
53218 ONE("stack", S_IRUGO, proc_pid_stack),
53219 #endif
53220 #ifdef CONFIG_SCHEDSTATS
53221diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
53222index 82676e3..5f8518a 100644
53223--- a/fs/proc/cmdline.c
53224+++ b/fs/proc/cmdline.c
53225@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
53226
53227 static int __init proc_cmdline_init(void)
53228 {
53229+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53230+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
53231+#else
53232 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
53233+#endif
53234 return 0;
53235 }
53236 module_init(proc_cmdline_init);
53237diff --git a/fs/proc/devices.c b/fs/proc/devices.c
53238index 59ee7da..469b4b6 100644
53239--- a/fs/proc/devices.c
53240+++ b/fs/proc/devices.c
53241@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
53242
53243 static int __init proc_devices_init(void)
53244 {
53245+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53246+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
53247+#else
53248 proc_create("devices", 0, NULL, &proc_devinfo_operations);
53249+#endif
53250 return 0;
53251 }
53252 module_init(proc_devices_init);
53253diff --git a/fs/proc/inode.c b/fs/proc/inode.c
53254index d78ade3..81767f9 100644
53255--- a/fs/proc/inode.c
53256+++ b/fs/proc/inode.c
53257@@ -18,12 +18,19 @@
53258 #include <linux/module.h>
53259 #include <linux/smp_lock.h>
53260 #include <linux/sysctl.h>
53261+#include <linux/grsecurity.h>
53262
53263 #include <asm/system.h>
53264 #include <asm/uaccess.h>
53265
53266 #include "internal.h"
53267
53268+#ifdef CONFIG_PROC_SYSCTL
53269+extern const struct inode_operations proc_sys_inode_operations;
53270+extern const struct inode_operations proc_sys_dir_operations;
53271+#endif
53272+
53273+
53274 struct proc_dir_entry *de_get(struct proc_dir_entry *de)
53275 {
53276 atomic_inc(&de->count);
53277@@ -62,6 +69,13 @@ static void proc_delete_inode(struct inode *inode)
53278 de_put(de);
53279 if (PROC_I(inode)->sysctl)
53280 sysctl_head_put(PROC_I(inode)->sysctl);
53281+
53282+#ifdef CONFIG_PROC_SYSCTL
53283+ if (inode->i_op == &proc_sys_inode_operations ||
53284+ inode->i_op == &proc_sys_dir_operations)
53285+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
53286+#endif
53287+
53288 clear_inode(inode);
53289 }
53290
53291@@ -457,7 +471,11 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
53292 if (de->mode) {
53293 inode->i_mode = de->mode;
53294 inode->i_uid = de->uid;
53295+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
53296+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
53297+#else
53298 inode->i_gid = de->gid;
53299+#endif
53300 }
53301 if (de->size)
53302 inode->i_size = de->size;
53303diff --git a/fs/proc/internal.h b/fs/proc/internal.h
53304index 753ca37..26bcf3b 100644
53305--- a/fs/proc/internal.h
53306+++ b/fs/proc/internal.h
53307@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
53308 struct pid *pid, struct task_struct *task);
53309 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
53310 struct pid *pid, struct task_struct *task);
53311+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
53312+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
53313+#endif
53314 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
53315
53316 extern const struct file_operations proc_maps_operations;
53317diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
53318index b442dac..aab29cb 100644
53319--- a/fs/proc/kcore.c
53320+++ b/fs/proc/kcore.c
53321@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
53322 off_t offset = 0;
53323 struct kcore_list *m;
53324
53325+ pax_track_stack();
53326+
53327 /* setup ELF header */
53328 elf = (struct elfhdr *) bufp;
53329 bufp += sizeof(struct elfhdr);
53330@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53331 * the addresses in the elf_phdr on our list.
53332 */
53333 start = kc_offset_to_vaddr(*fpos - elf_buflen);
53334- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
53335+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
53336+ if (tsz > buflen)
53337 tsz = buflen;
53338-
53339+
53340 while (buflen) {
53341 struct kcore_list *m;
53342
53343@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53344 kfree(elf_buf);
53345 } else {
53346 if (kern_addr_valid(start)) {
53347- unsigned long n;
53348+ char *elf_buf;
53349+ mm_segment_t oldfs;
53350
53351- n = copy_to_user(buffer, (char *)start, tsz);
53352- /*
53353- * We cannot distingush between fault on source
53354- * and fault on destination. When this happens
53355- * we clear too and hope it will trigger the
53356- * EFAULT again.
53357- */
53358- if (n) {
53359- if (clear_user(buffer + tsz - n,
53360- n))
53361+ elf_buf = kmalloc(tsz, GFP_KERNEL);
53362+ if (!elf_buf)
53363+ return -ENOMEM;
53364+ oldfs = get_fs();
53365+ set_fs(KERNEL_DS);
53366+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
53367+ set_fs(oldfs);
53368+ if (copy_to_user(buffer, elf_buf, tsz)) {
53369+ kfree(elf_buf);
53370 return -EFAULT;
53371+ }
53372 }
53373+ set_fs(oldfs);
53374+ kfree(elf_buf);
53375 } else {
53376 if (clear_user(buffer, tsz))
53377 return -EFAULT;
53378@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
53379
53380 static int open_kcore(struct inode *inode, struct file *filp)
53381 {
53382+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
53383+ return -EPERM;
53384+#endif
53385 if (!capable(CAP_SYS_RAWIO))
53386 return -EPERM;
53387 if (kcore_need_update)
53388diff --git a/fs/proc/kmsg.c b/fs/proc/kmsg.c
53389index 7ca7834..cfe90a4 100644
53390--- a/fs/proc/kmsg.c
53391+++ b/fs/proc/kmsg.c
53392@@ -12,37 +12,37 @@
53393 #include <linux/poll.h>
53394 #include <linux/proc_fs.h>
53395 #include <linux/fs.h>
53396+#include <linux/syslog.h>
53397
53398 #include <asm/uaccess.h>
53399 #include <asm/io.h>
53400
53401 extern wait_queue_head_t log_wait;
53402
53403-extern int do_syslog(int type, char __user *bug, int count);
53404-
53405 static int kmsg_open(struct inode * inode, struct file * file)
53406 {
53407- return do_syslog(1,NULL,0);
53408+ return do_syslog(SYSLOG_ACTION_OPEN, NULL, 0, SYSLOG_FROM_FILE);
53409 }
53410
53411 static int kmsg_release(struct inode * inode, struct file * file)
53412 {
53413- (void) do_syslog(0,NULL,0);
53414+ (void) do_syslog(SYSLOG_ACTION_CLOSE, NULL, 0, SYSLOG_FROM_FILE);
53415 return 0;
53416 }
53417
53418 static ssize_t kmsg_read(struct file *file, char __user *buf,
53419 size_t count, loff_t *ppos)
53420 {
53421- if ((file->f_flags & O_NONBLOCK) && !do_syslog(9, NULL, 0))
53422+ if ((file->f_flags & O_NONBLOCK) &&
53423+ !do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53424 return -EAGAIN;
53425- return do_syslog(2, buf, count);
53426+ return do_syslog(SYSLOG_ACTION_READ, buf, count, SYSLOG_FROM_FILE);
53427 }
53428
53429 static unsigned int kmsg_poll(struct file *file, poll_table *wait)
53430 {
53431 poll_wait(file, &log_wait, wait);
53432- if (do_syslog(9, NULL, 0))
53433+ if (do_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0, SYSLOG_FROM_FILE))
53434 return POLLIN | POLLRDNORM;
53435 return 0;
53436 }
53437diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
53438index a65239c..ad1182a 100644
53439--- a/fs/proc/meminfo.c
53440+++ b/fs/proc/meminfo.c
53441@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53442 unsigned long pages[NR_LRU_LISTS];
53443 int lru;
53444
53445+ pax_track_stack();
53446+
53447 /*
53448 * display in kilobytes.
53449 */
53450@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
53451 vmi.used >> 10,
53452 vmi.largest_chunk >> 10
53453 #ifdef CONFIG_MEMORY_FAILURE
53454- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
53455+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
53456 #endif
53457 );
53458
53459diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
53460index 9fe7d7e..cdb62c9 100644
53461--- a/fs/proc/nommu.c
53462+++ b/fs/proc/nommu.c
53463@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
53464 if (len < 1)
53465 len = 1;
53466 seq_printf(m, "%*c", len, ' ');
53467- seq_path(m, &file->f_path, "");
53468+ seq_path(m, &file->f_path, "\n\\");
53469 }
53470
53471 seq_putc(m, '\n');
53472diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
53473index 04d1270..25e1173 100644
53474--- a/fs/proc/proc_net.c
53475+++ b/fs/proc/proc_net.c
53476@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(struct inode *dir)
53477 struct task_struct *task;
53478 struct nsproxy *ns;
53479 struct net *net = NULL;
53480+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53481+ const struct cred *cred = current_cred();
53482+#endif
53483+
53484+#ifdef CONFIG_GRKERNSEC_PROC_USER
53485+ if (cred->fsuid)
53486+ return net;
53487+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53488+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
53489+ return net;
53490+#endif
53491
53492 rcu_read_lock();
53493 task = pid_task(proc_pid(dir), PIDTYPE_PID);
53494diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
53495index f667e8a..55f4d96 100644
53496--- a/fs/proc/proc_sysctl.c
53497+++ b/fs/proc/proc_sysctl.c
53498@@ -7,11 +7,13 @@
53499 #include <linux/security.h>
53500 #include "internal.h"
53501
53502+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
53503+
53504 static const struct dentry_operations proc_sys_dentry_operations;
53505 static const struct file_operations proc_sys_file_operations;
53506-static const struct inode_operations proc_sys_inode_operations;
53507+const struct inode_operations proc_sys_inode_operations;
53508 static const struct file_operations proc_sys_dir_file_operations;
53509-static const struct inode_operations proc_sys_dir_operations;
53510+const struct inode_operations proc_sys_dir_operations;
53511
53512 static struct inode *proc_sys_make_inode(struct super_block *sb,
53513 struct ctl_table_header *head, struct ctl_table *table)
53514@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53515 if (!p)
53516 goto out;
53517
53518+ if (gr_handle_sysctl(p, MAY_EXEC))
53519+ goto out;
53520+
53521 err = ERR_PTR(-ENOMEM);
53522 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
53523 if (h)
53524@@ -119,6 +124,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
53525
53526 err = NULL;
53527 dentry->d_op = &proc_sys_dentry_operations;
53528+
53529+ gr_handle_proc_create(dentry, inode);
53530+
53531 d_add(dentry, inode);
53532
53533 out:
53534@@ -200,6 +208,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
53535 return -ENOMEM;
53536 } else {
53537 child->d_op = &proc_sys_dentry_operations;
53538+
53539+ gr_handle_proc_create(child, inode);
53540+
53541 d_add(child, inode);
53542 }
53543 } else {
53544@@ -228,6 +239,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
53545 if (*pos < file->f_pos)
53546 continue;
53547
53548+ if (gr_handle_sysctl(table, 0))
53549+ continue;
53550+
53551 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
53552 if (res)
53553 return res;
53554@@ -344,6 +358,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
53555 if (IS_ERR(head))
53556 return PTR_ERR(head);
53557
53558+ if (table && gr_handle_sysctl(table, MAY_EXEC))
53559+ return -ENOENT;
53560+
53561 generic_fillattr(inode, stat);
53562 if (table)
53563 stat->mode = (stat->mode & S_IFMT) | table->mode;
53564@@ -358,17 +375,18 @@ static const struct file_operations proc_sys_file_operations = {
53565 };
53566
53567 static const struct file_operations proc_sys_dir_file_operations = {
53568+ .read = generic_read_dir,
53569 .readdir = proc_sys_readdir,
53570 .llseek = generic_file_llseek,
53571 };
53572
53573-static const struct inode_operations proc_sys_inode_operations = {
53574+const struct inode_operations proc_sys_inode_operations = {
53575 .permission = proc_sys_permission,
53576 .setattr = proc_sys_setattr,
53577 .getattr = proc_sys_getattr,
53578 };
53579
53580-static const struct inode_operations proc_sys_dir_operations = {
53581+const struct inode_operations proc_sys_dir_operations = {
53582 .lookup = proc_sys_lookup,
53583 .permission = proc_sys_permission,
53584 .setattr = proc_sys_setattr,
53585diff --git a/fs/proc/root.c b/fs/proc/root.c
53586index b080b79..d957e63 100644
53587--- a/fs/proc/root.c
53588+++ b/fs/proc/root.c
53589@@ -134,7 +134,15 @@ void __init proc_root_init(void)
53590 #ifdef CONFIG_PROC_DEVICETREE
53591 proc_device_tree_init();
53592 #endif
53593+#ifdef CONFIG_GRKERNSEC_PROC_ADD
53594+#ifdef CONFIG_GRKERNSEC_PROC_USER
53595+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
53596+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
53597+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
53598+#endif
53599+#else
53600 proc_mkdir("bus", NULL);
53601+#endif
53602 proc_sys_init();
53603 }
53604
53605diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
53606index 3b7b82a..7dbb571 100644
53607--- a/fs/proc/task_mmu.c
53608+++ b/fs/proc/task_mmu.c
53609@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53610 "VmStk:\t%8lu kB\n"
53611 "VmExe:\t%8lu kB\n"
53612 "VmLib:\t%8lu kB\n"
53613- "VmPTE:\t%8lu kB\n",
53614- hiwater_vm << (PAGE_SHIFT-10),
53615+ "VmPTE:\t%8lu kB\n"
53616+
53617+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53618+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
53619+#endif
53620+
53621+ ,hiwater_vm << (PAGE_SHIFT-10),
53622 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
53623 mm->locked_vm << (PAGE_SHIFT-10),
53624 hiwater_rss << (PAGE_SHIFT-10),
53625 total_rss << (PAGE_SHIFT-10),
53626 data << (PAGE_SHIFT-10),
53627 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
53628- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
53629+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
53630+
53631+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
53632+ , mm->context.user_cs_base, mm->context.user_cs_limit
53633+#endif
53634+
53635+ );
53636 }
53637
53638 unsigned long task_vsize(struct mm_struct *mm)
53639@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, void *v)
53640 struct proc_maps_private *priv = m->private;
53641 struct vm_area_struct *vma = v;
53642
53643- vma_stop(priv, vma);
53644+ if (!IS_ERR(vma))
53645+ vma_stop(priv, vma);
53646 if (priv->task)
53647 put_task_struct(priv->task);
53648 }
53649@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *inode, struct file *file,
53650 return ret;
53651 }
53652
53653+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53654+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
53655+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
53656+ _mm->pax_flags & MF_PAX_SEGMEXEC))
53657+#endif
53658+
53659 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53660 {
53661 struct mm_struct *mm = vma->vm_mm;
53662@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53663 int flags = vma->vm_flags;
53664 unsigned long ino = 0;
53665 unsigned long long pgoff = 0;
53666- unsigned long start;
53667 dev_t dev = 0;
53668 int len;
53669
53670@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53671 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
53672 }
53673
53674- /* We don't show the stack guard page in /proc/maps */
53675- start = vma->vm_start;
53676- if (vma->vm_flags & VM_GROWSDOWN)
53677- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
53678- start += PAGE_SIZE;
53679-
53680 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
53681- start,
53682+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53683+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
53684+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
53685+#else
53686+ vma->vm_start,
53687 vma->vm_end,
53688+#endif
53689 flags & VM_READ ? 'r' : '-',
53690 flags & VM_WRITE ? 'w' : '-',
53691 flags & VM_EXEC ? 'x' : '-',
53692 flags & VM_MAYSHARE ? 's' : 'p',
53693+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53694+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
53695+#else
53696 pgoff,
53697+#endif
53698 MAJOR(dev), MINOR(dev), ino, &len);
53699
53700 /*
53701@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53702 */
53703 if (file) {
53704 pad_len_spaces(m, len);
53705- seq_path(m, &file->f_path, "\n");
53706+ seq_path(m, &file->f_path, "\n\\");
53707 } else {
53708 const char *name = arch_vma_name(vma);
53709 if (!name) {
53710@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
53711 if (vma->vm_start <= mm->brk &&
53712 vma->vm_end >= mm->start_brk) {
53713 name = "[heap]";
53714- } else if (vma->vm_start <= mm->start_stack &&
53715- vma->vm_end >= mm->start_stack) {
53716+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
53717+ (vma->vm_start <= mm->start_stack &&
53718+ vma->vm_end >= mm->start_stack)) {
53719 name = "[stack]";
53720 }
53721 } else {
53722@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m, void *v)
53723 };
53724
53725 memset(&mss, 0, sizeof mss);
53726- mss.vma = vma;
53727- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53728- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53729+
53730+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53731+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
53732+#endif
53733+ mss.vma = vma;
53734+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
53735+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
53736+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53737+ }
53738+#endif
53739
53740 show_map_vma(m, vma);
53741
53742@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m, void *v)
53743 "Swap: %8lu kB\n"
53744 "KernelPageSize: %8lu kB\n"
53745 "MMUPageSize: %8lu kB\n",
53746+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
53747+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
53748+#else
53749 (vma->vm_end - vma->vm_start) >> 10,
53750+#endif
53751 mss.resident >> 10,
53752 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
53753 mss.shared_clean >> 10,
53754diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
53755index 8f5c05d..c99c76d 100644
53756--- a/fs/proc/task_nommu.c
53757+++ b/fs/proc/task_nommu.c
53758@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
53759 else
53760 bytes += kobjsize(mm);
53761
53762- if (current->fs && current->fs->users > 1)
53763+ if (current->fs && atomic_read(&current->fs->users) > 1)
53764 sbytes += kobjsize(current->fs);
53765 else
53766 bytes += kobjsize(current->fs);
53767@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma)
53768 if (len < 1)
53769 len = 1;
53770 seq_printf(m, "%*c", len, ' ');
53771- seq_path(m, &file->f_path, "");
53772+ seq_path(m, &file->f_path, "\n\\");
53773 }
53774
53775 seq_putc(m, '\n');
53776diff --git a/fs/readdir.c b/fs/readdir.c
53777index 7723401..30059a6 100644
53778--- a/fs/readdir.c
53779+++ b/fs/readdir.c
53780@@ -16,6 +16,7 @@
53781 #include <linux/security.h>
53782 #include <linux/syscalls.h>
53783 #include <linux/unistd.h>
53784+#include <linux/namei.h>
53785
53786 #include <asm/uaccess.h>
53787
53788@@ -67,6 +68,7 @@ struct old_linux_dirent {
53789
53790 struct readdir_callback {
53791 struct old_linux_dirent __user * dirent;
53792+ struct file * file;
53793 int result;
53794 };
53795
53796@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
53797 buf->result = -EOVERFLOW;
53798 return -EOVERFLOW;
53799 }
53800+
53801+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53802+ return 0;
53803+
53804 buf->result++;
53805 dirent = buf->dirent;
53806 if (!access_ok(VERIFY_WRITE, dirent,
53807@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
53808
53809 buf.result = 0;
53810 buf.dirent = dirent;
53811+ buf.file = file;
53812
53813 error = vfs_readdir(file, fillonedir, &buf);
53814 if (buf.result)
53815@@ -142,6 +149,7 @@ struct linux_dirent {
53816 struct getdents_callback {
53817 struct linux_dirent __user * current_dir;
53818 struct linux_dirent __user * previous;
53819+ struct file * file;
53820 int count;
53821 int error;
53822 };
53823@@ -162,6 +170,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
53824 buf->error = -EOVERFLOW;
53825 return -EOVERFLOW;
53826 }
53827+
53828+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53829+ return 0;
53830+
53831 dirent = buf->previous;
53832 if (dirent) {
53833 if (__put_user(offset, &dirent->d_off))
53834@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
53835 buf.previous = NULL;
53836 buf.count = count;
53837 buf.error = 0;
53838+ buf.file = file;
53839
53840 error = vfs_readdir(file, filldir, &buf);
53841 if (error >= 0)
53842@@ -228,6 +241,7 @@ out:
53843 struct getdents_callback64 {
53844 struct linux_dirent64 __user * current_dir;
53845 struct linux_dirent64 __user * previous;
53846+ struct file *file;
53847 int count;
53848 int error;
53849 };
53850@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
53851 buf->error = -EINVAL; /* only used if we fail.. */
53852 if (reclen > buf->count)
53853 return -EINVAL;
53854+
53855+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
53856+ return 0;
53857+
53858 dirent = buf->previous;
53859 if (dirent) {
53860 if (__put_user(offset, &dirent->d_off))
53861@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53862
53863 buf.current_dir = dirent;
53864 buf.previous = NULL;
53865+ buf.file = file;
53866 buf.count = count;
53867 buf.error = 0;
53868
53869@@ -297,7 +316,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
53870 error = buf.error;
53871 lastdirent = buf.previous;
53872 if (lastdirent) {
53873- typeof(lastdirent->d_off) d_off = file->f_pos;
53874+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = file->f_pos;
53875 if (__put_user(d_off, &lastdirent->d_off))
53876 error = -EFAULT;
53877 else
53878diff --git a/fs/reiserfs/dir.c b/fs/reiserfs/dir.c
53879index d42c30c..4fd8718 100644
53880--- a/fs/reiserfs/dir.c
53881+++ b/fs/reiserfs/dir.c
53882@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
53883 struct reiserfs_dir_entry de;
53884 int ret = 0;
53885
53886+ pax_track_stack();
53887+
53888 reiserfs_write_lock(inode->i_sb);
53889
53890 reiserfs_check_lock_depth(inode->i_sb, "readdir");
53891diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
53892index 128d3f7..8840d44 100644
53893--- a/fs/reiserfs/do_balan.c
53894+++ b/fs/reiserfs/do_balan.c
53895@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
53896 return;
53897 }
53898
53899- atomic_inc(&(fs_generation(tb->tb_sb)));
53900+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
53901 do_balance_starts(tb);
53902
53903 /* balance leaf returns 0 except if combining L R and S into
53904diff --git a/fs/reiserfs/item_ops.c b/fs/reiserfs/item_ops.c
53905index 72cb1cc..d0e3181 100644
53906--- a/fs/reiserfs/item_ops.c
53907+++ b/fs/reiserfs/item_ops.c
53908@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_item *vi)
53909 vi->vi_index, vi->vi_type, vi->vi_ih);
53910 }
53911
53912-static struct item_operations stat_data_ops = {
53913+static const struct item_operations stat_data_ops = {
53914 .bytes_number = sd_bytes_number,
53915 .decrement_key = sd_decrement_key,
53916 .is_left_mergeable = sd_is_left_mergeable,
53917@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtual_item *vi)
53918 vi->vi_index, vi->vi_type, vi->vi_ih);
53919 }
53920
53921-static struct item_operations direct_ops = {
53922+static const struct item_operations direct_ops = {
53923 .bytes_number = direct_bytes_number,
53924 .decrement_key = direct_decrement_key,
53925 .is_left_mergeable = direct_is_left_mergeable,
53926@@ -341,7 +341,7 @@ static void indirect_print_vi(struct virtual_item *vi)
53927 vi->vi_index, vi->vi_type, vi->vi_ih);
53928 }
53929
53930-static struct item_operations indirect_ops = {
53931+static const struct item_operations indirect_ops = {
53932 .bytes_number = indirect_bytes_number,
53933 .decrement_key = indirect_decrement_key,
53934 .is_left_mergeable = indirect_is_left_mergeable,
53935@@ -628,7 +628,7 @@ static void direntry_print_vi(struct virtual_item *vi)
53936 printk("\n");
53937 }
53938
53939-static struct item_operations direntry_ops = {
53940+static const struct item_operations direntry_ops = {
53941 .bytes_number = direntry_bytes_number,
53942 .decrement_key = direntry_decrement_key,
53943 .is_left_mergeable = direntry_is_left_mergeable,
53944@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct virtual_item *vi)
53945 "Invalid item type observed, run fsck ASAP");
53946 }
53947
53948-static struct item_operations errcatch_ops = {
53949+static const struct item_operations errcatch_ops = {
53950 errcatch_bytes_number,
53951 errcatch_decrement_key,
53952 errcatch_is_left_mergeable,
53953@@ -746,7 +746,7 @@ static struct item_operations errcatch_ops = {
53954 #error Item types must use disk-format assigned values.
53955 #endif
53956
53957-struct item_operations *item_ops[TYPE_ANY + 1] = {
53958+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
53959 &stat_data_ops,
53960 &indirect_ops,
53961 &direct_ops,
53962diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
53963index b5fe0aa..e0e25c4 100644
53964--- a/fs/reiserfs/journal.c
53965+++ b/fs/reiserfs/journal.c
53966@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_breada(struct block_device *dev,
53967 struct buffer_head *bh;
53968 int i, j;
53969
53970+ pax_track_stack();
53971+
53972 bh = __getblk(dev, block, bufsize);
53973 if (buffer_uptodate(bh))
53974 return (bh);
53975diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c
53976index 2715791..b8996db 100644
53977--- a/fs/reiserfs/namei.c
53978+++ b/fs/reiserfs/namei.c
53979@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
53980 unsigned long savelink = 1;
53981 struct timespec ctime;
53982
53983+ pax_track_stack();
53984+
53985 /* three balancings: (1) old name removal, (2) new name insertion
53986 and (3) maybe "save" link insertion
53987 stat data updates: (1) old directory,
53988diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
53989index 9229e55..3d2e3b7 100644
53990--- a/fs/reiserfs/procfs.c
53991+++ b/fs/reiserfs/procfs.c
53992@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
53993 "SMALL_TAILS " : "NO_TAILS ",
53994 replay_only(sb) ? "REPLAY_ONLY " : "",
53995 convert_reiserfs(sb) ? "CONV " : "",
53996- atomic_read(&r->s_generation_counter),
53997+ atomic_read_unchecked(&r->s_generation_counter),
53998 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
53999 SF(s_do_balance), SF(s_unneeded_left_neighbor),
54000 SF(s_good_search_by_key_reada), SF(s_bmaps),
54001@@ -309,6 +309,8 @@ static int show_journal(struct seq_file *m, struct super_block *sb)
54002 struct journal_params *jp = &rs->s_v1.s_journal;
54003 char b[BDEVNAME_SIZE];
54004
54005+ pax_track_stack();
54006+
54007 seq_printf(m, /* on-disk fields */
54008 "jp_journal_1st_block: \t%i\n"
54009 "jp_journal_dev: \t%s[%x]\n"
54010diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
54011index d036ee5..4c7dca1 100644
54012--- a/fs/reiserfs/stree.c
54013+++ b/fs/reiserfs/stree.c
54014@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
54015 int iter = 0;
54016 #endif
54017
54018+ pax_track_stack();
54019+
54020 BUG_ON(!th->t_trans_id);
54021
54022 init_tb_struct(th, &s_del_balance, sb, path,
54023@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
54024 int retval;
54025 int quota_cut_bytes = 0;
54026
54027+ pax_track_stack();
54028+
54029 BUG_ON(!th->t_trans_id);
54030
54031 le_key2cpu_key(&cpu_key, key);
54032@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
54033 int quota_cut_bytes;
54034 loff_t tail_pos = 0;
54035
54036+ pax_track_stack();
54037+
54038 BUG_ON(!th->t_trans_id);
54039
54040 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
54041@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
54042 int retval;
54043 int fs_gen;
54044
54045+ pax_track_stack();
54046+
54047 BUG_ON(!th->t_trans_id);
54048
54049 fs_gen = get_generation(inode->i_sb);
54050@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
54051 int fs_gen = 0;
54052 int quota_bytes = 0;
54053
54054+ pax_track_stack();
54055+
54056 BUG_ON(!th->t_trans_id);
54057
54058 if (inode) { /* Do we count quotas for item? */
54059diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
54060index 7cb1285..c726cd0 100644
54061--- a/fs/reiserfs/super.c
54062+++ b/fs/reiserfs/super.c
54063@@ -916,6 +916,8 @@ static int reiserfs_parse_options(struct super_block *s, char *options, /* strin
54064 {.option_name = NULL}
54065 };
54066
54067+ pax_track_stack();
54068+
54069 *blocks = 0;
54070 if (!options || !*options)
54071 /* use default configuration: create tails, journaling on, no
54072diff --git a/fs/select.c b/fs/select.c
54073index fd38ce2..f5381b8 100644
54074--- a/fs/select.c
54075+++ b/fs/select.c
54076@@ -20,6 +20,7 @@
54077 #include <linux/module.h>
54078 #include <linux/slab.h>
54079 #include <linux/poll.h>
54080+#include <linux/security.h>
54081 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
54082 #include <linux/file.h>
54083 #include <linux/fdtable.h>
54084@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
54085 int retval, i, timed_out = 0;
54086 unsigned long slack = 0;
54087
54088+ pax_track_stack();
54089+
54090 rcu_read_lock();
54091 retval = max_select_fd(n, fds);
54092 rcu_read_unlock();
54093@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
54094 /* Allocate small arguments on the stack to save memory and be faster */
54095 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
54096
54097+ pax_track_stack();
54098+
54099 ret = -EINVAL;
54100 if (n < 0)
54101 goto out_nofds;
54102@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
54103 struct poll_list *walk = head;
54104 unsigned long todo = nfds;
54105
54106+ pax_track_stack();
54107+
54108+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
54109 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
54110 return -EINVAL;
54111
54112diff --git a/fs/seq_file.c b/fs/seq_file.c
54113index eae7d9d..679f099 100644
54114--- a/fs/seq_file.c
54115+++ b/fs/seq_file.c
54116@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54117 return 0;
54118 }
54119 if (!m->buf) {
54120- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54121+ m->size = PAGE_SIZE;
54122+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54123 if (!m->buf)
54124 return -ENOMEM;
54125 }
54126@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, loff_t offset)
54127 Eoverflow:
54128 m->op->stop(m, p);
54129 kfree(m->buf);
54130- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54131+ m->size <<= 1;
54132+ m->buf = kmalloc(m->size, GFP_KERNEL);
54133 return !m->buf ? -ENOMEM : -EAGAIN;
54134 }
54135
54136@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54137 m->version = file->f_version;
54138 /* grab buffer if we didn't have one */
54139 if (!m->buf) {
54140- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
54141+ m->size = PAGE_SIZE;
54142+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
54143 if (!m->buf)
54144 goto Enomem;
54145 }
54146@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
54147 goto Fill;
54148 m->op->stop(m, p);
54149 kfree(m->buf);
54150- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
54151+ m->size <<= 1;
54152+ m->buf = kmalloc(m->size, GFP_KERNEL);
54153 if (!m->buf)
54154 goto Enomem;
54155 m->count = 0;
54156@@ -551,7 +555,7 @@ static void single_stop(struct seq_file *p, void *v)
54157 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
54158 void *data)
54159 {
54160- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
54161+ seq_operations_no_const *op = kmalloc(sizeof(*op), GFP_KERNEL);
54162 int res = -ENOMEM;
54163
54164 if (op) {
54165diff --git a/fs/smbfs/proc.c b/fs/smbfs/proc.c
54166index 71c29b6..54694dd 100644
54167--- a/fs/smbfs/proc.c
54168+++ b/fs/smbfs/proc.c
54169@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *server, struct smb_nls_codepage *cp)
54170
54171 out:
54172 if (server->local_nls != NULL && server->remote_nls != NULL)
54173- server->ops->convert = convert_cp;
54174+ *(void **)&server->ops->convert = convert_cp;
54175 else
54176- server->ops->convert = convert_memcpy;
54177+ *(void **)&server->ops->convert = convert_memcpy;
54178
54179 smb_unlock_server(server);
54180 return n;
54181@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server, struct smb_conn_opt *opt)
54182
54183 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
54184 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
54185- server->ops->getattr = smb_proc_getattr_core;
54186+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
54187 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
54188- server->ops->getattr = smb_proc_getattr_ff;
54189+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
54190 }
54191
54192 /* Decode server capabilities */
54193@@ -3439,7 +3439,7 @@ out:
54194 static void
54195 install_ops(struct smb_ops *dst, struct smb_ops *src)
54196 {
54197- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54198+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
54199 }
54200
54201 /* < LANMAN2 */
54202diff --git a/fs/smbfs/symlink.c b/fs/smbfs/symlink.c
54203index 00b2909..2ace383 100644
54204--- a/fs/smbfs/symlink.c
54205+++ b/fs/smbfs/symlink.c
54206@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dentry *dentry, struct nameidata *nd)
54207
54208 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
54209 {
54210- char *s = nd_get_link(nd);
54211+ const char *s = nd_get_link(nd);
54212 if (!IS_ERR(s))
54213 __putname(s);
54214 }
54215diff --git a/fs/splice.c b/fs/splice.c
54216index bb92b7c..5aa72b0 100644
54217--- a/fs/splice.c
54218+++ b/fs/splice.c
54219@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54220 pipe_lock(pipe);
54221
54222 for (;;) {
54223- if (!pipe->readers) {
54224+ if (!atomic_read(&pipe->readers)) {
54225 send_sig(SIGPIPE, current, 0);
54226 if (!ret)
54227 ret = -EPIPE;
54228@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
54229 do_wakeup = 0;
54230 }
54231
54232- pipe->waiting_writers++;
54233+ atomic_inc(&pipe->waiting_writers);
54234 pipe_wait(pipe);
54235- pipe->waiting_writers--;
54236+ atomic_dec(&pipe->waiting_writers);
54237 }
54238
54239 pipe_unlock(pipe);
54240@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *in, loff_t *ppos,
54241 .spd_release = spd_release_page,
54242 };
54243
54244+ pax_track_stack();
54245+
54246 index = *ppos >> PAGE_CACHE_SHIFT;
54247 loff = *ppos & ~PAGE_CACHE_MASK;
54248 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54249@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
54250 old_fs = get_fs();
54251 set_fs(get_ds());
54252 /* The cast to a user pointer is valid due to the set_fs() */
54253- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
54254+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
54255 set_fs(old_fs);
54256
54257 return res;
54258@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file *file, const char *buf, size_t count,
54259 old_fs = get_fs();
54260 set_fs(get_ds());
54261 /* The cast to a user pointer is valid due to the set_fs() */
54262- res = vfs_write(file, (const char __user *)buf, count, &pos);
54263+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
54264 set_fs(old_fs);
54265
54266 return res;
54267@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54268 .spd_release = spd_release_page,
54269 };
54270
54271+ pax_track_stack();
54272+
54273 index = *ppos >> PAGE_CACHE_SHIFT;
54274 offset = *ppos & ~PAGE_CACHE_MASK;
54275 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
54276@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
54277 goto err;
54278
54279 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
54280- vec[i].iov_base = (void __user *) page_address(page);
54281+ vec[i].iov_base = (__force void __user *) page_address(page);
54282 vec[i].iov_len = this_len;
54283 pages[i] = page;
54284 spd.nr_pages++;
54285@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
54286 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
54287 {
54288 while (!pipe->nrbufs) {
54289- if (!pipe->writers)
54290+ if (!atomic_read(&pipe->writers))
54291 return 0;
54292
54293- if (!pipe->waiting_writers && sd->num_spliced)
54294+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
54295 return 0;
54296
54297 if (sd->flags & SPLICE_F_NONBLOCK)
54298@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
54299 * out of the pipe right after the splice_to_pipe(). So set
54300 * PIPE_READERS appropriately.
54301 */
54302- pipe->readers = 1;
54303+ atomic_set(&pipe->readers, 1);
54304
54305 current->splice_pipe = pipe;
54306 }
54307@@ -1593,6 +1597,8 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov,
54308 .spd_release = spd_release_page,
54309 };
54310
54311+ pax_track_stack();
54312+
54313 pipe = pipe_info(file->f_path.dentry->d_inode);
54314 if (!pipe)
54315 return -EBADF;
54316@@ -1701,9 +1707,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54317 ret = -ERESTARTSYS;
54318 break;
54319 }
54320- if (!pipe->writers)
54321+ if (!atomic_read(&pipe->writers))
54322 break;
54323- if (!pipe->waiting_writers) {
54324+ if (!atomic_read(&pipe->waiting_writers)) {
54325 if (flags & SPLICE_F_NONBLOCK) {
54326 ret = -EAGAIN;
54327 break;
54328@@ -1735,7 +1741,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54329 pipe_lock(pipe);
54330
54331 while (pipe->nrbufs >= PIPE_BUFFERS) {
54332- if (!pipe->readers) {
54333+ if (!atomic_read(&pipe->readers)) {
54334 send_sig(SIGPIPE, current, 0);
54335 ret = -EPIPE;
54336 break;
54337@@ -1748,9 +1754,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
54338 ret = -ERESTARTSYS;
54339 break;
54340 }
54341- pipe->waiting_writers++;
54342+ atomic_inc(&pipe->waiting_writers);
54343 pipe_wait(pipe);
54344- pipe->waiting_writers--;
54345+ atomic_dec(&pipe->waiting_writers);
54346 }
54347
54348 pipe_unlock(pipe);
54349@@ -1786,14 +1792,14 @@ retry:
54350 pipe_double_lock(ipipe, opipe);
54351
54352 do {
54353- if (!opipe->readers) {
54354+ if (!atomic_read(&opipe->readers)) {
54355 send_sig(SIGPIPE, current, 0);
54356 if (!ret)
54357 ret = -EPIPE;
54358 break;
54359 }
54360
54361- if (!ipipe->nrbufs && !ipipe->writers)
54362+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
54363 break;
54364
54365 /*
54366@@ -1893,7 +1899,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54367 pipe_double_lock(ipipe, opipe);
54368
54369 do {
54370- if (!opipe->readers) {
54371+ if (!atomic_read(&opipe->readers)) {
54372 send_sig(SIGPIPE, current, 0);
54373 if (!ret)
54374 ret = -EPIPE;
54375@@ -1938,7 +1944,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
54376 * return EAGAIN if we have the potential of some data in the
54377 * future, otherwise just return 0
54378 */
54379- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
54380+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
54381 ret = -EAGAIN;
54382
54383 pipe_unlock(ipipe);
54384diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
54385index e020183..18d64b4 100644
54386--- a/fs/sysfs/dir.c
54387+++ b/fs/sysfs/dir.c
54388@@ -678,6 +678,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
54389 struct sysfs_dirent *sd;
54390 int rc;
54391
54392+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
54393+ const char *parent_name = parent_sd->s_name;
54394+
54395+ mode = S_IFDIR | S_IRWXU;
54396+
54397+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
54398+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
54399+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
54400+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
54401+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
54402+#endif
54403+
54404 /* allocate */
54405 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
54406 if (!sd)
54407diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
54408index 7118a38..70af853 100644
54409--- a/fs/sysfs/file.c
54410+++ b/fs/sysfs/file.c
54411@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
54412
54413 struct sysfs_open_dirent {
54414 atomic_t refcnt;
54415- atomic_t event;
54416+ atomic_unchecked_t event;
54417 wait_queue_head_t poll;
54418 struct list_head buffers; /* goes through sysfs_buffer.list */
54419 };
54420@@ -53,7 +53,7 @@ struct sysfs_buffer {
54421 size_t count;
54422 loff_t pos;
54423 char * page;
54424- struct sysfs_ops * ops;
54425+ const struct sysfs_ops * ops;
54426 struct mutex mutex;
54427 int needs_read_fill;
54428 int event;
54429@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54430 {
54431 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54432 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54433- struct sysfs_ops * ops = buffer->ops;
54434+ const struct sysfs_ops * ops = buffer->ops;
54435 int ret = 0;
54436 ssize_t count;
54437
54438@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
54439 if (!sysfs_get_active_two(attr_sd))
54440 return -ENODEV;
54441
54442- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
54443+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
54444 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
54445
54446 sysfs_put_active_two(attr_sd);
54447@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentry, struct sysfs_buffer * buffer, size_t
54448 {
54449 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
54450 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54451- struct sysfs_ops * ops = buffer->ops;
54452+ const struct sysfs_ops * ops = buffer->ops;
54453 int rc;
54454
54455 /* need attr_sd for attr and ops, its parent for kobj */
54456@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
54457 return -ENOMEM;
54458
54459 atomic_set(&new_od->refcnt, 0);
54460- atomic_set(&new_od->event, 1);
54461+ atomic_set_unchecked(&new_od->event, 1);
54462 init_waitqueue_head(&new_od->poll);
54463 INIT_LIST_HEAD(&new_od->buffers);
54464 goto retry;
54465@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
54466 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
54467 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
54468 struct sysfs_buffer *buffer;
54469- struct sysfs_ops *ops;
54470+ const struct sysfs_ops *ops;
54471 int error = -EACCES;
54472 char *p;
54473
54474@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
54475
54476 sysfs_put_active_two(attr_sd);
54477
54478- if (buffer->event != atomic_read(&od->event))
54479+ if (buffer->event != atomic_read_unchecked(&od->event))
54480 goto trigger;
54481
54482 return DEFAULT_POLLMASK;
54483@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
54484
54485 od = sd->s_attr.open;
54486 if (od) {
54487- atomic_inc(&od->event);
54488+ atomic_inc_unchecked(&od->event);
54489 wake_up_interruptible(&od->poll);
54490 }
54491
54492diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
54493index c5081ad..342ea86 100644
54494--- a/fs/sysfs/symlink.c
54495+++ b/fs/sysfs/symlink.c
54496@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
54497
54498 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
54499 {
54500- char *page = nd_get_link(nd);
54501+ const char *page = nd_get_link(nd);
54502 if (!IS_ERR(page))
54503 free_page((unsigned long)page);
54504 }
54505diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
54506index 1e06853..b06d325 100644
54507--- a/fs/udf/balloc.c
54508+++ b/fs/udf/balloc.c
54509@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
54510
54511 mutex_lock(&sbi->s_alloc_mutex);
54512 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54513- if (bloc->logicalBlockNum < 0 ||
54514- (bloc->logicalBlockNum + count) >
54515- partmap->s_partition_len) {
54516+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54517 udf_debug("%d < %d || %d + %d > %d\n",
54518 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
54519 count, partmap->s_partition_len);
54520@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct super_block *sb,
54521
54522 mutex_lock(&sbi->s_alloc_mutex);
54523 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
54524- if (bloc->logicalBlockNum < 0 ||
54525- (bloc->logicalBlockNum + count) >
54526- partmap->s_partition_len) {
54527+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
54528 udf_debug("%d < %d || %d + %d > %d\n",
54529 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
54530 partmap->s_partition_len);
54531diff --git a/fs/udf/inode.c b/fs/udf/inode.c
54532index 6d24c2c..fff470f 100644
54533--- a/fs/udf/inode.c
54534+++ b/fs/udf/inode.c
54535@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
54536 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
54537 int lastblock = 0;
54538
54539+ pax_track_stack();
54540+
54541 prev_epos.offset = udf_file_entry_alloc_offset(inode);
54542 prev_epos.block = iinfo->i_location;
54543 prev_epos.bh = NULL;
54544diff --git a/fs/udf/misc.c b/fs/udf/misc.c
54545index 9215700..bf1f68e 100644
54546--- a/fs/udf/misc.c
54547+++ b/fs/udf/misc.c
54548@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
54549
54550 u8 udf_tag_checksum(const struct tag *t)
54551 {
54552- u8 *data = (u8 *)t;
54553+ const u8 *data = (const u8 *)t;
54554 u8 checksum = 0;
54555 int i;
54556 for (i = 0; i < sizeof(struct tag); ++i)
54557diff --git a/fs/utimes.c b/fs/utimes.c
54558index e4c75db..b4df0e0 100644
54559--- a/fs/utimes.c
54560+++ b/fs/utimes.c
54561@@ -1,6 +1,7 @@
54562 #include <linux/compiler.h>
54563 #include <linux/file.h>
54564 #include <linux/fs.h>
54565+#include <linux/security.h>
54566 #include <linux/linkage.h>
54567 #include <linux/mount.h>
54568 #include <linux/namei.h>
54569@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
54570 goto mnt_drop_write_and_out;
54571 }
54572 }
54573+
54574+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
54575+ error = -EACCES;
54576+ goto mnt_drop_write_and_out;
54577+ }
54578+
54579 mutex_lock(&inode->i_mutex);
54580 error = notify_change(path->dentry, &newattrs);
54581 mutex_unlock(&inode->i_mutex);
54582diff --git a/fs/xattr.c b/fs/xattr.c
54583index 6d4f6d3..cda3958 100644
54584--- a/fs/xattr.c
54585+++ b/fs/xattr.c
54586@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
54587 * Extended attribute SET operations
54588 */
54589 static long
54590-setxattr(struct dentry *d, const char __user *name, const void __user *value,
54591+setxattr(struct path *path, const char __user *name, const void __user *value,
54592 size_t size, int flags)
54593 {
54594 int error;
54595@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
54596 return PTR_ERR(kvalue);
54597 }
54598
54599- error = vfs_setxattr(d, kname, kvalue, size, flags);
54600+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
54601+ error = -EACCES;
54602+ goto out;
54603+ }
54604+
54605+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
54606+out:
54607 kfree(kvalue);
54608 return error;
54609 }
54610@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __user *, pathname,
54611 return error;
54612 error = mnt_want_write(path.mnt);
54613 if (!error) {
54614- error = setxattr(path.dentry, name, value, size, flags);
54615+ error = setxattr(&path, name, value, size, flags);
54616 mnt_drop_write(path.mnt);
54617 }
54618 path_put(&path);
54619@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
54620 return error;
54621 error = mnt_want_write(path.mnt);
54622 if (!error) {
54623- error = setxattr(path.dentry, name, value, size, flags);
54624+ error = setxattr(&path, name, value, size, flags);
54625 mnt_drop_write(path.mnt);
54626 }
54627 path_put(&path);
54628@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
54629 const void __user *,value, size_t, size, int, flags)
54630 {
54631 struct file *f;
54632- struct dentry *dentry;
54633 int error = -EBADF;
54634
54635 f = fget(fd);
54636 if (!f)
54637 return error;
54638- dentry = f->f_path.dentry;
54639- audit_inode(NULL, dentry);
54640+ audit_inode(NULL, f->f_path.dentry);
54641 error = mnt_want_write_file(f);
54642 if (!error) {
54643- error = setxattr(dentry, name, value, size, flags);
54644+ error = setxattr(&f->f_path, name, value, size, flags);
54645 mnt_drop_write(f->f_path.mnt);
54646 }
54647 fput(f);
54648diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
54649index c6ad7c7..f2847a7 100644
54650--- a/fs/xattr_acl.c
54651+++ b/fs/xattr_acl.c
54652@@ -17,8 +17,8 @@
54653 struct posix_acl *
54654 posix_acl_from_xattr(const void *value, size_t size)
54655 {
54656- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
54657- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
54658+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
54659+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
54660 int count;
54661 struct posix_acl *acl;
54662 struct posix_acl_entry *acl_e;
54663diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
54664index 942362f..88f96f5 100644
54665--- a/fs/xfs/linux-2.6/xfs_ioctl.c
54666+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
54667@@ -134,7 +134,7 @@ xfs_find_handle(
54668 }
54669
54670 error = -EFAULT;
54671- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
54672+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
54673 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
54674 goto out_put;
54675
54676@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
54677 if (IS_ERR(dentry))
54678 return PTR_ERR(dentry);
54679
54680- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
54681+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
54682 if (!kbuf)
54683 goto out_dput;
54684
54685@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
54686 xfs_mount_t *mp,
54687 void __user *arg)
54688 {
54689- xfs_fsop_geom_t fsgeo;
54690+ xfs_fsop_geom_t fsgeo;
54691 int error;
54692
54693 error = xfs_fs_geometry(mp, &fsgeo, 3);
54694diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
54695index bad485a..479bd32 100644
54696--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
54697+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
54698@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
54699 xfs_fsop_geom_t fsgeo;
54700 int error;
54701
54702+ memset(&fsgeo, 0, sizeof(fsgeo));
54703 error = xfs_fs_geometry(mp, &fsgeo, 3);
54704 if (error)
54705 return -error;
54706diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
54707index 1f3b4b8..6102f6d 100644
54708--- a/fs/xfs/linux-2.6/xfs_iops.c
54709+++ b/fs/xfs/linux-2.6/xfs_iops.c
54710@@ -468,7 +468,7 @@ xfs_vn_put_link(
54711 struct nameidata *nd,
54712 void *p)
54713 {
54714- char *s = nd_get_link(nd);
54715+ const char *s = nd_get_link(nd);
54716
54717 if (!IS_ERR(s))
54718 kfree(s);
54719diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
54720index 8971fb0..5fc1eb2 100644
54721--- a/fs/xfs/xfs_bmap.c
54722+++ b/fs/xfs/xfs_bmap.c
54723@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
54724 int nmap,
54725 int ret_nmap);
54726 #else
54727-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
54728+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
54729 #endif /* DEBUG */
54730
54731 #if defined(XFS_RW_TRACE)
54732diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
54733index e89734e..5e84d8d 100644
54734--- a/fs/xfs/xfs_dir2_sf.c
54735+++ b/fs/xfs/xfs_dir2_sf.c
54736@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
54737 }
54738
54739 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
54740- if (filldir(dirent, sfep->name, sfep->namelen,
54741+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
54742+ char name[sfep->namelen];
54743+ memcpy(name, sfep->name, sfep->namelen);
54744+ if (filldir(dirent, name, sfep->namelen,
54745+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
54746+ *offset = off & 0x7fffffff;
54747+ return 0;
54748+ }
54749+ } else if (filldir(dirent, sfep->name, sfep->namelen,
54750 off & 0x7fffffff, ino, DT_UNKNOWN)) {
54751 *offset = off & 0x7fffffff;
54752 return 0;
54753diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
54754index 8f32f50..b6a41e8 100644
54755--- a/fs/xfs/xfs_vnodeops.c
54756+++ b/fs/xfs/xfs_vnodeops.c
54757@@ -564,13 +564,18 @@ xfs_readlink(
54758
54759 xfs_ilock(ip, XFS_ILOCK_SHARED);
54760
54761- ASSERT((ip->i_d.di_mode & S_IFMT) == S_IFLNK);
54762- ASSERT(ip->i_d.di_size <= MAXPATHLEN);
54763-
54764 pathlen = ip->i_d.di_size;
54765 if (!pathlen)
54766 goto out;
54767
54768+ if (pathlen > MAXPATHLEN) {
54769+ xfs_fs_cmn_err(CE_ALERT, mp, "%s: inode (%llu) symlink length (%d) too long",
54770+ __func__, (unsigned long long)ip->i_ino, pathlen);
54771+ ASSERT(0);
54772+ error = XFS_ERROR(EFSCORRUPTED);
54773+ goto out;
54774+ }
54775+
54776 if (ip->i_df.if_flags & XFS_IFINLINE) {
54777 memcpy(link, ip->i_df.if_u1.if_data, pathlen);
54778 link[pathlen] = '\0';
54779diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
54780new file mode 100644
54781index 0000000..8cac8cb
54782--- /dev/null
54783+++ b/grsecurity/Kconfig
54784@@ -0,0 +1,1068 @@
54785+#
54786+# grsecurity configuration
54787+#
54788+
54789+menu "Grsecurity"
54790+
54791+config GRKERNSEC
54792+ bool "Grsecurity"
54793+ select CRYPTO
54794+ select CRYPTO_SHA256
54795+ help
54796+ If you say Y here, you will be able to configure many features
54797+ that will enhance the security of your system. It is highly
54798+ recommended that you say Y here and read through the help
54799+ for each option so that you fully understand the features and
54800+ can evaluate their usefulness for your machine.
54801+
54802+choice
54803+ prompt "Security Level"
54804+ depends on GRKERNSEC
54805+ default GRKERNSEC_CUSTOM
54806+
54807+config GRKERNSEC_LOW
54808+ bool "Low"
54809+ select GRKERNSEC_LINK
54810+ select GRKERNSEC_FIFO
54811+ select GRKERNSEC_RANDNET
54812+ select GRKERNSEC_DMESG
54813+ select GRKERNSEC_CHROOT
54814+ select GRKERNSEC_CHROOT_CHDIR
54815+
54816+ help
54817+ If you choose this option, several of the grsecurity options will
54818+ be enabled that will give you greater protection against a number
54819+ of attacks, while assuring that none of your software will have any
54820+ conflicts with the additional security measures. If you run a lot
54821+ of unusual software, or you are having problems with the higher
54822+ security levels, you should say Y here. With this option, the
54823+ following features are enabled:
54824+
54825+ - Linking restrictions
54826+ - FIFO restrictions
54827+ - Restricted dmesg
54828+ - Enforced chdir("/") on chroot
54829+ - Runtime module disabling
54830+
54831+config GRKERNSEC_MEDIUM
54832+ bool "Medium"
54833+ select PAX
54834+ select PAX_EI_PAX
54835+ select PAX_PT_PAX_FLAGS
54836+ select PAX_HAVE_ACL_FLAGS
54837+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54838+ select GRKERNSEC_CHROOT
54839+ select GRKERNSEC_CHROOT_SYSCTL
54840+ select GRKERNSEC_LINK
54841+ select GRKERNSEC_FIFO
54842+ select GRKERNSEC_DMESG
54843+ select GRKERNSEC_RANDNET
54844+ select GRKERNSEC_FORKFAIL
54845+ select GRKERNSEC_TIME
54846+ select GRKERNSEC_SIGNAL
54847+ select GRKERNSEC_CHROOT
54848+ select GRKERNSEC_CHROOT_UNIX
54849+ select GRKERNSEC_CHROOT_MOUNT
54850+ select GRKERNSEC_CHROOT_PIVOT
54851+ select GRKERNSEC_CHROOT_DOUBLE
54852+ select GRKERNSEC_CHROOT_CHDIR
54853+ select GRKERNSEC_CHROOT_MKNOD
54854+ select GRKERNSEC_PROC
54855+ select GRKERNSEC_PROC_USERGROUP
54856+ select PAX_RANDUSTACK
54857+ select PAX_ASLR
54858+ select PAX_RANDMMAP
54859+ select PAX_REFCOUNT if (X86 || SPARC64)
54860+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54861+
54862+ help
54863+ If you say Y here, several features in addition to those included
54864+ in the low additional security level will be enabled. These
54865+ features provide even more security to your system, though in rare
54866+ cases they may be incompatible with very old or poorly written
54867+ software. If you enable this option, make sure that your auth
54868+ service (identd) is running as gid 1001. With this option,
54869+ the following features (in addition to those provided in the
54870+ low additional security level) will be enabled:
54871+
54872+ - Failed fork logging
54873+ - Time change logging
54874+ - Signal logging
54875+ - Deny mounts in chroot
54876+ - Deny double chrooting
54877+ - Deny sysctl writes in chroot
54878+ - Deny mknod in chroot
54879+ - Deny access to abstract AF_UNIX sockets out of chroot
54880+ - Deny pivot_root in chroot
54881+ - Denied reads/writes of /dev/kmem, /dev/mem, and /dev/port
54882+ - /proc restrictions with special GID set to 10 (usually wheel)
54883+ - Address Space Layout Randomization (ASLR)
54884+ - Prevent exploitation of most refcount overflows
54885+ - Bounds checking of copying between the kernel and userland
54886+
54887+config GRKERNSEC_HIGH
54888+ bool "High"
54889+ select GRKERNSEC_LINK
54890+ select GRKERNSEC_FIFO
54891+ select GRKERNSEC_DMESG
54892+ select GRKERNSEC_FORKFAIL
54893+ select GRKERNSEC_TIME
54894+ select GRKERNSEC_SIGNAL
54895+ select GRKERNSEC_CHROOT
54896+ select GRKERNSEC_CHROOT_SHMAT
54897+ select GRKERNSEC_CHROOT_UNIX
54898+ select GRKERNSEC_CHROOT_MOUNT
54899+ select GRKERNSEC_CHROOT_FCHDIR
54900+ select GRKERNSEC_CHROOT_PIVOT
54901+ select GRKERNSEC_CHROOT_DOUBLE
54902+ select GRKERNSEC_CHROOT_CHDIR
54903+ select GRKERNSEC_CHROOT_MKNOD
54904+ select GRKERNSEC_CHROOT_CAPS
54905+ select GRKERNSEC_CHROOT_SYSCTL
54906+ select GRKERNSEC_CHROOT_FINDTASK
54907+ select GRKERNSEC_SYSFS_RESTRICT
54908+ select GRKERNSEC_PROC
54909+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
54910+ select GRKERNSEC_HIDESYM
54911+ select GRKERNSEC_BRUTE
54912+ select GRKERNSEC_PROC_USERGROUP
54913+ select GRKERNSEC_KMEM
54914+ select GRKERNSEC_RESLOG
54915+ select GRKERNSEC_RANDNET
54916+ select GRKERNSEC_PROC_ADD
54917+ select GRKERNSEC_CHROOT_CHMOD
54918+ select GRKERNSEC_CHROOT_NICE
54919+ select GRKERNSEC_SETXID
54920+ select GRKERNSEC_AUDIT_MOUNT
54921+ select GRKERNSEC_MODHARDEN if (MODULES)
54922+ select GRKERNSEC_HARDEN_PTRACE
54923+ select GRKERNSEC_PTRACE_READEXEC
54924+ select GRKERNSEC_VM86 if (X86_32)
54925+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
54926+ select PAX
54927+ select PAX_RANDUSTACK
54928+ select PAX_ASLR
54929+ select PAX_RANDMMAP
54930+ select PAX_NOEXEC
54931+ select PAX_MPROTECT
54932+ select PAX_EI_PAX
54933+ select PAX_PT_PAX_FLAGS
54934+ select PAX_HAVE_ACL_FLAGS
54935+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
54936+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
54937+ select PAX_RANDKSTACK if (X86_TSC && X86)
54938+ select PAX_SEGMEXEC if (X86_32)
54939+ select PAX_PAGEEXEC
54940+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
54941+ select PAX_EMUTRAMP if (PARISC)
54942+ select PAX_EMUSIGRT if (PARISC)
54943+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
54944+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
54945+ select PAX_REFCOUNT if (X86 || SPARC64)
54946+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
54947+ help
54948+ If you say Y here, many of the features of grsecurity will be
54949+ enabled, which will protect you against many kinds of attacks
54950+ against your system. The heightened security comes at a cost
54951+ of an increased chance of incompatibilities with rare software
54952+ on your machine. Since this security level enables PaX, you should
54953+ view <http://pax.grsecurity.net> and read about the PaX
54954+ project. While you are there, download chpax and run it on
54955+ binaries that cause problems with PaX. Also remember that
54956+ since the /proc restrictions are enabled, you must run your
54957+ identd as gid 1001. This security level enables the following
54958+ features in addition to those listed in the low and medium
54959+ security levels:
54960+
54961+ - Additional /proc restrictions
54962+ - Chmod restrictions in chroot
54963+ - No signals, ptrace, or viewing of processes outside of chroot
54964+ - Capability restrictions in chroot
54965+ - Deny fchdir out of chroot
54966+ - Priority restrictions in chroot
54967+ - Segmentation-based implementation of PaX
54968+ - Mprotect restrictions
54969+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
54970+ - Kernel stack randomization
54971+ - Mount/unmount/remount logging
54972+ - Kernel symbol hiding
54973+ - Hardening of module auto-loading
54974+ - Ptrace restrictions
54975+ - Restricted vm86 mode
54976+ - Restricted sysfs/debugfs
54977+ - Active kernel exploit response
54978+
54979+config GRKERNSEC_CUSTOM
54980+ bool "Custom"
54981+ help
54982+ If you say Y here, you will be able to configure every grsecurity
54983+ option, which allows you to enable many more features that aren't
54984+ covered in the basic security levels. These additional features
54985+ include TPE, socket restrictions, and the sysctl system for
54986+ grsecurity. It is advised that you read through the help for
54987+ each option to determine its usefulness in your situation.
54988+
54989+endchoice
54990+
54991+menu "Address Space Protection"
54992+depends on GRKERNSEC
54993+
54994+config GRKERNSEC_KMEM
54995+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
54996+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
54997+ help
54998+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
54999+ be written to or read from to modify or leak the contents of the running
55000+ kernel. /dev/port will also not be allowed to be opened. If you have module
55001+ support disabled, enabling this will close up four ways that are
55002+ currently used to insert malicious code into the running kernel.
55003+ Even with all these features enabled, we still highly recommend that
55004+ you use the RBAC system, as it is still possible for an attacker to
55005+ modify the running kernel through privileged I/O granted by ioperm/iopl.
55006+ If you are not using XFree86, you may be able to stop this additional
55007+ case by enabling the 'Disable privileged I/O' option. Though nothing
55008+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
55009+ but only to video memory, which is the only writing we allow in this
55010+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
55011+ not be allowed to mprotect it with PROT_WRITE later.
55012+ It is highly recommended that you say Y here if you meet all the
55013+ conditions above.
55014+
55015+config GRKERNSEC_VM86
55016+ bool "Restrict VM86 mode"
55017+ depends on X86_32
55018+
55019+ help
55020+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
55021+ make use of a special execution mode on 32bit x86 processors called
55022+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
55023+ video cards and will still work with this option enabled. The purpose
55024+ of the option is to prevent exploitation of emulation errors in
55025+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
55026+ Nearly all users should be able to enable this option.
55027+
55028+config GRKERNSEC_IO
55029+ bool "Disable privileged I/O"
55030+ depends on X86
55031+ select RTC_CLASS
55032+ select RTC_INTF_DEV
55033+ select RTC_DRV_CMOS
55034+
55035+ help
55036+ If you say Y here, all ioperm and iopl calls will return an error.
55037+ Ioperm and iopl can be used to modify the running kernel.
55038+ Unfortunately, some programs need this access to operate properly,
55039+ the most notable of which are XFree86 and hwclock. hwclock can be
55040+ remedied by having RTC support in the kernel, so real-time
55041+ clock support is enabled if this option is enabled, to ensure
55042+ that hwclock operates correctly. XFree86 still will not
55043+ operate correctly with this option enabled, so DO NOT CHOOSE Y
55044+ IF YOU USE XFree86. If you use XFree86 and you still want to
55045+ protect your kernel against modification, use the RBAC system.
55046+
55047+config GRKERNSEC_PROC_MEMMAP
55048+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
55049+ default y if (PAX_NOEXEC || PAX_ASLR)
55050+ depends on PAX_NOEXEC || PAX_ASLR
55051+ help
55052+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
55053+ give no information about the addresses of its mappings if
55054+ PaX features that rely on random addresses are enabled on the task.
55055+ If you use PaX it is greatly recommended that you say Y here as it
55056+ closes up a hole that makes the full ASLR useless for suid
55057+ binaries.
55058+
55059+config GRKERNSEC_BRUTE
55060+ bool "Deter exploit bruteforcing"
55061+ help
55062+ If you say Y here, attempts to bruteforce exploits against forking
55063+ daemons such as apache or sshd, as well as against suid/sgid binaries
55064+ will be deterred. When a child of a forking daemon is killed by PaX
55065+ or crashes due to an illegal instruction or other suspicious signal,
55066+ the parent process will be delayed 30 seconds upon every subsequent
55067+ fork until the administrator is able to assess the situation and
55068+ restart the daemon.
55069+ In the suid/sgid case, the attempt is logged, the user has all their
55070+ processes terminated, and they are prevented from executing any further
55071+ processes for 15 minutes.
55072+ It is recommended that you also enable signal logging in the auditing
55073+ section so that logs are generated when a process triggers a suspicious
55074+ signal.
55075+ If the sysctl option is enabled, a sysctl option with name
55076+ "deter_bruteforce" is created.
55077+
55078+config GRKERNSEC_MODHARDEN
55079+ bool "Harden module auto-loading"
55080+ depends on MODULES
55081+ help
55082+ If you say Y here, module auto-loading in response to use of some
55083+ feature implemented by an unloaded module will be restricted to
55084+ root users. Enabling this option helps defend against attacks
55085+ by unprivileged users who abuse the auto-loading behavior to
55086+ cause a vulnerable module to load that is then exploited.
55087+
55088+ If this option prevents a legitimate use of auto-loading for a
55089+ non-root user, the administrator can execute modprobe manually
55090+ with the exact name of the module mentioned in the alert log.
55091+ Alternatively, the administrator can add the module to the list
55092+ of modules loaded at boot by modifying init scripts.
55093+
55094+ Modification of init scripts will most likely be needed on
55095+ Ubuntu servers with encrypted home directory support enabled,
55096+ as the first non-root user logging in will cause the ecb(aes),
55097+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
55098+
55099+config GRKERNSEC_HIDESYM
55100+ bool "Hide kernel symbols"
55101+ help
55102+ If you say Y here, getting information on loaded modules, and
55103+ displaying all kernel symbols through a syscall will be restricted
55104+ to users with CAP_SYS_MODULE. For software compatibility reasons,
55105+ /proc/kallsyms will be restricted to the root user. The RBAC
55106+ system can hide that entry even from root.
55107+
55108+ This option also prevents leaking of kernel addresses through
55109+ several /proc entries.
55110+
55111+ Note that this option is only effective provided the following
55112+ conditions are met:
55113+ 1) The kernel using grsecurity is not precompiled by some distribution
55114+ 2) You have also enabled GRKERNSEC_DMESG
55115+ 3) You are using the RBAC system and hiding other files such as your
55116+ kernel image and System.map. Alternatively, enabling this option
55117+ causes the permissions on /boot, /lib/modules, and the kernel
55118+ source directory to change at compile time to prevent
55119+ reading by non-root users.
55120+ If the above conditions are met, this option will aid in providing a
55121+ useful protection against local kernel exploitation of overflows
55122+ and arbitrary read/write vulnerabilities.
55123+
55124+config GRKERNSEC_KERN_LOCKOUT
55125+ bool "Active kernel exploit response"
55126+ depends on X86 || ARM || PPC || SPARC
55127+ help
55128+ If you say Y here, when a PaX alert is triggered due to suspicious
55129+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
55130+ or an OOPs occurs due to bad memory accesses, instead of just
55131+ terminating the offending process (and potentially allowing
55132+ a subsequent exploit from the same user), we will take one of two
55133+ actions:
55134+ If the user was root, we will panic the system
55135+ If the user was non-root, we will log the attempt, terminate
55136+ all processes owned by the user, then prevent them from creating
55137+ any new processes until the system is restarted
55138+ This deters repeated kernel exploitation/bruteforcing attempts
55139+ and is useful for later forensics.
55140+
55141+endmenu
55142+menu "Role Based Access Control Options"
55143+depends on GRKERNSEC
55144+
55145+config GRKERNSEC_RBAC_DEBUG
55146+ bool
55147+
55148+config GRKERNSEC_NO_RBAC
55149+ bool "Disable RBAC system"
55150+ help
55151+ If you say Y here, the /dev/grsec device will be removed from the kernel,
55152+ preventing the RBAC system from being enabled. You should only say Y
55153+ here if you have no intention of using the RBAC system, so as to prevent
55154+ an attacker with root access from misusing the RBAC system to hide files
55155+ and processes when loadable module support and /dev/[k]mem have been
55156+ locked down.
55157+
55158+config GRKERNSEC_ACL_HIDEKERN
55159+ bool "Hide kernel processes"
55160+ help
55161+ If you say Y here, all kernel threads will be hidden to all
55162+ processes but those whose subject has the "view hidden processes"
55163+ flag.
55164+
55165+config GRKERNSEC_ACL_MAXTRIES
55166+ int "Maximum tries before password lockout"
55167+ default 3
55168+ help
55169+ This option enforces the maximum number of times a user can attempt
55170+ to authorize themselves with the grsecurity RBAC system before being
55171+ denied the ability to attempt authorization again for a specified time.
55172+ The lower the number, the harder it will be to brute-force a password.
55173+
55174+config GRKERNSEC_ACL_TIMEOUT
55175+ int "Time to wait after max password tries, in seconds"
55176+ default 30
55177+ help
55178+ This option specifies the time the user must wait after attempting to
55179+ authorize to the RBAC system with the maximum number of invalid
55180+ passwords. The higher the number, the harder it will be to brute-force
55181+ a password.
55182+
55183+endmenu
55184+menu "Filesystem Protections"
55185+depends on GRKERNSEC
55186+
55187+config GRKERNSEC_PROC
55188+ bool "Proc restrictions"
55189+ help
55190+ If you say Y here, the permissions of the /proc filesystem
55191+ will be altered to enhance system security and privacy. You MUST
55192+ choose either a user only restriction or a user and group restriction.
55193+ Depending upon the option you choose, you can either restrict users to
55194+ see only the processes they themselves run, or choose a group that can
55195+ view all processes and files normally restricted to root if you choose
55196+ the "restrict to user only" option. NOTE: If you're running identd as
55197+ a non-root user, you will have to run it as the group you specify here.
55198+
55199+config GRKERNSEC_PROC_USER
55200+ bool "Restrict /proc to user only"
55201+ depends on GRKERNSEC_PROC
55202+ help
55203+ If you say Y here, non-root users will only be able to view their own
55204+ processes, and restricts them from viewing network-related information,
55205+ and viewing kernel symbol and module information.
55206+
55207+config GRKERNSEC_PROC_USERGROUP
55208+ bool "Allow special group"
55209+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
55210+ help
55211+ If you say Y here, you will be able to select a group that will be
55212+ able to view all processes and network-related information. If you've
55213+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
55214+ remain hidden. This option is useful if you want to run identd as
55215+ a non-root user.
55216+
55217+config GRKERNSEC_PROC_GID
55218+ int "GID for special group"
55219+ depends on GRKERNSEC_PROC_USERGROUP
55220+ default 1001
55221+
55222+config GRKERNSEC_PROC_ADD
55223+ bool "Additional restrictions"
55224+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
55225+ help
55226+ If you say Y here, additional restrictions will be placed on
55227+ /proc that keep normal users from viewing device information and
55228+ slabinfo information that could be useful for exploits.
55229+
55230+config GRKERNSEC_LINK
55231+ bool "Linking restrictions"
55232+ help
55233+ If you say Y here, /tmp race exploits will be prevented, since users
55234+ will no longer be able to follow symlinks owned by other users in
55235+ world-writable +t directories (e.g. /tmp), unless the owner of the
55236+ symlink is the owner of the directory. users will also not be
55237+ able to hardlink to files they do not own. If the sysctl option is
55238+ enabled, a sysctl option with name "linking_restrictions" is created.
55239+
55240+config GRKERNSEC_FIFO
55241+ bool "FIFO restrictions"
55242+ help
55243+ If you say Y here, users will not be able to write to FIFOs they don't
55244+ own in world-writable +t directories (e.g. /tmp), unless the owner of
55245+ the FIFO is the same owner of the directory it's held in. If the sysctl
55246+ option is enabled, a sysctl option with name "fifo_restrictions" is
55247+ created.
55248+
55249+config GRKERNSEC_SYSFS_RESTRICT
55250+ bool "Sysfs/debugfs restriction"
55251+ depends on SYSFS
55252+ help
55253+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
55254+ any filesystem normally mounted under it (e.g. debugfs) will be
55255+ mostly accessible only by root. These filesystems generally provide access
55256+ to hardware and debug information that isn't appropriate for unprivileged
55257+ users of the system. Sysfs and debugfs have also become a large source
55258+ of new vulnerabilities, ranging from infoleaks to local compromise.
55259+ There has been very little oversight with an eye toward security involved
55260+ in adding new exporters of information to these filesystems, so their
55261+ use is discouraged.
55262+ For reasons of compatibility, a few directories have been whitelisted
55263+ for access by non-root users:
55264+ /sys/fs/selinux
55265+ /sys/fs/fuse
55266+ /sys/devices/system/cpu
55267+
55268+config GRKERNSEC_ROFS
55269+ bool "Runtime read-only mount protection"
55270+ help
55271+ If you say Y here, a sysctl option with name "romount_protect" will
55272+ be created. By setting this option to 1 at runtime, filesystems
55273+ will be protected in the following ways:
55274+ * No new writable mounts will be allowed
55275+ * Existing read-only mounts won't be able to be remounted read/write
55276+ * Write operations will be denied on all block devices
55277+ This option acts independently of grsec_lock: once it is set to 1,
55278+ it cannot be turned off. Therefore, please be mindful of the resulting
55279+ behavior if this option is enabled in an init script on a read-only
55280+ filesystem. This feature is mainly intended for secure embedded systems.
55281+
55282+config GRKERNSEC_CHROOT
55283+ bool "Chroot jail restrictions"
55284+ help
55285+ If you say Y here, you will be able to choose several options that will
55286+ make breaking out of a chrooted jail much more difficult. If you
55287+ encounter no software incompatibilities with the following options, it
55288+ is recommended that you enable each one.
55289+
55290+config GRKERNSEC_CHROOT_MOUNT
55291+ bool "Deny mounts"
55292+ depends on GRKERNSEC_CHROOT
55293+ help
55294+ If you say Y here, processes inside a chroot will not be able to
55295+ mount or remount filesystems. If the sysctl option is enabled, a
55296+ sysctl option with name "chroot_deny_mount" is created.
55297+
55298+config GRKERNSEC_CHROOT_DOUBLE
55299+ bool "Deny double-chroots"
55300+ depends on GRKERNSEC_CHROOT
55301+ help
55302+ If you say Y here, processes inside a chroot will not be able to chroot
55303+ again outside the chroot. This is a widely used method of breaking
55304+ out of a chroot jail and should not be allowed. If the sysctl
55305+ option is enabled, a sysctl option with name
55306+ "chroot_deny_chroot" is created.
55307+
55308+config GRKERNSEC_CHROOT_PIVOT
55309+ bool "Deny pivot_root in chroot"
55310+ depends on GRKERNSEC_CHROOT
55311+ help
55312+ If you say Y here, processes inside a chroot will not be able to use
55313+ a function called pivot_root() that was introduced in Linux 2.3.41. It
55314+ works similar to chroot in that it changes the root filesystem. This
55315+ function could be misused in a chrooted process to attempt to break out
55316+ of the chroot, and therefore should not be allowed. If the sysctl
55317+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
55318+ created.
55319+
55320+config GRKERNSEC_CHROOT_CHDIR
55321+ bool "Enforce chdir(\"/\") on all chroots"
55322+ depends on GRKERNSEC_CHROOT
55323+ help
55324+ If you say Y here, the current working directory of all newly-chrooted
54325+	  applications will be set to the root directory of the chroot.
55326+ The man page on chroot(2) states:
55327+ Note that this call does not change the current working
55328+ directory, so that `.' can be outside the tree rooted at
55329+ `/'. In particular, the super-user can escape from a
55330+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
55331+
55332+ It is recommended that you say Y here, since it's not known to break
55333+ any software. If the sysctl option is enabled, a sysctl option with
55334+ name "chroot_enforce_chdir" is created.
55335+
55336+config GRKERNSEC_CHROOT_CHMOD
55337+ bool "Deny (f)chmod +s"
55338+ depends on GRKERNSEC_CHROOT
55339+ help
55340+ If you say Y here, processes inside a chroot will not be able to chmod
55341+ or fchmod files to make them have suid or sgid bits. This protects
55342+ against another published method of breaking a chroot. If the sysctl
55343+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
55344+ created.
55345+
55346+config GRKERNSEC_CHROOT_FCHDIR
55347+ bool "Deny fchdir out of chroot"
55348+ depends on GRKERNSEC_CHROOT
55349+ help
55350+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
55351+ to a file descriptor of the chrooting process that points to a directory
55352+ outside the filesystem will be stopped. If the sysctl option
55353+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
55354+
55355+config GRKERNSEC_CHROOT_MKNOD
55356+ bool "Deny mknod"
55357+ depends on GRKERNSEC_CHROOT
55358+ help
55359+ If you say Y here, processes inside a chroot will not be allowed to
55360+ mknod. The problem with using mknod inside a chroot is that it
55361+ would allow an attacker to create a device entry that is the same
55362+ as one on the physical root of your system, which could range from
55363+ anything from the console device to a device for your harddrive (which
55364+ they could then use to wipe the drive or steal data). It is recommended
55365+ that you say Y here, unless you run into software incompatibilities.
55366+ If the sysctl option is enabled, a sysctl option with name
55367+ "chroot_deny_mknod" is created.
55368+
55369+config GRKERNSEC_CHROOT_SHMAT
55370+ bool "Deny shmat() out of chroot"
55371+ depends on GRKERNSEC_CHROOT
55372+ help
55373+ If you say Y here, processes inside a chroot will not be able to attach
55374+ to shared memory segments that were created outside of the chroot jail.
55375+ It is recommended that you say Y here. If the sysctl option is enabled,
55376+ a sysctl option with name "chroot_deny_shmat" is created.
55377+
55378+config GRKERNSEC_CHROOT_UNIX
55379+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
55380+ depends on GRKERNSEC_CHROOT
55381+ help
55382+ If you say Y here, processes inside a chroot will not be able to
55383+ connect to abstract (meaning not belonging to a filesystem) Unix
55384+ domain sockets that were bound outside of a chroot. It is recommended
55385+ that you say Y here. If the sysctl option is enabled, a sysctl option
55386+ with name "chroot_deny_unix" is created.
55387+
55388+config GRKERNSEC_CHROOT_FINDTASK
55389+ bool "Protect outside processes"
55390+ depends on GRKERNSEC_CHROOT
55391+ help
55392+ If you say Y here, processes inside a chroot will not be able to
55393+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
55394+ getsid, or view any process outside of the chroot. If the sysctl
55395+ option is enabled, a sysctl option with name "chroot_findtask" is
55396+ created.
55397+
55398+config GRKERNSEC_CHROOT_NICE
55399+ bool "Restrict priority changes"
55400+ depends on GRKERNSEC_CHROOT
55401+ help
55402+ If you say Y here, processes inside a chroot will not be able to raise
55403+ the priority of processes in the chroot, or alter the priority of
55404+ processes outside the chroot. This provides more security than simply
55405+ removing CAP_SYS_NICE from the process' capability set. If the
55406+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
55407+ is created.
55408+
55409+config GRKERNSEC_CHROOT_SYSCTL
55410+ bool "Deny sysctl writes"
55411+ depends on GRKERNSEC_CHROOT
55412+ help
55413+ If you say Y here, an attacker in a chroot will not be able to
55414+ write to sysctl entries, either by sysctl(2) or through a /proc
55415+ interface. It is strongly recommended that you say Y here. If the
55416+ sysctl option is enabled, a sysctl option with name
55417+ "chroot_deny_sysctl" is created.
55418+
55419+config GRKERNSEC_CHROOT_CAPS
55420+ bool "Capability restrictions"
55421+ depends on GRKERNSEC_CHROOT
55422+ help
55423+ If you say Y here, the capabilities on all processes within a
55424+ chroot jail will be lowered to stop module insertion, raw i/o,
55425+ system and net admin tasks, rebooting the system, modifying immutable
55426+ files, modifying IPC owned by another, and changing the system time.
55427+ This is left an option because it can break some apps. Disable this
55428+ if your chrooted apps are having problems performing those kinds of
55429+ tasks. If the sysctl option is enabled, a sysctl option with
55430+ name "chroot_caps" is created.
55431+
55432+endmenu
55433+menu "Kernel Auditing"
55434+depends on GRKERNSEC
55435+
55436+config GRKERNSEC_AUDIT_GROUP
55437+ bool "Single group for auditing"
55438+ help
55439+ If you say Y here, the exec, chdir, and (un)mount logging features
55440+ will only operate on a group you specify. This option is recommended
55441+ if you only want to watch certain users instead of having a large
55442+ amount of logs from the entire system. If the sysctl option is enabled,
55443+ a sysctl option with name "audit_group" is created.
55444+
55445+config GRKERNSEC_AUDIT_GID
55446+ int "GID for auditing"
55447+ depends on GRKERNSEC_AUDIT_GROUP
55448+ default 1007
55449+
55450+config GRKERNSEC_EXECLOG
55451+ bool "Exec logging"
55452+ help
55453+ If you say Y here, all execve() calls will be logged (since the
55454+ other exec*() calls are frontends to execve(), all execution
55455+ will be logged). Useful for shell-servers that like to keep track
55456+ of their users. If the sysctl option is enabled, a sysctl option with
55457+ name "exec_logging" is created.
55458+ WARNING: This option when enabled will produce a LOT of logs, especially
55459+ on an active system.
55460+
55461+config GRKERNSEC_RESLOG
55462+ bool "Resource logging"
55463+ help
55464+ If you say Y here, all attempts to overstep resource limits will
55465+ be logged with the resource name, the requested size, and the current
55466+ limit. It is highly recommended that you say Y here. If the sysctl
55467+ option is enabled, a sysctl option with name "resource_logging" is
55468+ created. If the RBAC system is enabled, the sysctl value is ignored.
55469+
55470+config GRKERNSEC_CHROOT_EXECLOG
55471+ bool "Log execs within chroot"
55472+ help
55473+ If you say Y here, all executions inside a chroot jail will be logged
55474+ to syslog. This can cause a large amount of logs if certain
55475+ applications (eg. djb's daemontools) are installed on the system, and
55476+ is therefore left as an option. If the sysctl option is enabled, a
55477+ sysctl option with name "chroot_execlog" is created.
55478+
55479+config GRKERNSEC_AUDIT_PTRACE
55480+ bool "Ptrace logging"
55481+ help
55482+ If you say Y here, all attempts to attach to a process via ptrace
55483+ will be logged. If the sysctl option is enabled, a sysctl option
55484+ with name "audit_ptrace" is created.
55485+
55486+config GRKERNSEC_AUDIT_CHDIR
55487+ bool "Chdir logging"
55488+ help
55489+ If you say Y here, all chdir() calls will be logged. If the sysctl
55490+ option is enabled, a sysctl option with name "audit_chdir" is created.
55491+
55492+config GRKERNSEC_AUDIT_MOUNT
55493+ bool "(Un)Mount logging"
55494+ help
55495+ If you say Y here, all mounts and unmounts will be logged. If the
55496+ sysctl option is enabled, a sysctl option with name "audit_mount" is
55497+ created.
55498+
55499+config GRKERNSEC_SIGNAL
55500+ bool "Signal logging"
55501+ help
55502+ If you say Y here, certain important signals will be logged, such as
55503+ SIGSEGV, which will as a result inform you of when a error in a program
55504+ occurred, which in some cases could mean a possible exploit attempt.
55505+ If the sysctl option is enabled, a sysctl option with name
55506+ "signal_logging" is created.
55507+
55508+config GRKERNSEC_FORKFAIL
55509+ bool "Fork failure logging"
55510+ help
55511+ If you say Y here, all failed fork() attempts will be logged.
55512+ This could suggest a fork bomb, or someone attempting to overstep
55513+ their process limit. If the sysctl option is enabled, a sysctl option
55514+ with name "forkfail_logging" is created.
55515+
55516+config GRKERNSEC_TIME
55517+ bool "Time change logging"
55518+ help
55519+ If you say Y here, any changes of the system clock will be logged.
55520+ If the sysctl option is enabled, a sysctl option with name
55521+ "timechange_logging" is created.
55522+
55523+config GRKERNSEC_PROC_IPADDR
55524+ bool "/proc/<pid>/ipaddr support"
55525+ help
55526+ If you say Y here, a new entry will be added to each /proc/<pid>
55527+ directory that contains the IP address of the person using the task.
55528+ The IP is carried across local TCP and AF_UNIX stream sockets.
55529+ This information can be useful for IDS/IPSes to perform remote response
55530+ to a local attack. The entry is readable by only the owner of the
55531+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
55532+ the RBAC system), and thus does not create privacy concerns.
55533+
55534+config GRKERNSEC_RWXMAP_LOG
55535+ bool 'Denied RWX mmap/mprotect logging'
55536+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
55537+ help
55538+ If you say Y here, calls to mmap() and mprotect() with explicit
55539+ usage of PROT_WRITE and PROT_EXEC together will be logged when
55540+ denied by the PAX_MPROTECT feature. If the sysctl option is
55541+ enabled, a sysctl option with name "rwxmap_logging" is created.
55542+
55543+config GRKERNSEC_AUDIT_TEXTREL
55544+ bool 'ELF text relocations logging (READ HELP)'
55545+ depends on PAX_MPROTECT
55546+ help
55547+ If you say Y here, text relocations will be logged with the filename
55548+ of the offending library or binary. The purpose of the feature is
55549+ to help Linux distribution developers get rid of libraries and
55550+ binaries that need text relocations which hinder the future progress
55551+ of PaX. Only Linux distribution developers should say Y here, and
55552+ never on a production machine, as this option creates an information
55553+ leak that could aid an attacker in defeating the randomization of
55554+ a single memory region. If the sysctl option is enabled, a sysctl
55555+ option with name "audit_textrel" is created.
55556+
55557+endmenu
55558+
55559+menu "Executable Protections"
55560+depends on GRKERNSEC
55561+
55562+config GRKERNSEC_DMESG
55563+ bool "Dmesg(8) restriction"
55564+ help
55565+ If you say Y here, non-root users will not be able to use dmesg(8)
55566+ to view up to the last 4kb of messages in the kernel's log buffer.
55567+ The kernel's log buffer often contains kernel addresses and other
55568+ identifying information useful to an attacker in fingerprinting a
55569+ system for a targeted exploit.
55570+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
55571+ created.
55572+
55573+config GRKERNSEC_HARDEN_PTRACE
55574+ bool "Deter ptrace-based process snooping"
55575+ help
55576+ If you say Y here, TTY sniffers and other malicious monitoring
55577+ programs implemented through ptrace will be defeated. If you
55578+ have been using the RBAC system, this option has already been
55579+ enabled for several years for all users, with the ability to make
55580+ fine-grained exceptions.
55581+
55582+ This option only affects the ability of non-root users to ptrace
55583+ processes that are not a descendent of the ptracing process.
55584+ This means that strace ./binary and gdb ./binary will still work,
55585+ but attaching to arbitrary processes will not. If the sysctl
55586+ option is enabled, a sysctl option with name "harden_ptrace" is
55587+ created.
55588+
55589+config GRKERNSEC_PTRACE_READEXEC
55590+ bool "Require read access to ptrace sensitive binaries"
55591+ help
55592+ If you say Y here, unprivileged users will not be able to ptrace unreadable
55593+ binaries. This option is useful in environments that
55594+ remove the read bits (e.g. file mode 4711) from suid binaries to
55595+ prevent infoleaking of their contents. This option adds
55596+ consistency to the use of that file mode, as the binary could normally
55597+ be read out when run without privileges while ptracing.
55598+
55599+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
55600+ is created.
55601+
55602+config GRKERNSEC_SETXID
55603+ bool "Enforce consistent multithreaded privileges"
55604+ help
55605+ If you say Y here, a change from a root uid to a non-root uid
55606+ in a multithreaded application will cause the resulting uids,
55607+ gids, supplementary groups, and capabilities in that thread
55608+ to be propagated to the other threads of the process. In most
55609+ cases this is unnecessary, as glibc will emulate this behavior
55610+ on behalf of the application. Other libcs do not act in the
55611+ same way, allowing the other threads of the process to continue
55612+ running with root privileges. If the sysctl option is enabled,
55613+ a sysctl option with name "consistent_setxid" is created.
55614+
55615+config GRKERNSEC_TPE
55616+ bool "Trusted Path Execution (TPE)"
55617+ help
55618+ If you say Y here, you will be able to choose a gid to add to the
55619+ supplementary groups of users you want to mark as "untrusted."
55620+ These users will not be able to execute any files that are not in
55621+ root-owned directories writable only by root. If the sysctl option
55622+ is enabled, a sysctl option with name "tpe" is created.
55623+
55624+config GRKERNSEC_TPE_ALL
55625+ bool "Partially restrict all non-root users"
55626+ depends on GRKERNSEC_TPE
55627+ help
55628+ If you say Y here, all non-root users will be covered under
55629+ a weaker TPE restriction. This is separate from, and in addition to,
55630+ the main TPE options that you have selected elsewhere. Thus, if a
55631+ "trusted" GID is chosen, this restriction applies to even that GID.
55632+ Under this restriction, all non-root users will only be allowed to
55633+ execute files in directories they own that are not group or
55634+ world-writable, or in directories owned by root and writable only by
55635+ root. If the sysctl option is enabled, a sysctl option with name
55636+ "tpe_restrict_all" is created.
55637+
55638+config GRKERNSEC_TPE_INVERT
55639+ bool "Invert GID option"
55640+ depends on GRKERNSEC_TPE
55641+ help
55642+ If you say Y here, the group you specify in the TPE configuration will
55643+ decide what group TPE restrictions will be *disabled* for. This
55644+ option is useful if you want TPE restrictions to be applied to most
55645+ users on the system. If the sysctl option is enabled, a sysctl option
55646+ with name "tpe_invert" is created. Unlike other sysctl options, this
55647+ entry will default to on for backward-compatibility.
55648+
55649+config GRKERNSEC_TPE_GID
55650+ int "GID for untrusted users"
55651+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
55652+ default 1005
55653+ help
55654+ Setting this GID determines what group TPE restrictions will be
55655+ *enabled* for. If the sysctl option is enabled, a sysctl option
55656+ with name "tpe_gid" is created.
55657+
55658+config GRKERNSEC_TPE_GID
55659+ int "GID for trusted users"
55660+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
55661+ default 1005
55662+ help
55663+ Setting this GID determines what group TPE restrictions will be
55664+ *disabled* for. If the sysctl option is enabled, a sysctl option
55665+ with name "tpe_gid" is created.
55666+
55667+endmenu
55668+menu "Network Protections"
55669+depends on GRKERNSEC
55670+
55671+config GRKERNSEC_RANDNET
55672+ bool "Larger entropy pools"
55673+ help
55674+ If you say Y here, the entropy pools used for many features of Linux
55675+ and grsecurity will be doubled in size. Since several grsecurity
55676+ features use additional randomness, it is recommended that you say Y
55677+ here. Saying Y here has a similar effect as modifying
55678+ /proc/sys/kernel/random/poolsize.
55679+
55680+config GRKERNSEC_BLACKHOLE
55681+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
55682+ depends on NET
55683+ help
55684+ If you say Y here, neither TCP resets nor ICMP
55685+ destination-unreachable packets will be sent in response to packets
55686+ sent to ports for which no associated listening process exists.
55687+ This feature supports both IPV4 and IPV6 and exempts the
55688+ loopback interface from blackholing. Enabling this feature
55689+ makes a host more resilient to DoS attacks and reduces network
55690+ visibility against scanners.
55691+
55692+ The blackhole feature as-implemented is equivalent to the FreeBSD
55693+ blackhole feature, as it prevents RST responses to all packets, not
55694+ just SYNs. Under most application behavior this causes no
55695+ problems, but applications (like haproxy) may not close certain
55696+ connections in a way that cleanly terminates them on the remote
55697+ end, leaving the remote host in LAST_ACK state. Because of this
55698+ side-effect and to prevent intentional LAST_ACK DoSes, this
55699+ feature also adds automatic mitigation against such attacks.
55700+ The mitigation drastically reduces the amount of time a socket
55701+ can spend in LAST_ACK state. If you're using haproxy and not
55702+ all servers it connects to have this option enabled, consider
55703+ disabling this feature on the haproxy host.
55704+
55705+ If the sysctl option is enabled, two sysctl options with names
55706+ "ip_blackhole" and "lastack_retries" will be created.
55707+ While "ip_blackhole" takes the standard zero/non-zero on/off
55708+ toggle, "lastack_retries" uses the same kinds of values as
55709+ "tcp_retries1" and "tcp_retries2". The default value of 4
55710+ prevents a socket from lasting more than 45 seconds in LAST_ACK
55711+ state.
55712+
55713+config GRKERNSEC_SOCKET
55714+ bool "Socket restrictions"
55715+ depends on NET
55716+ help
55717+ If you say Y here, you will be able to choose from several options.
55718+ If you assign a GID on your system and add it to the supplementary
55719+ groups of users you want to restrict socket access to, this patch
55720+ will perform up to three things, based on the option(s) you choose.
55721+
55722+config GRKERNSEC_SOCKET_ALL
55723+ bool "Deny any sockets to group"
55724+ depends on GRKERNSEC_SOCKET
55725+ help
55726+ If you say Y here, you will be able to choose a GID of whose users will
55727+ be unable to connect to other hosts from your machine or run server
55728+ applications from your machine. If the sysctl option is enabled, a
55729+ sysctl option with name "socket_all" is created.
55730+
55731+config GRKERNSEC_SOCKET_ALL_GID
55732+ int "GID to deny all sockets for"
55733+ depends on GRKERNSEC_SOCKET_ALL
55734+ default 1004
55735+ help
55736+ Here you can choose the GID to disable socket access for. Remember to
55737+ add the users you want socket access disabled for to the GID
55738+ specified here. If the sysctl option is enabled, a sysctl option
55739+ with name "socket_all_gid" is created.
55740+
55741+config GRKERNSEC_SOCKET_CLIENT
55742+ bool "Deny client sockets to group"
55743+ depends on GRKERNSEC_SOCKET
55744+ help
55745+ If you say Y here, you will be able to choose a GID of whose users will
55746+ be unable to connect to other hosts from your machine, but will be
55747+ able to run servers. If this option is enabled, all users in the group
55748+ you specify will have to use passive mode when initiating ftp transfers
55749+ from the shell on your machine. If the sysctl option is enabled, a
55750+ sysctl option with name "socket_client" is created.
55751+
55752+config GRKERNSEC_SOCKET_CLIENT_GID
55753+ int "GID to deny client sockets for"
55754+ depends on GRKERNSEC_SOCKET_CLIENT
55755+ default 1003
55756+ help
55757+ Here you can choose the GID to disable client socket access for.
55758+ Remember to add the users you want client socket access disabled for to
55759+ the GID specified here. If the sysctl option is enabled, a sysctl
55760+ option with name "socket_client_gid" is created.
55761+
55762+config GRKERNSEC_SOCKET_SERVER
55763+ bool "Deny server sockets to group"
55764+ depends on GRKERNSEC_SOCKET
55765+ help
55766+ If you say Y here, you will be able to choose a GID of whose users will
55767+ be unable to run server applications from your machine. If the sysctl
55768+ option is enabled, a sysctl option with name "socket_server" is created.
55769+
55770+config GRKERNSEC_SOCKET_SERVER_GID
55771+ int "GID to deny server sockets for"
55772+ depends on GRKERNSEC_SOCKET_SERVER
55773+ default 1002
55774+ help
55775+ Here you can choose the GID to disable server socket access for.
55776+ Remember to add the users you want server socket access disabled for to
55777+ the GID specified here. If the sysctl option is enabled, a sysctl
55778+ option with name "socket_server_gid" is created.
55779+
55780+endmenu
55781+menu "Sysctl support"
55782+depends on GRKERNSEC && SYSCTL
55783+
55784+config GRKERNSEC_SYSCTL
55785+ bool "Sysctl support"
55786+ help
55787+ If you say Y here, you will be able to change the options that
55788+ grsecurity runs with at bootup, without having to recompile your
55789+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
55790+ to enable (1) or disable (0) various features. All the sysctl entries
55791+ are mutable until the "grsec_lock" entry is set to a non-zero value.
55792+ All features enabled in the kernel configuration are disabled at boot
55793+ if you do not say Y to the "Turn on features by default" option.
55794+ All options should be set at startup, and the grsec_lock entry should
55795+ be set to a non-zero value after all the options are set.
55796+ *THIS IS EXTREMELY IMPORTANT*
55797+
55798+config GRKERNSEC_SYSCTL_DISTRO
55799+ bool "Extra sysctl support for distro makers (READ HELP)"
55800+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
55801+ help
55802+ If you say Y here, additional sysctl options will be created
55803+ for features that affect processes running as root. Therefore,
55804+ it is critical when using this option that the grsec_lock entry be
55805+ enabled after boot. Only distros with prebuilt kernel packages
55806+ with this option enabled that can ensure grsec_lock is enabled
55807+ after boot should use this option.
55808+ *Failure to set grsec_lock after boot makes all grsec features
55809+ this option covers useless*
55810+
55811+ Currently this option creates the following sysctl entries:
55812+ "Disable Privileged I/O": "disable_priv_io"
55813+
55814+config GRKERNSEC_SYSCTL_ON
55815+ bool "Turn on features by default"
55816+ depends on GRKERNSEC_SYSCTL
55817+ help
55818+ If you say Y here, instead of having all features enabled in the
55819+ kernel configuration disabled at boot time, the features will be
55820+ enabled at boot time. It is recommended you say Y here unless
55821+ there is some reason you would want all sysctl-tunable features to
55822+ be disabled by default. As mentioned elsewhere, it is important
55823+ to enable the grsec_lock entry once you have finished modifying
55824+ the sysctl entries.
55825+
55826+endmenu
55827+menu "Logging Options"
55828+depends on GRKERNSEC
55829+
55830+config GRKERNSEC_FLOODTIME
55831+ int "Seconds in between log messages (minimum)"
55832+ default 10
55833+ help
55834+ This option allows you to enforce the number of seconds between
55835+ grsecurity log messages. The default should be suitable for most
55836+ people, however, if you choose to change it, choose a value small enough
55837+ to allow informative logs to be produced, but large enough to
55838+ prevent flooding.
55839+
55840+config GRKERNSEC_FLOODBURST
55841+ int "Number of messages in a burst (maximum)"
55842+ default 6
55843+ help
55844+ This option allows you to choose the maximum number of messages allowed
55845+ within the flood time interval you chose in a separate option. The
55846+ default should be suitable for most people, however if you find that
55847+ many of your logs are being interpreted as flooding, you may want to
55848+ raise this value.
55849+
55850+endmenu
55851+
55852+endmenu
55853diff --git a/grsecurity/Makefile b/grsecurity/Makefile
55854new file mode 100644
55855index 0000000..be9ae3a
55856--- /dev/null
55857+++ b/grsecurity/Makefile
55858@@ -0,0 +1,36 @@
55859+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
55860+# during 2001-2009 it has been completely redesigned by Brad Spengler
55861+# into an RBAC system
55862+#
55863+# All code in this directory and various hooks inserted throughout the kernel
55864+# are copyright Brad Spengler - Open Source Security, Inc., and released
55865+# under the GPL v2 or higher
55866+
55867+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
55868+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
55869+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
55870+
55871+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
55872+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
55873+ gracl_learn.o grsec_log.o
55874+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
55875+
55876+ifdef CONFIG_NET
55877+obj-y += grsec_sock.o
55878+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
55879+endif
55880+
55881+ifndef CONFIG_GRKERNSEC
55882+obj-y += grsec_disabled.o
55883+endif
55884+
55885+ifdef CONFIG_GRKERNSEC_HIDESYM
55886+extra-y := grsec_hidesym.o
55887+$(obj)/grsec_hidesym.o:
55888+ @-chmod -f 500 /boot
55889+ @-chmod -f 500 /lib/modules
55890+ @-chmod -f 500 /lib64/modules
55891+ @-chmod -f 500 /lib32/modules
55892+ @-chmod -f 700 .
55893+ @echo ' grsec: protected kernel image paths'
55894+endif
55895diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
55896new file mode 100644
55897index 0000000..71cb167
55898--- /dev/null
55899+++ b/grsecurity/gracl.c
55900@@ -0,0 +1,4140 @@
55901+#include <linux/kernel.h>
55902+#include <linux/module.h>
55903+#include <linux/sched.h>
55904+#include <linux/mm.h>
55905+#include <linux/file.h>
55906+#include <linux/fs.h>
55907+#include <linux/namei.h>
55908+#include <linux/mount.h>
55909+#include <linux/tty.h>
55910+#include <linux/proc_fs.h>
55911+#include <linux/smp_lock.h>
55912+#include <linux/slab.h>
55913+#include <linux/vmalloc.h>
55914+#include <linux/types.h>
55915+#include <linux/sysctl.h>
55916+#include <linux/netdevice.h>
55917+#include <linux/ptrace.h>
55918+#include <linux/gracl.h>
55919+#include <linux/gralloc.h>
55920+#include <linux/security.h>
55921+#include <linux/grinternal.h>
55922+#include <linux/pid_namespace.h>
55923+#include <linux/fdtable.h>
55924+#include <linux/percpu.h>
55925+
55926+#include <asm/uaccess.h>
55927+#include <asm/errno.h>
55928+#include <asm/mman.h>
55929+
55930+static struct acl_role_db acl_role_set;
55931+static struct name_db name_set;
55932+static struct inodev_db inodev_set;
55933+
55934+/* for keeping track of userspace pointers used for subjects, so we
55935+ can share references in the kernel as well
55936+*/
55937+
55938+static struct dentry *real_root;
55939+static struct vfsmount *real_root_mnt;
55940+
55941+static struct acl_subj_map_db subj_map_set;
55942+
55943+static struct acl_role_label *default_role;
55944+
55945+static struct acl_role_label *role_list;
55946+
55947+static u16 acl_sp_role_value;
55948+
55949+extern char *gr_shared_page[4];
55950+static DEFINE_MUTEX(gr_dev_mutex);
55951+DEFINE_RWLOCK(gr_inode_lock);
55952+
55953+struct gr_arg *gr_usermode;
55954+
55955+static unsigned int gr_status __read_only = GR_STATUS_INIT;
55956+
55957+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
55958+extern void gr_clear_learn_entries(void);
55959+
55960+#ifdef CONFIG_GRKERNSEC_RESLOG
55961+extern void gr_log_resource(const struct task_struct *task,
55962+ const int res, const unsigned long wanted, const int gt);
55963+#endif
55964+
55965+unsigned char *gr_system_salt;
55966+unsigned char *gr_system_sum;
55967+
55968+static struct sprole_pw **acl_special_roles = NULL;
55969+static __u16 num_sprole_pws = 0;
55970+
55971+static struct acl_role_label *kernel_role = NULL;
55972+
55973+static unsigned int gr_auth_attempts = 0;
55974+static unsigned long gr_auth_expires = 0UL;
55975+
55976+#ifdef CONFIG_NET
55977+extern struct vfsmount *sock_mnt;
55978+#endif
55979+extern struct vfsmount *pipe_mnt;
55980+extern struct vfsmount *shm_mnt;
55981+#ifdef CONFIG_HUGETLBFS
55982+extern struct vfsmount *hugetlbfs_vfsmount;
55983+#endif
55984+
55985+static struct acl_object_label *fakefs_obj_rw;
55986+static struct acl_object_label *fakefs_obj_rwx;
55987+
55988+extern int gr_init_uidset(void);
55989+extern void gr_free_uidset(void);
55990+extern void gr_remove_uid(uid_t uid);
55991+extern int gr_find_uid(uid_t uid);
55992+
55993+__inline__ int
55994+gr_acl_is_enabled(void)
55995+{
55996+ return (gr_status & GR_READY);
55997+}
55998+
55999+#ifdef CONFIG_BTRFS_FS
56000+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
56001+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
56002+#endif
56003+
56004+static inline dev_t __get_dev(const struct dentry *dentry)
56005+{
56006+#ifdef CONFIG_BTRFS_FS
56007+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
56008+ return get_btrfs_dev_from_inode(dentry->d_inode);
56009+ else
56010+#endif
56011+ return dentry->d_inode->i_sb->s_dev;
56012+}
56013+
56014+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
56015+{
56016+ return __get_dev(dentry);
56017+}
56018+
56019+static char gr_task_roletype_to_char(struct task_struct *task)
56020+{
56021+ switch (task->role->roletype &
56022+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
56023+ GR_ROLE_SPECIAL)) {
56024+ case GR_ROLE_DEFAULT:
56025+ return 'D';
56026+ case GR_ROLE_USER:
56027+ return 'U';
56028+ case GR_ROLE_GROUP:
56029+ return 'G';
56030+ case GR_ROLE_SPECIAL:
56031+ return 'S';
56032+ }
56033+
56034+ return 'X';
56035+}
56036+
56037+char gr_roletype_to_char(void)
56038+{
56039+ return gr_task_roletype_to_char(current);
56040+}
56041+
56042+__inline__ int
56043+gr_acl_tpe_check(void)
56044+{
56045+ if (unlikely(!(gr_status & GR_READY)))
56046+ return 0;
56047+ if (current->role->roletype & GR_ROLE_TPE)
56048+ return 1;
56049+ else
56050+ return 0;
56051+}
56052+
56053+int
56054+gr_handle_rawio(const struct inode *inode)
56055+{
56056+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
56057+ if (inode && S_ISBLK(inode->i_mode) &&
56058+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
56059+ !capable(CAP_SYS_RAWIO))
56060+ return 1;
56061+#endif
56062+ return 0;
56063+}
56064+
56065+static int
56066+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
56067+{
56068+ if (likely(lena != lenb))
56069+ return 0;
56070+
56071+ return !memcmp(a, b, lena);
56072+}
56073+
56074+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
56075+{
56076+ *buflen -= namelen;
56077+ if (*buflen < 0)
56078+ return -ENAMETOOLONG;
56079+ *buffer -= namelen;
56080+ memcpy(*buffer, str, namelen);
56081+ return 0;
56082+}
56083+
56084+/* this must be called with vfsmount_lock and dcache_lock held */
56085+
56086+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56087+ struct dentry *root, struct vfsmount *rootmnt,
56088+ char *buffer, int buflen)
56089+{
56090+ char * end = buffer+buflen;
56091+ char * retval;
56092+ int namelen;
56093+
56094+ *--end = '\0';
56095+ buflen--;
56096+
56097+ if (buflen < 1)
56098+ goto Elong;
56099+ /* Get '/' right */
56100+ retval = end-1;
56101+ *retval = '/';
56102+
56103+ for (;;) {
56104+ struct dentry * parent;
56105+
56106+ if (dentry == root && vfsmnt == rootmnt)
56107+ break;
56108+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
56109+ /* Global root? */
56110+ if (vfsmnt->mnt_parent == vfsmnt)
56111+ goto global_root;
56112+ dentry = vfsmnt->mnt_mountpoint;
56113+ vfsmnt = vfsmnt->mnt_parent;
56114+ continue;
56115+ }
56116+ parent = dentry->d_parent;
56117+ prefetch(parent);
56118+ namelen = dentry->d_name.len;
56119+ buflen -= namelen + 1;
56120+ if (buflen < 0)
56121+ goto Elong;
56122+ end -= namelen;
56123+ memcpy(end, dentry->d_name.name, namelen);
56124+ *--end = '/';
56125+ retval = end;
56126+ dentry = parent;
56127+ }
56128+
56129+out:
56130+ return retval;
56131+
56132+global_root:
56133+ namelen = dentry->d_name.len;
56134+ buflen -= namelen;
56135+ if (buflen < 0)
56136+ goto Elong;
56137+ retval -= namelen-1; /* hit the slash */
56138+ memcpy(retval, dentry->d_name.name, namelen);
56139+ goto out;
56140+Elong:
56141+ retval = ERR_PTR(-ENAMETOOLONG);
56142+ goto out;
56143+}
56144+
56145+static char *
56146+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
56147+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
56148+{
56149+ char *retval;
56150+
56151+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
56152+ if (unlikely(IS_ERR(retval)))
56153+ retval = strcpy(buf, "<path too long>");
56154+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
56155+ retval[1] = '\0';
56156+
56157+ return retval;
56158+}
56159+
56160+static char *
56161+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56162+ char *buf, int buflen)
56163+{
56164+ char *res;
56165+
56166+ /* we can use real_root, real_root_mnt, because this is only called
56167+ by the RBAC system */
56168+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
56169+
56170+ return res;
56171+}
56172+
56173+static char *
56174+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
56175+ char *buf, int buflen)
56176+{
56177+ char *res;
56178+ struct dentry *root;
56179+ struct vfsmount *rootmnt;
56180+ struct task_struct *reaper = &init_task;
56181+
56182+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
56183+ read_lock(&reaper->fs->lock);
56184+ root = dget(reaper->fs->root.dentry);
56185+ rootmnt = mntget(reaper->fs->root.mnt);
56186+ read_unlock(&reaper->fs->lock);
56187+
56188+ spin_lock(&dcache_lock);
56189+ spin_lock(&vfsmount_lock);
56190+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
56191+ spin_unlock(&vfsmount_lock);
56192+ spin_unlock(&dcache_lock);
56193+
56194+ dput(root);
56195+ mntput(rootmnt);
56196+ return res;
56197+}
56198+
56199+static char *
56200+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56201+{
56202+ char *ret;
56203+ spin_lock(&dcache_lock);
56204+ spin_lock(&vfsmount_lock);
56205+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56206+ PAGE_SIZE);
56207+ spin_unlock(&vfsmount_lock);
56208+ spin_unlock(&dcache_lock);
56209+ return ret;
56210+}
56211+
56212+static char *
56213+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
56214+{
56215+ char *ret;
56216+ char *buf;
56217+ int buflen;
56218+
56219+ spin_lock(&dcache_lock);
56220+ spin_lock(&vfsmount_lock);
56221+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
56222+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
56223+ buflen = (int)(ret - buf);
56224+ if (buflen >= 5)
56225+ prepend(&ret, &buflen, "/proc", 5);
56226+ else
56227+ ret = strcpy(buf, "<path too long>");
56228+ spin_unlock(&vfsmount_lock);
56229+ spin_unlock(&dcache_lock);
56230+ return ret;
56231+}
56232+
56233+char *
56234+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
56235+{
56236+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
56237+ PAGE_SIZE);
56238+}
56239+
56240+char *
56241+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
56242+{
56243+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
56244+ PAGE_SIZE);
56245+}
56246+
56247+char *
56248+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
56249+{
56250+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
56251+ PAGE_SIZE);
56252+}
56253+
56254+char *
56255+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
56256+{
56257+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
56258+ PAGE_SIZE);
56259+}
56260+
56261+char *
56262+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
56263+{
56264+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
56265+ PAGE_SIZE);
56266+}
56267+
56268+__inline__ __u32
56269+to_gr_audit(const __u32 reqmode)
56270+{
56271+ /* masks off auditable permission flags, then shifts them to create
56272+ auditing flags, and adds the special case of append auditing if
56273+ we're requesting write */
56274+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
56275+}
56276+
56277+struct acl_subject_label *
56278+lookup_subject_map(const struct acl_subject_label *userp)
56279+{
56280+ unsigned int index = shash(userp, subj_map_set.s_size);
56281+ struct subject_map *match;
56282+
56283+ match = subj_map_set.s_hash[index];
56284+
56285+ while (match && match->user != userp)
56286+ match = match->next;
56287+
56288+ if (match != NULL)
56289+ return match->kernel;
56290+ else
56291+ return NULL;
56292+}
56293+
56294+static void
56295+insert_subj_map_entry(struct subject_map *subjmap)
56296+{
56297+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
56298+ struct subject_map **curr;
56299+
56300+ subjmap->prev = NULL;
56301+
56302+ curr = &subj_map_set.s_hash[index];
56303+ if (*curr != NULL)
56304+ (*curr)->prev = subjmap;
56305+
56306+ subjmap->next = *curr;
56307+ *curr = subjmap;
56308+
56309+ return;
56310+}
56311+
56312+static struct acl_role_label *
56313+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
56314+ const gid_t gid)
56315+{
56316+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
56317+ struct acl_role_label *match;
56318+ struct role_allowed_ip *ipp;
56319+ unsigned int x;
56320+ u32 curr_ip = task->signal->curr_ip;
56321+
56322+ task->signal->saved_ip = curr_ip;
56323+
56324+ match = acl_role_set.r_hash[index];
56325+
56326+ while (match) {
56327+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
56328+ for (x = 0; x < match->domain_child_num; x++) {
56329+ if (match->domain_children[x] == uid)
56330+ goto found;
56331+ }
56332+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
56333+ break;
56334+ match = match->next;
56335+ }
56336+found:
56337+ if (match == NULL) {
56338+ try_group:
56339+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
56340+ match = acl_role_set.r_hash[index];
56341+
56342+ while (match) {
56343+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
56344+ for (x = 0; x < match->domain_child_num; x++) {
56345+ if (match->domain_children[x] == gid)
56346+ goto found2;
56347+ }
56348+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
56349+ break;
56350+ match = match->next;
56351+ }
56352+found2:
56353+ if (match == NULL)
56354+ match = default_role;
56355+ if (match->allowed_ips == NULL)
56356+ return match;
56357+ else {
56358+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56359+ if (likely
56360+ ((ntohl(curr_ip) & ipp->netmask) ==
56361+ (ntohl(ipp->addr) & ipp->netmask)))
56362+ return match;
56363+ }
56364+ match = default_role;
56365+ }
56366+ } else if (match->allowed_ips == NULL) {
56367+ return match;
56368+ } else {
56369+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
56370+ if (likely
56371+ ((ntohl(curr_ip) & ipp->netmask) ==
56372+ (ntohl(ipp->addr) & ipp->netmask)))
56373+ return match;
56374+ }
56375+ goto try_group;
56376+ }
56377+
56378+ return match;
56379+}
56380+
56381+struct acl_subject_label *
56382+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
56383+ const struct acl_role_label *role)
56384+{
56385+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
56386+ struct acl_subject_label *match;
56387+
56388+ match = role->subj_hash[index];
56389+
56390+ while (match && (match->inode != ino || match->device != dev ||
56391+ (match->mode & GR_DELETED))) {
56392+ match = match->next;
56393+ }
56394+
56395+ if (match && !(match->mode & GR_DELETED))
56396+ return match;
56397+ else
56398+ return NULL;
56399+}
56400+
56401+struct acl_subject_label *
56402+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
56403+ const struct acl_role_label *role)
56404+{
56405+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
56406+ struct acl_subject_label *match;
56407+
56408+ match = role->subj_hash[index];
56409+
56410+ while (match && (match->inode != ino || match->device != dev ||
56411+ !(match->mode & GR_DELETED))) {
56412+ match = match->next;
56413+ }
56414+
56415+ if (match && (match->mode & GR_DELETED))
56416+ return match;
56417+ else
56418+ return NULL;
56419+}
56420+
56421+static struct acl_object_label *
56422+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
56423+ const struct acl_subject_label *subj)
56424+{
56425+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56426+ struct acl_object_label *match;
56427+
56428+ match = subj->obj_hash[index];
56429+
56430+ while (match && (match->inode != ino || match->device != dev ||
56431+ (match->mode & GR_DELETED))) {
56432+ match = match->next;
56433+ }
56434+
56435+ if (match && !(match->mode & GR_DELETED))
56436+ return match;
56437+ else
56438+ return NULL;
56439+}
56440+
56441+static struct acl_object_label *
56442+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
56443+ const struct acl_subject_label *subj)
56444+{
56445+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
56446+ struct acl_object_label *match;
56447+
56448+ match = subj->obj_hash[index];
56449+
56450+ while (match && (match->inode != ino || match->device != dev ||
56451+ !(match->mode & GR_DELETED))) {
56452+ match = match->next;
56453+ }
56454+
56455+ if (match && (match->mode & GR_DELETED))
56456+ return match;
56457+
56458+ match = subj->obj_hash[index];
56459+
56460+ while (match && (match->inode != ino || match->device != dev ||
56461+ (match->mode & GR_DELETED))) {
56462+ match = match->next;
56463+ }
56464+
56465+ if (match && !(match->mode & GR_DELETED))
56466+ return match;
56467+ else
56468+ return NULL;
56469+}
56470+
56471+static struct name_entry *
56472+lookup_name_entry(const char *name)
56473+{
56474+ unsigned int len = strlen(name);
56475+ unsigned int key = full_name_hash(name, len);
56476+ unsigned int index = key % name_set.n_size;
56477+ struct name_entry *match;
56478+
56479+ match = name_set.n_hash[index];
56480+
56481+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
56482+ match = match->next;
56483+
56484+ return match;
56485+}
56486+
56487+static struct name_entry *
56488+lookup_name_entry_create(const char *name)
56489+{
56490+ unsigned int len = strlen(name);
56491+ unsigned int key = full_name_hash(name, len);
56492+ unsigned int index = key % name_set.n_size;
56493+ struct name_entry *match;
56494+
56495+ match = name_set.n_hash[index];
56496+
56497+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56498+ !match->deleted))
56499+ match = match->next;
56500+
56501+ if (match && match->deleted)
56502+ return match;
56503+
56504+ match = name_set.n_hash[index];
56505+
56506+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
56507+ match->deleted))
56508+ match = match->next;
56509+
56510+ if (match && !match->deleted)
56511+ return match;
56512+ else
56513+ return NULL;
56514+}
56515+
56516+static struct inodev_entry *
56517+lookup_inodev_entry(const ino_t ino, const dev_t dev)
56518+{
56519+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
56520+ struct inodev_entry *match;
56521+
56522+ match = inodev_set.i_hash[index];
56523+
56524+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
56525+ match = match->next;
56526+
56527+ return match;
56528+}
56529+
56530+static void
56531+insert_inodev_entry(struct inodev_entry *entry)
56532+{
56533+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
56534+ inodev_set.i_size);
56535+ struct inodev_entry **curr;
56536+
56537+ entry->prev = NULL;
56538+
56539+ curr = &inodev_set.i_hash[index];
56540+ if (*curr != NULL)
56541+ (*curr)->prev = entry;
56542+
56543+ entry->next = *curr;
56544+ *curr = entry;
56545+
56546+ return;
56547+}
56548+
56549+static void
56550+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
56551+{
56552+ unsigned int index =
56553+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
56554+ struct acl_role_label **curr;
56555+ struct acl_role_label *tmp;
56556+
56557+ curr = &acl_role_set.r_hash[index];
56558+
56559+ /* if role was already inserted due to domains and already has
56560+ a role in the same bucket as it attached, then we need to
56561+ combine these two buckets
56562+ */
56563+ if (role->next) {
56564+ tmp = role->next;
56565+ while (tmp->next)
56566+ tmp = tmp->next;
56567+ tmp->next = *curr;
56568+ } else
56569+ role->next = *curr;
56570+ *curr = role;
56571+
56572+ return;
56573+}
56574+
56575+static void
56576+insert_acl_role_label(struct acl_role_label *role)
56577+{
56578+ int i;
56579+
56580+ if (role_list == NULL) {
56581+ role_list = role;
56582+ role->prev = NULL;
56583+ } else {
56584+ role->prev = role_list;
56585+ role_list = role;
56586+ }
56587+
56588+ /* used for hash chains */
56589+ role->next = NULL;
56590+
56591+ if (role->roletype & GR_ROLE_DOMAIN) {
56592+ for (i = 0; i < role->domain_child_num; i++)
56593+ __insert_acl_role_label(role, role->domain_children[i]);
56594+ } else
56595+ __insert_acl_role_label(role, role->uidgid);
56596+}
56597+
56598+static int
56599+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
56600+{
56601+ struct name_entry **curr, *nentry;
56602+ struct inodev_entry *ientry;
56603+ unsigned int len = strlen(name);
56604+ unsigned int key = full_name_hash(name, len);
56605+ unsigned int index = key % name_set.n_size;
56606+
56607+ curr = &name_set.n_hash[index];
56608+
56609+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
56610+ curr = &((*curr)->next);
56611+
56612+ if (*curr != NULL)
56613+ return 1;
56614+
56615+ nentry = acl_alloc(sizeof (struct name_entry));
56616+ if (nentry == NULL)
56617+ return 0;
56618+ ientry = acl_alloc(sizeof (struct inodev_entry));
56619+ if (ientry == NULL)
56620+ return 0;
56621+ ientry->nentry = nentry;
56622+
56623+ nentry->key = key;
56624+ nentry->name = name;
56625+ nentry->inode = inode;
56626+ nentry->device = device;
56627+ nentry->len = len;
56628+ nentry->deleted = deleted;
56629+
56630+ nentry->prev = NULL;
56631+ curr = &name_set.n_hash[index];
56632+ if (*curr != NULL)
56633+ (*curr)->prev = nentry;
56634+ nentry->next = *curr;
56635+ *curr = nentry;
56636+
56637+ /* insert us into the table searchable by inode/dev */
56638+ insert_inodev_entry(ientry);
56639+
56640+ return 1;
56641+}
56642+
56643+static void
56644+insert_acl_obj_label(struct acl_object_label *obj,
56645+ struct acl_subject_label *subj)
56646+{
56647+ unsigned int index =
56648+ fhash(obj->inode, obj->device, subj->obj_hash_size);
56649+ struct acl_object_label **curr;
56650+
56651+
56652+ obj->prev = NULL;
56653+
56654+ curr = &subj->obj_hash[index];
56655+ if (*curr != NULL)
56656+ (*curr)->prev = obj;
56657+
56658+ obj->next = *curr;
56659+ *curr = obj;
56660+
56661+ return;
56662+}
56663+
56664+static void
56665+insert_acl_subj_label(struct acl_subject_label *obj,
56666+ struct acl_role_label *role)
56667+{
56668+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
56669+ struct acl_subject_label **curr;
56670+
56671+ obj->prev = NULL;
56672+
56673+ curr = &role->subj_hash[index];
56674+ if (*curr != NULL)
56675+ (*curr)->prev = obj;
56676+
56677+ obj->next = *curr;
56678+ *curr = obj;
56679+
56680+ return;
56681+}
56682+
56683+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
56684+
56685+static void *
56686+create_table(__u32 * len, int elementsize)
56687+{
56688+ unsigned int table_sizes[] = {
56689+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
56690+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
56691+ 4194301, 8388593, 16777213, 33554393, 67108859
56692+ };
56693+ void *newtable = NULL;
56694+ unsigned int pwr = 0;
56695+
56696+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
56697+ table_sizes[pwr] <= *len)
56698+ pwr++;
56699+
56700+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
56701+ return newtable;
56702+
56703+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
56704+ newtable =
56705+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
56706+ else
56707+ newtable = vmalloc(table_sizes[pwr] * elementsize);
56708+
56709+ *len = table_sizes[pwr];
56710+
56711+ return newtable;
56712+}
56713+
56714+static int
56715+init_variables(const struct gr_arg *arg)
56716+{
56717+ struct task_struct *reaper = &init_task;
56718+ unsigned int stacksize;
56719+
56720+ subj_map_set.s_size = arg->role_db.num_subjects;
56721+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
56722+ name_set.n_size = arg->role_db.num_objects;
56723+ inodev_set.i_size = arg->role_db.num_objects;
56724+
56725+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
56726+ !name_set.n_size || !inodev_set.i_size)
56727+ return 1;
56728+
56729+ if (!gr_init_uidset())
56730+ return 1;
56731+
56732+ /* set up the stack that holds allocation info */
56733+
56734+ stacksize = arg->role_db.num_pointers + 5;
56735+
56736+ if (!acl_alloc_stack_init(stacksize))
56737+ return 1;
56738+
56739+ /* grab reference for the real root dentry and vfsmount */
56740+ read_lock(&reaper->fs->lock);
56741+ real_root = dget(reaper->fs->root.dentry);
56742+ real_root_mnt = mntget(reaper->fs->root.mnt);
56743+ read_unlock(&reaper->fs->lock);
56744+
56745+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
56746+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
56747+#endif
56748+
56749+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
56750+ if (fakefs_obj_rw == NULL)
56751+ return 1;
56752+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
56753+
56754+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
56755+ if (fakefs_obj_rwx == NULL)
56756+ return 1;
56757+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
56758+
56759+ subj_map_set.s_hash =
56760+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
56761+ acl_role_set.r_hash =
56762+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
56763+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
56764+ inodev_set.i_hash =
56765+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
56766+
56767+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
56768+ !name_set.n_hash || !inodev_set.i_hash)
56769+ return 1;
56770+
56771+ memset(subj_map_set.s_hash, 0,
56772+ sizeof(struct subject_map *) * subj_map_set.s_size);
56773+ memset(acl_role_set.r_hash, 0,
56774+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
56775+ memset(name_set.n_hash, 0,
56776+ sizeof (struct name_entry *) * name_set.n_size);
56777+ memset(inodev_set.i_hash, 0,
56778+ sizeof (struct inodev_entry *) * inodev_set.i_size);
56779+
56780+ return 0;
56781+}
56782+
56783+/* free information not needed after startup
56784+ currently contains user->kernel pointer mappings for subjects
56785+*/
56786+
56787+static void
56788+free_init_variables(void)
56789+{
56790+ __u32 i;
56791+
56792+ if (subj_map_set.s_hash) {
56793+ for (i = 0; i < subj_map_set.s_size; i++) {
56794+ if (subj_map_set.s_hash[i]) {
56795+ kfree(subj_map_set.s_hash[i]);
56796+ subj_map_set.s_hash[i] = NULL;
56797+ }
56798+ }
56799+
56800+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
56801+ PAGE_SIZE)
56802+ kfree(subj_map_set.s_hash);
56803+ else
56804+ vfree(subj_map_set.s_hash);
56805+ }
56806+
56807+ return;
56808+}
56809+
56810+static void
56811+free_variables(void)
56812+{
56813+ struct acl_subject_label *s;
56814+ struct acl_role_label *r;
56815+ struct task_struct *task, *task2;
56816+ unsigned int x;
56817+
56818+ gr_clear_learn_entries();
56819+
56820+ read_lock(&tasklist_lock);
56821+ do_each_thread(task2, task) {
56822+ task->acl_sp_role = 0;
56823+ task->acl_role_id = 0;
56824+ task->acl = NULL;
56825+ task->role = NULL;
56826+ } while_each_thread(task2, task);
56827+ read_unlock(&tasklist_lock);
56828+
56829+ /* release the reference to the real root dentry and vfsmount */
56830+ if (real_root)
56831+ dput(real_root);
56832+ real_root = NULL;
56833+ if (real_root_mnt)
56834+ mntput(real_root_mnt);
56835+ real_root_mnt = NULL;
56836+
56837+ /* free all object hash tables */
56838+
56839+ FOR_EACH_ROLE_START(r)
56840+ if (r->subj_hash == NULL)
56841+ goto next_role;
56842+ FOR_EACH_SUBJECT_START(r, s, x)
56843+ if (s->obj_hash == NULL)
56844+ break;
56845+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56846+ kfree(s->obj_hash);
56847+ else
56848+ vfree(s->obj_hash);
56849+ FOR_EACH_SUBJECT_END(s, x)
56850+ FOR_EACH_NESTED_SUBJECT_START(r, s)
56851+ if (s->obj_hash == NULL)
56852+ break;
56853+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
56854+ kfree(s->obj_hash);
56855+ else
56856+ vfree(s->obj_hash);
56857+ FOR_EACH_NESTED_SUBJECT_END(s)
56858+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
56859+ kfree(r->subj_hash);
56860+ else
56861+ vfree(r->subj_hash);
56862+ r->subj_hash = NULL;
56863+next_role:
56864+ FOR_EACH_ROLE_END(r)
56865+
56866+ acl_free_all();
56867+
56868+ if (acl_role_set.r_hash) {
56869+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
56870+ PAGE_SIZE)
56871+ kfree(acl_role_set.r_hash);
56872+ else
56873+ vfree(acl_role_set.r_hash);
56874+ }
56875+ if (name_set.n_hash) {
56876+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
56877+ PAGE_SIZE)
56878+ kfree(name_set.n_hash);
56879+ else
56880+ vfree(name_set.n_hash);
56881+ }
56882+
56883+ if (inodev_set.i_hash) {
56884+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
56885+ PAGE_SIZE)
56886+ kfree(inodev_set.i_hash);
56887+ else
56888+ vfree(inodev_set.i_hash);
56889+ }
56890+
56891+ gr_free_uidset();
56892+
56893+ memset(&name_set, 0, sizeof (struct name_db));
56894+ memset(&inodev_set, 0, sizeof (struct inodev_db));
56895+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
56896+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
56897+
56898+ default_role = NULL;
56899+ role_list = NULL;
56900+
56901+ return;
56902+}
56903+
56904+static __u32
56905+count_user_objs(struct acl_object_label *userp)
56906+{
56907+ struct acl_object_label o_tmp;
56908+ __u32 num = 0;
56909+
56910+ while (userp) {
56911+ if (copy_from_user(&o_tmp, userp,
56912+ sizeof (struct acl_object_label)))
56913+ break;
56914+
56915+ userp = o_tmp.prev;
56916+ num++;
56917+ }
56918+
56919+ return num;
56920+}
56921+
56922+static struct acl_subject_label *
56923+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
56924+
56925+static int
56926+copy_user_glob(struct acl_object_label *obj)
56927+{
56928+ struct acl_object_label *g_tmp, **guser;
56929+ unsigned int len;
56930+ char *tmp;
56931+
56932+ if (obj->globbed == NULL)
56933+ return 0;
56934+
56935+ guser = &obj->globbed;
56936+ while (*guser) {
56937+ g_tmp = (struct acl_object_label *)
56938+ acl_alloc(sizeof (struct acl_object_label));
56939+ if (g_tmp == NULL)
56940+ return -ENOMEM;
56941+
56942+ if (copy_from_user(g_tmp, *guser,
56943+ sizeof (struct acl_object_label)))
56944+ return -EFAULT;
56945+
56946+ len = strnlen_user(g_tmp->filename, PATH_MAX);
56947+
56948+ if (!len || len >= PATH_MAX)
56949+ return -EINVAL;
56950+
56951+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56952+ return -ENOMEM;
56953+
56954+ if (copy_from_user(tmp, g_tmp->filename, len))
56955+ return -EFAULT;
56956+ tmp[len-1] = '\0';
56957+ g_tmp->filename = tmp;
56958+
56959+ *guser = g_tmp;
56960+ guser = &(g_tmp->next);
56961+ }
56962+
56963+ return 0;
56964+}
56965+
56966+static int
56967+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
56968+ struct acl_role_label *role)
56969+{
56970+ struct acl_object_label *o_tmp;
56971+ unsigned int len;
56972+ int ret;
56973+ char *tmp;
56974+
56975+ while (userp) {
56976+ if ((o_tmp = (struct acl_object_label *)
56977+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
56978+ return -ENOMEM;
56979+
56980+ if (copy_from_user(o_tmp, userp,
56981+ sizeof (struct acl_object_label)))
56982+ return -EFAULT;
56983+
56984+ userp = o_tmp->prev;
56985+
56986+ len = strnlen_user(o_tmp->filename, PATH_MAX);
56987+
56988+ if (!len || len >= PATH_MAX)
56989+ return -EINVAL;
56990+
56991+ if ((tmp = (char *) acl_alloc(len)) == NULL)
56992+ return -ENOMEM;
56993+
56994+ if (copy_from_user(tmp, o_tmp->filename, len))
56995+ return -EFAULT;
56996+ tmp[len-1] = '\0';
56997+ o_tmp->filename = tmp;
56998+
56999+ insert_acl_obj_label(o_tmp, subj);
57000+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
57001+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
57002+ return -ENOMEM;
57003+
57004+ ret = copy_user_glob(o_tmp);
57005+ if (ret)
57006+ return ret;
57007+
57008+ if (o_tmp->nested) {
57009+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
57010+ if (IS_ERR(o_tmp->nested))
57011+ return PTR_ERR(o_tmp->nested);
57012+
57013+ /* insert into nested subject list */
57014+ o_tmp->nested->next = role->hash->first;
57015+ role->hash->first = o_tmp->nested;
57016+ }
57017+ }
57018+
57019+ return 0;
57020+}
57021+
57022+static __u32
57023+count_user_subjs(struct acl_subject_label *userp)
57024+{
57025+ struct acl_subject_label s_tmp;
57026+ __u32 num = 0;
57027+
57028+ while (userp) {
57029+ if (copy_from_user(&s_tmp, userp,
57030+ sizeof (struct acl_subject_label)))
57031+ break;
57032+
57033+ userp = s_tmp.prev;
57034+ /* do not count nested subjects against this count, since
57035+ they are not included in the hash table, but are
57036+ attached to objects. We have already counted
57037+ the subjects in userspace for the allocation
57038+ stack
57039+ */
57040+ if (!(s_tmp.mode & GR_NESTED))
57041+ num++;
57042+ }
57043+
57044+ return num;
57045+}
57046+
57047+static int
57048+copy_user_allowedips(struct acl_role_label *rolep)
57049+{
57050+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
57051+
57052+ ruserip = rolep->allowed_ips;
57053+
57054+ while (ruserip) {
57055+ rlast = rtmp;
57056+
57057+ if ((rtmp = (struct role_allowed_ip *)
57058+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
57059+ return -ENOMEM;
57060+
57061+ if (copy_from_user(rtmp, ruserip,
57062+ sizeof (struct role_allowed_ip)))
57063+ return -EFAULT;
57064+
57065+ ruserip = rtmp->prev;
57066+
57067+ if (!rlast) {
57068+ rtmp->prev = NULL;
57069+ rolep->allowed_ips = rtmp;
57070+ } else {
57071+ rlast->next = rtmp;
57072+ rtmp->prev = rlast;
57073+ }
57074+
57075+ if (!ruserip)
57076+ rtmp->next = NULL;
57077+ }
57078+
57079+ return 0;
57080+}
57081+
57082+static int
57083+copy_user_transitions(struct acl_role_label *rolep)
57084+{
57085+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
57086+
57087+ unsigned int len;
57088+ char *tmp;
57089+
57090+ rusertp = rolep->transitions;
57091+
57092+ while (rusertp) {
57093+ rlast = rtmp;
57094+
57095+ if ((rtmp = (struct role_transition *)
57096+ acl_alloc(sizeof (struct role_transition))) == NULL)
57097+ return -ENOMEM;
57098+
57099+ if (copy_from_user(rtmp, rusertp,
57100+ sizeof (struct role_transition)))
57101+ return -EFAULT;
57102+
57103+ rusertp = rtmp->prev;
57104+
57105+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
57106+
57107+ if (!len || len >= GR_SPROLE_LEN)
57108+ return -EINVAL;
57109+
57110+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57111+ return -ENOMEM;
57112+
57113+ if (copy_from_user(tmp, rtmp->rolename, len))
57114+ return -EFAULT;
57115+ tmp[len-1] = '\0';
57116+ rtmp->rolename = tmp;
57117+
57118+ if (!rlast) {
57119+ rtmp->prev = NULL;
57120+ rolep->transitions = rtmp;
57121+ } else {
57122+ rlast->next = rtmp;
57123+ rtmp->prev = rlast;
57124+ }
57125+
57126+ if (!rusertp)
57127+ rtmp->next = NULL;
57128+ }
57129+
57130+ return 0;
57131+}
57132+
57133+static struct acl_subject_label *
57134+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
57135+{
57136+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
57137+ unsigned int len;
57138+ char *tmp;
57139+ __u32 num_objs;
57140+ struct acl_ip_label **i_tmp, *i_utmp2;
57141+ struct gr_hash_struct ghash;
57142+ struct subject_map *subjmap;
57143+ unsigned int i_num;
57144+ int err;
57145+
57146+ s_tmp = lookup_subject_map(userp);
57147+
57148+ /* we've already copied this subject into the kernel, just return
57149+ the reference to it, and don't copy it over again
57150+ */
57151+ if (s_tmp)
57152+ return(s_tmp);
57153+
57154+ if ((s_tmp = (struct acl_subject_label *)
57155+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
57156+ return ERR_PTR(-ENOMEM);
57157+
57158+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
57159+ if (subjmap == NULL)
57160+ return ERR_PTR(-ENOMEM);
57161+
57162+ subjmap->user = userp;
57163+ subjmap->kernel = s_tmp;
57164+ insert_subj_map_entry(subjmap);
57165+
57166+ if (copy_from_user(s_tmp, userp,
57167+ sizeof (struct acl_subject_label)))
57168+ return ERR_PTR(-EFAULT);
57169+
57170+ len = strnlen_user(s_tmp->filename, PATH_MAX);
57171+
57172+ if (!len || len >= PATH_MAX)
57173+ return ERR_PTR(-EINVAL);
57174+
57175+ if ((tmp = (char *) acl_alloc(len)) == NULL)
57176+ return ERR_PTR(-ENOMEM);
57177+
57178+ if (copy_from_user(tmp, s_tmp->filename, len))
57179+ return ERR_PTR(-EFAULT);
57180+ tmp[len-1] = '\0';
57181+ s_tmp->filename = tmp;
57182+
57183+ if (!strcmp(s_tmp->filename, "/"))
57184+ role->root_label = s_tmp;
57185+
57186+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
57187+ return ERR_PTR(-EFAULT);
57188+
57189+ /* copy user and group transition tables */
57190+
57191+ if (s_tmp->user_trans_num) {
57192+ uid_t *uidlist;
57193+
57194+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
57195+ if (uidlist == NULL)
57196+ return ERR_PTR(-ENOMEM);
57197+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
57198+ return ERR_PTR(-EFAULT);
57199+
57200+ s_tmp->user_transitions = uidlist;
57201+ }
57202+
57203+ if (s_tmp->group_trans_num) {
57204+ gid_t *gidlist;
57205+
57206+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
57207+ if (gidlist == NULL)
57208+ return ERR_PTR(-ENOMEM);
57209+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
57210+ return ERR_PTR(-EFAULT);
57211+
57212+ s_tmp->group_transitions = gidlist;
57213+ }
57214+
57215+ /* set up object hash table */
57216+ num_objs = count_user_objs(ghash.first);
57217+
57218+ s_tmp->obj_hash_size = num_objs;
57219+ s_tmp->obj_hash =
57220+ (struct acl_object_label **)
57221+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
57222+
57223+ if (!s_tmp->obj_hash)
57224+ return ERR_PTR(-ENOMEM);
57225+
57226+ memset(s_tmp->obj_hash, 0,
57227+ s_tmp->obj_hash_size *
57228+ sizeof (struct acl_object_label *));
57229+
57230+ /* add in objects */
57231+ err = copy_user_objs(ghash.first, s_tmp, role);
57232+
57233+ if (err)
57234+ return ERR_PTR(err);
57235+
57236+ /* set pointer for parent subject */
57237+ if (s_tmp->parent_subject) {
57238+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
57239+
57240+ if (IS_ERR(s_tmp2))
57241+ return s_tmp2;
57242+
57243+ s_tmp->parent_subject = s_tmp2;
57244+ }
57245+
57246+ /* add in ip acls */
57247+
57248+ if (!s_tmp->ip_num) {
57249+ s_tmp->ips = NULL;
57250+ goto insert;
57251+ }
57252+
57253+ i_tmp =
57254+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
57255+ sizeof (struct acl_ip_label *));
57256+
57257+ if (!i_tmp)
57258+ return ERR_PTR(-ENOMEM);
57259+
57260+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
57261+ *(i_tmp + i_num) =
57262+ (struct acl_ip_label *)
57263+ acl_alloc(sizeof (struct acl_ip_label));
57264+ if (!*(i_tmp + i_num))
57265+ return ERR_PTR(-ENOMEM);
57266+
57267+ if (copy_from_user
57268+ (&i_utmp2, s_tmp->ips + i_num,
57269+ sizeof (struct acl_ip_label *)))
57270+ return ERR_PTR(-EFAULT);
57271+
57272+ if (copy_from_user
57273+ (*(i_tmp + i_num), i_utmp2,
57274+ sizeof (struct acl_ip_label)))
57275+ return ERR_PTR(-EFAULT);
57276+
57277+ if ((*(i_tmp + i_num))->iface == NULL)
57278+ continue;
57279+
57280+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
57281+ if (!len || len >= IFNAMSIZ)
57282+ return ERR_PTR(-EINVAL);
57283+ tmp = acl_alloc(len);
57284+ if (tmp == NULL)
57285+ return ERR_PTR(-ENOMEM);
57286+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
57287+ return ERR_PTR(-EFAULT);
57288+ (*(i_tmp + i_num))->iface = tmp;
57289+ }
57290+
57291+ s_tmp->ips = i_tmp;
57292+
57293+insert:
57294+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
57295+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
57296+ return ERR_PTR(-ENOMEM);
57297+
57298+ return s_tmp;
57299+}
57300+
57301+static int
57302+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
57303+{
57304+ struct acl_subject_label s_pre;
57305+ struct acl_subject_label * ret;
57306+ int err;
57307+
57308+ while (userp) {
57309+ if (copy_from_user(&s_pre, userp,
57310+ sizeof (struct acl_subject_label)))
57311+ return -EFAULT;
57312+
57313+ /* do not add nested subjects here, add
57314+ while parsing objects
57315+ */
57316+
57317+ if (s_pre.mode & GR_NESTED) {
57318+ userp = s_pre.prev;
57319+ continue;
57320+ }
57321+
57322+ ret = do_copy_user_subj(userp, role);
57323+
57324+ err = PTR_ERR(ret);
57325+ if (IS_ERR(ret))
57326+ return err;
57327+
57328+ insert_acl_subj_label(ret, role);
57329+
57330+ userp = s_pre.prev;
57331+ }
57332+
57333+ return 0;
57334+}
57335+
57336+static int
57337+copy_user_acl(struct gr_arg *arg)
57338+{
57339+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
57340+ struct sprole_pw *sptmp;
57341+ struct gr_hash_struct *ghash;
57342+ uid_t *domainlist;
57343+ unsigned int r_num;
57344+ unsigned int len;
57345+ char *tmp;
57346+ int err = 0;
57347+ __u16 i;
57348+ __u32 num_subjs;
57349+
57350+ /* we need a default and kernel role */
57351+ if (arg->role_db.num_roles < 2)
57352+ return -EINVAL;
57353+
57354+ /* copy special role authentication info from userspace */
57355+
57356+ num_sprole_pws = arg->num_sprole_pws;
57357+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
57358+
57359+ if (!acl_special_roles) {
57360+ err = -ENOMEM;
57361+ goto cleanup;
57362+ }
57363+
57364+ for (i = 0; i < num_sprole_pws; i++) {
57365+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
57366+ if (!sptmp) {
57367+ err = -ENOMEM;
57368+ goto cleanup;
57369+ }
57370+ if (copy_from_user(sptmp, arg->sprole_pws + i,
57371+ sizeof (struct sprole_pw))) {
57372+ err = -EFAULT;
57373+ goto cleanup;
57374+ }
57375+
57376+ len =
57377+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
57378+
57379+ if (!len || len >= GR_SPROLE_LEN) {
57380+ err = -EINVAL;
57381+ goto cleanup;
57382+ }
57383+
57384+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
57385+ err = -ENOMEM;
57386+ goto cleanup;
57387+ }
57388+
57389+ if (copy_from_user(tmp, sptmp->rolename, len)) {
57390+ err = -EFAULT;
57391+ goto cleanup;
57392+ }
57393+ tmp[len-1] = '\0';
57394+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
57395+ printk(KERN_ALERT "Copying special role %s\n", tmp);
57396+#endif
57397+ sptmp->rolename = tmp;
57398+ acl_special_roles[i] = sptmp;
57399+ }
57400+
57401+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
57402+
57403+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
57404+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
57405+
57406+ if (!r_tmp) {
57407+ err = -ENOMEM;
57408+ goto cleanup;
57409+ }
57410+
57411+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
57412+ sizeof (struct acl_role_label *))) {
57413+ err = -EFAULT;
57414+ goto cleanup;
57415+ }
57416+
57417+ if (copy_from_user(r_tmp, r_utmp2,
57418+ sizeof (struct acl_role_label))) {
57419+ err = -EFAULT;
57420+ goto cleanup;
57421+ }
57422+
57423+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
57424+
57425+ if (!len || len >= PATH_MAX) {
57426+ err = -EINVAL;
57427+ goto cleanup;
57428+ }
57429+
57430+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
57431+ err = -ENOMEM;
57432+ goto cleanup;
57433+ }
57434+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
57435+ err = -EFAULT;
57436+ goto cleanup;
57437+ }
57438+ tmp[len-1] = '\0';
57439+ r_tmp->rolename = tmp;
57440+
57441+ if (!strcmp(r_tmp->rolename, "default")
57442+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
57443+ default_role = r_tmp;
57444+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
57445+ kernel_role = r_tmp;
57446+ }
57447+
57448+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
57449+ err = -ENOMEM;
57450+ goto cleanup;
57451+ }
57452+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
57453+ err = -EFAULT;
57454+ goto cleanup;
57455+ }
57456+
57457+ r_tmp->hash = ghash;
57458+
57459+ num_subjs = count_user_subjs(r_tmp->hash->first);
57460+
57461+ r_tmp->subj_hash_size = num_subjs;
57462+ r_tmp->subj_hash =
57463+ (struct acl_subject_label **)
57464+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
57465+
57466+ if (!r_tmp->subj_hash) {
57467+ err = -ENOMEM;
57468+ goto cleanup;
57469+ }
57470+
57471+ err = copy_user_allowedips(r_tmp);
57472+ if (err)
57473+ goto cleanup;
57474+
57475+ /* copy domain info */
57476+ if (r_tmp->domain_children != NULL) {
57477+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
57478+ if (domainlist == NULL) {
57479+ err = -ENOMEM;
57480+ goto cleanup;
57481+ }
57482+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
57483+ err = -EFAULT;
57484+ goto cleanup;
57485+ }
57486+ r_tmp->domain_children = domainlist;
57487+ }
57488+
57489+ err = copy_user_transitions(r_tmp);
57490+ if (err)
57491+ goto cleanup;
57492+
57493+ memset(r_tmp->subj_hash, 0,
57494+ r_tmp->subj_hash_size *
57495+ sizeof (struct acl_subject_label *));
57496+
57497+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
57498+
57499+ if (err)
57500+ goto cleanup;
57501+
57502+ /* set nested subject list to null */
57503+ r_tmp->hash->first = NULL;
57504+
57505+ insert_acl_role_label(r_tmp);
57506+ }
57507+
57508+ goto return_err;
57509+ cleanup:
57510+ free_variables();
57511+ return_err:
57512+ return err;
57513+
57514+}
57515+
57516+static int
57517+gracl_init(struct gr_arg *args)
57518+{
57519+ int error = 0;
57520+
57521+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
57522+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
57523+
57524+ if (init_variables(args)) {
57525+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
57526+ error = -ENOMEM;
57527+ free_variables();
57528+ goto out;
57529+ }
57530+
57531+ error = copy_user_acl(args);
57532+ free_init_variables();
57533+ if (error) {
57534+ free_variables();
57535+ goto out;
57536+ }
57537+
57538+ if ((error = gr_set_acls(0))) {
57539+ free_variables();
57540+ goto out;
57541+ }
57542+
57543+ pax_open_kernel();
57544+ gr_status |= GR_READY;
57545+ pax_close_kernel();
57546+
57547+ out:
57548+ return error;
57549+}
57550+
57551+/* derived from glibc fnmatch() 0: match, 1: no match*/
57552+
57553+static int
57554+glob_match(const char *p, const char *n)
57555+{
57556+ char c;
57557+
57558+ while ((c = *p++) != '\0') {
57559+ switch (c) {
57560+ case '?':
57561+ if (*n == '\0')
57562+ return 1;
57563+ else if (*n == '/')
57564+ return 1;
57565+ break;
57566+ case '\\':
57567+ if (*n != c)
57568+ return 1;
57569+ break;
57570+ case '*':
57571+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
57572+ if (*n == '/')
57573+ return 1;
57574+ else if (c == '?') {
57575+ if (*n == '\0')
57576+ return 1;
57577+ else
57578+ ++n;
57579+ }
57580+ }
57581+ if (c == '\0') {
57582+ return 0;
57583+ } else {
57584+ const char *endp;
57585+
57586+ if ((endp = strchr(n, '/')) == NULL)
57587+ endp = n + strlen(n);
57588+
57589+ if (c == '[') {
57590+ for (--p; n < endp; ++n)
57591+ if (!glob_match(p, n))
57592+ return 0;
57593+ } else if (c == '/') {
57594+ while (*n != '\0' && *n != '/')
57595+ ++n;
57596+ if (*n == '/' && !glob_match(p, n + 1))
57597+ return 0;
57598+ } else {
57599+ for (--p; n < endp; ++n)
57600+ if (*n == c && !glob_match(p, n))
57601+ return 0;
57602+ }
57603+
57604+ return 1;
57605+ }
57606+ case '[':
57607+ {
57608+ int not;
57609+ char cold;
57610+
57611+ if (*n == '\0' || *n == '/')
57612+ return 1;
57613+
57614+ not = (*p == '!' || *p == '^');
57615+ if (not)
57616+ ++p;
57617+
57618+ c = *p++;
57619+ for (;;) {
57620+ unsigned char fn = (unsigned char)*n;
57621+
57622+ if (c == '\0')
57623+ return 1;
57624+ else {
57625+ if (c == fn)
57626+ goto matched;
57627+ cold = c;
57628+ c = *p++;
57629+
57630+ if (c == '-' && *p != ']') {
57631+ unsigned char cend = *p++;
57632+
57633+ if (cend == '\0')
57634+ return 1;
57635+
57636+ if (cold <= fn && fn <= cend)
57637+ goto matched;
57638+
57639+ c = *p++;
57640+ }
57641+ }
57642+
57643+ if (c == ']')
57644+ break;
57645+ }
57646+ if (!not)
57647+ return 1;
57648+ break;
57649+ matched:
57650+ while (c != ']') {
57651+ if (c == '\0')
57652+ return 1;
57653+
57654+ c = *p++;
57655+ }
57656+ if (not)
57657+ return 1;
57658+ }
57659+ break;
57660+ default:
57661+ if (c != *n)
57662+ return 1;
57663+ }
57664+
57665+ ++n;
57666+ }
57667+
57668+ if (*n == '\0')
57669+ return 0;
57670+
57671+ if (*n == '/')
57672+ return 0;
57673+
57674+ return 1;
57675+}
57676+
57677+static struct acl_object_label *
57678+chk_glob_label(struct acl_object_label *globbed,
57679+ struct dentry *dentry, struct vfsmount *mnt, char **path)
57680+{
57681+ struct acl_object_label *tmp;
57682+
57683+ if (*path == NULL)
57684+ *path = gr_to_filename_nolock(dentry, mnt);
57685+
57686+ tmp = globbed;
57687+
57688+ while (tmp) {
57689+ if (!glob_match(tmp->filename, *path))
57690+ return tmp;
57691+ tmp = tmp->next;
57692+ }
57693+
57694+ return NULL;
57695+}
57696+
57697+static struct acl_object_label *
57698+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57699+ const ino_t curr_ino, const dev_t curr_dev,
57700+ const struct acl_subject_label *subj, char **path, const int checkglob)
57701+{
57702+ struct acl_subject_label *tmpsubj;
57703+ struct acl_object_label *retval;
57704+ struct acl_object_label *retval2;
57705+
57706+ tmpsubj = (struct acl_subject_label *) subj;
57707+ read_lock(&gr_inode_lock);
57708+ do {
57709+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
57710+ if (retval) {
57711+ if (checkglob && retval->globbed) {
57712+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
57713+ (struct vfsmount *)orig_mnt, path);
57714+ if (retval2)
57715+ retval = retval2;
57716+ }
57717+ break;
57718+ }
57719+ } while ((tmpsubj = tmpsubj->parent_subject));
57720+ read_unlock(&gr_inode_lock);
57721+
57722+ return retval;
57723+}
57724+
57725+static __inline__ struct acl_object_label *
57726+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
57727+ const struct dentry *curr_dentry,
57728+ const struct acl_subject_label *subj, char **path, const int checkglob)
57729+{
57730+ int newglob = checkglob;
57731+
57732+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
57733+ as we don't want a / * rule to match instead of the / object
57734+ don't do this for create lookups that call this function though, since they're looking up
57735+ on the parent and thus need globbing checks on all paths
57736+ */
57737+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
57738+ newglob = GR_NO_GLOB;
57739+
57740+ return __full_lookup(orig_dentry, orig_mnt,
57741+ curr_dentry->d_inode->i_ino,
57742+ __get_dev(curr_dentry), subj, path, newglob);
57743+}
57744+
57745+static struct acl_object_label *
57746+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57747+ const struct acl_subject_label *subj, char *path, const int checkglob)
57748+{
57749+ struct dentry *dentry = (struct dentry *) l_dentry;
57750+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57751+ struct acl_object_label *retval;
57752+
57753+ spin_lock(&dcache_lock);
57754+ spin_lock(&vfsmount_lock);
57755+
57756+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
57757+#ifdef CONFIG_NET
57758+ mnt == sock_mnt ||
57759+#endif
57760+#ifdef CONFIG_HUGETLBFS
57761+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
57762+#endif
57763+ /* ignore Eric Biederman */
57764+ IS_PRIVATE(l_dentry->d_inode))) {
57765+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
57766+ goto out;
57767+ }
57768+
57769+ for (;;) {
57770+ if (dentry == real_root && mnt == real_root_mnt)
57771+ break;
57772+
57773+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57774+ if (mnt->mnt_parent == mnt)
57775+ break;
57776+
57777+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57778+ if (retval != NULL)
57779+ goto out;
57780+
57781+ dentry = mnt->mnt_mountpoint;
57782+ mnt = mnt->mnt_parent;
57783+ continue;
57784+ }
57785+
57786+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57787+ if (retval != NULL)
57788+ goto out;
57789+
57790+ dentry = dentry->d_parent;
57791+ }
57792+
57793+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
57794+
57795+ if (retval == NULL)
57796+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
57797+out:
57798+ spin_unlock(&vfsmount_lock);
57799+ spin_unlock(&dcache_lock);
57800+
57801+ BUG_ON(retval == NULL);
57802+
57803+ return retval;
57804+}
57805+
57806+static __inline__ struct acl_object_label *
57807+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57808+ const struct acl_subject_label *subj)
57809+{
57810+ char *path = NULL;
57811+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
57812+}
57813+
57814+static __inline__ struct acl_object_label *
57815+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57816+ const struct acl_subject_label *subj)
57817+{
57818+ char *path = NULL;
57819+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
57820+}
57821+
57822+static __inline__ struct acl_object_label *
57823+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57824+ const struct acl_subject_label *subj, char *path)
57825+{
57826+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
57827+}
57828+
57829+static struct acl_subject_label *
57830+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
57831+ const struct acl_role_label *role)
57832+{
57833+ struct dentry *dentry = (struct dentry *) l_dentry;
57834+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
57835+ struct acl_subject_label *retval;
57836+
57837+ spin_lock(&dcache_lock);
57838+ spin_lock(&vfsmount_lock);
57839+
57840+ for (;;) {
57841+ if (dentry == real_root && mnt == real_root_mnt)
57842+ break;
57843+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
57844+ if (mnt->mnt_parent == mnt)
57845+ break;
57846+
57847+ read_lock(&gr_inode_lock);
57848+ retval =
57849+ lookup_acl_subj_label(dentry->d_inode->i_ino,
57850+ __get_dev(dentry), role);
57851+ read_unlock(&gr_inode_lock);
57852+ if (retval != NULL)
57853+ goto out;
57854+
57855+ dentry = mnt->mnt_mountpoint;
57856+ mnt = mnt->mnt_parent;
57857+ continue;
57858+ }
57859+
57860+ read_lock(&gr_inode_lock);
57861+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57862+ __get_dev(dentry), role);
57863+ read_unlock(&gr_inode_lock);
57864+ if (retval != NULL)
57865+ goto out;
57866+
57867+ dentry = dentry->d_parent;
57868+ }
57869+
57870+ read_lock(&gr_inode_lock);
57871+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
57872+ __get_dev(dentry), role);
57873+ read_unlock(&gr_inode_lock);
57874+
57875+ if (unlikely(retval == NULL)) {
57876+ read_lock(&gr_inode_lock);
57877+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
57878+ __get_dev(real_root), role);
57879+ read_unlock(&gr_inode_lock);
57880+ }
57881+out:
57882+ spin_unlock(&vfsmount_lock);
57883+ spin_unlock(&dcache_lock);
57884+
57885+ BUG_ON(retval == NULL);
57886+
57887+ return retval;
57888+}
57889+
57890+static void
57891+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
57892+{
57893+ struct task_struct *task = current;
57894+ const struct cred *cred = current_cred();
57895+
57896+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57897+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57898+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57899+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
57900+
57901+ return;
57902+}
57903+
57904+static void
57905+gr_log_learn_sysctl(const char *path, const __u32 mode)
57906+{
57907+ struct task_struct *task = current;
57908+ const struct cred *cred = current_cred();
57909+
57910+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
57911+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57912+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57913+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
57914+
57915+ return;
57916+}
57917+
57918+static void
57919+gr_log_learn_id_change(const char type, const unsigned int real,
57920+ const unsigned int effective, const unsigned int fs)
57921+{
57922+ struct task_struct *task = current;
57923+ const struct cred *cred = current_cred();
57924+
57925+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
57926+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
57927+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
57928+ type, real, effective, fs, &task->signal->saved_ip);
57929+
57930+ return;
57931+}
57932+
57933+__u32
57934+gr_search_file(const struct dentry * dentry, const __u32 mode,
57935+ const struct vfsmount * mnt)
57936+{
57937+ __u32 retval = mode;
57938+ struct acl_subject_label *curracl;
57939+ struct acl_object_label *currobj;
57940+
57941+ if (unlikely(!(gr_status & GR_READY)))
57942+ return (mode & ~GR_AUDITS);
57943+
57944+ curracl = current->acl;
57945+
57946+ currobj = chk_obj_label(dentry, mnt, curracl);
57947+ retval = currobj->mode & mode;
57948+
57949+ /* if we're opening a specified transfer file for writing
57950+ (e.g. /dev/initctl), then transfer our role to init
57951+ */
57952+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
57953+ current->role->roletype & GR_ROLE_PERSIST)) {
57954+ struct task_struct *task = init_pid_ns.child_reaper;
57955+
57956+ if (task->role != current->role) {
57957+ task->acl_sp_role = 0;
57958+ task->acl_role_id = current->acl_role_id;
57959+ task->role = current->role;
57960+ rcu_read_lock();
57961+ read_lock(&grsec_exec_file_lock);
57962+ gr_apply_subject_to_task(task);
57963+ read_unlock(&grsec_exec_file_lock);
57964+ rcu_read_unlock();
57965+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
57966+ }
57967+ }
57968+
57969+ if (unlikely
57970+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
57971+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
57972+ __u32 new_mode = mode;
57973+
57974+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
57975+
57976+ retval = new_mode;
57977+
57978+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
57979+ new_mode |= GR_INHERIT;
57980+
57981+ if (!(mode & GR_NOLEARN))
57982+ gr_log_learn(dentry, mnt, new_mode);
57983+ }
57984+
57985+ return retval;
57986+}
57987+
57988+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
57989+ const struct dentry *parent,
57990+ const struct vfsmount *mnt)
57991+{
57992+ struct name_entry *match;
57993+ struct acl_object_label *matchpo;
57994+ struct acl_subject_label *curracl;
57995+ char *path;
57996+
57997+ if (unlikely(!(gr_status & GR_READY)))
57998+ return NULL;
57999+
58000+ preempt_disable();
58001+ path = gr_to_filename_rbac(new_dentry, mnt);
58002+ match = lookup_name_entry_create(path);
58003+
58004+ curracl = current->acl;
58005+
58006+ if (match) {
58007+ read_lock(&gr_inode_lock);
58008+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
58009+ read_unlock(&gr_inode_lock);
58010+
58011+ if (matchpo) {
58012+ preempt_enable();
58013+ return matchpo;
58014+ }
58015+ }
58016+
58017+ // lookup parent
58018+
58019+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
58020+
58021+ preempt_enable();
58022+ return matchpo;
58023+}
58024+
58025+__u32
58026+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
58027+ const struct vfsmount * mnt, const __u32 mode)
58028+{
58029+ struct acl_object_label *matchpo;
58030+ __u32 retval;
58031+
58032+ if (unlikely(!(gr_status & GR_READY)))
58033+ return (mode & ~GR_AUDITS);
58034+
58035+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
58036+
58037+ retval = matchpo->mode & mode;
58038+
58039+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
58040+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58041+ __u32 new_mode = mode;
58042+
58043+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
58044+
58045+ gr_log_learn(new_dentry, mnt, new_mode);
58046+ return new_mode;
58047+ }
58048+
58049+ return retval;
58050+}
58051+
58052+__u32
58053+gr_check_link(const struct dentry * new_dentry,
58054+ const struct dentry * parent_dentry,
58055+ const struct vfsmount * parent_mnt,
58056+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
58057+{
58058+ struct acl_object_label *obj;
58059+ __u32 oldmode, newmode;
58060+ __u32 needmode;
58061+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
58062+ GR_DELETE | GR_INHERIT;
58063+
58064+ if (unlikely(!(gr_status & GR_READY)))
58065+ return (GR_CREATE | GR_LINK);
58066+
58067+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
58068+ oldmode = obj->mode;
58069+
58070+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
58071+ newmode = obj->mode;
58072+
58073+ needmode = newmode & checkmodes;
58074+
58075+ // old name for hardlink must have at least the permissions of the new name
58076+ if ((oldmode & needmode) != needmode)
58077+ goto bad;
58078+
58079+ // if old name had restrictions/auditing, make sure the new name does as well
58080+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
58081+
58082+ // don't allow hardlinking of suid/sgid files without permission
58083+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58084+ needmode |= GR_SETID;
58085+
58086+ if ((newmode & needmode) != needmode)
58087+ goto bad;
58088+
58089+ // enforce minimum permissions
58090+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
58091+ return newmode;
58092+bad:
58093+ needmode = oldmode;
58094+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
58095+ needmode |= GR_SETID;
58096+
58097+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
58098+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
58099+ return (GR_CREATE | GR_LINK);
58100+ } else if (newmode & GR_SUPPRESS)
58101+ return GR_SUPPRESS;
58102+ else
58103+ return 0;
58104+}
58105+
58106+int
58107+gr_check_hidden_task(const struct task_struct *task)
58108+{
58109+ if (unlikely(!(gr_status & GR_READY)))
58110+ return 0;
58111+
58112+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
58113+ return 1;
58114+
58115+ return 0;
58116+}
58117+
58118+int
58119+gr_check_protected_task(const struct task_struct *task)
58120+{
58121+ if (unlikely(!(gr_status & GR_READY) || !task))
58122+ return 0;
58123+
58124+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58125+ task->acl != current->acl)
58126+ return 1;
58127+
58128+ return 0;
58129+}
58130+
58131+int
58132+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
58133+{
58134+ struct task_struct *p;
58135+ int ret = 0;
58136+
58137+ if (unlikely(!(gr_status & GR_READY) || !pid))
58138+ return ret;
58139+
58140+ read_lock(&tasklist_lock);
58141+ do_each_pid_task(pid, type, p) {
58142+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
58143+ p->acl != current->acl) {
58144+ ret = 1;
58145+ goto out;
58146+ }
58147+ } while_each_pid_task(pid, type, p);
58148+out:
58149+ read_unlock(&tasklist_lock);
58150+
58151+ return ret;
58152+}
58153+
58154+void
58155+gr_copy_label(struct task_struct *tsk)
58156+{
58157+ tsk->signal->used_accept = 0;
58158+ tsk->acl_sp_role = 0;
58159+ tsk->acl_role_id = current->acl_role_id;
58160+ tsk->acl = current->acl;
58161+ tsk->role = current->role;
58162+ tsk->signal->curr_ip = current->signal->curr_ip;
58163+ tsk->signal->saved_ip = current->signal->saved_ip;
58164+ if (current->exec_file)
58165+ get_file(current->exec_file);
58166+ tsk->exec_file = current->exec_file;
58167+ tsk->is_writable = current->is_writable;
58168+ if (unlikely(current->signal->used_accept)) {
58169+ current->signal->curr_ip = 0;
58170+ current->signal->saved_ip = 0;
58171+ }
58172+
58173+ return;
58174+}
58175+
58176+static void
58177+gr_set_proc_res(struct task_struct *task)
58178+{
58179+ struct acl_subject_label *proc;
58180+ unsigned short i;
58181+
58182+ proc = task->acl;
58183+
58184+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
58185+ return;
58186+
58187+ for (i = 0; i < RLIM_NLIMITS; i++) {
58188+ if (!(proc->resmask & (1 << i)))
58189+ continue;
58190+
58191+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
58192+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
58193+ }
58194+
58195+ return;
58196+}
58197+
58198+extern int __gr_process_user_ban(struct user_struct *user);
58199+
58200+int
58201+gr_check_user_change(int real, int effective, int fs)
58202+{
58203+ unsigned int i;
58204+ __u16 num;
58205+ uid_t *uidlist;
58206+ int curuid;
58207+ int realok = 0;
58208+ int effectiveok = 0;
58209+ int fsok = 0;
58210+
58211+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
58212+ struct user_struct *user;
58213+
58214+ if (real == -1)
58215+ goto skipit;
58216+
58217+ user = find_user(real);
58218+ if (user == NULL)
58219+ goto skipit;
58220+
58221+ if (__gr_process_user_ban(user)) {
58222+ /* for find_user */
58223+ free_uid(user);
58224+ return 1;
58225+ }
58226+
58227+ /* for find_user */
58228+ free_uid(user);
58229+
58230+skipit:
58231+#endif
58232+
58233+ if (unlikely(!(gr_status & GR_READY)))
58234+ return 0;
58235+
58236+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58237+ gr_log_learn_id_change('u', real, effective, fs);
58238+
58239+ num = current->acl->user_trans_num;
58240+ uidlist = current->acl->user_transitions;
58241+
58242+ if (uidlist == NULL)
58243+ return 0;
58244+
58245+ if (real == -1)
58246+ realok = 1;
58247+ if (effective == -1)
58248+ effectiveok = 1;
58249+ if (fs == -1)
58250+ fsok = 1;
58251+
58252+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
58253+ for (i = 0; i < num; i++) {
58254+ curuid = (int)uidlist[i];
58255+ if (real == curuid)
58256+ realok = 1;
58257+ if (effective == curuid)
58258+ effectiveok = 1;
58259+ if (fs == curuid)
58260+ fsok = 1;
58261+ }
58262+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
58263+ for (i = 0; i < num; i++) {
58264+ curuid = (int)uidlist[i];
58265+ if (real == curuid)
58266+ break;
58267+ if (effective == curuid)
58268+ break;
58269+ if (fs == curuid)
58270+ break;
58271+ }
58272+ /* not in deny list */
58273+ if (i == num) {
58274+ realok = 1;
58275+ effectiveok = 1;
58276+ fsok = 1;
58277+ }
58278+ }
58279+
58280+ if (realok && effectiveok && fsok)
58281+ return 0;
58282+ else {
58283+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58284+ return 1;
58285+ }
58286+}
58287+
58288+int
58289+gr_check_group_change(int real, int effective, int fs)
58290+{
58291+ unsigned int i;
58292+ __u16 num;
58293+ gid_t *gidlist;
58294+ int curgid;
58295+ int realok = 0;
58296+ int effectiveok = 0;
58297+ int fsok = 0;
58298+
58299+ if (unlikely(!(gr_status & GR_READY)))
58300+ return 0;
58301+
58302+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
58303+ gr_log_learn_id_change('g', real, effective, fs);
58304+
58305+ num = current->acl->group_trans_num;
58306+ gidlist = current->acl->group_transitions;
58307+
58308+ if (gidlist == NULL)
58309+ return 0;
58310+
58311+ if (real == -1)
58312+ realok = 1;
58313+ if (effective == -1)
58314+ effectiveok = 1;
58315+ if (fs == -1)
58316+ fsok = 1;
58317+
58318+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
58319+ for (i = 0; i < num; i++) {
58320+ curgid = (int)gidlist[i];
58321+ if (real == curgid)
58322+ realok = 1;
58323+ if (effective == curgid)
58324+ effectiveok = 1;
58325+ if (fs == curgid)
58326+ fsok = 1;
58327+ }
58328+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
58329+ for (i = 0; i < num; i++) {
58330+ curgid = (int)gidlist[i];
58331+ if (real == curgid)
58332+ break;
58333+ if (effective == curgid)
58334+ break;
58335+ if (fs == curgid)
58336+ break;
58337+ }
58338+ /* not in deny list */
58339+ if (i == num) {
58340+ realok = 1;
58341+ effectiveok = 1;
58342+ fsok = 1;
58343+ }
58344+ }
58345+
58346+ if (realok && effectiveok && fsok)
58347+ return 0;
58348+ else {
58349+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
58350+ return 1;
58351+ }
58352+}
58353+
58354+void
58355+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
58356+{
58357+ struct acl_role_label *role = task->role;
58358+ struct acl_subject_label *subj = NULL;
58359+ struct acl_object_label *obj;
58360+ struct file *filp;
58361+
58362+ if (unlikely(!(gr_status & GR_READY)))
58363+ return;
58364+
58365+ filp = task->exec_file;
58366+
58367+ /* kernel process, we'll give them the kernel role */
58368+ if (unlikely(!filp)) {
58369+ task->role = kernel_role;
58370+ task->acl = kernel_role->root_label;
58371+ return;
58372+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
58373+ role = lookup_acl_role_label(task, uid, gid);
58374+
58375+ /* perform subject lookup in possibly new role
58376+ we can use this result below in the case where role == task->role
58377+ */
58378+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
58379+
58380+ /* if we changed uid/gid, but result in the same role
58381+ and are using inheritance, don't lose the inherited subject
58382+ if current subject is other than what normal lookup
58383+ would result in, we arrived via inheritance, don't
58384+ lose subject
58385+ */
58386+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
58387+ (subj == task->acl)))
58388+ task->acl = subj;
58389+
58390+ task->role = role;
58391+
58392+ task->is_writable = 0;
58393+
58394+ /* ignore additional mmap checks for processes that are writable
58395+ by the default ACL */
58396+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58397+ if (unlikely(obj->mode & GR_WRITE))
58398+ task->is_writable = 1;
58399+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
58400+ if (unlikely(obj->mode & GR_WRITE))
58401+ task->is_writable = 1;
58402+
58403+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58404+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58405+#endif
58406+
58407+ gr_set_proc_res(task);
58408+
58409+ return;
58410+}
58411+
58412+int
58413+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
58414+ const int unsafe_flags)
58415+{
58416+ struct task_struct *task = current;
58417+ struct acl_subject_label *newacl;
58418+ struct acl_object_label *obj;
58419+ __u32 retmode;
58420+
58421+ if (unlikely(!(gr_status & GR_READY)))
58422+ return 0;
58423+
58424+ newacl = chk_subj_label(dentry, mnt, task->role);
58425+
58426+ task_lock(task);
58427+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
58428+ !(task->role->roletype & GR_ROLE_GOD) &&
58429+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
58430+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
58431+ task_unlock(task);
58432+ if (unsafe_flags & LSM_UNSAFE_SHARE)
58433+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
58434+ else
58435+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
58436+ return -EACCES;
58437+ }
58438+ task_unlock(task);
58439+
58440+ obj = chk_obj_label(dentry, mnt, task->acl);
58441+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
58442+
58443+ if (!(task->acl->mode & GR_INHERITLEARN) &&
58444+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
58445+ if (obj->nested)
58446+ task->acl = obj->nested;
58447+ else
58448+ task->acl = newacl;
58449+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
58450+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
58451+
58452+ task->is_writable = 0;
58453+
58454+ /* ignore additional mmap checks for processes that are writable
58455+ by the default ACL */
58456+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
58457+ if (unlikely(obj->mode & GR_WRITE))
58458+ task->is_writable = 1;
58459+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
58460+ if (unlikely(obj->mode & GR_WRITE))
58461+ task->is_writable = 1;
58462+
58463+ gr_set_proc_res(task);
58464+
58465+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58466+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
58467+#endif
58468+ return 0;
58469+}
58470+
58471+/* always called with valid inodev ptr */
58472+static void
58473+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
58474+{
58475+ struct acl_object_label *matchpo;
58476+ struct acl_subject_label *matchps;
58477+ struct acl_subject_label *subj;
58478+ struct acl_role_label *role;
58479+ unsigned int x;
58480+
58481+ FOR_EACH_ROLE_START(role)
58482+ FOR_EACH_SUBJECT_START(role, subj, x)
58483+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
58484+ matchpo->mode |= GR_DELETED;
58485+ FOR_EACH_SUBJECT_END(subj,x)
58486+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58487+ if (subj->inode == ino && subj->device == dev)
58488+ subj->mode |= GR_DELETED;
58489+ FOR_EACH_NESTED_SUBJECT_END(subj)
58490+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
58491+ matchps->mode |= GR_DELETED;
58492+ FOR_EACH_ROLE_END(role)
58493+
58494+ inodev->nentry->deleted = 1;
58495+
58496+ return;
58497+}
58498+
58499+void
58500+gr_handle_delete(const ino_t ino, const dev_t dev)
58501+{
58502+ struct inodev_entry *inodev;
58503+
58504+ if (unlikely(!(gr_status & GR_READY)))
58505+ return;
58506+
58507+ write_lock(&gr_inode_lock);
58508+ inodev = lookup_inodev_entry(ino, dev);
58509+ if (inodev != NULL)
58510+ do_handle_delete(inodev, ino, dev);
58511+ write_unlock(&gr_inode_lock);
58512+
58513+ return;
58514+}
58515+
58516+static void
58517+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
58518+ const ino_t newinode, const dev_t newdevice,
58519+ struct acl_subject_label *subj)
58520+{
58521+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
58522+ struct acl_object_label *match;
58523+
58524+ match = subj->obj_hash[index];
58525+
58526+ while (match && (match->inode != oldinode ||
58527+ match->device != olddevice ||
58528+ !(match->mode & GR_DELETED)))
58529+ match = match->next;
58530+
58531+ if (match && (match->inode == oldinode)
58532+ && (match->device == olddevice)
58533+ && (match->mode & GR_DELETED)) {
58534+ if (match->prev == NULL) {
58535+ subj->obj_hash[index] = match->next;
58536+ if (match->next != NULL)
58537+ match->next->prev = NULL;
58538+ } else {
58539+ match->prev->next = match->next;
58540+ if (match->next != NULL)
58541+ match->next->prev = match->prev;
58542+ }
58543+ match->prev = NULL;
58544+ match->next = NULL;
58545+ match->inode = newinode;
58546+ match->device = newdevice;
58547+ match->mode &= ~GR_DELETED;
58548+
58549+ insert_acl_obj_label(match, subj);
58550+ }
58551+
58552+ return;
58553+}
58554+
58555+static void
58556+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
58557+ const ino_t newinode, const dev_t newdevice,
58558+ struct acl_role_label *role)
58559+{
58560+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
58561+ struct acl_subject_label *match;
58562+
58563+ match = role->subj_hash[index];
58564+
58565+ while (match && (match->inode != oldinode ||
58566+ match->device != olddevice ||
58567+ !(match->mode & GR_DELETED)))
58568+ match = match->next;
58569+
58570+ if (match && (match->inode == oldinode)
58571+ && (match->device == olddevice)
58572+ && (match->mode & GR_DELETED)) {
58573+ if (match->prev == NULL) {
58574+ role->subj_hash[index] = match->next;
58575+ if (match->next != NULL)
58576+ match->next->prev = NULL;
58577+ } else {
58578+ match->prev->next = match->next;
58579+ if (match->next != NULL)
58580+ match->next->prev = match->prev;
58581+ }
58582+ match->prev = NULL;
58583+ match->next = NULL;
58584+ match->inode = newinode;
58585+ match->device = newdevice;
58586+ match->mode &= ~GR_DELETED;
58587+
58588+ insert_acl_subj_label(match, role);
58589+ }
58590+
58591+ return;
58592+}
58593+
58594+static void
58595+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
58596+ const ino_t newinode, const dev_t newdevice)
58597+{
58598+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
58599+ struct inodev_entry *match;
58600+
58601+ match = inodev_set.i_hash[index];
58602+
58603+ while (match && (match->nentry->inode != oldinode ||
58604+ match->nentry->device != olddevice || !match->nentry->deleted))
58605+ match = match->next;
58606+
58607+ if (match && (match->nentry->inode == oldinode)
58608+ && (match->nentry->device == olddevice) &&
58609+ match->nentry->deleted) {
58610+ if (match->prev == NULL) {
58611+ inodev_set.i_hash[index] = match->next;
58612+ if (match->next != NULL)
58613+ match->next->prev = NULL;
58614+ } else {
58615+ match->prev->next = match->next;
58616+ if (match->next != NULL)
58617+ match->next->prev = match->prev;
58618+ }
58619+ match->prev = NULL;
58620+ match->next = NULL;
58621+ match->nentry->inode = newinode;
58622+ match->nentry->device = newdevice;
58623+ match->nentry->deleted = 0;
58624+
58625+ insert_inodev_entry(match);
58626+ }
58627+
58628+ return;
58629+}
58630+
58631+static void
58632+__do_handle_create(const struct name_entry *matchn, ino_t inode, dev_t dev)
58633+{
58634+ struct acl_subject_label *subj;
58635+ struct acl_role_label *role;
58636+ unsigned int x;
58637+
58638+ FOR_EACH_ROLE_START(role)
58639+ update_acl_subj_label(matchn->inode, matchn->device,
58640+ inode, dev, role);
58641+
58642+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
58643+ if ((subj->inode == inode) && (subj->device == dev)) {
58644+ subj->inode = inode;
58645+ subj->device = dev;
58646+ }
58647+ FOR_EACH_NESTED_SUBJECT_END(subj)
58648+ FOR_EACH_SUBJECT_START(role, subj, x)
58649+ update_acl_obj_label(matchn->inode, matchn->device,
58650+ inode, dev, subj);
58651+ FOR_EACH_SUBJECT_END(subj,x)
58652+ FOR_EACH_ROLE_END(role)
58653+
58654+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
58655+
58656+ return;
58657+}
58658+
58659+static void
58660+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
58661+ const struct vfsmount *mnt)
58662+{
58663+ ino_t ino = dentry->d_inode->i_ino;
58664+ dev_t dev = __get_dev(dentry);
58665+
58666+ __do_handle_create(matchn, ino, dev);
58667+
58668+ return;
58669+}
58670+
58671+void
58672+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
58673+{
58674+ struct name_entry *matchn;
58675+
58676+ if (unlikely(!(gr_status & GR_READY)))
58677+ return;
58678+
58679+ preempt_disable();
58680+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
58681+
58682+ if (unlikely((unsigned long)matchn)) {
58683+ write_lock(&gr_inode_lock);
58684+ do_handle_create(matchn, dentry, mnt);
58685+ write_unlock(&gr_inode_lock);
58686+ }
58687+ preempt_enable();
58688+
58689+ return;
58690+}
58691+
58692+void
58693+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
58694+{
58695+ struct name_entry *matchn;
58696+
58697+ if (unlikely(!(gr_status & GR_READY)))
58698+ return;
58699+
58700+ preempt_disable();
58701+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
58702+
58703+ if (unlikely((unsigned long)matchn)) {
58704+ write_lock(&gr_inode_lock);
58705+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
58706+ write_unlock(&gr_inode_lock);
58707+ }
58708+ preempt_enable();
58709+
58710+ return;
58711+}
58712+
58713+void
58714+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
58715+ struct dentry *old_dentry,
58716+ struct dentry *new_dentry,
58717+ struct vfsmount *mnt, const __u8 replace)
58718+{
58719+ struct name_entry *matchn;
58720+ struct inodev_entry *inodev;
58721+ struct inode *inode = new_dentry->d_inode;
58722+ ino_t oldinode = old_dentry->d_inode->i_ino;
58723+ dev_t olddev = __get_dev(old_dentry);
58724+
58725+ /* vfs_rename swaps the name and parent link for old_dentry and
58726+ new_dentry
58727+ at this point, old_dentry has the new name, parent link, and inode
58728+ for the renamed file
58729+ if a file is being replaced by a rename, new_dentry has the inode
58730+ and name for the replaced file
58731+ */
58732+
58733+ if (unlikely(!(gr_status & GR_READY)))
58734+ return;
58735+
58736+ preempt_disable();
58737+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
58738+
58739+ /* we wouldn't have to check d_inode if it weren't for
58740+ NFS silly-renaming
58741+ */
58742+
58743+ write_lock(&gr_inode_lock);
58744+ if (unlikely(replace && inode)) {
58745+ ino_t newinode = inode->i_ino;
58746+ dev_t newdev = __get_dev(new_dentry);
58747+ inodev = lookup_inodev_entry(newinode, newdev);
58748+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
58749+ do_handle_delete(inodev, newinode, newdev);
58750+ }
58751+
58752+ inodev = lookup_inodev_entry(oldinode, olddev);
58753+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
58754+ do_handle_delete(inodev, oldinode, olddev);
58755+
58756+ if (unlikely((unsigned long)matchn))
58757+ do_handle_create(matchn, old_dentry, mnt);
58758+
58759+ write_unlock(&gr_inode_lock);
58760+ preempt_enable();
58761+
58762+ return;
58763+}
58764+
58765+static int
58766+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
58767+ unsigned char **sum)
58768+{
58769+ struct acl_role_label *r;
58770+ struct role_allowed_ip *ipp;
58771+ struct role_transition *trans;
58772+ unsigned int i;
58773+ int found = 0;
58774+ u32 curr_ip = current->signal->curr_ip;
58775+
58776+ current->signal->saved_ip = curr_ip;
58777+
58778+ /* check transition table */
58779+
58780+ for (trans = current->role->transitions; trans; trans = trans->next) {
58781+ if (!strcmp(rolename, trans->rolename)) {
58782+ found = 1;
58783+ break;
58784+ }
58785+ }
58786+
58787+ if (!found)
58788+ return 0;
58789+
58790+ /* handle special roles that do not require authentication
58791+ and check ip */
58792+
58793+ FOR_EACH_ROLE_START(r)
58794+ if (!strcmp(rolename, r->rolename) &&
58795+ (r->roletype & GR_ROLE_SPECIAL)) {
58796+ found = 0;
58797+ if (r->allowed_ips != NULL) {
58798+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
58799+ if ((ntohl(curr_ip) & ipp->netmask) ==
58800+ (ntohl(ipp->addr) & ipp->netmask))
58801+ found = 1;
58802+ }
58803+ } else
58804+ found = 2;
58805+ if (!found)
58806+ return 0;
58807+
58808+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
58809+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
58810+ *salt = NULL;
58811+ *sum = NULL;
58812+ return 1;
58813+ }
58814+ }
58815+ FOR_EACH_ROLE_END(r)
58816+
58817+ for (i = 0; i < num_sprole_pws; i++) {
58818+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
58819+ *salt = acl_special_roles[i]->salt;
58820+ *sum = acl_special_roles[i]->sum;
58821+ return 1;
58822+ }
58823+ }
58824+
58825+ return 0;
58826+}
58827+
58828+static void
58829+assign_special_role(char *rolename)
58830+{
58831+ struct acl_object_label *obj;
58832+ struct acl_role_label *r;
58833+ struct acl_role_label *assigned = NULL;
58834+ struct task_struct *tsk;
58835+ struct file *filp;
58836+
58837+ FOR_EACH_ROLE_START(r)
58838+ if (!strcmp(rolename, r->rolename) &&
58839+ (r->roletype & GR_ROLE_SPECIAL)) {
58840+ assigned = r;
58841+ break;
58842+ }
58843+ FOR_EACH_ROLE_END(r)
58844+
58845+ if (!assigned)
58846+ return;
58847+
58848+ read_lock(&tasklist_lock);
58849+ read_lock(&grsec_exec_file_lock);
58850+
58851+ tsk = current->real_parent;
58852+ if (tsk == NULL)
58853+ goto out_unlock;
58854+
58855+ filp = tsk->exec_file;
58856+ if (filp == NULL)
58857+ goto out_unlock;
58858+
58859+ tsk->is_writable = 0;
58860+
58861+ tsk->acl_sp_role = 1;
58862+ tsk->acl_role_id = ++acl_sp_role_value;
58863+ tsk->role = assigned;
58864+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
58865+
58866+ /* ignore additional mmap checks for processes that are writable
58867+ by the default ACL */
58868+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
58869+ if (unlikely(obj->mode & GR_WRITE))
58870+ tsk->is_writable = 1;
58871+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
58872+ if (unlikely(obj->mode & GR_WRITE))
58873+ tsk->is_writable = 1;
58874+
58875+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
58876+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
58877+#endif
58878+
58879+out_unlock:
58880+ read_unlock(&grsec_exec_file_lock);
58881+ read_unlock(&tasklist_lock);
58882+ return;
58883+}
58884+
58885+int gr_check_secure_terminal(struct task_struct *task)
58886+{
58887+ struct task_struct *p, *p2, *p3;
58888+ struct files_struct *files;
58889+ struct fdtable *fdt;
58890+ struct file *our_file = NULL, *file;
58891+ int i;
58892+
58893+ if (task->signal->tty == NULL)
58894+ return 1;
58895+
58896+ files = get_files_struct(task);
58897+ if (files != NULL) {
58898+ rcu_read_lock();
58899+ fdt = files_fdtable(files);
58900+ for (i=0; i < fdt->max_fds; i++) {
58901+ file = fcheck_files(files, i);
58902+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
58903+ get_file(file);
58904+ our_file = file;
58905+ }
58906+ }
58907+ rcu_read_unlock();
58908+ put_files_struct(files);
58909+ }
58910+
58911+ if (our_file == NULL)
58912+ return 1;
58913+
58914+ read_lock(&tasklist_lock);
58915+ do_each_thread(p2, p) {
58916+ files = get_files_struct(p);
58917+ if (files == NULL ||
58918+ (p->signal && p->signal->tty == task->signal->tty)) {
58919+ if (files != NULL)
58920+ put_files_struct(files);
58921+ continue;
58922+ }
58923+ rcu_read_lock();
58924+ fdt = files_fdtable(files);
58925+ for (i=0; i < fdt->max_fds; i++) {
58926+ file = fcheck_files(files, i);
58927+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
58928+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
58929+ p3 = task;
58930+ while (p3->pid > 0) {
58931+ if (p3 == p)
58932+ break;
58933+ p3 = p3->real_parent;
58934+ }
58935+ if (p3 == p)
58936+ break;
58937+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
58938+ gr_handle_alertkill(p);
58939+ rcu_read_unlock();
58940+ put_files_struct(files);
58941+ read_unlock(&tasklist_lock);
58942+ fput(our_file);
58943+ return 0;
58944+ }
58945+ }
58946+ rcu_read_unlock();
58947+ put_files_struct(files);
58948+ } while_each_thread(p2, p);
58949+ read_unlock(&tasklist_lock);
58950+
58951+ fput(our_file);
58952+ return 1;
58953+}
58954+
58955+ssize_t
58956+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
58957+{
58958+ struct gr_arg_wrapper uwrap;
58959+ unsigned char *sprole_salt = NULL;
58960+ unsigned char *sprole_sum = NULL;
58961+ int error = sizeof (struct gr_arg_wrapper);
58962+ int error2 = 0;
58963+
58964+ mutex_lock(&gr_dev_mutex);
58965+
58966+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
58967+ error = -EPERM;
58968+ goto out;
58969+ }
58970+
58971+ if (count != sizeof (struct gr_arg_wrapper)) {
58972+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
58973+ error = -EINVAL;
58974+ goto out;
58975+ }
58976+
58977+
58978+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
58979+ gr_auth_expires = 0;
58980+ gr_auth_attempts = 0;
58981+ }
58982+
58983+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
58984+ error = -EFAULT;
58985+ goto out;
58986+ }
58987+
58988+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
58989+ error = -EINVAL;
58990+ goto out;
58991+ }
58992+
58993+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
58994+ error = -EFAULT;
58995+ goto out;
58996+ }
58997+
58998+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
58999+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59000+ time_after(gr_auth_expires, get_seconds())) {
59001+ error = -EBUSY;
59002+ goto out;
59003+ }
59004+
59005+ /* if non-root trying to do anything other than use a special role,
59006+ do not attempt authentication, do not count towards authentication
59007+ locking
59008+ */
59009+
59010+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
59011+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
59012+ current_uid()) {
59013+ error = -EPERM;
59014+ goto out;
59015+ }
59016+
59017+ /* ensure pw and special role name are null terminated */
59018+
59019+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
59020+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
59021+
59022+ /* Okay.
59023+ * We have our enough of the argument structure..(we have yet
59024+ * to copy_from_user the tables themselves) . Copy the tables
59025+ * only if we need them, i.e. for loading operations. */
59026+
59027+ switch (gr_usermode->mode) {
59028+ case GR_STATUS:
59029+ if (gr_status & GR_READY) {
59030+ error = 1;
59031+ if (!gr_check_secure_terminal(current))
59032+ error = 3;
59033+ } else
59034+ error = 2;
59035+ goto out;
59036+ case GR_SHUTDOWN:
59037+ if ((gr_status & GR_READY)
59038+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59039+ pax_open_kernel();
59040+ gr_status &= ~GR_READY;
59041+ pax_close_kernel();
59042+
59043+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
59044+ free_variables();
59045+ memset(gr_usermode, 0, sizeof (struct gr_arg));
59046+ memset(gr_system_salt, 0, GR_SALT_LEN);
59047+ memset(gr_system_sum, 0, GR_SHA_LEN);
59048+ } else if (gr_status & GR_READY) {
59049+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
59050+ error = -EPERM;
59051+ } else {
59052+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
59053+ error = -EAGAIN;
59054+ }
59055+ break;
59056+ case GR_ENABLE:
59057+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
59058+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
59059+ else {
59060+ if (gr_status & GR_READY)
59061+ error = -EAGAIN;
59062+ else
59063+ error = error2;
59064+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
59065+ }
59066+ break;
59067+ case GR_RELOAD:
59068+ if (!(gr_status & GR_READY)) {
59069+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
59070+ error = -EAGAIN;
59071+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59072+ lock_kernel();
59073+
59074+ pax_open_kernel();
59075+ gr_status &= ~GR_READY;
59076+ pax_close_kernel();
59077+
59078+ free_variables();
59079+ if (!(error2 = gracl_init(gr_usermode))) {
59080+ unlock_kernel();
59081+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
59082+ } else {
59083+ unlock_kernel();
59084+ error = error2;
59085+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59086+ }
59087+ } else {
59088+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
59089+ error = -EPERM;
59090+ }
59091+ break;
59092+ case GR_SEGVMOD:
59093+ if (unlikely(!(gr_status & GR_READY))) {
59094+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
59095+ error = -EAGAIN;
59096+ break;
59097+ }
59098+
59099+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
59100+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
59101+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
59102+ struct acl_subject_label *segvacl;
59103+ segvacl =
59104+ lookup_acl_subj_label(gr_usermode->segv_inode,
59105+ gr_usermode->segv_device,
59106+ current->role);
59107+ if (segvacl) {
59108+ segvacl->crashes = 0;
59109+ segvacl->expires = 0;
59110+ }
59111+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
59112+ gr_remove_uid(gr_usermode->segv_uid);
59113+ }
59114+ } else {
59115+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
59116+ error = -EPERM;
59117+ }
59118+ break;
59119+ case GR_SPROLE:
59120+ case GR_SPROLEPAM:
59121+ if (unlikely(!(gr_status & GR_READY))) {
59122+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
59123+ error = -EAGAIN;
59124+ break;
59125+ }
59126+
59127+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
59128+ current->role->expires = 0;
59129+ current->role->auth_attempts = 0;
59130+ }
59131+
59132+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
59133+ time_after(current->role->expires, get_seconds())) {
59134+ error = -EBUSY;
59135+ goto out;
59136+ }
59137+
59138+ if (lookup_special_role_auth
59139+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
59140+ && ((!sprole_salt && !sprole_sum)
59141+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
59142+ char *p = "";
59143+ assign_special_role(gr_usermode->sp_role);
59144+ read_lock(&tasklist_lock);
59145+ if (current->real_parent)
59146+ p = current->real_parent->role->rolename;
59147+ read_unlock(&tasklist_lock);
59148+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
59149+ p, acl_sp_role_value);
59150+ } else {
59151+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
59152+ error = -EPERM;
59153+ if(!(current->role->auth_attempts++))
59154+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59155+
59156+ goto out;
59157+ }
59158+ break;
59159+ case GR_UNSPROLE:
59160+ if (unlikely(!(gr_status & GR_READY))) {
59161+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
59162+ error = -EAGAIN;
59163+ break;
59164+ }
59165+
59166+ if (current->role->roletype & GR_ROLE_SPECIAL) {
59167+ char *p = "";
59168+ int i = 0;
59169+
59170+ read_lock(&tasklist_lock);
59171+ if (current->real_parent) {
59172+ p = current->real_parent->role->rolename;
59173+ i = current->real_parent->acl_role_id;
59174+ }
59175+ read_unlock(&tasklist_lock);
59176+
59177+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
59178+ gr_set_acls(1);
59179+ } else {
59180+ error = -EPERM;
59181+ goto out;
59182+ }
59183+ break;
59184+ default:
59185+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
59186+ error = -EINVAL;
59187+ break;
59188+ }
59189+
59190+ if (error != -EPERM)
59191+ goto out;
59192+
59193+ if(!(gr_auth_attempts++))
59194+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
59195+
59196+ out:
59197+ mutex_unlock(&gr_dev_mutex);
59198+ return error;
59199+}
59200+
59201+/* must be called with
59202+ rcu_read_lock();
59203+ read_lock(&tasklist_lock);
59204+ read_lock(&grsec_exec_file_lock);
59205+*/
59206+int gr_apply_subject_to_task(struct task_struct *task)
59207+{
59208+ struct acl_object_label *obj;
59209+ char *tmpname;
59210+ struct acl_subject_label *tmpsubj;
59211+ struct file *filp;
59212+ struct name_entry *nmatch;
59213+
59214+ filp = task->exec_file;
59215+ if (filp == NULL)
59216+ return 0;
59217+
59218+ /* the following is to apply the correct subject
59219+ on binaries running when the RBAC system
59220+ is enabled, when the binaries have been
59221+ replaced or deleted since their execution
59222+ -----
59223+ when the RBAC system starts, the inode/dev
59224+ from exec_file will be one the RBAC system
59225+ is unaware of. It only knows the inode/dev
59226+ of the present file on disk, or the absence
59227+ of it.
59228+ */
59229+ preempt_disable();
59230+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
59231+
59232+ nmatch = lookup_name_entry(tmpname);
59233+ preempt_enable();
59234+ tmpsubj = NULL;
59235+ if (nmatch) {
59236+ if (nmatch->deleted)
59237+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
59238+ else
59239+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
59240+ if (tmpsubj != NULL)
59241+ task->acl = tmpsubj;
59242+ }
59243+ if (tmpsubj == NULL)
59244+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
59245+ task->role);
59246+ if (task->acl) {
59247+ task->is_writable = 0;
59248+ /* ignore additional mmap checks for processes that are writable
59249+ by the default ACL */
59250+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59251+ if (unlikely(obj->mode & GR_WRITE))
59252+ task->is_writable = 1;
59253+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
59254+ if (unlikely(obj->mode & GR_WRITE))
59255+ task->is_writable = 1;
59256+
59257+ gr_set_proc_res(task);
59258+
59259+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59260+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
59261+#endif
59262+ } else {
59263+ return 1;
59264+ }
59265+
59266+ return 0;
59267+}
59268+
59269+int
59270+gr_set_acls(const int type)
59271+{
59272+ struct task_struct *task, *task2;
59273+ struct acl_role_label *role = current->role;
59274+ __u16 acl_role_id = current->acl_role_id;
59275+ const struct cred *cred;
59276+ int ret;
59277+
59278+ rcu_read_lock();
59279+ read_lock(&tasklist_lock);
59280+ read_lock(&grsec_exec_file_lock);
59281+ do_each_thread(task2, task) {
59282+ /* check to see if we're called from the exit handler,
59283+ if so, only replace ACLs that have inherited the admin
59284+ ACL */
59285+
59286+ if (type && (task->role != role ||
59287+ task->acl_role_id != acl_role_id))
59288+ continue;
59289+
59290+ task->acl_role_id = 0;
59291+ task->acl_sp_role = 0;
59292+
59293+ if (task->exec_file) {
59294+ cred = __task_cred(task);
59295+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
59296+
59297+ ret = gr_apply_subject_to_task(task);
59298+ if (ret) {
59299+ read_unlock(&grsec_exec_file_lock);
59300+ read_unlock(&tasklist_lock);
59301+ rcu_read_unlock();
59302+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
59303+ return ret;
59304+ }
59305+ } else {
59306+ // it's a kernel process
59307+ task->role = kernel_role;
59308+ task->acl = kernel_role->root_label;
59309+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
59310+ task->acl->mode &= ~GR_PROCFIND;
59311+#endif
59312+ }
59313+ } while_each_thread(task2, task);
59314+ read_unlock(&grsec_exec_file_lock);
59315+ read_unlock(&tasklist_lock);
59316+ rcu_read_unlock();
59317+
59318+ return 0;
59319+}
59320+
59321+void
59322+gr_learn_resource(const struct task_struct *task,
59323+ const int res, const unsigned long wanted, const int gt)
59324+{
59325+ struct acl_subject_label *acl;
59326+ const struct cred *cred;
59327+
59328+ if (unlikely((gr_status & GR_READY) &&
59329+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
59330+ goto skip_reslog;
59331+
59332+#ifdef CONFIG_GRKERNSEC_RESLOG
59333+ gr_log_resource(task, res, wanted, gt);
59334+#endif
59335+ skip_reslog:
59336+
59337+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
59338+ return;
59339+
59340+ acl = task->acl;
59341+
59342+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
59343+ !(acl->resmask & (1 << (unsigned short) res))))
59344+ return;
59345+
59346+ if (wanted >= acl->res[res].rlim_cur) {
59347+ unsigned long res_add;
59348+
59349+ res_add = wanted;
59350+ switch (res) {
59351+ case RLIMIT_CPU:
59352+ res_add += GR_RLIM_CPU_BUMP;
59353+ break;
59354+ case RLIMIT_FSIZE:
59355+ res_add += GR_RLIM_FSIZE_BUMP;
59356+ break;
59357+ case RLIMIT_DATA:
59358+ res_add += GR_RLIM_DATA_BUMP;
59359+ break;
59360+ case RLIMIT_STACK:
59361+ res_add += GR_RLIM_STACK_BUMP;
59362+ break;
59363+ case RLIMIT_CORE:
59364+ res_add += GR_RLIM_CORE_BUMP;
59365+ break;
59366+ case RLIMIT_RSS:
59367+ res_add += GR_RLIM_RSS_BUMP;
59368+ break;
59369+ case RLIMIT_NPROC:
59370+ res_add += GR_RLIM_NPROC_BUMP;
59371+ break;
59372+ case RLIMIT_NOFILE:
59373+ res_add += GR_RLIM_NOFILE_BUMP;
59374+ break;
59375+ case RLIMIT_MEMLOCK:
59376+ res_add += GR_RLIM_MEMLOCK_BUMP;
59377+ break;
59378+ case RLIMIT_AS:
59379+ res_add += GR_RLIM_AS_BUMP;
59380+ break;
59381+ case RLIMIT_LOCKS:
59382+ res_add += GR_RLIM_LOCKS_BUMP;
59383+ break;
59384+ case RLIMIT_SIGPENDING:
59385+ res_add += GR_RLIM_SIGPENDING_BUMP;
59386+ break;
59387+ case RLIMIT_MSGQUEUE:
59388+ res_add += GR_RLIM_MSGQUEUE_BUMP;
59389+ break;
59390+ case RLIMIT_NICE:
59391+ res_add += GR_RLIM_NICE_BUMP;
59392+ break;
59393+ case RLIMIT_RTPRIO:
59394+ res_add += GR_RLIM_RTPRIO_BUMP;
59395+ break;
59396+ case RLIMIT_RTTIME:
59397+ res_add += GR_RLIM_RTTIME_BUMP;
59398+ break;
59399+ }
59400+
59401+ acl->res[res].rlim_cur = res_add;
59402+
59403+ if (wanted > acl->res[res].rlim_max)
59404+ acl->res[res].rlim_max = res_add;
59405+
59406+ /* only log the subject filename, since resource logging is supported for
59407+ single-subject learning only */
59408+ rcu_read_lock();
59409+ cred = __task_cred(task);
59410+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
59411+ task->role->roletype, cred->uid, cred->gid, acl->filename,
59412+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
59413+ "", (unsigned long) res, &task->signal->saved_ip);
59414+ rcu_read_unlock();
59415+ }
59416+
59417+ return;
59418+}
59419+
59420+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
59421+void
59422+pax_set_initial_flags(struct linux_binprm *bprm)
59423+{
59424+ struct task_struct *task = current;
59425+ struct acl_subject_label *proc;
59426+ unsigned long flags;
59427+
59428+ if (unlikely(!(gr_status & GR_READY)))
59429+ return;
59430+
59431+ flags = pax_get_flags(task);
59432+
59433+ proc = task->acl;
59434+
59435+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
59436+ flags &= ~MF_PAX_PAGEEXEC;
59437+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
59438+ flags &= ~MF_PAX_SEGMEXEC;
59439+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
59440+ flags &= ~MF_PAX_RANDMMAP;
59441+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
59442+ flags &= ~MF_PAX_EMUTRAMP;
59443+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
59444+ flags &= ~MF_PAX_MPROTECT;
59445+
59446+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
59447+ flags |= MF_PAX_PAGEEXEC;
59448+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
59449+ flags |= MF_PAX_SEGMEXEC;
59450+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
59451+ flags |= MF_PAX_RANDMMAP;
59452+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
59453+ flags |= MF_PAX_EMUTRAMP;
59454+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
59455+ flags |= MF_PAX_MPROTECT;
59456+
59457+ pax_set_flags(task, flags);
59458+
59459+ return;
59460+}
59461+#endif
59462+
59463+#ifdef CONFIG_SYSCTL
59464+/* Eric Biederman likes breaking userland ABI and every inode-based security
59465+ system to save 35kb of memory */
59466+
59467+/* we modify the passed in filename, but adjust it back before returning */
59468+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
59469+{
59470+ struct name_entry *nmatch;
59471+ char *p, *lastp = NULL;
59472+ struct acl_object_label *obj = NULL, *tmp;
59473+ struct acl_subject_label *tmpsubj;
59474+ char c = '\0';
59475+
59476+ read_lock(&gr_inode_lock);
59477+
59478+ p = name + len - 1;
59479+ do {
59480+ nmatch = lookup_name_entry(name);
59481+ if (lastp != NULL)
59482+ *lastp = c;
59483+
59484+ if (nmatch == NULL)
59485+ goto next_component;
59486+ tmpsubj = current->acl;
59487+ do {
59488+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
59489+ if (obj != NULL) {
59490+ tmp = obj->globbed;
59491+ while (tmp) {
59492+ if (!glob_match(tmp->filename, name)) {
59493+ obj = tmp;
59494+ goto found_obj;
59495+ }
59496+ tmp = tmp->next;
59497+ }
59498+ goto found_obj;
59499+ }
59500+ } while ((tmpsubj = tmpsubj->parent_subject));
59501+next_component:
59502+ /* end case */
59503+ if (p == name)
59504+ break;
59505+
59506+ while (*p != '/')
59507+ p--;
59508+ if (p == name)
59509+ lastp = p + 1;
59510+ else {
59511+ lastp = p;
59512+ p--;
59513+ }
59514+ c = *lastp;
59515+ *lastp = '\0';
59516+ } while (1);
59517+found_obj:
59518+ read_unlock(&gr_inode_lock);
59519+ /* obj returned will always be non-null */
59520+ return obj;
59521+}
59522+
59523+/* returns 0 when allowing, non-zero on error
59524+ op of 0 is used for readdir, so we don't log the names of hidden files
59525+*/
59526+__u32
59527+gr_handle_sysctl(const struct ctl_table *table, const int op)
59528+{
59529+ ctl_table *tmp;
59530+ const char *proc_sys = "/proc/sys";
59531+ char *path;
59532+ struct acl_object_label *obj;
59533+ unsigned short len = 0, pos = 0, depth = 0, i;
59534+ __u32 err = 0;
59535+ __u32 mode = 0;
59536+
59537+ if (unlikely(!(gr_status & GR_READY)))
59538+ return 0;
59539+
59540+ /* for now, ignore operations on non-sysctl entries if it's not a
59541+ readdir*/
59542+ if (table->child != NULL && op != 0)
59543+ return 0;
59544+
59545+ mode |= GR_FIND;
59546+ /* it's only a read if it's an entry, read on dirs is for readdir */
59547+ if (op & MAY_READ)
59548+ mode |= GR_READ;
59549+ if (op & MAY_WRITE)
59550+ mode |= GR_WRITE;
59551+
59552+ preempt_disable();
59553+
59554+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
59555+
59556+ /* it's only a read/write if it's an actual entry, not a dir
59557+ (which are opened for readdir)
59558+ */
59559+
59560+ /* convert the requested sysctl entry into a pathname */
59561+
59562+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59563+ len += strlen(tmp->procname);
59564+ len++;
59565+ depth++;
59566+ }
59567+
59568+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
59569+ /* deny */
59570+ goto out;
59571+ }
59572+
59573+ memset(path, 0, PAGE_SIZE);
59574+
59575+ memcpy(path, proc_sys, strlen(proc_sys));
59576+
59577+ pos += strlen(proc_sys);
59578+
59579+ for (; depth > 0; depth--) {
59580+ path[pos] = '/';
59581+ pos++;
59582+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
59583+ if (depth == i) {
59584+ memcpy(path + pos, tmp->procname,
59585+ strlen(tmp->procname));
59586+ pos += strlen(tmp->procname);
59587+ }
59588+ i++;
59589+ }
59590+ }
59591+
59592+ obj = gr_lookup_by_name(path, pos);
59593+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
59594+
59595+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
59596+ ((err & mode) != mode))) {
59597+ __u32 new_mode = mode;
59598+
59599+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
59600+
59601+ err = 0;
59602+ gr_log_learn_sysctl(path, new_mode);
59603+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
59604+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
59605+ err = -ENOENT;
59606+ } else if (!(err & GR_FIND)) {
59607+ err = -ENOENT;
59608+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
59609+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
59610+ path, (mode & GR_READ) ? " reading" : "",
59611+ (mode & GR_WRITE) ? " writing" : "");
59612+ err = -EACCES;
59613+ } else if ((err & mode) != mode) {
59614+ err = -EACCES;
59615+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
59616+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
59617+ path, (mode & GR_READ) ? " reading" : "",
59618+ (mode & GR_WRITE) ? " writing" : "");
59619+ err = 0;
59620+ } else
59621+ err = 0;
59622+
59623+ out:
59624+ preempt_enable();
59625+
59626+ return err;
59627+}
59628+#endif
59629+
59630+int
59631+gr_handle_proc_ptrace(struct task_struct *task)
59632+{
59633+ struct file *filp;
59634+ struct task_struct *tmp = task;
59635+ struct task_struct *curtemp = current;
59636+ __u32 retmode;
59637+
59638+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59639+ if (unlikely(!(gr_status & GR_READY)))
59640+ return 0;
59641+#endif
59642+
59643+ read_lock(&tasklist_lock);
59644+ read_lock(&grsec_exec_file_lock);
59645+ filp = task->exec_file;
59646+
59647+ while (tmp->pid > 0) {
59648+ if (tmp == curtemp)
59649+ break;
59650+ tmp = tmp->real_parent;
59651+ }
59652+
59653+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59654+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
59655+ read_unlock(&grsec_exec_file_lock);
59656+ read_unlock(&tasklist_lock);
59657+ return 1;
59658+ }
59659+
59660+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59661+ if (!(gr_status & GR_READY)) {
59662+ read_unlock(&grsec_exec_file_lock);
59663+ read_unlock(&tasklist_lock);
59664+ return 0;
59665+ }
59666+#endif
59667+
59668+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
59669+ read_unlock(&grsec_exec_file_lock);
59670+ read_unlock(&tasklist_lock);
59671+
59672+ if (retmode & GR_NOPTRACE)
59673+ return 1;
59674+
59675+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
59676+ && (current->acl != task->acl || (current->acl != current->role->root_label
59677+ && current->pid != task->pid)))
59678+ return 1;
59679+
59680+ return 0;
59681+}
59682+
59683+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
59684+{
59685+ if (unlikely(!(gr_status & GR_READY)))
59686+ return;
59687+
59688+ if (!(current->role->roletype & GR_ROLE_GOD))
59689+ return;
59690+
59691+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
59692+ p->role->rolename, gr_task_roletype_to_char(p),
59693+ p->acl->filename);
59694+}
59695+
59696+int
59697+gr_handle_ptrace(struct task_struct *task, const long request)
59698+{
59699+ struct task_struct *tmp = task;
59700+ struct task_struct *curtemp = current;
59701+ __u32 retmode;
59702+
59703+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
59704+ if (unlikely(!(gr_status & GR_READY)))
59705+ return 0;
59706+#endif
59707+
59708+ read_lock(&tasklist_lock);
59709+ while (tmp->pid > 0) {
59710+ if (tmp == curtemp)
59711+ break;
59712+ tmp = tmp->real_parent;
59713+ }
59714+
59715+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
59716+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
59717+ read_unlock(&tasklist_lock);
59718+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59719+ return 1;
59720+ }
59721+ read_unlock(&tasklist_lock);
59722+
59723+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
59724+ if (!(gr_status & GR_READY))
59725+ return 0;
59726+#endif
59727+
59728+ read_lock(&grsec_exec_file_lock);
59729+ if (unlikely(!task->exec_file)) {
59730+ read_unlock(&grsec_exec_file_lock);
59731+ return 0;
59732+ }
59733+
59734+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
59735+ read_unlock(&grsec_exec_file_lock);
59736+
59737+ if (retmode & GR_NOPTRACE) {
59738+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59739+ return 1;
59740+ }
59741+
59742+ if (retmode & GR_PTRACERD) {
59743+ switch (request) {
59744+ case PTRACE_POKETEXT:
59745+ case PTRACE_POKEDATA:
59746+ case PTRACE_POKEUSR:
59747+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
59748+ case PTRACE_SETREGS:
59749+ case PTRACE_SETFPREGS:
59750+#endif
59751+#ifdef CONFIG_X86
59752+ case PTRACE_SETFPXREGS:
59753+#endif
59754+#ifdef CONFIG_ALTIVEC
59755+ case PTRACE_SETVRREGS:
59756+#endif
59757+ return 1;
59758+ default:
59759+ return 0;
59760+ }
59761+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
59762+ !(current->role->roletype & GR_ROLE_GOD) &&
59763+ (current->acl != task->acl)) {
59764+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
59765+ return 1;
59766+ }
59767+
59768+ return 0;
59769+}
59770+
59771+static int is_writable_mmap(const struct file *filp)
59772+{
59773+ struct task_struct *task = current;
59774+ struct acl_object_label *obj, *obj2;
59775+
59776+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
59777+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
59778+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
59779+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
59780+ task->role->root_label);
59781+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
59782+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
59783+ return 1;
59784+ }
59785+ }
59786+ return 0;
59787+}
59788+
59789+int
59790+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
59791+{
59792+ __u32 mode;
59793+
59794+ if (unlikely(!file || !(prot & PROT_EXEC)))
59795+ return 1;
59796+
59797+ if (is_writable_mmap(file))
59798+ return 0;
59799+
59800+ mode =
59801+ gr_search_file(file->f_path.dentry,
59802+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59803+ file->f_path.mnt);
59804+
59805+ if (!gr_tpe_allow(file))
59806+ return 0;
59807+
59808+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59809+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59810+ return 0;
59811+ } else if (unlikely(!(mode & GR_EXEC))) {
59812+ return 0;
59813+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59814+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59815+ return 1;
59816+ }
59817+
59818+ return 1;
59819+}
59820+
59821+int
59822+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
59823+{
59824+ __u32 mode;
59825+
59826+ if (unlikely(!file || !(prot & PROT_EXEC)))
59827+ return 1;
59828+
59829+ if (is_writable_mmap(file))
59830+ return 0;
59831+
59832+ mode =
59833+ gr_search_file(file->f_path.dentry,
59834+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
59835+ file->f_path.mnt);
59836+
59837+ if (!gr_tpe_allow(file))
59838+ return 0;
59839+
59840+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
59841+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59842+ return 0;
59843+ } else if (unlikely(!(mode & GR_EXEC))) {
59844+ return 0;
59845+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
59846+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
59847+ return 1;
59848+ }
59849+
59850+ return 1;
59851+}
59852+
59853+void
59854+gr_acl_handle_psacct(struct task_struct *task, const long code)
59855+{
59856+ unsigned long runtime;
59857+ unsigned long cputime;
59858+ unsigned int wday, cday;
59859+ __u8 whr, chr;
59860+ __u8 wmin, cmin;
59861+ __u8 wsec, csec;
59862+ struct timespec timeval;
59863+
59864+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
59865+ !(task->acl->mode & GR_PROCACCT)))
59866+ return;
59867+
59868+ do_posix_clock_monotonic_gettime(&timeval);
59869+ runtime = timeval.tv_sec - task->start_time.tv_sec;
59870+ wday = runtime / (3600 * 24);
59871+ runtime -= wday * (3600 * 24);
59872+ whr = runtime / 3600;
59873+ runtime -= whr * 3600;
59874+ wmin = runtime / 60;
59875+ runtime -= wmin * 60;
59876+ wsec = runtime;
59877+
59878+ cputime = (task->utime + task->stime) / HZ;
59879+ cday = cputime / (3600 * 24);
59880+ cputime -= cday * (3600 * 24);
59881+ chr = cputime / 3600;
59882+ cputime -= chr * 3600;
59883+ cmin = cputime / 60;
59884+ cputime -= cmin * 60;
59885+ csec = cputime;
59886+
59887+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
59888+
59889+ return;
59890+}
59891+
59892+void gr_set_kernel_label(struct task_struct *task)
59893+{
59894+ if (gr_status & GR_READY) {
59895+ task->role = kernel_role;
59896+ task->acl = kernel_role->root_label;
59897+ }
59898+ return;
59899+}
59900+
59901+#ifdef CONFIG_TASKSTATS
59902+int gr_is_taskstats_denied(int pid)
59903+{
59904+ struct task_struct *task;
59905+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59906+ const struct cred *cred;
59907+#endif
59908+ int ret = 0;
59909+
59910+ /* restrict taskstats viewing to un-chrooted root users
59911+ who have the 'view' subject flag if the RBAC system is enabled
59912+ */
59913+
59914+ rcu_read_lock();
59915+ read_lock(&tasklist_lock);
59916+ task = find_task_by_vpid(pid);
59917+ if (task) {
59918+#ifdef CONFIG_GRKERNSEC_CHROOT
59919+ if (proc_is_chrooted(task))
59920+ ret = -EACCES;
59921+#endif
59922+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59923+ cred = __task_cred(task);
59924+#ifdef CONFIG_GRKERNSEC_PROC_USER
59925+ if (cred->uid != 0)
59926+ ret = -EACCES;
59927+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59928+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
59929+ ret = -EACCES;
59930+#endif
59931+#endif
59932+ if (gr_status & GR_READY) {
59933+ if (!(task->acl->mode & GR_VIEW))
59934+ ret = -EACCES;
59935+ }
59936+ } else
59937+ ret = -ENOENT;
59938+
59939+ read_unlock(&tasklist_lock);
59940+ rcu_read_unlock();
59941+
59942+ return ret;
59943+}
59944+#endif
59945+
59946+/* AUXV entries are filled via a descendant of search_binary_handler
59947+ after we've already applied the subject for the target
59948+*/
59949+int gr_acl_enable_at_secure(void)
59950+{
59951+ if (unlikely(!(gr_status & GR_READY)))
59952+ return 0;
59953+
59954+ if (current->acl->mode & GR_ATSECURE)
59955+ return 1;
59956+
59957+ return 0;
59958+}
59959+
59960+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
59961+{
59962+ struct task_struct *task = current;
59963+ struct dentry *dentry = file->f_path.dentry;
59964+ struct vfsmount *mnt = file->f_path.mnt;
59965+ struct acl_object_label *obj, *tmp;
59966+ struct acl_subject_label *subj;
59967+ unsigned int bufsize;
59968+ int is_not_root;
59969+ char *path;
59970+ dev_t dev = __get_dev(dentry);
59971+
59972+ if (unlikely(!(gr_status & GR_READY)))
59973+ return 1;
59974+
59975+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
59976+ return 1;
59977+
59978+ /* ignore Eric Biederman */
59979+ if (IS_PRIVATE(dentry->d_inode))
59980+ return 1;
59981+
59982+ subj = task->acl;
59983+ do {
59984+ obj = lookup_acl_obj_label(ino, dev, subj);
59985+ if (obj != NULL)
59986+ return (obj->mode & GR_FIND) ? 1 : 0;
59987+ } while ((subj = subj->parent_subject));
59988+
59989+ /* this is purely an optimization since we're looking for an object
59990+ for the directory we're doing a readdir on
59991+ if it's possible for any globbed object to match the entry we're
59992+ filling into the directory, then the object we find here will be
59993+ an anchor point with attached globbed objects
59994+ */
59995+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
59996+ if (obj->globbed == NULL)
59997+ return (obj->mode & GR_FIND) ? 1 : 0;
59998+
59999+ is_not_root = ((obj->filename[0] == '/') &&
60000+ (obj->filename[1] == '\0')) ? 0 : 1;
60001+ bufsize = PAGE_SIZE - namelen - is_not_root;
60002+
60003+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
60004+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
60005+ return 1;
60006+
60007+ preempt_disable();
60008+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
60009+ bufsize);
60010+
60011+ bufsize = strlen(path);
60012+
60013+ /* if base is "/", don't append an additional slash */
60014+ if (is_not_root)
60015+ *(path + bufsize) = '/';
60016+ memcpy(path + bufsize + is_not_root, name, namelen);
60017+ *(path + bufsize + namelen + is_not_root) = '\0';
60018+
60019+ tmp = obj->globbed;
60020+ while (tmp) {
60021+ if (!glob_match(tmp->filename, path)) {
60022+ preempt_enable();
60023+ return (tmp->mode & GR_FIND) ? 1 : 0;
60024+ }
60025+ tmp = tmp->next;
60026+ }
60027+ preempt_enable();
60028+ return (obj->mode & GR_FIND) ? 1 : 0;
60029+}
60030+
60031+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
60032+EXPORT_SYMBOL(gr_acl_is_enabled);
60033+#endif
60034+EXPORT_SYMBOL(gr_learn_resource);
60035+EXPORT_SYMBOL(gr_set_kernel_label);
60036+#ifdef CONFIG_SECURITY
60037+EXPORT_SYMBOL(gr_check_user_change);
60038+EXPORT_SYMBOL(gr_check_group_change);
60039+#endif
60040+
60041diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
60042new file mode 100644
60043index 0000000..34fefda
60044--- /dev/null
60045+++ b/grsecurity/gracl_alloc.c
60046@@ -0,0 +1,105 @@
60047+#include <linux/kernel.h>
60048+#include <linux/mm.h>
60049+#include <linux/slab.h>
60050+#include <linux/vmalloc.h>
60051+#include <linux/gracl.h>
60052+#include <linux/grsecurity.h>
60053+
60054+static unsigned long alloc_stack_next = 1;
60055+static unsigned long alloc_stack_size = 1;
60056+static void **alloc_stack;
60057+
60058+static __inline__ int
60059+alloc_pop(void)
60060+{
60061+ if (alloc_stack_next == 1)
60062+ return 0;
60063+
60064+ kfree(alloc_stack[alloc_stack_next - 2]);
60065+
60066+ alloc_stack_next--;
60067+
60068+ return 1;
60069+}
60070+
60071+static __inline__ int
60072+alloc_push(void *buf)
60073+{
60074+ if (alloc_stack_next >= alloc_stack_size)
60075+ return 1;
60076+
60077+ alloc_stack[alloc_stack_next - 1] = buf;
60078+
60079+ alloc_stack_next++;
60080+
60081+ return 0;
60082+}
60083+
60084+void *
60085+acl_alloc(unsigned long len)
60086+{
60087+ void *ret = NULL;
60088+
60089+ if (!len || len > PAGE_SIZE)
60090+ goto out;
60091+
60092+ ret = kmalloc(len, GFP_KERNEL);
60093+
60094+ if (ret) {
60095+ if (alloc_push(ret)) {
60096+ kfree(ret);
60097+ ret = NULL;
60098+ }
60099+ }
60100+
60101+out:
60102+ return ret;
60103+}
60104+
60105+void *
60106+acl_alloc_num(unsigned long num, unsigned long len)
60107+{
60108+ if (!len || (num > (PAGE_SIZE / len)))
60109+ return NULL;
60110+
60111+ return acl_alloc(num * len);
60112+}
60113+
60114+void
60115+acl_free_all(void)
60116+{
60117+ if (gr_acl_is_enabled() || !alloc_stack)
60118+ return;
60119+
60120+ while (alloc_pop()) ;
60121+
60122+ if (alloc_stack) {
60123+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
60124+ kfree(alloc_stack);
60125+ else
60126+ vfree(alloc_stack);
60127+ }
60128+
60129+ alloc_stack = NULL;
60130+ alloc_stack_size = 1;
60131+ alloc_stack_next = 1;
60132+
60133+ return;
60134+}
60135+
60136+int
60137+acl_alloc_stack_init(unsigned long size)
60138+{
60139+ if ((size * sizeof (void *)) <= PAGE_SIZE)
60140+ alloc_stack =
60141+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
60142+ else
60143+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
60144+
60145+ alloc_stack_size = size;
60146+
60147+ if (!alloc_stack)
60148+ return 0;
60149+ else
60150+ return 1;
60151+}
60152diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
60153new file mode 100644
60154index 0000000..955ddfb
60155--- /dev/null
60156+++ b/grsecurity/gracl_cap.c
60157@@ -0,0 +1,101 @@
60158+#include <linux/kernel.h>
60159+#include <linux/module.h>
60160+#include <linux/sched.h>
60161+#include <linux/gracl.h>
60162+#include <linux/grsecurity.h>
60163+#include <linux/grinternal.h>
60164+
60165+extern const char *captab_log[];
60166+extern int captab_log_entries;
60167+
60168+int
60169+gr_acl_is_capable(const int cap)
60170+{
60171+ struct task_struct *task = current;
60172+ const struct cred *cred = current_cred();
60173+ struct acl_subject_label *curracl;
60174+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60175+ kernel_cap_t cap_audit = __cap_empty_set;
60176+
60177+ if (!gr_acl_is_enabled())
60178+ return 1;
60179+
60180+ curracl = task->acl;
60181+
60182+ cap_drop = curracl->cap_lower;
60183+ cap_mask = curracl->cap_mask;
60184+ cap_audit = curracl->cap_invert_audit;
60185+
60186+ while ((curracl = curracl->parent_subject)) {
60187+ /* if the cap isn't specified in the current computed mask but is specified in the
60188+ current level subject, and is lowered in the current level subject, then add
60189+ it to the set of dropped capabilities
60190+ otherwise, add the current level subject's mask to the current computed mask
60191+ */
60192+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60193+ cap_raise(cap_mask, cap);
60194+ if (cap_raised(curracl->cap_lower, cap))
60195+ cap_raise(cap_drop, cap);
60196+ if (cap_raised(curracl->cap_invert_audit, cap))
60197+ cap_raise(cap_audit, cap);
60198+ }
60199+ }
60200+
60201+ if (!cap_raised(cap_drop, cap)) {
60202+ if (cap_raised(cap_audit, cap))
60203+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
60204+ return 1;
60205+ }
60206+
60207+ curracl = task->acl;
60208+
60209+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
60210+ && cap_raised(cred->cap_effective, cap)) {
60211+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
60212+ task->role->roletype, cred->uid,
60213+ cred->gid, task->exec_file ?
60214+ gr_to_filename(task->exec_file->f_path.dentry,
60215+ task->exec_file->f_path.mnt) : curracl->filename,
60216+ curracl->filename, 0UL,
60217+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
60218+ return 1;
60219+ }
60220+
60221+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
60222+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
60223+ return 0;
60224+}
60225+
60226+int
60227+gr_acl_is_capable_nolog(const int cap)
60228+{
60229+ struct acl_subject_label *curracl;
60230+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
60231+
60232+ if (!gr_acl_is_enabled())
60233+ return 1;
60234+
60235+ curracl = current->acl;
60236+
60237+ cap_drop = curracl->cap_lower;
60238+ cap_mask = curracl->cap_mask;
60239+
60240+ while ((curracl = curracl->parent_subject)) {
60241+ /* if the cap isn't specified in the current computed mask but is specified in the
60242+ current level subject, and is lowered in the current level subject, then add
60243+ it to the set of dropped capabilities
60244+ otherwise, add the current level subject's mask to the current computed mask
60245+ */
60246+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
60247+ cap_raise(cap_mask, cap);
60248+ if (cap_raised(curracl->cap_lower, cap))
60249+ cap_raise(cap_drop, cap);
60250+ }
60251+ }
60252+
60253+ if (!cap_raised(cap_drop, cap))
60254+ return 1;
60255+
60256+ return 0;
60257+}
60258+
60259diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
60260new file mode 100644
60261index 0000000..d5f210c
60262--- /dev/null
60263+++ b/grsecurity/gracl_fs.c
60264@@ -0,0 +1,433 @@
60265+#include <linux/kernel.h>
60266+#include <linux/sched.h>
60267+#include <linux/types.h>
60268+#include <linux/fs.h>
60269+#include <linux/file.h>
60270+#include <linux/stat.h>
60271+#include <linux/grsecurity.h>
60272+#include <linux/grinternal.h>
60273+#include <linux/gracl.h>
60274+
60275+__u32
60276+gr_acl_handle_hidden_file(const struct dentry * dentry,
60277+ const struct vfsmount * mnt)
60278+{
60279+ __u32 mode;
60280+
60281+ if (unlikely(!dentry->d_inode))
60282+ return GR_FIND;
60283+
60284+ mode =
60285+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
60286+
60287+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
60288+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60289+ return mode;
60290+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
60291+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
60292+ return 0;
60293+ } else if (unlikely(!(mode & GR_FIND)))
60294+ return 0;
60295+
60296+ return GR_FIND;
60297+}
60298+
60299+__u32
60300+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
60301+ int acc_mode)
60302+{
60303+ __u32 reqmode = GR_FIND;
60304+ __u32 mode;
60305+
60306+ if (unlikely(!dentry->d_inode))
60307+ return reqmode;
60308+
60309+ if (acc_mode & MAY_APPEND)
60310+ reqmode |= GR_APPEND;
60311+ else if (acc_mode & MAY_WRITE)
60312+ reqmode |= GR_WRITE;
60313+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
60314+ reqmode |= GR_READ;
60315+
60316+ mode =
60317+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60318+ mnt);
60319+
60320+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60321+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60322+ reqmode & GR_READ ? " reading" : "",
60323+ reqmode & GR_WRITE ? " writing" : reqmode &
60324+ GR_APPEND ? " appending" : "");
60325+ return reqmode;
60326+ } else
60327+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60328+ {
60329+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
60330+ reqmode & GR_READ ? " reading" : "",
60331+ reqmode & GR_WRITE ? " writing" : reqmode &
60332+ GR_APPEND ? " appending" : "");
60333+ return 0;
60334+ } else if (unlikely((mode & reqmode) != reqmode))
60335+ return 0;
60336+
60337+ return reqmode;
60338+}
60339+
60340+__u32
60341+gr_acl_handle_creat(const struct dentry * dentry,
60342+ const struct dentry * p_dentry,
60343+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
60344+ const int imode)
60345+{
60346+ __u32 reqmode = GR_WRITE | GR_CREATE;
60347+ __u32 mode;
60348+
60349+ if (acc_mode & MAY_APPEND)
60350+ reqmode |= GR_APPEND;
60351+ // if a directory was required or the directory already exists, then
60352+ // don't count this open as a read
60353+ if ((acc_mode & MAY_READ) &&
60354+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
60355+ reqmode |= GR_READ;
60356+ if ((open_flags & O_CREAT) && (imode & (S_ISUID | S_ISGID)))
60357+ reqmode |= GR_SETID;
60358+
60359+ mode =
60360+ gr_check_create(dentry, p_dentry, p_mnt,
60361+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60362+
60363+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60364+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60365+ reqmode & GR_READ ? " reading" : "",
60366+ reqmode & GR_WRITE ? " writing" : reqmode &
60367+ GR_APPEND ? " appending" : "");
60368+ return reqmode;
60369+ } else
60370+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60371+ {
60372+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
60373+ reqmode & GR_READ ? " reading" : "",
60374+ reqmode & GR_WRITE ? " writing" : reqmode &
60375+ GR_APPEND ? " appending" : "");
60376+ return 0;
60377+ } else if (unlikely((mode & reqmode) != reqmode))
60378+ return 0;
60379+
60380+ return reqmode;
60381+}
60382+
60383+__u32
60384+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
60385+ const int fmode)
60386+{
60387+ __u32 mode, reqmode = GR_FIND;
60388+
60389+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
60390+ reqmode |= GR_EXEC;
60391+ if (fmode & S_IWOTH)
60392+ reqmode |= GR_WRITE;
60393+ if (fmode & S_IROTH)
60394+ reqmode |= GR_READ;
60395+
60396+ mode =
60397+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
60398+ mnt);
60399+
60400+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
60401+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60402+ reqmode & GR_READ ? " reading" : "",
60403+ reqmode & GR_WRITE ? " writing" : "",
60404+ reqmode & GR_EXEC ? " executing" : "");
60405+ return reqmode;
60406+ } else
60407+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
60408+ {
60409+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
60410+ reqmode & GR_READ ? " reading" : "",
60411+ reqmode & GR_WRITE ? " writing" : "",
60412+ reqmode & GR_EXEC ? " executing" : "");
60413+ return 0;
60414+ } else if (unlikely((mode & reqmode) != reqmode))
60415+ return 0;
60416+
60417+ return reqmode;
60418+}
60419+
60420+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
60421+{
60422+ __u32 mode;
60423+
60424+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
60425+
60426+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60427+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
60428+ return mode;
60429+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60430+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
60431+ return 0;
60432+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
60433+ return 0;
60434+
60435+ return (reqmode);
60436+}
60437+
60438+__u32
60439+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
60440+{
60441+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
60442+}
60443+
60444+__u32
60445+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
60446+{
60447+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
60448+}
60449+
60450+__u32
60451+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
60452+{
60453+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
60454+}
60455+
60456+__u32
60457+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
60458+{
60459+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
60460+}
60461+
60462+__u32
60463+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
60464+ mode_t mode)
60465+{
60466+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
60467+ return 1;
60468+
60469+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60470+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60471+ GR_FCHMOD_ACL_MSG);
60472+ } else {
60473+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
60474+ }
60475+}
60476+
60477+__u32
60478+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
60479+ mode_t mode)
60480+{
60481+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
60482+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
60483+ GR_CHMOD_ACL_MSG);
60484+ } else {
60485+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
60486+ }
60487+}
60488+
60489+__u32
60490+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
60491+{
60492+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
60493+}
60494+
60495+__u32
60496+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
60497+{
60498+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
60499+}
60500+
60501+__u32
60502+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
60503+{
60504+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
60505+}
60506+
60507+__u32
60508+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
60509+{
60510+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
60511+ GR_UNIXCONNECT_ACL_MSG);
60512+}
60513+
60514+/* hardlinks require at minimum create and link permission,
60515+ any additional privilege required is based on the
60516+ privilege of the file being linked to
60517+*/
60518+__u32
60519+gr_acl_handle_link(const struct dentry * new_dentry,
60520+ const struct dentry * parent_dentry,
60521+ const struct vfsmount * parent_mnt,
60522+ const struct dentry * old_dentry,
60523+ const struct vfsmount * old_mnt, const char *to)
60524+{
60525+ __u32 mode;
60526+ __u32 needmode = GR_CREATE | GR_LINK;
60527+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
60528+
60529+ mode =
60530+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
60531+ old_mnt);
60532+
60533+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
60534+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60535+ return mode;
60536+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60537+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
60538+ return 0;
60539+ } else if (unlikely((mode & needmode) != needmode))
60540+ return 0;
60541+
60542+ return 1;
60543+}
60544+
60545+__u32
60546+gr_acl_handle_symlink(const struct dentry * new_dentry,
60547+ const struct dentry * parent_dentry,
60548+ const struct vfsmount * parent_mnt, const char *from)
60549+{
60550+ __u32 needmode = GR_WRITE | GR_CREATE;
60551+ __u32 mode;
60552+
60553+ mode =
60554+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
60555+ GR_CREATE | GR_AUDIT_CREATE |
60556+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
60557+
60558+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
60559+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60560+ return mode;
60561+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
60562+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
60563+ return 0;
60564+ } else if (unlikely((mode & needmode) != needmode))
60565+ return 0;
60566+
60567+ return (GR_WRITE | GR_CREATE);
60568+}
60569+
60570+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
60571+{
60572+ __u32 mode;
60573+
60574+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
60575+
60576+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
60577+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
60578+ return mode;
60579+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
60580+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
60581+ return 0;
60582+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
60583+ return 0;
60584+
60585+ return (reqmode);
60586+}
60587+
60588+__u32
60589+gr_acl_handle_mknod(const struct dentry * new_dentry,
60590+ const struct dentry * parent_dentry,
60591+ const struct vfsmount * parent_mnt,
60592+ const int mode)
60593+{
60594+ __u32 reqmode = GR_WRITE | GR_CREATE;
60595+ if (unlikely(mode & (S_ISUID | S_ISGID)))
60596+ reqmode |= GR_SETID;
60597+
60598+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60599+ reqmode, GR_MKNOD_ACL_MSG);
60600+}
60601+
60602+__u32
60603+gr_acl_handle_mkdir(const struct dentry *new_dentry,
60604+ const struct dentry *parent_dentry,
60605+ const struct vfsmount *parent_mnt)
60606+{
60607+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
60608+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
60609+}
60610+
60611+#define RENAME_CHECK_SUCCESS(old, new) \
60612+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
60613+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
60614+
60615+int
60616+gr_acl_handle_rename(struct dentry *new_dentry,
60617+ struct dentry *parent_dentry,
60618+ const struct vfsmount *parent_mnt,
60619+ struct dentry *old_dentry,
60620+ struct inode *old_parent_inode,
60621+ struct vfsmount *old_mnt, const char *newname)
60622+{
60623+ __u32 comp1, comp2;
60624+ int error = 0;
60625+
60626+ if (unlikely(!gr_acl_is_enabled()))
60627+ return 0;
60628+
60629+ if (!new_dentry->d_inode) {
60630+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
60631+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
60632+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
60633+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
60634+ GR_DELETE | GR_AUDIT_DELETE |
60635+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60636+ GR_SUPPRESS, old_mnt);
60637+ } else {
60638+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
60639+ GR_CREATE | GR_DELETE |
60640+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
60641+ GR_AUDIT_READ | GR_AUDIT_WRITE |
60642+ GR_SUPPRESS, parent_mnt);
60643+ comp2 =
60644+ gr_search_file(old_dentry,
60645+ GR_READ | GR_WRITE | GR_AUDIT_READ |
60646+ GR_DELETE | GR_AUDIT_DELETE |
60647+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
60648+ }
60649+
60650+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
60651+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
60652+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60653+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
60654+ && !(comp2 & GR_SUPPRESS)) {
60655+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
60656+ error = -EACCES;
60657+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
60658+ error = -EACCES;
60659+
60660+ return error;
60661+}
60662+
60663+void
60664+gr_acl_handle_exit(void)
60665+{
60666+ u16 id;
60667+ char *rolename;
60668+ struct file *exec_file;
60669+
60670+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
60671+ !(current->role->roletype & GR_ROLE_PERSIST))) {
60672+ id = current->acl_role_id;
60673+ rolename = current->role->rolename;
60674+ gr_set_acls(1);
60675+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
60676+ }
60677+
60678+ write_lock(&grsec_exec_file_lock);
60679+ exec_file = current->exec_file;
60680+ current->exec_file = NULL;
60681+ write_unlock(&grsec_exec_file_lock);
60682+
60683+ if (exec_file)
60684+ fput(exec_file);
60685+}
60686+
60687+int
60688+gr_acl_handle_procpidmem(const struct task_struct *task)
60689+{
60690+ if (unlikely(!gr_acl_is_enabled()))
60691+ return 0;
60692+
60693+ if (task != current && task->acl->mode & GR_PROTPROCFD)
60694+ return -EACCES;
60695+
60696+ return 0;
60697+}
60698diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
60699new file mode 100644
60700index 0000000..cd07b96
60701--- /dev/null
60702+++ b/grsecurity/gracl_ip.c
60703@@ -0,0 +1,382 @@
60704+#include <linux/kernel.h>
60705+#include <asm/uaccess.h>
60706+#include <asm/errno.h>
60707+#include <net/sock.h>
60708+#include <linux/file.h>
60709+#include <linux/fs.h>
60710+#include <linux/net.h>
60711+#include <linux/in.h>
60712+#include <linux/skbuff.h>
60713+#include <linux/ip.h>
60714+#include <linux/udp.h>
60715+#include <linux/smp_lock.h>
60716+#include <linux/types.h>
60717+#include <linux/sched.h>
60718+#include <linux/netdevice.h>
60719+#include <linux/inetdevice.h>
60720+#include <linux/gracl.h>
60721+#include <linux/grsecurity.h>
60722+#include <linux/grinternal.h>
60723+
60724+#define GR_BIND 0x01
60725+#define GR_CONNECT 0x02
60726+#define GR_INVERT 0x04
60727+#define GR_BINDOVERRIDE 0x08
60728+#define GR_CONNECTOVERRIDE 0x10
60729+#define GR_SOCK_FAMILY 0x20
60730+
60731+static const char * gr_protocols[IPPROTO_MAX] = {
60732+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
60733+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
60734+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
60735+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
60736+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
60737+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
60738+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
60739+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
60740+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
60741+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
60742+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
60743+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
60744+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
60745+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
60746+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
60747+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
60748+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
60749+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
60750+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
60751+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
60752+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
60753+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
60754+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
60755+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
60756+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
60757+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
60758+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
60759+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
60760+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
60761+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
60762+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
60763+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
60764+ };
60765+
60766+static const char * gr_socktypes[SOCK_MAX] = {
60767+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
60768+ "unknown:7", "unknown:8", "unknown:9", "packet"
60769+ };
60770+
60771+static const char * gr_sockfamilies[AF_MAX+1] = {
60772+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
60773+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
60774+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
60775+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
60776+ };
60777+
60778+const char *
60779+gr_proto_to_name(unsigned char proto)
60780+{
60781+ return gr_protocols[proto];
60782+}
60783+
60784+const char *
60785+gr_socktype_to_name(unsigned char type)
60786+{
60787+ return gr_socktypes[type];
60788+}
60789+
60790+const char *
60791+gr_sockfamily_to_name(unsigned char family)
60792+{
60793+ return gr_sockfamilies[family];
60794+}
60795+
60796+int
60797+gr_search_socket(const int domain, const int type, const int protocol)
60798+{
60799+ struct acl_subject_label *curr;
60800+ const struct cred *cred = current_cred();
60801+
60802+ if (unlikely(!gr_acl_is_enabled()))
60803+ goto exit;
60804+
60805+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
60806+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
60807+ goto exit; // let the kernel handle it
60808+
60809+ curr = current->acl;
60810+
60811+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
60812+ /* the family is allowed, if this is PF_INET allow it only if
60813+ the extra sock type/protocol checks pass */
60814+ if (domain == PF_INET)
60815+ goto inet_check;
60816+ goto exit;
60817+ } else {
60818+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60819+ __u32 fakeip = 0;
60820+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60821+ current->role->roletype, cred->uid,
60822+ cred->gid, current->exec_file ?
60823+ gr_to_filename(current->exec_file->f_path.dentry,
60824+ current->exec_file->f_path.mnt) :
60825+ curr->filename, curr->filename,
60826+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
60827+ &current->signal->saved_ip);
60828+ goto exit;
60829+ }
60830+ goto exit_fail;
60831+ }
60832+
60833+inet_check:
60834+ /* the rest of this checking is for IPv4 only */
60835+ if (!curr->ips)
60836+ goto exit;
60837+
60838+ if ((curr->ip_type & (1 << type)) &&
60839+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
60840+ goto exit;
60841+
60842+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60843+ /* we don't place acls on raw sockets , and sometimes
60844+ dgram/ip sockets are opened for ioctl and not
60845+ bind/connect, so we'll fake a bind learn log */
60846+ if (type == SOCK_RAW || type == SOCK_PACKET) {
60847+ __u32 fakeip = 0;
60848+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60849+ current->role->roletype, cred->uid,
60850+ cred->gid, current->exec_file ?
60851+ gr_to_filename(current->exec_file->f_path.dentry,
60852+ current->exec_file->f_path.mnt) :
60853+ curr->filename, curr->filename,
60854+ &fakeip, 0, type,
60855+ protocol, GR_CONNECT, &current->signal->saved_ip);
60856+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
60857+ __u32 fakeip = 0;
60858+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60859+ current->role->roletype, cred->uid,
60860+ cred->gid, current->exec_file ?
60861+ gr_to_filename(current->exec_file->f_path.dentry,
60862+ current->exec_file->f_path.mnt) :
60863+ curr->filename, curr->filename,
60864+ &fakeip, 0, type,
60865+ protocol, GR_BIND, &current->signal->saved_ip);
60866+ }
60867+ /* we'll log when they use connect or bind */
60868+ goto exit;
60869+ }
60870+
60871+exit_fail:
60872+ if (domain == PF_INET)
60873+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
60874+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
60875+ else
60876+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
60877+ gr_socktype_to_name(type), protocol);
60878+
60879+ return 0;
60880+exit:
60881+ return 1;
60882+}
60883+
60884+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
60885+{
60886+ if ((ip->mode & mode) &&
60887+ (ip_port >= ip->low) &&
60888+ (ip_port <= ip->high) &&
60889+ ((ntohl(ip_addr) & our_netmask) ==
60890+ (ntohl(our_addr) & our_netmask))
60891+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
60892+ && (ip->type & (1 << type))) {
60893+ if (ip->mode & GR_INVERT)
60894+ return 2; // specifically denied
60895+ else
60896+ return 1; // allowed
60897+ }
60898+
60899+ return 0; // not specifically allowed, may continue parsing
60900+}
60901+
60902+static int
60903+gr_search_connectbind(const int full_mode, struct sock *sk,
60904+ struct sockaddr_in *addr, const int type)
60905+{
60906+ char iface[IFNAMSIZ] = {0};
60907+ struct acl_subject_label *curr;
60908+ struct acl_ip_label *ip;
60909+ struct inet_sock *isk;
60910+ struct net_device *dev;
60911+ struct in_device *idev;
60912+ unsigned long i;
60913+ int ret;
60914+ int mode = full_mode & (GR_BIND | GR_CONNECT);
60915+ __u32 ip_addr = 0;
60916+ __u32 our_addr;
60917+ __u32 our_netmask;
60918+ char *p;
60919+ __u16 ip_port = 0;
60920+ const struct cred *cred = current_cred();
60921+
60922+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
60923+ return 0;
60924+
60925+ curr = current->acl;
60926+ isk = inet_sk(sk);
60927+
60928+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
60929+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
60930+ addr->sin_addr.s_addr = curr->inaddr_any_override;
60931+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
60932+ struct sockaddr_in saddr;
60933+ int err;
60934+
60935+ saddr.sin_family = AF_INET;
60936+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
60937+ saddr.sin_port = isk->sport;
60938+
60939+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60940+ if (err)
60941+ return err;
60942+
60943+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
60944+ if (err)
60945+ return err;
60946+ }
60947+
60948+ if (!curr->ips)
60949+ return 0;
60950+
60951+ ip_addr = addr->sin_addr.s_addr;
60952+ ip_port = ntohs(addr->sin_port);
60953+
60954+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
60955+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
60956+ current->role->roletype, cred->uid,
60957+ cred->gid, current->exec_file ?
60958+ gr_to_filename(current->exec_file->f_path.dentry,
60959+ current->exec_file->f_path.mnt) :
60960+ curr->filename, curr->filename,
60961+ &ip_addr, ip_port, type,
60962+ sk->sk_protocol, mode, &current->signal->saved_ip);
60963+ return 0;
60964+ }
60965+
60966+ for (i = 0; i < curr->ip_num; i++) {
60967+ ip = *(curr->ips + i);
60968+ if (ip->iface != NULL) {
60969+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
60970+ p = strchr(iface, ':');
60971+ if (p != NULL)
60972+ *p = '\0';
60973+ dev = dev_get_by_name(sock_net(sk), iface);
60974+ if (dev == NULL)
60975+ continue;
60976+ idev = in_dev_get(dev);
60977+ if (idev == NULL) {
60978+ dev_put(dev);
60979+ continue;
60980+ }
60981+ rcu_read_lock();
60982+ for_ifa(idev) {
60983+ if (!strcmp(ip->iface, ifa->ifa_label)) {
60984+ our_addr = ifa->ifa_address;
60985+ our_netmask = 0xffffffff;
60986+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
60987+ if (ret == 1) {
60988+ rcu_read_unlock();
60989+ in_dev_put(idev);
60990+ dev_put(dev);
60991+ return 0;
60992+ } else if (ret == 2) {
60993+ rcu_read_unlock();
60994+ in_dev_put(idev);
60995+ dev_put(dev);
60996+ goto denied;
60997+ }
60998+ }
60999+ } endfor_ifa(idev);
61000+ rcu_read_unlock();
61001+ in_dev_put(idev);
61002+ dev_put(dev);
61003+ } else {
61004+ our_addr = ip->addr;
61005+ our_netmask = ip->netmask;
61006+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
61007+ if (ret == 1)
61008+ return 0;
61009+ else if (ret == 2)
61010+ goto denied;
61011+ }
61012+ }
61013+
61014+denied:
61015+ if (mode == GR_BIND)
61016+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61017+ else if (mode == GR_CONNECT)
61018+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
61019+
61020+ return -EACCES;
61021+}
61022+
61023+int
61024+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
61025+{
61026+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
61027+}
61028+
61029+int
61030+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
61031+{
61032+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
61033+}
61034+
61035+int gr_search_listen(struct socket *sock)
61036+{
61037+ struct sock *sk = sock->sk;
61038+ struct sockaddr_in addr;
61039+
61040+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61041+ addr.sin_port = inet_sk(sk)->sport;
61042+
61043+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61044+}
61045+
61046+int gr_search_accept(struct socket *sock)
61047+{
61048+ struct sock *sk = sock->sk;
61049+ struct sockaddr_in addr;
61050+
61051+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
61052+ addr.sin_port = inet_sk(sk)->sport;
61053+
61054+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
61055+}
61056+
61057+int
61058+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
61059+{
61060+ if (addr)
61061+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
61062+ else {
61063+ struct sockaddr_in sin;
61064+ const struct inet_sock *inet = inet_sk(sk);
61065+
61066+ sin.sin_addr.s_addr = inet->daddr;
61067+ sin.sin_port = inet->dport;
61068+
61069+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61070+ }
61071+}
61072+
61073+int
61074+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
61075+{
61076+ struct sockaddr_in sin;
61077+
61078+ if (unlikely(skb->len < sizeof (struct udphdr)))
61079+ return 0; // skip this packet
61080+
61081+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
61082+ sin.sin_port = udp_hdr(skb)->source;
61083+
61084+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
61085+}
61086diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
61087new file mode 100644
61088index 0000000..34bdd46
61089--- /dev/null
61090+++ b/grsecurity/gracl_learn.c
61091@@ -0,0 +1,208 @@
61092+#include <linux/kernel.h>
61093+#include <linux/mm.h>
61094+#include <linux/sched.h>
61095+#include <linux/poll.h>
61096+#include <linux/smp_lock.h>
61097+#include <linux/string.h>
61098+#include <linux/file.h>
61099+#include <linux/types.h>
61100+#include <linux/vmalloc.h>
61101+#include <linux/grinternal.h>
61102+
61103+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
61104+ size_t count, loff_t *ppos);
61105+extern int gr_acl_is_enabled(void);
61106+
61107+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
61108+static int gr_learn_attached;
61109+
61110+/* use a 512k buffer */
61111+#define LEARN_BUFFER_SIZE (512 * 1024)
61112+
61113+static DEFINE_SPINLOCK(gr_learn_lock);
61114+static DEFINE_MUTEX(gr_learn_user_mutex);
61115+
61116+/* we need to maintain two buffers, so that the kernel context of grlearn
61117+ uses a semaphore around the userspace copying, and the other kernel contexts
61118+ use a spinlock when copying into the buffer, since they cannot sleep
61119+*/
61120+static char *learn_buffer;
61121+static char *learn_buffer_user;
61122+static int learn_buffer_len;
61123+static int learn_buffer_user_len;
61124+
61125+static ssize_t
61126+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
61127+{
61128+ DECLARE_WAITQUEUE(wait, current);
61129+ ssize_t retval = 0;
61130+
61131+ add_wait_queue(&learn_wait, &wait);
61132+ set_current_state(TASK_INTERRUPTIBLE);
61133+ do {
61134+ mutex_lock(&gr_learn_user_mutex);
61135+ spin_lock(&gr_learn_lock);
61136+ if (learn_buffer_len)
61137+ break;
61138+ spin_unlock(&gr_learn_lock);
61139+ mutex_unlock(&gr_learn_user_mutex);
61140+ if (file->f_flags & O_NONBLOCK) {
61141+ retval = -EAGAIN;
61142+ goto out;
61143+ }
61144+ if (signal_pending(current)) {
61145+ retval = -ERESTARTSYS;
61146+ goto out;
61147+ }
61148+
61149+ schedule();
61150+ } while (1);
61151+
61152+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
61153+ learn_buffer_user_len = learn_buffer_len;
61154+ retval = learn_buffer_len;
61155+ learn_buffer_len = 0;
61156+
61157+ spin_unlock(&gr_learn_lock);
61158+
61159+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
61160+ retval = -EFAULT;
61161+
61162+ mutex_unlock(&gr_learn_user_mutex);
61163+out:
61164+ set_current_state(TASK_RUNNING);
61165+ remove_wait_queue(&learn_wait, &wait);
61166+ return retval;
61167+}
61168+
61169+static unsigned int
61170+poll_learn(struct file * file, poll_table * wait)
61171+{
61172+ poll_wait(file, &learn_wait, wait);
61173+
61174+ if (learn_buffer_len)
61175+ return (POLLIN | POLLRDNORM);
61176+
61177+ return 0;
61178+}
61179+
61180+void
61181+gr_clear_learn_entries(void)
61182+{
61183+ char *tmp;
61184+
61185+ mutex_lock(&gr_learn_user_mutex);
61186+ spin_lock(&gr_learn_lock);
61187+ tmp = learn_buffer;
61188+ learn_buffer = NULL;
61189+ spin_unlock(&gr_learn_lock);
61190+ if (tmp)
61191+ vfree(tmp);
61192+ if (learn_buffer_user != NULL) {
61193+ vfree(learn_buffer_user);
61194+ learn_buffer_user = NULL;
61195+ }
61196+ learn_buffer_len = 0;
61197+ mutex_unlock(&gr_learn_user_mutex);
61198+
61199+ return;
61200+}
61201+
61202+void
61203+gr_add_learn_entry(const char *fmt, ...)
61204+{
61205+ va_list args;
61206+ unsigned int len;
61207+
61208+ if (!gr_learn_attached)
61209+ return;
61210+
61211+ spin_lock(&gr_learn_lock);
61212+
61213+ /* leave a gap at the end so we know when it's "full" but don't have to
61214+ compute the exact length of the string we're trying to append
61215+ */
61216+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
61217+ spin_unlock(&gr_learn_lock);
61218+ wake_up_interruptible(&learn_wait);
61219+ return;
61220+ }
61221+ if (learn_buffer == NULL) {
61222+ spin_unlock(&gr_learn_lock);
61223+ return;
61224+ }
61225+
61226+ va_start(args, fmt);
61227+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
61228+ va_end(args);
61229+
61230+ learn_buffer_len += len + 1;
61231+
61232+ spin_unlock(&gr_learn_lock);
61233+ wake_up_interruptible(&learn_wait);
61234+
61235+ return;
61236+}
61237+
61238+static int
61239+open_learn(struct inode *inode, struct file *file)
61240+{
61241+ if (file->f_mode & FMODE_READ && gr_learn_attached)
61242+ return -EBUSY;
61243+ if (file->f_mode & FMODE_READ) {
61244+ int retval = 0;
61245+ mutex_lock(&gr_learn_user_mutex);
61246+ if (learn_buffer == NULL)
61247+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
61248+ if (learn_buffer_user == NULL)
61249+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
61250+ if (learn_buffer == NULL) {
61251+ retval = -ENOMEM;
61252+ goto out_error;
61253+ }
61254+ if (learn_buffer_user == NULL) {
61255+ retval = -ENOMEM;
61256+ goto out_error;
61257+ }
61258+ learn_buffer_len = 0;
61259+ learn_buffer_user_len = 0;
61260+ gr_learn_attached = 1;
61261+out_error:
61262+ mutex_unlock(&gr_learn_user_mutex);
61263+ return retval;
61264+ }
61265+ return 0;
61266+}
61267+
61268+static int
61269+close_learn(struct inode *inode, struct file *file)
61270+{
61271+ if (file->f_mode & FMODE_READ) {
61272+ char *tmp = NULL;
61273+ mutex_lock(&gr_learn_user_mutex);
61274+ spin_lock(&gr_learn_lock);
61275+ tmp = learn_buffer;
61276+ learn_buffer = NULL;
61277+ spin_unlock(&gr_learn_lock);
61278+ if (tmp)
61279+ vfree(tmp);
61280+ if (learn_buffer_user != NULL) {
61281+ vfree(learn_buffer_user);
61282+ learn_buffer_user = NULL;
61283+ }
61284+ learn_buffer_len = 0;
61285+ learn_buffer_user_len = 0;
61286+ gr_learn_attached = 0;
61287+ mutex_unlock(&gr_learn_user_mutex);
61288+ }
61289+
61290+ return 0;
61291+}
61292+
61293+const struct file_operations grsec_fops = {
61294+ .read = read_learn,
61295+ .write = write_grsec_handler,
61296+ .open = open_learn,
61297+ .release = close_learn,
61298+ .poll = poll_learn,
61299+};
61300diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
61301new file mode 100644
61302index 0000000..70b2179
61303--- /dev/null
61304+++ b/grsecurity/gracl_res.c
61305@@ -0,0 +1,67 @@
61306+#include <linux/kernel.h>
61307+#include <linux/sched.h>
61308+#include <linux/gracl.h>
61309+#include <linux/grinternal.h>
61310+
61311+static const char *restab_log[] = {
61312+ [RLIMIT_CPU] = "RLIMIT_CPU",
61313+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
61314+ [RLIMIT_DATA] = "RLIMIT_DATA",
61315+ [RLIMIT_STACK] = "RLIMIT_STACK",
61316+ [RLIMIT_CORE] = "RLIMIT_CORE",
61317+ [RLIMIT_RSS] = "RLIMIT_RSS",
61318+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
61319+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
61320+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
61321+ [RLIMIT_AS] = "RLIMIT_AS",
61322+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
61323+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
61324+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
61325+ [RLIMIT_NICE] = "RLIMIT_NICE",
61326+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
61327+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
61328+ [GR_CRASH_RES] = "RLIMIT_CRASH"
61329+};
61330+
61331+void
61332+gr_log_resource(const struct task_struct *task,
61333+ const int res, const unsigned long wanted, const int gt)
61334+{
61335+ const struct cred *cred;
61336+ unsigned long rlim;
61337+
61338+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
61339+ return;
61340+
61341+ // not yet supported resource
61342+ if (unlikely(!restab_log[res]))
61343+ return;
61344+
61345+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
61346+ rlim = task->signal->rlim[res].rlim_max;
61347+ else
61348+ rlim = task->signal->rlim[res].rlim_cur;
61349+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
61350+ return;
61351+
61352+ rcu_read_lock();
61353+ cred = __task_cred(task);
61354+
61355+ if (res == RLIMIT_NPROC &&
61356+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
61357+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
61358+ goto out_rcu_unlock;
61359+ else if (res == RLIMIT_MEMLOCK &&
61360+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
61361+ goto out_rcu_unlock;
61362+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
61363+ goto out_rcu_unlock;
61364+ rcu_read_unlock();
61365+
61366+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
61367+
61368+ return;
61369+out_rcu_unlock:
61370+ rcu_read_unlock();
61371+ return;
61372+}
61373diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
61374new file mode 100644
61375index 0000000..1d1b734
61376--- /dev/null
61377+++ b/grsecurity/gracl_segv.c
61378@@ -0,0 +1,284 @@
61379+#include <linux/kernel.h>
61380+#include <linux/mm.h>
61381+#include <asm/uaccess.h>
61382+#include <asm/errno.h>
61383+#include <asm/mman.h>
61384+#include <net/sock.h>
61385+#include <linux/file.h>
61386+#include <linux/fs.h>
61387+#include <linux/net.h>
61388+#include <linux/in.h>
61389+#include <linux/smp_lock.h>
61390+#include <linux/slab.h>
61391+#include <linux/types.h>
61392+#include <linux/sched.h>
61393+#include <linux/timer.h>
61394+#include <linux/gracl.h>
61395+#include <linux/grsecurity.h>
61396+#include <linux/grinternal.h>
61397+
61398+static struct crash_uid *uid_set;
61399+static unsigned short uid_used;
61400+static DEFINE_SPINLOCK(gr_uid_lock);
61401+extern rwlock_t gr_inode_lock;
61402+extern struct acl_subject_label *
61403+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
61404+ struct acl_role_label *role);
61405+extern int gr_fake_force_sig(int sig, struct task_struct *t);
61406+
61407+int
61408+gr_init_uidset(void)
61409+{
61410+ uid_set =
61411+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
61412+ uid_used = 0;
61413+
61414+ return uid_set ? 1 : 0;
61415+}
61416+
61417+void
61418+gr_free_uidset(void)
61419+{
61420+ if (uid_set)
61421+ kfree(uid_set);
61422+
61423+ return;
61424+}
61425+
61426+int
61427+gr_find_uid(const uid_t uid)
61428+{
61429+ struct crash_uid *tmp = uid_set;
61430+ uid_t buid;
61431+ int low = 0, high = uid_used - 1, mid;
61432+
61433+ while (high >= low) {
61434+ mid = (low + high) >> 1;
61435+ buid = tmp[mid].uid;
61436+ if (buid == uid)
61437+ return mid;
61438+ if (buid > uid)
61439+ high = mid - 1;
61440+ if (buid < uid)
61441+ low = mid + 1;
61442+ }
61443+
61444+ return -1;
61445+}
61446+
61447+static __inline__ void
61448+gr_insertsort(void)
61449+{
61450+ unsigned short i, j;
61451+ struct crash_uid index;
61452+
61453+ for (i = 1; i < uid_used; i++) {
61454+ index = uid_set[i];
61455+ j = i;
61456+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
61457+ uid_set[j] = uid_set[j - 1];
61458+ j--;
61459+ }
61460+ uid_set[j] = index;
61461+ }
61462+
61463+ return;
61464+}
61465+
61466+static __inline__ void
61467+gr_insert_uid(const uid_t uid, const unsigned long expires)
61468+{
61469+ int loc;
61470+
61471+ if (uid_used == GR_UIDTABLE_MAX)
61472+ return;
61473+
61474+ loc = gr_find_uid(uid);
61475+
61476+ if (loc >= 0) {
61477+ uid_set[loc].expires = expires;
61478+ return;
61479+ }
61480+
61481+ uid_set[uid_used].uid = uid;
61482+ uid_set[uid_used].expires = expires;
61483+ uid_used++;
61484+
61485+ gr_insertsort();
61486+
61487+ return;
61488+}
61489+
61490+void
61491+gr_remove_uid(const unsigned short loc)
61492+{
61493+ unsigned short i;
61494+
61495+ for (i = loc + 1; i < uid_used; i++)
61496+ uid_set[i - 1] = uid_set[i];
61497+
61498+ uid_used--;
61499+
61500+ return;
61501+}
61502+
61503+int
61504+gr_check_crash_uid(const uid_t uid)
61505+{
61506+ int loc;
61507+ int ret = 0;
61508+
61509+ if (unlikely(!gr_acl_is_enabled()))
61510+ return 0;
61511+
61512+ spin_lock(&gr_uid_lock);
61513+ loc = gr_find_uid(uid);
61514+
61515+ if (loc < 0)
61516+ goto out_unlock;
61517+
61518+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
61519+ gr_remove_uid(loc);
61520+ else
61521+ ret = 1;
61522+
61523+out_unlock:
61524+ spin_unlock(&gr_uid_lock);
61525+ return ret;
61526+}
61527+
61528+static __inline__ int
61529+proc_is_setxid(const struct cred *cred)
61530+{
61531+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
61532+ cred->uid != cred->fsuid)
61533+ return 1;
61534+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
61535+ cred->gid != cred->fsgid)
61536+ return 1;
61537+
61538+ return 0;
61539+}
61540+
61541+void
61542+gr_handle_crash(struct task_struct *task, const int sig)
61543+{
61544+ struct acl_subject_label *curr;
61545+ struct task_struct *tsk, *tsk2;
61546+ const struct cred *cred;
61547+ const struct cred *cred2;
61548+
61549+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
61550+ return;
61551+
61552+ if (unlikely(!gr_acl_is_enabled()))
61553+ return;
61554+
61555+ curr = task->acl;
61556+
61557+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
61558+ return;
61559+
61560+ if (time_before_eq(curr->expires, get_seconds())) {
61561+ curr->expires = 0;
61562+ curr->crashes = 0;
61563+ }
61564+
61565+ curr->crashes++;
61566+
61567+ if (!curr->expires)
61568+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
61569+
61570+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61571+ time_after(curr->expires, get_seconds())) {
61572+ rcu_read_lock();
61573+ cred = __task_cred(task);
61574+ if (cred->uid && proc_is_setxid(cred)) {
61575+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61576+ spin_lock(&gr_uid_lock);
61577+ gr_insert_uid(cred->uid, curr->expires);
61578+ spin_unlock(&gr_uid_lock);
61579+ curr->expires = 0;
61580+ curr->crashes = 0;
61581+ read_lock(&tasklist_lock);
61582+ do_each_thread(tsk2, tsk) {
61583+ cred2 = __task_cred(tsk);
61584+ if (tsk != task && cred2->uid == cred->uid)
61585+ gr_fake_force_sig(SIGKILL, tsk);
61586+ } while_each_thread(tsk2, tsk);
61587+ read_unlock(&tasklist_lock);
61588+ } else {
61589+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
61590+ read_lock(&tasklist_lock);
61591+ read_lock(&grsec_exec_file_lock);
61592+ do_each_thread(tsk2, tsk) {
61593+ if (likely(tsk != task)) {
61594+ // if this thread has the same subject as the one that triggered
61595+ // RES_CRASH and it's the same binary, kill it
61596+ if (tsk->acl == task->acl && tsk->exec_file == task->exec_file)
61597+ gr_fake_force_sig(SIGKILL, tsk);
61598+ }
61599+ } while_each_thread(tsk2, tsk);
61600+ read_unlock(&grsec_exec_file_lock);
61601+ read_unlock(&tasklist_lock);
61602+ }
61603+ rcu_read_unlock();
61604+ }
61605+
61606+ return;
61607+}
61608+
61609+int
61610+gr_check_crash_exec(const struct file *filp)
61611+{
61612+ struct acl_subject_label *curr;
61613+
61614+ if (unlikely(!gr_acl_is_enabled()))
61615+ return 0;
61616+
61617+ read_lock(&gr_inode_lock);
61618+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
61619+ filp->f_path.dentry->d_inode->i_sb->s_dev,
61620+ current->role);
61621+ read_unlock(&gr_inode_lock);
61622+
61623+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
61624+ (!curr->crashes && !curr->expires))
61625+ return 0;
61626+
61627+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
61628+ time_after(curr->expires, get_seconds()))
61629+ return 1;
61630+ else if (time_before_eq(curr->expires, get_seconds())) {
61631+ curr->crashes = 0;
61632+ curr->expires = 0;
61633+ }
61634+
61635+ return 0;
61636+}
61637+
61638+void
61639+gr_handle_alertkill(struct task_struct *task)
61640+{
61641+ struct acl_subject_label *curracl;
61642+ __u32 curr_ip;
61643+ struct task_struct *p, *p2;
61644+
61645+ if (unlikely(!gr_acl_is_enabled()))
61646+ return;
61647+
61648+ curracl = task->acl;
61649+ curr_ip = task->signal->curr_ip;
61650+
61651+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
61652+ read_lock(&tasklist_lock);
61653+ do_each_thread(p2, p) {
61654+ if (p->signal->curr_ip == curr_ip)
61655+ gr_fake_force_sig(SIGKILL, p);
61656+ } while_each_thread(p2, p);
61657+ read_unlock(&tasklist_lock);
61658+ } else if (curracl->mode & GR_KILLPROC)
61659+ gr_fake_force_sig(SIGKILL, task);
61660+
61661+ return;
61662+}
61663diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
61664new file mode 100644
61665index 0000000..9d83a69
61666--- /dev/null
61667+++ b/grsecurity/gracl_shm.c
61668@@ -0,0 +1,40 @@
61669+#include <linux/kernel.h>
61670+#include <linux/mm.h>
61671+#include <linux/sched.h>
61672+#include <linux/file.h>
61673+#include <linux/ipc.h>
61674+#include <linux/gracl.h>
61675+#include <linux/grsecurity.h>
61676+#include <linux/grinternal.h>
61677+
61678+int
61679+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61680+ const time_t shm_createtime, const uid_t cuid, const int shmid)
61681+{
61682+ struct task_struct *task;
61683+
61684+ if (!gr_acl_is_enabled())
61685+ return 1;
61686+
61687+ rcu_read_lock();
61688+ read_lock(&tasklist_lock);
61689+
61690+ task = find_task_by_vpid(shm_cprid);
61691+
61692+ if (unlikely(!task))
61693+ task = find_task_by_vpid(shm_lapid);
61694+
61695+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
61696+ (task->pid == shm_lapid)) &&
61697+ (task->acl->mode & GR_PROTSHM) &&
61698+ (task->acl != current->acl))) {
61699+ read_unlock(&tasklist_lock);
61700+ rcu_read_unlock();
61701+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
61702+ return 0;
61703+ }
61704+ read_unlock(&tasklist_lock);
61705+ rcu_read_unlock();
61706+
61707+ return 1;
61708+}
61709diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
61710new file mode 100644
61711index 0000000..bc0be01
61712--- /dev/null
61713+++ b/grsecurity/grsec_chdir.c
61714@@ -0,0 +1,19 @@
61715+#include <linux/kernel.h>
61716+#include <linux/sched.h>
61717+#include <linux/fs.h>
61718+#include <linux/file.h>
61719+#include <linux/grsecurity.h>
61720+#include <linux/grinternal.h>
61721+
61722+void
61723+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
61724+{
61725+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
61726+ if ((grsec_enable_chdir && grsec_enable_group &&
61727+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
61728+ !grsec_enable_group)) {
61729+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
61730+ }
61731+#endif
61732+ return;
61733+}
61734diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
61735new file mode 100644
61736index 0000000..197bdd5
61737--- /dev/null
61738+++ b/grsecurity/grsec_chroot.c
61739@@ -0,0 +1,386 @@
61740+#include <linux/kernel.h>
61741+#include <linux/module.h>
61742+#include <linux/sched.h>
61743+#include <linux/file.h>
61744+#include <linux/fs.h>
61745+#include <linux/mount.h>
61746+#include <linux/types.h>
61747+#include <linux/pid_namespace.h>
61748+#include <linux/grsecurity.h>
61749+#include <linux/grinternal.h>
61750+
61751+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
61752+{
61753+#ifdef CONFIG_GRKERNSEC
61754+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
61755+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
61756+ task->gr_is_chrooted = 1;
61757+ else
61758+ task->gr_is_chrooted = 0;
61759+
61760+ task->gr_chroot_dentry = path->dentry;
61761+#endif
61762+ return;
61763+}
61764+
61765+void gr_clear_chroot_entries(struct task_struct *task)
61766+{
61767+#ifdef CONFIG_GRKERNSEC
61768+ task->gr_is_chrooted = 0;
61769+ task->gr_chroot_dentry = NULL;
61770+#endif
61771+ return;
61772+}
61773+
61774+int
61775+gr_handle_chroot_unix(const pid_t pid)
61776+{
61777+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
61778+ struct task_struct *p;
61779+
61780+ if (unlikely(!grsec_enable_chroot_unix))
61781+ return 1;
61782+
61783+ if (likely(!proc_is_chrooted(current)))
61784+ return 1;
61785+
61786+ rcu_read_lock();
61787+ read_lock(&tasklist_lock);
61788+
61789+ p = find_task_by_vpid_unrestricted(pid);
61790+ if (unlikely(p && !have_same_root(current, p))) {
61791+ read_unlock(&tasklist_lock);
61792+ rcu_read_unlock();
61793+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
61794+ return 0;
61795+ }
61796+ read_unlock(&tasklist_lock);
61797+ rcu_read_unlock();
61798+#endif
61799+ return 1;
61800+}
61801+
61802+int
61803+gr_handle_chroot_nice(void)
61804+{
61805+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61806+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
61807+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
61808+ return -EPERM;
61809+ }
61810+#endif
61811+ return 0;
61812+}
61813+
61814+int
61815+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
61816+{
61817+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
61818+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
61819+ && proc_is_chrooted(current)) {
61820+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
61821+ return -EACCES;
61822+ }
61823+#endif
61824+ return 0;
61825+}
61826+
61827+int
61828+gr_handle_chroot_rawio(const struct inode *inode)
61829+{
61830+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
61831+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
61832+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
61833+ return 1;
61834+#endif
61835+ return 0;
61836+}
61837+
61838+int
61839+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
61840+{
61841+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61842+ struct task_struct *p;
61843+ int ret = 0;
61844+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
61845+ return ret;
61846+
61847+ read_lock(&tasklist_lock);
61848+ do_each_pid_task(pid, type, p) {
61849+ if (!have_same_root(current, p)) {
61850+ ret = 1;
61851+ goto out;
61852+ }
61853+ } while_each_pid_task(pid, type, p);
61854+out:
61855+ read_unlock(&tasklist_lock);
61856+ return ret;
61857+#endif
61858+ return 0;
61859+}
61860+
61861+int
61862+gr_pid_is_chrooted(struct task_struct *p)
61863+{
61864+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
61865+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
61866+ return 0;
61867+
61868+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
61869+ !have_same_root(current, p)) {
61870+ return 1;
61871+ }
61872+#endif
61873+ return 0;
61874+}
61875+
61876+EXPORT_SYMBOL(gr_pid_is_chrooted);
61877+
61878+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
61879+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
61880+{
61881+ struct dentry *dentry = (struct dentry *)u_dentry;
61882+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
61883+ struct dentry *realroot;
61884+ struct vfsmount *realrootmnt;
61885+ struct dentry *currentroot;
61886+ struct vfsmount *currentmnt;
61887+ struct task_struct *reaper = &init_task;
61888+ int ret = 1;
61889+
61890+ read_lock(&reaper->fs->lock);
61891+ realrootmnt = mntget(reaper->fs->root.mnt);
61892+ realroot = dget(reaper->fs->root.dentry);
61893+ read_unlock(&reaper->fs->lock);
61894+
61895+ read_lock(&current->fs->lock);
61896+ currentmnt = mntget(current->fs->root.mnt);
61897+ currentroot = dget(current->fs->root.dentry);
61898+ read_unlock(&current->fs->lock);
61899+
61900+ spin_lock(&dcache_lock);
61901+ for (;;) {
61902+ if (unlikely((dentry == realroot && mnt == realrootmnt)
61903+ || (dentry == currentroot && mnt == currentmnt)))
61904+ break;
61905+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
61906+ if (mnt->mnt_parent == mnt)
61907+ break;
61908+ dentry = mnt->mnt_mountpoint;
61909+ mnt = mnt->mnt_parent;
61910+ continue;
61911+ }
61912+ dentry = dentry->d_parent;
61913+ }
61914+ spin_unlock(&dcache_lock);
61915+
61916+ dput(currentroot);
61917+ mntput(currentmnt);
61918+
61919+ /* access is outside of chroot */
61920+ if (dentry == realroot && mnt == realrootmnt)
61921+ ret = 0;
61922+
61923+ dput(realroot);
61924+ mntput(realrootmnt);
61925+ return ret;
61926+}
61927+#endif
61928+
61929+int
61930+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
61931+{
61932+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
61933+ if (!grsec_enable_chroot_fchdir)
61934+ return 1;
61935+
61936+ if (!proc_is_chrooted(current))
61937+ return 1;
61938+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
61939+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
61940+ return 0;
61941+ }
61942+#endif
61943+ return 1;
61944+}
61945+
61946+int
61947+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
61948+ const time_t shm_createtime)
61949+{
61950+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
61951+ struct task_struct *p;
61952+ time_t starttime;
61953+
61954+ if (unlikely(!grsec_enable_chroot_shmat))
61955+ return 1;
61956+
61957+ if (likely(!proc_is_chrooted(current)))
61958+ return 1;
61959+
61960+ rcu_read_lock();
61961+ read_lock(&tasklist_lock);
61962+
61963+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
61964+ starttime = p->start_time.tv_sec;
61965+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
61966+ if (have_same_root(current, p)) {
61967+ goto allow;
61968+ } else {
61969+ read_unlock(&tasklist_lock);
61970+ rcu_read_unlock();
61971+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61972+ return 0;
61973+ }
61974+ }
61975+ /* creator exited, pid reuse, fall through to next check */
61976+ }
61977+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
61978+ if (unlikely(!have_same_root(current, p))) {
61979+ read_unlock(&tasklist_lock);
61980+ rcu_read_unlock();
61981+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
61982+ return 0;
61983+ }
61984+ }
61985+
61986+allow:
61987+ read_unlock(&tasklist_lock);
61988+ rcu_read_unlock();
61989+#endif
61990+ return 1;
61991+}
61992+
61993+void
61994+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
61995+{
61996+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
61997+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
61998+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
61999+#endif
62000+ return;
62001+}
62002+
62003+int
62004+gr_handle_chroot_mknod(const struct dentry *dentry,
62005+ const struct vfsmount *mnt, const int mode)
62006+{
62007+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
62008+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
62009+ proc_is_chrooted(current)) {
62010+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
62011+ return -EPERM;
62012+ }
62013+#endif
62014+ return 0;
62015+}
62016+
62017+int
62018+gr_handle_chroot_mount(const struct dentry *dentry,
62019+ const struct vfsmount *mnt, const char *dev_name)
62020+{
62021+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
62022+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
62023+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
62024+ return -EPERM;
62025+ }
62026+#endif
62027+ return 0;
62028+}
62029+
62030+int
62031+gr_handle_chroot_pivot(void)
62032+{
62033+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
62034+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
62035+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
62036+ return -EPERM;
62037+ }
62038+#endif
62039+ return 0;
62040+}
62041+
62042+int
62043+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
62044+{
62045+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
62046+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
62047+ !gr_is_outside_chroot(dentry, mnt)) {
62048+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
62049+ return -EPERM;
62050+ }
62051+#endif
62052+ return 0;
62053+}
62054+
62055+extern const char *captab_log[];
62056+extern int captab_log_entries;
62057+
62058+int
62059+gr_chroot_is_capable(const int cap)
62060+{
62061+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62062+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62063+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62064+ if (cap_raised(chroot_caps, cap)) {
62065+ const struct cred *creds = current_cred();
62066+ if (cap_raised(creds->cap_effective, cap) && cap < captab_log_entries) {
62067+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, current, captab_log[cap]);
62068+ }
62069+ return 0;
62070+ }
62071+ }
62072+#endif
62073+ return 1;
62074+}
62075+
62076+int
62077+gr_chroot_is_capable_nolog(const int cap)
62078+{
62079+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
62080+ if (grsec_enable_chroot_caps && proc_is_chrooted(current)) {
62081+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
62082+ if (cap_raised(chroot_caps, cap)) {
62083+ return 0;
62084+ }
62085+ }
62086+#endif
62087+ return 1;
62088+}
62089+
62090+int
62091+gr_handle_chroot_sysctl(const int op)
62092+{
62093+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
62094+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
62095+ && (op & MAY_WRITE))
62096+ return -EACCES;
62097+#endif
62098+ return 0;
62099+}
62100+
62101+void
62102+gr_handle_chroot_chdir(struct path *path)
62103+{
62104+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
62105+ if (grsec_enable_chroot_chdir)
62106+ set_fs_pwd(current->fs, path);
62107+#endif
62108+ return;
62109+}
62110+
62111+int
62112+gr_handle_chroot_chmod(const struct dentry *dentry,
62113+ const struct vfsmount *mnt, const int mode)
62114+{
62115+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
62116+ /* allow chmod +s on directories, but not on files */
62117+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
62118+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
62119+ proc_is_chrooted(current)) {
62120+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
62121+ return -EPERM;
62122+ }
62123+#endif
62124+ return 0;
62125+}
62126diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
62127new file mode 100644
62128index 0000000..b81db5b
62129--- /dev/null
62130+++ b/grsecurity/grsec_disabled.c
62131@@ -0,0 +1,439 @@
62132+#include <linux/kernel.h>
62133+#include <linux/module.h>
62134+#include <linux/sched.h>
62135+#include <linux/file.h>
62136+#include <linux/fs.h>
62137+#include <linux/kdev_t.h>
62138+#include <linux/net.h>
62139+#include <linux/in.h>
62140+#include <linux/ip.h>
62141+#include <linux/skbuff.h>
62142+#include <linux/sysctl.h>
62143+
62144+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
62145+void
62146+pax_set_initial_flags(struct linux_binprm *bprm)
62147+{
62148+ return;
62149+}
62150+#endif
62151+
62152+#ifdef CONFIG_SYSCTL
62153+__u32
62154+gr_handle_sysctl(const struct ctl_table * table, const int op)
62155+{
62156+ return 0;
62157+}
62158+#endif
62159+
62160+#ifdef CONFIG_TASKSTATS
62161+int gr_is_taskstats_denied(int pid)
62162+{
62163+ return 0;
62164+}
62165+#endif
62166+
62167+int
62168+gr_acl_is_enabled(void)
62169+{
62170+ return 0;
62171+}
62172+
62173+void
62174+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
62175+{
62176+ return;
62177+}
62178+
62179+int
62180+gr_handle_rawio(const struct inode *inode)
62181+{
62182+ return 0;
62183+}
62184+
62185+void
62186+gr_acl_handle_psacct(struct task_struct *task, const long code)
62187+{
62188+ return;
62189+}
62190+
62191+int
62192+gr_handle_ptrace(struct task_struct *task, const long request)
62193+{
62194+ return 0;
62195+}
62196+
62197+int
62198+gr_handle_proc_ptrace(struct task_struct *task)
62199+{
62200+ return 0;
62201+}
62202+
62203+void
62204+gr_learn_resource(const struct task_struct *task,
62205+ const int res, const unsigned long wanted, const int gt)
62206+{
62207+ return;
62208+}
62209+
62210+int
62211+gr_set_acls(const int type)
62212+{
62213+ return 0;
62214+}
62215+
62216+int
62217+gr_check_hidden_task(const struct task_struct *tsk)
62218+{
62219+ return 0;
62220+}
62221+
62222+int
62223+gr_check_protected_task(const struct task_struct *task)
62224+{
62225+ return 0;
62226+}
62227+
62228+int
62229+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
62230+{
62231+ return 0;
62232+}
62233+
62234+void
62235+gr_copy_label(struct task_struct *tsk)
62236+{
62237+ return;
62238+}
62239+
62240+void
62241+gr_set_pax_flags(struct task_struct *task)
62242+{
62243+ return;
62244+}
62245+
62246+int
62247+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
62248+ const int unsafe_share)
62249+{
62250+ return 0;
62251+}
62252+
62253+void
62254+gr_handle_delete(const ino_t ino, const dev_t dev)
62255+{
62256+ return;
62257+}
62258+
62259+void
62260+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
62261+{
62262+ return;
62263+}
62264+
62265+void
62266+gr_handle_crash(struct task_struct *task, const int sig)
62267+{
62268+ return;
62269+}
62270+
62271+int
62272+gr_check_crash_exec(const struct file *filp)
62273+{
62274+ return 0;
62275+}
62276+
62277+int
62278+gr_check_crash_uid(const uid_t uid)
62279+{
62280+ return 0;
62281+}
62282+
62283+void
62284+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
62285+ struct dentry *old_dentry,
62286+ struct dentry *new_dentry,
62287+ struct vfsmount *mnt, const __u8 replace)
62288+{
62289+ return;
62290+}
62291+
62292+int
62293+gr_search_socket(const int family, const int type, const int protocol)
62294+{
62295+ return 1;
62296+}
62297+
62298+int
62299+gr_search_connectbind(const int mode, const struct socket *sock,
62300+ const struct sockaddr_in *addr)
62301+{
62302+ return 0;
62303+}
62304+
62305+void
62306+gr_handle_alertkill(struct task_struct *task)
62307+{
62308+ return;
62309+}
62310+
62311+__u32
62312+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
62313+{
62314+ return 1;
62315+}
62316+
62317+__u32
62318+gr_acl_handle_hidden_file(const struct dentry * dentry,
62319+ const struct vfsmount * mnt)
62320+{
62321+ return 1;
62322+}
62323+
62324+__u32
62325+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62326+ int acc_mode)
62327+{
62328+ return 1;
62329+}
62330+
62331+__u32
62332+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62333+{
62334+ return 1;
62335+}
62336+
62337+__u32
62338+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
62339+{
62340+ return 1;
62341+}
62342+
62343+int
62344+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
62345+ unsigned int *vm_flags)
62346+{
62347+ return 1;
62348+}
62349+
62350+__u32
62351+gr_acl_handle_truncate(const struct dentry * dentry,
62352+ const struct vfsmount * mnt)
62353+{
62354+ return 1;
62355+}
62356+
62357+__u32
62358+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
62359+{
62360+ return 1;
62361+}
62362+
62363+__u32
62364+gr_acl_handle_access(const struct dentry * dentry,
62365+ const struct vfsmount * mnt, const int fmode)
62366+{
62367+ return 1;
62368+}
62369+
62370+__u32
62371+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
62372+ mode_t mode)
62373+{
62374+ return 1;
62375+}
62376+
62377+__u32
62378+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
62379+ mode_t mode)
62380+{
62381+ return 1;
62382+}
62383+
62384+__u32
62385+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
62386+{
62387+ return 1;
62388+}
62389+
62390+__u32
62391+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
62392+{
62393+ return 1;
62394+}
62395+
62396+void
62397+grsecurity_init(void)
62398+{
62399+ return;
62400+}
62401+
62402+__u32
62403+gr_acl_handle_mknod(const struct dentry * new_dentry,
62404+ const struct dentry * parent_dentry,
62405+ const struct vfsmount * parent_mnt,
62406+ const int mode)
62407+{
62408+ return 1;
62409+}
62410+
62411+__u32
62412+gr_acl_handle_mkdir(const struct dentry * new_dentry,
62413+ const struct dentry * parent_dentry,
62414+ const struct vfsmount * parent_mnt)
62415+{
62416+ return 1;
62417+}
62418+
62419+__u32
62420+gr_acl_handle_symlink(const struct dentry * new_dentry,
62421+ const struct dentry * parent_dentry,
62422+ const struct vfsmount * parent_mnt, const char *from)
62423+{
62424+ return 1;
62425+}
62426+
62427+__u32
62428+gr_acl_handle_link(const struct dentry * new_dentry,
62429+ const struct dentry * parent_dentry,
62430+ const struct vfsmount * parent_mnt,
62431+ const struct dentry * old_dentry,
62432+ const struct vfsmount * old_mnt, const char *to)
62433+{
62434+ return 1;
62435+}
62436+
62437+int
62438+gr_acl_handle_rename(const struct dentry *new_dentry,
62439+ const struct dentry *parent_dentry,
62440+ const struct vfsmount *parent_mnt,
62441+ const struct dentry *old_dentry,
62442+ const struct inode *old_parent_inode,
62443+ const struct vfsmount *old_mnt, const char *newname)
62444+{
62445+ return 0;
62446+}
62447+
62448+int
62449+gr_acl_handle_filldir(const struct file *file, const char *name,
62450+ const int namelen, const ino_t ino)
62451+{
62452+ return 1;
62453+}
62454+
62455+int
62456+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62457+ const time_t shm_createtime, const uid_t cuid, const int shmid)
62458+{
62459+ return 1;
62460+}
62461+
62462+int
62463+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
62464+{
62465+ return 0;
62466+}
62467+
62468+int
62469+gr_search_accept(const struct socket *sock)
62470+{
62471+ return 0;
62472+}
62473+
62474+int
62475+gr_search_listen(const struct socket *sock)
62476+{
62477+ return 0;
62478+}
62479+
62480+int
62481+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
62482+{
62483+ return 0;
62484+}
62485+
62486+__u32
62487+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
62488+{
62489+ return 1;
62490+}
62491+
62492+__u32
62493+gr_acl_handle_creat(const struct dentry * dentry,
62494+ const struct dentry * p_dentry,
62495+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62496+ const int imode)
62497+{
62498+ return 1;
62499+}
62500+
62501+void
62502+gr_acl_handle_exit(void)
62503+{
62504+ return;
62505+}
62506+
62507+int
62508+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62509+{
62510+ return 1;
62511+}
62512+
62513+void
62514+gr_set_role_label(const uid_t uid, const gid_t gid)
62515+{
62516+ return;
62517+}
62518+
62519+int
62520+gr_acl_handle_procpidmem(const struct task_struct *task)
62521+{
62522+ return 0;
62523+}
62524+
62525+int
62526+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
62527+{
62528+ return 0;
62529+}
62530+
62531+int
62532+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
62533+{
62534+ return 0;
62535+}
62536+
62537+void
62538+gr_set_kernel_label(struct task_struct *task)
62539+{
62540+ return;
62541+}
62542+
62543+int
62544+gr_check_user_change(int real, int effective, int fs)
62545+{
62546+ return 0;
62547+}
62548+
62549+int
62550+gr_check_group_change(int real, int effective, int fs)
62551+{
62552+ return 0;
62553+}
62554+
62555+int gr_acl_enable_at_secure(void)
62556+{
62557+ return 0;
62558+}
62559+
62560+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
62561+{
62562+ return dentry->d_inode->i_sb->s_dev;
62563+}
62564+
62565+EXPORT_SYMBOL(gr_learn_resource);
62566+EXPORT_SYMBOL(gr_set_kernel_label);
62567+#ifdef CONFIG_SECURITY
62568+EXPORT_SYMBOL(gr_check_user_change);
62569+EXPORT_SYMBOL(gr_check_group_change);
62570+#endif
62571diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
62572new file mode 100644
62573index 0000000..a96e155
62574--- /dev/null
62575+++ b/grsecurity/grsec_exec.c
62576@@ -0,0 +1,204 @@
62577+#include <linux/kernel.h>
62578+#include <linux/sched.h>
62579+#include <linux/file.h>
62580+#include <linux/binfmts.h>
62581+#include <linux/smp_lock.h>
62582+#include <linux/fs.h>
62583+#include <linux/types.h>
62584+#include <linux/grdefs.h>
62585+#include <linux/grinternal.h>
62586+#include <linux/capability.h>
62587+#include <linux/compat.h>
62588+#include <linux/module.h>
62589+
62590+#include <asm/uaccess.h>
62591+
62592+#ifdef CONFIG_GRKERNSEC_EXECLOG
62593+static char gr_exec_arg_buf[132];
62594+static DEFINE_MUTEX(gr_exec_arg_mutex);
62595+#endif
62596+
62597+void
62598+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
62599+{
62600+#ifdef CONFIG_GRKERNSEC_EXECLOG
62601+ char *grarg = gr_exec_arg_buf;
62602+ unsigned int i, x, execlen = 0;
62603+ char c;
62604+
62605+ if (!((grsec_enable_execlog && grsec_enable_group &&
62606+ in_group_p(grsec_audit_gid))
62607+ || (grsec_enable_execlog && !grsec_enable_group)))
62608+ return;
62609+
62610+ mutex_lock(&gr_exec_arg_mutex);
62611+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62612+
62613+ if (unlikely(argv == NULL))
62614+ goto log;
62615+
62616+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62617+ const char __user *p;
62618+ unsigned int len;
62619+
62620+ if (copy_from_user(&p, argv + i, sizeof(p)))
62621+ goto log;
62622+ if (!p)
62623+ goto log;
62624+ len = strnlen_user(p, 128 - execlen);
62625+ if (len > 128 - execlen)
62626+ len = 128 - execlen;
62627+ else if (len > 0)
62628+ len--;
62629+ if (copy_from_user(grarg + execlen, p, len))
62630+ goto log;
62631+
62632+ /* rewrite unprintable characters */
62633+ for (x = 0; x < len; x++) {
62634+ c = *(grarg + execlen + x);
62635+ if (c < 32 || c > 126)
62636+ *(grarg + execlen + x) = ' ';
62637+ }
62638+
62639+ execlen += len;
62640+ *(grarg + execlen) = ' ';
62641+ *(grarg + execlen + 1) = '\0';
62642+ execlen++;
62643+ }
62644+
62645+ log:
62646+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62647+ bprm->file->f_path.mnt, grarg);
62648+ mutex_unlock(&gr_exec_arg_mutex);
62649+#endif
62650+ return;
62651+}
62652+
62653+#ifdef CONFIG_COMPAT
62654+void
62655+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
62656+{
62657+#ifdef CONFIG_GRKERNSEC_EXECLOG
62658+ char *grarg = gr_exec_arg_buf;
62659+ unsigned int i, x, execlen = 0;
62660+ char c;
62661+
62662+ if (!((grsec_enable_execlog && grsec_enable_group &&
62663+ in_group_p(grsec_audit_gid))
62664+ || (grsec_enable_execlog && !grsec_enable_group)))
62665+ return;
62666+
62667+ mutex_lock(&gr_exec_arg_mutex);
62668+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
62669+
62670+ if (unlikely(argv == NULL))
62671+ goto log;
62672+
62673+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
62674+ compat_uptr_t p;
62675+ unsigned int len;
62676+
62677+ if (get_user(p, argv + i))
62678+ goto log;
62679+ len = strnlen_user(compat_ptr(p), 128 - execlen);
62680+ if (len > 128 - execlen)
62681+ len = 128 - execlen;
62682+ else if (len > 0)
62683+ len--;
62684+ else
62685+ goto log;
62686+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
62687+ goto log;
62688+
62689+ /* rewrite unprintable characters */
62690+ for (x = 0; x < len; x++) {
62691+ c = *(grarg + execlen + x);
62692+ if (c < 32 || c > 126)
62693+ *(grarg + execlen + x) = ' ';
62694+ }
62695+
62696+ execlen += len;
62697+ *(grarg + execlen) = ' ';
62698+ *(grarg + execlen + 1) = '\0';
62699+ execlen++;
62700+ }
62701+
62702+ log:
62703+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
62704+ bprm->file->f_path.mnt, grarg);
62705+ mutex_unlock(&gr_exec_arg_mutex);
62706+#endif
62707+ return;
62708+}
62709+#endif
62710+
62711+#ifdef CONFIG_GRKERNSEC
62712+extern int gr_acl_is_capable(const int cap);
62713+extern int gr_acl_is_capable_nolog(const int cap);
62714+extern int gr_chroot_is_capable(const int cap);
62715+extern int gr_chroot_is_capable_nolog(const int cap);
62716+#endif
62717+
62718+const char *captab_log[] = {
62719+ "CAP_CHOWN",
62720+ "CAP_DAC_OVERRIDE",
62721+ "CAP_DAC_READ_SEARCH",
62722+ "CAP_FOWNER",
62723+ "CAP_FSETID",
62724+ "CAP_KILL",
62725+ "CAP_SETGID",
62726+ "CAP_SETUID",
62727+ "CAP_SETPCAP",
62728+ "CAP_LINUX_IMMUTABLE",
62729+ "CAP_NET_BIND_SERVICE",
62730+ "CAP_NET_BROADCAST",
62731+ "CAP_NET_ADMIN",
62732+ "CAP_NET_RAW",
62733+ "CAP_IPC_LOCK",
62734+ "CAP_IPC_OWNER",
62735+ "CAP_SYS_MODULE",
62736+ "CAP_SYS_RAWIO",
62737+ "CAP_SYS_CHROOT",
62738+ "CAP_SYS_PTRACE",
62739+ "CAP_SYS_PACCT",
62740+ "CAP_SYS_ADMIN",
62741+ "CAP_SYS_BOOT",
62742+ "CAP_SYS_NICE",
62743+ "CAP_SYS_RESOURCE",
62744+ "CAP_SYS_TIME",
62745+ "CAP_SYS_TTY_CONFIG",
62746+ "CAP_MKNOD",
62747+ "CAP_LEASE",
62748+ "CAP_AUDIT_WRITE",
62749+ "CAP_AUDIT_CONTROL",
62750+ "CAP_SETFCAP",
62751+ "CAP_MAC_OVERRIDE",
62752+ "CAP_MAC_ADMIN"
62753+};
62754+
62755+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
62756+
62757+int gr_is_capable(const int cap)
62758+{
62759+#ifdef CONFIG_GRKERNSEC
62760+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
62761+ return 1;
62762+ return 0;
62763+#else
62764+ return 1;
62765+#endif
62766+}
62767+
62768+int gr_is_capable_nolog(const int cap)
62769+{
62770+#ifdef CONFIG_GRKERNSEC
62771+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
62772+ return 1;
62773+ return 0;
62774+#else
62775+ return 1;
62776+#endif
62777+}
62778+
62779+EXPORT_SYMBOL(gr_is_capable);
62780+EXPORT_SYMBOL(gr_is_capable_nolog);
62781diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
62782new file mode 100644
62783index 0000000..d3ee748
62784--- /dev/null
62785+++ b/grsecurity/grsec_fifo.c
62786@@ -0,0 +1,24 @@
62787+#include <linux/kernel.h>
62788+#include <linux/sched.h>
62789+#include <linux/fs.h>
62790+#include <linux/file.h>
62791+#include <linux/grinternal.h>
62792+
62793+int
62794+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
62795+ const struct dentry *dir, const int flag, const int acc_mode)
62796+{
62797+#ifdef CONFIG_GRKERNSEC_FIFO
62798+ const struct cred *cred = current_cred();
62799+
62800+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
62801+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
62802+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
62803+ (cred->fsuid != dentry->d_inode->i_uid)) {
62804+ if (!inode_permission(dentry->d_inode, acc_mode))
62805+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
62806+ return -EACCES;
62807+ }
62808+#endif
62809+ return 0;
62810+}
62811diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
62812new file mode 100644
62813index 0000000..8ca18bf
62814--- /dev/null
62815+++ b/grsecurity/grsec_fork.c
62816@@ -0,0 +1,23 @@
62817+#include <linux/kernel.h>
62818+#include <linux/sched.h>
62819+#include <linux/grsecurity.h>
62820+#include <linux/grinternal.h>
62821+#include <linux/errno.h>
62822+
62823+void
62824+gr_log_forkfail(const int retval)
62825+{
62826+#ifdef CONFIG_GRKERNSEC_FORKFAIL
62827+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
62828+ switch (retval) {
62829+ case -EAGAIN:
62830+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
62831+ break;
62832+ case -ENOMEM:
62833+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
62834+ break;
62835+ }
62836+ }
62837+#endif
62838+ return;
62839+}
62840diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
62841new file mode 100644
62842index 0000000..1e995d3
62843--- /dev/null
62844+++ b/grsecurity/grsec_init.c
62845@@ -0,0 +1,278 @@
62846+#include <linux/kernel.h>
62847+#include <linux/sched.h>
62848+#include <linux/mm.h>
62849+#include <linux/smp_lock.h>
62850+#include <linux/gracl.h>
62851+#include <linux/slab.h>
62852+#include <linux/vmalloc.h>
62853+#include <linux/percpu.h>
62854+#include <linux/module.h>
62855+
62856+int grsec_enable_ptrace_readexec;
62857+int grsec_enable_setxid;
62858+int grsec_enable_brute;
62859+int grsec_enable_link;
62860+int grsec_enable_dmesg;
62861+int grsec_enable_harden_ptrace;
62862+int grsec_enable_fifo;
62863+int grsec_enable_execlog;
62864+int grsec_enable_signal;
62865+int grsec_enable_forkfail;
62866+int grsec_enable_audit_ptrace;
62867+int grsec_enable_time;
62868+int grsec_enable_audit_textrel;
62869+int grsec_enable_group;
62870+int grsec_audit_gid;
62871+int grsec_enable_chdir;
62872+int grsec_enable_mount;
62873+int grsec_enable_rofs;
62874+int grsec_enable_chroot_findtask;
62875+int grsec_enable_chroot_mount;
62876+int grsec_enable_chroot_shmat;
62877+int grsec_enable_chroot_fchdir;
62878+int grsec_enable_chroot_double;
62879+int grsec_enable_chroot_pivot;
62880+int grsec_enable_chroot_chdir;
62881+int grsec_enable_chroot_chmod;
62882+int grsec_enable_chroot_mknod;
62883+int grsec_enable_chroot_nice;
62884+int grsec_enable_chroot_execlog;
62885+int grsec_enable_chroot_caps;
62886+int grsec_enable_chroot_sysctl;
62887+int grsec_enable_chroot_unix;
62888+int grsec_enable_tpe;
62889+int grsec_tpe_gid;
62890+int grsec_enable_blackhole;
62891+#ifdef CONFIG_IPV6_MODULE
62892+EXPORT_SYMBOL(grsec_enable_blackhole);
62893+#endif
62894+int grsec_lastack_retries;
62895+int grsec_enable_tpe_all;
62896+int grsec_enable_tpe_invert;
62897+int grsec_enable_socket_all;
62898+int grsec_socket_all_gid;
62899+int grsec_enable_socket_client;
62900+int grsec_socket_client_gid;
62901+int grsec_enable_socket_server;
62902+int grsec_socket_server_gid;
62903+int grsec_resource_logging;
62904+int grsec_disable_privio;
62905+int grsec_enable_log_rwxmaps;
62906+int grsec_lock;
62907+
62908+DEFINE_SPINLOCK(grsec_alert_lock);
62909+unsigned long grsec_alert_wtime = 0;
62910+unsigned long grsec_alert_fyet = 0;
62911+
62912+DEFINE_SPINLOCK(grsec_audit_lock);
62913+
62914+DEFINE_RWLOCK(grsec_exec_file_lock);
62915+
62916+char *gr_shared_page[4];
62917+
62918+char *gr_alert_log_fmt;
62919+char *gr_audit_log_fmt;
62920+char *gr_alert_log_buf;
62921+char *gr_audit_log_buf;
62922+
62923+extern struct gr_arg *gr_usermode;
62924+extern unsigned char *gr_system_salt;
62925+extern unsigned char *gr_system_sum;
62926+
62927+void __init
62928+grsecurity_init(void)
62929+{
62930+ int j;
62931+ /* create the per-cpu shared pages */
62932+
62933+#ifdef CONFIG_X86
62934+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
62935+#endif
62936+
62937+ for (j = 0; j < 4; j++) {
62938+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
62939+ if (gr_shared_page[j] == NULL) {
62940+ panic("Unable to allocate grsecurity shared page");
62941+ return;
62942+ }
62943+ }
62944+
62945+ /* allocate log buffers */
62946+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
62947+ if (!gr_alert_log_fmt) {
62948+ panic("Unable to allocate grsecurity alert log format buffer");
62949+ return;
62950+ }
62951+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
62952+ if (!gr_audit_log_fmt) {
62953+ panic("Unable to allocate grsecurity audit log format buffer");
62954+ return;
62955+ }
62956+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62957+ if (!gr_alert_log_buf) {
62958+ panic("Unable to allocate grsecurity alert log buffer");
62959+ return;
62960+ }
62961+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
62962+ if (!gr_audit_log_buf) {
62963+ panic("Unable to allocate grsecurity audit log buffer");
62964+ return;
62965+ }
62966+
62967+ /* allocate memory for authentication structure */
62968+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
62969+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
62970+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
62971+
62972+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
62973+ panic("Unable to allocate grsecurity authentication structure");
62974+ return;
62975+ }
62976+
62977+
62978+#ifdef CONFIG_GRKERNSEC_IO
62979+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
62980+ grsec_disable_privio = 1;
62981+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62982+ grsec_disable_privio = 1;
62983+#else
62984+ grsec_disable_privio = 0;
62985+#endif
62986+#endif
62987+
62988+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
62989+ /* for backward compatibility, tpe_invert always defaults to on if
62990+ enabled in the kernel
62991+ */
62992+ grsec_enable_tpe_invert = 1;
62993+#endif
62994+
62995+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
62996+#ifndef CONFIG_GRKERNSEC_SYSCTL
62997+ grsec_lock = 1;
62998+#endif
62999+
63000+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63001+ grsec_enable_audit_textrel = 1;
63002+#endif
63003+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63004+ grsec_enable_log_rwxmaps = 1;
63005+#endif
63006+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
63007+ grsec_enable_group = 1;
63008+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
63009+#endif
63010+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
63011+ grsec_enable_chdir = 1;
63012+#endif
63013+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
63014+ grsec_enable_harden_ptrace = 1;
63015+#endif
63016+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63017+ grsec_enable_mount = 1;
63018+#endif
63019+#ifdef CONFIG_GRKERNSEC_LINK
63020+ grsec_enable_link = 1;
63021+#endif
63022+#ifdef CONFIG_GRKERNSEC_BRUTE
63023+ grsec_enable_brute = 1;
63024+#endif
63025+#ifdef CONFIG_GRKERNSEC_DMESG
63026+ grsec_enable_dmesg = 1;
63027+#endif
63028+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
63029+ grsec_enable_blackhole = 1;
63030+ grsec_lastack_retries = 4;
63031+#endif
63032+#ifdef CONFIG_GRKERNSEC_FIFO
63033+ grsec_enable_fifo = 1;
63034+#endif
63035+#ifdef CONFIG_GRKERNSEC_EXECLOG
63036+ grsec_enable_execlog = 1;
63037+#endif
63038+#ifdef CONFIG_GRKERNSEC_SETXID
63039+ grsec_enable_setxid = 1;
63040+#endif
63041+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63042+ grsec_enable_ptrace_readexec = 1;
63043+#endif
63044+#ifdef CONFIG_GRKERNSEC_SIGNAL
63045+ grsec_enable_signal = 1;
63046+#endif
63047+#ifdef CONFIG_GRKERNSEC_FORKFAIL
63048+ grsec_enable_forkfail = 1;
63049+#endif
63050+#ifdef CONFIG_GRKERNSEC_TIME
63051+ grsec_enable_time = 1;
63052+#endif
63053+#ifdef CONFIG_GRKERNSEC_RESLOG
63054+ grsec_resource_logging = 1;
63055+#endif
63056+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
63057+ grsec_enable_chroot_findtask = 1;
63058+#endif
63059+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
63060+ grsec_enable_chroot_unix = 1;
63061+#endif
63062+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
63063+ grsec_enable_chroot_mount = 1;
63064+#endif
63065+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
63066+ grsec_enable_chroot_fchdir = 1;
63067+#endif
63068+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
63069+ grsec_enable_chroot_shmat = 1;
63070+#endif
63071+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63072+ grsec_enable_audit_ptrace = 1;
63073+#endif
63074+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
63075+ grsec_enable_chroot_double = 1;
63076+#endif
63077+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
63078+ grsec_enable_chroot_pivot = 1;
63079+#endif
63080+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
63081+ grsec_enable_chroot_chdir = 1;
63082+#endif
63083+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
63084+ grsec_enable_chroot_chmod = 1;
63085+#endif
63086+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
63087+ grsec_enable_chroot_mknod = 1;
63088+#endif
63089+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
63090+ grsec_enable_chroot_nice = 1;
63091+#endif
63092+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
63093+ grsec_enable_chroot_execlog = 1;
63094+#endif
63095+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
63096+ grsec_enable_chroot_caps = 1;
63097+#endif
63098+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
63099+ grsec_enable_chroot_sysctl = 1;
63100+#endif
63101+#ifdef CONFIG_GRKERNSEC_TPE
63102+ grsec_enable_tpe = 1;
63103+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
63104+#ifdef CONFIG_GRKERNSEC_TPE_ALL
63105+ grsec_enable_tpe_all = 1;
63106+#endif
63107+#endif
63108+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
63109+ grsec_enable_socket_all = 1;
63110+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
63111+#endif
63112+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
63113+ grsec_enable_socket_client = 1;
63114+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
63115+#endif
63116+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
63117+ grsec_enable_socket_server = 1;
63118+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
63119+#endif
63120+#endif
63121+
63122+ return;
63123+}
63124diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
63125new file mode 100644
63126index 0000000..3efe141
63127--- /dev/null
63128+++ b/grsecurity/grsec_link.c
63129@@ -0,0 +1,43 @@
63130+#include <linux/kernel.h>
63131+#include <linux/sched.h>
63132+#include <linux/fs.h>
63133+#include <linux/file.h>
63134+#include <linux/grinternal.h>
63135+
63136+int
63137+gr_handle_follow_link(const struct inode *parent,
63138+ const struct inode *inode,
63139+ const struct dentry *dentry, const struct vfsmount *mnt)
63140+{
63141+#ifdef CONFIG_GRKERNSEC_LINK
63142+ const struct cred *cred = current_cred();
63143+
63144+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
63145+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
63146+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
63147+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
63148+ return -EACCES;
63149+ }
63150+#endif
63151+ return 0;
63152+}
63153+
63154+int
63155+gr_handle_hardlink(const struct dentry *dentry,
63156+ const struct vfsmount *mnt,
63157+ struct inode *inode, const int mode, const char *to)
63158+{
63159+#ifdef CONFIG_GRKERNSEC_LINK
63160+ const struct cred *cred = current_cred();
63161+
63162+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
63163+ (!S_ISREG(mode) || (mode & S_ISUID) ||
63164+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
63165+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
63166+ !capable(CAP_FOWNER) && cred->uid) {
63167+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
63168+ return -EPERM;
63169+ }
63170+#endif
63171+ return 0;
63172+}
63173diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
63174new file mode 100644
63175index 0000000..a45d2e9
63176--- /dev/null
63177+++ b/grsecurity/grsec_log.c
63178@@ -0,0 +1,322 @@
63179+#include <linux/kernel.h>
63180+#include <linux/sched.h>
63181+#include <linux/file.h>
63182+#include <linux/tty.h>
63183+#include <linux/fs.h>
63184+#include <linux/grinternal.h>
63185+
63186+#ifdef CONFIG_TREE_PREEMPT_RCU
63187+#define DISABLE_PREEMPT() preempt_disable()
63188+#define ENABLE_PREEMPT() preempt_enable()
63189+#else
63190+#define DISABLE_PREEMPT()
63191+#define ENABLE_PREEMPT()
63192+#endif
63193+
63194+#define BEGIN_LOCKS(x) \
63195+ DISABLE_PREEMPT(); \
63196+ rcu_read_lock(); \
63197+ read_lock(&tasklist_lock); \
63198+ read_lock(&grsec_exec_file_lock); \
63199+ if (x != GR_DO_AUDIT) \
63200+ spin_lock(&grsec_alert_lock); \
63201+ else \
63202+ spin_lock(&grsec_audit_lock)
63203+
63204+#define END_LOCKS(x) \
63205+ if (x != GR_DO_AUDIT) \
63206+ spin_unlock(&grsec_alert_lock); \
63207+ else \
63208+ spin_unlock(&grsec_audit_lock); \
63209+ read_unlock(&grsec_exec_file_lock); \
63210+ read_unlock(&tasklist_lock); \
63211+ rcu_read_unlock(); \
63212+ ENABLE_PREEMPT(); \
63213+ if (x == GR_DONT_AUDIT) \
63214+ gr_handle_alertkill(current)
63215+
63216+enum {
63217+ FLOODING,
63218+ NO_FLOODING
63219+};
63220+
63221+extern char *gr_alert_log_fmt;
63222+extern char *gr_audit_log_fmt;
63223+extern char *gr_alert_log_buf;
63224+extern char *gr_audit_log_buf;
63225+
63226+static int gr_log_start(int audit)
63227+{
63228+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
63229+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
63230+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63231+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
63232+ unsigned long curr_secs = get_seconds();
63233+
63234+ if (audit == GR_DO_AUDIT)
63235+ goto set_fmt;
63236+
63237+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
63238+ grsec_alert_wtime = curr_secs;
63239+ grsec_alert_fyet = 0;
63240+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
63241+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
63242+ grsec_alert_fyet++;
63243+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
63244+ grsec_alert_wtime = curr_secs;
63245+ grsec_alert_fyet++;
63246+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
63247+ return FLOODING;
63248+ }
63249+ else return FLOODING;
63250+
63251+set_fmt:
63252+#endif
63253+ memset(buf, 0, PAGE_SIZE);
63254+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
63255+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
63256+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63257+ } else if (current->signal->curr_ip) {
63258+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
63259+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
63260+ } else if (gr_acl_is_enabled()) {
63261+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
63262+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
63263+ } else {
63264+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
63265+ strcpy(buf, fmt);
63266+ }
63267+
63268+ return NO_FLOODING;
63269+}
63270+
63271+static void gr_log_middle(int audit, const char *msg, va_list ap)
63272+ __attribute__ ((format (printf, 2, 0)));
63273+
63274+static void gr_log_middle(int audit, const char *msg, va_list ap)
63275+{
63276+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63277+ unsigned int len = strlen(buf);
63278+
63279+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63280+
63281+ return;
63282+}
63283+
63284+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63285+ __attribute__ ((format (printf, 2, 3)));
63286+
63287+static void gr_log_middle_varargs(int audit, const char *msg, ...)
63288+{
63289+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63290+ unsigned int len = strlen(buf);
63291+ va_list ap;
63292+
63293+ va_start(ap, msg);
63294+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
63295+ va_end(ap);
63296+
63297+ return;
63298+}
63299+
63300+static void gr_log_end(int audit, int append_default)
63301+{
63302+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
63303+
63304+ if (append_default) {
63305+ unsigned int len = strlen(buf);
63306+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
63307+ }
63308+
63309+ printk("%s\n", buf);
63310+
63311+ return;
63312+}
63313+
63314+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
63315+{
63316+ int logtype;
63317+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
63318+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
63319+ void *voidptr = NULL;
63320+ int num1 = 0, num2 = 0;
63321+ unsigned long ulong1 = 0, ulong2 = 0;
63322+ struct dentry *dentry = NULL;
63323+ struct vfsmount *mnt = NULL;
63324+ struct file *file = NULL;
63325+ struct task_struct *task = NULL;
63326+ const struct cred *cred, *pcred;
63327+ va_list ap;
63328+
63329+ BEGIN_LOCKS(audit);
63330+ logtype = gr_log_start(audit);
63331+ if (logtype == FLOODING) {
63332+ END_LOCKS(audit);
63333+ return;
63334+ }
63335+ va_start(ap, argtypes);
63336+ switch (argtypes) {
63337+ case GR_TTYSNIFF:
63338+ task = va_arg(ap, struct task_struct *);
63339+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
63340+ break;
63341+ case GR_SYSCTL_HIDDEN:
63342+ str1 = va_arg(ap, char *);
63343+ gr_log_middle_varargs(audit, msg, result, str1);
63344+ break;
63345+ case GR_RBAC:
63346+ dentry = va_arg(ap, struct dentry *);
63347+ mnt = va_arg(ap, struct vfsmount *);
63348+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
63349+ break;
63350+ case GR_RBAC_STR:
63351+ dentry = va_arg(ap, struct dentry *);
63352+ mnt = va_arg(ap, struct vfsmount *);
63353+ str1 = va_arg(ap, char *);
63354+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
63355+ break;
63356+ case GR_STR_RBAC:
63357+ str1 = va_arg(ap, char *);
63358+ dentry = va_arg(ap, struct dentry *);
63359+ mnt = va_arg(ap, struct vfsmount *);
63360+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
63361+ break;
63362+ case GR_RBAC_MODE2:
63363+ dentry = va_arg(ap, struct dentry *);
63364+ mnt = va_arg(ap, struct vfsmount *);
63365+ str1 = va_arg(ap, char *);
63366+ str2 = va_arg(ap, char *);
63367+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
63368+ break;
63369+ case GR_RBAC_MODE3:
63370+ dentry = va_arg(ap, struct dentry *);
63371+ mnt = va_arg(ap, struct vfsmount *);
63372+ str1 = va_arg(ap, char *);
63373+ str2 = va_arg(ap, char *);
63374+ str3 = va_arg(ap, char *);
63375+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
63376+ break;
63377+ case GR_FILENAME:
63378+ dentry = va_arg(ap, struct dentry *);
63379+ mnt = va_arg(ap, struct vfsmount *);
63380+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
63381+ break;
63382+ case GR_STR_FILENAME:
63383+ str1 = va_arg(ap, char *);
63384+ dentry = va_arg(ap, struct dentry *);
63385+ mnt = va_arg(ap, struct vfsmount *);
63386+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
63387+ break;
63388+ case GR_FILENAME_STR:
63389+ dentry = va_arg(ap, struct dentry *);
63390+ mnt = va_arg(ap, struct vfsmount *);
63391+ str1 = va_arg(ap, char *);
63392+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
63393+ break;
63394+ case GR_FILENAME_TWO_INT:
63395+ dentry = va_arg(ap, struct dentry *);
63396+ mnt = va_arg(ap, struct vfsmount *);
63397+ num1 = va_arg(ap, int);
63398+ num2 = va_arg(ap, int);
63399+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
63400+ break;
63401+ case GR_FILENAME_TWO_INT_STR:
63402+ dentry = va_arg(ap, struct dentry *);
63403+ mnt = va_arg(ap, struct vfsmount *);
63404+ num1 = va_arg(ap, int);
63405+ num2 = va_arg(ap, int);
63406+ str1 = va_arg(ap, char *);
63407+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
63408+ break;
63409+ case GR_TEXTREL:
63410+ file = va_arg(ap, struct file *);
63411+ ulong1 = va_arg(ap, unsigned long);
63412+ ulong2 = va_arg(ap, unsigned long);
63413+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
63414+ break;
63415+ case GR_PTRACE:
63416+ task = va_arg(ap, struct task_struct *);
63417+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
63418+ break;
63419+ case GR_RESOURCE:
63420+ task = va_arg(ap, struct task_struct *);
63421+ cred = __task_cred(task);
63422+ pcred = __task_cred(task->real_parent);
63423+ ulong1 = va_arg(ap, unsigned long);
63424+ str1 = va_arg(ap, char *);
63425+ ulong2 = va_arg(ap, unsigned long);
63426+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63427+ break;
63428+ case GR_CAP:
63429+ task = va_arg(ap, struct task_struct *);
63430+ cred = __task_cred(task);
63431+ pcred = __task_cred(task->real_parent);
63432+ str1 = va_arg(ap, char *);
63433+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63434+ break;
63435+ case GR_SIG:
63436+ str1 = va_arg(ap, char *);
63437+ voidptr = va_arg(ap, void *);
63438+ gr_log_middle_varargs(audit, msg, str1, voidptr);
63439+ break;
63440+ case GR_SIG2:
63441+ task = va_arg(ap, struct task_struct *);
63442+ cred = __task_cred(task);
63443+ pcred = __task_cred(task->real_parent);
63444+ num1 = va_arg(ap, int);
63445+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63446+ break;
63447+ case GR_CRASH1:
63448+ task = va_arg(ap, struct task_struct *);
63449+ cred = __task_cred(task);
63450+ pcred = __task_cred(task->real_parent);
63451+ ulong1 = va_arg(ap, unsigned long);
63452+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
63453+ break;
63454+ case GR_CRASH2:
63455+ task = va_arg(ap, struct task_struct *);
63456+ cred = __task_cred(task);
63457+ pcred = __task_cred(task->real_parent);
63458+ ulong1 = va_arg(ap, unsigned long);
63459+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
63460+ break;
63461+ case GR_RWXMAP:
63462+ file = va_arg(ap, struct file *);
63463+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
63464+ break;
63465+ case GR_PSACCT:
63466+ {
63467+ unsigned int wday, cday;
63468+ __u8 whr, chr;
63469+ __u8 wmin, cmin;
63470+ __u8 wsec, csec;
63471+ char cur_tty[64] = { 0 };
63472+ char parent_tty[64] = { 0 };
63473+
63474+ task = va_arg(ap, struct task_struct *);
63475+ wday = va_arg(ap, unsigned int);
63476+ cday = va_arg(ap, unsigned int);
63477+ whr = va_arg(ap, int);
63478+ chr = va_arg(ap, int);
63479+ wmin = va_arg(ap, int);
63480+ cmin = va_arg(ap, int);
63481+ wsec = va_arg(ap, int);
63482+ csec = va_arg(ap, int);
63483+ ulong1 = va_arg(ap, unsigned long);
63484+ cred = __task_cred(task);
63485+ pcred = __task_cred(task->real_parent);
63486+
63487+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
63488+ }
63489+ break;
63490+ default:
63491+ gr_log_middle(audit, msg, ap);
63492+ }
63493+ va_end(ap);
63494+ // these don't need DEFAULTSECARGS printed on the end
63495+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
63496+ gr_log_end(audit, 0);
63497+ else
63498+ gr_log_end(audit, 1);
63499+ END_LOCKS(audit);
63500+}
63501diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
63502new file mode 100644
63503index 0000000..6c0416b
63504--- /dev/null
63505+++ b/grsecurity/grsec_mem.c
63506@@ -0,0 +1,33 @@
63507+#include <linux/kernel.h>
63508+#include <linux/sched.h>
63509+#include <linux/mm.h>
63510+#include <linux/mman.h>
63511+#include <linux/grinternal.h>
63512+
63513+void
63514+gr_handle_ioperm(void)
63515+{
63516+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
63517+ return;
63518+}
63519+
63520+void
63521+gr_handle_iopl(void)
63522+{
63523+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
63524+ return;
63525+}
63526+
63527+void
63528+gr_handle_mem_readwrite(u64 from, u64 to)
63529+{
63530+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
63531+ return;
63532+}
63533+
63534+void
63535+gr_handle_vm86(void)
63536+{
63537+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
63538+ return;
63539+}
63540diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
63541new file mode 100644
63542index 0000000..2131422
63543--- /dev/null
63544+++ b/grsecurity/grsec_mount.c
63545@@ -0,0 +1,62 @@
63546+#include <linux/kernel.h>
63547+#include <linux/sched.h>
63548+#include <linux/mount.h>
63549+#include <linux/grsecurity.h>
63550+#include <linux/grinternal.h>
63551+
63552+void
63553+gr_log_remount(const char *devname, const int retval)
63554+{
63555+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63556+ if (grsec_enable_mount && (retval >= 0))
63557+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
63558+#endif
63559+ return;
63560+}
63561+
63562+void
63563+gr_log_unmount(const char *devname, const int retval)
63564+{
63565+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63566+ if (grsec_enable_mount && (retval >= 0))
63567+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
63568+#endif
63569+ return;
63570+}
63571+
63572+void
63573+gr_log_mount(const char *from, const char *to, const int retval)
63574+{
63575+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
63576+ if (grsec_enable_mount && (retval >= 0))
63577+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
63578+#endif
63579+ return;
63580+}
63581+
63582+int
63583+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
63584+{
63585+#ifdef CONFIG_GRKERNSEC_ROFS
63586+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
63587+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
63588+ return -EPERM;
63589+ } else
63590+ return 0;
63591+#endif
63592+ return 0;
63593+}
63594+
63595+int
63596+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
63597+{
63598+#ifdef CONFIG_GRKERNSEC_ROFS
63599+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
63600+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
63601+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
63602+ return -EPERM;
63603+ } else
63604+ return 0;
63605+#endif
63606+ return 0;
63607+}
63608diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
63609new file mode 100644
63610index 0000000..a3b12a0
63611--- /dev/null
63612+++ b/grsecurity/grsec_pax.c
63613@@ -0,0 +1,36 @@
63614+#include <linux/kernel.h>
63615+#include <linux/sched.h>
63616+#include <linux/mm.h>
63617+#include <linux/file.h>
63618+#include <linux/grinternal.h>
63619+#include <linux/grsecurity.h>
63620+
63621+void
63622+gr_log_textrel(struct vm_area_struct * vma)
63623+{
63624+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
63625+ if (grsec_enable_audit_textrel)
63626+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
63627+#endif
63628+ return;
63629+}
63630+
63631+void
63632+gr_log_rwxmmap(struct file *file)
63633+{
63634+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63635+ if (grsec_enable_log_rwxmaps)
63636+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
63637+#endif
63638+ return;
63639+}
63640+
63641+void
63642+gr_log_rwxmprotect(struct file *file)
63643+{
63644+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
63645+ if (grsec_enable_log_rwxmaps)
63646+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
63647+#endif
63648+ return;
63649+}
63650diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
63651new file mode 100644
63652index 0000000..78f8733
63653--- /dev/null
63654+++ b/grsecurity/grsec_ptrace.c
63655@@ -0,0 +1,30 @@
63656+#include <linux/kernel.h>
63657+#include <linux/sched.h>
63658+#include <linux/grinternal.h>
63659+#include <linux/security.h>
63660+
63661+void
63662+gr_audit_ptrace(struct task_struct *task)
63663+{
63664+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
63665+ if (grsec_enable_audit_ptrace)
63666+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
63667+#endif
63668+ return;
63669+}
63670+
63671+int
63672+gr_ptrace_readexec(struct file *file, int unsafe_flags)
63673+{
63674+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
63675+ const struct dentry *dentry = file->f_path.dentry;
63676+ const struct vfsmount *mnt = file->f_path.mnt;
63677+
63678+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
63679+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
63680+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
63681+ return -EACCES;
63682+ }
63683+#endif
63684+ return 0;
63685+}
63686diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
63687new file mode 100644
63688index 0000000..c648492
63689--- /dev/null
63690+++ b/grsecurity/grsec_sig.c
63691@@ -0,0 +1,206 @@
63692+#include <linux/kernel.h>
63693+#include <linux/sched.h>
63694+#include <linux/delay.h>
63695+#include <linux/grsecurity.h>
63696+#include <linux/grinternal.h>
63697+#include <linux/hardirq.h>
63698+
63699+char *signames[] = {
63700+ [SIGSEGV] = "Segmentation fault",
63701+ [SIGILL] = "Illegal instruction",
63702+ [SIGABRT] = "Abort",
63703+ [SIGBUS] = "Invalid alignment/Bus error"
63704+};
63705+
63706+void
63707+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
63708+{
63709+#ifdef CONFIG_GRKERNSEC_SIGNAL
63710+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
63711+ (sig == SIGABRT) || (sig == SIGBUS))) {
63712+ if (t->pid == current->pid) {
63713+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
63714+ } else {
63715+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
63716+ }
63717+ }
63718+#endif
63719+ return;
63720+}
63721+
63722+int
63723+gr_handle_signal(const struct task_struct *p, const int sig)
63724+{
63725+#ifdef CONFIG_GRKERNSEC
63726+ /* ignore the 0 signal for protected task checks */
63727+ if (current->pid > 1 && sig && gr_check_protected_task(p)) {
63728+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
63729+ return -EPERM;
63730+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
63731+ return -EPERM;
63732+ }
63733+#endif
63734+ return 0;
63735+}
63736+
63737+#ifdef CONFIG_GRKERNSEC
63738+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
63739+
63740+int gr_fake_force_sig(int sig, struct task_struct *t)
63741+{
63742+ unsigned long int flags;
63743+ int ret, blocked, ignored;
63744+ struct k_sigaction *action;
63745+
63746+ spin_lock_irqsave(&t->sighand->siglock, flags);
63747+ action = &t->sighand->action[sig-1];
63748+ ignored = action->sa.sa_handler == SIG_IGN;
63749+ blocked = sigismember(&t->blocked, sig);
63750+ if (blocked || ignored) {
63751+ action->sa.sa_handler = SIG_DFL;
63752+ if (blocked) {
63753+ sigdelset(&t->blocked, sig);
63754+ recalc_sigpending_and_wake(t);
63755+ }
63756+ }
63757+ if (action->sa.sa_handler == SIG_DFL)
63758+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
63759+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
63760+
63761+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
63762+
63763+ return ret;
63764+}
63765+#endif
63766+
63767+#ifdef CONFIG_GRKERNSEC_BRUTE
63768+#define GR_USER_BAN_TIME (15 * 60)
63769+
63770+static int __get_dumpable(unsigned long mm_flags)
63771+{
63772+ int ret;
63773+
63774+ ret = mm_flags & MMF_DUMPABLE_MASK;
63775+ return (ret >= 2) ? 2 : ret;
63776+}
63777+#endif
63778+
63779+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
63780+{
63781+#ifdef CONFIG_GRKERNSEC_BRUTE
63782+ uid_t uid = 0;
63783+
63784+ if (!grsec_enable_brute)
63785+ return;
63786+
63787+ rcu_read_lock();
63788+ read_lock(&tasklist_lock);
63789+ read_lock(&grsec_exec_file_lock);
63790+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
63791+ p->real_parent->brute = 1;
63792+ else {
63793+ const struct cred *cred = __task_cred(p), *cred2;
63794+ struct task_struct *tsk, *tsk2;
63795+
63796+ if (!__get_dumpable(mm_flags) && cred->uid) {
63797+ struct user_struct *user;
63798+
63799+ uid = cred->uid;
63800+
63801+ /* this is put upon execution past expiration */
63802+ user = find_user(uid);
63803+ if (user == NULL)
63804+ goto unlock;
63805+ user->banned = 1;
63806+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
63807+ if (user->ban_expires == ~0UL)
63808+ user->ban_expires--;
63809+
63810+ do_each_thread(tsk2, tsk) {
63811+ cred2 = __task_cred(tsk);
63812+ if (tsk != p && cred2->uid == uid)
63813+ gr_fake_force_sig(SIGKILL, tsk);
63814+ } while_each_thread(tsk2, tsk);
63815+ }
63816+ }
63817+unlock:
63818+ read_unlock(&grsec_exec_file_lock);
63819+ read_unlock(&tasklist_lock);
63820+ rcu_read_unlock();
63821+
63822+ if (uid)
63823+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
63824+#endif
63825+ return;
63826+}
63827+
63828+void gr_handle_brute_check(void)
63829+{
63830+#ifdef CONFIG_GRKERNSEC_BRUTE
63831+ if (current->brute)
63832+ msleep(30 * 1000);
63833+#endif
63834+ return;
63835+}
63836+
63837+void gr_handle_kernel_exploit(void)
63838+{
63839+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
63840+ const struct cred *cred;
63841+ struct task_struct *tsk, *tsk2;
63842+ struct user_struct *user;
63843+ uid_t uid;
63844+
63845+ if (in_irq() || in_serving_softirq() || in_nmi())
63846+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
63847+
63848+ uid = current_uid();
63849+
63850+ if (uid == 0)
63851+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
63852+ else {
63853+ /* kill all the processes of this user, hold a reference
63854+ to their creds struct, and prevent them from creating
63855+ another process until system reset
63856+ */
63857+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
63858+ /* we intentionally leak this ref */
63859+ user = get_uid(current->cred->user);
63860+ if (user) {
63861+ user->banned = 1;
63862+ user->ban_expires = ~0UL;
63863+ }
63864+
63865+ read_lock(&tasklist_lock);
63866+ do_each_thread(tsk2, tsk) {
63867+ cred = __task_cred(tsk);
63868+ if (cred->uid == uid)
63869+ gr_fake_force_sig(SIGKILL, tsk);
63870+ } while_each_thread(tsk2, tsk);
63871+ read_unlock(&tasklist_lock);
63872+ }
63873+#endif
63874+}
63875+
63876+int __gr_process_user_ban(struct user_struct *user)
63877+{
63878+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63879+ if (unlikely(user->banned)) {
63880+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
63881+ user->banned = 0;
63882+ user->ban_expires = 0;
63883+ free_uid(user);
63884+ } else
63885+ return -EPERM;
63886+ }
63887+#endif
63888+ return 0;
63889+}
63890+
63891+int gr_process_user_ban(void)
63892+{
63893+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
63894+ return __gr_process_user_ban(current->cred->user);
63895+#endif
63896+ return 0;
63897+}
63898diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
63899new file mode 100644
63900index 0000000..7512ea9
63901--- /dev/null
63902+++ b/grsecurity/grsec_sock.c
63903@@ -0,0 +1,275 @@
63904+#include <linux/kernel.h>
63905+#include <linux/module.h>
63906+#include <linux/sched.h>
63907+#include <linux/file.h>
63908+#include <linux/net.h>
63909+#include <linux/in.h>
63910+#include <linux/ip.h>
63911+#include <net/sock.h>
63912+#include <net/inet_sock.h>
63913+#include <linux/grsecurity.h>
63914+#include <linux/grinternal.h>
63915+#include <linux/gracl.h>
63916+
63917+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
63918+EXPORT_SYMBOL(gr_cap_rtnetlink);
63919+
63920+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
63921+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
63922+
63923+EXPORT_SYMBOL(gr_search_udp_recvmsg);
63924+EXPORT_SYMBOL(gr_search_udp_sendmsg);
63925+
63926+#ifdef CONFIG_UNIX_MODULE
63927+EXPORT_SYMBOL(gr_acl_handle_unix);
63928+EXPORT_SYMBOL(gr_acl_handle_mknod);
63929+EXPORT_SYMBOL(gr_handle_chroot_unix);
63930+EXPORT_SYMBOL(gr_handle_create);
63931+#endif
63932+
63933+#ifdef CONFIG_GRKERNSEC
63934+#define gr_conn_table_size 32749
63935+struct conn_table_entry {
63936+ struct conn_table_entry *next;
63937+ struct signal_struct *sig;
63938+};
63939+
63940+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
63941+DEFINE_SPINLOCK(gr_conn_table_lock);
63942+
63943+extern const char * gr_socktype_to_name(unsigned char type);
63944+extern const char * gr_proto_to_name(unsigned char proto);
63945+extern const char * gr_sockfamily_to_name(unsigned char family);
63946+
63947+static __inline__ int
63948+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
63949+{
63950+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
63951+}
63952+
63953+static __inline__ int
63954+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
63955+ __u16 sport, __u16 dport)
63956+{
63957+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
63958+ sig->gr_sport == sport && sig->gr_dport == dport))
63959+ return 1;
63960+ else
63961+ return 0;
63962+}
63963+
63964+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
63965+{
63966+ struct conn_table_entry **match;
63967+ unsigned int index;
63968+
63969+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63970+ sig->gr_sport, sig->gr_dport,
63971+ gr_conn_table_size);
63972+
63973+ newent->sig = sig;
63974+
63975+ match = &gr_conn_table[index];
63976+ newent->next = *match;
63977+ *match = newent;
63978+
63979+ return;
63980+}
63981+
63982+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
63983+{
63984+ struct conn_table_entry *match, *last = NULL;
63985+ unsigned int index;
63986+
63987+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
63988+ sig->gr_sport, sig->gr_dport,
63989+ gr_conn_table_size);
63990+
63991+ match = gr_conn_table[index];
63992+ while (match && !conn_match(match->sig,
63993+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
63994+ sig->gr_dport)) {
63995+ last = match;
63996+ match = match->next;
63997+ }
63998+
63999+ if (match) {
64000+ if (last)
64001+ last->next = match->next;
64002+ else
64003+ gr_conn_table[index] = NULL;
64004+ kfree(match);
64005+ }
64006+
64007+ return;
64008+}
64009+
64010+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
64011+ __u16 sport, __u16 dport)
64012+{
64013+ struct conn_table_entry *match;
64014+ unsigned int index;
64015+
64016+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
64017+
64018+ match = gr_conn_table[index];
64019+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
64020+ match = match->next;
64021+
64022+ if (match)
64023+ return match->sig;
64024+ else
64025+ return NULL;
64026+}
64027+
64028+#endif
64029+
64030+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
64031+{
64032+#ifdef CONFIG_GRKERNSEC
64033+ struct signal_struct *sig = task->signal;
64034+ struct conn_table_entry *newent;
64035+
64036+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
64037+ if (newent == NULL)
64038+ return;
64039+ /* no bh lock needed since we are called with bh disabled */
64040+ spin_lock(&gr_conn_table_lock);
64041+ gr_del_task_from_ip_table_nolock(sig);
64042+ sig->gr_saddr = inet->rcv_saddr;
64043+ sig->gr_daddr = inet->daddr;
64044+ sig->gr_sport = inet->sport;
64045+ sig->gr_dport = inet->dport;
64046+ gr_add_to_task_ip_table_nolock(sig, newent);
64047+ spin_unlock(&gr_conn_table_lock);
64048+#endif
64049+ return;
64050+}
64051+
64052+void gr_del_task_from_ip_table(struct task_struct *task)
64053+{
64054+#ifdef CONFIG_GRKERNSEC
64055+ spin_lock_bh(&gr_conn_table_lock);
64056+ gr_del_task_from_ip_table_nolock(task->signal);
64057+ spin_unlock_bh(&gr_conn_table_lock);
64058+#endif
64059+ return;
64060+}
64061+
64062+void
64063+gr_attach_curr_ip(const struct sock *sk)
64064+{
64065+#ifdef CONFIG_GRKERNSEC
64066+ struct signal_struct *p, *set;
64067+ const struct inet_sock *inet = inet_sk(sk);
64068+
64069+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
64070+ return;
64071+
64072+ set = current->signal;
64073+
64074+ spin_lock_bh(&gr_conn_table_lock);
64075+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
64076+ inet->dport, inet->sport);
64077+ if (unlikely(p != NULL)) {
64078+ set->curr_ip = p->curr_ip;
64079+ set->used_accept = 1;
64080+ gr_del_task_from_ip_table_nolock(p);
64081+ spin_unlock_bh(&gr_conn_table_lock);
64082+ return;
64083+ }
64084+ spin_unlock_bh(&gr_conn_table_lock);
64085+
64086+ set->curr_ip = inet->daddr;
64087+ set->used_accept = 1;
64088+#endif
64089+ return;
64090+}
64091+
64092+int
64093+gr_handle_sock_all(const int family, const int type, const int protocol)
64094+{
64095+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64096+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
64097+ (family != AF_UNIX)) {
64098+ if (family == AF_INET)
64099+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
64100+ else
64101+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
64102+ return -EACCES;
64103+ }
64104+#endif
64105+ return 0;
64106+}
64107+
64108+int
64109+gr_handle_sock_server(const struct sockaddr *sck)
64110+{
64111+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64112+ if (grsec_enable_socket_server &&
64113+ in_group_p(grsec_socket_server_gid) &&
64114+ sck && (sck->sa_family != AF_UNIX) &&
64115+ (sck->sa_family != AF_LOCAL)) {
64116+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64117+ return -EACCES;
64118+ }
64119+#endif
64120+ return 0;
64121+}
64122+
64123+int
64124+gr_handle_sock_server_other(const struct sock *sck)
64125+{
64126+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64127+ if (grsec_enable_socket_server &&
64128+ in_group_p(grsec_socket_server_gid) &&
64129+ sck && (sck->sk_family != AF_UNIX) &&
64130+ (sck->sk_family != AF_LOCAL)) {
64131+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
64132+ return -EACCES;
64133+ }
64134+#endif
64135+ return 0;
64136+}
64137+
64138+int
64139+gr_handle_sock_client(const struct sockaddr *sck)
64140+{
64141+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64142+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
64143+ sck && (sck->sa_family != AF_UNIX) &&
64144+ (sck->sa_family != AF_LOCAL)) {
64145+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
64146+ return -EACCES;
64147+ }
64148+#endif
64149+ return 0;
64150+}
64151+
64152+kernel_cap_t
64153+gr_cap_rtnetlink(struct sock *sock)
64154+{
64155+#ifdef CONFIG_GRKERNSEC
64156+ if (!gr_acl_is_enabled())
64157+ return current_cap();
64158+ else if (sock->sk_protocol == NETLINK_ISCSI &&
64159+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
64160+ gr_is_capable(CAP_SYS_ADMIN))
64161+ return current_cap();
64162+ else if (sock->sk_protocol == NETLINK_AUDIT &&
64163+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
64164+ gr_is_capable(CAP_AUDIT_WRITE) &&
64165+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
64166+ gr_is_capable(CAP_AUDIT_CONTROL))
64167+ return current_cap();
64168+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
64169+ ((sock->sk_protocol == NETLINK_ROUTE) ?
64170+ gr_is_capable_nolog(CAP_NET_ADMIN) :
64171+ gr_is_capable(CAP_NET_ADMIN)))
64172+ return current_cap();
64173+ else
64174+ return __cap_empty_set;
64175+#else
64176+ return current_cap();
64177+#endif
64178+}
64179diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
64180new file mode 100644
64181index 0000000..31f3258
64182--- /dev/null
64183+++ b/grsecurity/grsec_sysctl.c
64184@@ -0,0 +1,499 @@
64185+#include <linux/kernel.h>
64186+#include <linux/sched.h>
64187+#include <linux/sysctl.h>
64188+#include <linux/grsecurity.h>
64189+#include <linux/grinternal.h>
64190+
64191+int
64192+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
64193+{
64194+#ifdef CONFIG_GRKERNSEC_SYSCTL
64195+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
64196+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
64197+ return -EACCES;
64198+ }
64199+#endif
64200+ return 0;
64201+}
64202+
64203+#ifdef CONFIG_GRKERNSEC_ROFS
64204+static int __maybe_unused one = 1;
64205+#endif
64206+
64207+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
64208+ctl_table grsecurity_table[] = {
64209+#ifdef CONFIG_GRKERNSEC_SYSCTL
64210+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
64211+#ifdef CONFIG_GRKERNSEC_IO
64212+ {
64213+ .ctl_name = CTL_UNNUMBERED,
64214+ .procname = "disable_priv_io",
64215+ .data = &grsec_disable_privio,
64216+ .maxlen = sizeof(int),
64217+ .mode = 0600,
64218+ .proc_handler = &proc_dointvec,
64219+ },
64220+#endif
64221+#endif
64222+#ifdef CONFIG_GRKERNSEC_LINK
64223+ {
64224+ .ctl_name = CTL_UNNUMBERED,
64225+ .procname = "linking_restrictions",
64226+ .data = &grsec_enable_link,
64227+ .maxlen = sizeof(int),
64228+ .mode = 0600,
64229+ .proc_handler = &proc_dointvec,
64230+ },
64231+#endif
64232+#ifdef CONFIG_GRKERNSEC_BRUTE
64233+ {
64234+ .ctl_name = CTL_UNNUMBERED,
64235+ .procname = "deter_bruteforce",
64236+ .data = &grsec_enable_brute,
64237+ .maxlen = sizeof(int),
64238+ .mode = 0600,
64239+ .proc_handler = &proc_dointvec,
64240+ },
64241+#endif
64242+#ifdef CONFIG_GRKERNSEC_FIFO
64243+ {
64244+ .ctl_name = CTL_UNNUMBERED,
64245+ .procname = "fifo_restrictions",
64246+ .data = &grsec_enable_fifo,
64247+ .maxlen = sizeof(int),
64248+ .mode = 0600,
64249+ .proc_handler = &proc_dointvec,
64250+ },
64251+#endif
64252+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
64253+ {
64254+ .ctl_name = CTL_UNNUMBERED,
64255+ .procname = "ptrace_readexec",
64256+ .data = &grsec_enable_ptrace_readexec,
64257+ .maxlen = sizeof(int),
64258+ .mode = 0600,
64259+ .proc_handler = &proc_dointvec,
64260+ },
64261+#endif
64262+#ifdef CONFIG_GRKERNSEC_SETXID
64263+ {
64264+ .ctl_name = CTL_UNNUMBERED,
64265+ .procname = "consistent_setxid",
64266+ .data = &grsec_enable_setxid,
64267+ .maxlen = sizeof(int),
64268+ .mode = 0600,
64269+ .proc_handler = &proc_dointvec,
64270+ },
64271+#endif
64272+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
64273+ {
64274+ .ctl_name = CTL_UNNUMBERED,
64275+ .procname = "ip_blackhole",
64276+ .data = &grsec_enable_blackhole,
64277+ .maxlen = sizeof(int),
64278+ .mode = 0600,
64279+ .proc_handler = &proc_dointvec,
64280+ },
64281+ {
64282+ .ctl_name = CTL_UNNUMBERED,
64283+ .procname = "lastack_retries",
64284+ .data = &grsec_lastack_retries,
64285+ .maxlen = sizeof(int),
64286+ .mode = 0600,
64287+ .proc_handler = &proc_dointvec,
64288+ },
64289+#endif
64290+#ifdef CONFIG_GRKERNSEC_EXECLOG
64291+ {
64292+ .ctl_name = CTL_UNNUMBERED,
64293+ .procname = "exec_logging",
64294+ .data = &grsec_enable_execlog,
64295+ .maxlen = sizeof(int),
64296+ .mode = 0600,
64297+ .proc_handler = &proc_dointvec,
64298+ },
64299+#endif
64300+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
64301+ {
64302+ .ctl_name = CTL_UNNUMBERED,
64303+ .procname = "rwxmap_logging",
64304+ .data = &grsec_enable_log_rwxmaps,
64305+ .maxlen = sizeof(int),
64306+ .mode = 0600,
64307+ .proc_handler = &proc_dointvec,
64308+ },
64309+#endif
64310+#ifdef CONFIG_GRKERNSEC_SIGNAL
64311+ {
64312+ .ctl_name = CTL_UNNUMBERED,
64313+ .procname = "signal_logging",
64314+ .data = &grsec_enable_signal,
64315+ .maxlen = sizeof(int),
64316+ .mode = 0600,
64317+ .proc_handler = &proc_dointvec,
64318+ },
64319+#endif
64320+#ifdef CONFIG_GRKERNSEC_FORKFAIL
64321+ {
64322+ .ctl_name = CTL_UNNUMBERED,
64323+ .procname = "forkfail_logging",
64324+ .data = &grsec_enable_forkfail,
64325+ .maxlen = sizeof(int),
64326+ .mode = 0600,
64327+ .proc_handler = &proc_dointvec,
64328+ },
64329+#endif
64330+#ifdef CONFIG_GRKERNSEC_TIME
64331+ {
64332+ .ctl_name = CTL_UNNUMBERED,
64333+ .procname = "timechange_logging",
64334+ .data = &grsec_enable_time,
64335+ .maxlen = sizeof(int),
64336+ .mode = 0600,
64337+ .proc_handler = &proc_dointvec,
64338+ },
64339+#endif
64340+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64341+ {
64342+ .ctl_name = CTL_UNNUMBERED,
64343+ .procname = "chroot_deny_shmat",
64344+ .data = &grsec_enable_chroot_shmat,
64345+ .maxlen = sizeof(int),
64346+ .mode = 0600,
64347+ .proc_handler = &proc_dointvec,
64348+ },
64349+#endif
64350+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64351+ {
64352+ .ctl_name = CTL_UNNUMBERED,
64353+ .procname = "chroot_deny_unix",
64354+ .data = &grsec_enable_chroot_unix,
64355+ .maxlen = sizeof(int),
64356+ .mode = 0600,
64357+ .proc_handler = &proc_dointvec,
64358+ },
64359+#endif
64360+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64361+ {
64362+ .ctl_name = CTL_UNNUMBERED,
64363+ .procname = "chroot_deny_mount",
64364+ .data = &grsec_enable_chroot_mount,
64365+ .maxlen = sizeof(int),
64366+ .mode = 0600,
64367+ .proc_handler = &proc_dointvec,
64368+ },
64369+#endif
64370+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64371+ {
64372+ .ctl_name = CTL_UNNUMBERED,
64373+ .procname = "chroot_deny_fchdir",
64374+ .data = &grsec_enable_chroot_fchdir,
64375+ .maxlen = sizeof(int),
64376+ .mode = 0600,
64377+ .proc_handler = &proc_dointvec,
64378+ },
64379+#endif
64380+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64381+ {
64382+ .ctl_name = CTL_UNNUMBERED,
64383+ .procname = "chroot_deny_chroot",
64384+ .data = &grsec_enable_chroot_double,
64385+ .maxlen = sizeof(int),
64386+ .mode = 0600,
64387+ .proc_handler = &proc_dointvec,
64388+ },
64389+#endif
64390+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64391+ {
64392+ .ctl_name = CTL_UNNUMBERED,
64393+ .procname = "chroot_deny_pivot",
64394+ .data = &grsec_enable_chroot_pivot,
64395+ .maxlen = sizeof(int),
64396+ .mode = 0600,
64397+ .proc_handler = &proc_dointvec,
64398+ },
64399+#endif
64400+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64401+ {
64402+ .ctl_name = CTL_UNNUMBERED,
64403+ .procname = "chroot_enforce_chdir",
64404+ .data = &grsec_enable_chroot_chdir,
64405+ .maxlen = sizeof(int),
64406+ .mode = 0600,
64407+ .proc_handler = &proc_dointvec,
64408+ },
64409+#endif
64410+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64411+ {
64412+ .ctl_name = CTL_UNNUMBERED,
64413+ .procname = "chroot_deny_chmod",
64414+ .data = &grsec_enable_chroot_chmod,
64415+ .maxlen = sizeof(int),
64416+ .mode = 0600,
64417+ .proc_handler = &proc_dointvec,
64418+ },
64419+#endif
64420+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64421+ {
64422+ .ctl_name = CTL_UNNUMBERED,
64423+ .procname = "chroot_deny_mknod",
64424+ .data = &grsec_enable_chroot_mknod,
64425+ .maxlen = sizeof(int),
64426+ .mode = 0600,
64427+ .proc_handler = &proc_dointvec,
64428+ },
64429+#endif
64430+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64431+ {
64432+ .ctl_name = CTL_UNNUMBERED,
64433+ .procname = "chroot_restrict_nice",
64434+ .data = &grsec_enable_chroot_nice,
64435+ .maxlen = sizeof(int),
64436+ .mode = 0600,
64437+ .proc_handler = &proc_dointvec,
64438+ },
64439+#endif
64440+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64441+ {
64442+ .ctl_name = CTL_UNNUMBERED,
64443+ .procname = "chroot_execlog",
64444+ .data = &grsec_enable_chroot_execlog,
64445+ .maxlen = sizeof(int),
64446+ .mode = 0600,
64447+ .proc_handler = &proc_dointvec,
64448+ },
64449+#endif
64450+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64451+ {
64452+ .ctl_name = CTL_UNNUMBERED,
64453+ .procname = "chroot_caps",
64454+ .data = &grsec_enable_chroot_caps,
64455+ .maxlen = sizeof(int),
64456+ .mode = 0600,
64457+ .proc_handler = &proc_dointvec,
64458+ },
64459+#endif
64460+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64461+ {
64462+ .ctl_name = CTL_UNNUMBERED,
64463+ .procname = "chroot_deny_sysctl",
64464+ .data = &grsec_enable_chroot_sysctl,
64465+ .maxlen = sizeof(int),
64466+ .mode = 0600,
64467+ .proc_handler = &proc_dointvec,
64468+ },
64469+#endif
64470+#ifdef CONFIG_GRKERNSEC_TPE
64471+ {
64472+ .ctl_name = CTL_UNNUMBERED,
64473+ .procname = "tpe",
64474+ .data = &grsec_enable_tpe,
64475+ .maxlen = sizeof(int),
64476+ .mode = 0600,
64477+ .proc_handler = &proc_dointvec,
64478+ },
64479+ {
64480+ .ctl_name = CTL_UNNUMBERED,
64481+ .procname = "tpe_gid",
64482+ .data = &grsec_tpe_gid,
64483+ .maxlen = sizeof(int),
64484+ .mode = 0600,
64485+ .proc_handler = &proc_dointvec,
64486+ },
64487+#endif
64488+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64489+ {
64490+ .ctl_name = CTL_UNNUMBERED,
64491+ .procname = "tpe_invert",
64492+ .data = &grsec_enable_tpe_invert,
64493+ .maxlen = sizeof(int),
64494+ .mode = 0600,
64495+ .proc_handler = &proc_dointvec,
64496+ },
64497+#endif
64498+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64499+ {
64500+ .ctl_name = CTL_UNNUMBERED,
64501+ .procname = "tpe_restrict_all",
64502+ .data = &grsec_enable_tpe_all,
64503+ .maxlen = sizeof(int),
64504+ .mode = 0600,
64505+ .proc_handler = &proc_dointvec,
64506+ },
64507+#endif
64508+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
64509+ {
64510+ .ctl_name = CTL_UNNUMBERED,
64511+ .procname = "socket_all",
64512+ .data = &grsec_enable_socket_all,
64513+ .maxlen = sizeof(int),
64514+ .mode = 0600,
64515+ .proc_handler = &proc_dointvec,
64516+ },
64517+ {
64518+ .ctl_name = CTL_UNNUMBERED,
64519+ .procname = "socket_all_gid",
64520+ .data = &grsec_socket_all_gid,
64521+ .maxlen = sizeof(int),
64522+ .mode = 0600,
64523+ .proc_handler = &proc_dointvec,
64524+ },
64525+#endif
64526+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
64527+ {
64528+ .ctl_name = CTL_UNNUMBERED,
64529+ .procname = "socket_client",
64530+ .data = &grsec_enable_socket_client,
64531+ .maxlen = sizeof(int),
64532+ .mode = 0600,
64533+ .proc_handler = &proc_dointvec,
64534+ },
64535+ {
64536+ .ctl_name = CTL_UNNUMBERED,
64537+ .procname = "socket_client_gid",
64538+ .data = &grsec_socket_client_gid,
64539+ .maxlen = sizeof(int),
64540+ .mode = 0600,
64541+ .proc_handler = &proc_dointvec,
64542+ },
64543+#endif
64544+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
64545+ {
64546+ .ctl_name = CTL_UNNUMBERED,
64547+ .procname = "socket_server",
64548+ .data = &grsec_enable_socket_server,
64549+ .maxlen = sizeof(int),
64550+ .mode = 0600,
64551+ .proc_handler = &proc_dointvec,
64552+ },
64553+ {
64554+ .ctl_name = CTL_UNNUMBERED,
64555+ .procname = "socket_server_gid",
64556+ .data = &grsec_socket_server_gid,
64557+ .maxlen = sizeof(int),
64558+ .mode = 0600,
64559+ .proc_handler = &proc_dointvec,
64560+ },
64561+#endif
64562+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
64563+ {
64564+ .ctl_name = CTL_UNNUMBERED,
64565+ .procname = "audit_group",
64566+ .data = &grsec_enable_group,
64567+ .maxlen = sizeof(int),
64568+ .mode = 0600,
64569+ .proc_handler = &proc_dointvec,
64570+ },
64571+ {
64572+ .ctl_name = CTL_UNNUMBERED,
64573+ .procname = "audit_gid",
64574+ .data = &grsec_audit_gid,
64575+ .maxlen = sizeof(int),
64576+ .mode = 0600,
64577+ .proc_handler = &proc_dointvec,
64578+ },
64579+#endif
64580+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64581+ {
64582+ .ctl_name = CTL_UNNUMBERED,
64583+ .procname = "audit_chdir",
64584+ .data = &grsec_enable_chdir,
64585+ .maxlen = sizeof(int),
64586+ .mode = 0600,
64587+ .proc_handler = &proc_dointvec,
64588+ },
64589+#endif
64590+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
64591+ {
64592+ .ctl_name = CTL_UNNUMBERED,
64593+ .procname = "audit_mount",
64594+ .data = &grsec_enable_mount,
64595+ .maxlen = sizeof(int),
64596+ .mode = 0600,
64597+ .proc_handler = &proc_dointvec,
64598+ },
64599+#endif
64600+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
64601+ {
64602+ .ctl_name = CTL_UNNUMBERED,
64603+ .procname = "audit_textrel",
64604+ .data = &grsec_enable_audit_textrel,
64605+ .maxlen = sizeof(int),
64606+ .mode = 0600,
64607+ .proc_handler = &proc_dointvec,
64608+ },
64609+#endif
64610+#ifdef CONFIG_GRKERNSEC_DMESG
64611+ {
64612+ .ctl_name = CTL_UNNUMBERED,
64613+ .procname = "dmesg",
64614+ .data = &grsec_enable_dmesg,
64615+ .maxlen = sizeof(int),
64616+ .mode = 0600,
64617+ .proc_handler = &proc_dointvec,
64618+ },
64619+#endif
64620+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64621+ {
64622+ .ctl_name = CTL_UNNUMBERED,
64623+ .procname = "chroot_findtask",
64624+ .data = &grsec_enable_chroot_findtask,
64625+ .maxlen = sizeof(int),
64626+ .mode = 0600,
64627+ .proc_handler = &proc_dointvec,
64628+ },
64629+#endif
64630+#ifdef CONFIG_GRKERNSEC_RESLOG
64631+ {
64632+ .ctl_name = CTL_UNNUMBERED,
64633+ .procname = "resource_logging",
64634+ .data = &grsec_resource_logging,
64635+ .maxlen = sizeof(int),
64636+ .mode = 0600,
64637+ .proc_handler = &proc_dointvec,
64638+ },
64639+#endif
64640+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
64641+ {
64642+ .ctl_name = CTL_UNNUMBERED,
64643+ .procname = "audit_ptrace",
64644+ .data = &grsec_enable_audit_ptrace,
64645+ .maxlen = sizeof(int),
64646+ .mode = 0600,
64647+ .proc_handler = &proc_dointvec,
64648+ },
64649+#endif
64650+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
64651+ {
64652+ .ctl_name = CTL_UNNUMBERED,
64653+ .procname = "harden_ptrace",
64654+ .data = &grsec_enable_harden_ptrace,
64655+ .maxlen = sizeof(int),
64656+ .mode = 0600,
64657+ .proc_handler = &proc_dointvec,
64658+ },
64659+#endif
64660+ {
64661+ .ctl_name = CTL_UNNUMBERED,
64662+ .procname = "grsec_lock",
64663+ .data = &grsec_lock,
64664+ .maxlen = sizeof(int),
64665+ .mode = 0600,
64666+ .proc_handler = &proc_dointvec,
64667+ },
64668+#endif
64669+#ifdef CONFIG_GRKERNSEC_ROFS
64670+ {
64671+ .ctl_name = CTL_UNNUMBERED,
64672+ .procname = "romount_protect",
64673+ .data = &grsec_enable_rofs,
64674+ .maxlen = sizeof(int),
64675+ .mode = 0600,
64676+ .proc_handler = &proc_dointvec_minmax,
64677+ .extra1 = &one,
64678+ .extra2 = &one,
64679+ },
64680+#endif
64681+ { .ctl_name = 0 }
64682+};
64683+#endif
64684diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
64685new file mode 100644
64686index 0000000..0dc13c3
64687--- /dev/null
64688+++ b/grsecurity/grsec_time.c
64689@@ -0,0 +1,16 @@
64690+#include <linux/kernel.h>
64691+#include <linux/sched.h>
64692+#include <linux/grinternal.h>
64693+#include <linux/module.h>
64694+
64695+void
64696+gr_log_timechange(void)
64697+{
64698+#ifdef CONFIG_GRKERNSEC_TIME
64699+ if (grsec_enable_time)
64700+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
64701+#endif
64702+ return;
64703+}
64704+
64705+EXPORT_SYMBOL(gr_log_timechange);
64706diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
64707new file mode 100644
64708index 0000000..07e0dc0
64709--- /dev/null
64710+++ b/grsecurity/grsec_tpe.c
64711@@ -0,0 +1,73 @@
64712+#include <linux/kernel.h>
64713+#include <linux/sched.h>
64714+#include <linux/file.h>
64715+#include <linux/fs.h>
64716+#include <linux/grinternal.h>
64717+
64718+extern int gr_acl_tpe_check(void);
64719+
64720+int
64721+gr_tpe_allow(const struct file *file)
64722+{
64723+#ifdef CONFIG_GRKERNSEC
64724+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
64725+ const struct cred *cred = current_cred();
64726+ char *msg = NULL;
64727+ char *msg2 = NULL;
64728+
64729+ // never restrict root
64730+ if (!cred->uid)
64731+ return 1;
64732+
64733+ if (grsec_enable_tpe) {
64734+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
64735+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
64736+ msg = "not being in trusted group";
64737+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
64738+ msg = "being in untrusted group";
64739+#else
64740+ if (in_group_p(grsec_tpe_gid))
64741+ msg = "being in untrusted group";
64742+#endif
64743+ }
64744+ if (!msg && gr_acl_tpe_check())
64745+ msg = "being in untrusted role";
64746+
64747+ // not in any affected group/role
64748+ if (!msg)
64749+ goto next_check;
64750+
64751+ if (inode->i_uid)
64752+ msg2 = "file in non-root-owned directory";
64753+ else if (inode->i_mode & S_IWOTH)
64754+ msg2 = "file in world-writable directory";
64755+ else if (inode->i_mode & S_IWGRP)
64756+ msg2 = "file in group-writable directory";
64757+
64758+ if (msg && msg2) {
64759+ char fullmsg[70] = {0};
64760+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
64761+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
64762+ return 0;
64763+ }
64764+ msg = NULL;
64765+next_check:
64766+#ifdef CONFIG_GRKERNSEC_TPE_ALL
64767+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
64768+ return 1;
64769+
64770+ if (inode->i_uid && (inode->i_uid != cred->uid))
64771+ msg = "directory not owned by user";
64772+ else if (inode->i_mode & S_IWOTH)
64773+ msg = "file in world-writable directory";
64774+ else if (inode->i_mode & S_IWGRP)
64775+ msg = "file in group-writable directory";
64776+
64777+ if (msg) {
64778+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
64779+ return 0;
64780+ }
64781+#endif
64782+#endif
64783+ return 1;
64784+}
64785diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
64786new file mode 100644
64787index 0000000..9f7b1ac
64788--- /dev/null
64789+++ b/grsecurity/grsum.c
64790@@ -0,0 +1,61 @@
64791+#include <linux/err.h>
64792+#include <linux/kernel.h>
64793+#include <linux/sched.h>
64794+#include <linux/mm.h>
64795+#include <linux/scatterlist.h>
64796+#include <linux/crypto.h>
64797+#include <linux/gracl.h>
64798+
64799+
64800+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
64801+#error "crypto and sha256 must be built into the kernel"
64802+#endif
64803+
64804+int
64805+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
64806+{
64807+ char *p;
64808+ struct crypto_hash *tfm;
64809+ struct hash_desc desc;
64810+ struct scatterlist sg;
64811+ unsigned char temp_sum[GR_SHA_LEN];
64812+ volatile int retval = 0;
64813+ volatile int dummy = 0;
64814+ unsigned int i;
64815+
64816+ sg_init_table(&sg, 1);
64817+
64818+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
64819+ if (IS_ERR(tfm)) {
64820+ /* should never happen, since sha256 should be built in */
64821+ return 1;
64822+ }
64823+
64824+ desc.tfm = tfm;
64825+ desc.flags = 0;
64826+
64827+ crypto_hash_init(&desc);
64828+
64829+ p = salt;
64830+ sg_set_buf(&sg, p, GR_SALT_LEN);
64831+ crypto_hash_update(&desc, &sg, sg.length);
64832+
64833+ p = entry->pw;
64834+ sg_set_buf(&sg, p, strlen(p));
64835+
64836+ crypto_hash_update(&desc, &sg, sg.length);
64837+
64838+ crypto_hash_final(&desc, temp_sum);
64839+
64840+ memset(entry->pw, 0, GR_PW_LEN);
64841+
64842+ for (i = 0; i < GR_SHA_LEN; i++)
64843+ if (sum[i] != temp_sum[i])
64844+ retval = 1;
64845+ else
64846+ dummy = 1; // waste a cycle
64847+
64848+ crypto_free_hash(tfm);
64849+
64850+ return retval;
64851+}
64852diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
64853index 3cd9ccd..fe16d47 100644
64854--- a/include/acpi/acpi_bus.h
64855+++ b/include/acpi/acpi_bus.h
64856@@ -107,7 +107,7 @@ struct acpi_device_ops {
64857 acpi_op_bind bind;
64858 acpi_op_unbind unbind;
64859 acpi_op_notify notify;
64860-};
64861+} __no_const;
64862
64863 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
64864
64865diff --git a/include/acpi/acpi_drivers.h b/include/acpi/acpi_drivers.h
64866index f4906f6..71feb73 100644
64867--- a/include/acpi/acpi_drivers.h
64868+++ b/include/acpi/acpi_drivers.h
64869@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acpi_handle handle, int type);
64870 Dock Station
64871 -------------------------------------------------------------------------- */
64872 struct acpi_dock_ops {
64873- acpi_notify_handler handler;
64874- acpi_notify_handler uevent;
64875+ const acpi_notify_handler handler;
64876+ const acpi_notify_handler uevent;
64877 };
64878
64879 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
64880@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle handle);
64881 extern int register_dock_notifier(struct notifier_block *nb);
64882 extern void unregister_dock_notifier(struct notifier_block *nb);
64883 extern int register_hotplug_dock_device(acpi_handle handle,
64884- struct acpi_dock_ops *ops,
64885+ const struct acpi_dock_ops *ops,
64886 void *context);
64887 extern void unregister_hotplug_dock_device(acpi_handle handle);
64888 #else
64889@@ -144,7 +144,7 @@ static inline void unregister_dock_notifier(struct notifier_block *nb)
64890 {
64891 }
64892 static inline int register_hotplug_dock_device(acpi_handle handle,
64893- struct acpi_dock_ops *ops,
64894+ const struct acpi_dock_ops *ops,
64895 void *context)
64896 {
64897 return -ENODEV;
64898diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
64899index b7babf0..a9ac9fc 100644
64900--- a/include/asm-generic/atomic-long.h
64901+++ b/include/asm-generic/atomic-long.h
64902@@ -22,6 +22,12 @@
64903
64904 typedef atomic64_t atomic_long_t;
64905
64906+#ifdef CONFIG_PAX_REFCOUNT
64907+typedef atomic64_unchecked_t atomic_long_unchecked_t;
64908+#else
64909+typedef atomic64_t atomic_long_unchecked_t;
64910+#endif
64911+
64912 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
64913
64914 static inline long atomic_long_read(atomic_long_t *l)
64915@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
64916 return (long)atomic64_read(v);
64917 }
64918
64919+#ifdef CONFIG_PAX_REFCOUNT
64920+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
64921+{
64922+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64923+
64924+ return (long)atomic64_read_unchecked(v);
64925+}
64926+#endif
64927+
64928 static inline void atomic_long_set(atomic_long_t *l, long i)
64929 {
64930 atomic64_t *v = (atomic64_t *)l;
64931@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
64932 atomic64_set(v, i);
64933 }
64934
64935+#ifdef CONFIG_PAX_REFCOUNT
64936+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
64937+{
64938+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64939+
64940+ atomic64_set_unchecked(v, i);
64941+}
64942+#endif
64943+
64944 static inline void atomic_long_inc(atomic_long_t *l)
64945 {
64946 atomic64_t *v = (atomic64_t *)l;
64947@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
64948 atomic64_inc(v);
64949 }
64950
64951+#ifdef CONFIG_PAX_REFCOUNT
64952+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
64953+{
64954+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64955+
64956+ atomic64_inc_unchecked(v);
64957+}
64958+#endif
64959+
64960 static inline void atomic_long_dec(atomic_long_t *l)
64961 {
64962 atomic64_t *v = (atomic64_t *)l;
64963@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
64964 atomic64_dec(v);
64965 }
64966
64967+#ifdef CONFIG_PAX_REFCOUNT
64968+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
64969+{
64970+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64971+
64972+ atomic64_dec_unchecked(v);
64973+}
64974+#endif
64975+
64976 static inline void atomic_long_add(long i, atomic_long_t *l)
64977 {
64978 atomic64_t *v = (atomic64_t *)l;
64979@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
64980 atomic64_add(i, v);
64981 }
64982
64983+#ifdef CONFIG_PAX_REFCOUNT
64984+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
64985+{
64986+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
64987+
64988+ atomic64_add_unchecked(i, v);
64989+}
64990+#endif
64991+
64992 static inline void atomic_long_sub(long i, atomic_long_t *l)
64993 {
64994 atomic64_t *v = (atomic64_t *)l;
64995@@ -115,6 +166,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
64996 return (long)atomic64_inc_return(v);
64997 }
64998
64999+#ifdef CONFIG_PAX_REFCOUNT
65000+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65001+{
65002+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
65003+
65004+ return (long)atomic64_inc_return_unchecked(v);
65005+}
65006+#endif
65007+
65008 static inline long atomic_long_dec_return(atomic_long_t *l)
65009 {
65010 atomic64_t *v = (atomic64_t *)l;
65011@@ -140,6 +200,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65012
65013 typedef atomic_t atomic_long_t;
65014
65015+#ifdef CONFIG_PAX_REFCOUNT
65016+typedef atomic_unchecked_t atomic_long_unchecked_t;
65017+#else
65018+typedef atomic_t atomic_long_unchecked_t;
65019+#endif
65020+
65021 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
65022 static inline long atomic_long_read(atomic_long_t *l)
65023 {
65024@@ -148,6 +214,15 @@ static inline long atomic_long_read(atomic_long_t *l)
65025 return (long)atomic_read(v);
65026 }
65027
65028+#ifdef CONFIG_PAX_REFCOUNT
65029+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
65030+{
65031+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65032+
65033+ return (long)atomic_read_unchecked(v);
65034+}
65035+#endif
65036+
65037 static inline void atomic_long_set(atomic_long_t *l, long i)
65038 {
65039 atomic_t *v = (atomic_t *)l;
65040@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
65041 atomic_set(v, i);
65042 }
65043
65044+#ifdef CONFIG_PAX_REFCOUNT
65045+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
65046+{
65047+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65048+
65049+ atomic_set_unchecked(v, i);
65050+}
65051+#endif
65052+
65053 static inline void atomic_long_inc(atomic_long_t *l)
65054 {
65055 atomic_t *v = (atomic_t *)l;
65056@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
65057 atomic_inc(v);
65058 }
65059
65060+#ifdef CONFIG_PAX_REFCOUNT
65061+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
65062+{
65063+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65064+
65065+ atomic_inc_unchecked(v);
65066+}
65067+#endif
65068+
65069 static inline void atomic_long_dec(atomic_long_t *l)
65070 {
65071 atomic_t *v = (atomic_t *)l;
65072@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
65073 atomic_dec(v);
65074 }
65075
65076+#ifdef CONFIG_PAX_REFCOUNT
65077+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
65078+{
65079+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65080+
65081+ atomic_dec_unchecked(v);
65082+}
65083+#endif
65084+
65085 static inline void atomic_long_add(long i, atomic_long_t *l)
65086 {
65087 atomic_t *v = (atomic_t *)l;
65088@@ -176,6 +278,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
65089 atomic_add(i, v);
65090 }
65091
65092+#ifdef CONFIG_PAX_REFCOUNT
65093+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
65094+{
65095+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65096+
65097+ atomic_add_unchecked(i, v);
65098+}
65099+#endif
65100+
65101 static inline void atomic_long_sub(long i, atomic_long_t *l)
65102 {
65103 atomic_t *v = (atomic_t *)l;
65104@@ -232,6 +343,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
65105 return (long)atomic_inc_return(v);
65106 }
65107
65108+#ifdef CONFIG_PAX_REFCOUNT
65109+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
65110+{
65111+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
65112+
65113+ return (long)atomic_inc_return_unchecked(v);
65114+}
65115+#endif
65116+
65117 static inline long atomic_long_dec_return(atomic_long_t *l)
65118 {
65119 atomic_t *v = (atomic_t *)l;
65120@@ -255,4 +375,47 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
65121
65122 #endif /* BITS_PER_LONG == 64 */
65123
65124+#ifdef CONFIG_PAX_REFCOUNT
65125+static inline void pax_refcount_needs_these_functions(void)
65126+{
65127+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
65128+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
65129+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
65130+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
65131+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
65132+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
65133+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
65134+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
65135+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
65136+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
65137+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
65138+
65139+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
65140+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
65141+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
65142+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
65143+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
65144+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
65145+}
65146+#else
65147+#define atomic_read_unchecked(v) atomic_read(v)
65148+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
65149+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
65150+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
65151+#define atomic_inc_unchecked(v) atomic_inc(v)
65152+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
65153+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
65154+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
65155+#define atomic_dec_unchecked(v) atomic_dec(v)
65156+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
65157+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
65158+
65159+#define atomic_long_read_unchecked(v) atomic_long_read(v)
65160+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
65161+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
65162+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
65163+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
65164+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
65165+#endif
65166+
65167 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
65168diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
65169index b18ce4f..2ee2843 100644
65170--- a/include/asm-generic/atomic64.h
65171+++ b/include/asm-generic/atomic64.h
65172@@ -16,6 +16,8 @@ typedef struct {
65173 long long counter;
65174 } atomic64_t;
65175
65176+typedef atomic64_t atomic64_unchecked_t;
65177+
65178 #define ATOMIC64_INIT(i) { (i) }
65179
65180 extern long long atomic64_read(const atomic64_t *v);
65181@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
65182 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
65183 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
65184
65185+#define atomic64_read_unchecked(v) atomic64_read(v)
65186+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
65187+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
65188+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
65189+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
65190+#define atomic64_inc_unchecked(v) atomic64_inc(v)
65191+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
65192+#define atomic64_dec_unchecked(v) atomic64_dec(v)
65193+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
65194+
65195 #endif /* _ASM_GENERIC_ATOMIC64_H */
65196diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
65197index d48ddf0..656a0ac 100644
65198--- a/include/asm-generic/bug.h
65199+++ b/include/asm-generic/bug.h
65200@@ -105,11 +105,11 @@ extern void warn_slowpath_null(const char *file, const int line);
65201
65202 #else /* !CONFIG_BUG */
65203 #ifndef HAVE_ARCH_BUG
65204-#define BUG() do {} while(0)
65205+#define BUG() do { for (;;) ; } while(0)
65206 #endif
65207
65208 #ifndef HAVE_ARCH_BUG_ON
65209-#define BUG_ON(condition) do { if (condition) ; } while(0)
65210+#define BUG_ON(condition) do { if (condition) for (;;) ; } while(0)
65211 #endif
65212
65213 #ifndef HAVE_ARCH_WARN_ON
65214diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
65215index 1bfcfe5..e04c5c9 100644
65216--- a/include/asm-generic/cache.h
65217+++ b/include/asm-generic/cache.h
65218@@ -6,7 +6,7 @@
65219 * cache lines need to provide their own cache.h.
65220 */
65221
65222-#define L1_CACHE_SHIFT 5
65223-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
65224+#define L1_CACHE_SHIFT 5UL
65225+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
65226
65227 #endif /* __ASM_GENERIC_CACHE_H */
65228diff --git a/include/asm-generic/dma-mapping-common.h b/include/asm-generic/dma-mapping-common.h
65229index 6920695..41038bc 100644
65230--- a/include/asm-generic/dma-mapping-common.h
65231+++ b/include/asm-generic/dma-mapping-common.h
65232@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
65233 enum dma_data_direction dir,
65234 struct dma_attrs *attrs)
65235 {
65236- struct dma_map_ops *ops = get_dma_ops(dev);
65237+ const struct dma_map_ops *ops = get_dma_ops(dev);
65238 dma_addr_t addr;
65239
65240 kmemcheck_mark_initialized(ptr, size);
65241@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
65242 enum dma_data_direction dir,
65243 struct dma_attrs *attrs)
65244 {
65245- struct dma_map_ops *ops = get_dma_ops(dev);
65246+ const struct dma_map_ops *ops = get_dma_ops(dev);
65247
65248 BUG_ON(!valid_dma_direction(dir));
65249 if (ops->unmap_page)
65250@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
65251 int nents, enum dma_data_direction dir,
65252 struct dma_attrs *attrs)
65253 {
65254- struct dma_map_ops *ops = get_dma_ops(dev);
65255+ const struct dma_map_ops *ops = get_dma_ops(dev);
65256 int i, ents;
65257 struct scatterlist *s;
65258
65259@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
65260 int nents, enum dma_data_direction dir,
65261 struct dma_attrs *attrs)
65262 {
65263- struct dma_map_ops *ops = get_dma_ops(dev);
65264+ const struct dma_map_ops *ops = get_dma_ops(dev);
65265
65266 BUG_ON(!valid_dma_direction(dir));
65267 debug_dma_unmap_sg(dev, sg, nents, dir);
65268@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65269 size_t offset, size_t size,
65270 enum dma_data_direction dir)
65271 {
65272- struct dma_map_ops *ops = get_dma_ops(dev);
65273+ const struct dma_map_ops *ops = get_dma_ops(dev);
65274 dma_addr_t addr;
65275
65276 kmemcheck_mark_initialized(page_address(page) + offset, size);
65277@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
65278 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
65279 size_t size, enum dma_data_direction dir)
65280 {
65281- struct dma_map_ops *ops = get_dma_ops(dev);
65282+ const struct dma_map_ops *ops = get_dma_ops(dev);
65283
65284 BUG_ON(!valid_dma_direction(dir));
65285 if (ops->unmap_page)
65286@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
65287 size_t size,
65288 enum dma_data_direction dir)
65289 {
65290- struct dma_map_ops *ops = get_dma_ops(dev);
65291+ const struct dma_map_ops *ops = get_dma_ops(dev);
65292
65293 BUG_ON(!valid_dma_direction(dir));
65294 if (ops->sync_single_for_cpu)
65295@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_device(struct device *dev,
65296 dma_addr_t addr, size_t size,
65297 enum dma_data_direction dir)
65298 {
65299- struct dma_map_ops *ops = get_dma_ops(dev);
65300+ const struct dma_map_ops *ops = get_dma_ops(dev);
65301
65302 BUG_ON(!valid_dma_direction(dir));
65303 if (ops->sync_single_for_device)
65304@@ -123,7 +123,7 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
65305 size_t size,
65306 enum dma_data_direction dir)
65307 {
65308- struct dma_map_ops *ops = get_dma_ops(dev);
65309+ const struct dma_map_ops *ops = get_dma_ops(dev);
65310
65311 BUG_ON(!valid_dma_direction(dir));
65312 if (ops->sync_single_range_for_cpu) {
65313@@ -140,7 +140,7 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
65314 size_t size,
65315 enum dma_data_direction dir)
65316 {
65317- struct dma_map_ops *ops = get_dma_ops(dev);
65318+ const struct dma_map_ops *ops = get_dma_ops(dev);
65319
65320 BUG_ON(!valid_dma_direction(dir));
65321 if (ops->sync_single_range_for_device) {
65322@@ -155,7 +155,7 @@ static inline void
65323 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
65324 int nelems, enum dma_data_direction dir)
65325 {
65326- struct dma_map_ops *ops = get_dma_ops(dev);
65327+ const struct dma_map_ops *ops = get_dma_ops(dev);
65328
65329 BUG_ON(!valid_dma_direction(dir));
65330 if (ops->sync_sg_for_cpu)
65331@@ -167,7 +167,7 @@ static inline void
65332 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
65333 int nelems, enum dma_data_direction dir)
65334 {
65335- struct dma_map_ops *ops = get_dma_ops(dev);
65336+ const struct dma_map_ops *ops = get_dma_ops(dev);
65337
65338 BUG_ON(!valid_dma_direction(dir));
65339 if (ops->sync_sg_for_device)
65340diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
65341index 0d68a1e..b74a761 100644
65342--- a/include/asm-generic/emergency-restart.h
65343+++ b/include/asm-generic/emergency-restart.h
65344@@ -1,7 +1,7 @@
65345 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
65346 #define _ASM_GENERIC_EMERGENCY_RESTART_H
65347
65348-static inline void machine_emergency_restart(void)
65349+static inline __noreturn void machine_emergency_restart(void)
65350 {
65351 machine_restart(NULL);
65352 }
65353diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
65354index 3c2344f..4590a7d 100644
65355--- a/include/asm-generic/futex.h
65356+++ b/include/asm-generic/futex.h
65357@@ -6,7 +6,7 @@
65358 #include <asm/errno.h>
65359
65360 static inline int
65361-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65362+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
65363 {
65364 int op = (encoded_op >> 28) & 7;
65365 int cmp = (encoded_op >> 24) & 15;
65366@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
65367 }
65368
65369 static inline int
65370-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
65371+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
65372 {
65373 return -ENOSYS;
65374 }
65375diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
65376index 1ca3efc..e3dc852 100644
65377--- a/include/asm-generic/int-l64.h
65378+++ b/include/asm-generic/int-l64.h
65379@@ -46,6 +46,8 @@ typedef unsigned int u32;
65380 typedef signed long s64;
65381 typedef unsigned long u64;
65382
65383+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
65384+
65385 #define S8_C(x) x
65386 #define U8_C(x) x ## U
65387 #define S16_C(x) x
65388diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
65389index f394147..b6152b9 100644
65390--- a/include/asm-generic/int-ll64.h
65391+++ b/include/asm-generic/int-ll64.h
65392@@ -51,6 +51,8 @@ typedef unsigned int u32;
65393 typedef signed long long s64;
65394 typedef unsigned long long u64;
65395
65396+typedef unsigned long long intoverflow_t;
65397+
65398 #define S8_C(x) x
65399 #define U8_C(x) x ## U
65400 #define S16_C(x) x
65401diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
65402index e5f234a..cdb16b3 100644
65403--- a/include/asm-generic/kmap_types.h
65404+++ b/include/asm-generic/kmap_types.h
65405@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
65406 KMAP_D(16) KM_IRQ_PTE,
65407 KMAP_D(17) KM_NMI,
65408 KMAP_D(18) KM_NMI_PTE,
65409-KMAP_D(19) KM_TYPE_NR
65410+KMAP_D(19) KM_CLEARPAGE,
65411+KMAP_D(20) KM_TYPE_NR
65412 };
65413
65414 #undef KMAP_D
65415diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
65416index 725612b..9cc513a 100644
65417--- a/include/asm-generic/pgtable-nopmd.h
65418+++ b/include/asm-generic/pgtable-nopmd.h
65419@@ -1,14 +1,19 @@
65420 #ifndef _PGTABLE_NOPMD_H
65421 #define _PGTABLE_NOPMD_H
65422
65423-#ifndef __ASSEMBLY__
65424-
65425 #include <asm-generic/pgtable-nopud.h>
65426
65427-struct mm_struct;
65428-
65429 #define __PAGETABLE_PMD_FOLDED
65430
65431+#define PMD_SHIFT PUD_SHIFT
65432+#define PTRS_PER_PMD 1
65433+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
65434+#define PMD_MASK (~(PMD_SIZE-1))
65435+
65436+#ifndef __ASSEMBLY__
65437+
65438+struct mm_struct;
65439+
65440 /*
65441 * Having the pmd type consist of a pud gets the size right, and allows
65442 * us to conceptually access the pud entry that this pmd is folded into
65443@@ -16,11 +21,6 @@ struct mm_struct;
65444 */
65445 typedef struct { pud_t pud; } pmd_t;
65446
65447-#define PMD_SHIFT PUD_SHIFT
65448-#define PTRS_PER_PMD 1
65449-#define PMD_SIZE (1UL << PMD_SHIFT)
65450-#define PMD_MASK (~(PMD_SIZE-1))
65451-
65452 /*
65453 * The "pud_xxx()" functions here are trivial for a folded two-level
65454 * setup: the pmd is never bad, and a pmd always exists (as it's folded
65455diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
65456index 810431d..ccc3638 100644
65457--- a/include/asm-generic/pgtable-nopud.h
65458+++ b/include/asm-generic/pgtable-nopud.h
65459@@ -1,10 +1,15 @@
65460 #ifndef _PGTABLE_NOPUD_H
65461 #define _PGTABLE_NOPUD_H
65462
65463-#ifndef __ASSEMBLY__
65464-
65465 #define __PAGETABLE_PUD_FOLDED
65466
65467+#define PUD_SHIFT PGDIR_SHIFT
65468+#define PTRS_PER_PUD 1
65469+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
65470+#define PUD_MASK (~(PUD_SIZE-1))
65471+
65472+#ifndef __ASSEMBLY__
65473+
65474 /*
65475 * Having the pud type consist of a pgd gets the size right, and allows
65476 * us to conceptually access the pgd entry that this pud is folded into
65477@@ -12,11 +17,6 @@
65478 */
65479 typedef struct { pgd_t pgd; } pud_t;
65480
65481-#define PUD_SHIFT PGDIR_SHIFT
65482-#define PTRS_PER_PUD 1
65483-#define PUD_SIZE (1UL << PUD_SHIFT)
65484-#define PUD_MASK (~(PUD_SIZE-1))
65485-
65486 /*
65487 * The "pgd_xxx()" functions here are trivial for a folded two-level
65488 * setup: the pud is never bad, and a pud always exists (as it's folded
65489diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
65490index e2bd73e..fea8ed3 100644
65491--- a/include/asm-generic/pgtable.h
65492+++ b/include/asm-generic/pgtable.h
65493@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
65494 unsigned long size);
65495 #endif
65496
65497+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
65498+static inline unsigned long pax_open_kernel(void) { return 0; }
65499+#endif
65500+
65501+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
65502+static inline unsigned long pax_close_kernel(void) { return 0; }
65503+#endif
65504+
65505 #endif /* !__ASSEMBLY__ */
65506
65507 #endif /* _ASM_GENERIC_PGTABLE_H */
65508diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
65509index b6e818f..21aa58a 100644
65510--- a/include/asm-generic/vmlinux.lds.h
65511+++ b/include/asm-generic/vmlinux.lds.h
65512@@ -199,6 +199,7 @@
65513 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
65514 VMLINUX_SYMBOL(__start_rodata) = .; \
65515 *(.rodata) *(.rodata.*) \
65516+ *(.data.read_only) \
65517 *(__vermagic) /* Kernel version magic */ \
65518 *(__markers_strings) /* Markers: strings */ \
65519 *(__tracepoints_strings)/* Tracepoints: strings */ \
65520@@ -656,22 +657,24 @@
65521 * section in the linker script will go there too. @phdr should have
65522 * a leading colon.
65523 *
65524- * Note that this macros defines __per_cpu_load as an absolute symbol.
65525+ * Note that this macros defines per_cpu_load as an absolute symbol.
65526 * If there is no need to put the percpu section at a predetermined
65527 * address, use PERCPU().
65528 */
65529 #define PERCPU_VADDR(vaddr, phdr) \
65530- VMLINUX_SYMBOL(__per_cpu_load) = .; \
65531- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
65532+ per_cpu_load = .; \
65533+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
65534 - LOAD_OFFSET) { \
65535+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
65536 VMLINUX_SYMBOL(__per_cpu_start) = .; \
65537 *(.data.percpu.first) \
65538- *(.data.percpu.page_aligned) \
65539 *(.data.percpu) \
65540+ . = ALIGN(PAGE_SIZE); \
65541+ *(.data.percpu.page_aligned) \
65542 *(.data.percpu.shared_aligned) \
65543 VMLINUX_SYMBOL(__per_cpu_end) = .; \
65544 } phdr \
65545- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
65546+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
65547
65548 /**
65549 * PERCPU - define output section for percpu area, simple version
65550diff --git a/include/drm/drmP.h b/include/drm/drmP.h
65551index ebab6a6..351dba1 100644
65552--- a/include/drm/drmP.h
65553+++ b/include/drm/drmP.h
65554@@ -71,6 +71,7 @@
65555 #include <linux/workqueue.h>
65556 #include <linux/poll.h>
65557 #include <asm/pgalloc.h>
65558+#include <asm/local.h>
65559 #include "drm.h"
65560
65561 #include <linux/idr.h>
65562@@ -814,7 +815,7 @@ struct drm_driver {
65563 void (*vgaarb_irq)(struct drm_device *dev, bool state);
65564
65565 /* Driver private ops for this object */
65566- struct vm_operations_struct *gem_vm_ops;
65567+ const struct vm_operations_struct *gem_vm_ops;
65568
65569 int major;
65570 int minor;
65571@@ -917,7 +918,7 @@ struct drm_device {
65572
65573 /** \name Usage Counters */
65574 /*@{ */
65575- int open_count; /**< Outstanding files open */
65576+ local_t open_count; /**< Outstanding files open */
65577 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
65578 atomic_t vma_count; /**< Outstanding vma areas open */
65579 int buf_use; /**< Buffers in use -- cannot alloc */
65580@@ -928,7 +929,7 @@ struct drm_device {
65581 /*@{ */
65582 unsigned long counters;
65583 enum drm_stat_type types[15];
65584- atomic_t counts[15];
65585+ atomic_unchecked_t counts[15];
65586 /*@} */
65587
65588 struct list_head filelist;
65589@@ -1016,7 +1017,7 @@ struct drm_device {
65590 struct pci_controller *hose;
65591 #endif
65592 struct drm_sg_mem *sg; /**< Scatter gather memory */
65593- unsigned int num_crtcs; /**< Number of CRTCs on this device */
65594+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
65595 void *dev_private; /**< device private data */
65596 void *mm_private;
65597 struct address_space *dev_mapping;
65598@@ -1042,11 +1043,11 @@ struct drm_device {
65599 spinlock_t object_name_lock;
65600 struct idr object_name_idr;
65601 atomic_t object_count;
65602- atomic_t object_memory;
65603+ atomic_unchecked_t object_memory;
65604 atomic_t pin_count;
65605- atomic_t pin_memory;
65606+ atomic_unchecked_t pin_memory;
65607 atomic_t gtt_count;
65608- atomic_t gtt_memory;
65609+ atomic_unchecked_t gtt_memory;
65610 uint32_t gtt_total;
65611 uint32_t invalidate_domains; /* domains pending invalidation */
65612 uint32_t flush_domains; /* domains pending flush */
65613diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
65614index b29e201..3413cc9 100644
65615--- a/include/drm/drm_crtc_helper.h
65616+++ b/include/drm/drm_crtc_helper.h
65617@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
65618
65619 /* reload the current crtc LUT */
65620 void (*load_lut)(struct drm_crtc *crtc);
65621-};
65622+} __no_const;
65623
65624 struct drm_encoder_helper_funcs {
65625 void (*dpms)(struct drm_encoder *encoder, int mode);
65626@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
65627 struct drm_connector *connector);
65628 /* disable encoder when not in use - more explicit than dpms off */
65629 void (*disable)(struct drm_encoder *encoder);
65630-};
65631+} __no_const;
65632
65633 struct drm_connector_helper_funcs {
65634 int (*get_modes)(struct drm_connector *connector);
65635diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
65636index b199170..6f9e64c 100644
65637--- a/include/drm/ttm/ttm_memory.h
65638+++ b/include/drm/ttm/ttm_memory.h
65639@@ -47,7 +47,7 @@
65640
65641 struct ttm_mem_shrink {
65642 int (*do_shrink) (struct ttm_mem_shrink *);
65643-};
65644+} __no_const;
65645
65646 /**
65647 * struct ttm_mem_global - Global memory accounting structure.
65648diff --git a/include/linux/a.out.h b/include/linux/a.out.h
65649index e86dfca..40cc55f 100644
65650--- a/include/linux/a.out.h
65651+++ b/include/linux/a.out.h
65652@@ -39,6 +39,14 @@ enum machine_type {
65653 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
65654 };
65655
65656+/* Constants for the N_FLAGS field */
65657+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
65658+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
65659+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
65660+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
65661+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
65662+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
65663+
65664 #if !defined (N_MAGIC)
65665 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
65666 #endif
65667diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
65668index 817b237..62c10bc 100644
65669--- a/include/linux/atmdev.h
65670+++ b/include/linux/atmdev.h
65671@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
65672 #endif
65673
65674 struct k_atm_aal_stats {
65675-#define __HANDLE_ITEM(i) atomic_t i
65676+#define __HANDLE_ITEM(i) atomic_unchecked_t i
65677 __AAL_STAT_ITEMS
65678 #undef __HANDLE_ITEM
65679 };
65680diff --git a/include/linux/backlight.h b/include/linux/backlight.h
65681index 0f5f578..8c4f884 100644
65682--- a/include/linux/backlight.h
65683+++ b/include/linux/backlight.h
65684@@ -36,18 +36,18 @@ struct backlight_device;
65685 struct fb_info;
65686
65687 struct backlight_ops {
65688- unsigned int options;
65689+ const unsigned int options;
65690
65691 #define BL_CORE_SUSPENDRESUME (1 << 0)
65692
65693 /* Notify the backlight driver some property has changed */
65694- int (*update_status)(struct backlight_device *);
65695+ int (* const update_status)(struct backlight_device *);
65696 /* Return the current backlight brightness (accounting for power,
65697 fb_blank etc.) */
65698- int (*get_brightness)(struct backlight_device *);
65699+ int (* const get_brightness)(struct backlight_device *);
65700 /* Check if given framebuffer device is the one bound to this backlight;
65701 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
65702- int (*check_fb)(struct fb_info *);
65703+ int (* const check_fb)(struct fb_info *);
65704 };
65705
65706 /* This structure defines all the properties of a backlight */
65707@@ -86,7 +86,7 @@ struct backlight_device {
65708 registered this device has been unloaded, and if class_get_devdata()
65709 points to something in the body of that driver, it is also invalid. */
65710 struct mutex ops_lock;
65711- struct backlight_ops *ops;
65712+ const struct backlight_ops *ops;
65713
65714 /* The framebuffer notifier block */
65715 struct notifier_block fb_notif;
65716@@ -103,7 +103,7 @@ static inline void backlight_update_status(struct backlight_device *bd)
65717 }
65718
65719 extern struct backlight_device *backlight_device_register(const char *name,
65720- struct device *dev, void *devdata, struct backlight_ops *ops);
65721+ struct device *dev, void *devdata, const struct backlight_ops *ops);
65722 extern void backlight_device_unregister(struct backlight_device *bd);
65723 extern void backlight_force_update(struct backlight_device *bd,
65724 enum backlight_update_reason reason);
65725diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
65726index a3d802e..482f69c 100644
65727--- a/include/linux/binfmts.h
65728+++ b/include/linux/binfmts.h
65729@@ -83,6 +83,7 @@ struct linux_binfmt {
65730 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
65731 int (*load_shlib)(struct file *);
65732 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
65733+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
65734 unsigned long min_coredump; /* minimal dump size */
65735 int hasvdso;
65736 };
65737diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
65738index 5eb6cb0..a2906d2 100644
65739--- a/include/linux/blkdev.h
65740+++ b/include/linux/blkdev.h
65741@@ -1281,7 +1281,7 @@ struct block_device_operations {
65742 int (*revalidate_disk) (struct gendisk *);
65743 int (*getgeo)(struct block_device *, struct hd_geometry *);
65744 struct module *owner;
65745-};
65746+} __do_const;
65747
65748 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
65749 unsigned long);
65750diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
65751index 3b73b99..629d21b 100644
65752--- a/include/linux/blktrace_api.h
65753+++ b/include/linux/blktrace_api.h
65754@@ -160,7 +160,7 @@ struct blk_trace {
65755 struct dentry *dir;
65756 struct dentry *dropped_file;
65757 struct dentry *msg_file;
65758- atomic_t dropped;
65759+ atomic_unchecked_t dropped;
65760 };
65761
65762 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
65763diff --git a/include/linux/byteorder/little_endian.h b/include/linux/byteorder/little_endian.h
65764index 83195fb..0b0f77d 100644
65765--- a/include/linux/byteorder/little_endian.h
65766+++ b/include/linux/byteorder/little_endian.h
65767@@ -42,51 +42,51 @@
65768
65769 static inline __le64 __cpu_to_le64p(const __u64 *p)
65770 {
65771- return (__force __le64)*p;
65772+ return (__force const __le64)*p;
65773 }
65774 static inline __u64 __le64_to_cpup(const __le64 *p)
65775 {
65776- return (__force __u64)*p;
65777+ return (__force const __u64)*p;
65778 }
65779 static inline __le32 __cpu_to_le32p(const __u32 *p)
65780 {
65781- return (__force __le32)*p;
65782+ return (__force const __le32)*p;
65783 }
65784 static inline __u32 __le32_to_cpup(const __le32 *p)
65785 {
65786- return (__force __u32)*p;
65787+ return (__force const __u32)*p;
65788 }
65789 static inline __le16 __cpu_to_le16p(const __u16 *p)
65790 {
65791- return (__force __le16)*p;
65792+ return (__force const __le16)*p;
65793 }
65794 static inline __u16 __le16_to_cpup(const __le16 *p)
65795 {
65796- return (__force __u16)*p;
65797+ return (__force const __u16)*p;
65798 }
65799 static inline __be64 __cpu_to_be64p(const __u64 *p)
65800 {
65801- return (__force __be64)__swab64p(p);
65802+ return (__force const __be64)__swab64p(p);
65803 }
65804 static inline __u64 __be64_to_cpup(const __be64 *p)
65805 {
65806- return __swab64p((__u64 *)p);
65807+ return __swab64p((const __u64 *)p);
65808 }
65809 static inline __be32 __cpu_to_be32p(const __u32 *p)
65810 {
65811- return (__force __be32)__swab32p(p);
65812+ return (__force const __be32)__swab32p(p);
65813 }
65814 static inline __u32 __be32_to_cpup(const __be32 *p)
65815 {
65816- return __swab32p((__u32 *)p);
65817+ return __swab32p((const __u32 *)p);
65818 }
65819 static inline __be16 __cpu_to_be16p(const __u16 *p)
65820 {
65821- return (__force __be16)__swab16p(p);
65822+ return (__force const __be16)__swab16p(p);
65823 }
65824 static inline __u16 __be16_to_cpup(const __be16 *p)
65825 {
65826- return __swab16p((__u16 *)p);
65827+ return __swab16p((const __u16 *)p);
65828 }
65829 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
65830 #define __le64_to_cpus(x) do { (void)(x); } while (0)
65831diff --git a/include/linux/cache.h b/include/linux/cache.h
65832index 97e2488..e7576b9 100644
65833--- a/include/linux/cache.h
65834+++ b/include/linux/cache.h
65835@@ -16,6 +16,10 @@
65836 #define __read_mostly
65837 #endif
65838
65839+#ifndef __read_only
65840+#define __read_only __read_mostly
65841+#endif
65842+
65843 #ifndef ____cacheline_aligned
65844 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
65845 #endif
65846diff --git a/include/linux/capability.h b/include/linux/capability.h
65847index c8f2a5f7..1618a5c 100644
65848--- a/include/linux/capability.h
65849+++ b/include/linux/capability.h
65850@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff_set;
65851 (security_real_capable_noaudit((t), (cap)) == 0)
65852
65853 extern int capable(int cap);
65854+int capable_nolog(int cap);
65855
65856 /* audit system wants to get cap info from files as well */
65857 struct dentry;
65858diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
65859index 450fa59..86019fb 100644
65860--- a/include/linux/compiler-gcc4.h
65861+++ b/include/linux/compiler-gcc4.h
65862@@ -36,4 +36,16 @@
65863 the kernel context */
65864 #define __cold __attribute__((__cold__))
65865
65866+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
65867+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
65868+#define __bos0(ptr) __bos((ptr), 0)
65869+#define __bos1(ptr) __bos((ptr), 1)
65870+
65871+#if __GNUC_MINOR__ >= 5
65872+#ifdef CONSTIFY_PLUGIN
65873+#define __no_const __attribute__((no_const))
65874+#define __do_const __attribute__((do_const))
65875+#endif
65876+#endif
65877+
65878 #endif
65879diff --git a/include/linux/compiler.h b/include/linux/compiler.h
65880index 04fb513..fd6477b 100644
65881--- a/include/linux/compiler.h
65882+++ b/include/linux/compiler.h
65883@@ -5,11 +5,14 @@
65884
65885 #ifdef __CHECKER__
65886 # define __user __attribute__((noderef, address_space(1)))
65887+# define __force_user __force __user
65888 # define __kernel /* default address space */
65889+# define __force_kernel __force __kernel
65890 # define __safe __attribute__((safe))
65891 # define __force __attribute__((force))
65892 # define __nocast __attribute__((nocast))
65893 # define __iomem __attribute__((noderef, address_space(2)))
65894+# define __force_iomem __force __iomem
65895 # define __acquires(x) __attribute__((context(x,0,1)))
65896 # define __releases(x) __attribute__((context(x,1,0)))
65897 # define __acquire(x) __context__(x,1)
65898@@ -17,13 +20,34 @@
65899 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
65900 extern void __chk_user_ptr(const volatile void __user *);
65901 extern void __chk_io_ptr(const volatile void __iomem *);
65902+#elif defined(CHECKER_PLUGIN)
65903+//# define __user
65904+//# define __force_user
65905+//# define __kernel
65906+//# define __force_kernel
65907+# define __safe
65908+# define __force
65909+# define __nocast
65910+# define __iomem
65911+# define __force_iomem
65912+# define __chk_user_ptr(x) (void)0
65913+# define __chk_io_ptr(x) (void)0
65914+# define __builtin_warning(x, y...) (1)
65915+# define __acquires(x)
65916+# define __releases(x)
65917+# define __acquire(x) (void)0
65918+# define __release(x) (void)0
65919+# define __cond_lock(x,c) (c)
65920 #else
65921 # define __user
65922+# define __force_user
65923 # define __kernel
65924+# define __force_kernel
65925 # define __safe
65926 # define __force
65927 # define __nocast
65928 # define __iomem
65929+# define __force_iomem
65930 # define __chk_user_ptr(x) (void)0
65931 # define __chk_io_ptr(x) (void)0
65932 # define __builtin_warning(x, y...) (1)
65933@@ -247,6 +271,14 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65934 # define __attribute_const__ /* unimplemented */
65935 #endif
65936
65937+#ifndef __no_const
65938+# define __no_const
65939+#endif
65940+
65941+#ifndef __do_const
65942+# define __do_const
65943+#endif
65944+
65945 /*
65946 * Tell gcc if a function is cold. The compiler will assume any path
65947 * directly leading to the call is unlikely.
65948@@ -256,6 +288,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65949 #define __cold
65950 #endif
65951
65952+#ifndef __alloc_size
65953+#define __alloc_size(...)
65954+#endif
65955+
65956+#ifndef __bos
65957+#define __bos(ptr, arg)
65958+#endif
65959+
65960+#ifndef __bos0
65961+#define __bos0(ptr)
65962+#endif
65963+
65964+#ifndef __bos1
65965+#define __bos1(ptr)
65966+#endif
65967+
65968 /* Simple shorthand for a section definition */
65969 #ifndef __section
65970 # define __section(S) __attribute__ ((__section__(#S)))
65971@@ -278,6 +326,7 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
65972 * use is to mediate communication between process-level code and irq/NMI
65973 * handlers, all running on the same CPU.
65974 */
65975-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
65976+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
65977+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
65978
65979 #endif /* __LINUX_COMPILER_H */
65980diff --git a/include/linux/crypto.h b/include/linux/crypto.h
65981index fd92988..a3164bd 100644
65982--- a/include/linux/crypto.h
65983+++ b/include/linux/crypto.h
65984@@ -394,7 +394,7 @@ struct cipher_tfm {
65985 const u8 *key, unsigned int keylen);
65986 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65987 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
65988-};
65989+} __no_const;
65990
65991 struct hash_tfm {
65992 int (*init)(struct hash_desc *desc);
65993@@ -415,13 +415,13 @@ struct compress_tfm {
65994 int (*cot_decompress)(struct crypto_tfm *tfm,
65995 const u8 *src, unsigned int slen,
65996 u8 *dst, unsigned int *dlen);
65997-};
65998+} __no_const;
65999
66000 struct rng_tfm {
66001 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
66002 unsigned int dlen);
66003 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
66004-};
66005+} __no_const;
66006
66007 #define crt_ablkcipher crt_u.ablkcipher
66008 #define crt_aead crt_u.aead
66009diff --git a/include/linux/dcache.h b/include/linux/dcache.h
66010index 30b93b2..cd7a8db 100644
66011--- a/include/linux/dcache.h
66012+++ b/include/linux/dcache.h
66013@@ -119,6 +119,8 @@ struct dentry {
66014 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
66015 };
66016
66017+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
66018+
66019 /*
66020 * dentry->d_lock spinlock nesting subclasses:
66021 *
66022diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
66023index 3e9bd6a..f4e1aa0 100644
66024--- a/include/linux/decompress/mm.h
66025+++ b/include/linux/decompress/mm.h
66026@@ -78,7 +78,7 @@ static void free(void *where)
66027 * warnings when not needed (indeed large_malloc / large_free are not
66028 * needed by inflate */
66029
66030-#define malloc(a) kmalloc(a, GFP_KERNEL)
66031+#define malloc(a) kmalloc((a), GFP_KERNEL)
66032 #define free(a) kfree(a)
66033
66034 #define large_malloc(a) vmalloc(a)
66035diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
66036index 91b7618..92a93d32 100644
66037--- a/include/linux/dma-mapping.h
66038+++ b/include/linux/dma-mapping.h
66039@@ -16,51 +16,51 @@ enum dma_data_direction {
66040 };
66041
66042 struct dma_map_ops {
66043- void* (*alloc_coherent)(struct device *dev, size_t size,
66044+ void* (* const alloc_coherent)(struct device *dev, size_t size,
66045 dma_addr_t *dma_handle, gfp_t gfp);
66046- void (*free_coherent)(struct device *dev, size_t size,
66047+ void (* const free_coherent)(struct device *dev, size_t size,
66048 void *vaddr, dma_addr_t dma_handle);
66049- dma_addr_t (*map_page)(struct device *dev, struct page *page,
66050+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
66051 unsigned long offset, size_t size,
66052 enum dma_data_direction dir,
66053 struct dma_attrs *attrs);
66054- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
66055+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
66056 size_t size, enum dma_data_direction dir,
66057 struct dma_attrs *attrs);
66058- int (*map_sg)(struct device *dev, struct scatterlist *sg,
66059+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
66060 int nents, enum dma_data_direction dir,
66061 struct dma_attrs *attrs);
66062- void (*unmap_sg)(struct device *dev,
66063+ void (* const unmap_sg)(struct device *dev,
66064 struct scatterlist *sg, int nents,
66065 enum dma_data_direction dir,
66066 struct dma_attrs *attrs);
66067- void (*sync_single_for_cpu)(struct device *dev,
66068+ void (* const sync_single_for_cpu)(struct device *dev,
66069 dma_addr_t dma_handle, size_t size,
66070 enum dma_data_direction dir);
66071- void (*sync_single_for_device)(struct device *dev,
66072+ void (* const sync_single_for_device)(struct device *dev,
66073 dma_addr_t dma_handle, size_t size,
66074 enum dma_data_direction dir);
66075- void (*sync_single_range_for_cpu)(struct device *dev,
66076+ void (* const sync_single_range_for_cpu)(struct device *dev,
66077 dma_addr_t dma_handle,
66078 unsigned long offset,
66079 size_t size,
66080 enum dma_data_direction dir);
66081- void (*sync_single_range_for_device)(struct device *dev,
66082+ void (* const sync_single_range_for_device)(struct device *dev,
66083 dma_addr_t dma_handle,
66084 unsigned long offset,
66085 size_t size,
66086 enum dma_data_direction dir);
66087- void (*sync_sg_for_cpu)(struct device *dev,
66088+ void (* const sync_sg_for_cpu)(struct device *dev,
66089 struct scatterlist *sg, int nents,
66090 enum dma_data_direction dir);
66091- void (*sync_sg_for_device)(struct device *dev,
66092+ void (* const sync_sg_for_device)(struct device *dev,
66093 struct scatterlist *sg, int nents,
66094 enum dma_data_direction dir);
66095- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
66096- int (*dma_supported)(struct device *dev, u64 mask);
66097+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
66098+ int (* const dma_supported)(struct device *dev, u64 mask);
66099 int (*set_dma_mask)(struct device *dev, u64 mask);
66100 int is_phys;
66101-};
66102+} __do_const;
66103
66104 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
66105
66106diff --git a/include/linux/dst.h b/include/linux/dst.h
66107index e26fed8..b976d9f 100644
66108--- a/include/linux/dst.h
66109+++ b/include/linux/dst.h
66110@@ -380,7 +380,7 @@ struct dst_node
66111 struct thread_pool *pool;
66112
66113 /* Transaction IDs live here */
66114- atomic_long_t gen;
66115+ atomic_long_unchecked_t gen;
66116
66117 /*
66118 * How frequently and how many times transaction
66119diff --git a/include/linux/elf.h b/include/linux/elf.h
66120index 90a4ed0..d652617 100644
66121--- a/include/linux/elf.h
66122+++ b/include/linux/elf.h
66123@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
66124 #define PT_GNU_EH_FRAME 0x6474e550
66125
66126 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
66127+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
66128+
66129+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
66130+
66131+/* Constants for the e_flags field */
66132+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
66133+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
66134+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
66135+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
66136+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
66137+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
66138
66139 /* These constants define the different elf file types */
66140 #define ET_NONE 0
66141@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
66142 #define DT_DEBUG 21
66143 #define DT_TEXTREL 22
66144 #define DT_JMPREL 23
66145+#define DT_FLAGS 30
66146+ #define DF_TEXTREL 0x00000004
66147 #define DT_ENCODING 32
66148 #define OLD_DT_LOOS 0x60000000
66149 #define DT_LOOS 0x6000000d
66150@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
66151 #define PF_W 0x2
66152 #define PF_X 0x1
66153
66154+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
66155+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
66156+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
66157+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
66158+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
66159+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
66160+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
66161+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
66162+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
66163+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
66164+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
66165+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
66166+
66167 typedef struct elf32_phdr{
66168 Elf32_Word p_type;
66169 Elf32_Off p_offset;
66170@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
66171 #define EI_OSABI 7
66172 #define EI_PAD 8
66173
66174+#define EI_PAX 14
66175+
66176 #define ELFMAG0 0x7f /* EI_MAG */
66177 #define ELFMAG1 'E'
66178 #define ELFMAG2 'L'
66179@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
66180 #define elf_phdr elf32_phdr
66181 #define elf_note elf32_note
66182 #define elf_addr_t Elf32_Off
66183+#define elf_dyn Elf32_Dyn
66184
66185 #else
66186
66187@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
66188 #define elf_phdr elf64_phdr
66189 #define elf_note elf64_note
66190 #define elf_addr_t Elf64_Off
66191+#define elf_dyn Elf64_Dyn
66192
66193 #endif
66194
66195diff --git a/include/linux/fs.h b/include/linux/fs.h
66196index 1b9a47a..6fe2934 100644
66197--- a/include/linux/fs.h
66198+++ b/include/linux/fs.h
66199@@ -568,41 +568,41 @@ typedef int (*read_actor_t)(read_descriptor_t *, struct page *,
66200 unsigned long, unsigned long);
66201
66202 struct address_space_operations {
66203- int (*writepage)(struct page *page, struct writeback_control *wbc);
66204- int (*readpage)(struct file *, struct page *);
66205- void (*sync_page)(struct page *);
66206+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
66207+ int (* const readpage)(struct file *, struct page *);
66208+ void (* const sync_page)(struct page *);
66209
66210 /* Write back some dirty pages from this mapping. */
66211- int (*writepages)(struct address_space *, struct writeback_control *);
66212+ int (* const writepages)(struct address_space *, struct writeback_control *);
66213
66214 /* Set a page dirty. Return true if this dirtied it */
66215- int (*set_page_dirty)(struct page *page);
66216+ int (* const set_page_dirty)(struct page *page);
66217
66218- int (*readpages)(struct file *filp, struct address_space *mapping,
66219+ int (* const readpages)(struct file *filp, struct address_space *mapping,
66220 struct list_head *pages, unsigned nr_pages);
66221
66222- int (*write_begin)(struct file *, struct address_space *mapping,
66223+ int (* const write_begin)(struct file *, struct address_space *mapping,
66224 loff_t pos, unsigned len, unsigned flags,
66225 struct page **pagep, void **fsdata);
66226- int (*write_end)(struct file *, struct address_space *mapping,
66227+ int (* const write_end)(struct file *, struct address_space *mapping,
66228 loff_t pos, unsigned len, unsigned copied,
66229 struct page *page, void *fsdata);
66230
66231 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
66232- sector_t (*bmap)(struct address_space *, sector_t);
66233- void (*invalidatepage) (struct page *, unsigned long);
66234- int (*releasepage) (struct page *, gfp_t);
66235- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
66236+ sector_t (* const bmap)(struct address_space *, sector_t);
66237+ void (* const invalidatepage) (struct page *, unsigned long);
66238+ int (* const releasepage) (struct page *, gfp_t);
66239+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
66240 loff_t offset, unsigned long nr_segs);
66241- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
66242+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
66243 void **, unsigned long *);
66244 /* migrate the contents of a page to the specified target */
66245- int (*migratepage) (struct address_space *,
66246+ int (* const migratepage) (struct address_space *,
66247 struct page *, struct page *);
66248- int (*launder_page) (struct page *);
66249- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
66250+ int (* const launder_page) (struct page *);
66251+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
66252 unsigned long);
66253- int (*error_remove_page)(struct address_space *, struct page *);
66254+ int (* const error_remove_page)(struct address_space *, struct page *);
66255 };
66256
66257 /*
66258@@ -1031,19 +1031,19 @@ static inline int file_check_writeable(struct file *filp)
66259 typedef struct files_struct *fl_owner_t;
66260
66261 struct file_lock_operations {
66262- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66263- void (*fl_release_private)(struct file_lock *);
66264+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66265+ void (* const fl_release_private)(struct file_lock *);
66266 };
66267
66268 struct lock_manager_operations {
66269- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
66270- void (*fl_notify)(struct file_lock *); /* unblock callback */
66271- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
66272- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
66273- void (*fl_release_private)(struct file_lock *);
66274- void (*fl_break)(struct file_lock *);
66275- int (*fl_mylease)(struct file_lock *, struct file_lock *);
66276- int (*fl_change)(struct file_lock **, int);
66277+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
66278+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
66279+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
66280+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
66281+ void (* const fl_release_private)(struct file_lock *);
66282+ void (* const fl_break)(struct file_lock *);
66283+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
66284+ int (* const fl_change)(struct file_lock **, int);
66285 };
66286
66287 struct lock_manager {
66288@@ -1442,7 +1442,7 @@ struct fiemap_extent_info {
66289 unsigned int fi_flags; /* Flags as passed from user */
66290 unsigned int fi_extents_mapped; /* Number of mapped extents */
66291 unsigned int fi_extents_max; /* Size of fiemap_extent array */
66292- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
66293+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
66294 * array */
66295 };
66296 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
66297@@ -1512,7 +1512,8 @@ struct file_operations {
66298 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int);
66299 ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int);
66300 int (*setlease)(struct file *, long, struct file_lock **);
66301-};
66302+} __do_const;
66303+typedef struct file_operations __no_const file_operations_no_const;
66304
66305 struct inode_operations {
66306 int (*create) (struct inode *,struct dentry *,int, struct nameidata *);
66307@@ -1559,30 +1560,30 @@ extern ssize_t vfs_writev(struct file *, const struct iovec __user *,
66308 unsigned long, loff_t *);
66309
66310 struct super_operations {
66311- struct inode *(*alloc_inode)(struct super_block *sb);
66312- void (*destroy_inode)(struct inode *);
66313+ struct inode *(* const alloc_inode)(struct super_block *sb);
66314+ void (* const destroy_inode)(struct inode *);
66315
66316- void (*dirty_inode) (struct inode *);
66317- int (*write_inode) (struct inode *, int);
66318- void (*drop_inode) (struct inode *);
66319- void (*delete_inode) (struct inode *);
66320- void (*put_super) (struct super_block *);
66321- void (*write_super) (struct super_block *);
66322- int (*sync_fs)(struct super_block *sb, int wait);
66323- int (*freeze_fs) (struct super_block *);
66324- int (*unfreeze_fs) (struct super_block *);
66325- int (*statfs) (struct dentry *, struct kstatfs *);
66326- int (*remount_fs) (struct super_block *, int *, char *);
66327- void (*clear_inode) (struct inode *);
66328- void (*umount_begin) (struct super_block *);
66329+ void (* const dirty_inode) (struct inode *);
66330+ int (* const write_inode) (struct inode *, int);
66331+ void (* const drop_inode) (struct inode *);
66332+ void (* const delete_inode) (struct inode *);
66333+ void (* const put_super) (struct super_block *);
66334+ void (* const write_super) (struct super_block *);
66335+ int (* const sync_fs)(struct super_block *sb, int wait);
66336+ int (* const freeze_fs) (struct super_block *);
66337+ int (* const unfreeze_fs) (struct super_block *);
66338+ int (* const statfs) (struct dentry *, struct kstatfs *);
66339+ int (* const remount_fs) (struct super_block *, int *, char *);
66340+ void (* const clear_inode) (struct inode *);
66341+ void (* const umount_begin) (struct super_block *);
66342
66343- int (*show_options)(struct seq_file *, struct vfsmount *);
66344- int (*show_stats)(struct seq_file *, struct vfsmount *);
66345+ int (* const show_options)(struct seq_file *, struct vfsmount *);
66346+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
66347 #ifdef CONFIG_QUOTA
66348- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
66349- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66350+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
66351+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
66352 #endif
66353- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66354+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
66355 };
66356
66357 /*
66358diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
66359index 78a05bf..2a7d3e1 100644
66360--- a/include/linux/fs_struct.h
66361+++ b/include/linux/fs_struct.h
66362@@ -4,7 +4,7 @@
66363 #include <linux/path.h>
66364
66365 struct fs_struct {
66366- int users;
66367+ atomic_t users;
66368 rwlock_t lock;
66369 int umask;
66370 int in_exec;
66371diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
66372index 7be0c6f..2f63a2b 100644
66373--- a/include/linux/fscache-cache.h
66374+++ b/include/linux/fscache-cache.h
66375@@ -116,7 +116,7 @@ struct fscache_operation {
66376 #endif
66377 };
66378
66379-extern atomic_t fscache_op_debug_id;
66380+extern atomic_unchecked_t fscache_op_debug_id;
66381 extern const struct slow_work_ops fscache_op_slow_work_ops;
66382
66383 extern void fscache_enqueue_operation(struct fscache_operation *);
66384@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
66385 fscache_operation_release_t release)
66386 {
66387 atomic_set(&op->usage, 1);
66388- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
66389+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
66390 op->release = release;
66391 INIT_LIST_HEAD(&op->pend_link);
66392 fscache_set_op_state(op, "Init");
66393diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
66394index 4d6f47b..00bcedb 100644
66395--- a/include/linux/fsnotify_backend.h
66396+++ b/include/linux/fsnotify_backend.h
66397@@ -86,6 +86,7 @@ struct fsnotify_ops {
66398 void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
66399 void (*free_event_priv)(struct fsnotify_event_private_data *priv);
66400 };
66401+typedef struct fsnotify_ops __no_const fsnotify_ops_no_const;
66402
66403 /*
66404 * A group is a "thing" that wants to receive notification about filesystem
66405diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
66406index 4ec5e67..42f1eb9 100644
66407--- a/include/linux/ftrace_event.h
66408+++ b/include/linux/ftrace_event.h
66409@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftrace_event_call *call,
66410 int filter_type);
66411 extern int trace_define_common_fields(struct ftrace_event_call *call);
66412
66413-#define is_signed_type(type) (((type)(-1)) < 0)
66414+#define is_signed_type(type) (((type)(-1)) < (type)1)
66415
66416 int trace_set_clr_event(const char *system, const char *event, int set);
66417
66418diff --git a/include/linux/genhd.h b/include/linux/genhd.h
66419index 297df45..b6a74ff 100644
66420--- a/include/linux/genhd.h
66421+++ b/include/linux/genhd.h
66422@@ -161,7 +161,7 @@ struct gendisk {
66423
66424 struct timer_rand_state *random;
66425
66426- atomic_t sync_io; /* RAID */
66427+ atomic_unchecked_t sync_io; /* RAID */
66428 struct work_struct async_notify;
66429 #ifdef CONFIG_BLK_DEV_INTEGRITY
66430 struct blk_integrity *integrity;
66431diff --git a/include/linux/gracl.h b/include/linux/gracl.h
66432new file mode 100644
66433index 0000000..0dc3943
66434--- /dev/null
66435+++ b/include/linux/gracl.h
66436@@ -0,0 +1,317 @@
66437+#ifndef GR_ACL_H
66438+#define GR_ACL_H
66439+
66440+#include <linux/grdefs.h>
66441+#include <linux/resource.h>
66442+#include <linux/capability.h>
66443+#include <linux/dcache.h>
66444+#include <asm/resource.h>
66445+
66446+/* Major status information */
66447+
66448+#define GR_VERSION "grsecurity 2.2.2"
66449+#define GRSECURITY_VERSION 0x2202
66450+
66451+enum {
66452+ GR_SHUTDOWN = 0,
66453+ GR_ENABLE = 1,
66454+ GR_SPROLE = 2,
66455+ GR_RELOAD = 3,
66456+ GR_SEGVMOD = 4,
66457+ GR_STATUS = 5,
66458+ GR_UNSPROLE = 6,
66459+ GR_PASSSET = 7,
66460+ GR_SPROLEPAM = 8,
66461+};
66462+
66463+/* Password setup definitions
66464+ * kernel/grhash.c */
66465+enum {
66466+ GR_PW_LEN = 128,
66467+ GR_SALT_LEN = 16,
66468+ GR_SHA_LEN = 32,
66469+};
66470+
66471+enum {
66472+ GR_SPROLE_LEN = 64,
66473+};
66474+
66475+enum {
66476+ GR_NO_GLOB = 0,
66477+ GR_REG_GLOB,
66478+ GR_CREATE_GLOB
66479+};
66480+
66481+#define GR_NLIMITS 32
66482+
66483+/* Begin Data Structures */
66484+
66485+struct sprole_pw {
66486+ unsigned char *rolename;
66487+ unsigned char salt[GR_SALT_LEN];
66488+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
66489+};
66490+
66491+struct name_entry {
66492+ __u32 key;
66493+ ino_t inode;
66494+ dev_t device;
66495+ char *name;
66496+ __u16 len;
66497+ __u8 deleted;
66498+ struct name_entry *prev;
66499+ struct name_entry *next;
66500+};
66501+
66502+struct inodev_entry {
66503+ struct name_entry *nentry;
66504+ struct inodev_entry *prev;
66505+ struct inodev_entry *next;
66506+};
66507+
66508+struct acl_role_db {
66509+ struct acl_role_label **r_hash;
66510+ __u32 r_size;
66511+};
66512+
66513+struct inodev_db {
66514+ struct inodev_entry **i_hash;
66515+ __u32 i_size;
66516+};
66517+
66518+struct name_db {
66519+ struct name_entry **n_hash;
66520+ __u32 n_size;
66521+};
66522+
66523+struct crash_uid {
66524+ uid_t uid;
66525+ unsigned long expires;
66526+};
66527+
66528+struct gr_hash_struct {
66529+ void **table;
66530+ void **nametable;
66531+ void *first;
66532+ __u32 table_size;
66533+ __u32 used_size;
66534+ int type;
66535+};
66536+
66537+/* Userspace Grsecurity ACL data structures */
66538+
66539+struct acl_subject_label {
66540+ char *filename;
66541+ ino_t inode;
66542+ dev_t device;
66543+ __u32 mode;
66544+ kernel_cap_t cap_mask;
66545+ kernel_cap_t cap_lower;
66546+ kernel_cap_t cap_invert_audit;
66547+
66548+ struct rlimit res[GR_NLIMITS];
66549+ __u32 resmask;
66550+
66551+ __u8 user_trans_type;
66552+ __u8 group_trans_type;
66553+ uid_t *user_transitions;
66554+ gid_t *group_transitions;
66555+ __u16 user_trans_num;
66556+ __u16 group_trans_num;
66557+
66558+ __u32 sock_families[2];
66559+ __u32 ip_proto[8];
66560+ __u32 ip_type;
66561+ struct acl_ip_label **ips;
66562+ __u32 ip_num;
66563+ __u32 inaddr_any_override;
66564+
66565+ __u32 crashes;
66566+ unsigned long expires;
66567+
66568+ struct acl_subject_label *parent_subject;
66569+ struct gr_hash_struct *hash;
66570+ struct acl_subject_label *prev;
66571+ struct acl_subject_label *next;
66572+
66573+ struct acl_object_label **obj_hash;
66574+ __u32 obj_hash_size;
66575+ __u16 pax_flags;
66576+};
66577+
66578+struct role_allowed_ip {
66579+ __u32 addr;
66580+ __u32 netmask;
66581+
66582+ struct role_allowed_ip *prev;
66583+ struct role_allowed_ip *next;
66584+};
66585+
66586+struct role_transition {
66587+ char *rolename;
66588+
66589+ struct role_transition *prev;
66590+ struct role_transition *next;
66591+};
66592+
66593+struct acl_role_label {
66594+ char *rolename;
66595+ uid_t uidgid;
66596+ __u16 roletype;
66597+
66598+ __u16 auth_attempts;
66599+ unsigned long expires;
66600+
66601+ struct acl_subject_label *root_label;
66602+ struct gr_hash_struct *hash;
66603+
66604+ struct acl_role_label *prev;
66605+ struct acl_role_label *next;
66606+
66607+ struct role_transition *transitions;
66608+ struct role_allowed_ip *allowed_ips;
66609+ uid_t *domain_children;
66610+ __u16 domain_child_num;
66611+
66612+ struct acl_subject_label **subj_hash;
66613+ __u32 subj_hash_size;
66614+};
66615+
66616+struct user_acl_role_db {
66617+ struct acl_role_label **r_table;
66618+ __u32 num_pointers; /* Number of allocations to track */
66619+ __u32 num_roles; /* Number of roles */
66620+ __u32 num_domain_children; /* Number of domain children */
66621+ __u32 num_subjects; /* Number of subjects */
66622+ __u32 num_objects; /* Number of objects */
66623+};
66624+
66625+struct acl_object_label {
66626+ char *filename;
66627+ ino_t inode;
66628+ dev_t device;
66629+ __u32 mode;
66630+
66631+ struct acl_subject_label *nested;
66632+ struct acl_object_label *globbed;
66633+
66634+ /* next two structures not used */
66635+
66636+ struct acl_object_label *prev;
66637+ struct acl_object_label *next;
66638+};
66639+
66640+struct acl_ip_label {
66641+ char *iface;
66642+ __u32 addr;
66643+ __u32 netmask;
66644+ __u16 low, high;
66645+ __u8 mode;
66646+ __u32 type;
66647+ __u32 proto[8];
66648+
66649+ /* next two structures not used */
66650+
66651+ struct acl_ip_label *prev;
66652+ struct acl_ip_label *next;
66653+};
66654+
66655+struct gr_arg {
66656+ struct user_acl_role_db role_db;
66657+ unsigned char pw[GR_PW_LEN];
66658+ unsigned char salt[GR_SALT_LEN];
66659+ unsigned char sum[GR_SHA_LEN];
66660+ unsigned char sp_role[GR_SPROLE_LEN];
66661+ struct sprole_pw *sprole_pws;
66662+ dev_t segv_device;
66663+ ino_t segv_inode;
66664+ uid_t segv_uid;
66665+ __u16 num_sprole_pws;
66666+ __u16 mode;
66667+};
66668+
66669+struct gr_arg_wrapper {
66670+ struct gr_arg *arg;
66671+ __u32 version;
66672+ __u32 size;
66673+};
66674+
66675+struct subject_map {
66676+ struct acl_subject_label *user;
66677+ struct acl_subject_label *kernel;
66678+ struct subject_map *prev;
66679+ struct subject_map *next;
66680+};
66681+
66682+struct acl_subj_map_db {
66683+ struct subject_map **s_hash;
66684+ __u32 s_size;
66685+};
66686+
66687+/* End Data Structures Section */
66688+
66689+/* Hash functions generated by empirical testing by Brad Spengler
66690+ Makes good use of the low bits of the inode. Generally 0-1 times
66691+ in loop for successful match. 0-3 for unsuccessful match.
66692+ Shift/add algorithm with modulus of table size and an XOR*/
66693+
66694+static __inline__ unsigned int
66695+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
66696+{
66697+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
66698+}
66699+
66700+ static __inline__ unsigned int
66701+shash(const struct acl_subject_label *userp, const unsigned int sz)
66702+{
66703+ return ((const unsigned long)userp % sz);
66704+}
66705+
66706+static __inline__ unsigned int
66707+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
66708+{
66709+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
66710+}
66711+
66712+static __inline__ unsigned int
66713+nhash(const char *name, const __u16 len, const unsigned int sz)
66714+{
66715+ return full_name_hash((const unsigned char *)name, len) % sz;
66716+}
66717+
66718+#define FOR_EACH_ROLE_START(role) \
66719+ role = role_list; \
66720+ while (role) {
66721+
66722+#define FOR_EACH_ROLE_END(role) \
66723+ role = role->prev; \
66724+ }
66725+
66726+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
66727+ subj = NULL; \
66728+ iter = 0; \
66729+ while (iter < role->subj_hash_size) { \
66730+ if (subj == NULL) \
66731+ subj = role->subj_hash[iter]; \
66732+ if (subj == NULL) { \
66733+ iter++; \
66734+ continue; \
66735+ }
66736+
66737+#define FOR_EACH_SUBJECT_END(subj,iter) \
66738+ subj = subj->next; \
66739+ if (subj == NULL) \
66740+ iter++; \
66741+ }
66742+
66743+
66744+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
66745+ subj = role->hash->first; \
66746+ while (subj != NULL) {
66747+
66748+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
66749+ subj = subj->next; \
66750+ }
66751+
66752+#endif
66753+
66754diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
66755new file mode 100644
66756index 0000000..323ecf2
66757--- /dev/null
66758+++ b/include/linux/gralloc.h
66759@@ -0,0 +1,9 @@
66760+#ifndef __GRALLOC_H
66761+#define __GRALLOC_H
66762+
66763+void acl_free_all(void);
66764+int acl_alloc_stack_init(unsigned long size);
66765+void *acl_alloc(unsigned long len);
66766+void *acl_alloc_num(unsigned long num, unsigned long len);
66767+
66768+#endif
66769diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
66770new file mode 100644
66771index 0000000..70d6cd5
66772--- /dev/null
66773+++ b/include/linux/grdefs.h
66774@@ -0,0 +1,140 @@
66775+#ifndef GRDEFS_H
66776+#define GRDEFS_H
66777+
66778+/* Begin grsecurity status declarations */
66779+
66780+enum {
66781+ GR_READY = 0x01,
66782+ GR_STATUS_INIT = 0x00 // disabled state
66783+};
66784+
66785+/* Begin ACL declarations */
66786+
66787+/* Role flags */
66788+
66789+enum {
66790+ GR_ROLE_USER = 0x0001,
66791+ GR_ROLE_GROUP = 0x0002,
66792+ GR_ROLE_DEFAULT = 0x0004,
66793+ GR_ROLE_SPECIAL = 0x0008,
66794+ GR_ROLE_AUTH = 0x0010,
66795+ GR_ROLE_NOPW = 0x0020,
66796+ GR_ROLE_GOD = 0x0040,
66797+ GR_ROLE_LEARN = 0x0080,
66798+ GR_ROLE_TPE = 0x0100,
66799+ GR_ROLE_DOMAIN = 0x0200,
66800+ GR_ROLE_PAM = 0x0400,
66801+ GR_ROLE_PERSIST = 0x800
66802+};
66803+
66804+/* ACL Subject and Object mode flags */
66805+enum {
66806+ GR_DELETED = 0x80000000
66807+};
66808+
66809+/* ACL Object-only mode flags */
66810+enum {
66811+ GR_READ = 0x00000001,
66812+ GR_APPEND = 0x00000002,
66813+ GR_WRITE = 0x00000004,
66814+ GR_EXEC = 0x00000008,
66815+ GR_FIND = 0x00000010,
66816+ GR_INHERIT = 0x00000020,
66817+ GR_SETID = 0x00000040,
66818+ GR_CREATE = 0x00000080,
66819+ GR_DELETE = 0x00000100,
66820+ GR_LINK = 0x00000200,
66821+ GR_AUDIT_READ = 0x00000400,
66822+ GR_AUDIT_APPEND = 0x00000800,
66823+ GR_AUDIT_WRITE = 0x00001000,
66824+ GR_AUDIT_EXEC = 0x00002000,
66825+ GR_AUDIT_FIND = 0x00004000,
66826+ GR_AUDIT_INHERIT= 0x00008000,
66827+ GR_AUDIT_SETID = 0x00010000,
66828+ GR_AUDIT_CREATE = 0x00020000,
66829+ GR_AUDIT_DELETE = 0x00040000,
66830+ GR_AUDIT_LINK = 0x00080000,
66831+ GR_PTRACERD = 0x00100000,
66832+ GR_NOPTRACE = 0x00200000,
66833+ GR_SUPPRESS = 0x00400000,
66834+ GR_NOLEARN = 0x00800000,
66835+ GR_INIT_TRANSFER= 0x01000000
66836+};
66837+
66838+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
66839+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
66840+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
66841+
66842+/* ACL subject-only mode flags */
66843+enum {
66844+ GR_KILL = 0x00000001,
66845+ GR_VIEW = 0x00000002,
66846+ GR_PROTECTED = 0x00000004,
66847+ GR_LEARN = 0x00000008,
66848+ GR_OVERRIDE = 0x00000010,
66849+ /* just a placeholder, this mode is only used in userspace */
66850+ GR_DUMMY = 0x00000020,
66851+ GR_PROTSHM = 0x00000040,
66852+ GR_KILLPROC = 0x00000080,
66853+ GR_KILLIPPROC = 0x00000100,
66854+ /* just a placeholder, this mode is only used in userspace */
66855+ GR_NOTROJAN = 0x00000200,
66856+ GR_PROTPROCFD = 0x00000400,
66857+ GR_PROCACCT = 0x00000800,
66858+ GR_RELAXPTRACE = 0x00001000,
66859+ GR_NESTED = 0x00002000,
66860+ GR_INHERITLEARN = 0x00004000,
66861+ GR_PROCFIND = 0x00008000,
66862+ GR_POVERRIDE = 0x00010000,
66863+ GR_KERNELAUTH = 0x00020000,
66864+ GR_ATSECURE = 0x00040000,
66865+ GR_SHMEXEC = 0x00080000
66866+};
66867+
66868+enum {
66869+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
66870+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
66871+ GR_PAX_ENABLE_MPROTECT = 0x0004,
66872+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
66873+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
66874+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
66875+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
66876+ GR_PAX_DISABLE_MPROTECT = 0x0400,
66877+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
66878+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
66879+};
66880+
66881+enum {
66882+ GR_ID_USER = 0x01,
66883+ GR_ID_GROUP = 0x02,
66884+};
66885+
66886+enum {
66887+ GR_ID_ALLOW = 0x01,
66888+ GR_ID_DENY = 0x02,
66889+};
66890+
66891+#define GR_CRASH_RES 31
66892+#define GR_UIDTABLE_MAX 500
66893+
66894+/* begin resource learning section */
66895+enum {
66896+ GR_RLIM_CPU_BUMP = 60,
66897+ GR_RLIM_FSIZE_BUMP = 50000,
66898+ GR_RLIM_DATA_BUMP = 10000,
66899+ GR_RLIM_STACK_BUMP = 1000,
66900+ GR_RLIM_CORE_BUMP = 10000,
66901+ GR_RLIM_RSS_BUMP = 500000,
66902+ GR_RLIM_NPROC_BUMP = 1,
66903+ GR_RLIM_NOFILE_BUMP = 5,
66904+ GR_RLIM_MEMLOCK_BUMP = 50000,
66905+ GR_RLIM_AS_BUMP = 500000,
66906+ GR_RLIM_LOCKS_BUMP = 2,
66907+ GR_RLIM_SIGPENDING_BUMP = 5,
66908+ GR_RLIM_MSGQUEUE_BUMP = 10000,
66909+ GR_RLIM_NICE_BUMP = 1,
66910+ GR_RLIM_RTPRIO_BUMP = 1,
66911+ GR_RLIM_RTTIME_BUMP = 1000000
66912+};
66913+
66914+#endif
66915diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
66916new file mode 100644
66917index 0000000..3826b91
66918--- /dev/null
66919+++ b/include/linux/grinternal.h
66920@@ -0,0 +1,219 @@
66921+#ifndef __GRINTERNAL_H
66922+#define __GRINTERNAL_H
66923+
66924+#ifdef CONFIG_GRKERNSEC
66925+
66926+#include <linux/fs.h>
66927+#include <linux/mnt_namespace.h>
66928+#include <linux/nsproxy.h>
66929+#include <linux/gracl.h>
66930+#include <linux/grdefs.h>
66931+#include <linux/grmsg.h>
66932+
66933+void gr_add_learn_entry(const char *fmt, ...)
66934+ __attribute__ ((format (printf, 1, 2)));
66935+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
66936+ const struct vfsmount *mnt);
66937+__u32 gr_check_create(const struct dentry *new_dentry,
66938+ const struct dentry *parent,
66939+ const struct vfsmount *mnt, const __u32 mode);
66940+int gr_check_protected_task(const struct task_struct *task);
66941+__u32 to_gr_audit(const __u32 reqmode);
66942+int gr_set_acls(const int type);
66943+int gr_apply_subject_to_task(struct task_struct *task);
66944+int gr_acl_is_enabled(void);
66945+char gr_roletype_to_char(void);
66946+
66947+void gr_handle_alertkill(struct task_struct *task);
66948+char *gr_to_filename(const struct dentry *dentry,
66949+ const struct vfsmount *mnt);
66950+char *gr_to_filename1(const struct dentry *dentry,
66951+ const struct vfsmount *mnt);
66952+char *gr_to_filename2(const struct dentry *dentry,
66953+ const struct vfsmount *mnt);
66954+char *gr_to_filename3(const struct dentry *dentry,
66955+ const struct vfsmount *mnt);
66956+
66957+extern int grsec_enable_ptrace_readexec;
66958+extern int grsec_enable_harden_ptrace;
66959+extern int grsec_enable_link;
66960+extern int grsec_enable_fifo;
66961+extern int grsec_enable_shm;
66962+extern int grsec_enable_execlog;
66963+extern int grsec_enable_signal;
66964+extern int grsec_enable_audit_ptrace;
66965+extern int grsec_enable_forkfail;
66966+extern int grsec_enable_time;
66967+extern int grsec_enable_rofs;
66968+extern int grsec_enable_chroot_shmat;
66969+extern int grsec_enable_chroot_mount;
66970+extern int grsec_enable_chroot_double;
66971+extern int grsec_enable_chroot_pivot;
66972+extern int grsec_enable_chroot_chdir;
66973+extern int grsec_enable_chroot_chmod;
66974+extern int grsec_enable_chroot_mknod;
66975+extern int grsec_enable_chroot_fchdir;
66976+extern int grsec_enable_chroot_nice;
66977+extern int grsec_enable_chroot_execlog;
66978+extern int grsec_enable_chroot_caps;
66979+extern int grsec_enable_chroot_sysctl;
66980+extern int grsec_enable_chroot_unix;
66981+extern int grsec_enable_tpe;
66982+extern int grsec_tpe_gid;
66983+extern int grsec_enable_tpe_all;
66984+extern int grsec_enable_tpe_invert;
66985+extern int grsec_enable_socket_all;
66986+extern int grsec_socket_all_gid;
66987+extern int grsec_enable_socket_client;
66988+extern int grsec_socket_client_gid;
66989+extern int grsec_enable_socket_server;
66990+extern int grsec_socket_server_gid;
66991+extern int grsec_audit_gid;
66992+extern int grsec_enable_group;
66993+extern int grsec_enable_audit_textrel;
66994+extern int grsec_enable_log_rwxmaps;
66995+extern int grsec_enable_mount;
66996+extern int grsec_enable_chdir;
66997+extern int grsec_resource_logging;
66998+extern int grsec_enable_blackhole;
66999+extern int grsec_lastack_retries;
67000+extern int grsec_enable_brute;
67001+extern int grsec_lock;
67002+
67003+extern spinlock_t grsec_alert_lock;
67004+extern unsigned long grsec_alert_wtime;
67005+extern unsigned long grsec_alert_fyet;
67006+
67007+extern spinlock_t grsec_audit_lock;
67008+
67009+extern rwlock_t grsec_exec_file_lock;
67010+
67011+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
67012+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
67013+ (tsk)->exec_file->f_vfsmnt) : "/")
67014+
67015+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
67016+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
67017+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67018+
67019+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
67020+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
67021+ (tsk)->exec_file->f_vfsmnt) : "/")
67022+
67023+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
67024+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
67025+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
67026+
67027+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
67028+
67029+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
67030+
67031+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
67032+ (task)->pid, (cred)->uid, \
67033+ (cred)->euid, (cred)->gid, (cred)->egid, \
67034+ gr_parent_task_fullpath(task), \
67035+ (task)->real_parent->comm, (task)->real_parent->pid, \
67036+ (pcred)->uid, (pcred)->euid, \
67037+ (pcred)->gid, (pcred)->egid
67038+
67039+#define GR_CHROOT_CAPS {{ \
67040+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
67041+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
67042+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
67043+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
67044+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
67045+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
67046+ CAP_TO_MASK(CAP_MAC_ADMIN) }}
67047+
67048+#define security_learn(normal_msg,args...) \
67049+({ \
67050+ read_lock(&grsec_exec_file_lock); \
67051+ gr_add_learn_entry(normal_msg "\n", ## args); \
67052+ read_unlock(&grsec_exec_file_lock); \
67053+})
67054+
67055+enum {
67056+ GR_DO_AUDIT,
67057+ GR_DONT_AUDIT,
67058+ GR_DONT_AUDIT_GOOD
67059+};
67060+
67061+enum {
67062+ GR_TTYSNIFF,
67063+ GR_RBAC,
67064+ GR_RBAC_STR,
67065+ GR_STR_RBAC,
67066+ GR_RBAC_MODE2,
67067+ GR_RBAC_MODE3,
67068+ GR_FILENAME,
67069+ GR_SYSCTL_HIDDEN,
67070+ GR_NOARGS,
67071+ GR_ONE_INT,
67072+ GR_ONE_INT_TWO_STR,
67073+ GR_ONE_STR,
67074+ GR_STR_INT,
67075+ GR_TWO_STR_INT,
67076+ GR_TWO_INT,
67077+ GR_TWO_U64,
67078+ GR_THREE_INT,
67079+ GR_FIVE_INT_TWO_STR,
67080+ GR_TWO_STR,
67081+ GR_THREE_STR,
67082+ GR_FOUR_STR,
67083+ GR_STR_FILENAME,
67084+ GR_FILENAME_STR,
67085+ GR_FILENAME_TWO_INT,
67086+ GR_FILENAME_TWO_INT_STR,
67087+ GR_TEXTREL,
67088+ GR_PTRACE,
67089+ GR_RESOURCE,
67090+ GR_CAP,
67091+ GR_SIG,
67092+ GR_SIG2,
67093+ GR_CRASH1,
67094+ GR_CRASH2,
67095+ GR_PSACCT,
67096+ GR_RWXMAP
67097+};
67098+
67099+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
67100+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
67101+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
67102+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
67103+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
67104+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
67105+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
67106+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
67107+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
67108+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
67109+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
67110+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
67111+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
67112+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
67113+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
67114+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
67115+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
67116+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
67117+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
67118+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
67119+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
67120+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
67121+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
67122+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
67123+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
67124+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
67125+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
67126+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
67127+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
67128+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
67129+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
67130+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
67131+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
67132+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
67133+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
67134+
67135+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
67136+
67137+#endif
67138+
67139+#endif
67140diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
67141new file mode 100644
67142index 0000000..7f62b30
67143--- /dev/null
67144+++ b/include/linux/grmsg.h
67145@@ -0,0 +1,109 @@
67146+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
67147+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
67148+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
67149+#define GR_STOPMOD_MSG "denied modification of module state by "
67150+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
67151+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
67152+#define GR_IOPERM_MSG "denied use of ioperm() by "
67153+#define GR_IOPL_MSG "denied use of iopl() by "
67154+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
67155+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
67156+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
67157+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
67158+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
67159+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
67160+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
67161+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
67162+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
67163+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
67164+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
67165+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
67166+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
67167+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
67168+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
67169+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
67170+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
67171+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
67172+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
67173+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
67174+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
67175+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
67176+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
67177+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
67178+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
67179+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
67180+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
67181+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
67182+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
67183+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
67184+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
67185+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
67186+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
67187+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
67188+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
67189+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
67190+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
67191+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
67192+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
67193+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
67194+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
67195+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
67196+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
67197+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
67198+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
67199+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
67200+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
67201+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
67202+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
67203+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
67204+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
67205+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
67206+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
67207+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
67208+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
67209+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
67210+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
67211+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
67212+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
67213+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
67214+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
67215+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
67216+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
67217+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
67218+#define GR_FAILFORK_MSG "failed fork with errno %s by "
67219+#define GR_NICE_CHROOT_MSG "denied priority change by "
67220+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
67221+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
67222+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
67223+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
67224+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
67225+#define GR_TIME_MSG "time set by "
67226+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
67227+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
67228+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
67229+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
67230+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
67231+#define GR_BIND_MSG "denied bind() by "
67232+#define GR_CONNECT_MSG "denied connect() by "
67233+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
67234+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
67235+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
67236+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
67237+#define GR_CAP_ACL_MSG "use of %s denied for "
67238+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
67239+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
67240+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
67241+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
67242+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
67243+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
67244+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
67245+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
67246+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
67247+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
67248+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
67249+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
67250+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
67251+#define GR_VM86_MSG "denied use of vm86 by "
67252+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
67253+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
67254+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
67255diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
67256new file mode 100644
67257index 0000000..c597c46
67258--- /dev/null
67259+++ b/include/linux/grsecurity.h
67260@@ -0,0 +1,217 @@
67261+#ifndef GR_SECURITY_H
67262+#define GR_SECURITY_H
67263+#include <linux/fs.h>
67264+#include <linux/fs_struct.h>
67265+#include <linux/binfmts.h>
67266+#include <linux/gracl.h>
67267+#include <linux/compat.h>
67268+
67269+/* notify of brain-dead configs */
67270+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
67271+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
67272+#endif
67273+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
67274+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
67275+#endif
67276+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
67277+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
67278+#endif
67279+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
67280+#error "CONFIG_PAX enabled, but no PaX options are enabled."
67281+#endif
67282+
67283+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
67284+void gr_handle_brute_check(void);
67285+void gr_handle_kernel_exploit(void);
67286+int gr_process_user_ban(void);
67287+
67288+char gr_roletype_to_char(void);
67289+
67290+int gr_acl_enable_at_secure(void);
67291+
67292+int gr_check_user_change(int real, int effective, int fs);
67293+int gr_check_group_change(int real, int effective, int fs);
67294+
67295+void gr_del_task_from_ip_table(struct task_struct *p);
67296+
67297+int gr_pid_is_chrooted(struct task_struct *p);
67298+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
67299+int gr_handle_chroot_nice(void);
67300+int gr_handle_chroot_sysctl(const int op);
67301+int gr_handle_chroot_setpriority(struct task_struct *p,
67302+ const int niceval);
67303+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
67304+int gr_handle_chroot_chroot(const struct dentry *dentry,
67305+ const struct vfsmount *mnt);
67306+void gr_handle_chroot_chdir(struct path *path);
67307+int gr_handle_chroot_chmod(const struct dentry *dentry,
67308+ const struct vfsmount *mnt, const int mode);
67309+int gr_handle_chroot_mknod(const struct dentry *dentry,
67310+ const struct vfsmount *mnt, const int mode);
67311+int gr_handle_chroot_mount(const struct dentry *dentry,
67312+ const struct vfsmount *mnt,
67313+ const char *dev_name);
67314+int gr_handle_chroot_pivot(void);
67315+int gr_handle_chroot_unix(const pid_t pid);
67316+
67317+int gr_handle_rawio(const struct inode *inode);
67318+
67319+void gr_handle_ioperm(void);
67320+void gr_handle_iopl(void);
67321+
67322+int gr_tpe_allow(const struct file *file);
67323+
67324+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
67325+void gr_clear_chroot_entries(struct task_struct *task);
67326+
67327+void gr_log_forkfail(const int retval);
67328+void gr_log_timechange(void);
67329+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
67330+void gr_log_chdir(const struct dentry *dentry,
67331+ const struct vfsmount *mnt);
67332+void gr_log_chroot_exec(const struct dentry *dentry,
67333+ const struct vfsmount *mnt);
67334+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
67335+#ifdef CONFIG_COMPAT
67336+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
67337+#endif
67338+void gr_log_remount(const char *devname, const int retval);
67339+void gr_log_unmount(const char *devname, const int retval);
67340+void gr_log_mount(const char *from, const char *to, const int retval);
67341+void gr_log_textrel(struct vm_area_struct *vma);
67342+void gr_log_rwxmmap(struct file *file);
67343+void gr_log_rwxmprotect(struct file *file);
67344+
67345+int gr_handle_follow_link(const struct inode *parent,
67346+ const struct inode *inode,
67347+ const struct dentry *dentry,
67348+ const struct vfsmount *mnt);
67349+int gr_handle_fifo(const struct dentry *dentry,
67350+ const struct vfsmount *mnt,
67351+ const struct dentry *dir, const int flag,
67352+ const int acc_mode);
67353+int gr_handle_hardlink(const struct dentry *dentry,
67354+ const struct vfsmount *mnt,
67355+ struct inode *inode,
67356+ const int mode, const char *to);
67357+
67358+int gr_is_capable(const int cap);
67359+int gr_is_capable_nolog(const int cap);
67360+void gr_learn_resource(const struct task_struct *task, const int limit,
67361+ const unsigned long wanted, const int gt);
67362+void gr_copy_label(struct task_struct *tsk);
67363+void gr_handle_crash(struct task_struct *task, const int sig);
67364+int gr_handle_signal(const struct task_struct *p, const int sig);
67365+int gr_check_crash_uid(const uid_t uid);
67366+int gr_check_protected_task(const struct task_struct *task);
67367+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
67368+int gr_acl_handle_mmap(const struct file *file,
67369+ const unsigned long prot);
67370+int gr_acl_handle_mprotect(const struct file *file,
67371+ const unsigned long prot);
67372+int gr_check_hidden_task(const struct task_struct *tsk);
67373+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
67374+ const struct vfsmount *mnt);
67375+__u32 gr_acl_handle_utime(const struct dentry *dentry,
67376+ const struct vfsmount *mnt);
67377+__u32 gr_acl_handle_access(const struct dentry *dentry,
67378+ const struct vfsmount *mnt, const int fmode);
67379+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
67380+ const struct vfsmount *mnt, mode_t mode);
67381+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
67382+ const struct vfsmount *mnt, mode_t mode);
67383+__u32 gr_acl_handle_chown(const struct dentry *dentry,
67384+ const struct vfsmount *mnt);
67385+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
67386+ const struct vfsmount *mnt);
67387+int gr_handle_ptrace(struct task_struct *task, const long request);
67388+int gr_handle_proc_ptrace(struct task_struct *task);
67389+__u32 gr_acl_handle_execve(const struct dentry *dentry,
67390+ const struct vfsmount *mnt);
67391+int gr_check_crash_exec(const struct file *filp);
67392+int gr_acl_is_enabled(void);
67393+void gr_set_kernel_label(struct task_struct *task);
67394+void gr_set_role_label(struct task_struct *task, const uid_t uid,
67395+ const gid_t gid);
67396+int gr_set_proc_label(const struct dentry *dentry,
67397+ const struct vfsmount *mnt,
67398+ const int unsafe_flags);
67399+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
67400+ const struct vfsmount *mnt);
67401+__u32 gr_acl_handle_open(const struct dentry *dentry,
67402+ const struct vfsmount *mnt, int acc_mode);
67403+__u32 gr_acl_handle_creat(const struct dentry *dentry,
67404+ const struct dentry *p_dentry,
67405+ const struct vfsmount *p_mnt,
67406+ int open_flags, int acc_mode, const int imode);
67407+void gr_handle_create(const struct dentry *dentry,
67408+ const struct vfsmount *mnt);
67409+void gr_handle_proc_create(const struct dentry *dentry,
67410+ const struct inode *inode);
67411+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
67412+ const struct dentry *parent_dentry,
67413+ const struct vfsmount *parent_mnt,
67414+ const int mode);
67415+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
67416+ const struct dentry *parent_dentry,
67417+ const struct vfsmount *parent_mnt);
67418+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
67419+ const struct vfsmount *mnt);
67420+void gr_handle_delete(const ino_t ino, const dev_t dev);
67421+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
67422+ const struct vfsmount *mnt);
67423+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
67424+ const struct dentry *parent_dentry,
67425+ const struct vfsmount *parent_mnt,
67426+ const char *from);
67427+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
67428+ const struct dentry *parent_dentry,
67429+ const struct vfsmount *parent_mnt,
67430+ const struct dentry *old_dentry,
67431+ const struct vfsmount *old_mnt, const char *to);
67432+int gr_acl_handle_rename(struct dentry *new_dentry,
67433+ struct dentry *parent_dentry,
67434+ const struct vfsmount *parent_mnt,
67435+ struct dentry *old_dentry,
67436+ struct inode *old_parent_inode,
67437+ struct vfsmount *old_mnt, const char *newname);
67438+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
67439+ struct dentry *old_dentry,
67440+ struct dentry *new_dentry,
67441+ struct vfsmount *mnt, const __u8 replace);
67442+__u32 gr_check_link(const struct dentry *new_dentry,
67443+ const struct dentry *parent_dentry,
67444+ const struct vfsmount *parent_mnt,
67445+ const struct dentry *old_dentry,
67446+ const struct vfsmount *old_mnt);
67447+int gr_acl_handle_filldir(const struct file *file, const char *name,
67448+ const unsigned int namelen, const ino_t ino);
67449+
67450+__u32 gr_acl_handle_unix(const struct dentry *dentry,
67451+ const struct vfsmount *mnt);
67452+void gr_acl_handle_exit(void);
67453+void gr_acl_handle_psacct(struct task_struct *task, const long code);
67454+int gr_acl_handle_procpidmem(const struct task_struct *task);
67455+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
67456+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
67457+void gr_audit_ptrace(struct task_struct *task);
67458+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
67459+
67460+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
67461+
67462+#ifdef CONFIG_GRKERNSEC
67463+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
67464+void gr_handle_vm86(void);
67465+void gr_handle_mem_readwrite(u64 from, u64 to);
67466+
67467+extern int grsec_enable_dmesg;
67468+extern int grsec_disable_privio;
67469+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67470+extern int grsec_enable_chroot_findtask;
67471+#endif
67472+#ifdef CONFIG_GRKERNSEC_SETXID
67473+extern int grsec_enable_setxid;
67474+#endif
67475+#endif
67476+
67477+#endif
67478diff --git a/include/linux/hdpu_features.h b/include/linux/hdpu_features.h
67479index 6a87154..a3ce57b 100644
67480--- a/include/linux/hdpu_features.h
67481+++ b/include/linux/hdpu_features.h
67482@@ -3,7 +3,7 @@
67483 struct cpustate_t {
67484 spinlock_t lock;
67485 int excl;
67486- int open_count;
67487+ atomic_t open_count;
67488 unsigned char cached_val;
67489 int inited;
67490 unsigned long *set_addr;
67491diff --git a/include/linux/highmem.h b/include/linux/highmem.h
67492index 211ff44..00ab6d7 100644
67493--- a/include/linux/highmem.h
67494+++ b/include/linux/highmem.h
67495@@ -137,6 +137,18 @@ static inline void clear_highpage(struct page *page)
67496 kunmap_atomic(kaddr, KM_USER0);
67497 }
67498
67499+static inline void sanitize_highpage(struct page *page)
67500+{
67501+ void *kaddr;
67502+ unsigned long flags;
67503+
67504+ local_irq_save(flags);
67505+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
67506+ clear_page(kaddr);
67507+ kunmap_atomic(kaddr, KM_CLEARPAGE);
67508+ local_irq_restore(flags);
67509+}
67510+
67511 static inline void zero_user_segments(struct page *page,
67512 unsigned start1, unsigned end1,
67513 unsigned start2, unsigned end2)
67514diff --git a/include/linux/i2c.h b/include/linux/i2c.h
67515index 7b40cda..24eb44e 100644
67516--- a/include/linux/i2c.h
67517+++ b/include/linux/i2c.h
67518@@ -325,6 +325,7 @@ struct i2c_algorithm {
67519 /* To determine what the adapter supports */
67520 u32 (*functionality) (struct i2c_adapter *);
67521 };
67522+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
67523
67524 /*
67525 * i2c_adapter is the structure used to identify a physical i2c bus along
67526diff --git a/include/linux/i2o.h b/include/linux/i2o.h
67527index 4c4e57d..f3c5303 100644
67528--- a/include/linux/i2o.h
67529+++ b/include/linux/i2o.h
67530@@ -564,7 +564,7 @@ struct i2o_controller {
67531 struct i2o_device *exec; /* Executive */
67532 #if BITS_PER_LONG == 64
67533 spinlock_t context_list_lock; /* lock for context_list */
67534- atomic_t context_list_counter; /* needed for unique contexts */
67535+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
67536 struct list_head context_list; /* list of context id's
67537 and pointers */
67538 #endif
67539diff --git a/include/linux/init_task.h b/include/linux/init_task.h
67540index 21a6f5d..dc42eab 100644
67541--- a/include/linux/init_task.h
67542+++ b/include/linux/init_task.h
67543@@ -83,6 +83,12 @@ extern struct group_info init_groups;
67544 #define INIT_IDS
67545 #endif
67546
67547+#ifdef CONFIG_X86
67548+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
67549+#else
67550+#define INIT_TASK_THREAD_INFO
67551+#endif
67552+
67553 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
67554 /*
67555 * Because of the reduced scope of CAP_SETPCAP when filesystem
67556@@ -156,6 +162,7 @@ extern struct cred init_cred;
67557 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
67558 .comm = "swapper", \
67559 .thread = INIT_THREAD, \
67560+ INIT_TASK_THREAD_INFO \
67561 .fs = &init_fs, \
67562 .files = &init_files, \
67563 .signal = &init_signals, \
67564diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
67565index 4f0a72a..a849599 100644
67566--- a/include/linux/intel-iommu.h
67567+++ b/include/linux/intel-iommu.h
67568@@ -296,7 +296,7 @@ struct iommu_flush {
67569 u8 fm, u64 type);
67570 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
67571 unsigned int size_order, u64 type);
67572-};
67573+} __no_const;
67574
67575 enum {
67576 SR_DMAR_FECTL_REG,
67577diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
67578index c739150..be577b5 100644
67579--- a/include/linux/interrupt.h
67580+++ b/include/linux/interrupt.h
67581@@ -369,7 +369,7 @@ enum
67582 /* map softirq index to softirq name. update 'softirq_to_name' in
67583 * kernel/softirq.c when adding a new softirq.
67584 */
67585-extern char *softirq_to_name[NR_SOFTIRQS];
67586+extern const char * const softirq_to_name[NR_SOFTIRQS];
67587
67588 /* softirq mask and active fields moved to irq_cpustat_t in
67589 * asm/hardirq.h to get better cache usage. KAO
67590@@ -377,12 +377,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
67591
67592 struct softirq_action
67593 {
67594- void (*action)(struct softirq_action *);
67595+ void (*action)(void);
67596 };
67597
67598 asmlinkage void do_softirq(void);
67599 asmlinkage void __do_softirq(void);
67600-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
67601+extern void open_softirq(int nr, void (*action)(void));
67602 extern void softirq_init(void);
67603 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
67604 extern void raise_softirq_irqoff(unsigned int nr);
67605diff --git a/include/linux/irq.h b/include/linux/irq.h
67606index 9e5f45a..025865b 100644
67607--- a/include/linux/irq.h
67608+++ b/include/linux/irq.h
67609@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
67610 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
67611 bool boot)
67612 {
67613+#ifdef CONFIG_CPUMASK_OFFSTACK
67614 gfp_t gfp = GFP_ATOMIC;
67615
67616 if (boot)
67617 gfp = GFP_NOWAIT;
67618
67619-#ifdef CONFIG_CPUMASK_OFFSTACK
67620 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
67621 return false;
67622
67623diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
67624index 7922742..27306a2 100644
67625--- a/include/linux/kallsyms.h
67626+++ b/include/linux/kallsyms.h
67627@@ -15,7 +15,8 @@
67628
67629 struct module;
67630
67631-#ifdef CONFIG_KALLSYMS
67632+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
67633+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
67634 /* Lookup the address for a symbol. Returns 0 if not found. */
67635 unsigned long kallsyms_lookup_name(const char *name);
67636
67637@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
67638 /* Stupid that this does nothing, but I didn't create this mess. */
67639 #define __print_symbol(fmt, addr)
67640 #endif /*CONFIG_KALLSYMS*/
67641+#else /* when included by kallsyms.c, vsnprintf.c, or
67642+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
67643+extern void __print_symbol(const char *fmt, unsigned long address);
67644+extern int sprint_symbol(char *buffer, unsigned long address);
67645+const char *kallsyms_lookup(unsigned long addr,
67646+ unsigned long *symbolsize,
67647+ unsigned long *offset,
67648+ char **modname, char *namebuf);
67649+#endif
67650
67651 /* This macro allows us to keep printk typechecking */
67652 static void __check_printsym_format(const char *fmt, ...)
67653diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
67654index 6adcc29..13369e8 100644
67655--- a/include/linux/kgdb.h
67656+++ b/include/linux/kgdb.h
67657@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
67658
67659 extern int kgdb_connected;
67660
67661-extern atomic_t kgdb_setting_breakpoint;
67662-extern atomic_t kgdb_cpu_doing_single_step;
67663+extern atomic_unchecked_t kgdb_setting_breakpoint;
67664+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
67665
67666 extern struct task_struct *kgdb_usethread;
67667 extern struct task_struct *kgdb_contthread;
67668@@ -235,7 +235,7 @@ struct kgdb_arch {
67669 int (*remove_hw_breakpoint)(unsigned long, int, enum kgdb_bptype);
67670 void (*remove_all_hw_break)(void);
67671 void (*correct_hw_break)(void);
67672-};
67673+} __do_const;
67674
67675 /**
67676 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
67677@@ -257,14 +257,14 @@ struct kgdb_io {
67678 int (*init) (void);
67679 void (*pre_exception) (void);
67680 void (*post_exception) (void);
67681-};
67682+} __do_const;
67683
67684-extern struct kgdb_arch arch_kgdb_ops;
67685+extern const struct kgdb_arch arch_kgdb_ops;
67686
67687 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
67688
67689-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
67690-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
67691+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
67692+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
67693
67694 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
67695 extern int kgdb_mem2hex(char *mem, char *buf, int count);
67696diff --git a/include/linux/kmod.h b/include/linux/kmod.h
67697index 384ca8b..83dd97d 100644
67698--- a/include/linux/kmod.h
67699+++ b/include/linux/kmod.h
67700@@ -31,6 +31,8 @@
67701 * usually useless though. */
67702 extern int __request_module(bool wait, const char *name, ...) \
67703 __attribute__((format(printf, 2, 3)));
67704+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
67705+ __attribute__((format(printf, 3, 4)));
67706 #define request_module(mod...) __request_module(true, mod)
67707 #define request_module_nowait(mod...) __request_module(false, mod)
67708 #define try_then_request_module(x, mod...) \
67709diff --git a/include/linux/kobject.h b/include/linux/kobject.h
67710index 58ae8e0..3950d3c 100644
67711--- a/include/linux/kobject.h
67712+++ b/include/linux/kobject.h
67713@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
67714
67715 struct kobj_type {
67716 void (*release)(struct kobject *kobj);
67717- struct sysfs_ops *sysfs_ops;
67718+ const struct sysfs_ops *sysfs_ops;
67719 struct attribute **default_attrs;
67720 };
67721
67722@@ -118,9 +118,9 @@ struct kobj_uevent_env {
67723 };
67724
67725 struct kset_uevent_ops {
67726- int (*filter)(struct kset *kset, struct kobject *kobj);
67727- const char *(*name)(struct kset *kset, struct kobject *kobj);
67728- int (*uevent)(struct kset *kset, struct kobject *kobj,
67729+ int (* const filter)(struct kset *kset, struct kobject *kobj);
67730+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
67731+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
67732 struct kobj_uevent_env *env);
67733 };
67734
67735@@ -132,7 +132,7 @@ struct kobj_attribute {
67736 const char *buf, size_t count);
67737 };
67738
67739-extern struct sysfs_ops kobj_sysfs_ops;
67740+extern const struct sysfs_ops kobj_sysfs_ops;
67741
67742 /**
67743 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
67744@@ -155,14 +155,14 @@ struct kset {
67745 struct list_head list;
67746 spinlock_t list_lock;
67747 struct kobject kobj;
67748- struct kset_uevent_ops *uevent_ops;
67749+ const struct kset_uevent_ops *uevent_ops;
67750 };
67751
67752 extern void kset_init(struct kset *kset);
67753 extern int __must_check kset_register(struct kset *kset);
67754 extern void kset_unregister(struct kset *kset);
67755 extern struct kset * __must_check kset_create_and_add(const char *name,
67756- struct kset_uevent_ops *u,
67757+ const struct kset_uevent_ops *u,
67758 struct kobject *parent_kobj);
67759
67760 static inline struct kset *to_kset(struct kobject *kobj)
67761diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
67762index c728a50..752d821 100644
67763--- a/include/linux/kvm_host.h
67764+++ b/include/linux/kvm_host.h
67765@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
67766 void vcpu_load(struct kvm_vcpu *vcpu);
67767 void vcpu_put(struct kvm_vcpu *vcpu);
67768
67769-int kvm_init(void *opaque, unsigned int vcpu_size,
67770+int kvm_init(const void *opaque, unsigned int vcpu_size,
67771 struct module *module);
67772 void kvm_exit(void);
67773
67774@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
67775 struct kvm_guest_debug *dbg);
67776 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
67777
67778-int kvm_arch_init(void *opaque);
67779+int kvm_arch_init(const void *opaque);
67780 void kvm_arch_exit(void);
67781
67782 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
67783diff --git a/include/linux/libata.h b/include/linux/libata.h
67784index a069916..223edde 100644
67785--- a/include/linux/libata.h
67786+++ b/include/linux/libata.h
67787@@ -525,11 +525,11 @@ struct ata_ioports {
67788
67789 struct ata_host {
67790 spinlock_t lock;
67791- struct device *dev;
67792+ struct device *dev;
67793 void __iomem * const *iomap;
67794 unsigned int n_ports;
67795 void *private_data;
67796- struct ata_port_operations *ops;
67797+ const struct ata_port_operations *ops;
67798 unsigned long flags;
67799 #ifdef CONFIG_ATA_ACPI
67800 acpi_handle acpi_handle;
67801@@ -710,7 +710,7 @@ struct ata_link {
67802
67803 struct ata_port {
67804 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
67805- struct ata_port_operations *ops;
67806+ const struct ata_port_operations *ops;
67807 spinlock_t *lock;
67808 /* Flags owned by the EH context. Only EH should touch these once the
67809 port is active */
67810@@ -884,7 +884,7 @@ struct ata_port_operations {
67811 * fields must be pointers.
67812 */
67813 const struct ata_port_operations *inherits;
67814-};
67815+} __do_const;
67816
67817 struct ata_port_info {
67818 unsigned long flags;
67819@@ -892,7 +892,7 @@ struct ata_port_info {
67820 unsigned long pio_mask;
67821 unsigned long mwdma_mask;
67822 unsigned long udma_mask;
67823- struct ata_port_operations *port_ops;
67824+ const struct ata_port_operations *port_ops;
67825 void *private_data;
67826 };
67827
67828@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timing_normal[];
67829 extern const unsigned long sata_deb_timing_hotplug[];
67830 extern const unsigned long sata_deb_timing_long[];
67831
67832-extern struct ata_port_operations ata_dummy_port_ops;
67833+extern const struct ata_port_operations ata_dummy_port_ops;
67834 extern const struct ata_port_info ata_dummy_port_info;
67835
67836 static inline const unsigned long *
67837@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_host *host, int irq,
67838 struct scsi_host_template *sht);
67839 extern void ata_host_detach(struct ata_host *host);
67840 extern void ata_host_init(struct ata_host *, struct device *,
67841- unsigned long, struct ata_port_operations *);
67842+ unsigned long, const struct ata_port_operations *);
67843 extern int ata_scsi_detect(struct scsi_host_template *sht);
67844 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
67845 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
67846diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
67847index fbc48f8..0886e57 100644
67848--- a/include/linux/lockd/bind.h
67849+++ b/include/linux/lockd/bind.h
67850@@ -23,13 +23,13 @@ struct svc_rqst;
67851 * This is the set of functions for lockd->nfsd communication
67852 */
67853 struct nlmsvc_binding {
67854- __be32 (*fopen)(struct svc_rqst *,
67855+ __be32 (* const fopen)(struct svc_rqst *,
67856 struct nfs_fh *,
67857 struct file **);
67858- void (*fclose)(struct file *);
67859+ void (* const fclose)(struct file *);
67860 };
67861
67862-extern struct nlmsvc_binding * nlmsvc_ops;
67863+extern const struct nlmsvc_binding * nlmsvc_ops;
67864
67865 /*
67866 * Similar to nfs_client_initdata, but without the NFS-specific
67867diff --git a/include/linux/mca.h b/include/linux/mca.h
67868index 3797270..7765ede 100644
67869--- a/include/linux/mca.h
67870+++ b/include/linux/mca.h
67871@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
67872 int region);
67873 void * (*mca_transform_memory)(struct mca_device *,
67874 void *memory);
67875-};
67876+} __no_const;
67877
67878 struct mca_bus {
67879 u64 default_dma_mask;
67880diff --git a/include/linux/memory.h b/include/linux/memory.h
67881index 37fa19b..b597c85 100644
67882--- a/include/linux/memory.h
67883+++ b/include/linux/memory.h
67884@@ -108,7 +108,7 @@ struct memory_accessor {
67885 size_t count);
67886 ssize_t (*write)(struct memory_accessor *, const char *buf,
67887 off_t offset, size_t count);
67888-};
67889+} __no_const;
67890
67891 /*
67892 * Kernel text modification mutex, used for code patching. Users of this lock
67893diff --git a/include/linux/mm.h b/include/linux/mm.h
67894index 11e5be6..1ff2423 100644
67895--- a/include/linux/mm.h
67896+++ b/include/linux/mm.h
67897@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void *objp);
67898
67899 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
67900 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
67901+
67902+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
67903+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
67904+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
67905+#else
67906 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
67907+#endif
67908+
67909 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
67910 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
67911
67912@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
67913 int set_page_dirty_lock(struct page *page);
67914 int clear_page_dirty_for_io(struct page *page);
67915
67916-/* Is the vma a continuation of the stack vma above it? */
67917-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
67918-{
67919- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
67920-}
67921-
67922 extern unsigned long move_page_tables(struct vm_area_struct *vma,
67923 unsigned long old_addr, struct vm_area_struct *new_vma,
67924 unsigned long new_addr, unsigned long len);
67925@@ -890,6 +891,8 @@ struct shrinker {
67926 extern void register_shrinker(struct shrinker *);
67927 extern void unregister_shrinker(struct shrinker *);
67928
67929+pgprot_t vm_get_page_prot(unsigned long vm_flags);
67930+
67931 int vma_wants_writenotify(struct vm_area_struct *vma);
67932
67933 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
67934@@ -1162,6 +1165,7 @@ out:
67935 }
67936
67937 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
67938+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
67939
67940 extern unsigned long do_brk(unsigned long, unsigned long);
67941
67942@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
67943 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
67944 struct vm_area_struct **pprev);
67945
67946+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
67947+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
67948+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
67949+
67950 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
67951 NULL if none. Assume start_addr < end_addr. */
67952 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
67953@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma)
67954 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
67955 }
67956
67957-pgprot_t vm_get_page_prot(unsigned long vm_flags);
67958 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
67959 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
67960 unsigned long pfn, unsigned long size, pgprot_t);
67961@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long pfn, int trapno);
67962 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
67963 extern int sysctl_memory_failure_early_kill;
67964 extern int sysctl_memory_failure_recovery;
67965-extern atomic_long_t mce_bad_pages;
67966+extern atomic_long_unchecked_t mce_bad_pages;
67967+
67968+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
67969+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
67970+#else
67971+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
67972+#endif
67973
67974 #endif /* __KERNEL__ */
67975 #endif /* _LINUX_MM_H */
67976diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
67977index 9d12ed5..6d9707a 100644
67978--- a/include/linux/mm_types.h
67979+++ b/include/linux/mm_types.h
67980@@ -186,6 +186,8 @@ struct vm_area_struct {
67981 #ifdef CONFIG_NUMA
67982 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
67983 #endif
67984+
67985+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
67986 };
67987
67988 struct core_thread {
67989@@ -287,6 +289,24 @@ struct mm_struct {
67990 #ifdef CONFIG_MMU_NOTIFIER
67991 struct mmu_notifier_mm *mmu_notifier_mm;
67992 #endif
67993+
67994+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
67995+ unsigned long pax_flags;
67996+#endif
67997+
67998+#ifdef CONFIG_PAX_DLRESOLVE
67999+ unsigned long call_dl_resolve;
68000+#endif
68001+
68002+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
68003+ unsigned long call_syscall;
68004+#endif
68005+
68006+#ifdef CONFIG_PAX_ASLR
68007+ unsigned long delta_mmap; /* randomized offset */
68008+ unsigned long delta_stack; /* randomized offset */
68009+#endif
68010+
68011 };
68012
68013 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
68014diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
68015index 4e02ee2..afb159e 100644
68016--- a/include/linux/mmu_notifier.h
68017+++ b/include/linux/mmu_notifier.h
68018@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
68019 */
68020 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
68021 ({ \
68022- pte_t __pte; \
68023+ pte_t ___pte; \
68024 struct vm_area_struct *___vma = __vma; \
68025 unsigned long ___address = __address; \
68026- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
68027+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
68028 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
68029- __pte; \
68030+ ___pte; \
68031 })
68032
68033 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
68034diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
68035index 6c31a2a..4b0e930 100644
68036--- a/include/linux/mmzone.h
68037+++ b/include/linux/mmzone.h
68038@@ -350,7 +350,7 @@ struct zone {
68039 unsigned long flags; /* zone flags, see below */
68040
68041 /* Zone statistics */
68042- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68043+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
68044
68045 /*
68046 * prev_priority holds the scanning priority for this zone. It is
68047diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
68048index f58e9d8..3503935 100644
68049--- a/include/linux/mod_devicetable.h
68050+++ b/include/linux/mod_devicetable.h
68051@@ -12,7 +12,7 @@
68052 typedef unsigned long kernel_ulong_t;
68053 #endif
68054
68055-#define PCI_ANY_ID (~0)
68056+#define PCI_ANY_ID ((__u16)~0)
68057
68058 struct pci_device_id {
68059 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
68060@@ -131,7 +131,7 @@ struct usb_device_id {
68061 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
68062 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
68063
68064-#define HID_ANY_ID (~0)
68065+#define HID_ANY_ID (~0U)
68066
68067 struct hid_device_id {
68068 __u16 bus;
68069diff --git a/include/linux/module.h b/include/linux/module.h
68070index 482efc8..642032b 100644
68071--- a/include/linux/module.h
68072+++ b/include/linux/module.h
68073@@ -16,6 +16,7 @@
68074 #include <linux/kobject.h>
68075 #include <linux/moduleparam.h>
68076 #include <linux/tracepoint.h>
68077+#include <linux/fs.h>
68078
68079 #include <asm/local.h>
68080 #include <asm/module.h>
68081@@ -287,16 +288,16 @@ struct module
68082 int (*init)(void);
68083
68084 /* If this is non-NULL, vfree after init() returns */
68085- void *module_init;
68086+ void *module_init_rx, *module_init_rw;
68087
68088 /* Here is the actual code + data, vfree'd on unload. */
68089- void *module_core;
68090+ void *module_core_rx, *module_core_rw;
68091
68092 /* Here are the sizes of the init and core sections */
68093- unsigned int init_size, core_size;
68094+ unsigned int init_size_rw, core_size_rw;
68095
68096 /* The size of the executable code in each section. */
68097- unsigned int init_text_size, core_text_size;
68098+ unsigned int init_size_rx, core_size_rx;
68099
68100 /* Arch-specific module values */
68101 struct mod_arch_specific arch;
68102@@ -345,6 +346,10 @@ struct module
68103 #ifdef CONFIG_EVENT_TRACING
68104 struct ftrace_event_call *trace_events;
68105 unsigned int num_trace_events;
68106+ struct file_operations trace_id;
68107+ struct file_operations trace_enable;
68108+ struct file_operations trace_format;
68109+ struct file_operations trace_filter;
68110 #endif
68111 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
68112 unsigned long *ftrace_callsites;
68113@@ -393,16 +398,46 @@ struct module *__module_address(unsigned long addr);
68114 bool is_module_address(unsigned long addr);
68115 bool is_module_text_address(unsigned long addr);
68116
68117+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
68118+{
68119+
68120+#ifdef CONFIG_PAX_KERNEXEC
68121+ if (ktla_ktva(addr) >= (unsigned long)start &&
68122+ ktla_ktva(addr) < (unsigned long)start + size)
68123+ return 1;
68124+#endif
68125+
68126+ return ((void *)addr >= start && (void *)addr < start + size);
68127+}
68128+
68129+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
68130+{
68131+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
68132+}
68133+
68134+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
68135+{
68136+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
68137+}
68138+
68139+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
68140+{
68141+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
68142+}
68143+
68144+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
68145+{
68146+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
68147+}
68148+
68149 static inline int within_module_core(unsigned long addr, struct module *mod)
68150 {
68151- return (unsigned long)mod->module_core <= addr &&
68152- addr < (unsigned long)mod->module_core + mod->core_size;
68153+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
68154 }
68155
68156 static inline int within_module_init(unsigned long addr, struct module *mod)
68157 {
68158- return (unsigned long)mod->module_init <= addr &&
68159- addr < (unsigned long)mod->module_init + mod->init_size;
68160+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
68161 }
68162
68163 /* Search for module by name: must hold module_mutex. */
68164diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
68165index c1f40c2..682ca53 100644
68166--- a/include/linux/moduleloader.h
68167+++ b/include/linux/moduleloader.h
68168@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
68169 sections. Returns NULL on failure. */
68170 void *module_alloc(unsigned long size);
68171
68172+#ifdef CONFIG_PAX_KERNEXEC
68173+void *module_alloc_exec(unsigned long size);
68174+#else
68175+#define module_alloc_exec(x) module_alloc(x)
68176+#endif
68177+
68178 /* Free memory returned from module_alloc. */
68179 void module_free(struct module *mod, void *module_region);
68180
68181+#ifdef CONFIG_PAX_KERNEXEC
68182+void module_free_exec(struct module *mod, void *module_region);
68183+#else
68184+#define module_free_exec(x, y) module_free((x), (y))
68185+#endif
68186+
68187 /* Apply the given relocation to the (simplified) ELF. Return -error
68188 or 0. */
68189 int apply_relocate(Elf_Shdr *sechdrs,
68190diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
68191index 82a9124..8a5f622 100644
68192--- a/include/linux/moduleparam.h
68193+++ b/include/linux/moduleparam.h
68194@@ -132,7 +132,7 @@ struct kparam_array
68195
68196 /* Actually copy string: maxlen param is usually sizeof(string). */
68197 #define module_param_string(name, string, len, perm) \
68198- static const struct kparam_string __param_string_##name \
68199+ static const struct kparam_string __param_string_##name __used \
68200 = { len, string }; \
68201 __module_param_call(MODULE_PARAM_PREFIX, name, \
68202 param_set_copystring, param_get_string, \
68203@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffer, struct kernel_param *kp);
68204
68205 /* Comma-separated array: *nump is set to number they actually specified. */
68206 #define module_param_array_named(name, array, type, nump, perm) \
68207- static const struct kparam_array __param_arr_##name \
68208+ static const struct kparam_array __param_arr_##name __used \
68209 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
68210 sizeof(array[0]), array }; \
68211 __module_param_call(MODULE_PARAM_PREFIX, name, \
68212diff --git a/include/linux/mutex.h b/include/linux/mutex.h
68213index 878cab4..c92cb3e 100644
68214--- a/include/linux/mutex.h
68215+++ b/include/linux/mutex.h
68216@@ -51,7 +51,7 @@ struct mutex {
68217 spinlock_t wait_lock;
68218 struct list_head wait_list;
68219 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
68220- struct thread_info *owner;
68221+ struct task_struct *owner;
68222 #endif
68223 #ifdef CONFIG_DEBUG_MUTEXES
68224 const char *name;
68225diff --git a/include/linux/namei.h b/include/linux/namei.h
68226index ec0f607..d19e675 100644
68227--- a/include/linux/namei.h
68228+++ b/include/linux/namei.h
68229@@ -22,7 +22,7 @@ struct nameidata {
68230 unsigned int flags;
68231 int last_type;
68232 unsigned depth;
68233- char *saved_names[MAX_NESTED_LINKS + 1];
68234+ const char *saved_names[MAX_NESTED_LINKS + 1];
68235
68236 /* Intent data */
68237 union {
68238@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
68239 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
68240 extern void unlock_rename(struct dentry *, struct dentry *);
68241
68242-static inline void nd_set_link(struct nameidata *nd, char *path)
68243+static inline void nd_set_link(struct nameidata *nd, const char *path)
68244 {
68245 nd->saved_names[nd->depth] = path;
68246 }
68247
68248-static inline char *nd_get_link(struct nameidata *nd)
68249+static inline const char *nd_get_link(const struct nameidata *nd)
68250 {
68251 return nd->saved_names[nd->depth];
68252 }
68253diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
68254index 9d7e8f7..04428c5 100644
68255--- a/include/linux/netdevice.h
68256+++ b/include/linux/netdevice.h
68257@@ -637,6 +637,7 @@ struct net_device_ops {
68258 u16 xid);
68259 #endif
68260 };
68261+typedef struct net_device_ops __no_const net_device_ops_no_const;
68262
68263 /*
68264 * The DEVICE structure.
68265diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
68266new file mode 100644
68267index 0000000..33f4af8
68268--- /dev/null
68269+++ b/include/linux/netfilter/xt_gradm.h
68270@@ -0,0 +1,9 @@
68271+#ifndef _LINUX_NETFILTER_XT_GRADM_H
68272+#define _LINUX_NETFILTER_XT_GRADM_H 1
68273+
68274+struct xt_gradm_mtinfo {
68275+ __u16 flags;
68276+ __u16 invflags;
68277+};
68278+
68279+#endif
68280diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
68281index b359c4a..c08b334 100644
68282--- a/include/linux/nodemask.h
68283+++ b/include/linux/nodemask.h
68284@@ -464,11 +464,11 @@ static inline int num_node_state(enum node_states state)
68285
68286 #define any_online_node(mask) \
68287 ({ \
68288- int node; \
68289- for_each_node_mask(node, (mask)) \
68290- if (node_online(node)) \
68291+ int __node; \
68292+ for_each_node_mask(__node, (mask)) \
68293+ if (node_online(__node)) \
68294 break; \
68295- node; \
68296+ __node; \
68297 })
68298
68299 #define num_online_nodes() num_node_state(N_ONLINE)
68300diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
68301index 5171639..7cf4235 100644
68302--- a/include/linux/oprofile.h
68303+++ b/include/linux/oprofile.h
68304@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
68305 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
68306 char const * name, ulong * val);
68307
68308-/** Create a file for read-only access to an atomic_t. */
68309+/** Create a file for read-only access to an atomic_unchecked_t. */
68310 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
68311- char const * name, atomic_t * val);
68312+ char const * name, atomic_unchecked_t * val);
68313
68314 /** create a directory */
68315 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
68316diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
68317index 3c62ed4..8924c7c 100644
68318--- a/include/linux/pagemap.h
68319+++ b/include/linux/pagemap.h
68320@@ -425,7 +425,9 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
68321 if (((unsigned long)uaddr & PAGE_MASK) !=
68322 ((unsigned long)end & PAGE_MASK))
68323 ret = __get_user(c, end);
68324+ (void)c;
68325 }
68326+ (void)c;
68327 return ret;
68328 }
68329
68330diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
68331index 81c9689..a567a55 100644
68332--- a/include/linux/perf_event.h
68333+++ b/include/linux/perf_event.h
68334@@ -476,7 +476,7 @@ struct hw_perf_event {
68335 struct hrtimer hrtimer;
68336 };
68337 };
68338- atomic64_t prev_count;
68339+ atomic64_unchecked_t prev_count;
68340 u64 sample_period;
68341 u64 last_period;
68342 atomic64_t period_left;
68343@@ -557,7 +557,7 @@ struct perf_event {
68344 const struct pmu *pmu;
68345
68346 enum perf_event_active_state state;
68347- atomic64_t count;
68348+ atomic64_unchecked_t count;
68349
68350 /*
68351 * These are the total time in nanoseconds that the event
68352@@ -595,8 +595,8 @@ struct perf_event {
68353 * These accumulate total time (in nanoseconds) that children
68354 * events have been enabled and running, respectively.
68355 */
68356- atomic64_t child_total_time_enabled;
68357- atomic64_t child_total_time_running;
68358+ atomic64_unchecked_t child_total_time_enabled;
68359+ atomic64_unchecked_t child_total_time_running;
68360
68361 /*
68362 * Protect attach/detach and child_list:
68363diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
68364index b43a9e0..b77d869 100644
68365--- a/include/linux/pipe_fs_i.h
68366+++ b/include/linux/pipe_fs_i.h
68367@@ -46,9 +46,9 @@ struct pipe_inode_info {
68368 wait_queue_head_t wait;
68369 unsigned int nrbufs, curbuf;
68370 struct page *tmp_page;
68371- unsigned int readers;
68372- unsigned int writers;
68373- unsigned int waiting_writers;
68374+ atomic_t readers;
68375+ atomic_t writers;
68376+ atomic_t waiting_writers;
68377 unsigned int r_counter;
68378 unsigned int w_counter;
68379 struct fasync_struct *fasync_readers;
68380diff --git a/include/linux/poison.h b/include/linux/poison.h
68381index 34066ff..e95d744 100644
68382--- a/include/linux/poison.h
68383+++ b/include/linux/poison.h
68384@@ -19,8 +19,8 @@
68385 * under normal circumstances, used to verify that nobody uses
68386 * non-initialized list entries.
68387 */
68388-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
68389-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
68390+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
68391+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
68392
68393 /********** include/linux/timer.h **********/
68394 /*
68395diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
68396index 4f71bf4..77ffa64 100644
68397--- a/include/linux/posix-timers.h
68398+++ b/include/linux/posix-timers.h
68399@@ -67,7 +67,7 @@ struct k_itimer {
68400 };
68401
68402 struct k_clock {
68403- int res; /* in nanoseconds */
68404+ const int res; /* in nanoseconds */
68405 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
68406 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
68407 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
68408diff --git a/include/linux/preempt.h b/include/linux/preempt.h
68409index 72b1a10..13303a9 100644
68410--- a/include/linux/preempt.h
68411+++ b/include/linux/preempt.h
68412@@ -110,7 +110,7 @@ struct preempt_ops {
68413 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
68414 void (*sched_out)(struct preempt_notifier *notifier,
68415 struct task_struct *next);
68416-};
68417+} __no_const;
68418
68419 /**
68420 * preempt_notifier - key for installing preemption notifiers
68421diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
68422index 379eaed..1bf73e3 100644
68423--- a/include/linux/proc_fs.h
68424+++ b/include/linux/proc_fs.h
68425@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *proc_create(const char *name, mode_t mode,
68426 return proc_create_data(name, mode, parent, proc_fops, NULL);
68427 }
68428
68429+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
68430+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
68431+{
68432+#ifdef CONFIG_GRKERNSEC_PROC_USER
68433+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
68434+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
68435+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
68436+#else
68437+ return proc_create_data(name, mode, parent, proc_fops, NULL);
68438+#endif
68439+}
68440+
68441+
68442 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
68443 mode_t mode, struct proc_dir_entry *base,
68444 read_proc_t *read_proc, void * data)
68445@@ -256,7 +269,7 @@ union proc_op {
68446 int (*proc_show)(struct seq_file *m,
68447 struct pid_namespace *ns, struct pid *pid,
68448 struct task_struct *task);
68449-};
68450+} __no_const;
68451
68452 struct ctl_table_header;
68453 struct ctl_table;
68454diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
68455index 7456d7d..6c1cfc9 100644
68456--- a/include/linux/ptrace.h
68457+++ b/include/linux/ptrace.h
68458@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_struct *child);
68459 extern void exit_ptrace(struct task_struct *tracer);
68460 #define PTRACE_MODE_READ 1
68461 #define PTRACE_MODE_ATTACH 2
68462-/* Returns 0 on success, -errno on denial. */
68463-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
68464 /* Returns true on success, false on denial. */
68465 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
68466+/* Returns true on success, false on denial. */
68467+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
68468
68469 static inline int ptrace_reparented(struct task_struct *child)
68470 {
68471diff --git a/include/linux/random.h b/include/linux/random.h
68472index 2948046..3262567 100644
68473--- a/include/linux/random.h
68474+++ b/include/linux/random.h
68475@@ -63,6 +63,11 @@ unsigned long randomize_range(unsigned long start, unsigned long end, unsigned l
68476 u32 random32(void);
68477 void srandom32(u32 seed);
68478
68479+static inline unsigned long pax_get_random_long(void)
68480+{
68481+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
68482+}
68483+
68484 #endif /* __KERNEL___ */
68485
68486 #endif /* _LINUX_RANDOM_H */
68487diff --git a/include/linux/reboot.h b/include/linux/reboot.h
68488index 988e55f..17cb4ef 100644
68489--- a/include/linux/reboot.h
68490+++ b/include/linux/reboot.h
68491@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
68492 * Architecture-specific implementations of sys_reboot commands.
68493 */
68494
68495-extern void machine_restart(char *cmd);
68496-extern void machine_halt(void);
68497-extern void machine_power_off(void);
68498+extern void machine_restart(char *cmd) __noreturn;
68499+extern void machine_halt(void) __noreturn;
68500+extern void machine_power_off(void) __noreturn;
68501
68502 extern void machine_shutdown(void);
68503 struct pt_regs;
68504@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
68505 */
68506
68507 extern void kernel_restart_prepare(char *cmd);
68508-extern void kernel_restart(char *cmd);
68509-extern void kernel_halt(void);
68510-extern void kernel_power_off(void);
68511+extern void kernel_restart(char *cmd) __noreturn;
68512+extern void kernel_halt(void) __noreturn;
68513+extern void kernel_power_off(void) __noreturn;
68514
68515 void ctrl_alt_del(void);
68516
68517@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
68518 * Emergency restart, callable from an interrupt handler.
68519 */
68520
68521-extern void emergency_restart(void);
68522+extern void emergency_restart(void) __noreturn;
68523 #include <asm/emergency-restart.h>
68524
68525 #endif
68526diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h
68527index dd31e7b..5b03c5c 100644
68528--- a/include/linux/reiserfs_fs.h
68529+++ b/include/linux/reiserfs_fs.h
68530@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
68531 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
68532
68533 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
68534-#define get_generation(s) atomic_read (&fs_generation(s))
68535+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
68536 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
68537 #define __fs_changed(gen,s) (gen != get_generation (s))
68538 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
68539@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_from_bi(struct buffer_info *bi)
68540 */
68541
68542 struct item_operations {
68543- int (*bytes_number) (struct item_head * ih, int block_size);
68544- void (*decrement_key) (struct cpu_key *);
68545- int (*is_left_mergeable) (struct reiserfs_key * ih,
68546+ int (* const bytes_number) (struct item_head * ih, int block_size);
68547+ void (* const decrement_key) (struct cpu_key *);
68548+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
68549 unsigned long bsize);
68550- void (*print_item) (struct item_head *, char *item);
68551- void (*check_item) (struct item_head *, char *item);
68552+ void (* const print_item) (struct item_head *, char *item);
68553+ void (* const check_item) (struct item_head *, char *item);
68554
68555- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68556+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
68557 int is_affected, int insert_size);
68558- int (*check_left) (struct virtual_item * vi, int free,
68559+ int (* const check_left) (struct virtual_item * vi, int free,
68560 int start_skip, int end_skip);
68561- int (*check_right) (struct virtual_item * vi, int free);
68562- int (*part_size) (struct virtual_item * vi, int from, int to);
68563- int (*unit_num) (struct virtual_item * vi);
68564- void (*print_vi) (struct virtual_item * vi);
68565+ int (* const check_right) (struct virtual_item * vi, int free);
68566+ int (* const part_size) (struct virtual_item * vi, int from, int to);
68567+ int (* const unit_num) (struct virtual_item * vi);
68568+ void (* const print_vi) (struct virtual_item * vi);
68569 };
68570
68571-extern struct item_operations *item_ops[TYPE_ANY + 1];
68572+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
68573
68574 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
68575 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
68576diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
68577index dab68bb..0688727 100644
68578--- a/include/linux/reiserfs_fs_sb.h
68579+++ b/include/linux/reiserfs_fs_sb.h
68580@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
68581 /* Comment? -Hans */
68582 wait_queue_head_t s_wait;
68583 /* To be obsoleted soon by per buffer seals.. -Hans */
68584- atomic_t s_generation_counter; // increased by one every time the
68585+ atomic_unchecked_t s_generation_counter; // increased by one every time the
68586 // tree gets re-balanced
68587 unsigned long s_properties; /* File system properties. Currently holds
68588 on-disk FS format */
68589diff --git a/include/linux/relay.h b/include/linux/relay.h
68590index 14a86bc..17d0700 100644
68591--- a/include/linux/relay.h
68592+++ b/include/linux/relay.h
68593@@ -159,7 +159,7 @@ struct rchan_callbacks
68594 * The callback should return 0 if successful, negative if not.
68595 */
68596 int (*remove_buf_file)(struct dentry *dentry);
68597-};
68598+} __no_const;
68599
68600 /*
68601 * CONFIG_RELAY kernel API, kernel/relay.c
68602diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
68603index 3392c59..a746428 100644
68604--- a/include/linux/rfkill.h
68605+++ b/include/linux/rfkill.h
68606@@ -144,6 +144,7 @@ struct rfkill_ops {
68607 void (*query)(struct rfkill *rfkill, void *data);
68608 int (*set_block)(void *data, bool blocked);
68609 };
68610+typedef struct rfkill_ops __no_const rfkill_ops_no_const;
68611
68612 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
68613 /**
68614diff --git a/include/linux/sched.h b/include/linux/sched.h
68615index 71849bf..0ad2f74 100644
68616--- a/include/linux/sched.h
68617+++ b/include/linux/sched.h
68618@@ -101,6 +101,7 @@ struct bio;
68619 struct fs_struct;
68620 struct bts_context;
68621 struct perf_event_context;
68622+struct linux_binprm;
68623
68624 /*
68625 * List of flags we want to share for kernel threads,
68626@@ -350,7 +351,7 @@ extern signed long schedule_timeout_killable(signed long timeout);
68627 extern signed long schedule_timeout_uninterruptible(signed long timeout);
68628 asmlinkage void __schedule(void);
68629 asmlinkage void schedule(void);
68630-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
68631+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
68632
68633 struct nsproxy;
68634 struct user_namespace;
68635@@ -371,9 +372,12 @@ struct user_namespace;
68636 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
68637
68638 extern int sysctl_max_map_count;
68639+extern unsigned long sysctl_heap_stack_gap;
68640
68641 #include <linux/aio.h>
68642
68643+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
68644+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
68645 extern unsigned long
68646 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
68647 unsigned long, unsigned long);
68648@@ -666,6 +670,16 @@ struct signal_struct {
68649 struct tty_audit_buf *tty_audit_buf;
68650 #endif
68651
68652+#ifdef CONFIG_GRKERNSEC
68653+ u32 curr_ip;
68654+ u32 saved_ip;
68655+ u32 gr_saddr;
68656+ u32 gr_daddr;
68657+ u16 gr_sport;
68658+ u16 gr_dport;
68659+ u8 used_accept:1;
68660+#endif
68661+
68662 int oom_adj; /* OOM kill score adjustment (bit shift) */
68663 };
68664
68665@@ -723,6 +737,11 @@ struct user_struct {
68666 struct key *session_keyring; /* UID's default session keyring */
68667 #endif
68668
68669+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
68670+ unsigned int banned;
68671+ unsigned long ban_expires;
68672+#endif
68673+
68674 /* Hash table maintenance information */
68675 struct hlist_node uidhash_node;
68676 uid_t uid;
68677@@ -1328,8 +1347,8 @@ struct task_struct {
68678 struct list_head thread_group;
68679
68680 struct completion *vfork_done; /* for vfork() */
68681- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
68682- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68683+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
68684+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
68685
68686 cputime_t utime, stime, utimescaled, stimescaled;
68687 cputime_t gtime;
68688@@ -1343,16 +1362,6 @@ struct task_struct {
68689 struct task_cputime cputime_expires;
68690 struct list_head cpu_timers[3];
68691
68692-/* process credentials */
68693- const struct cred *real_cred; /* objective and real subjective task
68694- * credentials (COW) */
68695- const struct cred *cred; /* effective (overridable) subjective task
68696- * credentials (COW) */
68697- struct mutex cred_guard_mutex; /* guard against foreign influences on
68698- * credential calculations
68699- * (notably. ptrace) */
68700- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68701-
68702 char comm[TASK_COMM_LEN]; /* executable name excluding path
68703 - access with [gs]et_task_comm (which lock
68704 it with task_lock())
68705@@ -1369,6 +1378,10 @@ struct task_struct {
68706 #endif
68707 /* CPU-specific state of this task */
68708 struct thread_struct thread;
68709+/* thread_info moved to task_struct */
68710+#ifdef CONFIG_X86
68711+ struct thread_info tinfo;
68712+#endif
68713 /* filesystem information */
68714 struct fs_struct *fs;
68715 /* open file information */
68716@@ -1436,6 +1449,15 @@ struct task_struct {
68717 int hardirq_context;
68718 int softirq_context;
68719 #endif
68720+
68721+/* process credentials */
68722+ const struct cred *real_cred; /* objective and real subjective task
68723+ * credentials (COW) */
68724+ struct mutex cred_guard_mutex; /* guard against foreign influences on
68725+ * credential calculations
68726+ * (notably. ptrace) */
68727+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
68728+
68729 #ifdef CONFIG_LOCKDEP
68730 # define MAX_LOCK_DEPTH 48UL
68731 u64 curr_chain_key;
68732@@ -1456,6 +1478,9 @@ struct task_struct {
68733
68734 struct backing_dev_info *backing_dev_info;
68735
68736+ const struct cred *cred; /* effective (overridable) subjective task
68737+ * credentials (COW) */
68738+
68739 struct io_context *io_context;
68740
68741 unsigned long ptrace_message;
68742@@ -1519,6 +1544,24 @@ struct task_struct {
68743 unsigned long default_timer_slack_ns;
68744
68745 struct list_head *scm_work_list;
68746+
68747+#ifdef CONFIG_GRKERNSEC
68748+ /* grsecurity */
68749+#ifdef CONFIG_GRKERNSEC_SETXID
68750+ const struct cred *delayed_cred;
68751+#endif
68752+ struct dentry *gr_chroot_dentry;
68753+ struct acl_subject_label *acl;
68754+ struct acl_role_label *role;
68755+ struct file *exec_file;
68756+ u16 acl_role_id;
68757+ /* is this the task that authenticated to the special role */
68758+ u8 acl_sp_role;
68759+ u8 is_writable;
68760+ u8 brute;
68761+ u8 gr_is_chrooted;
68762+#endif
68763+
68764 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
68765 /* Index of current stored adress in ret_stack */
68766 int curr_ret_stack;
68767@@ -1542,6 +1585,57 @@ struct task_struct {
68768 #endif /* CONFIG_TRACING */
68769 };
68770
68771+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
68772+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
68773+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
68774+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
68775+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
68776+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
68777+
68778+#ifdef CONFIG_PAX_SOFTMODE
68779+extern int pax_softmode;
68780+#endif
68781+
68782+extern int pax_check_flags(unsigned long *);
68783+
68784+/* if tsk != current then task_lock must be held on it */
68785+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
68786+static inline unsigned long pax_get_flags(struct task_struct *tsk)
68787+{
68788+ if (likely(tsk->mm))
68789+ return tsk->mm->pax_flags;
68790+ else
68791+ return 0UL;
68792+}
68793+
68794+/* if tsk != current then task_lock must be held on it */
68795+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
68796+{
68797+ if (likely(tsk->mm)) {
68798+ tsk->mm->pax_flags = flags;
68799+ return 0;
68800+ }
68801+ return -EINVAL;
68802+}
68803+#endif
68804+
68805+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
68806+extern void pax_set_initial_flags(struct linux_binprm *bprm);
68807+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
68808+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
68809+#endif
68810+
68811+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
68812+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
68813+extern void pax_report_refcount_overflow(struct pt_regs *regs);
68814+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
68815+
68816+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
68817+extern void pax_track_stack(void);
68818+#else
68819+static inline void pax_track_stack(void) {}
68820+#endif
68821+
68822 /* Future-safe accessor for struct task_struct's cpus_allowed. */
68823 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
68824
68825@@ -1740,7 +1834,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
68826 #define PF_DUMPCORE 0x00000200 /* dumped core */
68827 #define PF_SIGNALED 0x00000400 /* killed by a signal */
68828 #define PF_MEMALLOC 0x00000800 /* Allocating memory */
68829-#define PF_FLUSHER 0x00001000 /* responsible for disk writeback */
68830+#define PF_NPROC_EXCEEDED 0x00001000 /* set_user noticed that RLIMIT_NPROC was exceeded */
68831 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */
68832 #define PF_FREEZING 0x00004000 /* freeze in progress. do not account to load */
68833 #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */
68834@@ -1978,7 +2072,9 @@ void yield(void);
68835 extern struct exec_domain default_exec_domain;
68836
68837 union thread_union {
68838+#ifndef CONFIG_X86
68839 struct thread_info thread_info;
68840+#endif
68841 unsigned long stack[THREAD_SIZE/sizeof(long)];
68842 };
68843
68844@@ -2011,6 +2107,7 @@ extern struct pid_namespace init_pid_ns;
68845 */
68846
68847 extern struct task_struct *find_task_by_vpid(pid_t nr);
68848+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
68849 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
68850 struct pid_namespace *ns);
68851
68852@@ -2155,7 +2252,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
68853 extern void exit_itimers(struct signal_struct *);
68854 extern void flush_itimer_signals(void);
68855
68856-extern NORET_TYPE void do_group_exit(int);
68857+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
68858
68859 extern void daemonize(const char *, ...);
68860 extern int allow_signal(int);
68861@@ -2284,13 +2381,17 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
68862
68863 #endif
68864
68865-static inline int object_is_on_stack(void *obj)
68866+static inline int object_starts_on_stack(void *obj)
68867 {
68868- void *stack = task_stack_page(current);
68869+ const void *stack = task_stack_page(current);
68870
68871 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
68872 }
68873
68874+#ifdef CONFIG_PAX_USERCOPY
68875+extern int object_is_on_stack(const void *obj, unsigned long len);
68876+#endif
68877+
68878 extern void thread_info_cache_init(void);
68879
68880 #ifdef CONFIG_DEBUG_STACK_USAGE
68881diff --git a/include/linux/screen_info.h b/include/linux/screen_info.h
68882index 1ee2c05..81b7ec4 100644
68883--- a/include/linux/screen_info.h
68884+++ b/include/linux/screen_info.h
68885@@ -42,7 +42,8 @@ struct screen_info {
68886 __u16 pages; /* 0x32 */
68887 __u16 vesa_attributes; /* 0x34 */
68888 __u32 capabilities; /* 0x36 */
68889- __u8 _reserved[6]; /* 0x3a */
68890+ __u16 vesapm_size; /* 0x3a */
68891+ __u8 _reserved[4]; /* 0x3c */
68892 } __attribute__((packed));
68893
68894 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
68895diff --git a/include/linux/security.h b/include/linux/security.h
68896index d40d23f..d739b08 100644
68897--- a/include/linux/security.h
68898+++ b/include/linux/security.h
68899@@ -34,6 +34,7 @@
68900 #include <linux/key.h>
68901 #include <linux/xfrm.h>
68902 #include <linux/gfp.h>
68903+#include <linux/grsecurity.h>
68904 #include <net/flow.h>
68905
68906 /* Maximum number of letters for an LSM name string */
68907@@ -76,7 +77,7 @@ extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
68908 extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp);
68909 extern int cap_task_setioprio(struct task_struct *p, int ioprio);
68910 extern int cap_task_setnice(struct task_struct *p, int nice);
68911-extern int cap_syslog(int type);
68912+extern int cap_syslog(int type, bool from_file);
68913 extern int cap_vm_enough_memory(struct mm_struct *mm, long pages);
68914
68915 struct msghdr;
68916@@ -1331,6 +1332,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
68917 * logging to the console.
68918 * See the syslog(2) manual page for an explanation of the @type values.
68919 * @type contains the type of action.
68920+ * @from_file indicates the context of action (if it came from /proc).
68921 * Return 0 if permission is granted.
68922 * @settime:
68923 * Check permission to change the system time.
68924@@ -1445,7 +1447,7 @@ struct security_operations {
68925 int (*sysctl) (struct ctl_table *table, int op);
68926 int (*quotactl) (int cmds, int type, int id, struct super_block *sb);
68927 int (*quota_on) (struct dentry *dentry);
68928- int (*syslog) (int type);
68929+ int (*syslog) (int type, bool from_file);
68930 int (*settime) (struct timespec *ts, struct timezone *tz);
68931 int (*vm_enough_memory) (struct mm_struct *mm, long pages);
68932
68933@@ -1740,7 +1742,7 @@ int security_acct(struct file *file);
68934 int security_sysctl(struct ctl_table *table, int op);
68935 int security_quotactl(int cmds, int type, int id, struct super_block *sb);
68936 int security_quota_on(struct dentry *dentry);
68937-int security_syslog(int type);
68938+int security_syslog(int type, bool from_file);
68939 int security_settime(struct timespec *ts, struct timezone *tz);
68940 int security_vm_enough_memory(long pages);
68941 int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
68942@@ -1986,9 +1988,9 @@ static inline int security_quota_on(struct dentry *dentry)
68943 return 0;
68944 }
68945
68946-static inline int security_syslog(int type)
68947+static inline int security_syslog(int type, bool from_file)
68948 {
68949- return cap_syslog(type);
68950+ return cap_syslog(type, from_file);
68951 }
68952
68953 static inline int security_settime(struct timespec *ts, struct timezone *tz)
68954diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
68955index 8366d8f..2307490 100644
68956--- a/include/linux/seq_file.h
68957+++ b/include/linux/seq_file.h
68958@@ -32,6 +32,7 @@ struct seq_operations {
68959 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
68960 int (*show) (struct seq_file *m, void *v);
68961 };
68962+typedef struct seq_operations __no_const seq_operations_no_const;
68963
68964 #define SEQ_SKIP 1
68965
68966diff --git a/include/linux/shm.h b/include/linux/shm.h
68967index eca6235..c7417ed 100644
68968--- a/include/linux/shm.h
68969+++ b/include/linux/shm.h
68970@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the kernel */
68971 pid_t shm_cprid;
68972 pid_t shm_lprid;
68973 struct user_struct *mlock_user;
68974+#ifdef CONFIG_GRKERNSEC
68975+ time_t shm_createtime;
68976+ pid_t shm_lapid;
68977+#endif
68978 };
68979
68980 /* shm_mode upper byte flags */
68981diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
68982index bcdd660..6e12e11 100644
68983--- a/include/linux/skbuff.h
68984+++ b/include/linux/skbuff.h
68985@@ -14,6 +14,7 @@
68986 #ifndef _LINUX_SKBUFF_H
68987 #define _LINUX_SKBUFF_H
68988
68989+#include <linux/const.h>
68990 #include <linux/kernel.h>
68991 #include <linux/kmemcheck.h>
68992 #include <linux/compiler.h>
68993@@ -544,7 +545,7 @@ static inline union skb_shared_tx *skb_tx(struct sk_buff *skb)
68994 */
68995 static inline int skb_queue_empty(const struct sk_buff_head *list)
68996 {
68997- return list->next == (struct sk_buff *)list;
68998+ return list->next == (const struct sk_buff *)list;
68999 }
69000
69001 /**
69002@@ -557,7 +558,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
69003 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69004 const struct sk_buff *skb)
69005 {
69006- return (skb->next == (struct sk_buff *) list);
69007+ return (skb->next == (const struct sk_buff *) list);
69008 }
69009
69010 /**
69011@@ -570,7 +571,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
69012 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
69013 const struct sk_buff *skb)
69014 {
69015- return (skb->prev == (struct sk_buff *) list);
69016+ return (skb->prev == (const struct sk_buff *) list);
69017 }
69018
69019 /**
69020@@ -1367,7 +1368,7 @@ static inline int skb_network_offset(const struct sk_buff *skb)
69021 * headroom, you should not reduce this.
69022 */
69023 #ifndef NET_SKB_PAD
69024-#define NET_SKB_PAD 32
69025+#define NET_SKB_PAD (_AC(32,UL))
69026 #endif
69027
69028 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
69029diff --git a/include/linux/slab.h b/include/linux/slab.h
69030index 2da8372..a3be824 100644
69031--- a/include/linux/slab.h
69032+++ b/include/linux/slab.h
69033@@ -11,12 +11,20 @@
69034
69035 #include <linux/gfp.h>
69036 #include <linux/types.h>
69037+#include <linux/err.h>
69038
69039 /*
69040 * Flags to pass to kmem_cache_create().
69041 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
69042 */
69043 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
69044+
69045+#ifdef CONFIG_PAX_USERCOPY
69046+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
69047+#else
69048+#define SLAB_USERCOPY 0x00000000UL
69049+#endif
69050+
69051 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
69052 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
69053 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
69054@@ -82,10 +90,13 @@
69055 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
69056 * Both make kfree a no-op.
69057 */
69058-#define ZERO_SIZE_PTR ((void *)16)
69059+#define ZERO_SIZE_PTR \
69060+({ \
69061+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
69062+ (void *)(-MAX_ERRNO-1L); \
69063+})
69064
69065-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
69066- (unsigned long)ZERO_SIZE_PTR)
69067+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
69068
69069 /*
69070 * struct kmem_cache related prototypes
69071@@ -138,6 +149,7 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
69072 void kfree(const void *);
69073 void kzfree(const void *);
69074 size_t ksize(const void *);
69075+void check_object_size(const void *ptr, unsigned long n, bool to);
69076
69077 /*
69078 * Allocator specific definitions. These are mainly used to establish optimized
69079@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
69080
69081 void __init kmem_cache_init_late(void);
69082
69083+#define kmalloc(x, y) \
69084+({ \
69085+ void *___retval; \
69086+ intoverflow_t ___x = (intoverflow_t)x; \
69087+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
69088+ ___retval = NULL; \
69089+ else \
69090+ ___retval = kmalloc((size_t)___x, (y)); \
69091+ ___retval; \
69092+})
69093+
69094+#define kmalloc_node(x, y, z) \
69095+({ \
69096+ void *___retval; \
69097+ intoverflow_t ___x = (intoverflow_t)x; \
69098+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
69099+ ___retval = NULL; \
69100+ else \
69101+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
69102+ ___retval; \
69103+})
69104+
69105+#define kzalloc(x, y) \
69106+({ \
69107+ void *___retval; \
69108+ intoverflow_t ___x = (intoverflow_t)x; \
69109+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
69110+ ___retval = NULL; \
69111+ else \
69112+ ___retval = kzalloc((size_t)___x, (y)); \
69113+ ___retval; \
69114+})
69115+
69116 #endif /* _LINUX_SLAB_H */
69117diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
69118index 850d057..d9dfe3c 100644
69119--- a/include/linux/slab_def.h
69120+++ b/include/linux/slab_def.h
69121@@ -69,10 +69,10 @@ struct kmem_cache {
69122 unsigned long node_allocs;
69123 unsigned long node_frees;
69124 unsigned long node_overflow;
69125- atomic_t allochit;
69126- atomic_t allocmiss;
69127- atomic_t freehit;
69128- atomic_t freemiss;
69129+ atomic_unchecked_t allochit;
69130+ atomic_unchecked_t allocmiss;
69131+ atomic_unchecked_t freehit;
69132+ atomic_unchecked_t freemiss;
69133
69134 /*
69135 * If debugging is enabled, then the allocator can add additional
69136diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
69137index 5ad70a6..57f9f65 100644
69138--- a/include/linux/slub_def.h
69139+++ b/include/linux/slub_def.h
69140@@ -86,7 +86,7 @@ struct kmem_cache {
69141 struct kmem_cache_order_objects max;
69142 struct kmem_cache_order_objects min;
69143 gfp_t allocflags; /* gfp flags to use on each alloc */
69144- int refcount; /* Refcount for slab cache destroy */
69145+ atomic_t refcount; /* Refcount for slab cache destroy */
69146 void (*ctor)(void *);
69147 int inuse; /* Offset to metadata */
69148 int align; /* Alignment */
69149@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
69150 #endif
69151
69152 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
69153-void *__kmalloc(size_t size, gfp_t flags);
69154+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
69155
69156 #ifdef CONFIG_KMEMTRACE
69157 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
69158diff --git a/include/linux/sonet.h b/include/linux/sonet.h
69159index 67ad11f..0bbd8af 100644
69160--- a/include/linux/sonet.h
69161+++ b/include/linux/sonet.h
69162@@ -61,7 +61,7 @@ struct sonet_stats {
69163 #include <asm/atomic.h>
69164
69165 struct k_sonet_stats {
69166-#define __HANDLE_ITEM(i) atomic_t i
69167+#define __HANDLE_ITEM(i) atomic_unchecked_t i
69168 __SONET_ITEMS
69169 #undef __HANDLE_ITEM
69170 };
69171diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
69172index 6f52b4d..5500323 100644
69173--- a/include/linux/sunrpc/cache.h
69174+++ b/include/linux/sunrpc/cache.h
69175@@ -125,7 +125,7 @@ struct cache_detail {
69176 */
69177 struct cache_req {
69178 struct cache_deferred_req *(*defer)(struct cache_req *req);
69179-};
69180+} __no_const;
69181 /* this must be embedded in a deferred_request that is being
69182 * delayed awaiting cache-fill
69183 */
69184diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
69185index 8ed9642..101ceab 100644
69186--- a/include/linux/sunrpc/clnt.h
69187+++ b/include/linux/sunrpc/clnt.h
69188@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
69189 {
69190 switch (sap->sa_family) {
69191 case AF_INET:
69192- return ntohs(((struct sockaddr_in *)sap)->sin_port);
69193+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
69194 case AF_INET6:
69195- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
69196+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
69197 }
69198 return 0;
69199 }
69200@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
69201 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
69202 const struct sockaddr *src)
69203 {
69204- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
69205+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
69206 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
69207
69208 dsin->sin_family = ssin->sin_family;
69209@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
69210 if (sa->sa_family != AF_INET6)
69211 return 0;
69212
69213- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
69214+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
69215 }
69216
69217 #endif /* __KERNEL__ */
69218diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
69219index c14fe86..393245e 100644
69220--- a/include/linux/sunrpc/svc_rdma.h
69221+++ b/include/linux/sunrpc/svc_rdma.h
69222@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
69223 extern unsigned int svcrdma_max_requests;
69224 extern unsigned int svcrdma_max_req_size;
69225
69226-extern atomic_t rdma_stat_recv;
69227-extern atomic_t rdma_stat_read;
69228-extern atomic_t rdma_stat_write;
69229-extern atomic_t rdma_stat_sq_starve;
69230-extern atomic_t rdma_stat_rq_starve;
69231-extern atomic_t rdma_stat_rq_poll;
69232-extern atomic_t rdma_stat_rq_prod;
69233-extern atomic_t rdma_stat_sq_poll;
69234-extern atomic_t rdma_stat_sq_prod;
69235+extern atomic_unchecked_t rdma_stat_recv;
69236+extern atomic_unchecked_t rdma_stat_read;
69237+extern atomic_unchecked_t rdma_stat_write;
69238+extern atomic_unchecked_t rdma_stat_sq_starve;
69239+extern atomic_unchecked_t rdma_stat_rq_starve;
69240+extern atomic_unchecked_t rdma_stat_rq_poll;
69241+extern atomic_unchecked_t rdma_stat_rq_prod;
69242+extern atomic_unchecked_t rdma_stat_sq_poll;
69243+extern atomic_unchecked_t rdma_stat_sq_prod;
69244
69245 #define RPCRDMA_VERSION 1
69246
69247diff --git a/include/linux/suspend.h b/include/linux/suspend.h
69248index 5e781d8..1e62818 100644
69249--- a/include/linux/suspend.h
69250+++ b/include/linux/suspend.h
69251@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
69252 * which require special recovery actions in that situation.
69253 */
69254 struct platform_suspend_ops {
69255- int (*valid)(suspend_state_t state);
69256- int (*begin)(suspend_state_t state);
69257- int (*prepare)(void);
69258- int (*prepare_late)(void);
69259- int (*enter)(suspend_state_t state);
69260- void (*wake)(void);
69261- void (*finish)(void);
69262- void (*end)(void);
69263- void (*recover)(void);
69264+ int (* const valid)(suspend_state_t state);
69265+ int (* const begin)(suspend_state_t state);
69266+ int (* const prepare)(void);
69267+ int (* const prepare_late)(void);
69268+ int (* const enter)(suspend_state_t state);
69269+ void (* const wake)(void);
69270+ void (* const finish)(void);
69271+ void (* const end)(void);
69272+ void (* const recover)(void);
69273 };
69274
69275 #ifdef CONFIG_SUSPEND
69276@@ -120,7 +120,7 @@ struct platform_suspend_ops {
69277 * suspend_set_ops - set platform dependent suspend operations
69278 * @ops: The new suspend operations to set.
69279 */
69280-extern void suspend_set_ops(struct platform_suspend_ops *ops);
69281+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
69282 extern int suspend_valid_only_mem(suspend_state_t state);
69283
69284 /**
69285@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t state);
69286 #else /* !CONFIG_SUSPEND */
69287 #define suspend_valid_only_mem NULL
69288
69289-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
69290+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
69291 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
69292 #endif /* !CONFIG_SUSPEND */
69293
69294@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone *zone);
69295 * platforms which require special recovery actions in that situation.
69296 */
69297 struct platform_hibernation_ops {
69298- int (*begin)(void);
69299- void (*end)(void);
69300- int (*pre_snapshot)(void);
69301- void (*finish)(void);
69302- int (*prepare)(void);
69303- int (*enter)(void);
69304- void (*leave)(void);
69305- int (*pre_restore)(void);
69306- void (*restore_cleanup)(void);
69307- void (*recover)(void);
69308+ int (* const begin)(void);
69309+ void (* const end)(void);
69310+ int (* const pre_snapshot)(void);
69311+ void (* const finish)(void);
69312+ int (* const prepare)(void);
69313+ int (* const enter)(void);
69314+ void (* const leave)(void);
69315+ int (* const pre_restore)(void);
69316+ void (* const restore_cleanup)(void);
69317+ void (* const recover)(void);
69318 };
69319
69320 #ifdef CONFIG_HIBERNATION
69321@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct page *);
69322 extern void swsusp_unset_page_free(struct page *);
69323 extern unsigned long get_safe_page(gfp_t gfp_mask);
69324
69325-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
69326+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
69327 extern int hibernate(void);
69328 extern bool system_entering_hibernation(void);
69329 #else /* CONFIG_HIBERNATION */
69330@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
69331 static inline void swsusp_set_page_free(struct page *p) {}
69332 static inline void swsusp_unset_page_free(struct page *p) {}
69333
69334-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
69335+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
69336 static inline int hibernate(void) { return -ENOSYS; }
69337 static inline bool system_entering_hibernation(void) { return false; }
69338 #endif /* CONFIG_HIBERNATION */
69339diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
69340index 0eb6942..a805cb6 100644
69341--- a/include/linux/sysctl.h
69342+++ b/include/linux/sysctl.h
69343@@ -164,7 +164,11 @@ enum
69344 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
69345 };
69346
69347-
69348+#ifdef CONFIG_PAX_SOFTMODE
69349+enum {
69350+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
69351+};
69352+#endif
69353
69354 /* CTL_VM names: */
69355 enum
69356@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_table *ctl, int write,
69357
69358 extern int proc_dostring(struct ctl_table *, int,
69359 void __user *, size_t *, loff_t *);
69360+extern int proc_dostring_modpriv(struct ctl_table *, int,
69361+ void __user *, size_t *, loff_t *);
69362 extern int proc_dointvec(struct ctl_table *, int,
69363 void __user *, size_t *, loff_t *);
69364 extern int proc_dointvec_minmax(struct ctl_table *, int,
69365@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name, int nlen,
69366
69367 extern ctl_handler sysctl_data;
69368 extern ctl_handler sysctl_string;
69369+extern ctl_handler sysctl_string_modpriv;
69370 extern ctl_handler sysctl_intvec;
69371 extern ctl_handler sysctl_jiffies;
69372 extern ctl_handler sysctl_ms_jiffies;
69373diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
69374index 9d68fed..71f02cc 100644
69375--- a/include/linux/sysfs.h
69376+++ b/include/linux/sysfs.h
69377@@ -75,8 +75,8 @@ struct bin_attribute {
69378 };
69379
69380 struct sysfs_ops {
69381- ssize_t (*show)(struct kobject *, struct attribute *,char *);
69382- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
69383+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
69384+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
69385 };
69386
69387 struct sysfs_dirent;
69388diff --git a/include/linux/syslog.h b/include/linux/syslog.h
69389new file mode 100644
69390index 0000000..3891139
69391--- /dev/null
69392+++ b/include/linux/syslog.h
69393@@ -0,0 +1,52 @@
69394+/* Syslog internals
69395+ *
69396+ * Copyright 2010 Canonical, Ltd.
69397+ * Author: Kees Cook <kees.cook@canonical.com>
69398+ *
69399+ * This program is free software; you can redistribute it and/or modify
69400+ * it under the terms of the GNU General Public License as published by
69401+ * the Free Software Foundation; either version 2, or (at your option)
69402+ * any later version.
69403+ *
69404+ * This program is distributed in the hope that it will be useful,
69405+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
69406+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
69407+ * GNU General Public License for more details.
69408+ *
69409+ * You should have received a copy of the GNU General Public License
69410+ * along with this program; see the file COPYING. If not, write to
69411+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
69412+ */
69413+
69414+#ifndef _LINUX_SYSLOG_H
69415+#define _LINUX_SYSLOG_H
69416+
69417+/* Close the log. Currently a NOP. */
69418+#define SYSLOG_ACTION_CLOSE 0
69419+/* Open the log. Currently a NOP. */
69420+#define SYSLOG_ACTION_OPEN 1
69421+/* Read from the log. */
69422+#define SYSLOG_ACTION_READ 2
69423+/* Read all messages remaining in the ring buffer. */
69424+#define SYSLOG_ACTION_READ_ALL 3
69425+/* Read and clear all messages remaining in the ring buffer */
69426+#define SYSLOG_ACTION_READ_CLEAR 4
69427+/* Clear ring buffer. */
69428+#define SYSLOG_ACTION_CLEAR 5
69429+/* Disable printk's to console */
69430+#define SYSLOG_ACTION_CONSOLE_OFF 6
69431+/* Enable printk's to console */
69432+#define SYSLOG_ACTION_CONSOLE_ON 7
69433+/* Set level of messages printed to console */
69434+#define SYSLOG_ACTION_CONSOLE_LEVEL 8
69435+/* Return number of unread characters in the log buffer */
69436+#define SYSLOG_ACTION_SIZE_UNREAD 9
69437+/* Return size of the log buffer */
69438+#define SYSLOG_ACTION_SIZE_BUFFER 10
69439+
69440+#define SYSLOG_FROM_CALL 0
69441+#define SYSLOG_FROM_FILE 1
69442+
69443+int do_syslog(int type, char __user *buf, int count, bool from_file);
69444+
69445+#endif /* _LINUX_SYSLOG_H */
69446diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
69447index a8cc4e1..98d3b85 100644
69448--- a/include/linux/thread_info.h
69449+++ b/include/linux/thread_info.h
69450@@ -23,7 +23,7 @@ struct restart_block {
69451 };
69452 /* For futex_wait and futex_wait_requeue_pi */
69453 struct {
69454- u32 *uaddr;
69455+ u32 __user *uaddr;
69456 u32 val;
69457 u32 flags;
69458 u32 bitset;
69459diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
69460index 1eb44a9..f582df3 100644
69461--- a/include/linux/tracehook.h
69462+++ b/include/linux/tracehook.h
69463@@ -69,12 +69,12 @@ static inline int tracehook_expect_breakpoints(struct task_struct *task)
69464 /*
69465 * ptrace report for syscall entry and exit looks identical.
69466 */
69467-static inline void ptrace_report_syscall(struct pt_regs *regs)
69468+static inline int ptrace_report_syscall(struct pt_regs *regs)
69469 {
69470 int ptrace = task_ptrace(current);
69471
69472 if (!(ptrace & PT_PTRACED))
69473- return;
69474+ return 0;
69475
69476 ptrace_notify(SIGTRAP | ((ptrace & PT_TRACESYSGOOD) ? 0x80 : 0));
69477
69478@@ -87,6 +87,8 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
69479 send_sig(current->exit_code, current, 1);
69480 current->exit_code = 0;
69481 }
69482+
69483+ return fatal_signal_pending(current);
69484 }
69485
69486 /**
69487@@ -111,8 +113,7 @@ static inline void ptrace_report_syscall(struct pt_regs *regs)
69488 static inline __must_check int tracehook_report_syscall_entry(
69489 struct pt_regs *regs)
69490 {
69491- ptrace_report_syscall(regs);
69492- return 0;
69493+ return ptrace_report_syscall(regs);
69494 }
69495
69496 /**
69497diff --git a/include/linux/tty.h b/include/linux/tty.h
69498index e9c57e9..ee6d489 100644
69499--- a/include/linux/tty.h
69500+++ b/include/linux/tty.h
69501@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
69502 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
69503 extern void tty_ldisc_enable(struct tty_struct *tty);
69504
69505-
69506 /* n_tty.c */
69507 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
69508
69509diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
69510index 0c4ee9b..9f7c426 100644
69511--- a/include/linux/tty_ldisc.h
69512+++ b/include/linux/tty_ldisc.h
69513@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
69514
69515 struct module *owner;
69516
69517- int refcount;
69518+ atomic_t refcount;
69519 };
69520
69521 struct tty_ldisc {
69522diff --git a/include/linux/types.h b/include/linux/types.h
69523index c42724f..d190eee 100644
69524--- a/include/linux/types.h
69525+++ b/include/linux/types.h
69526@@ -191,10 +191,26 @@ typedef struct {
69527 volatile int counter;
69528 } atomic_t;
69529
69530+#ifdef CONFIG_PAX_REFCOUNT
69531+typedef struct {
69532+ volatile int counter;
69533+} atomic_unchecked_t;
69534+#else
69535+typedef atomic_t atomic_unchecked_t;
69536+#endif
69537+
69538 #ifdef CONFIG_64BIT
69539 typedef struct {
69540 volatile long counter;
69541 } atomic64_t;
69542+
69543+#ifdef CONFIG_PAX_REFCOUNT
69544+typedef struct {
69545+ volatile long counter;
69546+} atomic64_unchecked_t;
69547+#else
69548+typedef atomic64_t atomic64_unchecked_t;
69549+#endif
69550 #endif
69551
69552 struct ustat {
69553diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
69554index 6b58367..53a3e8e 100644
69555--- a/include/linux/uaccess.h
69556+++ b/include/linux/uaccess.h
69557@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69558 long ret; \
69559 mm_segment_t old_fs = get_fs(); \
69560 \
69561- set_fs(KERNEL_DS); \
69562 pagefault_disable(); \
69563- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
69564- pagefault_enable(); \
69565+ set_fs(KERNEL_DS); \
69566+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
69567 set_fs(old_fs); \
69568+ pagefault_enable(); \
69569 ret; \
69570 })
69571
69572@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_user_nocache(void *to,
69573 * Safely read from address @src to the buffer at @dst. If a kernel fault
69574 * happens, handle that and return -EFAULT.
69575 */
69576-extern long probe_kernel_read(void *dst, void *src, size_t size);
69577+extern long probe_kernel_read(void *dst, const void *src, size_t size);
69578
69579 /*
69580 * probe_kernel_write(): safely attempt to write to a location
69581@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst, void *src, size_t size);
69582 * Safely write to address @dst from the buffer at @src. If a kernel fault
69583 * happens, handle that and return -EFAULT.
69584 */
69585-extern long probe_kernel_write(void *dst, void *src, size_t size);
69586+extern long probe_kernel_write(void *dst, const void *src, size_t size);
69587
69588 #endif /* __LINUX_UACCESS_H__ */
69589diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
69590index 99c1b4d..bb94261 100644
69591--- a/include/linux/unaligned/access_ok.h
69592+++ b/include/linux/unaligned/access_ok.h
69593@@ -6,32 +6,32 @@
69594
69595 static inline u16 get_unaligned_le16(const void *p)
69596 {
69597- return le16_to_cpup((__le16 *)p);
69598+ return le16_to_cpup((const __le16 *)p);
69599 }
69600
69601 static inline u32 get_unaligned_le32(const void *p)
69602 {
69603- return le32_to_cpup((__le32 *)p);
69604+ return le32_to_cpup((const __le32 *)p);
69605 }
69606
69607 static inline u64 get_unaligned_le64(const void *p)
69608 {
69609- return le64_to_cpup((__le64 *)p);
69610+ return le64_to_cpup((const __le64 *)p);
69611 }
69612
69613 static inline u16 get_unaligned_be16(const void *p)
69614 {
69615- return be16_to_cpup((__be16 *)p);
69616+ return be16_to_cpup((const __be16 *)p);
69617 }
69618
69619 static inline u32 get_unaligned_be32(const void *p)
69620 {
69621- return be32_to_cpup((__be32 *)p);
69622+ return be32_to_cpup((const __be32 *)p);
69623 }
69624
69625 static inline u64 get_unaligned_be64(const void *p)
69626 {
69627- return be64_to_cpup((__be64 *)p);
69628+ return be64_to_cpup((const __be64 *)p);
69629 }
69630
69631 static inline void put_unaligned_le16(u16 val, void *p)
69632diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
69633index 79b9837..b5a56f9 100644
69634--- a/include/linux/vermagic.h
69635+++ b/include/linux/vermagic.h
69636@@ -26,9 +26,35 @@
69637 #define MODULE_ARCH_VERMAGIC ""
69638 #endif
69639
69640+#ifdef CONFIG_PAX_REFCOUNT
69641+#define MODULE_PAX_REFCOUNT "REFCOUNT "
69642+#else
69643+#define MODULE_PAX_REFCOUNT ""
69644+#endif
69645+
69646+#ifdef CONSTIFY_PLUGIN
69647+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
69648+#else
69649+#define MODULE_CONSTIFY_PLUGIN ""
69650+#endif
69651+
69652+#ifdef STACKLEAK_PLUGIN
69653+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
69654+#else
69655+#define MODULE_STACKLEAK_PLUGIN ""
69656+#endif
69657+
69658+#ifdef CONFIG_GRKERNSEC
69659+#define MODULE_GRSEC "GRSEC "
69660+#else
69661+#define MODULE_GRSEC ""
69662+#endif
69663+
69664 #define VERMAGIC_STRING \
69665 UTS_RELEASE " " \
69666 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
69667 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
69668- MODULE_ARCH_VERMAGIC
69669+ MODULE_ARCH_VERMAGIC \
69670+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
69671+ MODULE_GRSEC
69672
69673diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
69674index 819a634..462ac12 100644
69675--- a/include/linux/vmalloc.h
69676+++ b/include/linux/vmalloc.h
69677@@ -14,6 +14,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
69678 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
69679 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
69680 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
69681+
69682+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
69683+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
69684+#endif
69685+
69686 /* bits [20..32] reserved for arch specific ioremap internals */
69687
69688 /*
69689@@ -124,4 +129,81 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
69690
69691 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
69692
69693+#define vmalloc(x) \
69694+({ \
69695+ void *___retval; \
69696+ intoverflow_t ___x = (intoverflow_t)x; \
69697+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
69698+ ___retval = NULL; \
69699+ else \
69700+ ___retval = vmalloc((unsigned long)___x); \
69701+ ___retval; \
69702+})
69703+
69704+#define __vmalloc(x, y, z) \
69705+({ \
69706+ void *___retval; \
69707+ intoverflow_t ___x = (intoverflow_t)x; \
69708+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
69709+ ___retval = NULL; \
69710+ else \
69711+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
69712+ ___retval; \
69713+})
69714+
69715+#define vmalloc_user(x) \
69716+({ \
69717+ void *___retval; \
69718+ intoverflow_t ___x = (intoverflow_t)x; \
69719+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
69720+ ___retval = NULL; \
69721+ else \
69722+ ___retval = vmalloc_user((unsigned long)___x); \
69723+ ___retval; \
69724+})
69725+
69726+#define vmalloc_exec(x) \
69727+({ \
69728+ void *___retval; \
69729+ intoverflow_t ___x = (intoverflow_t)x; \
69730+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
69731+ ___retval = NULL; \
69732+ else \
69733+ ___retval = vmalloc_exec((unsigned long)___x); \
69734+ ___retval; \
69735+})
69736+
69737+#define vmalloc_node(x, y) \
69738+({ \
69739+ void *___retval; \
69740+ intoverflow_t ___x = (intoverflow_t)x; \
69741+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
69742+ ___retval = NULL; \
69743+ else \
69744+ ___retval = vmalloc_node((unsigned long)___x, (y));\
69745+ ___retval; \
69746+})
69747+
69748+#define vmalloc_32(x) \
69749+({ \
69750+ void *___retval; \
69751+ intoverflow_t ___x = (intoverflow_t)x; \
69752+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
69753+ ___retval = NULL; \
69754+ else \
69755+ ___retval = vmalloc_32((unsigned long)___x); \
69756+ ___retval; \
69757+})
69758+
69759+#define vmalloc_32_user(x) \
69760+({ \
69761+ void *___retval; \
69762+ intoverflow_t ___x = (intoverflow_t)x; \
69763+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
69764+ ___retval = NULL; \
69765+ else \
69766+ ___retval = vmalloc_32_user((unsigned long)___x);\
69767+ ___retval; \
69768+})
69769+
69770 #endif /* _LINUX_VMALLOC_H */
69771diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
69772index 13070d6..aa4159a 100644
69773--- a/include/linux/vmstat.h
69774+++ b/include/linux/vmstat.h
69775@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(int cpu)
69776 /*
69777 * Zone based page accounting with per cpu differentials.
69778 */
69779-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69780+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
69781
69782 static inline void zone_page_state_add(long x, struct zone *zone,
69783 enum zone_stat_item item)
69784 {
69785- atomic_long_add(x, &zone->vm_stat[item]);
69786- atomic_long_add(x, &vm_stat[item]);
69787+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
69788+ atomic_long_add_unchecked(x, &vm_stat[item]);
69789 }
69790
69791 static inline unsigned long global_page_state(enum zone_stat_item item)
69792 {
69793- long x = atomic_long_read(&vm_stat[item]);
69794+ long x = atomic_long_read_unchecked(&vm_stat[item]);
69795 #ifdef CONFIG_SMP
69796 if (x < 0)
69797 x = 0;
69798@@ -158,7 +158,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
69799 static inline unsigned long zone_page_state(struct zone *zone,
69800 enum zone_stat_item item)
69801 {
69802- long x = atomic_long_read(&zone->vm_stat[item]);
69803+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69804 #ifdef CONFIG_SMP
69805 if (x < 0)
69806 x = 0;
69807@@ -175,7 +175,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
69808 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
69809 enum zone_stat_item item)
69810 {
69811- long x = atomic_long_read(&zone->vm_stat[item]);
69812+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
69813
69814 #ifdef CONFIG_SMP
69815 int cpu;
69816@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
69817
69818 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
69819 {
69820- atomic_long_inc(&zone->vm_stat[item]);
69821- atomic_long_inc(&vm_stat[item]);
69822+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
69823+ atomic_long_inc_unchecked(&vm_stat[item]);
69824 }
69825
69826 static inline void __inc_zone_page_state(struct page *page,
69827@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state(struct page *page,
69828
69829 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
69830 {
69831- atomic_long_dec(&zone->vm_stat[item]);
69832- atomic_long_dec(&vm_stat[item]);
69833+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
69834+ atomic_long_dec_unchecked(&vm_stat[item]);
69835 }
69836
69837 static inline void __dec_zone_page_state(struct page *page,
69838diff --git a/include/linux/xattr.h b/include/linux/xattr.h
69839index 5c84af8..1a3b6e2 100644
69840--- a/include/linux/xattr.h
69841+++ b/include/linux/xattr.h
69842@@ -33,6 +33,11 @@
69843 #define XATTR_USER_PREFIX "user."
69844 #define XATTR_USER_PREFIX_LEN (sizeof (XATTR_USER_PREFIX) - 1)
69845
69846+/* User namespace */
69847+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
69848+#define XATTR_PAX_FLAGS_SUFFIX "flags"
69849+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
69850+
69851 struct inode;
69852 struct dentry;
69853
69854diff --git a/include/media/saa7146_vv.h b/include/media/saa7146_vv.h
69855index eed5fcc..5080d24 100644
69856--- a/include/media/saa7146_vv.h
69857+++ b/include/media/saa7146_vv.h
69858@@ -167,7 +167,7 @@ struct saa7146_ext_vv
69859 int (*std_callback)(struct saa7146_dev*, struct saa7146_standard *);
69860
69861 /* the extension can override this */
69862- struct v4l2_ioctl_ops ops;
69863+ v4l2_ioctl_ops_no_const ops;
69864 /* pointer to the saa7146 core ops */
69865 const struct v4l2_ioctl_ops *core_ops;
69866
69867diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
69868index 73c9867..2da8837 100644
69869--- a/include/media/v4l2-dev.h
69870+++ b/include/media/v4l2-dev.h
69871@@ -34,7 +34,7 @@ struct v4l2_device;
69872 #define V4L2_FL_UNREGISTERED (0)
69873
69874 struct v4l2_file_operations {
69875- struct module *owner;
69876+ struct module * const owner;
69877 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
69878 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
69879 unsigned int (*poll) (struct file *, struct poll_table_struct *);
69880@@ -46,6 +46,7 @@ struct v4l2_file_operations {
69881 int (*open) (struct file *);
69882 int (*release) (struct file *);
69883 };
69884+typedef struct v4l2_file_operations __no_const v4l2_file_operations_no_const;
69885
69886 /*
69887 * Newer version of video_device, handled by videodev2.c
69888diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
69889index 5d5d550..f559ef1 100644
69890--- a/include/media/v4l2-device.h
69891+++ b/include/media/v4l2-device.h
69892@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(struct device *dev, struct v4l2_device *v4
69893 this function returns 0. If the name ends with a digit (e.g. cx18),
69894 then the name will be set to cx18-0 since cx180 looks really odd. */
69895 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
69896- atomic_t *instance);
69897+ atomic_unchecked_t *instance);
69898
69899 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
69900 Since the parent disappears this ensures that v4l2_dev doesn't have an
69901diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
69902index 7a4529d..7244290 100644
69903--- a/include/media/v4l2-ioctl.h
69904+++ b/include/media/v4l2-ioctl.h
69905@@ -243,6 +243,7 @@ struct v4l2_ioctl_ops {
69906 long (*vidioc_default) (struct file *file, void *fh,
69907 int cmd, void *arg);
69908 };
69909+typedef struct v4l2_ioctl_ops __no_const v4l2_ioctl_ops_no_const;
69910
69911
69912 /* v4l debugging and diagnostics */
69913diff --git a/include/net/flow.h b/include/net/flow.h
69914index 809970b..c3df4f3 100644
69915--- a/include/net/flow.h
69916+++ b/include/net/flow.h
69917@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net *net, struct flowi *key, u16 family,
69918 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
69919 u8 dir, flow_resolve_t resolver);
69920 extern void flow_cache_flush(void);
69921-extern atomic_t flow_cache_genid;
69922+extern atomic_unchecked_t flow_cache_genid;
69923
69924 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
69925 {
69926diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
69927index 15e1f8fe..668837c 100644
69928--- a/include/net/inetpeer.h
69929+++ b/include/net/inetpeer.h
69930@@ -24,7 +24,7 @@ struct inet_peer
69931 __u32 dtime; /* the time of last use of not
69932 * referenced entries */
69933 atomic_t refcnt;
69934- atomic_t rid; /* Frag reception counter */
69935+ atomic_unchecked_t rid; /* Frag reception counter */
69936 __u32 tcp_ts;
69937 unsigned long tcp_ts_stamp;
69938 };
69939diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
69940index 98978e7..2243a3d 100644
69941--- a/include/net/ip_vs.h
69942+++ b/include/net/ip_vs.h
69943@@ -365,7 +365,7 @@ struct ip_vs_conn {
69944 struct ip_vs_conn *control; /* Master control connection */
69945 atomic_t n_control; /* Number of controlled ones */
69946 struct ip_vs_dest *dest; /* real server */
69947- atomic_t in_pkts; /* incoming packet counter */
69948+ atomic_unchecked_t in_pkts; /* incoming packet counter */
69949
69950 /* packet transmitter for different forwarding methods. If it
69951 mangles the packet, it must return NF_DROP or better NF_STOLEN,
69952@@ -466,7 +466,7 @@ struct ip_vs_dest {
69953 union nf_inet_addr addr; /* IP address of the server */
69954 __be16 port; /* port number of the server */
69955 volatile unsigned flags; /* dest status flags */
69956- atomic_t conn_flags; /* flags to copy to conn */
69957+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
69958 atomic_t weight; /* server weight */
69959
69960 atomic_t refcnt; /* reference counter */
69961diff --git a/include/net/irda/ircomm_core.h b/include/net/irda/ircomm_core.h
69962index 69b610a..fe3962c 100644
69963--- a/include/net/irda/ircomm_core.h
69964+++ b/include/net/irda/ircomm_core.h
69965@@ -51,7 +51,7 @@ typedef struct {
69966 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
69967 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
69968 struct ircomm_info *);
69969-} call_t;
69970+} __no_const call_t;
69971
69972 struct ircomm_cb {
69973 irda_queue_t queue;
69974diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
69975index eea2e61..08c692d 100644
69976--- a/include/net/irda/ircomm_tty.h
69977+++ b/include/net/irda/ircomm_tty.h
69978@@ -35,6 +35,7 @@
69979 #include <linux/termios.h>
69980 #include <linux/timer.h>
69981 #include <linux/tty.h> /* struct tty_struct */
69982+#include <asm/local.h>
69983
69984 #include <net/irda/irias_object.h>
69985 #include <net/irda/ircomm_core.h>
69986@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
69987 unsigned short close_delay;
69988 unsigned short closing_wait; /* time to wait before closing */
69989
69990- int open_count;
69991- int blocked_open; /* # of blocked opens */
69992+ local_t open_count;
69993+ local_t blocked_open; /* # of blocked opens */
69994
69995 /* Protect concurent access to :
69996 * o self->open_count
69997diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
69998index f82a1e8..82d81e8 100644
69999--- a/include/net/iucv/af_iucv.h
70000+++ b/include/net/iucv/af_iucv.h
70001@@ -87,7 +87,7 @@ struct iucv_sock {
70002 struct iucv_sock_list {
70003 struct hlist_head head;
70004 rwlock_t lock;
70005- atomic_t autobind_name;
70006+ atomic_unchecked_t autobind_name;
70007 };
70008
70009 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
70010diff --git a/include/net/lapb.h b/include/net/lapb.h
70011index 96cb5dd..25e8d4f 100644
70012--- a/include/net/lapb.h
70013+++ b/include/net/lapb.h
70014@@ -95,7 +95,7 @@ struct lapb_cb {
70015 struct sk_buff_head write_queue;
70016 struct sk_buff_head ack_queue;
70017 unsigned char window;
70018- struct lapb_register_struct callbacks;
70019+ struct lapb_register_struct *callbacks;
70020
70021 /* FRMR control information */
70022 struct lapb_frame frmr_data;
70023diff --git a/include/net/neighbour.h b/include/net/neighbour.h
70024index 3817fda..cdb2343 100644
70025--- a/include/net/neighbour.h
70026+++ b/include/net/neighbour.h
70027@@ -131,7 +131,7 @@ struct neigh_ops
70028 int (*connected_output)(struct sk_buff*);
70029 int (*hh_output)(struct sk_buff*);
70030 int (*queue_xmit)(struct sk_buff*);
70031-};
70032+} __do_const;
70033
70034 struct pneigh_entry
70035 {
70036diff --git a/include/net/netlink.h b/include/net/netlink.h
70037index c344646..4778c71 100644
70038--- a/include/net/netlink.h
70039+++ b/include/net/netlink.h
70040@@ -335,7 +335,7 @@ static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
70041 {
70042 return (remaining >= (int) sizeof(struct nlmsghdr) &&
70043 nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
70044- nlh->nlmsg_len <= remaining);
70045+ nlh->nlmsg_len <= (unsigned int)remaining);
70046 }
70047
70048 /**
70049@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
70050 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
70051 {
70052 if (mark)
70053- skb_trim(skb, (unsigned char *) mark - skb->data);
70054+ skb_trim(skb, (const unsigned char *) mark - skb->data);
70055 }
70056
70057 /**
70058diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
70059index 9a4b8b7..e49e077 100644
70060--- a/include/net/netns/ipv4.h
70061+++ b/include/net/netns/ipv4.h
70062@@ -54,7 +54,7 @@ struct netns_ipv4 {
70063 int current_rt_cache_rebuild_count;
70064
70065 struct timer_list rt_secret_timer;
70066- atomic_t rt_genid;
70067+ atomic_unchecked_t rt_genid;
70068
70069 #ifdef CONFIG_IP_MROUTE
70070 struct sock *mroute_sk;
70071diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
70072index 8a6d529..171f401 100644
70073--- a/include/net/sctp/sctp.h
70074+++ b/include/net/sctp/sctp.h
70075@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
70076
70077 #else /* SCTP_DEBUG */
70078
70079-#define SCTP_DEBUG_PRINTK(whatever...)
70080-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
70081+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
70082+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
70083 #define SCTP_ENABLE_DEBUG
70084 #define SCTP_DISABLE_DEBUG
70085 #define SCTP_ASSERT(expr, str, func)
70086diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
70087index d97f689..f3b90ab 100644
70088--- a/include/net/secure_seq.h
70089+++ b/include/net/secure_seq.h
70090@@ -7,14 +7,14 @@ extern __u32 secure_ip_id(__be32 daddr);
70091 extern __u32 secure_ipv6_id(const __be32 daddr[4]);
70092 extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
70093 extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
70094- __be16 dport);
70095+ __be16 dport);
70096 extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
70097 __be16 sport, __be16 dport);
70098 extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70099- __be16 sport, __be16 dport);
70100+ __be16 sport, __be16 dport);
70101 extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
70102- __be16 sport, __be16 dport);
70103+ __be16 sport, __be16 dport);
70104 extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
70105- __be16 sport, __be16 dport);
70106+ __be16 sport, __be16 dport);
70107
70108 #endif /* _NET_SECURE_SEQ */
70109diff --git a/include/net/sock.h b/include/net/sock.h
70110index 9f96394..76fc9c7 100644
70111--- a/include/net/sock.h
70112+++ b/include/net/sock.h
70113@@ -272,7 +272,7 @@ struct sock {
70114 rwlock_t sk_callback_lock;
70115 int sk_err,
70116 sk_err_soft;
70117- atomic_t sk_drops;
70118+ atomic_unchecked_t sk_drops;
70119 unsigned short sk_ack_backlog;
70120 unsigned short sk_max_ack_backlog;
70121 __u32 sk_priority;
70122@@ -737,7 +737,7 @@ static inline void sk_refcnt_debug_release(const struct sock *sk)
70123 extern void sock_prot_inuse_add(struct net *net, struct proto *prot, int inc);
70124 extern int sock_prot_inuse_get(struct net *net, struct proto *proto);
70125 #else
70126-static void inline sock_prot_inuse_add(struct net *net, struct proto *prot,
70127+static inline void sock_prot_inuse_add(struct net *net, struct proto *prot,
70128 int inc)
70129 {
70130 }
70131diff --git a/include/net/tcp.h b/include/net/tcp.h
70132index 6cfe18b..dd21acb 100644
70133--- a/include/net/tcp.h
70134+++ b/include/net/tcp.h
70135@@ -1444,8 +1444,8 @@ enum tcp_seq_states {
70136 struct tcp_seq_afinfo {
70137 char *name;
70138 sa_family_t family;
70139- struct file_operations seq_fops;
70140- struct seq_operations seq_ops;
70141+ file_operations_no_const seq_fops;
70142+ seq_operations_no_const seq_ops;
70143 };
70144
70145 struct tcp_iter_state {
70146diff --git a/include/net/udp.h b/include/net/udp.h
70147index f98abd2..b4b042f 100644
70148--- a/include/net/udp.h
70149+++ b/include/net/udp.h
70150@@ -187,8 +187,8 @@ struct udp_seq_afinfo {
70151 char *name;
70152 sa_family_t family;
70153 struct udp_table *udp_table;
70154- struct file_operations seq_fops;
70155- struct seq_operations seq_ops;
70156+ file_operations_no_const seq_fops;
70157+ seq_operations_no_const seq_ops;
70158 };
70159
70160 struct udp_iter_state {
70161diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
70162index cbb822e..e9c1cbe 100644
70163--- a/include/rdma/iw_cm.h
70164+++ b/include/rdma/iw_cm.h
70165@@ -129,7 +129,7 @@ struct iw_cm_verbs {
70166 int backlog);
70167
70168 int (*destroy_listen)(struct iw_cm_id *cm_id);
70169-};
70170+} __no_const;
70171
70172 /**
70173 * iw_create_cm_id - Create an IW CM identifier.
70174diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
70175index 09a124b..caa8ca8 100644
70176--- a/include/scsi/libfc.h
70177+++ b/include/scsi/libfc.h
70178@@ -675,6 +675,7 @@ struct libfc_function_template {
70179 */
70180 void (*disc_stop_final) (struct fc_lport *);
70181 };
70182+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
70183
70184 /* information used by the discovery layer */
70185 struct fc_disc {
70186@@ -707,7 +708,7 @@ struct fc_lport {
70187 struct fc_disc disc;
70188
70189 /* Operational Information */
70190- struct libfc_function_template tt;
70191+ libfc_function_template_no_const tt;
70192 u8 link_up;
70193 u8 qfull;
70194 enum fc_lport_state state;
70195diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
70196index de8e180..f15e0d7 100644
70197--- a/include/scsi/scsi_device.h
70198+++ b/include/scsi/scsi_device.h
70199@@ -156,9 +156,9 @@ struct scsi_device {
70200 unsigned int max_device_blocked; /* what device_blocked counts down from */
70201 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
70202
70203- atomic_t iorequest_cnt;
70204- atomic_t iodone_cnt;
70205- atomic_t ioerr_cnt;
70206+ atomic_unchecked_t iorequest_cnt;
70207+ atomic_unchecked_t iodone_cnt;
70208+ atomic_unchecked_t ioerr_cnt;
70209
70210 struct device sdev_gendev,
70211 sdev_dev;
70212diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
70213index fc50bd6..81ba9cb 100644
70214--- a/include/scsi/scsi_transport_fc.h
70215+++ b/include/scsi/scsi_transport_fc.h
70216@@ -708,7 +708,7 @@ struct fc_function_template {
70217 unsigned long show_host_system_hostname:1;
70218
70219 unsigned long disable_target_scan:1;
70220-};
70221+} __do_const;
70222
70223
70224 /**
70225diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
70226index 3dae3f7..8440d6f 100644
70227--- a/include/sound/ac97_codec.h
70228+++ b/include/sound/ac97_codec.h
70229@@ -419,15 +419,15 @@
70230 struct snd_ac97;
70231
70232 struct snd_ac97_build_ops {
70233- int (*build_3d) (struct snd_ac97 *ac97);
70234- int (*build_specific) (struct snd_ac97 *ac97);
70235- int (*build_spdif) (struct snd_ac97 *ac97);
70236- int (*build_post_spdif) (struct snd_ac97 *ac97);
70237+ int (* const build_3d) (struct snd_ac97 *ac97);
70238+ int (* const build_specific) (struct snd_ac97 *ac97);
70239+ int (* const build_spdif) (struct snd_ac97 *ac97);
70240+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
70241 #ifdef CONFIG_PM
70242- void (*suspend) (struct snd_ac97 *ac97);
70243- void (*resume) (struct snd_ac97 *ac97);
70244+ void (* const suspend) (struct snd_ac97 *ac97);
70245+ void (* const resume) (struct snd_ac97 *ac97);
70246 #endif
70247- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70248+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
70249 };
70250
70251 struct snd_ac97_bus_ops {
70252@@ -477,7 +477,7 @@ struct snd_ac97_template {
70253
70254 struct snd_ac97 {
70255 /* -- lowlevel (hardware) driver specific -- */
70256- struct snd_ac97_build_ops * build_ops;
70257+ const struct snd_ac97_build_ops * build_ops;
70258 void *private_data;
70259 void (*private_free) (struct snd_ac97 *ac97);
70260 /* --- */
70261diff --git a/include/sound/ak4xxx-adda.h b/include/sound/ak4xxx-adda.h
70262index 891cf1a..a94ba2b 100644
70263--- a/include/sound/ak4xxx-adda.h
70264+++ b/include/sound/ak4xxx-adda.h
70265@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
70266 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
70267 unsigned char val);
70268 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
70269-};
70270+} __no_const;
70271
70272 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
70273
70274diff --git a/include/sound/hwdep.h b/include/sound/hwdep.h
70275index 8c05e47..2b5df97 100644
70276--- a/include/sound/hwdep.h
70277+++ b/include/sound/hwdep.h
70278@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
70279 struct snd_hwdep_dsp_status *status);
70280 int (*dsp_load)(struct snd_hwdep *hw,
70281 struct snd_hwdep_dsp_image *image);
70282-};
70283+} __no_const;
70284
70285 struct snd_hwdep {
70286 struct snd_card *card;
70287diff --git a/include/sound/info.h b/include/sound/info.h
70288index 112e894..6fda5b5 100644
70289--- a/include/sound/info.h
70290+++ b/include/sound/info.h
70291@@ -44,7 +44,7 @@ struct snd_info_entry_text {
70292 struct snd_info_buffer *buffer);
70293 void (*write)(struct snd_info_entry *entry,
70294 struct snd_info_buffer *buffer);
70295-};
70296+} __no_const;
70297
70298 struct snd_info_entry_ops {
70299 int (*open)(struct snd_info_entry *entry,
70300diff --git a/include/sound/pcm.h b/include/sound/pcm.h
70301index de6d981..590a550 100644
70302--- a/include/sound/pcm.h
70303+++ b/include/sound/pcm.h
70304@@ -80,6 +80,7 @@ struct snd_pcm_ops {
70305 int (*mmap)(struct snd_pcm_substream *substream, struct vm_area_struct *vma);
70306 int (*ack)(struct snd_pcm_substream *substream);
70307 };
70308+typedef struct snd_pcm_ops __no_const snd_pcm_ops_no_const;
70309
70310 /*
70311 *
70312diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
70313index 736eac7..fe8a80f 100644
70314--- a/include/sound/sb16_csp.h
70315+++ b/include/sound/sb16_csp.h
70316@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
70317 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
70318 int (*csp_stop) (struct snd_sb_csp * p);
70319 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
70320-};
70321+} __no_const;
70322
70323 /*
70324 * CSP private data
70325diff --git a/include/sound/ymfpci.h b/include/sound/ymfpci.h
70326index 444cd6b..3327cc5 100644
70327--- a/include/sound/ymfpci.h
70328+++ b/include/sound/ymfpci.h
70329@@ -358,7 +358,7 @@ struct snd_ymfpci {
70330 spinlock_t reg_lock;
70331 spinlock_t voice_lock;
70332 wait_queue_head_t interrupt_sleep;
70333- atomic_t interrupt_sleep_count;
70334+ atomic_unchecked_t interrupt_sleep_count;
70335 struct snd_info_entry *proc_entry;
70336 const struct firmware *dsp_microcode;
70337 const struct firmware *controller_microcode;
70338diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
70339index b89f9db..f097b38 100644
70340--- a/include/trace/events/irq.h
70341+++ b/include/trace/events/irq.h
70342@@ -34,7 +34,7 @@
70343 */
70344 TRACE_EVENT(irq_handler_entry,
70345
70346- TP_PROTO(int irq, struct irqaction *action),
70347+ TP_PROTO(int irq, const struct irqaction *action),
70348
70349 TP_ARGS(irq, action),
70350
70351@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
70352 */
70353 TRACE_EVENT(irq_handler_exit,
70354
70355- TP_PROTO(int irq, struct irqaction *action, int ret),
70356+ TP_PROTO(int irq, const struct irqaction *action, int ret),
70357
70358 TP_ARGS(irq, action, ret),
70359
70360@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
70361 */
70362 TRACE_EVENT(softirq_entry,
70363
70364- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70365+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70366
70367 TP_ARGS(h, vec),
70368
70369@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
70370 */
70371 TRACE_EVENT(softirq_exit,
70372
70373- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
70374+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
70375
70376 TP_ARGS(h, vec),
70377
70378diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
70379index 0993a22..32ba2fe 100644
70380--- a/include/video/uvesafb.h
70381+++ b/include/video/uvesafb.h
70382@@ -177,6 +177,7 @@ struct uvesafb_par {
70383 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
70384 u8 pmi_setpal; /* PMI for palette changes */
70385 u16 *pmi_base; /* protected mode interface location */
70386+ u8 *pmi_code; /* protected mode code location */
70387 void *pmi_start;
70388 void *pmi_pal;
70389 u8 *vbe_state_orig; /*
70390diff --git a/init/Kconfig b/init/Kconfig
70391index d72691b..3996e54 100644
70392--- a/init/Kconfig
70393+++ b/init/Kconfig
70394@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
70395
70396 config COMPAT_BRK
70397 bool "Disable heap randomization"
70398- default y
70399+ default n
70400 help
70401 Randomizing heap placement makes heap exploits harder, but it
70402 also breaks ancient binaries (including anything libc5 based).
70403diff --git a/init/do_mounts.c b/init/do_mounts.c
70404index bb008d0..4fa3933 100644
70405--- a/init/do_mounts.c
70406+++ b/init/do_mounts.c
70407@@ -216,11 +216,11 @@ static void __init get_fs_names(char *page)
70408
70409 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
70410 {
70411- int err = sys_mount(name, "/root", fs, flags, data);
70412+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
70413 if (err)
70414 return err;
70415
70416- sys_chdir("/root");
70417+ sys_chdir((__force const char __user *)"/root");
70418 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
70419 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
70420 current->fs->pwd.mnt->mnt_sb->s_type->name,
70421@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...)
70422 va_start(args, fmt);
70423 vsprintf(buf, fmt, args);
70424 va_end(args);
70425- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
70426+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
70427 if (fd >= 0) {
70428 sys_ioctl(fd, FDEJECT, 0);
70429 sys_close(fd);
70430 }
70431 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
70432- fd = sys_open("/dev/console", O_RDWR, 0);
70433+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
70434 if (fd >= 0) {
70435 sys_ioctl(fd, TCGETS, (long)&termios);
70436 termios.c_lflag &= ~ICANON;
70437 sys_ioctl(fd, TCSETSF, (long)&termios);
70438- sys_read(fd, &c, 1);
70439+ sys_read(fd, (char __user *)&c, 1);
70440 termios.c_lflag |= ICANON;
70441 sys_ioctl(fd, TCSETSF, (long)&termios);
70442 sys_close(fd);
70443@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
70444 mount_root();
70445 out:
70446 devtmpfs_mount("dev");
70447- sys_mount(".", "/", NULL, MS_MOVE, NULL);
70448- sys_chroot(".");
70449+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
70450+ sys_chroot((__force char __user *)".");
70451 }
70452diff --git a/init/do_mounts.h b/init/do_mounts.h
70453index f5b978a..69dbfe8 100644
70454--- a/init/do_mounts.h
70455+++ b/init/do_mounts.h
70456@@ -15,15 +15,15 @@ extern int root_mountflags;
70457
70458 static inline int create_dev(char *name, dev_t dev)
70459 {
70460- sys_unlink(name);
70461- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
70462+ sys_unlink((char __force_user *)name);
70463+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
70464 }
70465
70466 #if BITS_PER_LONG == 32
70467 static inline u32 bstat(char *name)
70468 {
70469 struct stat64 stat;
70470- if (sys_stat64(name, &stat) != 0)
70471+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
70472 return 0;
70473 if (!S_ISBLK(stat.st_mode))
70474 return 0;
70475@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
70476 static inline u32 bstat(char *name)
70477 {
70478 struct stat stat;
70479- if (sys_newstat(name, &stat) != 0)
70480+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
70481 return 0;
70482 if (!S_ISBLK(stat.st_mode))
70483 return 0;
70484diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
70485index 614241b..4da046b 100644
70486--- a/init/do_mounts_initrd.c
70487+++ b/init/do_mounts_initrd.c
70488@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shell)
70489 sys_close(old_fd);sys_close(root_fd);
70490 sys_close(0);sys_close(1);sys_close(2);
70491 sys_setsid();
70492- (void) sys_open("/dev/console",O_RDWR,0);
70493+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
70494 (void) sys_dup(0);
70495 (void) sys_dup(0);
70496 return kernel_execve(shell, argv, envp_init);
70497@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
70498 create_dev("/dev/root.old", Root_RAM0);
70499 /* mount initrd on rootfs' /root */
70500 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
70501- sys_mkdir("/old", 0700);
70502- root_fd = sys_open("/", 0, 0);
70503- old_fd = sys_open("/old", 0, 0);
70504+ sys_mkdir((const char __force_user *)"/old", 0700);
70505+ root_fd = sys_open((const char __force_user *)"/", 0, 0);
70506+ old_fd = sys_open((const char __force_user *)"/old", 0, 0);
70507 /* move initrd over / and chdir/chroot in initrd root */
70508- sys_chdir("/root");
70509- sys_mount(".", "/", NULL, MS_MOVE, NULL);
70510- sys_chroot(".");
70511+ sys_chdir((const char __force_user *)"/root");
70512+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
70513+ sys_chroot((const char __force_user *)".");
70514
70515 /*
70516 * In case that a resume from disk is carried out by linuxrc or one of
70517@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
70518
70519 /* move initrd to rootfs' /old */
70520 sys_fchdir(old_fd);
70521- sys_mount("/", ".", NULL, MS_MOVE, NULL);
70522+ sys_mount((char __force_user *)"/", (char __force_user *)".", NULL, MS_MOVE, NULL);
70523 /* switch root and cwd back to / of rootfs */
70524 sys_fchdir(root_fd);
70525- sys_chroot(".");
70526+ sys_chroot((const char __force_user *)".");
70527 sys_close(old_fd);
70528 sys_close(root_fd);
70529
70530 if (new_decode_dev(real_root_dev) == Root_RAM0) {
70531- sys_chdir("/old");
70532+ sys_chdir((const char __force_user *)"/old");
70533 return;
70534 }
70535
70536@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
70537 mount_root();
70538
70539 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
70540- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
70541+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
70542 if (!error)
70543 printk("okay\n");
70544 else {
70545- int fd = sys_open("/dev/root.old", O_RDWR, 0);
70546+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
70547 if (error == -ENOENT)
70548 printk("/initrd does not exist. Ignored.\n");
70549 else
70550 printk("failed\n");
70551 printk(KERN_NOTICE "Unmounting old root\n");
70552- sys_umount("/old", MNT_DETACH);
70553+ sys_umount((char __force_user *)"/old", MNT_DETACH);
70554 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
70555 if (fd < 0) {
70556 error = fd;
70557@@ -119,11 +119,11 @@ int __init initrd_load(void)
70558 * mounted in the normal path.
70559 */
70560 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
70561- sys_unlink("/initrd.image");
70562+ sys_unlink((const char __force_user *)"/initrd.image");
70563 handle_initrd();
70564 return 1;
70565 }
70566 }
70567- sys_unlink("/initrd.image");
70568+ sys_unlink((const char __force_user *)"/initrd.image");
70569 return 0;
70570 }
70571diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
70572index 69aebbf..c0bf6a7 100644
70573--- a/init/do_mounts_md.c
70574+++ b/init/do_mounts_md.c
70575@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
70576 partitioned ? "_d" : "", minor,
70577 md_setup_args[ent].device_names);
70578
70579- fd = sys_open(name, 0, 0);
70580+ fd = sys_open((char __force_user *)name, 0, 0);
70581 if (fd < 0) {
70582 printk(KERN_ERR "md: open failed - cannot start "
70583 "array %s\n", name);
70584@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
70585 * array without it
70586 */
70587 sys_close(fd);
70588- fd = sys_open(name, 0, 0);
70589+ fd = sys_open((char __force_user *)name, 0, 0);
70590 sys_ioctl(fd, BLKRRPART, 0);
70591 }
70592 sys_close(fd);
70593@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
70594
70595 wait_for_device_probe();
70596
70597- fd = sys_open("/dev/md0", 0, 0);
70598+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
70599 if (fd >= 0) {
70600 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
70601 sys_close(fd);
70602diff --git a/init/initramfs.c b/init/initramfs.c
70603index 1fd59b8..a01b079 100644
70604--- a/init/initramfs.c
70605+++ b/init/initramfs.c
70606@@ -74,7 +74,7 @@ static void __init free_hash(void)
70607 }
70608 }
70609
70610-static long __init do_utime(char __user *filename, time_t mtime)
70611+static long __init do_utime(__force char __user *filename, time_t mtime)
70612 {
70613 struct timespec t[2];
70614
70615@@ -109,7 +109,7 @@ static void __init dir_utime(void)
70616 struct dir_entry *de, *tmp;
70617 list_for_each_entry_safe(de, tmp, &dir_list, list) {
70618 list_del(&de->list);
70619- do_utime(de->name, de->mtime);
70620+ do_utime((char __force_user *)de->name, de->mtime);
70621 kfree(de->name);
70622 kfree(de);
70623 }
70624@@ -271,7 +271,7 @@ static int __init maybe_link(void)
70625 if (nlink >= 2) {
70626 char *old = find_link(major, minor, ino, mode, collected);
70627 if (old)
70628- return (sys_link(old, collected) < 0) ? -1 : 1;
70629+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
70630 }
70631 return 0;
70632 }
70633@@ -280,11 +280,11 @@ static void __init clean_path(char *path, mode_t mode)
70634 {
70635 struct stat st;
70636
70637- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
70638+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
70639 if (S_ISDIR(st.st_mode))
70640- sys_rmdir(path);
70641+ sys_rmdir((char __force_user *)path);
70642 else
70643- sys_unlink(path);
70644+ sys_unlink((char __force_user *)path);
70645 }
70646 }
70647
70648@@ -305,7 +305,7 @@ static int __init do_name(void)
70649 int openflags = O_WRONLY|O_CREAT;
70650 if (ml != 1)
70651 openflags |= O_TRUNC;
70652- wfd = sys_open(collected, openflags, mode);
70653+ wfd = sys_open((char __force_user *)collected, openflags, mode);
70654
70655 if (wfd >= 0) {
70656 sys_fchown(wfd, uid, gid);
70657@@ -317,17 +317,17 @@ static int __init do_name(void)
70658 }
70659 }
70660 } else if (S_ISDIR(mode)) {
70661- sys_mkdir(collected, mode);
70662- sys_chown(collected, uid, gid);
70663- sys_chmod(collected, mode);
70664+ sys_mkdir((char __force_user *)collected, mode);
70665+ sys_chown((char __force_user *)collected, uid, gid);
70666+ sys_chmod((char __force_user *)collected, mode);
70667 dir_add(collected, mtime);
70668 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
70669 S_ISFIFO(mode) || S_ISSOCK(mode)) {
70670 if (maybe_link() == 0) {
70671- sys_mknod(collected, mode, rdev);
70672- sys_chown(collected, uid, gid);
70673- sys_chmod(collected, mode);
70674- do_utime(collected, mtime);
70675+ sys_mknod((char __force_user *)collected, mode, rdev);
70676+ sys_chown((char __force_user *)collected, uid, gid);
70677+ sys_chmod((char __force_user *)collected, mode);
70678+ do_utime((char __force_user *)collected, mtime);
70679 }
70680 }
70681 return 0;
70682@@ -336,15 +336,15 @@ static int __init do_name(void)
70683 static int __init do_copy(void)
70684 {
70685 if (count >= body_len) {
70686- sys_write(wfd, victim, body_len);
70687+ sys_write(wfd, (char __force_user *)victim, body_len);
70688 sys_close(wfd);
70689- do_utime(vcollected, mtime);
70690+ do_utime((char __force_user *)vcollected, mtime);
70691 kfree(vcollected);
70692 eat(body_len);
70693 state = SkipIt;
70694 return 0;
70695 } else {
70696- sys_write(wfd, victim, count);
70697+ sys_write(wfd, (char __force_user *)victim, count);
70698 body_len -= count;
70699 eat(count);
70700 return 1;
70701@@ -355,9 +355,9 @@ static int __init do_symlink(void)
70702 {
70703 collected[N_ALIGN(name_len) + body_len] = '\0';
70704 clean_path(collected, 0);
70705- sys_symlink(collected + N_ALIGN(name_len), collected);
70706- sys_lchown(collected, uid, gid);
70707- do_utime(collected, mtime);
70708+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
70709+ sys_lchown((char __force_user *)collected, uid, gid);
70710+ do_utime((char __force_user *)collected, mtime);
70711 state = SkipIt;
70712 next_state = Reset;
70713 return 0;
70714diff --git a/init/main.c b/init/main.c
70715index 1eb4bd5..da8c6f5 100644
70716--- a/init/main.c
70717+++ b/init/main.c
70718@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void) { }
70719 #ifdef CONFIG_TC
70720 extern void tc_init(void);
70721 #endif
70722+extern void grsecurity_init(void);
70723
70724 enum system_states system_state __read_mostly;
70725 EXPORT_SYMBOL(system_state);
70726@@ -183,6 +184,49 @@ static int __init set_reset_devices(char *str)
70727
70728 __setup("reset_devices", set_reset_devices);
70729
70730+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
70731+extern char pax_enter_kernel_user[];
70732+extern char pax_exit_kernel_user[];
70733+extern pgdval_t clone_pgd_mask;
70734+#endif
70735+
70736+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
70737+static int __init setup_pax_nouderef(char *str)
70738+{
70739+#ifdef CONFIG_X86_32
70740+ unsigned int cpu;
70741+ struct desc_struct *gdt;
70742+
70743+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
70744+ gdt = get_cpu_gdt_table(cpu);
70745+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
70746+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
70747+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
70748+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
70749+ }
70750+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
70751+#else
70752+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
70753+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
70754+ clone_pgd_mask = ~(pgdval_t)0UL;
70755+#endif
70756+
70757+ return 0;
70758+}
70759+early_param("pax_nouderef", setup_pax_nouderef);
70760+#endif
70761+
70762+#ifdef CONFIG_PAX_SOFTMODE
70763+int pax_softmode;
70764+
70765+static int __init setup_pax_softmode(char *str)
70766+{
70767+ get_option(&str, &pax_softmode);
70768+ return 1;
70769+}
70770+__setup("pax_softmode=", setup_pax_softmode);
70771+#endif
70772+
70773 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
70774 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
70775 static const char *panic_later, *panic_param;
70776@@ -705,52 +749,53 @@ int initcall_debug;
70777 core_param(initcall_debug, initcall_debug, bool, 0644);
70778
70779 static char msgbuf[64];
70780-static struct boot_trace_call call;
70781-static struct boot_trace_ret ret;
70782+static struct boot_trace_call trace_call;
70783+static struct boot_trace_ret trace_ret;
70784
70785 int do_one_initcall(initcall_t fn)
70786 {
70787 int count = preempt_count();
70788 ktime_t calltime, delta, rettime;
70789+ const char *msg1 = "", *msg2 = "";
70790
70791 if (initcall_debug) {
70792- call.caller = task_pid_nr(current);
70793- printk("calling %pF @ %i\n", fn, call.caller);
70794+ trace_call.caller = task_pid_nr(current);
70795+ printk("calling %pF @ %i\n", fn, trace_call.caller);
70796 calltime = ktime_get();
70797- trace_boot_call(&call, fn);
70798+ trace_boot_call(&trace_call, fn);
70799 enable_boot_trace();
70800 }
70801
70802- ret.result = fn();
70803+ trace_ret.result = fn();
70804
70805 if (initcall_debug) {
70806 disable_boot_trace();
70807 rettime = ktime_get();
70808 delta = ktime_sub(rettime, calltime);
70809- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70810- trace_boot_ret(&ret, fn);
70811+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
70812+ trace_boot_ret(&trace_ret, fn);
70813 printk("initcall %pF returned %d after %Ld usecs\n", fn,
70814- ret.result, ret.duration);
70815+ trace_ret.result, trace_ret.duration);
70816 }
70817
70818 msgbuf[0] = 0;
70819
70820- if (ret.result && ret.result != -ENODEV && initcall_debug)
70821- sprintf(msgbuf, "error code %d ", ret.result);
70822+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
70823+ sprintf(msgbuf, "error code %d ", trace_ret.result);
70824
70825 if (preempt_count() != count) {
70826- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
70827+ msg1 = " preemption imbalance";
70828 preempt_count() = count;
70829 }
70830 if (irqs_disabled()) {
70831- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
70832+ msg2 = " disabled interrupts";
70833 local_irq_enable();
70834 }
70835- if (msgbuf[0]) {
70836- printk("initcall %pF returned with %s\n", fn, msgbuf);
70837+ if (msgbuf[0] || *msg1 || *msg2) {
70838+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
70839 }
70840
70841- return ret.result;
70842+ return trace_ret.result;
70843 }
70844
70845
70846@@ -893,11 +938,13 @@ static int __init kernel_init(void * unused)
70847 if (!ramdisk_execute_command)
70848 ramdisk_execute_command = "/init";
70849
70850- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
70851+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
70852 ramdisk_execute_command = NULL;
70853 prepare_namespace();
70854 }
70855
70856+ grsecurity_init();
70857+
70858 /*
70859 * Ok, we have completed the initial bootup, and
70860 * we're essentially up and running. Get rid of the
70861diff --git a/init/noinitramfs.c b/init/noinitramfs.c
70862index f4c1a3a..96c19bd 100644
70863--- a/init/noinitramfs.c
70864+++ b/init/noinitramfs.c
70865@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
70866 {
70867 int err;
70868
70869- err = sys_mkdir("/dev", 0755);
70870+ err = sys_mkdir((const char __user *)"/dev", 0755);
70871 if (err < 0)
70872 goto out;
70873
70874@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
70875 if (err < 0)
70876 goto out;
70877
70878- err = sys_mkdir("/root", 0700);
70879+ err = sys_mkdir((const char __user *)"/root", 0700);
70880 if (err < 0)
70881 goto out;
70882
70883diff --git a/ipc/mqueue.c b/ipc/mqueue.c
70884index d01bc14..8df81db 100644
70885--- a/ipc/mqueue.c
70886+++ b/ipc/mqueue.c
70887@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
70888 mq_bytes = (mq_msg_tblsz +
70889 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
70890
70891+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
70892 spin_lock(&mq_lock);
70893 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
70894 u->mq_bytes + mq_bytes >
70895diff --git a/ipc/msg.c b/ipc/msg.c
70896index 779f762..4af9e36 100644
70897--- a/ipc/msg.c
70898+++ b/ipc/msg.c
70899@@ -310,18 +310,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
70900 return security_msg_queue_associate(msq, msgflg);
70901 }
70902
70903+static struct ipc_ops msg_ops = {
70904+ .getnew = newque,
70905+ .associate = msg_security,
70906+ .more_checks = NULL
70907+};
70908+
70909 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
70910 {
70911 struct ipc_namespace *ns;
70912- struct ipc_ops msg_ops;
70913 struct ipc_params msg_params;
70914
70915 ns = current->nsproxy->ipc_ns;
70916
70917- msg_ops.getnew = newque;
70918- msg_ops.associate = msg_security;
70919- msg_ops.more_checks = NULL;
70920-
70921 msg_params.key = key;
70922 msg_params.flg = msgflg;
70923
70924diff --git a/ipc/sem.c b/ipc/sem.c
70925index b781007..f738b04 100644
70926--- a/ipc/sem.c
70927+++ b/ipc/sem.c
70928@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
70929 return 0;
70930 }
70931
70932+static struct ipc_ops sem_ops = {
70933+ .getnew = newary,
70934+ .associate = sem_security,
70935+ .more_checks = sem_more_checks
70936+};
70937+
70938 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70939 {
70940 struct ipc_namespace *ns;
70941- struct ipc_ops sem_ops;
70942 struct ipc_params sem_params;
70943
70944 ns = current->nsproxy->ipc_ns;
70945@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
70946 if (nsems < 0 || nsems > ns->sc_semmsl)
70947 return -EINVAL;
70948
70949- sem_ops.getnew = newary;
70950- sem_ops.associate = sem_security;
70951- sem_ops.more_checks = sem_more_checks;
70952-
70953 sem_params.key = key;
70954 sem_params.flg = semflg;
70955 sem_params.u.nsems = nsems;
70956@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
70957 ushort* sem_io = fast_sem_io;
70958 int nsems;
70959
70960+ pax_track_stack();
70961+
70962 sma = sem_lock_check(ns, semid);
70963 if (IS_ERR(sma))
70964 return PTR_ERR(sma);
70965@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
70966 unsigned long jiffies_left = 0;
70967 struct ipc_namespace *ns;
70968
70969+ pax_track_stack();
70970+
70971 ns = current->nsproxy->ipc_ns;
70972
70973 if (nsops < 1 || semid < 0)
70974diff --git a/ipc/shm.c b/ipc/shm.c
70975index d30732c..e4992cd 100644
70976--- a/ipc/shm.c
70977+++ b/ipc/shm.c
70978@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
70979 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
70980 #endif
70981
70982+#ifdef CONFIG_GRKERNSEC
70983+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70984+ const time_t shm_createtime, const uid_t cuid,
70985+ const int shmid);
70986+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
70987+ const time_t shm_createtime);
70988+#endif
70989+
70990 void shm_init_ns(struct ipc_namespace *ns)
70991 {
70992 ns->shm_ctlmax = SHMMAX;
70993@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
70994 shp->shm_lprid = 0;
70995 shp->shm_atim = shp->shm_dtim = 0;
70996 shp->shm_ctim = get_seconds();
70997+#ifdef CONFIG_GRKERNSEC
70998+ {
70999+ struct timespec timeval;
71000+ do_posix_clock_monotonic_gettime(&timeval);
71001+
71002+ shp->shm_createtime = timeval.tv_sec;
71003+ }
71004+#endif
71005 shp->shm_segsz = size;
71006 shp->shm_nattch = 0;
71007 shp->shm_file = file;
71008@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
71009 return 0;
71010 }
71011
71012+static struct ipc_ops shm_ops = {
71013+ .getnew = newseg,
71014+ .associate = shm_security,
71015+ .more_checks = shm_more_checks
71016+};
71017+
71018 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
71019 {
71020 struct ipc_namespace *ns;
71021- struct ipc_ops shm_ops;
71022 struct ipc_params shm_params;
71023
71024 ns = current->nsproxy->ipc_ns;
71025
71026- shm_ops.getnew = newseg;
71027- shm_ops.associate = shm_security;
71028- shm_ops.more_checks = shm_more_checks;
71029-
71030 shm_params.key = key;
71031 shm_params.flg = shmflg;
71032 shm_params.u.size = size;
71033@@ -857,6 +874,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71034 f_mode = FMODE_READ | FMODE_WRITE;
71035 }
71036 if (shmflg & SHM_EXEC) {
71037+
71038+#ifdef CONFIG_PAX_MPROTECT
71039+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
71040+ goto out;
71041+#endif
71042+
71043 prot |= PROT_EXEC;
71044 acc_mode |= S_IXUGO;
71045 }
71046@@ -880,9 +903,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
71047 if (err)
71048 goto out_unlock;
71049
71050+#ifdef CONFIG_GRKERNSEC
71051+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
71052+ shp->shm_perm.cuid, shmid) ||
71053+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
71054+ err = -EACCES;
71055+ goto out_unlock;
71056+ }
71057+#endif
71058+
71059 path.dentry = dget(shp->shm_file->f_path.dentry);
71060 path.mnt = shp->shm_file->f_path.mnt;
71061 shp->shm_nattch++;
71062+#ifdef CONFIG_GRKERNSEC
71063+ shp->shm_lapid = current->pid;
71064+#endif
71065 size = i_size_read(path.dentry->d_inode);
71066 shm_unlock(shp);
71067
71068diff --git a/kernel/acct.c b/kernel/acct.c
71069index a6605ca..ca91111 100644
71070--- a/kernel/acct.c
71071+++ b/kernel/acct.c
71072@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
71073 */
71074 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
71075 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
71076- file->f_op->write(file, (char *)&ac,
71077+ file->f_op->write(file, (char __force_user *)&ac,
71078 sizeof(acct_t), &file->f_pos);
71079 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
71080 set_fs(fs);
71081diff --git a/kernel/audit.c b/kernel/audit.c
71082index 5feed23..48415fd 100644
71083--- a/kernel/audit.c
71084+++ b/kernel/audit.c
71085@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
71086 3) suppressed due to audit_rate_limit
71087 4) suppressed due to audit_backlog_limit
71088 */
71089-static atomic_t audit_lost = ATOMIC_INIT(0);
71090+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
71091
71092 /* The netlink socket. */
71093 static struct sock *audit_sock;
71094@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
71095 unsigned long now;
71096 int print;
71097
71098- atomic_inc(&audit_lost);
71099+ atomic_inc_unchecked(&audit_lost);
71100
71101 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
71102
71103@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
71104 printk(KERN_WARNING
71105 "audit: audit_lost=%d audit_rate_limit=%d "
71106 "audit_backlog_limit=%d\n",
71107- atomic_read(&audit_lost),
71108+ atomic_read_unchecked(&audit_lost),
71109 audit_rate_limit,
71110 audit_backlog_limit);
71111 audit_panic(message);
71112@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71113 status_set.pid = audit_pid;
71114 status_set.rate_limit = audit_rate_limit;
71115 status_set.backlog_limit = audit_backlog_limit;
71116- status_set.lost = atomic_read(&audit_lost);
71117+ status_set.lost = atomic_read_unchecked(&audit_lost);
71118 status_set.backlog = skb_queue_len(&audit_skb_queue);
71119 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
71120 &status_set, sizeof(status_set));
71121@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
71122 spin_unlock_irq(&tsk->sighand->siglock);
71123 }
71124 read_unlock(&tasklist_lock);
71125- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
71126- &s, sizeof(s));
71127+
71128+ if (!err)
71129+ audit_send_reply(NETLINK_CB(skb).pid, seq,
71130+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
71131 break;
71132 }
71133 case AUDIT_TTY_SET: {
71134@@ -1262,12 +1264,13 @@ static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
71135 avail = audit_expand(ab,
71136 max_t(unsigned, AUDIT_BUFSIZ, 1+len-avail));
71137 if (!avail)
71138- goto out;
71139+ goto out_va_end;
71140 len = vsnprintf(skb_tail_pointer(skb), avail, fmt, args2);
71141 }
71142- va_end(args2);
71143 if (len > 0)
71144 skb_put(skb, len);
71145+out_va_end:
71146+ va_end(args2);
71147 out:
71148 return;
71149 }
71150diff --git a/kernel/auditsc.c b/kernel/auditsc.c
71151index 267e484..ac41bc3 100644
71152--- a/kernel/auditsc.c
71153+++ b/kernel/auditsc.c
71154@@ -1157,8 +1157,8 @@ static void audit_log_execve_info(struct audit_context *context,
71155 struct audit_buffer **ab,
71156 struct audit_aux_data_execve *axi)
71157 {
71158- int i;
71159- size_t len, len_sent = 0;
71160+ int i, len;
71161+ size_t len_sent = 0;
71162 const char __user *p;
71163 char *buf;
71164
71165@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
71166 }
71167
71168 /* global counter which is incremented every time something logs in */
71169-static atomic_t session_id = ATOMIC_INIT(0);
71170+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
71171
71172 /**
71173 * audit_set_loginuid - set a task's audit_context loginuid
71174@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT(0);
71175 */
71176 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
71177 {
71178- unsigned int sessionid = atomic_inc_return(&session_id);
71179+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
71180 struct audit_context *context = task->audit_context;
71181
71182 if (context && context->in_syscall) {
71183diff --git a/kernel/capability.c b/kernel/capability.c
71184index 8a944f5..db5001e 100644
71185--- a/kernel/capability.c
71186+++ b/kernel/capability.c
71187@@ -305,10 +305,26 @@ int capable(int cap)
71188 BUG();
71189 }
71190
71191- if (security_capable(cap) == 0) {
71192+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
71193 current->flags |= PF_SUPERPRIV;
71194 return 1;
71195 }
71196 return 0;
71197 }
71198+
71199+int capable_nolog(int cap)
71200+{
71201+ if (unlikely(!cap_valid(cap))) {
71202+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
71203+ BUG();
71204+ }
71205+
71206+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
71207+ current->flags |= PF_SUPERPRIV;
71208+ return 1;
71209+ }
71210+ return 0;
71211+}
71212+
71213 EXPORT_SYMBOL(capable);
71214+EXPORT_SYMBOL(capable_nolog);
71215diff --git a/kernel/cgroup.c b/kernel/cgroup.c
71216index 1fbcc74..7000012 100644
71217--- a/kernel/cgroup.c
71218+++ b/kernel/cgroup.c
71219@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
71220 struct hlist_head *hhead;
71221 struct cg_cgroup_link *link;
71222
71223+ pax_track_stack();
71224+
71225 /* First see if we already have a cgroup group that matches
71226 * the desired set */
71227 read_lock(&css_set_lock);
71228diff --git a/kernel/compat.c b/kernel/compat.c
71229index 8bc5578..186e44a 100644
71230--- a/kernel/compat.c
71231+++ b/kernel/compat.c
71232@@ -108,7 +108,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
71233 mm_segment_t oldfs;
71234 long ret;
71235
71236- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
71237+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
71238 oldfs = get_fs();
71239 set_fs(KERNEL_DS);
71240 ret = hrtimer_nanosleep_restart(restart);
71241@@ -140,7 +140,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
71242 oldfs = get_fs();
71243 set_fs(KERNEL_DS);
71244 ret = hrtimer_nanosleep(&tu,
71245- rmtp ? (struct timespec __user *)&rmt : NULL,
71246+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
71247 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
71248 set_fs(oldfs);
71249
71250@@ -247,7 +247,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
71251 mm_segment_t old_fs = get_fs();
71252
71253 set_fs(KERNEL_DS);
71254- ret = sys_sigpending((old_sigset_t __user *) &s);
71255+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
71256 set_fs(old_fs);
71257 if (ret == 0)
71258 ret = put_user(s, set);
71259@@ -266,8 +266,8 @@ asmlinkage long compat_sys_sigprocmask(int how, compat_old_sigset_t __user *set,
71260 old_fs = get_fs();
71261 set_fs(KERNEL_DS);
71262 ret = sys_sigprocmask(how,
71263- set ? (old_sigset_t __user *) &s : NULL,
71264- oset ? (old_sigset_t __user *) &s : NULL);
71265+ set ? (old_sigset_t __force_user *) &s : NULL,
71266+ oset ? (old_sigset_t __force_user *) &s : NULL);
71267 set_fs(old_fs);
71268 if (ret == 0)
71269 if (oset)
71270@@ -310,7 +310,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
71271 mm_segment_t old_fs = get_fs();
71272
71273 set_fs(KERNEL_DS);
71274- ret = sys_old_getrlimit(resource, &r);
71275+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
71276 set_fs(old_fs);
71277
71278 if (!ret) {
71279@@ -385,7 +385,7 @@ asmlinkage long compat_sys_getrusage(int who, struct compat_rusage __user *ru)
71280 mm_segment_t old_fs = get_fs();
71281
71282 set_fs(KERNEL_DS);
71283- ret = sys_getrusage(who, (struct rusage __user *) &r);
71284+ ret = sys_getrusage(who, (struct rusage __force_user *) &r);
71285 set_fs(old_fs);
71286
71287 if (ret)
71288@@ -412,8 +412,8 @@ compat_sys_wait4(compat_pid_t pid, compat_uint_t __user *stat_addr, int options,
71289 set_fs (KERNEL_DS);
71290 ret = sys_wait4(pid,
71291 (stat_addr ?
71292- (unsigned int __user *) &status : NULL),
71293- options, (struct rusage __user *) &r);
71294+ (unsigned int __force_user *) &status : NULL),
71295+ options, (struct rusage __force_user *) &r);
71296 set_fs (old_fs);
71297
71298 if (ret > 0) {
71299@@ -438,8 +438,8 @@ asmlinkage long compat_sys_waitid(int which, compat_pid_t pid,
71300 memset(&info, 0, sizeof(info));
71301
71302 set_fs(KERNEL_DS);
71303- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
71304- uru ? (struct rusage __user *)&ru : NULL);
71305+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
71306+ uru ? (struct rusage __force_user *)&ru : NULL);
71307 set_fs(old_fs);
71308
71309 if ((ret < 0) || (info.si_signo == 0))
71310@@ -569,8 +569,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
71311 oldfs = get_fs();
71312 set_fs(KERNEL_DS);
71313 err = sys_timer_settime(timer_id, flags,
71314- (struct itimerspec __user *) &newts,
71315- (struct itimerspec __user *) &oldts);
71316+ (struct itimerspec __force_user *) &newts,
71317+ (struct itimerspec __force_user *) &oldts);
71318 set_fs(oldfs);
71319 if (!err && old && put_compat_itimerspec(old, &oldts))
71320 return -EFAULT;
71321@@ -587,7 +587,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
71322 oldfs = get_fs();
71323 set_fs(KERNEL_DS);
71324 err = sys_timer_gettime(timer_id,
71325- (struct itimerspec __user *) &ts);
71326+ (struct itimerspec __force_user *) &ts);
71327 set_fs(oldfs);
71328 if (!err && put_compat_itimerspec(setting, &ts))
71329 return -EFAULT;
71330@@ -606,7 +606,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
71331 oldfs = get_fs();
71332 set_fs(KERNEL_DS);
71333 err = sys_clock_settime(which_clock,
71334- (struct timespec __user *) &ts);
71335+ (struct timespec __force_user *) &ts);
71336 set_fs(oldfs);
71337 return err;
71338 }
71339@@ -621,7 +621,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
71340 oldfs = get_fs();
71341 set_fs(KERNEL_DS);
71342 err = sys_clock_gettime(which_clock,
71343- (struct timespec __user *) &ts);
71344+ (struct timespec __force_user *) &ts);
71345 set_fs(oldfs);
71346 if (!err && put_compat_timespec(&ts, tp))
71347 return -EFAULT;
71348@@ -638,7 +638,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
71349 oldfs = get_fs();
71350 set_fs(KERNEL_DS);
71351 err = sys_clock_getres(which_clock,
71352- (struct timespec __user *) &ts);
71353+ (struct timespec __force_user *) &ts);
71354 set_fs(oldfs);
71355 if (!err && tp && put_compat_timespec(&ts, tp))
71356 return -EFAULT;
71357@@ -650,9 +650,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
71358 long err;
71359 mm_segment_t oldfs;
71360 struct timespec tu;
71361- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
71362+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
71363
71364- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
71365+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
71366 oldfs = get_fs();
71367 set_fs(KERNEL_DS);
71368 err = clock_nanosleep_restart(restart);
71369@@ -684,8 +684,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
71370 oldfs = get_fs();
71371 set_fs(KERNEL_DS);
71372 err = sys_clock_nanosleep(which_clock, flags,
71373- (struct timespec __user *) &in,
71374- (struct timespec __user *) &out);
71375+ (struct timespec __force_user *) &in,
71376+ (struct timespec __force_user *) &out);
71377 set_fs(oldfs);
71378
71379 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
71380diff --git a/kernel/configs.c b/kernel/configs.c
71381index abaee68..047facd 100644
71382--- a/kernel/configs.c
71383+++ b/kernel/configs.c
71384@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
71385 struct proc_dir_entry *entry;
71386
71387 /* create the current config file */
71388+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
71389+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
71390+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
71391+ &ikconfig_file_ops);
71392+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71393+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
71394+ &ikconfig_file_ops);
71395+#endif
71396+#else
71397 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
71398 &ikconfig_file_ops);
71399+#endif
71400+
71401 if (!entry)
71402 return -ENOMEM;
71403
71404diff --git a/kernel/cpu.c b/kernel/cpu.c
71405index 3f2f04f..4e53ded 100644
71406--- a/kernel/cpu.c
71407+++ b/kernel/cpu.c
71408@@ -20,7 +20,7 @@
71409 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
71410 static DEFINE_MUTEX(cpu_add_remove_lock);
71411
71412-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
71413+static RAW_NOTIFIER_HEAD(cpu_chain);
71414
71415 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
71416 * Should always be manipulated under cpu_add_remove_lock
71417diff --git a/kernel/cred.c b/kernel/cred.c
71418index 0b5b5fc..f7fe51a 100644
71419--- a/kernel/cred.c
71420+++ b/kernel/cred.c
71421@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head *rcu)
71422 */
71423 void __put_cred(struct cred *cred)
71424 {
71425+ pax_track_stack();
71426+
71427 kdebug("__put_cred(%p{%d,%d})", cred,
71428 atomic_read(&cred->usage),
71429 read_cred_subscribers(cred));
71430@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
71431 {
71432 struct cred *cred;
71433
71434+ pax_track_stack();
71435+
71436 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
71437 atomic_read(&tsk->cred->usage),
71438 read_cred_subscribers(tsk->cred));
71439@@ -206,6 +210,15 @@ void exit_creds(struct task_struct *tsk)
71440 validate_creds(cred);
71441 put_cred(cred);
71442 }
71443+
71444+#ifdef CONFIG_GRKERNSEC_SETXID
71445+ cred = (struct cred *) tsk->delayed_cred;
71446+ if (cred) {
71447+ tsk->delayed_cred = NULL;
71448+ validate_creds(cred);
71449+ put_cred(cred);
71450+ }
71451+#endif
71452 }
71453
71454 /**
71455@@ -222,6 +235,8 @@ const struct cred *get_task_cred(struct task_struct *task)
71456 {
71457 const struct cred *cred;
71458
71459+ pax_track_stack();
71460+
71461 rcu_read_lock();
71462
71463 do {
71464@@ -241,6 +256,8 @@ struct cred *cred_alloc_blank(void)
71465 {
71466 struct cred *new;
71467
71468+ pax_track_stack();
71469+
71470 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
71471 if (!new)
71472 return NULL;
71473@@ -289,6 +306,8 @@ struct cred *prepare_creds(void)
71474 const struct cred *old;
71475 struct cred *new;
71476
71477+ pax_track_stack();
71478+
71479 validate_process_creds();
71480
71481 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71482@@ -335,6 +354,8 @@ struct cred *prepare_exec_creds(void)
71483 struct thread_group_cred *tgcred = NULL;
71484 struct cred *new;
71485
71486+ pax_track_stack();
71487+
71488 #ifdef CONFIG_KEYS
71489 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
71490 if (!tgcred)
71491@@ -441,6 +462,8 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
71492 struct cred *new;
71493 int ret;
71494
71495+ pax_track_stack();
71496+
71497 mutex_init(&p->cred_guard_mutex);
71498
71499 if (
71500@@ -523,11 +546,13 @@ error_put:
71501 * Always returns 0 thus allowing this function to be tail-called at the end
71502 * of, say, sys_setgid().
71503 */
71504-int commit_creds(struct cred *new)
71505+static int __commit_creds(struct cred *new)
71506 {
71507 struct task_struct *task = current;
71508 const struct cred *old = task->real_cred;
71509
71510+ pax_track_stack();
71511+
71512 kdebug("commit_creds(%p{%d,%d})", new,
71513 atomic_read(&new->usage),
71514 read_cred_subscribers(new));
71515@@ -544,6 +569,8 @@ int commit_creds(struct cred *new)
71516
71517 get_cred(new); /* we will require a ref for the subj creds too */
71518
71519+ gr_set_role_label(task, new->uid, new->gid);
71520+
71521 /* dumpability changes */
71522 if (old->euid != new->euid ||
71523 old->egid != new->egid ||
71524@@ -563,10 +590,8 @@ int commit_creds(struct cred *new)
71525 key_fsgid_changed(task);
71526
71527 /* do it
71528- * - What if a process setreuid()'s and this brings the
71529- * new uid over his NPROC rlimit? We can check this now
71530- * cheaply with the new uid cache, so if it matters
71531- * we should be checking for it. -DaveM
71532+ * RLIMIT_NPROC limits on user->processes have already been checked
71533+ * in set_user().
71534 */
71535 alter_cred_subscribers(new, 2);
71536 if (new->user != old->user)
71537@@ -595,8 +620,96 @@ int commit_creds(struct cred *new)
71538 put_cred(old);
71539 return 0;
71540 }
71541+
71542+#ifdef CONFIG_GRKERNSEC_SETXID
71543+extern int set_user(struct cred *new);
71544+
71545+void gr_delayed_cred_worker(void)
71546+{
71547+ const struct cred *new = current->delayed_cred;
71548+ struct cred *ncred;
71549+
71550+ current->delayed_cred = NULL;
71551+
71552+ if (current_uid() && new != NULL) {
71553+ // from doing get_cred on it when queueing this
71554+ put_cred(new);
71555+ return;
71556+ } else if (new == NULL)
71557+ return;
71558+
71559+ ncred = prepare_creds();
71560+ if (!ncred)
71561+ goto die;
71562+ // uids
71563+ ncred->uid = new->uid;
71564+ ncred->euid = new->euid;
71565+ ncred->suid = new->suid;
71566+ ncred->fsuid = new->fsuid;
71567+ // gids
71568+ ncred->gid = new->gid;
71569+ ncred->egid = new->egid;
71570+ ncred->sgid = new->sgid;
71571+ ncred->fsgid = new->fsgid;
71572+ // groups
71573+ if (set_groups(ncred, new->group_info) < 0) {
71574+ abort_creds(ncred);
71575+ goto die;
71576+ }
71577+ // caps
71578+ ncred->securebits = new->securebits;
71579+ ncred->cap_inheritable = new->cap_inheritable;
71580+ ncred->cap_permitted = new->cap_permitted;
71581+ ncred->cap_effective = new->cap_effective;
71582+ ncred->cap_bset = new->cap_bset;
71583+
71584+ if (set_user(ncred)) {
71585+ abort_creds(ncred);
71586+ goto die;
71587+ }
71588+
71589+ // from doing get_cred on it when queueing this
71590+ put_cred(new);
71591+
71592+ __commit_creds(ncred);
71593+ return;
71594+die:
71595+ // from doing get_cred on it when queueing this
71596+ put_cred(new);
71597+ do_group_exit(SIGKILL);
71598+}
71599+#endif
71600+
71601+int commit_creds(struct cred *new)
71602+{
71603+#ifdef CONFIG_GRKERNSEC_SETXID
71604+ struct task_struct *t;
71605+
71606+ /* we won't get called with tasklist_lock held for writing
71607+ and interrupts disabled as the cred struct in that case is
71608+ init_cred
71609+ */
71610+ if (grsec_enable_setxid && !current_is_single_threaded() &&
71611+ !current_uid() && new->uid) {
71612+ rcu_read_lock();
71613+ read_lock(&tasklist_lock);
71614+ for (t = next_thread(current); t != current;
71615+ t = next_thread(t)) {
71616+ if (t->delayed_cred == NULL) {
71617+ t->delayed_cred = get_cred(new);
71618+ set_tsk_need_resched(t);
71619+ }
71620+ }
71621+ read_unlock(&tasklist_lock);
71622+ rcu_read_unlock();
71623+ }
71624+#endif
71625+ return __commit_creds(new);
71626+}
71627+
71628 EXPORT_SYMBOL(commit_creds);
71629
71630+
71631 /**
71632 * abort_creds - Discard a set of credentials and unlock the current task
71633 * @new: The credentials that were going to be applied
71634@@ -606,6 +719,8 @@ EXPORT_SYMBOL(commit_creds);
71635 */
71636 void abort_creds(struct cred *new)
71637 {
71638+ pax_track_stack();
71639+
71640 kdebug("abort_creds(%p{%d,%d})", new,
71641 atomic_read(&new->usage),
71642 read_cred_subscribers(new));
71643@@ -629,6 +744,8 @@ const struct cred *override_creds(const struct cred *new)
71644 {
71645 const struct cred *old = current->cred;
71646
71647+ pax_track_stack();
71648+
71649 kdebug("override_creds(%p{%d,%d})", new,
71650 atomic_read(&new->usage),
71651 read_cred_subscribers(new));
71652@@ -658,6 +775,8 @@ void revert_creds(const struct cred *old)
71653 {
71654 const struct cred *override = current->cred;
71655
71656+ pax_track_stack();
71657+
71658 kdebug("revert_creds(%p{%d,%d})", old,
71659 atomic_read(&old->usage),
71660 read_cred_subscribers(old));
71661@@ -704,6 +823,8 @@ struct cred *prepare_kernel_cred(struct task_struct *daemon)
71662 const struct cred *old;
71663 struct cred *new;
71664
71665+ pax_track_stack();
71666+
71667 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
71668 if (!new)
71669 return NULL;
71670@@ -758,6 +879,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
71671 */
71672 int set_security_override(struct cred *new, u32 secid)
71673 {
71674+ pax_track_stack();
71675+
71676 return security_kernel_act_as(new, secid);
71677 }
71678 EXPORT_SYMBOL(set_security_override);
71679@@ -777,6 +900,8 @@ int set_security_override_from_ctx(struct cred *new, const char *secctx)
71680 u32 secid;
71681 int ret;
71682
71683+ pax_track_stack();
71684+
71685 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
71686 if (ret < 0)
71687 return ret;
71688diff --git a/kernel/exit.c b/kernel/exit.c
71689index 0f8fae3..9344a56 100644
71690--- a/kernel/exit.c
71691+++ b/kernel/exit.c
71692@@ -55,6 +55,10 @@
71693 #include <asm/pgtable.h>
71694 #include <asm/mmu_context.h>
71695
71696+#ifdef CONFIG_GRKERNSEC
71697+extern rwlock_t grsec_exec_file_lock;
71698+#endif
71699+
71700 static void exit_mm(struct task_struct * tsk);
71701
71702 static void __unhash_process(struct task_struct *p)
71703@@ -174,6 +178,10 @@ void release_task(struct task_struct * p)
71704 struct task_struct *leader;
71705 int zap_leader;
71706 repeat:
71707+#ifdef CONFIG_NET
71708+ gr_del_task_from_ip_table(p);
71709+#endif
71710+
71711 tracehook_prepare_release_task(p);
71712 /* don't need to get the RCU readlock here - the process is dead and
71713 * can't be modifying its own credentials */
71714@@ -397,7 +405,7 @@ int allow_signal(int sig)
71715 * know it'll be handled, so that they don't get converted to
71716 * SIGKILL or just silently dropped.
71717 */
71718- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
71719+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
71720 recalc_sigpending();
71721 spin_unlock_irq(&current->sighand->siglock);
71722 return 0;
71723@@ -433,6 +441,17 @@ void daemonize(const char *name, ...)
71724 vsnprintf(current->comm, sizeof(current->comm), name, args);
71725 va_end(args);
71726
71727+#ifdef CONFIG_GRKERNSEC
71728+ write_lock(&grsec_exec_file_lock);
71729+ if (current->exec_file) {
71730+ fput(current->exec_file);
71731+ current->exec_file = NULL;
71732+ }
71733+ write_unlock(&grsec_exec_file_lock);
71734+#endif
71735+
71736+ gr_set_kernel_label(current);
71737+
71738 /*
71739 * If we were started as result of loading a module, close all of the
71740 * user space pages. We don't need them, and if we didn't close them
71741@@ -897,17 +916,17 @@ NORET_TYPE void do_exit(long code)
71742 struct task_struct *tsk = current;
71743 int group_dead;
71744
71745- profile_task_exit(tsk);
71746-
71747- WARN_ON(atomic_read(&tsk->fs_excl));
71748-
71749+ /*
71750+ * Check this first since set_fs() below depends on
71751+ * current_thread_info(), which we better not access when we're in
71752+ * interrupt context. Other than that, we want to do the set_fs()
71753+ * as early as possible.
71754+ */
71755 if (unlikely(in_interrupt()))
71756 panic("Aiee, killing interrupt handler!");
71757- if (unlikely(!tsk->pid))
71758- panic("Attempted to kill the idle task!");
71759
71760 /*
71761- * If do_exit is called because this processes oopsed, it's possible
71762+ * If do_exit is called because this processes Oops'ed, it's possible
71763 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
71764 * continuing. Amongst other possible reasons, this is to prevent
71765 * mm_release()->clear_child_tid() from writing to a user-controlled
71766@@ -915,6 +934,13 @@ NORET_TYPE void do_exit(long code)
71767 */
71768 set_fs(USER_DS);
71769
71770+ profile_task_exit(tsk);
71771+
71772+ WARN_ON(atomic_read(&tsk->fs_excl));
71773+
71774+ if (unlikely(!tsk->pid))
71775+ panic("Attempted to kill the idle task!");
71776+
71777 tracehook_report_exit(&code);
71778
71779 validate_creds_for_do_exit(tsk);
71780@@ -973,6 +999,9 @@ NORET_TYPE void do_exit(long code)
71781 tsk->exit_code = code;
71782 taskstats_exit(tsk, group_dead);
71783
71784+ gr_acl_handle_psacct(tsk, code);
71785+ gr_acl_handle_exit();
71786+
71787 exit_mm(tsk);
71788
71789 if (group_dead)
71790@@ -1188,7 +1217,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
71791
71792 if (unlikely(wo->wo_flags & WNOWAIT)) {
71793 int exit_code = p->exit_code;
71794- int why, status;
71795+ int why;
71796
71797 get_task_struct(p);
71798 read_unlock(&tasklist_lock);
71799diff --git a/kernel/fork.c b/kernel/fork.c
71800index 4bde56f..29a9bab 100644
71801--- a/kernel/fork.c
71802+++ b/kernel/fork.c
71803@@ -253,7 +253,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
71804 *stackend = STACK_END_MAGIC; /* for overflow detection */
71805
71806 #ifdef CONFIG_CC_STACKPROTECTOR
71807- tsk->stack_canary = get_random_int();
71808+ tsk->stack_canary = pax_get_random_long();
71809 #endif
71810
71811 /* One for us, one for whoever does the "release_task()" (usually parent) */
71812@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71813 mm->locked_vm = 0;
71814 mm->mmap = NULL;
71815 mm->mmap_cache = NULL;
71816- mm->free_area_cache = oldmm->mmap_base;
71817- mm->cached_hole_size = ~0UL;
71818+ mm->free_area_cache = oldmm->free_area_cache;
71819+ mm->cached_hole_size = oldmm->cached_hole_size;
71820 mm->map_count = 0;
71821 cpumask_clear(mm_cpumask(mm));
71822 mm->mm_rb = RB_ROOT;
71823@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71824 tmp->vm_flags &= ~VM_LOCKED;
71825 tmp->vm_mm = mm;
71826 tmp->vm_next = tmp->vm_prev = NULL;
71827+ tmp->vm_mirror = NULL;
71828 anon_vma_link(tmp);
71829 file = tmp->vm_file;
71830 if (file) {
71831@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
71832 if (retval)
71833 goto out;
71834 }
71835+
71836+#ifdef CONFIG_PAX_SEGMEXEC
71837+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
71838+ struct vm_area_struct *mpnt_m;
71839+
71840+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
71841+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
71842+
71843+ if (!mpnt->vm_mirror)
71844+ continue;
71845+
71846+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
71847+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
71848+ mpnt->vm_mirror = mpnt_m;
71849+ } else {
71850+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
71851+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
71852+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
71853+ mpnt->vm_mirror->vm_mirror = mpnt;
71854+ }
71855+ }
71856+ BUG_ON(mpnt_m);
71857+ }
71858+#endif
71859+
71860 /* a new mm has just been created */
71861 arch_dup_mmap(oldmm, mm);
71862 retval = 0;
71863@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
71864 write_unlock(&fs->lock);
71865 return -EAGAIN;
71866 }
71867- fs->users++;
71868+ atomic_inc(&fs->users);
71869 write_unlock(&fs->lock);
71870 return 0;
71871 }
71872 tsk->fs = copy_fs_struct(fs);
71873 if (!tsk->fs)
71874 return -ENOMEM;
71875+ gr_set_chroot_entries(tsk, &tsk->fs->root);
71876 return 0;
71877 }
71878
71879@@ -1033,12 +1060,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
71880 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
71881 #endif
71882 retval = -EAGAIN;
71883+
71884+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
71885+
71886 if (atomic_read(&p->real_cred->user->processes) >=
71887 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
71888- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
71889- p->real_cred->user != INIT_USER)
71890+ if (p->real_cred->user != INIT_USER &&
71891+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
71892 goto bad_fork_free;
71893 }
71894+ current->flags &= ~PF_NPROC_EXCEEDED;
71895
71896 retval = copy_creds(p, clone_flags);
71897 if (retval < 0)
71898@@ -1183,6 +1214,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
71899 goto bad_fork_free_pid;
71900 }
71901
71902+ gr_copy_label(p);
71903+
71904 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
71905 /*
71906 * Clear TID on mm_release()?
71907@@ -1333,6 +1366,8 @@ bad_fork_cleanup_count:
71908 bad_fork_free:
71909 free_task(p);
71910 fork_out:
71911+ gr_log_forkfail(retval);
71912+
71913 return ERR_PTR(retval);
71914 }
71915
71916@@ -1426,6 +1461,8 @@ long do_fork(unsigned long clone_flags,
71917 if (clone_flags & CLONE_PARENT_SETTID)
71918 put_user(nr, parent_tidptr);
71919
71920+ gr_handle_brute_check();
71921+
71922 if (clone_flags & CLONE_VFORK) {
71923 p->vfork_done = &vfork;
71924 init_completion(&vfork);
71925@@ -1558,7 +1595,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
71926 return 0;
71927
71928 /* don't need lock here; in the worst case we'll do useless copy */
71929- if (fs->users == 1)
71930+ if (atomic_read(&fs->users) == 1)
71931 return 0;
71932
71933 *new_fsp = copy_fs_struct(fs);
71934@@ -1681,7 +1718,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
71935 fs = current->fs;
71936 write_lock(&fs->lock);
71937 current->fs = new_fs;
71938- if (--fs->users)
71939+ gr_set_chroot_entries(current, &current->fs->root);
71940+ if (atomic_dec_return(&fs->users))
71941 new_fs = NULL;
71942 else
71943 new_fs = fs;
71944diff --git a/kernel/futex.c b/kernel/futex.c
71945index fb98c9f..333faec 100644
71946--- a/kernel/futex.c
71947+++ b/kernel/futex.c
71948@@ -54,6 +54,7 @@
71949 #include <linux/mount.h>
71950 #include <linux/pagemap.h>
71951 #include <linux/syscalls.h>
71952+#include <linux/ptrace.h>
71953 #include <linux/signal.h>
71954 #include <linux/module.h>
71955 #include <linux/magic.h>
71956@@ -223,6 +224,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
71957 struct page *page;
71958 int err, ro = 0;
71959
71960+#ifdef CONFIG_PAX_SEGMEXEC
71961+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
71962+ return -EFAULT;
71963+#endif
71964+
71965 /*
71966 * The futex address must be "naturally" aligned.
71967 */
71968@@ -1819,6 +1825,8 @@ static int futex_wait(u32 __user *uaddr, int fshared,
71969 struct futex_q q;
71970 int ret;
71971
71972+ pax_track_stack();
71973+
71974 if (!bitset)
71975 return -EINVAL;
71976
71977@@ -1871,7 +1879,7 @@ retry:
71978
71979 restart = &current_thread_info()->restart_block;
71980 restart->fn = futex_wait_restart;
71981- restart->futex.uaddr = (u32 *)uaddr;
71982+ restart->futex.uaddr = uaddr;
71983 restart->futex.val = val;
71984 restart->futex.time = abs_time->tv64;
71985 restart->futex.bitset = bitset;
71986@@ -2233,6 +2241,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, int fshared,
71987 struct futex_q q;
71988 int res, ret;
71989
71990+ pax_track_stack();
71991+
71992 if (!bitset)
71993 return -EINVAL;
71994
71995@@ -2423,6 +2433,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
71996 if (!p)
71997 goto err_unlock;
71998 ret = -EPERM;
71999+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72000+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72001+ goto err_unlock;
72002+#endif
72003 pcred = __task_cred(p);
72004 if (cred->euid != pcred->euid &&
72005 cred->euid != pcred->uid &&
72006@@ -2489,7 +2503,7 @@ retry:
72007 */
72008 static inline int fetch_robust_entry(struct robust_list __user **entry,
72009 struct robust_list __user * __user *head,
72010- int *pi)
72011+ unsigned int *pi)
72012 {
72013 unsigned long uentry;
72014
72015@@ -2670,6 +2684,7 @@ static int __init futex_init(void)
72016 {
72017 u32 curval;
72018 int i;
72019+ mm_segment_t oldfs;
72020
72021 /*
72022 * This will fail and we want it. Some arch implementations do
72023@@ -2681,7 +2696,10 @@ static int __init futex_init(void)
72024 * implementation, the non functional ones will return
72025 * -ENOSYS.
72026 */
72027+ oldfs = get_fs();
72028+ set_fs(USER_DS);
72029 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
72030+ set_fs(oldfs);
72031 if (curval == -EFAULT)
72032 futex_cmpxchg_enabled = 1;
72033
72034diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
72035index 2357165..eb25501 100644
72036--- a/kernel/futex_compat.c
72037+++ b/kernel/futex_compat.c
72038@@ -10,6 +10,7 @@
72039 #include <linux/compat.h>
72040 #include <linux/nsproxy.h>
72041 #include <linux/futex.h>
72042+#include <linux/ptrace.h>
72043
72044 #include <asm/uaccess.h>
72045
72046@@ -135,7 +136,8 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72047 {
72048 struct compat_robust_list_head __user *head;
72049 unsigned long ret;
72050- const struct cred *cred = current_cred(), *pcred;
72051+ const struct cred *cred = current_cred();
72052+ const struct cred *pcred;
72053
72054 if (!futex_cmpxchg_enabled)
72055 return -ENOSYS;
72056@@ -151,6 +153,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
72057 if (!p)
72058 goto err_unlock;
72059 ret = -EPERM;
72060+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
72061+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
72062+ goto err_unlock;
72063+#endif
72064 pcred = __task_cred(p);
72065 if (cred->euid != pcred->euid &&
72066 cred->euid != pcred->uid &&
72067diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
72068index 9b22d03..6295b62 100644
72069--- a/kernel/gcov/base.c
72070+++ b/kernel/gcov/base.c
72071@@ -102,11 +102,6 @@ void gcov_enable_events(void)
72072 }
72073
72074 #ifdef CONFIG_MODULES
72075-static inline int within(void *addr, void *start, unsigned long size)
72076-{
72077- return ((addr >= start) && (addr < start + size));
72078-}
72079-
72080 /* Update list and generate events when modules are unloaded. */
72081 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72082 void *data)
72083@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
72084 prev = NULL;
72085 /* Remove entries located in module from linked list. */
72086 for (info = gcov_info_head; info; info = info->next) {
72087- if (within(info, mod->module_core, mod->core_size)) {
72088+ if (within_module_core_rw((unsigned long)info, mod)) {
72089 if (prev)
72090 prev->next = info->next;
72091 else
72092diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
72093index a6e9d00..a0da4f9 100644
72094--- a/kernel/hrtimer.c
72095+++ b/kernel/hrtimer.c
72096@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
72097 local_irq_restore(flags);
72098 }
72099
72100-static void run_hrtimer_softirq(struct softirq_action *h)
72101+static void run_hrtimer_softirq(void)
72102 {
72103 hrtimer_peek_ahead_timers();
72104 }
72105diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
72106index 8b6b8b6..6bc87df 100644
72107--- a/kernel/kallsyms.c
72108+++ b/kernel/kallsyms.c
72109@@ -11,6 +11,9 @@
72110 * Changed the compression method from stem compression to "table lookup"
72111 * compression (see scripts/kallsyms.c for a more complete description)
72112 */
72113+#ifdef CONFIG_GRKERNSEC_HIDESYM
72114+#define __INCLUDED_BY_HIDESYM 1
72115+#endif
72116 #include <linux/kallsyms.h>
72117 #include <linux/module.h>
72118 #include <linux/init.h>
72119@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
72120
72121 static inline int is_kernel_inittext(unsigned long addr)
72122 {
72123+ if (system_state != SYSTEM_BOOTING)
72124+ return 0;
72125+
72126 if (addr >= (unsigned long)_sinittext
72127 && addr <= (unsigned long)_einittext)
72128 return 1;
72129 return 0;
72130 }
72131
72132+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72133+#ifdef CONFIG_MODULES
72134+static inline int is_module_text(unsigned long addr)
72135+{
72136+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
72137+ return 1;
72138+
72139+ addr = ktla_ktva(addr);
72140+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
72141+}
72142+#else
72143+static inline int is_module_text(unsigned long addr)
72144+{
72145+ return 0;
72146+}
72147+#endif
72148+#endif
72149+
72150 static inline int is_kernel_text(unsigned long addr)
72151 {
72152 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
72153@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigned long addr)
72154
72155 static inline int is_kernel(unsigned long addr)
72156 {
72157+
72158+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72159+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
72160+ return 1;
72161+
72162+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
72163+#else
72164 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
72165+#endif
72166+
72167 return 1;
72168 return in_gate_area_no_task(addr);
72169 }
72170
72171 static int is_ksym_addr(unsigned long addr)
72172 {
72173+
72174+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
72175+ if (is_module_text(addr))
72176+ return 0;
72177+#endif
72178+
72179 if (all_var)
72180 return is_kernel(addr);
72181
72182@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
72183
72184 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
72185 {
72186- iter->name[0] = '\0';
72187 iter->nameoff = get_symbol_offset(new_pos);
72188 iter->pos = new_pos;
72189 }
72190@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, void *p)
72191 {
72192 struct kallsym_iter *iter = m->private;
72193
72194+#ifdef CONFIG_GRKERNSEC_HIDESYM
72195+ if (current_uid())
72196+ return 0;
72197+#endif
72198+
72199 /* Some debugging symbols have no name. Ignore them. */
72200 if (!iter->name[0])
72201 return 0;
72202@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
72203 struct kallsym_iter *iter;
72204 int ret;
72205
72206- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
72207+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
72208 if (!iter)
72209 return -ENOMEM;
72210 reset_iter(iter, 0);
72211diff --git a/kernel/kexec.c b/kernel/kexec.c
72212index f336e21..9c1c20b 100644
72213--- a/kernel/kexec.c
72214+++ b/kernel/kexec.c
72215@@ -1028,7 +1028,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
72216 unsigned long flags)
72217 {
72218 struct compat_kexec_segment in;
72219- struct kexec_segment out, __user *ksegments;
72220+ struct kexec_segment out;
72221+ struct kexec_segment __user *ksegments;
72222 unsigned long i, result;
72223
72224 /* Don't allow clients that don't understand the native
72225diff --git a/kernel/kgdb.c b/kernel/kgdb.c
72226index 53dae4b..9ba3743 100644
72227--- a/kernel/kgdb.c
72228+++ b/kernel/kgdb.c
72229@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
72230 /* Guard for recursive entry */
72231 static int exception_level;
72232
72233-static struct kgdb_io *kgdb_io_ops;
72234+static const struct kgdb_io *kgdb_io_ops;
72235 static DEFINE_SPINLOCK(kgdb_registration_lock);
72236
72237 /* kgdb console driver is loaded */
72238@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1);
72239 */
72240 static atomic_t passive_cpu_wait[NR_CPUS];
72241 static atomic_t cpu_in_kgdb[NR_CPUS];
72242-atomic_t kgdb_setting_breakpoint;
72243+atomic_unchecked_t kgdb_setting_breakpoint;
72244
72245 struct task_struct *kgdb_usethread;
72246 struct task_struct *kgdb_contthread;
72247@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBYTES +
72248 sizeof(unsigned long)];
72249
72250 /* to keep track of the CPU which is doing the single stepping*/
72251-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72252+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
72253
72254 /*
72255 * If you are debugging a problem where roundup (the collection of
72256@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
72257 return 0;
72258 if (kgdb_connected)
72259 return 1;
72260- if (atomic_read(&kgdb_setting_breakpoint))
72261+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
72262 return 1;
72263 if (print_wait)
72264 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
72265@@ -1426,8 +1426,8 @@ acquirelock:
72266 * instance of the exception handler wanted to come into the
72267 * debugger on a different CPU via a single step
72268 */
72269- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
72270- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
72271+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
72272+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
72273
72274 atomic_set(&kgdb_active, -1);
72275 touch_softlockup_watchdog();
72276@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void)
72277 *
72278 * Register it with the KGDB core.
72279 */
72280-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
72281+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
72282 {
72283 int err;
72284
72285@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_module);
72286 *
72287 * Unregister it with the KGDB core.
72288 */
72289-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
72290+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
72291 {
72292 BUG_ON(kgdb_connected);
72293
72294@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
72295 */
72296 void kgdb_breakpoint(void)
72297 {
72298- atomic_set(&kgdb_setting_breakpoint, 1);
72299+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
72300 wmb(); /* Sync point before breakpoint */
72301 arch_kgdb_breakpoint();
72302 wmb(); /* Sync point after breakpoint */
72303- atomic_set(&kgdb_setting_breakpoint, 0);
72304+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
72305 }
72306 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
72307
72308diff --git a/kernel/kmod.c b/kernel/kmod.c
72309index d206078..e27ba6a 100644
72310--- a/kernel/kmod.c
72311+++ b/kernel/kmod.c
72312@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sbin/modprobe";
72313 * If module auto-loading support is disabled then this function
72314 * becomes a no-operation.
72315 */
72316-int __request_module(bool wait, const char *fmt, ...)
72317+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
72318 {
72319- va_list args;
72320 char module_name[MODULE_NAME_LEN];
72321 unsigned int max_modprobes;
72322 int ret;
72323- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
72324+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
72325 static char *envp[] = { "HOME=/",
72326 "TERM=linux",
72327 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
72328@@ -84,12 +83,24 @@ int __request_module(bool wait, const char *fmt, ...)
72329 if (ret)
72330 return ret;
72331
72332- va_start(args, fmt);
72333- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
72334- va_end(args);
72335+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
72336 if (ret >= MODULE_NAME_LEN)
72337 return -ENAMETOOLONG;
72338
72339+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72340+ if (!current_uid()) {
72341+ /* hack to workaround consolekit/udisks stupidity */
72342+ read_lock(&tasklist_lock);
72343+ if (!strcmp(current->comm, "mount") &&
72344+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
72345+ read_unlock(&tasklist_lock);
72346+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
72347+ return -EPERM;
72348+ }
72349+ read_unlock(&tasklist_lock);
72350+ }
72351+#endif
72352+
72353 /* If modprobe needs a service that is in a module, we get a recursive
72354 * loop. Limit the number of running kmod threads to max_threads/2 or
72355 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
72356@@ -123,6 +134,48 @@ int __request_module(bool wait, const char *fmt, ...)
72357 atomic_dec(&kmod_concurrent);
72358 return ret;
72359 }
72360+
72361+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
72362+{
72363+ va_list args;
72364+ int ret;
72365+
72366+ va_start(args, fmt);
72367+ ret = ____request_module(wait, module_param, fmt, args);
72368+ va_end(args);
72369+
72370+ return ret;
72371+}
72372+
72373+int __request_module(bool wait, const char *fmt, ...)
72374+{
72375+ va_list args;
72376+ int ret;
72377+
72378+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72379+ if (current_uid()) {
72380+ char module_param[MODULE_NAME_LEN];
72381+
72382+ memset(module_param, 0, sizeof(module_param));
72383+
72384+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
72385+
72386+ va_start(args, fmt);
72387+ ret = ____request_module(wait, module_param, fmt, args);
72388+ va_end(args);
72389+
72390+ return ret;
72391+ }
72392+#endif
72393+
72394+ va_start(args, fmt);
72395+ ret = ____request_module(wait, NULL, fmt, args);
72396+ va_end(args);
72397+
72398+ return ret;
72399+}
72400+
72401+
72402 EXPORT_SYMBOL(__request_module);
72403 #endif /* CONFIG_MODULES */
72404
72405@@ -228,7 +281,7 @@ static int wait_for_helper(void *data)
72406 *
72407 * Thus the __user pointer cast is valid here.
72408 */
72409- sys_wait4(pid, (int __user *)&ret, 0, NULL);
72410+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
72411
72412 /*
72413 * If ret is 0, either ____call_usermodehelper failed and the
72414diff --git a/kernel/kprobes.c b/kernel/kprobes.c
72415index 176d825..77fa8ea 100644
72416--- a/kernel/kprobes.c
72417+++ b/kernel/kprobes.c
72418@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(void)
72419 * kernel image and loaded module images reside. This is required
72420 * so x86_64 can correctly handle the %rip-relative fixups.
72421 */
72422- kip->insns = module_alloc(PAGE_SIZE);
72423+ kip->insns = module_alloc_exec(PAGE_SIZE);
72424 if (!kip->insns) {
72425 kfree(kip);
72426 return NULL;
72427@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
72428 */
72429 if (!list_is_singular(&kprobe_insn_pages)) {
72430 list_del(&kip->list);
72431- module_free(NULL, kip->insns);
72432+ module_free_exec(NULL, kip->insns);
72433 kfree(kip);
72434 }
72435 return 1;
72436@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
72437 {
72438 int i, err = 0;
72439 unsigned long offset = 0, size = 0;
72440- char *modname, namebuf[128];
72441+ char *modname, namebuf[KSYM_NAME_LEN];
72442 const char *symbol_name;
72443 void *addr;
72444 struct kprobe_blackpoint *kb;
72445@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
72446 const char *sym = NULL;
72447 unsigned int i = *(loff_t *) v;
72448 unsigned long offset = 0;
72449- char *modname, namebuf[128];
72450+ char *modname, namebuf[KSYM_NAME_LEN];
72451
72452 head = &kprobe_table[i];
72453 preempt_disable();
72454diff --git a/kernel/lockdep.c b/kernel/lockdep.c
72455index d86fe89..d12fc66 100644
72456--- a/kernel/lockdep.c
72457+++ b/kernel/lockdep.c
72458@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_trace = {
72459 /*
72460 * Various lockdep statistics:
72461 */
72462-atomic_t chain_lookup_hits;
72463-atomic_t chain_lookup_misses;
72464-atomic_t hardirqs_on_events;
72465-atomic_t hardirqs_off_events;
72466-atomic_t redundant_hardirqs_on;
72467-atomic_t redundant_hardirqs_off;
72468-atomic_t softirqs_on_events;
72469-atomic_t softirqs_off_events;
72470-atomic_t redundant_softirqs_on;
72471-atomic_t redundant_softirqs_off;
72472-atomic_t nr_unused_locks;
72473-atomic_t nr_cyclic_checks;
72474-atomic_t nr_find_usage_forwards_checks;
72475-atomic_t nr_find_usage_backwards_checks;
72476+atomic_unchecked_t chain_lookup_hits;
72477+atomic_unchecked_t chain_lookup_misses;
72478+atomic_unchecked_t hardirqs_on_events;
72479+atomic_unchecked_t hardirqs_off_events;
72480+atomic_unchecked_t redundant_hardirqs_on;
72481+atomic_unchecked_t redundant_hardirqs_off;
72482+atomic_unchecked_t softirqs_on_events;
72483+atomic_unchecked_t softirqs_off_events;
72484+atomic_unchecked_t redundant_softirqs_on;
72485+atomic_unchecked_t redundant_softirqs_off;
72486+atomic_unchecked_t nr_unused_locks;
72487+atomic_unchecked_t nr_cyclic_checks;
72488+atomic_unchecked_t nr_find_usage_forwards_checks;
72489+atomic_unchecked_t nr_find_usage_backwards_checks;
72490 #endif
72491
72492 /*
72493@@ -577,6 +577,10 @@ static int static_obj(void *obj)
72494 int i;
72495 #endif
72496
72497+#ifdef CONFIG_PAX_KERNEXEC
72498+ start = ktla_ktva(start);
72499+#endif
72500+
72501 /*
72502 * static variable?
72503 */
72504@@ -592,8 +596,7 @@ static int static_obj(void *obj)
72505 */
72506 for_each_possible_cpu(i) {
72507 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
72508- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
72509- + per_cpu_offset(i);
72510+ end = start + PERCPU_ENOUGH_ROOM;
72511
72512 if ((addr >= start) && (addr < end))
72513 return 1;
72514@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
72515 if (!static_obj(lock->key)) {
72516 debug_locks_off();
72517 printk("INFO: trying to register non-static key.\n");
72518+ printk("lock:%pS key:%pS.\n", lock, lock->key);
72519 printk("the code is fine but needs lockdep annotation.\n");
72520 printk("turning off the locking correctness validator.\n");
72521 dump_stack();
72522@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
72523 if (!class)
72524 return 0;
72525 }
72526- debug_atomic_inc((atomic_t *)&class->ops);
72527+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
72528 if (very_verbose(class)) {
72529 printk("\nacquire class [%p] %s", class->key, class->name);
72530 if (class->name_version > 1)
72531diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
72532index a2ee95a..092f0f2 100644
72533--- a/kernel/lockdep_internals.h
72534+++ b/kernel/lockdep_internals.h
72535@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_class *class)
72536 /*
72537 * Various lockdep statistics:
72538 */
72539-extern atomic_t chain_lookup_hits;
72540-extern atomic_t chain_lookup_misses;
72541-extern atomic_t hardirqs_on_events;
72542-extern atomic_t hardirqs_off_events;
72543-extern atomic_t redundant_hardirqs_on;
72544-extern atomic_t redundant_hardirqs_off;
72545-extern atomic_t softirqs_on_events;
72546-extern atomic_t softirqs_off_events;
72547-extern atomic_t redundant_softirqs_on;
72548-extern atomic_t redundant_softirqs_off;
72549-extern atomic_t nr_unused_locks;
72550-extern atomic_t nr_cyclic_checks;
72551-extern atomic_t nr_cyclic_check_recursions;
72552-extern atomic_t nr_find_usage_forwards_checks;
72553-extern atomic_t nr_find_usage_forwards_recursions;
72554-extern atomic_t nr_find_usage_backwards_checks;
72555-extern atomic_t nr_find_usage_backwards_recursions;
72556-# define debug_atomic_inc(ptr) atomic_inc(ptr)
72557-# define debug_atomic_dec(ptr) atomic_dec(ptr)
72558-# define debug_atomic_read(ptr) atomic_read(ptr)
72559+extern atomic_unchecked_t chain_lookup_hits;
72560+extern atomic_unchecked_t chain_lookup_misses;
72561+extern atomic_unchecked_t hardirqs_on_events;
72562+extern atomic_unchecked_t hardirqs_off_events;
72563+extern atomic_unchecked_t redundant_hardirqs_on;
72564+extern atomic_unchecked_t redundant_hardirqs_off;
72565+extern atomic_unchecked_t softirqs_on_events;
72566+extern atomic_unchecked_t softirqs_off_events;
72567+extern atomic_unchecked_t redundant_softirqs_on;
72568+extern atomic_unchecked_t redundant_softirqs_off;
72569+extern atomic_unchecked_t nr_unused_locks;
72570+extern atomic_unchecked_t nr_cyclic_checks;
72571+extern atomic_unchecked_t nr_cyclic_check_recursions;
72572+extern atomic_unchecked_t nr_find_usage_forwards_checks;
72573+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
72574+extern atomic_unchecked_t nr_find_usage_backwards_checks;
72575+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
72576+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
72577+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
72578+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
72579 #else
72580 # define debug_atomic_inc(ptr) do { } while (0)
72581 # define debug_atomic_dec(ptr) do { } while (0)
72582diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
72583index d4aba4f..02a353f 100644
72584--- a/kernel/lockdep_proc.c
72585+++ b/kernel/lockdep_proc.c
72586@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, void *v)
72587
72588 static void print_name(struct seq_file *m, struct lock_class *class)
72589 {
72590- char str[128];
72591+ char str[KSYM_NAME_LEN];
72592 const char *name = class->name;
72593
72594 if (!name) {
72595diff --git a/kernel/module.c b/kernel/module.c
72596index 4b270e6..2226274 100644
72597--- a/kernel/module.c
72598+++ b/kernel/module.c
72599@@ -55,6 +55,7 @@
72600 #include <linux/async.h>
72601 #include <linux/percpu.h>
72602 #include <linux/kmemleak.h>
72603+#include <linux/grsecurity.h>
72604
72605 #define CREATE_TRACE_POINTS
72606 #include <trace/events/module.h>
72607@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq);
72608 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
72609
72610 /* Bounds of module allocation, for speeding __module_address */
72611-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
72612+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
72613+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
72614
72615 int register_module_notifier(struct notifier_block * nb)
72616 {
72617@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72618 return true;
72619
72620 list_for_each_entry_rcu(mod, &modules, list) {
72621- struct symsearch arr[] = {
72622+ struct symsearch modarr[] = {
72623 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
72624 NOT_GPL_ONLY, false },
72625 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
72626@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct symsearch *arr, struct module *owner,
72627 #endif
72628 };
72629
72630- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
72631+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
72632 return true;
72633 }
72634 return false;
72635@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
72636 void *ptr;
72637 int cpu;
72638
72639- if (align > PAGE_SIZE) {
72640+ if (align-1 >= PAGE_SIZE) {
72641 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
72642 name, align, PAGE_SIZE);
72643 align = PAGE_SIZE;
72644@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resolve_symbol(Elf_Shdr *sechdrs,
72645 * /sys/module/foo/sections stuff
72646 * J. Corbet <corbet@lwn.net>
72647 */
72648-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
72649+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
72650
72651 static inline bool sect_empty(const Elf_Shdr *sect)
72652 {
72653@@ -1545,7 +1547,8 @@ static void free_module(struct module *mod)
72654 destroy_params(mod->kp, mod->num_kp);
72655
72656 /* This may be NULL, but that's OK */
72657- module_free(mod, mod->module_init);
72658+ module_free(mod, mod->module_init_rw);
72659+ module_free_exec(mod, mod->module_init_rx);
72660 kfree(mod->args);
72661 if (mod->percpu)
72662 percpu_modfree(mod->percpu);
72663@@ -1554,10 +1557,12 @@ static void free_module(struct module *mod)
72664 percpu_modfree(mod->refptr);
72665 #endif
72666 /* Free lock-classes: */
72667- lockdep_free_key_range(mod->module_core, mod->core_size);
72668+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
72669+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
72670
72671 /* Finally, free the core (containing the module structure) */
72672- module_free(mod, mod->module_core);
72673+ module_free_exec(mod, mod->module_core_rx);
72674+ module_free(mod, mod->module_core_rw);
72675
72676 #ifdef CONFIG_MPU
72677 update_protections(current->mm);
72678@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72679 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72680 int ret = 0;
72681 const struct kernel_symbol *ksym;
72682+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72683+ int is_fs_load = 0;
72684+ int register_filesystem_found = 0;
72685+ char *p;
72686+
72687+ p = strstr(mod->args, "grsec_modharden_fs");
72688+
72689+ if (p) {
72690+ char *endptr = p + strlen("grsec_modharden_fs");
72691+ /* copy \0 as well */
72692+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
72693+ is_fs_load = 1;
72694+ }
72695+#endif
72696+
72697
72698 for (i = 1; i < n; i++) {
72699+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72700+ const char *name = strtab + sym[i].st_name;
72701+
72702+ /* it's a real shame this will never get ripped and copied
72703+ upstream! ;(
72704+ */
72705+ if (is_fs_load && !strcmp(name, "register_filesystem"))
72706+ register_filesystem_found = 1;
72707+#endif
72708 switch (sym[i].st_shndx) {
72709 case SHN_COMMON:
72710 /* We compiled with -fno-common. These are not
72711@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72712 strtab + sym[i].st_name, mod);
72713 /* Ok if resolved. */
72714 if (ksym) {
72715+ pax_open_kernel();
72716 sym[i].st_value = ksym->value;
72717+ pax_close_kernel();
72718 break;
72719 }
72720
72721@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
72722 secbase = (unsigned long)mod->percpu;
72723 else
72724 secbase = sechdrs[sym[i].st_shndx].sh_addr;
72725+ pax_open_kernel();
72726 sym[i].st_value += secbase;
72727+ pax_close_kernel();
72728 break;
72729 }
72730 }
72731
72732+#ifdef CONFIG_GRKERNSEC_MODHARDEN
72733+ if (is_fs_load && !register_filesystem_found) {
72734+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
72735+ ret = -EPERM;
72736+ }
72737+#endif
72738+
72739 return ret;
72740 }
72741
72742@@ -1731,11 +1771,12 @@ static void layout_sections(struct module *mod,
72743 || s->sh_entsize != ~0UL
72744 || strstarts(secstrings + s->sh_name, ".init"))
72745 continue;
72746- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
72747+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72748+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
72749+ else
72750+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
72751 DEBUGP("\t%s\n", secstrings + s->sh_name);
72752 }
72753- if (m == 0)
72754- mod->core_text_size = mod->core_size;
72755 }
72756
72757 DEBUGP("Init section allocation order:\n");
72758@@ -1748,12 +1789,13 @@ static void layout_sections(struct module *mod,
72759 || s->sh_entsize != ~0UL
72760 || !strstarts(secstrings + s->sh_name, ".init"))
72761 continue;
72762- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
72763- | INIT_OFFSET_MASK);
72764+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
72765+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
72766+ else
72767+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
72768+ s->sh_entsize |= INIT_OFFSET_MASK;
72769 DEBUGP("\t%s\n", secstrings + s->sh_name);
72770 }
72771- if (m == 0)
72772- mod->init_text_size = mod->init_size;
72773 }
72774 }
72775
72776@@ -1857,9 +1899,8 @@ static int is_exported(const char *name, unsigned long value,
72777
72778 /* As per nm */
72779 static char elf_type(const Elf_Sym *sym,
72780- Elf_Shdr *sechdrs,
72781- const char *secstrings,
72782- struct module *mod)
72783+ const Elf_Shdr *sechdrs,
72784+ const char *secstrings)
72785 {
72786 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
72787 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
72788@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struct module *mod,
72789
72790 /* Put symbol section at end of init part of module. */
72791 symsect->sh_flags |= SHF_ALLOC;
72792- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
72793+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
72794 symindex) | INIT_OFFSET_MASK;
72795 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
72796
72797@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struct module *mod,
72798 }
72799
72800 /* Append room for core symbols at end of core part. */
72801- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
72802- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
72803+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
72804+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
72805
72806 /* Put string table section at end of init part of module. */
72807 strsect->sh_flags |= SHF_ALLOC;
72808- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
72809+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
72810 strindex) | INIT_OFFSET_MASK;
72811 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
72812
72813 /* Append room for core symbols' strings at end of core part. */
72814- *pstroffs = mod->core_size;
72815+ *pstroffs = mod->core_size_rx;
72816 __set_bit(0, strmap);
72817- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
72818+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
72819
72820 return symoffs;
72821 }
72822@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *mod,
72823 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
72824 mod->strtab = (void *)sechdrs[strindex].sh_addr;
72825
72826+ pax_open_kernel();
72827+
72828 /* Set types up while we still have access to sections. */
72829 for (i = 0; i < mod->num_symtab; i++)
72830 mod->symtab[i].st_info
72831- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
72832+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
72833
72834- mod->core_symtab = dst = mod->module_core + symoffs;
72835+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
72836 src = mod->symtab;
72837 *dst = *src;
72838 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
72839@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *mod,
72840 }
72841 mod->core_num_syms = ndst;
72842
72843- mod->core_strtab = s = mod->module_core + stroffs;
72844+ mod->core_strtab = s = mod->module_core_rx + stroffs;
72845 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
72846 if (test_bit(i, strmap))
72847 *++s = mod->strtab[i];
72848+
72849+ pax_close_kernel();
72850 }
72851 #else
72852 static inline unsigned long layout_symtab(struct module *mod,
72853@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
72854 #endif
72855 }
72856
72857-static void *module_alloc_update_bounds(unsigned long size)
72858+static void *module_alloc_update_bounds_rw(unsigned long size)
72859 {
72860 void *ret = module_alloc(size);
72861
72862 if (ret) {
72863 /* Update module bounds. */
72864- if ((unsigned long)ret < module_addr_min)
72865- module_addr_min = (unsigned long)ret;
72866- if ((unsigned long)ret + size > module_addr_max)
72867- module_addr_max = (unsigned long)ret + size;
72868+ if ((unsigned long)ret < module_addr_min_rw)
72869+ module_addr_min_rw = (unsigned long)ret;
72870+ if ((unsigned long)ret + size > module_addr_max_rw)
72871+ module_addr_max_rw = (unsigned long)ret + size;
72872+ }
72873+ return ret;
72874+}
72875+
72876+static void *module_alloc_update_bounds_rx(unsigned long size)
72877+{
72878+ void *ret = module_alloc_exec(size);
72879+
72880+ if (ret) {
72881+ /* Update module bounds. */
72882+ if ((unsigned long)ret < module_addr_min_rx)
72883+ module_addr_min_rx = (unsigned long)ret;
72884+ if ((unsigned long)ret + size > module_addr_max_rx)
72885+ module_addr_max_rx = (unsigned long)ret + size;
72886 }
72887 return ret;
72888 }
72889@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
72890 unsigned int i;
72891
72892 /* only scan the sections containing data */
72893- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
72894- (unsigned long)mod->module_core,
72895+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
72896+ (unsigned long)mod->module_core_rw,
72897 sizeof(struct module), GFP_KERNEL);
72898
72899 for (i = 1; i < hdr->e_shnum; i++) {
72900@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
72901 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
72902 continue;
72903
72904- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
72905- (unsigned long)mod->module_core,
72906+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
72907+ (unsigned long)mod->module_core_rw,
72908 sechdrs[i].sh_size, GFP_KERNEL);
72909 }
72910 }
72911@@ -2097,7 +2156,7 @@ static noinline struct module *load_module(void __user *umod,
72912 Elf_Ehdr *hdr;
72913 Elf_Shdr *sechdrs;
72914 char *secstrings, *args, *modmagic, *strtab = NULL;
72915- char *staging;
72916+ char *staging, *license;
72917 unsigned int i;
72918 unsigned int symindex = 0;
72919 unsigned int strindex = 0;
72920@@ -2195,6 +2254,14 @@ static noinline struct module *load_module(void __user *umod,
72921 goto free_hdr;
72922 }
72923
72924+ license = get_modinfo(sechdrs, infoindex, "license");
72925+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
72926+ if (!license || !license_is_gpl_compatible(license)) {
72927+ err = -ENOEXEC;
72928+ goto free_hdr;
72929+ }
72930+#endif
72931+
72932 modmagic = get_modinfo(sechdrs, infoindex, "vermagic");
72933 /* This is allowed: modprobe --force will invalidate it. */
72934 if (!modmagic) {
72935@@ -2263,7 +2330,7 @@ static noinline struct module *load_module(void __user *umod,
72936 secstrings, &stroffs, strmap);
72937
72938 /* Do the allocs. */
72939- ptr = module_alloc_update_bounds(mod->core_size);
72940+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
72941 /*
72942 * The pointer to this block is stored in the module structure
72943 * which is inside the block. Just mark it as not being a
72944@@ -2274,23 +2341,47 @@ static noinline struct module *load_module(void __user *umod,
72945 err = -ENOMEM;
72946 goto free_percpu;
72947 }
72948- memset(ptr, 0, mod->core_size);
72949- mod->module_core = ptr;
72950+ memset(ptr, 0, mod->core_size_rw);
72951+ mod->module_core_rw = ptr;
72952
72953- ptr = module_alloc_update_bounds(mod->init_size);
72954+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
72955 /*
72956 * The pointer to this block is stored in the module structure
72957 * which is inside the block. This block doesn't need to be
72958 * scanned as it contains data and code that will be freed
72959 * after the module is initialized.
72960 */
72961- kmemleak_ignore(ptr);
72962- if (!ptr && mod->init_size) {
72963+ kmemleak_not_leak(ptr);
72964+ if (!ptr && mod->init_size_rw) {
72965 err = -ENOMEM;
72966- goto free_core;
72967+ goto free_core_rw;
72968 }
72969- memset(ptr, 0, mod->init_size);
72970- mod->module_init = ptr;
72971+ memset(ptr, 0, mod->init_size_rw);
72972+ mod->module_init_rw = ptr;
72973+
72974+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
72975+ kmemleak_not_leak(ptr);
72976+ if (!ptr) {
72977+ err = -ENOMEM;
72978+ goto free_init_rw;
72979+ }
72980+
72981+ pax_open_kernel();
72982+ memset(ptr, 0, mod->core_size_rx);
72983+ pax_close_kernel();
72984+ mod->module_core_rx = ptr;
72985+
72986+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
72987+ kmemleak_not_leak(ptr);
72988+ if (!ptr && mod->init_size_rx) {
72989+ err = -ENOMEM;
72990+ goto free_core_rx;
72991+ }
72992+
72993+ pax_open_kernel();
72994+ memset(ptr, 0, mod->init_size_rx);
72995+ pax_close_kernel();
72996+ mod->module_init_rx = ptr;
72997
72998 /* Transfer each section which specifies SHF_ALLOC */
72999 DEBUGP("final section addresses:\n");
73000@@ -2300,17 +2391,45 @@ static noinline struct module *load_module(void __user *umod,
73001 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
73002 continue;
73003
73004- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
73005- dest = mod->module_init
73006- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73007- else
73008- dest = mod->module_core + sechdrs[i].sh_entsize;
73009+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
73010+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73011+ dest = mod->module_init_rw
73012+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73013+ else
73014+ dest = mod->module_init_rx
73015+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
73016+ } else {
73017+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
73018+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
73019+ else
73020+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
73021+ }
73022
73023- if (sechdrs[i].sh_type != SHT_NOBITS)
73024- memcpy(dest, (void *)sechdrs[i].sh_addr,
73025- sechdrs[i].sh_size);
73026+ if (sechdrs[i].sh_type != SHT_NOBITS) {
73027+
73028+#ifdef CONFIG_PAX_KERNEXEC
73029+#ifdef CONFIG_X86_64
73030+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
73031+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
73032+#endif
73033+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
73034+ pax_open_kernel();
73035+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73036+ pax_close_kernel();
73037+ } else
73038+#endif
73039+
73040+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
73041+ }
73042 /* Update sh_addr to point to copy in image. */
73043- sechdrs[i].sh_addr = (unsigned long)dest;
73044+
73045+#ifdef CONFIG_PAX_KERNEXEC
73046+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
73047+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
73048+ else
73049+#endif
73050+
73051+ sechdrs[i].sh_addr = (unsigned long)dest;
73052 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
73053 }
73054 /* Module has been moved. */
73055@@ -2322,7 +2441,7 @@ static noinline struct module *load_module(void __user *umod,
73056 mod->name);
73057 if (!mod->refptr) {
73058 err = -ENOMEM;
73059- goto free_init;
73060+ goto free_init_rx;
73061 }
73062 #endif
73063 /* Now we've moved module, initialize linked lists, etc. */
73064@@ -2334,7 +2453,7 @@ static noinline struct module *load_module(void __user *umod,
73065 goto free_unload;
73066
73067 /* Set up license info based on the info section */
73068- set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
73069+ set_license(mod, license);
73070
73071 /*
73072 * ndiswrapper is under GPL by itself, but loads proprietary modules.
73073@@ -2351,6 +2470,31 @@ static noinline struct module *load_module(void __user *umod,
73074 /* Set up MODINFO_ATTR fields */
73075 setup_modinfo(mod, sechdrs, infoindex);
73076
73077+ mod->args = args;
73078+
73079+#ifdef CONFIG_GRKERNSEC_MODHARDEN
73080+ {
73081+ char *p, *p2;
73082+
73083+ if (strstr(mod->args, "grsec_modharden_netdev")) {
73084+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
73085+ err = -EPERM;
73086+ goto cleanup;
73087+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
73088+ p += strlen("grsec_modharden_normal");
73089+ p2 = strstr(p, "_");
73090+ if (p2) {
73091+ *p2 = '\0';
73092+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
73093+ *p2 = '_';
73094+ }
73095+ err = -EPERM;
73096+ goto cleanup;
73097+ }
73098+ }
73099+#endif
73100+
73101+
73102 /* Fix up syms, so that st_value is a pointer to location. */
73103 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
73104 mod);
73105@@ -2431,8 +2575,8 @@ static noinline struct module *load_module(void __user *umod,
73106
73107 /* Now do relocations. */
73108 for (i = 1; i < hdr->e_shnum; i++) {
73109- const char *strtab = (char *)sechdrs[strindex].sh_addr;
73110 unsigned int info = sechdrs[i].sh_info;
73111+ strtab = (char *)sechdrs[strindex].sh_addr;
73112
73113 /* Not a valid relocation section? */
73114 if (info >= hdr->e_shnum)
73115@@ -2493,16 +2637,15 @@ static noinline struct module *load_module(void __user *umod,
73116 * Do it before processing of module parameters, so the module
73117 * can provide parameter accessor functions of its own.
73118 */
73119- if (mod->module_init)
73120- flush_icache_range((unsigned long)mod->module_init,
73121- (unsigned long)mod->module_init
73122- + mod->init_size);
73123- flush_icache_range((unsigned long)mod->module_core,
73124- (unsigned long)mod->module_core + mod->core_size);
73125+ if (mod->module_init_rx)
73126+ flush_icache_range((unsigned long)mod->module_init_rx,
73127+ (unsigned long)mod->module_init_rx
73128+ + mod->init_size_rx);
73129+ flush_icache_range((unsigned long)mod->module_core_rx,
73130+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
73131
73132 set_fs(old_fs);
73133
73134- mod->args = args;
73135 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
73136 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
73137 mod->name);
73138@@ -2546,12 +2689,16 @@ static noinline struct module *load_module(void __user *umod,
73139 free_unload:
73140 module_unload_free(mod);
73141 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
73142+ free_init_rx:
73143 percpu_modfree(mod->refptr);
73144- free_init:
73145 #endif
73146- module_free(mod, mod->module_init);
73147- free_core:
73148- module_free(mod, mod->module_core);
73149+ module_free_exec(mod, mod->module_init_rx);
73150+ free_core_rx:
73151+ module_free_exec(mod, mod->module_core_rx);
73152+ free_init_rw:
73153+ module_free(mod, mod->module_init_rw);
73154+ free_core_rw:
73155+ module_free(mod, mod->module_core_rw);
73156 /* mod will be freed with core. Don't access it beyond this line! */
73157 free_percpu:
73158 if (percpu)
73159@@ -2653,10 +2800,12 @@ SYSCALL_DEFINE3(init_module, void __user *, umod,
73160 mod->symtab = mod->core_symtab;
73161 mod->strtab = mod->core_strtab;
73162 #endif
73163- module_free(mod, mod->module_init);
73164- mod->module_init = NULL;
73165- mod->init_size = 0;
73166- mod->init_text_size = 0;
73167+ module_free(mod, mod->module_init_rw);
73168+ module_free_exec(mod, mod->module_init_rx);
73169+ mod->module_init_rw = NULL;
73170+ mod->module_init_rx = NULL;
73171+ mod->init_size_rw = 0;
73172+ mod->init_size_rx = 0;
73173 mutex_unlock(&module_mutex);
73174
73175 return 0;
73176@@ -2687,10 +2836,16 @@ static const char *get_ksymbol(struct module *mod,
73177 unsigned long nextval;
73178
73179 /* At worse, next value is at end of module */
73180- if (within_module_init(addr, mod))
73181- nextval = (unsigned long)mod->module_init+mod->init_text_size;
73182+ if (within_module_init_rx(addr, mod))
73183+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
73184+ else if (within_module_init_rw(addr, mod))
73185+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
73186+ else if (within_module_core_rx(addr, mod))
73187+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
73188+ else if (within_module_core_rw(addr, mod))
73189+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
73190 else
73191- nextval = (unsigned long)mod->module_core+mod->core_text_size;
73192+ return NULL;
73193
73194 /* Scan for closest preceeding symbol, and next symbol. (ELF
73195 starts real symbols at 1). */
73196@@ -2936,7 +3091,7 @@ static int m_show(struct seq_file *m, void *p)
73197 char buf[8];
73198
73199 seq_printf(m, "%s %u",
73200- mod->name, mod->init_size + mod->core_size);
73201+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
73202 print_unload_info(m, mod);
73203
73204 /* Informative for users. */
73205@@ -2945,7 +3100,7 @@ static int m_show(struct seq_file *m, void *p)
73206 mod->state == MODULE_STATE_COMING ? "Loading":
73207 "Live");
73208 /* Used by oprofile and other similar tools. */
73209- seq_printf(m, " 0x%p", mod->module_core);
73210+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
73211
73212 /* Taints info */
73213 if (mod->taints)
73214@@ -2981,7 +3136,17 @@ static const struct file_operations proc_modules_operations = {
73215
73216 static int __init proc_modules_init(void)
73217 {
73218+#ifndef CONFIG_GRKERNSEC_HIDESYM
73219+#ifdef CONFIG_GRKERNSEC_PROC_USER
73220+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73221+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
73222+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
73223+#else
73224 proc_create("modules", 0, NULL, &proc_modules_operations);
73225+#endif
73226+#else
73227+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
73228+#endif
73229 return 0;
73230 }
73231 module_init(proc_modules_init);
73232@@ -3040,12 +3205,12 @@ struct module *__module_address(unsigned long addr)
73233 {
73234 struct module *mod;
73235
73236- if (addr < module_addr_min || addr > module_addr_max)
73237+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
73238+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
73239 return NULL;
73240
73241 list_for_each_entry_rcu(mod, &modules, list)
73242- if (within_module_core(addr, mod)
73243- || within_module_init(addr, mod))
73244+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
73245 return mod;
73246 return NULL;
73247 }
73248@@ -3079,11 +3244,20 @@ bool is_module_text_address(unsigned long addr)
73249 */
73250 struct module *__module_text_address(unsigned long addr)
73251 {
73252- struct module *mod = __module_address(addr);
73253+ struct module *mod;
73254+
73255+#ifdef CONFIG_X86_32
73256+ addr = ktla_ktva(addr);
73257+#endif
73258+
73259+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
73260+ return NULL;
73261+
73262+ mod = __module_address(addr);
73263+
73264 if (mod) {
73265 /* Make sure it's within the text section. */
73266- if (!within(addr, mod->module_init, mod->init_text_size)
73267- && !within(addr, mod->module_core, mod->core_text_size))
73268+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
73269 mod = NULL;
73270 }
73271 return mod;
73272diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
73273index ec815a9..fe46e99 100644
73274--- a/kernel/mutex-debug.c
73275+++ b/kernel/mutex-debug.c
73276@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
73277 }
73278
73279 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73280- struct thread_info *ti)
73281+ struct task_struct *task)
73282 {
73283 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
73284
73285 /* Mark the current thread as blocked on the lock: */
73286- ti->task->blocked_on = waiter;
73287+ task->blocked_on = waiter;
73288 }
73289
73290 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73291- struct thread_info *ti)
73292+ struct task_struct *task)
73293 {
73294 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
73295- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
73296- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
73297- ti->task->blocked_on = NULL;
73298+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
73299+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
73300+ task->blocked_on = NULL;
73301
73302 list_del_init(&waiter->list);
73303 waiter->task = NULL;
73304@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lock)
73305 return;
73306
73307 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
73308- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
73309+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
73310 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
73311 mutex_clear_owner(lock);
73312 }
73313diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
73314index 6b2d735..372d3c4 100644
73315--- a/kernel/mutex-debug.h
73316+++ b/kernel/mutex-debug.h
73317@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
73318 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
73319 extern void debug_mutex_add_waiter(struct mutex *lock,
73320 struct mutex_waiter *waiter,
73321- struct thread_info *ti);
73322+ struct task_struct *task);
73323 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
73324- struct thread_info *ti);
73325+ struct task_struct *task);
73326 extern void debug_mutex_unlock(struct mutex *lock);
73327 extern void debug_mutex_init(struct mutex *lock, const char *name,
73328 struct lock_class_key *key);
73329
73330 static inline void mutex_set_owner(struct mutex *lock)
73331 {
73332- lock->owner = current_thread_info();
73333+ lock->owner = current;
73334 }
73335
73336 static inline void mutex_clear_owner(struct mutex *lock)
73337diff --git a/kernel/mutex.c b/kernel/mutex.c
73338index f85644c..5ee9f77 100644
73339--- a/kernel/mutex.c
73340+++ b/kernel/mutex.c
73341@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73342 */
73343
73344 for (;;) {
73345- struct thread_info *owner;
73346+ struct task_struct *owner;
73347
73348 /*
73349 * If we own the BKL, then don't spin. The owner of
73350@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73351 spin_lock_mutex(&lock->wait_lock, flags);
73352
73353 debug_mutex_lock_common(lock, &waiter);
73354- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
73355+ debug_mutex_add_waiter(lock, &waiter, task);
73356
73357 /* add waiting tasks to the end of the waitqueue (FIFO): */
73358 list_add_tail(&waiter.list, &lock->wait_list);
73359@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73360 * TASK_UNINTERRUPTIBLE case.)
73361 */
73362 if (unlikely(signal_pending_state(state, task))) {
73363- mutex_remove_waiter(lock, &waiter,
73364- task_thread_info(task));
73365+ mutex_remove_waiter(lock, &waiter, task);
73366 mutex_release(&lock->dep_map, 1, ip);
73367 spin_unlock_mutex(&lock->wait_lock, flags);
73368
73369@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
73370 done:
73371 lock_acquired(&lock->dep_map, ip);
73372 /* got the lock - rejoice! */
73373- mutex_remove_waiter(lock, &waiter, current_thread_info());
73374+ mutex_remove_waiter(lock, &waiter, task);
73375 mutex_set_owner(lock);
73376
73377 /* set it to 0 if there are no waiters left: */
73378diff --git a/kernel/mutex.h b/kernel/mutex.h
73379index 67578ca..4115fbf 100644
73380--- a/kernel/mutex.h
73381+++ b/kernel/mutex.h
73382@@ -19,7 +19,7 @@
73383 #ifdef CONFIG_SMP
73384 static inline void mutex_set_owner(struct mutex *lock)
73385 {
73386- lock->owner = current_thread_info();
73387+ lock->owner = current;
73388 }
73389
73390 static inline void mutex_clear_owner(struct mutex *lock)
73391diff --git a/kernel/panic.c b/kernel/panic.c
73392index 96b45d0..ff70a46 100644
73393--- a/kernel/panic.c
73394+++ b/kernel/panic.c
73395@@ -71,7 +71,11 @@ NORET_TYPE void panic(const char * fmt, ...)
73396 va_end(args);
73397 printk(KERN_EMERG "Kernel panic - not syncing: %s\n",buf);
73398 #ifdef CONFIG_DEBUG_BUGVERBOSE
73399- dump_stack();
73400+ /*
73401+ * Avoid nested stack-dumping if a panic occurs during oops processing
73402+ */
73403+ if (!oops_in_progress)
73404+ dump_stack();
73405 #endif
73406
73407 /*
73408@@ -352,7 +356,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller, struc
73409 const char *board;
73410
73411 printk(KERN_WARNING "------------[ cut here ]------------\n");
73412- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
73413+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
73414 board = dmi_get_system_info(DMI_PRODUCT_NAME);
73415 if (board)
73416 printk(KERN_WARNING "Hardware name: %s\n", board);
73417@@ -392,7 +396,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
73418 */
73419 void __stack_chk_fail(void)
73420 {
73421- panic("stack-protector: Kernel stack is corrupted in: %p\n",
73422+ dump_stack();
73423+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
73424 __builtin_return_address(0));
73425 }
73426 EXPORT_SYMBOL(__stack_chk_fail);
73427diff --git a/kernel/params.c b/kernel/params.c
73428index d656c27..21e452c 100644
73429--- a/kernel/params.c
73430+++ b/kernel/params.c
73431@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct kobject *kobj,
73432 return ret;
73433 }
73434
73435-static struct sysfs_ops module_sysfs_ops = {
73436+static const struct sysfs_ops module_sysfs_ops = {
73437 .show = module_attr_show,
73438 .store = module_attr_store,
73439 };
73440@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
73441 return 0;
73442 }
73443
73444-static struct kset_uevent_ops module_uevent_ops = {
73445+static const struct kset_uevent_ops module_uevent_ops = {
73446 .filter = uevent_filter,
73447 };
73448
73449diff --git a/kernel/perf_event.c b/kernel/perf_event.c
73450index 37ebc14..9c121d9 100644
73451--- a/kernel/perf_event.c
73452+++ b/kernel/perf_event.c
73453@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
73454 */
73455 int sysctl_perf_event_sample_rate __read_mostly = 100000;
73456
73457-static atomic64_t perf_event_id;
73458+static atomic64_unchecked_t perf_event_id;
73459
73460 /*
73461 * Lock for (sysadmin-configurable) event reservations:
73462@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struct perf_event *event,
73463 * In order to keep per-task stats reliable we need to flip the event
73464 * values when we flip the contexts.
73465 */
73466- value = atomic64_read(&next_event->count);
73467- value = atomic64_xchg(&event->count, value);
73468- atomic64_set(&next_event->count, value);
73469+ value = atomic64_read_unchecked(&next_event->count);
73470+ value = atomic64_xchg_unchecked(&event->count, value);
73471+ atomic64_set_unchecked(&next_event->count, value);
73472
73473 swap(event->total_time_enabled, next_event->total_time_enabled);
73474 swap(event->total_time_running, next_event->total_time_running);
73475@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_event *event)
73476 update_event_times(event);
73477 }
73478
73479- return atomic64_read(&event->count);
73480+ return atomic64_read_unchecked(&event->count);
73481 }
73482
73483 /*
73484@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct perf_event *event,
73485 values[n++] = 1 + leader->nr_siblings;
73486 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73487 values[n++] = leader->total_time_enabled +
73488- atomic64_read(&leader->child_total_time_enabled);
73489+ atomic64_read_unchecked(&leader->child_total_time_enabled);
73490 }
73491 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73492 values[n++] = leader->total_time_running +
73493- atomic64_read(&leader->child_total_time_running);
73494+ atomic64_read_unchecked(&leader->child_total_time_running);
73495 }
73496
73497 size = n * sizeof(u64);
73498@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct perf_event *event,
73499 values[n++] = perf_event_read_value(event);
73500 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73501 values[n++] = event->total_time_enabled +
73502- atomic64_read(&event->child_total_time_enabled);
73503+ atomic64_read_unchecked(&event->child_total_time_enabled);
73504 }
73505 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73506 values[n++] = event->total_time_running +
73507- atomic64_read(&event->child_total_time_running);
73508+ atomic64_read_unchecked(&event->child_total_time_running);
73509 }
73510 if (read_format & PERF_FORMAT_ID)
73511 values[n++] = primary_event_id(event);
73512@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct file *file, poll_table *wait)
73513 static void perf_event_reset(struct perf_event *event)
73514 {
73515 (void)perf_event_read(event);
73516- atomic64_set(&event->count, 0);
73517+ atomic64_set_unchecked(&event->count, 0);
73518 perf_event_update_userpage(event);
73519 }
73520
73521@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct perf_event *event)
73522 ++userpg->lock;
73523 barrier();
73524 userpg->index = perf_event_index(event);
73525- userpg->offset = atomic64_read(&event->count);
73526+ userpg->offset = atomic64_read_unchecked(&event->count);
73527 if (event->state == PERF_EVENT_STATE_ACTIVE)
73528- userpg->offset -= atomic64_read(&event->hw.prev_count);
73529+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
73530
73531 userpg->time_enabled = event->total_time_enabled +
73532- atomic64_read(&event->child_total_time_enabled);
73533+ atomic64_read_unchecked(&event->child_total_time_enabled);
73534
73535 userpg->time_running = event->total_time_running +
73536- atomic64_read(&event->child_total_time_running);
73537+ atomic64_read_unchecked(&event->child_total_time_running);
73538
73539 barrier();
73540 ++userpg->lock;
73541@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct perf_output_handle *handle,
73542 u64 values[4];
73543 int n = 0;
73544
73545- values[n++] = atomic64_read(&event->count);
73546+ values[n++] = atomic64_read_unchecked(&event->count);
73547 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
73548 values[n++] = event->total_time_enabled +
73549- atomic64_read(&event->child_total_time_enabled);
73550+ atomic64_read_unchecked(&event->child_total_time_enabled);
73551 }
73552 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
73553 values[n++] = event->total_time_running +
73554- atomic64_read(&event->child_total_time_running);
73555+ atomic64_read_unchecked(&event->child_total_time_running);
73556 }
73557 if (read_format & PERF_FORMAT_ID)
73558 values[n++] = primary_event_id(event);
73559@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73560 if (leader != event)
73561 leader->pmu->read(leader);
73562
73563- values[n++] = atomic64_read(&leader->count);
73564+ values[n++] = atomic64_read_unchecked(&leader->count);
73565 if (read_format & PERF_FORMAT_ID)
73566 values[n++] = primary_event_id(leader);
73567
73568@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
73569 if (sub != event)
73570 sub->pmu->read(sub);
73571
73572- values[n++] = atomic64_read(&sub->count);
73573+ values[n++] = atomic64_read_unchecked(&sub->count);
73574 if (read_format & PERF_FORMAT_ID)
73575 values[n++] = primary_event_id(sub);
73576
73577@@ -3525,12 +3525,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
73578 * need to add enough zero bytes after the string to handle
73579 * the 64bit alignment we do later.
73580 */
73581- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
73582+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
73583 if (!buf) {
73584 name = strncpy(tmp, "//enomem", sizeof(tmp));
73585 goto got_name;
73586 }
73587- name = d_path(&file->f_path, buf, PATH_MAX);
73588+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
73589 if (IS_ERR(name)) {
73590 name = strncpy(tmp, "//toolong", sizeof(tmp));
73591 goto got_name;
73592@@ -3783,7 +3783,7 @@ static void perf_swevent_add(struct perf_event *event, u64 nr,
73593 {
73594 struct hw_perf_event *hwc = &event->hw;
73595
73596- atomic64_add(nr, &event->count);
73597+ atomic64_add_unchecked(nr, &event->count);
73598
73599 if (!hwc->sample_period)
73600 return;
73601@@ -4040,9 +4040,9 @@ static void cpu_clock_perf_event_update(struct perf_event *event)
73602 u64 now;
73603
73604 now = cpu_clock(cpu);
73605- prev = atomic64_read(&event->hw.prev_count);
73606- atomic64_set(&event->hw.prev_count, now);
73607- atomic64_add(now - prev, &event->count);
73608+ prev = atomic64_read_unchecked(&event->hw.prev_count);
73609+ atomic64_set_unchecked(&event->hw.prev_count, now);
73610+ atomic64_add_unchecked(now - prev, &event->count);
73611 }
73612
73613 static int cpu_clock_perf_event_enable(struct perf_event *event)
73614@@ -4050,7 +4050,7 @@ static int cpu_clock_perf_event_enable(struct perf_event *event)
73615 struct hw_perf_event *hwc = &event->hw;
73616 int cpu = raw_smp_processor_id();
73617
73618- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
73619+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
73620 perf_swevent_start_hrtimer(event);
73621
73622 return 0;
73623@@ -4082,9 +4082,9 @@ static void task_clock_perf_event_update(struct perf_event *event, u64 now)
73624 u64 prev;
73625 s64 delta;
73626
73627- prev = atomic64_xchg(&event->hw.prev_count, now);
73628+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
73629 delta = now - prev;
73630- atomic64_add(delta, &event->count);
73631+ atomic64_add_unchecked(delta, &event->count);
73632 }
73633
73634 static int task_clock_perf_event_enable(struct perf_event *event)
73635@@ -4094,7 +4094,7 @@ static int task_clock_perf_event_enable(struct perf_event *event)
73636
73637 now = event->ctx->time;
73638
73639- atomic64_set(&hwc->prev_count, now);
73640+ atomic64_set_unchecked(&hwc->prev_count, now);
73641
73642 perf_swevent_start_hrtimer(event);
73643
73644@@ -4289,7 +4289,7 @@ perf_event_alloc(struct perf_event_attr *attr,
73645 event->parent = parent_event;
73646
73647 event->ns = get_pid_ns(current->nsproxy->pid_ns);
73648- event->id = atomic64_inc_return(&perf_event_id);
73649+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
73650
73651 event->state = PERF_EVENT_STATE_INACTIVE;
73652
73653@@ -4720,15 +4720,15 @@ static void sync_child_event(struct perf_event *child_event,
73654 if (child_event->attr.inherit_stat)
73655 perf_event_read_event(child_event, child);
73656
73657- child_val = atomic64_read(&child_event->count);
73658+ child_val = atomic64_read_unchecked(&child_event->count);
73659
73660 /*
73661 * Add back the child's count to the parent's count:
73662 */
73663- atomic64_add(child_val, &parent_event->count);
73664- atomic64_add(child_event->total_time_enabled,
73665+ atomic64_add_unchecked(child_val, &parent_event->count);
73666+ atomic64_add_unchecked(child_event->total_time_enabled,
73667 &parent_event->child_total_time_enabled);
73668- atomic64_add(child_event->total_time_running,
73669+ atomic64_add_unchecked(child_event->total_time_running,
73670 &parent_event->child_total_time_running);
73671
73672 /*
73673diff --git a/kernel/pid.c b/kernel/pid.c
73674index fce7198..4f23a7e 100644
73675--- a/kernel/pid.c
73676+++ b/kernel/pid.c
73677@@ -33,6 +33,7 @@
73678 #include <linux/rculist.h>
73679 #include <linux/bootmem.h>
73680 #include <linux/hash.h>
73681+#include <linux/security.h>
73682 #include <linux/pid_namespace.h>
73683 #include <linux/init_task.h>
73684 #include <linux/syscalls.h>
73685@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
73686
73687 int pid_max = PID_MAX_DEFAULT;
73688
73689-#define RESERVED_PIDS 300
73690+#define RESERVED_PIDS 500
73691
73692 int pid_max_min = RESERVED_PIDS + 1;
73693 int pid_max_max = PID_MAX_LIMIT;
73694@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
73695 */
73696 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
73697 {
73698- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73699+ struct task_struct *task;
73700+
73701+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
73702+
73703+ if (gr_pid_is_chrooted(task))
73704+ return NULL;
73705+
73706+ return task;
73707 }
73708
73709 struct task_struct *find_task_by_vpid(pid_t vnr)
73710@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
73711 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
73712 }
73713
73714+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
73715+{
73716+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
73717+}
73718+
73719 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
73720 {
73721 struct pid *pid;
73722diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
73723index 5c9dc22..d271117 100644
73724--- a/kernel/posix-cpu-timers.c
73725+++ b/kernel/posix-cpu-timers.c
73726@@ -6,6 +6,7 @@
73727 #include <linux/posix-timers.h>
73728 #include <linux/errno.h>
73729 #include <linux/math64.h>
73730+#include <linux/security.h>
73731 #include <asm/uaccess.h>
73732 #include <linux/kernel_stat.h>
73733 #include <trace/events/timer.h>
73734@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(struct restart_block *restart_block)
73735
73736 static __init int init_posix_cpu_timers(void)
73737 {
73738- struct k_clock process = {
73739+ static struct k_clock process = {
73740 .clock_getres = process_cpu_clock_getres,
73741 .clock_get = process_cpu_clock_get,
73742 .clock_set = do_posix_clock_nosettime,
73743@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(void)
73744 .nsleep = process_cpu_nsleep,
73745 .nsleep_restart = process_cpu_nsleep_restart,
73746 };
73747- struct k_clock thread = {
73748+ static struct k_clock thread = {
73749 .clock_getres = thread_cpu_clock_getres,
73750 .clock_get = thread_cpu_clock_get,
73751 .clock_set = do_posix_clock_nosettime,
73752diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
73753index 5e76d22..cf1baeb 100644
73754--- a/kernel/posix-timers.c
73755+++ b/kernel/posix-timers.c
73756@@ -42,6 +42,7 @@
73757 #include <linux/compiler.h>
73758 #include <linux/idr.h>
73759 #include <linux/posix-timers.h>
73760+#include <linux/grsecurity.h>
73761 #include <linux/syscalls.h>
73762 #include <linux/wait.h>
73763 #include <linux/workqueue.h>
73764@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
73765 * which we beg off on and pass to do_sys_settimeofday().
73766 */
73767
73768-static struct k_clock posix_clocks[MAX_CLOCKS];
73769+static struct k_clock *posix_clocks[MAX_CLOCKS];
73770
73771 /*
73772 * These ones are defined below.
73773@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k_itimer *timr, unsigned long flags)
73774 */
73775 #define CLOCK_DISPATCH(clock, call, arglist) \
73776 ((clock) < 0 ? posix_cpu_##call arglist : \
73777- (posix_clocks[clock].call != NULL \
73778- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
73779+ (posix_clocks[clock]->call != NULL \
73780+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
73781
73782 /*
73783 * Default clock hook functions when the struct k_clock passed
73784@@ -172,7 +173,7 @@ static inline int common_clock_getres(const clockid_t which_clock,
73785 struct timespec *tp)
73786 {
73787 tp->tv_sec = 0;
73788- tp->tv_nsec = posix_clocks[which_clock].res;
73789+ tp->tv_nsec = posix_clocks[which_clock]->res;
73790 return 0;
73791 }
73792
73793@@ -217,9 +218,11 @@ static inline int invalid_clockid(const clockid_t which_clock)
73794 return 0;
73795 if ((unsigned) which_clock >= MAX_CLOCKS)
73796 return 1;
73797- if (posix_clocks[which_clock].clock_getres != NULL)
73798+ if (posix_clocks[which_clock] == NULL)
73799 return 0;
73800- if (posix_clocks[which_clock].res != 0)
73801+ if (posix_clocks[which_clock]->clock_getres != NULL)
73802+ return 0;
73803+ if (posix_clocks[which_clock]->res != 0)
73804 return 0;
73805 return 1;
73806 }
73807@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t which_clock, struct timespec *tp)
73808 */
73809 static __init int init_posix_timers(void)
73810 {
73811- struct k_clock clock_realtime = {
73812+ static struct k_clock clock_realtime = {
73813 .clock_getres = hrtimer_get_res,
73814 };
73815- struct k_clock clock_monotonic = {
73816+ static struct k_clock clock_monotonic = {
73817 .clock_getres = hrtimer_get_res,
73818 .clock_get = posix_ktime_get_ts,
73819 .clock_set = do_posix_clock_nosettime,
73820 };
73821- struct k_clock clock_monotonic_raw = {
73822+ static struct k_clock clock_monotonic_raw = {
73823 .clock_getres = hrtimer_get_res,
73824 .clock_get = posix_get_monotonic_raw,
73825 .clock_set = do_posix_clock_nosettime,
73826 .timer_create = no_timer_create,
73827 .nsleep = no_nsleep,
73828 };
73829- struct k_clock clock_realtime_coarse = {
73830+ static struct k_clock clock_realtime_coarse = {
73831 .clock_getres = posix_get_coarse_res,
73832 .clock_get = posix_get_realtime_coarse,
73833 .clock_set = do_posix_clock_nosettime,
73834 .timer_create = no_timer_create,
73835 .nsleep = no_nsleep,
73836 };
73837- struct k_clock clock_monotonic_coarse = {
73838+ static struct k_clock clock_monotonic_coarse = {
73839 .clock_getres = posix_get_coarse_res,
73840 .clock_get = posix_get_monotonic_coarse,
73841 .clock_set = do_posix_clock_nosettime,
73842@@ -296,6 +299,8 @@ static __init int init_posix_timers(void)
73843 .nsleep = no_nsleep,
73844 };
73845
73846+ pax_track_stack();
73847+
73848 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
73849 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
73850 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
73851@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_t clock_id, struct k_clock *new_clock)
73852 return;
73853 }
73854
73855- posix_clocks[clock_id] = *new_clock;
73856+ posix_clocks[clock_id] = new_clock;
73857 }
73858 EXPORT_SYMBOL_GPL(register_posix_clock);
73859
73860@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
73861 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
73862 return -EFAULT;
73863
73864+ /* only the CLOCK_REALTIME clock can be set, all other clocks
73865+ have their clock_set fptr set to a nosettime dummy function
73866+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
73867+ call common_clock_set, which calls do_sys_settimeofday, which
73868+ we hook
73869+ */
73870+
73871 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
73872 }
73873
73874diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
73875index 04a9e90..bc355aa 100644
73876--- a/kernel/power/hibernate.c
73877+++ b/kernel/power/hibernate.c
73878@@ -48,14 +48,14 @@ enum {
73879
73880 static int hibernation_mode = HIBERNATION_SHUTDOWN;
73881
73882-static struct platform_hibernation_ops *hibernation_ops;
73883+static const struct platform_hibernation_ops *hibernation_ops;
73884
73885 /**
73886 * hibernation_set_ops - set the global hibernate operations
73887 * @ops: the hibernation operations to use in subsequent hibernation transitions
73888 */
73889
73890-void hibernation_set_ops(struct platform_hibernation_ops *ops)
73891+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
73892 {
73893 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
73894 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
73895diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
73896index e8b3370..484c2e4 100644
73897--- a/kernel/power/poweroff.c
73898+++ b/kernel/power/poweroff.c
73899@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_poweroff_op = {
73900 .enable_mask = SYSRQ_ENABLE_BOOT,
73901 };
73902
73903-static int pm_sysrq_init(void)
73904+static int __init pm_sysrq_init(void)
73905 {
73906 register_sysrq_key('o', &sysrq_poweroff_op);
73907 return 0;
73908diff --git a/kernel/power/process.c b/kernel/power/process.c
73909index e7cd671..56d5f459 100644
73910--- a/kernel/power/process.c
73911+++ b/kernel/power/process.c
73912@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_only)
73913 struct timeval start, end;
73914 u64 elapsed_csecs64;
73915 unsigned int elapsed_csecs;
73916+ bool timedout = false;
73917
73918 do_gettimeofday(&start);
73919
73920 end_time = jiffies + TIMEOUT;
73921 do {
73922 todo = 0;
73923+ if (time_after(jiffies, end_time))
73924+ timedout = true;
73925 read_lock(&tasklist_lock);
73926 do_each_thread(g, p) {
73927 if (frozen(p) || !freezeable(p))
73928@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_only)
73929 * It is "frozen enough". If the task does wake
73930 * up, it will immediately call try_to_freeze.
73931 */
73932- if (!task_is_stopped_or_traced(p) &&
73933- !freezer_should_skip(p))
73934+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
73935 todo++;
73936+ if (timedout) {
73937+ printk(KERN_ERR "Task refusing to freeze:\n");
73938+ sched_show_task(p);
73939+ }
73940+ }
73941 } while_each_thread(g, p);
73942 read_unlock(&tasklist_lock);
73943 yield(); /* Yield is okay here */
73944- if (time_after(jiffies, end_time))
73945- break;
73946- } while (todo);
73947+ } while (todo && !timedout);
73948
73949 do_gettimeofday(&end);
73950 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
73951diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
73952index 40dd021..fb30ceb 100644
73953--- a/kernel/power/suspend.c
73954+++ b/kernel/power/suspend.c
73955@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_MAX] = {
73956 [PM_SUSPEND_MEM] = "mem",
73957 };
73958
73959-static struct platform_suspend_ops *suspend_ops;
73960+static const struct platform_suspend_ops *suspend_ops;
73961
73962 /**
73963 * suspend_set_ops - Set the global suspend method table.
73964 * @ops: Pointer to ops structure.
73965 */
73966-void suspend_set_ops(struct platform_suspend_ops *ops)
73967+void suspend_set_ops(const struct platform_suspend_ops *ops)
73968 {
73969 mutex_lock(&pm_mutex);
73970 suspend_ops = ops;
73971diff --git a/kernel/printk.c b/kernel/printk.c
73972index 4cade47..4d17900 100644
73973--- a/kernel/printk.c
73974+++ b/kernel/printk.c
73975@@ -33,6 +33,7 @@
73976 #include <linux/bootmem.h>
73977 #include <linux/syscalls.h>
73978 #include <linux/kexec.h>
73979+#include <linux/syslog.h>
73980
73981 #include <asm/uaccess.h>
73982
73983@@ -256,38 +257,30 @@ static inline void boot_delay_msec(void)
73984 }
73985 #endif
73986
73987-/*
73988- * Commands to do_syslog:
73989- *
73990- * 0 -- Close the log. Currently a NOP.
73991- * 1 -- Open the log. Currently a NOP.
73992- * 2 -- Read from the log.
73993- * 3 -- Read all messages remaining in the ring buffer.
73994- * 4 -- Read and clear all messages remaining in the ring buffer
73995- * 5 -- Clear ring buffer.
73996- * 6 -- Disable printk's to console
73997- * 7 -- Enable printk's to console
73998- * 8 -- Set level of messages printed to console
73999- * 9 -- Return number of unread characters in the log buffer
74000- * 10 -- Return size of the log buffer
74001- */
74002-int do_syslog(int type, char __user *buf, int len)
74003+int do_syslog(int type, char __user *buf, int len, bool from_file)
74004 {
74005 unsigned i, j, limit, count;
74006 int do_clear = 0;
74007 char c;
74008 int error = 0;
74009
74010- error = security_syslog(type);
74011+#ifdef CONFIG_GRKERNSEC_DMESG
74012+ if (grsec_enable_dmesg &&
74013+ (!from_file || (from_file && type == SYSLOG_ACTION_OPEN)) &&
74014+ !capable(CAP_SYS_ADMIN))
74015+ return -EPERM;
74016+#endif
74017+
74018+ error = security_syslog(type, from_file);
74019 if (error)
74020 return error;
74021
74022 switch (type) {
74023- case 0: /* Close log */
74024+ case SYSLOG_ACTION_CLOSE: /* Close log */
74025 break;
74026- case 1: /* Open log */
74027+ case SYSLOG_ACTION_OPEN: /* Open log */
74028 break;
74029- case 2: /* Read from log */
74030+ case SYSLOG_ACTION_READ: /* Read from log */
74031 error = -EINVAL;
74032 if (!buf || len < 0)
74033 goto out;
74034@@ -318,10 +311,12 @@ int do_syslog(int type, char __user *buf, int len)
74035 if (!error)
74036 error = i;
74037 break;
74038- case 4: /* Read/clear last kernel messages */
74039+ /* Read/clear last kernel messages */
74040+ case SYSLOG_ACTION_READ_CLEAR:
74041 do_clear = 1;
74042 /* FALL THRU */
74043- case 3: /* Read last kernel messages */
74044+ /* Read last kernel messages */
74045+ case SYSLOG_ACTION_READ_ALL:
74046 error = -EINVAL;
74047 if (!buf || len < 0)
74048 goto out;
74049@@ -374,21 +369,25 @@ int do_syslog(int type, char __user *buf, int len)
74050 }
74051 }
74052 break;
74053- case 5: /* Clear ring buffer */
74054+ /* Clear ring buffer */
74055+ case SYSLOG_ACTION_CLEAR:
74056 logged_chars = 0;
74057 break;
74058- case 6: /* Disable logging to console */
74059+ /* Disable logging to console */
74060+ case SYSLOG_ACTION_CONSOLE_OFF:
74061 if (saved_console_loglevel == -1)
74062 saved_console_loglevel = console_loglevel;
74063 console_loglevel = minimum_console_loglevel;
74064 break;
74065- case 7: /* Enable logging to console */
74066+ /* Enable logging to console */
74067+ case SYSLOG_ACTION_CONSOLE_ON:
74068 if (saved_console_loglevel != -1) {
74069 console_loglevel = saved_console_loglevel;
74070 saved_console_loglevel = -1;
74071 }
74072 break;
74073- case 8: /* Set level of messages printed to console */
74074+ /* Set level of messages printed to console */
74075+ case SYSLOG_ACTION_CONSOLE_LEVEL:
74076 error = -EINVAL;
74077 if (len < 1 || len > 8)
74078 goto out;
74079@@ -399,10 +398,12 @@ int do_syslog(int type, char __user *buf, int len)
74080 saved_console_loglevel = -1;
74081 error = 0;
74082 break;
74083- case 9: /* Number of chars in the log buffer */
74084+ /* Number of chars in the log buffer */
74085+ case SYSLOG_ACTION_SIZE_UNREAD:
74086 error = log_end - log_start;
74087 break;
74088- case 10: /* Size of the log buffer */
74089+ /* Size of the log buffer */
74090+ case SYSLOG_ACTION_SIZE_BUFFER:
74091 error = log_buf_len;
74092 break;
74093 default:
74094@@ -415,7 +416,7 @@ out:
74095
74096 SYSCALL_DEFINE3(syslog, int, type, char __user *, buf, int, len)
74097 {
74098- return do_syslog(type, buf, len);
74099+ return do_syslog(type, buf, len, SYSLOG_FROM_CALL);
74100 }
74101
74102 /*
74103diff --git a/kernel/profile.c b/kernel/profile.c
74104index dfadc5b..7f59404 100644
74105--- a/kernel/profile.c
74106+++ b/kernel/profile.c
74107@@ -39,7 +39,7 @@ struct profile_hit {
74108 /* Oprofile timer tick hook */
74109 static int (*timer_hook)(struct pt_regs *) __read_mostly;
74110
74111-static atomic_t *prof_buffer;
74112+static atomic_unchecked_t *prof_buffer;
74113 static unsigned long prof_len, prof_shift;
74114
74115 int prof_on __read_mostly;
74116@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
74117 hits[i].pc = 0;
74118 continue;
74119 }
74120- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74121+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74122 hits[i].hits = hits[i].pc = 0;
74123 }
74124 }
74125@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74126 * Add the current hit(s) and flush the write-queue out
74127 * to the global buffer:
74128 */
74129- atomic_add(nr_hits, &prof_buffer[pc]);
74130+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
74131 for (i = 0; i < NR_PROFILE_HIT; ++i) {
74132- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
74133+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
74134 hits[i].pc = hits[i].hits = 0;
74135 }
74136 out:
74137@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc, unsigned int nr_hits)
74138 if (prof_on != type || !prof_buffer)
74139 return;
74140 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
74141- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74142+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
74143 }
74144 #endif /* !CONFIG_SMP */
74145 EXPORT_SYMBOL_GPL(profile_hits);
74146@@ -517,7 +517,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
74147 return -EFAULT;
74148 buf++; p++; count--; read++;
74149 }
74150- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
74151+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
74152 if (copy_to_user(buf, (void *)pnt, count))
74153 return -EFAULT;
74154 read += count;
74155@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
74156 }
74157 #endif
74158 profile_discard_flip_buffers();
74159- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
74160+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
74161 return count;
74162 }
74163
74164diff --git a/kernel/ptrace.c b/kernel/ptrace.c
74165index 05625f6..733bf70 100644
74166--- a/kernel/ptrace.c
74167+++ b/kernel/ptrace.c
74168@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_struct *child, int kill)
74169 return ret;
74170 }
74171
74172-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74173+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
74174+ unsigned int log)
74175 {
74176 const struct cred *cred = current_cred(), *tcred;
74177
74178@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74179 cred->gid != tcred->egid ||
74180 cred->gid != tcred->sgid ||
74181 cred->gid != tcred->gid) &&
74182- !capable(CAP_SYS_PTRACE)) {
74183+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74184+ (log && !capable(CAP_SYS_PTRACE)))
74185+ ) {
74186 rcu_read_unlock();
74187 return -EPERM;
74188 }
74189@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_struct *task, unsigned int mode)
74190 smp_rmb();
74191 if (task->mm)
74192 dumpable = get_dumpable(task->mm);
74193- if (!dumpable && !capable(CAP_SYS_PTRACE))
74194+ if (!dumpable &&
74195+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
74196+ (log && !capable(CAP_SYS_PTRACE))))
74197 return -EPERM;
74198
74199 return security_ptrace_access_check(task, mode);
74200@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struct *task, unsigned int mode)
74201 {
74202 int err;
74203 task_lock(task);
74204- err = __ptrace_may_access(task, mode);
74205+ err = __ptrace_may_access(task, mode, 0);
74206+ task_unlock(task);
74207+ return !err;
74208+}
74209+
74210+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
74211+{
74212+ int err;
74213+ task_lock(task);
74214+ err = __ptrace_may_access(task, mode, 1);
74215 task_unlock(task);
74216 return !err;
74217 }
74218@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *task)
74219 goto out;
74220
74221 task_lock(task);
74222- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
74223+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
74224 task_unlock(task);
74225 if (retval)
74226 goto unlock_creds;
74227@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *task)
74228 goto unlock_tasklist;
74229
74230 task->ptrace = PT_PTRACED;
74231- if (capable(CAP_SYS_PTRACE))
74232+ if (capable_nolog(CAP_SYS_PTRACE))
74233 task->ptrace |= PT_PTRACE_CAP;
74234
74235 __ptrace_link(task, current);
74236@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
74237 {
74238 int copied = 0;
74239
74240+ pax_track_stack();
74241+
74242 while (len > 0) {
74243 char buf[128];
74244 int this_len, retval;
74245@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long ds
74246 {
74247 int copied = 0;
74248
74249+ pax_track_stack();
74250+
74251 while (len > 0) {
74252 char buf[128];
74253 int this_len, retval;
74254@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *child, long request,
74255 int ret = -EIO;
74256 siginfo_t siginfo;
74257
74258+ pax_track_stack();
74259+
74260 switch (request) {
74261 case PTRACE_PEEKTEXT:
74262 case PTRACE_PEEKDATA:
74263@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *child, long request,
74264 ret = ptrace_setoptions(child, data);
74265 break;
74266 case PTRACE_GETEVENTMSG:
74267- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
74268+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
74269 break;
74270
74271 case PTRACE_GETSIGINFO:
74272 ret = ptrace_getsiginfo(child, &siginfo);
74273 if (!ret)
74274- ret = copy_siginfo_to_user((siginfo_t __user *) data,
74275+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
74276 &siginfo);
74277 break;
74278
74279 case PTRACE_SETSIGINFO:
74280- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
74281+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
74282 sizeof siginfo))
74283 ret = -EFAULT;
74284 else
74285@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
74286 goto out;
74287 }
74288
74289+ if (gr_handle_ptrace(child, request)) {
74290+ ret = -EPERM;
74291+ goto out_put_task_struct;
74292+ }
74293+
74294 if (request == PTRACE_ATTACH) {
74295 ret = ptrace_attach(child);
74296 /*
74297 * Some architectures need to do book-keeping after
74298 * a ptrace attach.
74299 */
74300- if (!ret)
74301+ if (!ret) {
74302 arch_ptrace_attach(child);
74303+ gr_audit_ptrace(child);
74304+ }
74305 goto out_put_task_struct;
74306 }
74307
74308@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
74309 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
74310 if (copied != sizeof(tmp))
74311 return -EIO;
74312- return put_user(tmp, (unsigned long __user *)data);
74313+ return put_user(tmp, (__force unsigned long __user *)data);
74314 }
74315
74316 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
74317@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
74318 siginfo_t siginfo;
74319 int ret;
74320
74321+ pax_track_stack();
74322+
74323 switch (request) {
74324 case PTRACE_PEEKTEXT:
74325 case PTRACE_PEEKDATA:
74326@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
74327 goto out;
74328 }
74329
74330+ if (gr_handle_ptrace(child, request)) {
74331+ ret = -EPERM;
74332+ goto out_put_task_struct;
74333+ }
74334+
74335 if (request == PTRACE_ATTACH) {
74336 ret = ptrace_attach(child);
74337 /*
74338 * Some architectures need to do book-keeping after
74339 * a ptrace attach.
74340 */
74341- if (!ret)
74342+ if (!ret) {
74343 arch_ptrace_attach(child);
74344+ gr_audit_ptrace(child);
74345+ }
74346 goto out_put_task_struct;
74347 }
74348
74349diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
74350index 697c0a0..2402696 100644
74351--- a/kernel/rcutorture.c
74352+++ b/kernel/rcutorture.c
74353@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
74354 { 0 };
74355 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
74356 { 0 };
74357-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74358-static atomic_t n_rcu_torture_alloc;
74359-static atomic_t n_rcu_torture_alloc_fail;
74360-static atomic_t n_rcu_torture_free;
74361-static atomic_t n_rcu_torture_mberror;
74362-static atomic_t n_rcu_torture_error;
74363+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
74364+static atomic_unchecked_t n_rcu_torture_alloc;
74365+static atomic_unchecked_t n_rcu_torture_alloc_fail;
74366+static atomic_unchecked_t n_rcu_torture_free;
74367+static atomic_unchecked_t n_rcu_torture_mberror;
74368+static atomic_unchecked_t n_rcu_torture_error;
74369 static long n_rcu_torture_timers;
74370 static struct list_head rcu_torture_removed;
74371 static cpumask_var_t shuffle_tmp_mask;
74372@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
74373
74374 spin_lock_bh(&rcu_torture_lock);
74375 if (list_empty(&rcu_torture_freelist)) {
74376- atomic_inc(&n_rcu_torture_alloc_fail);
74377+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
74378 spin_unlock_bh(&rcu_torture_lock);
74379 return NULL;
74380 }
74381- atomic_inc(&n_rcu_torture_alloc);
74382+ atomic_inc_unchecked(&n_rcu_torture_alloc);
74383 p = rcu_torture_freelist.next;
74384 list_del_init(p);
74385 spin_unlock_bh(&rcu_torture_lock);
74386@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
74387 static void
74388 rcu_torture_free(struct rcu_torture *p)
74389 {
74390- atomic_inc(&n_rcu_torture_free);
74391+ atomic_inc_unchecked(&n_rcu_torture_free);
74392 spin_lock_bh(&rcu_torture_lock);
74393 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
74394 spin_unlock_bh(&rcu_torture_lock);
74395@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
74396 i = rp->rtort_pipe_count;
74397 if (i > RCU_TORTURE_PIPE_LEN)
74398 i = RCU_TORTURE_PIPE_LEN;
74399- atomic_inc(&rcu_torture_wcount[i]);
74400+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74401 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74402 rp->rtort_mbtest = 0;
74403 rcu_torture_free(rp);
74404@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
74405 i = rp->rtort_pipe_count;
74406 if (i > RCU_TORTURE_PIPE_LEN)
74407 i = RCU_TORTURE_PIPE_LEN;
74408- atomic_inc(&rcu_torture_wcount[i]);
74409+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74410 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
74411 rp->rtort_mbtest = 0;
74412 list_del(&rp->rtort_free);
74413@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
74414 i = old_rp->rtort_pipe_count;
74415 if (i > RCU_TORTURE_PIPE_LEN)
74416 i = RCU_TORTURE_PIPE_LEN;
74417- atomic_inc(&rcu_torture_wcount[i]);
74418+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
74419 old_rp->rtort_pipe_count++;
74420 cur_ops->deferred_free(old_rp);
74421 }
74422@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned long unused)
74423 return;
74424 }
74425 if (p->rtort_mbtest == 0)
74426- atomic_inc(&n_rcu_torture_mberror);
74427+ atomic_inc_unchecked(&n_rcu_torture_mberror);
74428 spin_lock(&rand_lock);
74429 cur_ops->read_delay(&rand);
74430 n_rcu_torture_timers++;
74431@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
74432 continue;
74433 }
74434 if (p->rtort_mbtest == 0)
74435- atomic_inc(&n_rcu_torture_mberror);
74436+ atomic_inc_unchecked(&n_rcu_torture_mberror);
74437 cur_ops->read_delay(&rand);
74438 preempt_disable();
74439 pipe_count = p->rtort_pipe_count;
74440@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
74441 rcu_torture_current,
74442 rcu_torture_current_version,
74443 list_empty(&rcu_torture_freelist),
74444- atomic_read(&n_rcu_torture_alloc),
74445- atomic_read(&n_rcu_torture_alloc_fail),
74446- atomic_read(&n_rcu_torture_free),
74447- atomic_read(&n_rcu_torture_mberror),
74448+ atomic_read_unchecked(&n_rcu_torture_alloc),
74449+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
74450+ atomic_read_unchecked(&n_rcu_torture_free),
74451+ atomic_read_unchecked(&n_rcu_torture_mberror),
74452 n_rcu_torture_timers);
74453- if (atomic_read(&n_rcu_torture_mberror) != 0)
74454+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
74455 cnt += sprintf(&page[cnt], " !!!");
74456 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
74457 if (i > 1) {
74458 cnt += sprintf(&page[cnt], "!!! ");
74459- atomic_inc(&n_rcu_torture_error);
74460+ atomic_inc_unchecked(&n_rcu_torture_error);
74461 WARN_ON_ONCE(1);
74462 }
74463 cnt += sprintf(&page[cnt], "Reader Pipe: ");
74464@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
74465 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
74466 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74467 cnt += sprintf(&page[cnt], " %d",
74468- atomic_read(&rcu_torture_wcount[i]));
74469+ atomic_read_unchecked(&rcu_torture_wcount[i]));
74470 }
74471 cnt += sprintf(&page[cnt], "\n");
74472 if (cur_ops->stats)
74473@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
74474
74475 if (cur_ops->cleanup)
74476 cur_ops->cleanup();
74477- if (atomic_read(&n_rcu_torture_error))
74478+ if (atomic_read_unchecked(&n_rcu_torture_error))
74479 rcu_torture_print_module_parms("End of test: FAILURE");
74480 else
74481 rcu_torture_print_module_parms("End of test: SUCCESS");
74482@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
74483
74484 rcu_torture_current = NULL;
74485 rcu_torture_current_version = 0;
74486- atomic_set(&n_rcu_torture_alloc, 0);
74487- atomic_set(&n_rcu_torture_alloc_fail, 0);
74488- atomic_set(&n_rcu_torture_free, 0);
74489- atomic_set(&n_rcu_torture_mberror, 0);
74490- atomic_set(&n_rcu_torture_error, 0);
74491+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
74492+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
74493+ atomic_set_unchecked(&n_rcu_torture_free, 0);
74494+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
74495+ atomic_set_unchecked(&n_rcu_torture_error, 0);
74496 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
74497- atomic_set(&rcu_torture_wcount[i], 0);
74498+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
74499 for_each_possible_cpu(cpu) {
74500 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
74501 per_cpu(rcu_torture_count, cpu)[i] = 0;
74502diff --git a/kernel/rcutree.c b/kernel/rcutree.c
74503index 683c4f3..97f54c6 100644
74504--- a/kernel/rcutree.c
74505+++ b/kernel/rcutree.c
74506@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
74507 /*
74508 * Do softirq processing for the current CPU.
74509 */
74510-static void rcu_process_callbacks(struct softirq_action *unused)
74511+static void rcu_process_callbacks(void)
74512 {
74513 /*
74514 * Memory references from any prior RCU read-side critical sections
74515diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
74516index c03edf7..ac1b341 100644
74517--- a/kernel/rcutree_plugin.h
74518+++ b/kernel/rcutree_plugin.h
74519@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_switch(int cpu)
74520 */
74521 void __rcu_read_lock(void)
74522 {
74523- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
74524+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
74525 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
74526 }
74527 EXPORT_SYMBOL_GPL(__rcu_read_lock);
74528@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
74529 struct task_struct *t = current;
74530
74531 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
74532- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
74533+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
74534 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
74535 rcu_read_unlock_special(t);
74536 }
74537diff --git a/kernel/relay.c b/kernel/relay.c
74538index 760c262..a9fd241 100644
74539--- a/kernel/relay.c
74540+++ b/kernel/relay.c
74541@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct file *in,
74542 unsigned int flags,
74543 int *nonpad_ret)
74544 {
74545- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
74546+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
74547 struct rchan_buf *rbuf = in->private_data;
74548 unsigned int subbuf_size = rbuf->chan->subbuf_size;
74549 uint64_t pos = (uint64_t) *ppos;
74550@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct file *in,
74551 .ops = &relay_pipe_buf_ops,
74552 .spd_release = relay_page_release,
74553 };
74554+ ssize_t ret;
74555+
74556+ pax_track_stack();
74557
74558 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
74559 return 0;
74560diff --git a/kernel/resource.c b/kernel/resource.c
74561index fb11a58..4e61ae1 100644
74562--- a/kernel/resource.c
74563+++ b/kernel/resource.c
74564@@ -132,8 +132,18 @@ static const struct file_operations proc_iomem_operations = {
74565
74566 static int __init ioresources_init(void)
74567 {
74568+#ifdef CONFIG_GRKERNSEC_PROC_ADD
74569+#ifdef CONFIG_GRKERNSEC_PROC_USER
74570+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
74571+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
74572+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
74573+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
74574+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
74575+#endif
74576+#else
74577 proc_create("ioports", 0, NULL, &proc_ioports_operations);
74578 proc_create("iomem", 0, NULL, &proc_iomem_operations);
74579+#endif
74580 return 0;
74581 }
74582 __initcall(ioresources_init);
74583diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
74584index a56f629..1fc4989 100644
74585--- a/kernel/rtmutex-tester.c
74586+++ b/kernel/rtmutex-tester.c
74587@@ -21,7 +21,7 @@
74588 #define MAX_RT_TEST_MUTEXES 8
74589
74590 static spinlock_t rttest_lock;
74591-static atomic_t rttest_event;
74592+static atomic_unchecked_t rttest_event;
74593
74594 struct test_thread_data {
74595 int opcode;
74596@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74597
74598 case RTTEST_LOCKCONT:
74599 td->mutexes[td->opdata] = 1;
74600- td->event = atomic_add_return(1, &rttest_event);
74601+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74602 return 0;
74603
74604 case RTTEST_RESET:
74605@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74606 return 0;
74607
74608 case RTTEST_RESETEVENT:
74609- atomic_set(&rttest_event, 0);
74610+ atomic_set_unchecked(&rttest_event, 0);
74611 return 0;
74612
74613 default:
74614@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74615 return ret;
74616
74617 td->mutexes[id] = 1;
74618- td->event = atomic_add_return(1, &rttest_event);
74619+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74620 rt_mutex_lock(&mutexes[id]);
74621- td->event = atomic_add_return(1, &rttest_event);
74622+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74623 td->mutexes[id] = 4;
74624 return 0;
74625
74626@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74627 return ret;
74628
74629 td->mutexes[id] = 1;
74630- td->event = atomic_add_return(1, &rttest_event);
74631+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74632 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
74633- td->event = atomic_add_return(1, &rttest_event);
74634+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74635 td->mutexes[id] = ret ? 0 : 4;
74636 return ret ? -EINTR : 0;
74637
74638@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
74639 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
74640 return ret;
74641
74642- td->event = atomic_add_return(1, &rttest_event);
74643+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74644 rt_mutex_unlock(&mutexes[id]);
74645- td->event = atomic_add_return(1, &rttest_event);
74646+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74647 td->mutexes[id] = 0;
74648 return 0;
74649
74650@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74651 break;
74652
74653 td->mutexes[dat] = 2;
74654- td->event = atomic_add_return(1, &rttest_event);
74655+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74656 break;
74657
74658 case RTTEST_LOCKBKL:
74659@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74660 return;
74661
74662 td->mutexes[dat] = 3;
74663- td->event = atomic_add_return(1, &rttest_event);
74664+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74665 break;
74666
74667 case RTTEST_LOCKNOWAIT:
74668@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
74669 return;
74670
74671 td->mutexes[dat] = 1;
74672- td->event = atomic_add_return(1, &rttest_event);
74673+ td->event = atomic_add_return_unchecked(1, &rttest_event);
74674 return;
74675
74676 case RTTEST_LOCKBKL:
74677diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
74678index 29bd4ba..8c5de90 100644
74679--- a/kernel/rtmutex.c
74680+++ b/kernel/rtmutex.c
74681@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
74682 */
74683 spin_lock_irqsave(&pendowner->pi_lock, flags);
74684
74685- WARN_ON(!pendowner->pi_blocked_on);
74686+ BUG_ON(!pendowner->pi_blocked_on);
74687 WARN_ON(pendowner->pi_blocked_on != waiter);
74688 WARN_ON(pendowner->pi_blocked_on->lock != lock);
74689
74690diff --git a/kernel/sched.c b/kernel/sched.c
74691index 0591df8..e3af3a4 100644
74692--- a/kernel/sched.c
74693+++ b/kernel/sched.c
74694@@ -5043,7 +5043,7 @@ out:
74695 * In CONFIG_NO_HZ case, the idle load balance owner will do the
74696 * rebalancing for all the cpus for whom scheduler ticks are stopped.
74697 */
74698-static void run_rebalance_domains(struct softirq_action *h)
74699+static void run_rebalance_domains(void)
74700 {
74701 int this_cpu = smp_processor_id();
74702 struct rq *this_rq = cpu_rq(this_cpu);
74703@@ -5690,6 +5690,19 @@ pick_next_task(struct rq *rq)
74704 }
74705 }
74706
74707+#ifdef CONFIG_GRKERNSEC_SETXID
74708+extern void gr_delayed_cred_worker(void);
74709+static inline void gr_cred_schedule(void)
74710+{
74711+ if (unlikely(current->delayed_cred))
74712+ gr_delayed_cred_worker();
74713+}
74714+#else
74715+static inline void gr_cred_schedule(void)
74716+{
74717+}
74718+#endif
74719+
74720 /*
74721 * schedule() is the main scheduler function.
74722 */
74723@@ -5700,6 +5713,8 @@ asmlinkage void __sched schedule(void)
74724 struct rq *rq;
74725 int cpu;
74726
74727+ pax_track_stack();
74728+
74729 need_resched:
74730 preempt_disable();
74731 cpu = smp_processor_id();
74732@@ -5713,6 +5728,8 @@ need_resched_nonpreemptible:
74733
74734 schedule_debug(prev);
74735
74736+ gr_cred_schedule();
74737+
74738 if (sched_feat(HRTICK))
74739 hrtick_clear(rq);
74740
74741@@ -5770,7 +5787,7 @@ EXPORT_SYMBOL(schedule);
74742 * Look out! "owner" is an entirely speculative pointer
74743 * access and not reliable.
74744 */
74745-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74746+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
74747 {
74748 unsigned int cpu;
74749 struct rq *rq;
74750@@ -5784,10 +5801,10 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74751 * DEBUG_PAGEALLOC could have unmapped it if
74752 * the mutex owner just released it and exited.
74753 */
74754- if (probe_kernel_address(&owner->cpu, cpu))
74755+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
74756 return 0;
74757 #else
74758- cpu = owner->cpu;
74759+ cpu = task_thread_info(owner)->cpu;
74760 #endif
74761
74762 /*
74763@@ -5816,7 +5833,7 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
74764 /*
74765 * Is that owner really running on that cpu?
74766 */
74767- if (task_thread_info(rq->curr) != owner || need_resched())
74768+ if (rq->curr != owner || need_resched())
74769 return 0;
74770
74771 cpu_relax();
74772@@ -6359,6 +6376,8 @@ int can_nice(const struct task_struct *p, const int nice)
74773 /* convert nice value [19,-20] to rlimit style value [1,40] */
74774 int nice_rlim = 20 - nice;
74775
74776+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
74777+
74778 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
74779 capable(CAP_SYS_NICE));
74780 }
74781@@ -6392,7 +6411,8 @@ SYSCALL_DEFINE1(nice, int, increment)
74782 if (nice > 19)
74783 nice = 19;
74784
74785- if (increment < 0 && !can_nice(current, nice))
74786+ if (increment < 0 && (!can_nice(current, nice) ||
74787+ gr_handle_chroot_nice()))
74788 return -EPERM;
74789
74790 retval = security_task_setnice(current, nice);
74791@@ -8774,7 +8794,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
74792 long power;
74793 int weight;
74794
74795- WARN_ON(!sd || !sd->groups);
74796+ BUG_ON(!sd || !sd->groups);
74797
74798 if (cpu != group_first_cpu(sd->groups))
74799 return;
74800diff --git a/kernel/signal.c b/kernel/signal.c
74801index 2494827..cda80a0 100644
74802--- a/kernel/signal.c
74803+++ b/kernel/signal.c
74804@@ -41,12 +41,12 @@
74805
74806 static struct kmem_cache *sigqueue_cachep;
74807
74808-static void __user *sig_handler(struct task_struct *t, int sig)
74809+static __sighandler_t sig_handler(struct task_struct *t, int sig)
74810 {
74811 return t->sighand->action[sig - 1].sa.sa_handler;
74812 }
74813
74814-static int sig_handler_ignored(void __user *handler, int sig)
74815+static int sig_handler_ignored(__sighandler_t handler, int sig)
74816 {
74817 /* Is it explicitly or implicitly ignored? */
74818 return handler == SIG_IGN ||
74819@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
74820 static int sig_task_ignored(struct task_struct *t, int sig,
74821 int from_ancestor_ns)
74822 {
74823- void __user *handler;
74824+ __sighandler_t handler;
74825
74826 handler = sig_handler(t, sig);
74827
74828@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc(struct task_struct *t, gfp_t flags,
74829 */
74830 user = get_uid(__task_cred(t)->user);
74831 atomic_inc(&user->sigpending);
74832+
74833+ if (!override_rlimit)
74834+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
74835 if (override_rlimit ||
74836 atomic_read(&user->sigpending) <=
74837 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
74838@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
74839
74840 int unhandled_signal(struct task_struct *tsk, int sig)
74841 {
74842- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
74843+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
74844 if (is_global_init(tsk))
74845 return 1;
74846 if (handler != SIG_IGN && handler != SIG_DFL)
74847@@ -627,6 +630,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
74848 }
74849 }
74850
74851+ /* allow glibc communication via tgkill to other threads in our
74852+ thread group */
74853+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
74854+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
74855+ && gr_handle_signal(t, sig))
74856+ return -EPERM;
74857+
74858 return security_task_kill(t, info, sig, 0);
74859 }
74860
74861@@ -968,7 +978,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
74862 return send_signal(sig, info, p, 1);
74863 }
74864
74865-static int
74866+int
74867 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74868 {
74869 return send_signal(sig, info, t, 0);
74870@@ -1005,6 +1015,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74871 unsigned long int flags;
74872 int ret, blocked, ignored;
74873 struct k_sigaction *action;
74874+ int is_unhandled = 0;
74875
74876 spin_lock_irqsave(&t->sighand->siglock, flags);
74877 action = &t->sighand->action[sig-1];
74878@@ -1019,9 +1030,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
74879 }
74880 if (action->sa.sa_handler == SIG_DFL)
74881 t->signal->flags &= ~SIGNAL_UNKILLABLE;
74882+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
74883+ is_unhandled = 1;
74884 ret = specific_send_sig_info(sig, info, t);
74885 spin_unlock_irqrestore(&t->sighand->siglock, flags);
74886
74887+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
74888+ normal operation */
74889+ if (is_unhandled) {
74890+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
74891+ gr_handle_crash(t, sig);
74892+ }
74893+
74894 return ret;
74895 }
74896
74897@@ -1081,8 +1101,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
74898 {
74899 int ret = check_kill_permission(sig, info, p);
74900
74901- if (!ret && sig)
74902+ if (!ret && sig) {
74903 ret = do_send_sig_info(sig, info, p, true);
74904+ if (!ret)
74905+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
74906+ }
74907
74908 return ret;
74909 }
74910@@ -1644,6 +1667,8 @@ void ptrace_notify(int exit_code)
74911 {
74912 siginfo_t info;
74913
74914+ pax_track_stack();
74915+
74916 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
74917
74918 memset(&info, 0, sizeof info);
74919@@ -2275,7 +2300,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
74920 int error = -ESRCH;
74921
74922 rcu_read_lock();
74923- p = find_task_by_vpid(pid);
74924+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
74925+ /* allow glibc communication via tgkill to other threads in our
74926+ thread group */
74927+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
74928+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
74929+ p = find_task_by_vpid_unrestricted(pid);
74930+ else
74931+#endif
74932+ p = find_task_by_vpid(pid);
74933 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
74934 error = check_kill_permission(sig, info, p);
74935 /*
74936diff --git a/kernel/smp.c b/kernel/smp.c
74937index aa9cff3..631a0de 100644
74938--- a/kernel/smp.c
74939+++ b/kernel/smp.c
74940@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void *), void *info, int wait)
74941 }
74942 EXPORT_SYMBOL(smp_call_function);
74943
74944-void ipi_call_lock(void)
74945+void ipi_call_lock(void) __acquires(call_function.lock)
74946 {
74947 spin_lock(&call_function.lock);
74948 }
74949
74950-void ipi_call_unlock(void)
74951+void ipi_call_unlock(void) __releases(call_function.lock)
74952 {
74953 spin_unlock(&call_function.lock);
74954 }
74955
74956-void ipi_call_lock_irq(void)
74957+void ipi_call_lock_irq(void) __acquires(call_function.lock)
74958 {
74959 spin_lock_irq(&call_function.lock);
74960 }
74961
74962-void ipi_call_unlock_irq(void)
74963+void ipi_call_unlock_irq(void) __releases(call_function.lock)
74964 {
74965 spin_unlock_irq(&call_function.lock);
74966 }
74967diff --git a/kernel/softirq.c b/kernel/softirq.c
74968index 04a0252..580c512 100644
74969--- a/kernel/softirq.c
74970+++ b/kernel/softirq.c
74971@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp
74972
74973 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
74974
74975-char *softirq_to_name[NR_SOFTIRQS] = {
74976+const char * const softirq_to_name[NR_SOFTIRQS] = {
74977 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
74978 "TASKLET", "SCHED", "HRTIMER", "RCU"
74979 };
74980@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
74981
74982 asmlinkage void __do_softirq(void)
74983 {
74984- struct softirq_action *h;
74985+ const struct softirq_action *h;
74986 __u32 pending;
74987 int max_restart = MAX_SOFTIRQ_RESTART;
74988 int cpu;
74989@@ -233,7 +233,7 @@ restart:
74990 kstat_incr_softirqs_this_cpu(h - softirq_vec);
74991
74992 trace_softirq_entry(h, softirq_vec);
74993- h->action(h);
74994+ h->action();
74995 trace_softirq_exit(h, softirq_vec);
74996 if (unlikely(prev_count != preempt_count())) {
74997 printk(KERN_ERR "huh, entered softirq %td %s %p"
74998@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
74999 local_irq_restore(flags);
75000 }
75001
75002-void open_softirq(int nr, void (*action)(struct softirq_action *))
75003+void open_softirq(int nr, void (*action)(void))
75004 {
75005- softirq_vec[nr].action = action;
75006+ pax_open_kernel();
75007+ *(void **)&softirq_vec[nr].action = action;
75008+ pax_close_kernel();
75009 }
75010
75011 /*
75012@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
75013
75014 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
75015
75016-static void tasklet_action(struct softirq_action *a)
75017+static void tasklet_action(void)
75018 {
75019 struct tasklet_struct *list;
75020
75021@@ -454,7 +456,7 @@ static void tasklet_action(struct softirq_action *a)
75022 }
75023 }
75024
75025-static void tasklet_hi_action(struct softirq_action *a)
75026+static void tasklet_hi_action(void)
75027 {
75028 struct tasklet_struct *list;
75029
75030diff --git a/kernel/sys.c b/kernel/sys.c
75031index e9512b1..8a10cb3 100644
75032--- a/kernel/sys.c
75033+++ b/kernel/sys.c
75034@@ -133,6 +133,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
75035 error = -EACCES;
75036 goto out;
75037 }
75038+
75039+ if (gr_handle_chroot_setpriority(p, niceval)) {
75040+ error = -EACCES;
75041+ goto out;
75042+ }
75043+
75044 no_nice = security_task_setnice(p, niceval);
75045 if (no_nice) {
75046 error = no_nice;
75047@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which, int, who, int, niceval)
75048 !(user = find_user(who)))
75049 goto out_unlock; /* No processes for this user */
75050
75051- do_each_thread(g, p)
75052+ do_each_thread(g, p) {
75053 if (__task_cred(p)->uid == who)
75054 error = set_one_prio(p, niceval, error);
75055- while_each_thread(g, p);
75056+ } while_each_thread(g, p);
75057 if (who != cred->uid)
75058 free_uid(user); /* For find_user() */
75059 break;
75060@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which, int, who)
75061 !(user = find_user(who)))
75062 goto out_unlock; /* No processes for this user */
75063
75064- do_each_thread(g, p)
75065+ do_each_thread(g, p) {
75066 if (__task_cred(p)->uid == who) {
75067 niceval = 20 - task_nice(p);
75068 if (niceval > retval)
75069 retval = niceval;
75070 }
75071- while_each_thread(g, p);
75072+ } while_each_thread(g, p);
75073 if (who != cred->uid)
75074 free_uid(user); /* for find_user() */
75075 break;
75076@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
75077 goto error;
75078 }
75079
75080+ if (gr_check_group_change(new->gid, new->egid, -1))
75081+ goto error;
75082+
75083 if (rgid != (gid_t) -1 ||
75084 (egid != (gid_t) -1 && egid != old->gid))
75085 new->sgid = new->egid;
75086@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
75087 goto error;
75088
75089 retval = -EPERM;
75090+
75091+ if (gr_check_group_change(gid, gid, gid))
75092+ goto error;
75093+
75094 if (capable(CAP_SETGID))
75095 new->gid = new->egid = new->sgid = new->fsgid = gid;
75096 else if (gid == old->gid || gid == old->sgid)
75097@@ -559,7 +572,7 @@ error:
75098 /*
75099 * change the user struct in a credentials set to match the new UID
75100 */
75101-static int set_user(struct cred *new)
75102+int set_user(struct cred *new)
75103 {
75104 struct user_struct *new_user;
75105
75106@@ -567,12 +580,19 @@ static int set_user(struct cred *new)
75107 if (!new_user)
75108 return -EAGAIN;
75109
75110+ /*
75111+ * We don't fail in case of NPROC limit excess here because too many
75112+ * poorly written programs don't check set*uid() return code, assuming
75113+ * it never fails if called by root. We may still enforce NPROC limit
75114+ * for programs doing set*uid()+execve() by harmlessly deferring the
75115+ * failure to the execve() stage.
75116+ */
75117 if (atomic_read(&new_user->processes) >=
75118 current->signal->rlim[RLIMIT_NPROC].rlim_cur &&
75119- new_user != INIT_USER) {
75120- free_uid(new_user);
75121- return -EAGAIN;
75122- }
75123+ new_user != INIT_USER)
75124+ current->flags |= PF_NPROC_EXCEEDED;
75125+ else
75126+ current->flags &= ~PF_NPROC_EXCEEDED;
75127
75128 free_uid(new->user);
75129 new->user = new_user;
75130@@ -627,6 +647,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
75131 goto error;
75132 }
75133
75134+ if (gr_check_user_change(new->uid, new->euid, -1))
75135+ goto error;
75136+
75137 if (new->uid != old->uid) {
75138 retval = set_user(new);
75139 if (retval < 0)
75140@@ -675,6 +698,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
75141 goto error;
75142
75143 retval = -EPERM;
75144+
75145+ if (gr_check_crash_uid(uid))
75146+ goto error;
75147+ if (gr_check_user_change(uid, uid, uid))
75148+ goto error;
75149+
75150 if (capable(CAP_SETUID)) {
75151 new->suid = new->uid = uid;
75152 if (uid != old->uid) {
75153@@ -721,9 +750,18 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75154
75155 retval = -EPERM;
75156 if (!capable(CAP_SETUID)) {
75157- if (ruid != (uid_t) -1 && ruid != old->uid &&
75158- ruid != old->euid && ruid != old->suid)
75159- goto error;
75160+ // if RBAC is enabled, require CAP_SETUID to change
75161+ // uid to euid (from a suid binary, for instance)
75162+ // this is a hardening of normal permissions, not
75163+ // weakening
75164+ if (gr_acl_is_enabled()) {
75165+ if (ruid != (uid_t) -1 && ruid != old->uid)
75166+ goto error;
75167+ } else {
75168+ if (ruid != (uid_t) -1 && ruid != old->uid &&
75169+ ruid != old->euid && ruid != old->suid)
75170+ goto error;
75171+ }
75172 if (euid != (uid_t) -1 && euid != old->uid &&
75173 euid != old->euid && euid != old->suid)
75174 goto error;
75175@@ -732,6 +770,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
75176 goto error;
75177 }
75178
75179+ if (gr_check_user_change(ruid, euid, -1))
75180+ goto error;
75181+
75182 if (ruid != (uid_t) -1) {
75183 new->uid = ruid;
75184 if (ruid != old->uid) {
75185@@ -789,9 +830,18 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75186
75187 retval = -EPERM;
75188 if (!capable(CAP_SETGID)) {
75189- if (rgid != (gid_t) -1 && rgid != old->gid &&
75190- rgid != old->egid && rgid != old->sgid)
75191- goto error;
75192+ // if RBAC is enabled, require CAP_SETGID to change
75193+ // gid to egid (from a sgid binary, for instance)
75194+ // this is a hardening of normal permissions, not
75195+ // weakening
75196+ if (gr_acl_is_enabled()) {
75197+ if (rgid != (gid_t) -1 && rgid != old->gid)
75198+ goto error;
75199+ } else {
75200+ if (rgid != (gid_t) -1 && rgid != old->gid &&
75201+ rgid != old->egid && rgid != old->sgid)
75202+ goto error;
75203+ }
75204 if (egid != (gid_t) -1 && egid != old->gid &&
75205 egid != old->egid && egid != old->sgid)
75206 goto error;
75207@@ -800,6 +850,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
75208 goto error;
75209 }
75210
75211+ if (gr_check_group_change(rgid, egid, -1))
75212+ goto error;
75213+
75214 if (rgid != (gid_t) -1)
75215 new->gid = rgid;
75216 if (egid != (gid_t) -1)
75217@@ -849,6 +902,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
75218 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
75219 goto error;
75220
75221+ if (gr_check_user_change(-1, -1, uid))
75222+ goto error;
75223+
75224 if (uid == old->uid || uid == old->euid ||
75225 uid == old->suid || uid == old->fsuid ||
75226 capable(CAP_SETUID)) {
75227@@ -889,6 +945,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
75228 if (gid == old->gid || gid == old->egid ||
75229 gid == old->sgid || gid == old->fsgid ||
75230 capable(CAP_SETGID)) {
75231+ if (gr_check_group_change(-1, -1, gid))
75232+ goto error;
75233+
75234 if (gid != old_fsgid) {
75235 new->fsgid = gid;
75236 goto change_okay;
75237@@ -1454,7 +1513,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
75238 error = get_dumpable(me->mm);
75239 break;
75240 case PR_SET_DUMPABLE:
75241- if (arg2 < 0 || arg2 > 1) {
75242+ if (arg2 > 1) {
75243 error = -EINVAL;
75244 break;
75245 }
75246diff --git a/kernel/sysctl.c b/kernel/sysctl.c
75247index b8bd058..ab6a76be 100644
75248--- a/kernel/sysctl.c
75249+++ b/kernel/sysctl.c
75250@@ -63,6 +63,13 @@
75251 static int deprecated_sysctl_warning(struct __sysctl_args *args);
75252
75253 #if defined(CONFIG_SYSCTL)
75254+#include <linux/grsecurity.h>
75255+#include <linux/grinternal.h>
75256+
75257+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
75258+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
75259+ const int op);
75260+extern int gr_handle_chroot_sysctl(const int op);
75261
75262 /* External variables not in a header file. */
75263 extern int C_A_D;
75264@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write,
75265 static int proc_taint(struct ctl_table *table, int write,
75266 void __user *buffer, size_t *lenp, loff_t *ppos);
75267 #endif
75268+extern ctl_table grsecurity_table[];
75269
75270 static struct ctl_table root_table[];
75271 static struct ctl_table_root sysctl_table_root;
75272@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
75273 int sysctl_legacy_va_layout;
75274 #endif
75275
75276+#ifdef CONFIG_PAX_SOFTMODE
75277+static ctl_table pax_table[] = {
75278+ {
75279+ .ctl_name = CTL_UNNUMBERED,
75280+ .procname = "softmode",
75281+ .data = &pax_softmode,
75282+ .maxlen = sizeof(unsigned int),
75283+ .mode = 0600,
75284+ .proc_handler = &proc_dointvec,
75285+ },
75286+
75287+ { .ctl_name = 0 }
75288+};
75289+#endif
75290+
75291 extern int prove_locking;
75292 extern int lock_stat;
75293
75294@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */
75295 #endif
75296
75297 static struct ctl_table kern_table[] = {
75298+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
75299+ {
75300+ .ctl_name = CTL_UNNUMBERED,
75301+ .procname = "grsecurity",
75302+ .mode = 0500,
75303+ .child = grsecurity_table,
75304+ },
75305+#endif
75306+
75307+#ifdef CONFIG_PAX_SOFTMODE
75308+ {
75309+ .ctl_name = CTL_UNNUMBERED,
75310+ .procname = "pax",
75311+ .mode = 0500,
75312+ .child = pax_table,
75313+ },
75314+#endif
75315+
75316 {
75317 .ctl_name = CTL_UNNUMBERED,
75318 .procname = "sched_child_runs_first",
75319@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
75320 .data = &modprobe_path,
75321 .maxlen = KMOD_PATH_LEN,
75322 .mode = 0644,
75323- .proc_handler = &proc_dostring,
75324- .strategy = &sysctl_string,
75325+ .proc_handler = &proc_dostring_modpriv,
75326+ .strategy = &sysctl_string_modpriv,
75327 },
75328 {
75329 .ctl_name = CTL_UNNUMBERED,
75330@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
75331 .mode = 0644,
75332 .proc_handler = &proc_dointvec
75333 },
75334+ {
75335+ .procname = "heap_stack_gap",
75336+ .data = &sysctl_heap_stack_gap,
75337+ .maxlen = sizeof(sysctl_heap_stack_gap),
75338+ .mode = 0644,
75339+ .proc_handler = proc_doulongvec_minmax,
75340+ },
75341 #else
75342 {
75343 .ctl_name = CTL_UNNUMBERED,
75344@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
75345 return 0;
75346 }
75347
75348+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
75349+
75350 static int parse_table(int __user *name, int nlen,
75351 void __user *oldval, size_t __user *oldlenp,
75352 void __user *newval, size_t newlen,
75353@@ -1821,7 +1871,7 @@ repeat:
75354 if (n == table->ctl_name) {
75355 int error;
75356 if (table->child) {
75357- if (sysctl_perm(root, table, MAY_EXEC))
75358+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
75359 return -EPERM;
75360 name++;
75361 nlen--;
75362@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *root, struct ctl_table *table, int op)
75363 int error;
75364 int mode;
75365
75366+ if (table->parent != NULL && table->parent->procname != NULL &&
75367+ table->procname != NULL &&
75368+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
75369+ return -EACCES;
75370+ if (gr_handle_chroot_sysctl(op))
75371+ return -EACCES;
75372+ error = gr_handle_sysctl(table, op);
75373+ if (error)
75374+ return error;
75375+
75376+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75377+ if (error)
75378+ return error;
75379+
75380+ if (root->permissions)
75381+ mode = root->permissions(root, current->nsproxy, table);
75382+ else
75383+ mode = table->mode;
75384+
75385+ return test_perm(mode, op);
75386+}
75387+
75388+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
75389+{
75390+ int error;
75391+ int mode;
75392+
75393 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
75394 if (error)
75395 return error;
75396@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *table, int write,
75397 buffer, lenp, ppos);
75398 }
75399
75400+int proc_dostring_modpriv(struct ctl_table *table, int write,
75401+ void __user *buffer, size_t *lenp, loff_t *ppos)
75402+{
75403+ if (write && !capable(CAP_SYS_MODULE))
75404+ return -EPERM;
75405+
75406+ return _proc_do_string(table->data, table->maxlen, write,
75407+ buffer, lenp, ppos);
75408+}
75409+
75410
75411 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
75412 int *valp,
75413@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
75414 vleft = table->maxlen / sizeof(unsigned long);
75415 left = *lenp;
75416
75417- for (; left && vleft--; i++, min++, max++, first=0) {
75418+ for (; left && vleft--; i++, first=0) {
75419 if (write) {
75420 while (left) {
75421 char c;
75422@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *table, int write,
75423 return -ENOSYS;
75424 }
75425
75426+int proc_dostring_modpriv(struct ctl_table *table, int write,
75427+ void __user *buffer, size_t *lenp, loff_t *ppos)
75428+{
75429+ return -ENOSYS;
75430+}
75431+
75432 int proc_dointvec(struct ctl_table *table, int write,
75433 void __user *buffer, size_t *lenp, loff_t *ppos)
75434 {
75435@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *table,
75436 return 1;
75437 }
75438
75439+int sysctl_string_modpriv(struct ctl_table *table,
75440+ void __user *oldval, size_t __user *oldlenp,
75441+ void __user *newval, size_t newlen)
75442+{
75443+ if (newval && newlen && !capable(CAP_SYS_MODULE))
75444+ return -EPERM;
75445+
75446+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
75447+}
75448+
75449 /*
75450 * This function makes sure that all of the integers in the vector
75451 * are between the minimum and maximum values given in the arrays
75452@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *table,
75453 return -ENOSYS;
75454 }
75455
75456+int sysctl_string_modpriv(struct ctl_table *table,
75457+ void __user *oldval, size_t __user *oldlenp,
75458+ void __user *newval, size_t newlen)
75459+{
75460+ return -ENOSYS;
75461+}
75462+
75463 int sysctl_intvec(struct ctl_table *table,
75464 void __user *oldval, size_t __user *oldlenp,
75465 void __user *newval, size_t newlen)
75466@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
75467 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
75468 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
75469 EXPORT_SYMBOL(proc_dostring);
75470+EXPORT_SYMBOL(proc_dostring_modpriv);
75471 EXPORT_SYMBOL(proc_doulongvec_minmax);
75472 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
75473 EXPORT_SYMBOL(register_sysctl_table);
75474@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
75475 EXPORT_SYMBOL(sysctl_jiffies);
75476 EXPORT_SYMBOL(sysctl_ms_jiffies);
75477 EXPORT_SYMBOL(sysctl_string);
75478+EXPORT_SYMBOL(sysctl_string_modpriv);
75479 EXPORT_SYMBOL(sysctl_data);
75480 EXPORT_SYMBOL(unregister_sysctl_table);
75481diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
75482index 469193c..ea3ecb2 100644
75483--- a/kernel/sysctl_check.c
75484+++ b/kernel/sysctl_check.c
75485@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
75486 } else {
75487 if ((table->strategy == sysctl_data) ||
75488 (table->strategy == sysctl_string) ||
75489+ (table->strategy == sysctl_string_modpriv) ||
75490 (table->strategy == sysctl_intvec) ||
75491 (table->strategy == sysctl_jiffies) ||
75492 (table->strategy == sysctl_ms_jiffies) ||
75493 (table->proc_handler == proc_dostring) ||
75494+ (table->proc_handler == proc_dostring_modpriv) ||
75495 (table->proc_handler == proc_dointvec) ||
75496 (table->proc_handler == proc_dointvec_minmax) ||
75497 (table->proc_handler == proc_dointvec_jiffies) ||
75498diff --git a/kernel/taskstats.c b/kernel/taskstats.c
75499index a4ef542..798bcd7 100644
75500--- a/kernel/taskstats.c
75501+++ b/kernel/taskstats.c
75502@@ -26,9 +26,12 @@
75503 #include <linux/cgroup.h>
75504 #include <linux/fs.h>
75505 #include <linux/file.h>
75506+#include <linux/grsecurity.h>
75507 #include <net/genetlink.h>
75508 #include <asm/atomic.h>
75509
75510+extern int gr_is_taskstats_denied(int pid);
75511+
75512 /*
75513 * Maximum length of a cpumask that can be specified in
75514 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
75515@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
75516 size_t size;
75517 cpumask_var_t mask;
75518
75519+ if (gr_is_taskstats_denied(current->pid))
75520+ return -EACCES;
75521+
75522 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
75523 return -ENOMEM;
75524
75525diff --git a/kernel/time.c b/kernel/time.c
75526index 33df60e..ca768bd 100644
75527--- a/kernel/time.c
75528+++ b/kernel/time.c
75529@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
75530 return error;
75531
75532 if (tz) {
75533+ /* we log in do_settimeofday called below, so don't log twice
75534+ */
75535+ if (!tv)
75536+ gr_log_timechange();
75537+
75538 /* SMP safe, global irq locking makes it work. */
75539 sys_tz = *tz;
75540 update_vsyscall_tz();
75541@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
75542 * Avoid unnecessary multiplications/divisions in the
75543 * two most common HZ cases:
75544 */
75545-unsigned int inline jiffies_to_msecs(const unsigned long j)
75546+inline unsigned int jiffies_to_msecs(const unsigned long j)
75547 {
75548 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
75549 return (MSEC_PER_SEC / HZ) * j;
75550@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
75551 }
75552 EXPORT_SYMBOL(jiffies_to_msecs);
75553
75554-unsigned int inline jiffies_to_usecs(const unsigned long j)
75555+inline unsigned int jiffies_to_usecs(const unsigned long j)
75556 {
75557 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
75558 return (USEC_PER_SEC / HZ) * j;
75559diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
75560index 57b953f..06f149f 100644
75561--- a/kernel/time/tick-broadcast.c
75562+++ b/kernel/time/tick-broadcast.c
75563@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
75564 * then clear the broadcast bit.
75565 */
75566 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
75567- int cpu = smp_processor_id();
75568+ cpu = smp_processor_id();
75569
75570 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
75571 tick_broadcast_clear_oneshot(cpu);
75572diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
75573index 4a71cff..ffb5548 100644
75574--- a/kernel/time/timekeeping.c
75575+++ b/kernel/time/timekeeping.c
75576@@ -14,6 +14,7 @@
75577 #include <linux/init.h>
75578 #include <linux/mm.h>
75579 #include <linux/sched.h>
75580+#include <linux/grsecurity.h>
75581 #include <linux/sysdev.h>
75582 #include <linux/clocksource.h>
75583 #include <linux/jiffies.h>
75584@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
75585 */
75586 struct timespec ts = xtime;
75587 timespec_add_ns(&ts, nsec);
75588- ACCESS_ONCE(xtime_cache) = ts;
75589+ ACCESS_ONCE_RW(xtime_cache) = ts;
75590 }
75591
75592 /* must hold xtime_lock */
75593@@ -337,6 +338,8 @@ int do_settimeofday(struct timespec *tv)
75594 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
75595 return -EINVAL;
75596
75597+ gr_log_timechange();
75598+
75599 write_seqlock_irqsave(&xtime_lock, flags);
75600
75601 timekeeping_forward_now();
75602diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
75603index 54c0dda..e9095d9 100644
75604--- a/kernel/time/timer_list.c
75605+++ b/kernel/time/timer_list.c
75606@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
75607
75608 static void print_name_offset(struct seq_file *m, void *sym)
75609 {
75610+#ifdef CONFIG_GRKERNSEC_HIDESYM
75611+ SEQ_printf(m, "<%p>", NULL);
75612+#else
75613 char symname[KSYM_NAME_LEN];
75614
75615 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
75616 SEQ_printf(m, "<%p>", sym);
75617 else
75618 SEQ_printf(m, "%s", symname);
75619+#endif
75620 }
75621
75622 static void
75623@@ -112,7 +116,11 @@ next_one:
75624 static void
75625 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
75626 {
75627+#ifdef CONFIG_GRKERNSEC_HIDESYM
75628+ SEQ_printf(m, " .base: %p\n", NULL);
75629+#else
75630 SEQ_printf(m, " .base: %p\n", base);
75631+#endif
75632 SEQ_printf(m, " .index: %d\n",
75633 base->index);
75634 SEQ_printf(m, " .resolution: %Lu nsecs\n",
75635@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs(void)
75636 {
75637 struct proc_dir_entry *pe;
75638
75639+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75640+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
75641+#else
75642 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
75643+#endif
75644 if (!pe)
75645 return -ENOMEM;
75646 return 0;
75647diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
75648index ee5681f..634089b 100644
75649--- a/kernel/time/timer_stats.c
75650+++ b/kernel/time/timer_stats.c
75651@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
75652 static unsigned long nr_entries;
75653 static struct entry entries[MAX_ENTRIES];
75654
75655-static atomic_t overflow_count;
75656+static atomic_unchecked_t overflow_count;
75657
75658 /*
75659 * The entries are in a hash-table, for fast lookup:
75660@@ -140,7 +140,7 @@ static void reset_entries(void)
75661 nr_entries = 0;
75662 memset(entries, 0, sizeof(entries));
75663 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
75664- atomic_set(&overflow_count, 0);
75665+ atomic_set_unchecked(&overflow_count, 0);
75666 }
75667
75668 static struct entry *alloc_entry(void)
75669@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75670 if (likely(entry))
75671 entry->count++;
75672 else
75673- atomic_inc(&overflow_count);
75674+ atomic_inc_unchecked(&overflow_count);
75675
75676 out_unlock:
75677 spin_unlock_irqrestore(lock, flags);
75678@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
75679
75680 static void print_name_offset(struct seq_file *m, unsigned long addr)
75681 {
75682+#ifdef CONFIG_GRKERNSEC_HIDESYM
75683+ seq_printf(m, "<%p>", NULL);
75684+#else
75685 char symname[KSYM_NAME_LEN];
75686
75687 if (lookup_symbol_name(addr, symname) < 0)
75688 seq_printf(m, "<%p>", (void *)addr);
75689 else
75690 seq_printf(m, "%s", symname);
75691+#endif
75692 }
75693
75694 static int tstats_show(struct seq_file *m, void *v)
75695@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
75696
75697 seq_puts(m, "Timer Stats Version: v0.2\n");
75698 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
75699- if (atomic_read(&overflow_count))
75700+ if (atomic_read_unchecked(&overflow_count))
75701 seq_printf(m, "Overflow: %d entries\n",
75702- atomic_read(&overflow_count));
75703+ atomic_read_unchecked(&overflow_count));
75704
75705 for (i = 0; i < nr_entries; i++) {
75706 entry = entries + i;
75707@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(void)
75708 {
75709 struct proc_dir_entry *pe;
75710
75711+#ifdef CONFIG_GRKERNSEC_PROC_ADD
75712+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
75713+#else
75714 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
75715+#endif
75716 if (!pe)
75717 return -ENOMEM;
75718 return 0;
75719diff --git a/kernel/timer.c b/kernel/timer.c
75720index cb3c1f1..8bf5526 100644
75721--- a/kernel/timer.c
75722+++ b/kernel/timer.c
75723@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
75724 /*
75725 * This function runs timers and the timer-tq in bottom half context.
75726 */
75727-static void run_timer_softirq(struct softirq_action *h)
75728+static void run_timer_softirq(void)
75729 {
75730 struct tvec_base *base = __get_cpu_var(tvec_bases);
75731
75732diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
75733index d9d6206..f19467e 100644
75734--- a/kernel/trace/blktrace.c
75735+++ b/kernel/trace/blktrace.c
75736@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
75737 struct blk_trace *bt = filp->private_data;
75738 char buf[16];
75739
75740- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
75741+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
75742
75743 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
75744 }
75745@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
75746 return 1;
75747
75748 bt = buf->chan->private_data;
75749- atomic_inc(&bt->dropped);
75750+ atomic_inc_unchecked(&bt->dropped);
75751 return 0;
75752 }
75753
75754@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
75755
75756 bt->dir = dir;
75757 bt->dev = dev;
75758- atomic_set(&bt->dropped, 0);
75759+ atomic_set_unchecked(&bt->dropped, 0);
75760
75761 ret = -EIO;
75762 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
75763diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
75764index 4872937..c794d40 100644
75765--- a/kernel/trace/ftrace.c
75766+++ b/kernel/trace/ftrace.c
75767@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
75768
75769 ip = rec->ip;
75770
75771+ ret = ftrace_arch_code_modify_prepare();
75772+ FTRACE_WARN_ON(ret);
75773+ if (ret)
75774+ return 0;
75775+
75776 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
75777+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
75778 if (ret) {
75779 ftrace_bug(ret, ip);
75780 rec->flags |= FTRACE_FL_FAILED;
75781- return 0;
75782 }
75783- return 1;
75784+ return ret ? 0 : 1;
75785 }
75786
75787 /*
75788diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
75789index e749a05..19c6e94 100644
75790--- a/kernel/trace/ring_buffer.c
75791+++ b/kernel/trace/ring_buffer.c
75792@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(struct list_head *list)
75793 * the reader page). But if the next page is a header page,
75794 * its flags will be non zero.
75795 */
75796-static int inline
75797+static inline int
75798 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
75799 struct buffer_page *page, struct list_head *list)
75800 {
75801diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
75802index a2a2d1f..7f32b09 100644
75803--- a/kernel/trace/trace.c
75804+++ b/kernel/trace/trace.c
75805@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(struct file *filp,
75806 size_t rem;
75807 unsigned int i;
75808
75809+ pax_track_stack();
75810+
75811 /* copy the tracer to avoid using a global lock all around */
75812 mutex_lock(&trace_types_lock);
75813 if (unlikely(old_tracer != current_trace && current_trace)) {
75814@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
75815 int entries, size, i;
75816 size_t ret;
75817
75818+ pax_track_stack();
75819+
75820 if (*ppos & (PAGE_SIZE - 1)) {
75821 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
75822 return -EINVAL;
75823@@ -3816,10 +3820,9 @@ static const struct file_operations tracing_dyn_info_fops = {
75824 };
75825 #endif
75826
75827-static struct dentry *d_tracer;
75828-
75829 struct dentry *tracing_init_dentry(void)
75830 {
75831+ static struct dentry *d_tracer;
75832 static int once;
75833
75834 if (d_tracer)
75835@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
75836 return d_tracer;
75837 }
75838
75839-static struct dentry *d_percpu;
75840-
75841 struct dentry *tracing_dentry_percpu(void)
75842 {
75843+ static struct dentry *d_percpu;
75844 static int once;
75845 struct dentry *d_tracer;
75846
75847diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
75848index d128f65..f37b4af 100644
75849--- a/kernel/trace/trace_events.c
75850+++ b/kernel/trace/trace_events.c
75851@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list);
75852 * Modules must own their file_operations to keep up with
75853 * reference counting.
75854 */
75855+
75856 struct ftrace_module_file_ops {
75857 struct list_head list;
75858 struct module *mod;
75859- struct file_operations id;
75860- struct file_operations enable;
75861- struct file_operations format;
75862- struct file_operations filter;
75863 };
75864
75865 static void remove_subsystem_dir(const char *name)
75866@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod)
75867
75868 file_ops->mod = mod;
75869
75870- file_ops->id = ftrace_event_id_fops;
75871- file_ops->id.owner = mod;
75872-
75873- file_ops->enable = ftrace_enable_fops;
75874- file_ops->enable.owner = mod;
75875-
75876- file_ops->filter = ftrace_event_filter_fops;
75877- file_ops->filter.owner = mod;
75878-
75879- file_ops->format = ftrace_event_format_fops;
75880- file_ops->format.owner = mod;
75881+ pax_open_kernel();
75882+ *(void **)&mod->trace_id.owner = mod;
75883+ *(void **)&mod->trace_enable.owner = mod;
75884+ *(void **)&mod->trace_filter.owner = mod;
75885+ *(void **)&mod->trace_format.owner = mod;
75886+ pax_close_kernel();
75887
75888 list_add(&file_ops->list, &ftrace_module_file_list);
75889
75890@@ -1063,8 +1055,8 @@ static void trace_module_add_events(struct module *mod)
75891 call->mod = mod;
75892 list_add(&call->list, &ftrace_events);
75893 event_create_dir(call, d_events,
75894- &file_ops->id, &file_ops->enable,
75895- &file_ops->filter, &file_ops->format);
75896+ &mod->trace_id, &mod->trace_enable,
75897+ &mod->trace_filter, &mod->trace_format);
75898 }
75899 }
75900
75901diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
75902index 0acd834..b800b56 100644
75903--- a/kernel/trace/trace_mmiotrace.c
75904+++ b/kernel/trace/trace_mmiotrace.c
75905@@ -23,7 +23,7 @@ struct header_iter {
75906 static struct trace_array *mmio_trace_array;
75907 static bool overrun_detected;
75908 static unsigned long prev_overruns;
75909-static atomic_t dropped_count;
75910+static atomic_unchecked_t dropped_count;
75911
75912 static void mmio_reset_data(struct trace_array *tr)
75913 {
75914@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iterator *iter)
75915
75916 static unsigned long count_overruns(struct trace_iterator *iter)
75917 {
75918- unsigned long cnt = atomic_xchg(&dropped_count, 0);
75919+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
75920 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
75921
75922 if (over > prev_overruns)
75923@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
75924 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
75925 sizeof(*entry), 0, pc);
75926 if (!event) {
75927- atomic_inc(&dropped_count);
75928+ atomic_inc_unchecked(&dropped_count);
75929 return;
75930 }
75931 entry = ring_buffer_event_data(event);
75932@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
75933 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
75934 sizeof(*entry), 0, pc);
75935 if (!event) {
75936- atomic_inc(&dropped_count);
75937+ atomic_inc_unchecked(&dropped_count);
75938 return;
75939 }
75940 entry = ring_buffer_event_data(event);
75941diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
75942index b6c12c6..41fdc53 100644
75943--- a/kernel/trace/trace_output.c
75944+++ b/kernel/trace/trace_output.c
75945@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s, struct path *path)
75946 return 0;
75947 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
75948 if (!IS_ERR(p)) {
75949- p = mangle_path(s->buffer + s->len, p, "\n");
75950+ p = mangle_path(s->buffer + s->len, p, "\n\\");
75951 if (p) {
75952 s->len = p - s->buffer;
75953 return 1;
75954diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
75955index 8504ac7..ecf0adb 100644
75956--- a/kernel/trace/trace_stack.c
75957+++ b/kernel/trace/trace_stack.c
75958@@ -50,7 +50,7 @@ static inline void check_stack(void)
75959 return;
75960
75961 /* we do not handle interrupt stacks yet */
75962- if (!object_is_on_stack(&this_size))
75963+ if (!object_starts_on_stack(&this_size))
75964 return;
75965
75966 local_irq_save(flags);
75967diff --git a/kernel/trace/trace_workqueue.c b/kernel/trace/trace_workqueue.c
75968index 40cafb0..d5ead43 100644
75969--- a/kernel/trace/trace_workqueue.c
75970+++ b/kernel/trace/trace_workqueue.c
75971@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
75972 int cpu;
75973 pid_t pid;
75974 /* Can be inserted from interrupt or user context, need to be atomic */
75975- atomic_t inserted;
75976+ atomic_unchecked_t inserted;
75977 /*
75978 * Don't need to be atomic, works are serialized in a single workqueue thread
75979 * on a single CPU.
75980@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
75981 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
75982 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
75983 if (node->pid == wq_thread->pid) {
75984- atomic_inc(&node->inserted);
75985+ atomic_inc_unchecked(&node->inserted);
75986 goto found;
75987 }
75988 }
75989@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct seq_file *s, void *p)
75990 tsk = get_pid_task(pid, PIDTYPE_PID);
75991 if (tsk) {
75992 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
75993- atomic_read(&cws->inserted), cws->executed,
75994+ atomic_read_unchecked(&cws->inserted), cws->executed,
75995 tsk->comm);
75996 put_task_struct(tsk);
75997 }
75998diff --git a/kernel/user.c b/kernel/user.c
75999index 1b91701..8795237 100644
76000--- a/kernel/user.c
76001+++ b/kernel/user.c
76002@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct user_namespace *ns, uid_t uid)
76003 spin_lock_irq(&uidhash_lock);
76004 up = uid_hash_find(uid, hashent);
76005 if (up) {
76006+ put_user_ns(ns);
76007 key_put(new->uid_keyring);
76008 key_put(new->session_keyring);
76009 kmem_cache_free(uid_cachep, new);
76010diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
76011index 234ceb1..ad74049 100644
76012--- a/lib/Kconfig.debug
76013+++ b/lib/Kconfig.debug
76014@@ -905,7 +905,7 @@ config LATENCYTOP
76015 select STACKTRACE
76016 select SCHEDSTATS
76017 select SCHED_DEBUG
76018- depends on HAVE_LATENCYTOP_SUPPORT
76019+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
76020 help
76021 Enable this option if you want to use the LatencyTOP tool
76022 to find out which userspace is blocking on what kernel operations.
76023diff --git a/lib/bitmap.c b/lib/bitmap.c
76024index 7025658..8d14cab 100644
76025--- a/lib/bitmap.c
76026+++ b/lib/bitmap.c
76027@@ -341,7 +341,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
76028 {
76029 int c, old_c, totaldigits, ndigits, nchunks, nbits;
76030 u32 chunk;
76031- const char __user *ubuf = buf;
76032+ const char __user *ubuf = (const char __force_user *)buf;
76033
76034 bitmap_zero(maskp, nmaskbits);
76035
76036@@ -426,7 +426,7 @@ int bitmap_parse_user(const char __user *ubuf,
76037 {
76038 if (!access_ok(VERIFY_READ, ubuf, ulen))
76039 return -EFAULT;
76040- return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
76041+ return __bitmap_parse((const char __force_kernel *)ubuf, ulen, 1, maskp, nmaskbits);
76042 }
76043 EXPORT_SYMBOL(bitmap_parse_user);
76044
76045diff --git a/lib/bug.c b/lib/bug.c
76046index 300e41a..2779eb0 100644
76047--- a/lib/bug.c
76048+++ b/lib/bug.c
76049@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
76050 return BUG_TRAP_TYPE_NONE;
76051
76052 bug = find_bug(bugaddr);
76053+ if (!bug)
76054+ return BUG_TRAP_TYPE_NONE;
76055
76056 printk(KERN_EMERG "------------[ cut here ]------------\n");
76057
76058diff --git a/lib/debugobjects.c b/lib/debugobjects.c
76059index 2b413db..e21d207 100644
76060--- a/lib/debugobjects.c
76061+++ b/lib/debugobjects.c
76062@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
76063 if (limit > 4)
76064 return;
76065
76066- is_on_stack = object_is_on_stack(addr);
76067+ is_on_stack = object_starts_on_stack(addr);
76068 if (is_on_stack == onstack)
76069 return;
76070
76071diff --git a/lib/devres.c b/lib/devres.c
76072index 72c8909..7543868 100644
76073--- a/lib/devres.c
76074+++ b/lib/devres.c
76075@@ -80,7 +80,7 @@ void devm_iounmap(struct device *dev, void __iomem *addr)
76076 {
76077 iounmap(addr);
76078 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
76079- (void *)addr));
76080+ (void __force *)addr));
76081 }
76082 EXPORT_SYMBOL(devm_iounmap);
76083
76084@@ -140,7 +140,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
76085 {
76086 ioport_unmap(addr);
76087 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
76088- devm_ioport_map_match, (void *)addr));
76089+ devm_ioport_map_match, (void __force *)addr));
76090 }
76091 EXPORT_SYMBOL(devm_ioport_unmap);
76092
76093diff --git a/lib/dma-debug.c b/lib/dma-debug.c
76094index 084e879..0674448 100644
76095--- a/lib/dma-debug.c
76096+++ b/lib/dma-debug.c
76097@@ -861,7 +861,7 @@ out:
76098
76099 static void check_for_stack(struct device *dev, void *addr)
76100 {
76101- if (object_is_on_stack(addr))
76102+ if (object_starts_on_stack(addr))
76103 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
76104 "stack [addr=%p]\n", addr);
76105 }
76106diff --git a/lib/idr.c b/lib/idr.c
76107index eda7ba3..915dfae 100644
76108--- a/lib/idr.c
76109+++ b/lib/idr.c
76110@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
76111 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
76112
76113 /* if already at the top layer, we need to grow */
76114- if (id >= 1 << (idp->layers * IDR_BITS)) {
76115+ if (id >= (1 << (idp->layers * IDR_BITS))) {
76116 *starting_id = id;
76117 return IDR_NEED_TO_GROW;
76118 }
76119diff --git a/lib/inflate.c b/lib/inflate.c
76120index d102559..4215f31 100644
76121--- a/lib/inflate.c
76122+++ b/lib/inflate.c
76123@@ -266,7 +266,7 @@ static void free(void *where)
76124 malloc_ptr = free_mem_ptr;
76125 }
76126 #else
76127-#define malloc(a) kmalloc(a, GFP_KERNEL)
76128+#define malloc(a) kmalloc((a), GFP_KERNEL)
76129 #define free(a) kfree(a)
76130 #endif
76131
76132diff --git a/lib/kobject.c b/lib/kobject.c
76133index b512b74..8115eb1 100644
76134--- a/lib/kobject.c
76135+++ b/lib/kobject.c
76136@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr,
76137 return ret;
76138 }
76139
76140-struct sysfs_ops kobj_sysfs_ops = {
76141+const struct sysfs_ops kobj_sysfs_ops = {
76142 .show = kobj_attr_show,
76143 .store = kobj_attr_store,
76144 };
76145@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
76146 * If the kset was not able to be created, NULL will be returned.
76147 */
76148 static struct kset *kset_create(const char *name,
76149- struct kset_uevent_ops *uevent_ops,
76150+ const struct kset_uevent_ops *uevent_ops,
76151 struct kobject *parent_kobj)
76152 {
76153 struct kset *kset;
76154@@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name,
76155 * If the kset was not able to be created, NULL will be returned.
76156 */
76157 struct kset *kset_create_and_add(const char *name,
76158- struct kset_uevent_ops *uevent_ops,
76159+ const struct kset_uevent_ops *uevent_ops,
76160 struct kobject *parent_kobj)
76161 {
76162 struct kset *kset;
76163diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c
76164index 507b821..0bf8ed0 100644
76165--- a/lib/kobject_uevent.c
76166+++ b/lib/kobject_uevent.c
76167@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action,
76168 const char *subsystem;
76169 struct kobject *top_kobj;
76170 struct kset *kset;
76171- struct kset_uevent_ops *uevent_ops;
76172+ const struct kset_uevent_ops *uevent_ops;
76173 u64 seq;
76174 int i = 0;
76175 int retval = 0;
76176diff --git a/lib/kref.c b/lib/kref.c
76177index 9ecd6e8..12c94c1 100644
76178--- a/lib/kref.c
76179+++ b/lib/kref.c
76180@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
76181 */
76182 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
76183 {
76184- WARN_ON(release == NULL);
76185+ BUG_ON(release == NULL);
76186 WARN_ON(release == (void (*)(struct kref *))kfree);
76187
76188 if (atomic_dec_and_test(&kref->refcount)) {
76189diff --git a/lib/parser.c b/lib/parser.c
76190index b00d020..1b34325 100644
76191--- a/lib/parser.c
76192+++ b/lib/parser.c
76193@@ -126,7 +126,7 @@ static int match_number(substring_t *s, int *result, int base)
76194 char *buf;
76195 int ret;
76196
76197- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
76198+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
76199 if (!buf)
76200 return -ENOMEM;
76201 memcpy(buf, s->from, s->to - s->from);
76202diff --git a/lib/radix-tree.c b/lib/radix-tree.c
76203index 92cdd99..a8149d7 100644
76204--- a/lib/radix-tree.c
76205+++ b/lib/radix-tree.c
76206@@ -81,7 +81,7 @@ struct radix_tree_preload {
76207 int nr;
76208 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
76209 };
76210-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
76211+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
76212
76213 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
76214 {
76215diff --git a/lib/random32.c b/lib/random32.c
76216index 217d5c4..45aba8a 100644
76217--- a/lib/random32.c
76218+++ b/lib/random32.c
76219@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *state)
76220 */
76221 static inline u32 __seed(u32 x, u32 m)
76222 {
76223- return (x < m) ? x + m : x;
76224+ return (x <= m) ? x + m + 1 : x;
76225 }
76226
76227 /**
76228diff --git a/lib/vsprintf.c b/lib/vsprintf.c
76229index 33bed5e..1477e46 100644
76230--- a/lib/vsprintf.c
76231+++ b/lib/vsprintf.c
76232@@ -16,6 +16,9 @@
76233 * - scnprintf and vscnprintf
76234 */
76235
76236+#ifdef CONFIG_GRKERNSEC_HIDESYM
76237+#define __INCLUDED_BY_HIDESYM 1
76238+#endif
76239 #include <stdarg.h>
76240 #include <linux/module.h>
76241 #include <linux/types.h>
76242@@ -546,12 +549,12 @@ static char *number(char *buf, char *end, unsigned long long num,
76243 return buf;
76244 }
76245
76246-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
76247+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
76248 {
76249 int len, i;
76250
76251 if ((unsigned long)s < PAGE_SIZE)
76252- s = "<NULL>";
76253+ s = "(null)";
76254
76255 len = strnlen(s, spec.precision);
76256
76257@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, char *end, void *ptr,
76258 unsigned long value = (unsigned long) ptr;
76259 #ifdef CONFIG_KALLSYMS
76260 char sym[KSYM_SYMBOL_LEN];
76261- if (ext != 'f' && ext != 's')
76262+ if (ext != 'f' && ext != 's' && ext != 'a')
76263 sprint_symbol(sym, value);
76264 else
76265 kallsyms_lookup(value, NULL, NULL, NULL, sym);
76266@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr,
76267 * - 'f' For simple symbolic function names without offset
76268 * - 'S' For symbolic direct pointers with offset
76269 * - 's' For symbolic direct pointers without offset
76270+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
76271+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
76272 * - 'R' For a struct resource pointer, it prints the range of
76273 * addresses (not the name nor the flags)
76274 * - 'M' For a 6-byte MAC address, it prints the address in the
76275@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76276 struct printf_spec spec)
76277 {
76278 if (!ptr)
76279- return string(buf, end, "(null)", spec);
76280+ return string(buf, end, "(nil)", spec);
76281
76282 switch (*fmt) {
76283 case 'F':
76284@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr,
76285 case 's':
76286 /* Fallthrough */
76287 case 'S':
76288+#ifdef CONFIG_GRKERNSEC_HIDESYM
76289+ break;
76290+#else
76291+ return symbol_string(buf, end, ptr, spec, *fmt);
76292+#endif
76293+ case 'a':
76294+ /* Fallthrough */
76295+ case 'A':
76296 return symbol_string(buf, end, ptr, spec, *fmt);
76297 case 'R':
76298 return resource_string(buf, end, ptr, spec);
76299@@ -1445,7 +1458,7 @@ do { \
76300 size_t len;
76301 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
76302 || (unsigned long)save_str < PAGE_SIZE)
76303- save_str = "<NULL>";
76304+ save_str = "(null)";
76305 len = strlen(save_str);
76306 if (str + len + 1 < end)
76307 memcpy(str, save_str, len + 1);
76308@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76309 typeof(type) value; \
76310 if (sizeof(type) == 8) { \
76311 args = PTR_ALIGN(args, sizeof(u32)); \
76312- *(u32 *)&value = *(u32 *)args; \
76313- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
76314+ *(u32 *)&value = *(const u32 *)args; \
76315+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
76316 } else { \
76317 args = PTR_ALIGN(args, sizeof(type)); \
76318- value = *(typeof(type) *)args; \
76319+ value = *(const typeof(type) *)args; \
76320 } \
76321 args += sizeof(type); \
76322 value; \
76323@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
76324 const char *str_arg = args;
76325 size_t len = strlen(str_arg);
76326 args += len + 1;
76327- str = string(str, end, (char *)str_arg, spec);
76328+ str = string(str, end, str_arg, spec);
76329 break;
76330 }
76331
76332diff --git a/localversion-grsec b/localversion-grsec
76333new file mode 100644
76334index 0000000..7cd6065
76335--- /dev/null
76336+++ b/localversion-grsec
76337@@ -0,0 +1 @@
76338+-grsec
76339diff --git a/mm/Kconfig b/mm/Kconfig
76340index 2c19c0b..f3c3f83 100644
76341--- a/mm/Kconfig
76342+++ b/mm/Kconfig
76343@@ -228,7 +228,7 @@ config KSM
76344 config DEFAULT_MMAP_MIN_ADDR
76345 int "Low address space to protect from user allocation"
76346 depends on MMU
76347- default 4096
76348+ default 65536
76349 help
76350 This is the portion of low virtual memory which should be protected
76351 from userspace allocation. Keeping a user from writing to low pages
76352diff --git a/mm/backing-dev.c b/mm/backing-dev.c
76353index 67a33a5..094dcf1 100644
76354--- a/mm/backing-dev.c
76355+++ b/mm/backing-dev.c
76356@@ -272,7 +272,7 @@ static void bdi_task_init(struct backing_dev_info *bdi,
76357 list_add_tail_rcu(&wb->list, &bdi->wb_list);
76358 spin_unlock(&bdi->wb_lock);
76359
76360- tsk->flags |= PF_FLUSHER | PF_SWAPWRITE;
76361+ tsk->flags |= PF_SWAPWRITE;
76362 set_freezable();
76363
76364 /*
76365@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rcu_head *head)
76366 * Add the default flusher task that gets created for any bdi
76367 * that has dirty data pending writeout
76368 */
76369-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76370+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
76371 {
76372 if (!bdi_cap_writeback_dirty(bdi))
76373 return;
76374diff --git a/mm/filemap.c b/mm/filemap.c
76375index a1fe378..e26702f 100644
76376--- a/mm/filemap.c
76377+++ b/mm/filemap.c
76378@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
76379 struct address_space *mapping = file->f_mapping;
76380
76381 if (!mapping->a_ops->readpage)
76382- return -ENOEXEC;
76383+ return -ENODEV;
76384 file_accessed(file);
76385 vma->vm_ops = &generic_file_vm_ops;
76386 vma->vm_flags |= VM_CAN_NONLINEAR;
76387@@ -2024,6 +2024,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
76388 *pos = i_size_read(inode);
76389
76390 if (limit != RLIM_INFINITY) {
76391+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
76392 if (*pos >= limit) {
76393 send_sig(SIGXFSZ, current, 0);
76394 return -EFBIG;
76395diff --git a/mm/fremap.c b/mm/fremap.c
76396index b6ec85a..a24ac22 100644
76397--- a/mm/fremap.c
76398+++ b/mm/fremap.c
76399@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76400 retry:
76401 vma = find_vma(mm, start);
76402
76403+#ifdef CONFIG_PAX_SEGMEXEC
76404+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
76405+ goto out;
76406+#endif
76407+
76408 /*
76409 * Make sure the vma is shared, that it supports prefaulting,
76410 * and that the remapped range is valid and fully within
76411@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
76412 /*
76413 * drop PG_Mlocked flag for over-mapped range
76414 */
76415- unsigned int saved_flags = vma->vm_flags;
76416+ unsigned long saved_flags = vma->vm_flags;
76417 munlock_vma_pages_range(vma, start, start + size);
76418 vma->vm_flags = saved_flags;
76419 }
76420diff --git a/mm/highmem.c b/mm/highmem.c
76421index 9c1e627..5ca9447 100644
76422--- a/mm/highmem.c
76423+++ b/mm/highmem.c
76424@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
76425 * So no dangers, even with speculative execution.
76426 */
76427 page = pte_page(pkmap_page_table[i]);
76428+ pax_open_kernel();
76429 pte_clear(&init_mm, (unsigned long)page_address(page),
76430 &pkmap_page_table[i]);
76431-
76432+ pax_close_kernel();
76433 set_page_address(page, NULL);
76434 need_flush = 1;
76435 }
76436@@ -177,9 +178,11 @@ start:
76437 }
76438 }
76439 vaddr = PKMAP_ADDR(last_pkmap_nr);
76440+
76441+ pax_open_kernel();
76442 set_pte_at(&init_mm, vaddr,
76443 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
76444-
76445+ pax_close_kernel();
76446 pkmap_count[last_pkmap_nr] = 1;
76447 set_page_address(page, (void *)vaddr);
76448
76449diff --git a/mm/hugetlb.c b/mm/hugetlb.c
76450index 5e1e508..ac70275 100644
76451--- a/mm/hugetlb.c
76452+++ b/mm/hugetlb.c
76453@@ -869,6 +869,7 @@ free:
76454 list_del(&page->lru);
76455 enqueue_huge_page(h, page);
76456 }
76457+ spin_unlock(&hugetlb_lock);
76458
76459 /* Free unnecessary surplus pages to the buddy allocator */
76460 if (!list_empty(&surplus_list)) {
76461@@ -1933,6 +1934,26 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
76462 return 1;
76463 }
76464
76465+#ifdef CONFIG_PAX_SEGMEXEC
76466+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
76467+{
76468+ struct mm_struct *mm = vma->vm_mm;
76469+ struct vm_area_struct *vma_m;
76470+ unsigned long address_m;
76471+ pte_t *ptep_m;
76472+
76473+ vma_m = pax_find_mirror_vma(vma);
76474+ if (!vma_m)
76475+ return;
76476+
76477+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76478+ address_m = address + SEGMEXEC_TASK_SIZE;
76479+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
76480+ get_page(page_m);
76481+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
76482+}
76483+#endif
76484+
76485 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
76486 unsigned long address, pte_t *ptep, pte_t pte,
76487 struct page *pagecache_page)
76488@@ -2004,6 +2025,11 @@ retry_avoidcopy:
76489 huge_ptep_clear_flush(vma, address, ptep);
76490 set_huge_pte_at(mm, address, ptep,
76491 make_huge_pte(vma, new_page, 1));
76492+
76493+#ifdef CONFIG_PAX_SEGMEXEC
76494+ pax_mirror_huge_pte(vma, address, new_page);
76495+#endif
76496+
76497 /* Make the old page be freed below */
76498 new_page = old_page;
76499 }
76500@@ -2135,6 +2161,10 @@ retry:
76501 && (vma->vm_flags & VM_SHARED)));
76502 set_huge_pte_at(mm, address, ptep, new_pte);
76503
76504+#ifdef CONFIG_PAX_SEGMEXEC
76505+ pax_mirror_huge_pte(vma, address, page);
76506+#endif
76507+
76508 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
76509 /* Optimization, do the COW without a second fault */
76510 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
76511@@ -2163,6 +2193,28 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
76512 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
76513 struct hstate *h = hstate_vma(vma);
76514
76515+#ifdef CONFIG_PAX_SEGMEXEC
76516+ struct vm_area_struct *vma_m;
76517+
76518+ vma_m = pax_find_mirror_vma(vma);
76519+ if (vma_m) {
76520+ unsigned long address_m;
76521+
76522+ if (vma->vm_start > vma_m->vm_start) {
76523+ address_m = address;
76524+ address -= SEGMEXEC_TASK_SIZE;
76525+ vma = vma_m;
76526+ h = hstate_vma(vma);
76527+ } else
76528+ address_m = address + SEGMEXEC_TASK_SIZE;
76529+
76530+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
76531+ return VM_FAULT_OOM;
76532+ address_m &= HPAGE_MASK;
76533+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
76534+ }
76535+#endif
76536+
76537 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
76538 if (!ptep)
76539 return VM_FAULT_OOM;
76540diff --git a/mm/internal.h b/mm/internal.h
76541index f03e8e2..7354343 100644
76542--- a/mm/internal.h
76543+++ b/mm/internal.h
76544@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page *page);
76545 * in mm/page_alloc.c
76546 */
76547 extern void __free_pages_bootmem(struct page *page, unsigned int order);
76548+extern void free_compound_page(struct page *page);
76549 extern void prep_compound_page(struct page *page, unsigned long order);
76550
76551
76552diff --git a/mm/kmemleak.c b/mm/kmemleak.c
76553index c346660..b47382f 100644
76554--- a/mm/kmemleak.c
76555+++ b/mm/kmemleak.c
76556@@ -358,7 +358,7 @@ static void print_unreferenced(struct seq_file *seq,
76557
76558 for (i = 0; i < object->trace_len; i++) {
76559 void *ptr = (void *)object->trace[i];
76560- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
76561+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
76562 }
76563 }
76564
76565diff --git a/mm/maccess.c b/mm/maccess.c
76566index 9073695..1127f348 100644
76567--- a/mm/maccess.c
76568+++ b/mm/maccess.c
76569@@ -14,7 +14,7 @@
76570 * Safely read from address @src to the buffer at @dst. If a kernel fault
76571 * happens, handle that and return -EFAULT.
76572 */
76573-long probe_kernel_read(void *dst, void *src, size_t size)
76574+long probe_kernel_read(void *dst, const void *src, size_t size)
76575 {
76576 long ret;
76577 mm_segment_t old_fs = get_fs();
76578@@ -22,7 +22,7 @@ long probe_kernel_read(void *dst, void *src, size_t size)
76579 set_fs(KERNEL_DS);
76580 pagefault_disable();
76581 ret = __copy_from_user_inatomic(dst,
76582- (__force const void __user *)src, size);
76583+ (const void __force_user *)src, size);
76584 pagefault_enable();
76585 set_fs(old_fs);
76586
76587@@ -39,14 +39,14 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
76588 * Safely write to address @dst from the buffer at @src. If a kernel fault
76589 * happens, handle that and return -EFAULT.
76590 */
76591-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
76592+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
76593 {
76594 long ret;
76595 mm_segment_t old_fs = get_fs();
76596
76597 set_fs(KERNEL_DS);
76598 pagefault_disable();
76599- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
76600+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
76601 pagefault_enable();
76602 set_fs(old_fs);
76603
76604diff --git a/mm/madvise.c b/mm/madvise.c
76605index 35b1479..499f7d4 100644
76606--- a/mm/madvise.c
76607+++ b/mm/madvise.c
76608@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
76609 pgoff_t pgoff;
76610 unsigned long new_flags = vma->vm_flags;
76611
76612+#ifdef CONFIG_PAX_SEGMEXEC
76613+ struct vm_area_struct *vma_m;
76614+#endif
76615+
76616 switch (behavior) {
76617 case MADV_NORMAL:
76618 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
76619@@ -103,6 +107,13 @@ success:
76620 /*
76621 * vm_flags is protected by the mmap_sem held in write mode.
76622 */
76623+
76624+#ifdef CONFIG_PAX_SEGMEXEC
76625+ vma_m = pax_find_mirror_vma(vma);
76626+ if (vma_m)
76627+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
76628+#endif
76629+
76630 vma->vm_flags = new_flags;
76631
76632 out:
76633@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76634 struct vm_area_struct ** prev,
76635 unsigned long start, unsigned long end)
76636 {
76637+
76638+#ifdef CONFIG_PAX_SEGMEXEC
76639+ struct vm_area_struct *vma_m;
76640+#endif
76641+
76642 *prev = vma;
76643 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
76644 return -EINVAL;
76645@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
76646 zap_page_range(vma, start, end - start, &details);
76647 } else
76648 zap_page_range(vma, start, end - start, NULL);
76649+
76650+#ifdef CONFIG_PAX_SEGMEXEC
76651+ vma_m = pax_find_mirror_vma(vma);
76652+ if (vma_m) {
76653+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
76654+ struct zap_details details = {
76655+ .nonlinear_vma = vma_m,
76656+ .last_index = ULONG_MAX,
76657+ };
76658+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
76659+ } else
76660+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
76661+ }
76662+#endif
76663+
76664 return 0;
76665 }
76666
76667@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
76668 if (end < start)
76669 goto out;
76670
76671+#ifdef CONFIG_PAX_SEGMEXEC
76672+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
76673+ if (end > SEGMEXEC_TASK_SIZE)
76674+ goto out;
76675+ } else
76676+#endif
76677+
76678+ if (end > TASK_SIZE)
76679+ goto out;
76680+
76681 error = 0;
76682 if (end == start)
76683 goto out;
76684diff --git a/mm/memory-failure.c b/mm/memory-failure.c
76685index 8aeba53..b4a4198 100644
76686--- a/mm/memory-failure.c
76687+++ b/mm/memory-failure.c
76688@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
76689
76690 int sysctl_memory_failure_recovery __read_mostly = 1;
76691
76692-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76693+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
76694
76695 /*
76696 * Send all the processes who have the page mapped an ``action optional''
76697@@ -64,7 +64,7 @@ static int kill_proc_ao(struct task_struct *t, unsigned long addr, int trapno,
76698 si.si_signo = SIGBUS;
76699 si.si_errno = 0;
76700 si.si_code = BUS_MCEERR_AO;
76701- si.si_addr = (void *)addr;
76702+ si.si_addr = (void __user *)addr;
76703 #ifdef __ARCH_SI_TRAPNO
76704 si.si_trapno = trapno;
76705 #endif
76706@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn, int trapno, int ref)
76707 return 0;
76708 }
76709
76710- atomic_long_add(1, &mce_bad_pages);
76711+ atomic_long_add_unchecked(1, &mce_bad_pages);
76712
76713 /*
76714 * We need/can do nothing about count=0 pages.
76715diff --git a/mm/memory.c b/mm/memory.c
76716index 6c836d3..48f3264 100644
76717--- a/mm/memory.c
76718+++ b/mm/memory.c
76719@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
76720 return;
76721
76722 pmd = pmd_offset(pud, start);
76723+
76724+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
76725 pud_clear(pud);
76726 pmd_free_tlb(tlb, pmd, start);
76727+#endif
76728+
76729 }
76730
76731 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76732@@ -219,9 +223,12 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
76733 if (end - 1 > ceiling - 1)
76734 return;
76735
76736+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
76737 pud = pud_offset(pgd, start);
76738 pgd_clear(pgd);
76739 pud_free_tlb(tlb, pud, start);
76740+#endif
76741+
76742 }
76743
76744 /*
76745@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76746 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
76747 i = 0;
76748
76749- do {
76750+ while (nr_pages) {
76751 struct vm_area_struct *vma;
76752
76753- vma = find_extend_vma(mm, start);
76754+ vma = find_vma(mm, start);
76755 if (!vma && in_gate_area(tsk, start)) {
76756 unsigned long pg = start & PAGE_MASK;
76757 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
76758@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76759 continue;
76760 }
76761
76762- if (!vma ||
76763+ if (!vma || start < vma->vm_start ||
76764 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
76765 !(vm_flags & vma->vm_flags))
76766 return i ? : -EFAULT;
76767@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
76768 start += PAGE_SIZE;
76769 nr_pages--;
76770 } while (nr_pages && start < vma->vm_end);
76771- } while (nr_pages);
76772+ }
76773 return i;
76774 }
76775
76776@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
76777 page_add_file_rmap(page);
76778 set_pte_at(mm, addr, pte, mk_pte(page, prot));
76779
76780+#ifdef CONFIG_PAX_SEGMEXEC
76781+ pax_mirror_file_pte(vma, addr, page, ptl);
76782+#endif
76783+
76784 retval = 0;
76785 pte_unmap_unlock(pte, ptl);
76786 return retval;
76787@@ -1560,10 +1571,22 @@ out:
76788 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
76789 struct page *page)
76790 {
76791+
76792+#ifdef CONFIG_PAX_SEGMEXEC
76793+ struct vm_area_struct *vma_m;
76794+#endif
76795+
76796 if (addr < vma->vm_start || addr >= vma->vm_end)
76797 return -EFAULT;
76798 if (!page_count(page))
76799 return -EINVAL;
76800+
76801+#ifdef CONFIG_PAX_SEGMEXEC
76802+ vma_m = pax_find_mirror_vma(vma);
76803+ if (vma_m)
76804+ vma_m->vm_flags |= VM_INSERTPAGE;
76805+#endif
76806+
76807 vma->vm_flags |= VM_INSERTPAGE;
76808 return insert_page(vma, addr, page, vma->vm_page_prot);
76809 }
76810@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
76811 unsigned long pfn)
76812 {
76813 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
76814+ BUG_ON(vma->vm_mirror);
76815
76816 if (addr < vma->vm_start || addr >= vma->vm_end)
76817 return -EFAULT;
76818@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
76819 copy_user_highpage(dst, src, va, vma);
76820 }
76821
76822+#ifdef CONFIG_PAX_SEGMEXEC
76823+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
76824+{
76825+ struct mm_struct *mm = vma->vm_mm;
76826+ spinlock_t *ptl;
76827+ pte_t *pte, entry;
76828+
76829+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
76830+ entry = *pte;
76831+ if (!pte_present(entry)) {
76832+ if (!pte_none(entry)) {
76833+ BUG_ON(pte_file(entry));
76834+ free_swap_and_cache(pte_to_swp_entry(entry));
76835+ pte_clear_not_present_full(mm, address, pte, 0);
76836+ }
76837+ } else {
76838+ struct page *page;
76839+
76840+ flush_cache_page(vma, address, pte_pfn(entry));
76841+ entry = ptep_clear_flush(vma, address, pte);
76842+ BUG_ON(pte_dirty(entry));
76843+ page = vm_normal_page(vma, address, entry);
76844+ if (page) {
76845+ update_hiwater_rss(mm);
76846+ if (PageAnon(page))
76847+ dec_mm_counter(mm, anon_rss);
76848+ else
76849+ dec_mm_counter(mm, file_rss);
76850+ page_remove_rmap(page);
76851+ page_cache_release(page);
76852+ }
76853+ }
76854+ pte_unmap_unlock(pte, ptl);
76855+}
76856+
76857+/* PaX: if vma is mirrored, synchronize the mirror's PTE
76858+ *
76859+ * the ptl of the lower mapped page is held on entry and is not released on exit
76860+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
76861+ */
76862+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
76863+{
76864+ struct mm_struct *mm = vma->vm_mm;
76865+ unsigned long address_m;
76866+ spinlock_t *ptl_m;
76867+ struct vm_area_struct *vma_m;
76868+ pmd_t *pmd_m;
76869+ pte_t *pte_m, entry_m;
76870+
76871+ BUG_ON(!page_m || !PageAnon(page_m));
76872+
76873+ vma_m = pax_find_mirror_vma(vma);
76874+ if (!vma_m)
76875+ return;
76876+
76877+ BUG_ON(!PageLocked(page_m));
76878+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76879+ address_m = address + SEGMEXEC_TASK_SIZE;
76880+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76881+ pte_m = pte_offset_map_nested(pmd_m, address_m);
76882+ ptl_m = pte_lockptr(mm, pmd_m);
76883+ if (ptl != ptl_m) {
76884+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76885+ if (!pte_none(*pte_m))
76886+ goto out;
76887+ }
76888+
76889+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
76890+ page_cache_get(page_m);
76891+ page_add_anon_rmap(page_m, vma_m, address_m);
76892+ inc_mm_counter(mm, anon_rss);
76893+ set_pte_at(mm, address_m, pte_m, entry_m);
76894+ update_mmu_cache(vma_m, address_m, entry_m);
76895+out:
76896+ if (ptl != ptl_m)
76897+ spin_unlock(ptl_m);
76898+ pte_unmap_nested(pte_m);
76899+ unlock_page(page_m);
76900+}
76901+
76902+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
76903+{
76904+ struct mm_struct *mm = vma->vm_mm;
76905+ unsigned long address_m;
76906+ spinlock_t *ptl_m;
76907+ struct vm_area_struct *vma_m;
76908+ pmd_t *pmd_m;
76909+ pte_t *pte_m, entry_m;
76910+
76911+ BUG_ON(!page_m || PageAnon(page_m));
76912+
76913+ vma_m = pax_find_mirror_vma(vma);
76914+ if (!vma_m)
76915+ return;
76916+
76917+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76918+ address_m = address + SEGMEXEC_TASK_SIZE;
76919+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76920+ pte_m = pte_offset_map_nested(pmd_m, address_m);
76921+ ptl_m = pte_lockptr(mm, pmd_m);
76922+ if (ptl != ptl_m) {
76923+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76924+ if (!pte_none(*pte_m))
76925+ goto out;
76926+ }
76927+
76928+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
76929+ page_cache_get(page_m);
76930+ page_add_file_rmap(page_m);
76931+ inc_mm_counter(mm, file_rss);
76932+ set_pte_at(mm, address_m, pte_m, entry_m);
76933+ update_mmu_cache(vma_m, address_m, entry_m);
76934+out:
76935+ if (ptl != ptl_m)
76936+ spin_unlock(ptl_m);
76937+ pte_unmap_nested(pte_m);
76938+}
76939+
76940+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
76941+{
76942+ struct mm_struct *mm = vma->vm_mm;
76943+ unsigned long address_m;
76944+ spinlock_t *ptl_m;
76945+ struct vm_area_struct *vma_m;
76946+ pmd_t *pmd_m;
76947+ pte_t *pte_m, entry_m;
76948+
76949+ vma_m = pax_find_mirror_vma(vma);
76950+ if (!vma_m)
76951+ return;
76952+
76953+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
76954+ address_m = address + SEGMEXEC_TASK_SIZE;
76955+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
76956+ pte_m = pte_offset_map_nested(pmd_m, address_m);
76957+ ptl_m = pte_lockptr(mm, pmd_m);
76958+ if (ptl != ptl_m) {
76959+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
76960+ if (!pte_none(*pte_m))
76961+ goto out;
76962+ }
76963+
76964+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
76965+ set_pte_at(mm, address_m, pte_m, entry_m);
76966+out:
76967+ if (ptl != ptl_m)
76968+ spin_unlock(ptl_m);
76969+ pte_unmap_nested(pte_m);
76970+}
76971+
76972+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
76973+{
76974+ struct page *page_m;
76975+ pte_t entry;
76976+
76977+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
76978+ goto out;
76979+
76980+ entry = *pte;
76981+ page_m = vm_normal_page(vma, address, entry);
76982+ if (!page_m)
76983+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
76984+ else if (PageAnon(page_m)) {
76985+ if (pax_find_mirror_vma(vma)) {
76986+ pte_unmap_unlock(pte, ptl);
76987+ lock_page(page_m);
76988+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
76989+ if (pte_same(entry, *pte))
76990+ pax_mirror_anon_pte(vma, address, page_m, ptl);
76991+ else
76992+ unlock_page(page_m);
76993+ }
76994+ } else
76995+ pax_mirror_file_pte(vma, address, page_m, ptl);
76996+
76997+out:
76998+ pte_unmap_unlock(pte, ptl);
76999+}
77000+#endif
77001+
77002 /*
77003 * This routine handles present pages, when users try to write
77004 * to a shared page. It is done by copying the page to a new address
77005@@ -2156,6 +2360,12 @@ gotten:
77006 */
77007 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77008 if (likely(pte_same(*page_table, orig_pte))) {
77009+
77010+#ifdef CONFIG_PAX_SEGMEXEC
77011+ if (pax_find_mirror_vma(vma))
77012+ BUG_ON(!trylock_page(new_page));
77013+#endif
77014+
77015 if (old_page) {
77016 if (!PageAnon(old_page)) {
77017 dec_mm_counter(mm, file_rss);
77018@@ -2207,6 +2417,10 @@ gotten:
77019 page_remove_rmap(old_page);
77020 }
77021
77022+#ifdef CONFIG_PAX_SEGMEXEC
77023+ pax_mirror_anon_pte(vma, address, new_page, ptl);
77024+#endif
77025+
77026 /* Free the old page.. */
77027 new_page = old_page;
77028 ret |= VM_FAULT_WRITE;
77029@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77030 swap_free(entry);
77031 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
77032 try_to_free_swap(page);
77033+
77034+#ifdef CONFIG_PAX_SEGMEXEC
77035+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
77036+#endif
77037+
77038 unlock_page(page);
77039
77040 if (flags & FAULT_FLAG_WRITE) {
77041@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
77042
77043 /* No need to invalidate - it was non-present before */
77044 update_mmu_cache(vma, address, pte);
77045+
77046+#ifdef CONFIG_PAX_SEGMEXEC
77047+ pax_mirror_anon_pte(vma, address, page, ptl);
77048+#endif
77049+
77050 unlock:
77051 pte_unmap_unlock(page_table, ptl);
77052 out:
77053@@ -2632,40 +2856,6 @@ out_release:
77054 }
77055
77056 /*
77057- * This is like a special single-page "expand_{down|up}wards()",
77058- * except we must first make sure that 'address{-|+}PAGE_SIZE'
77059- * doesn't hit another vma.
77060- */
77061-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
77062-{
77063- address &= PAGE_MASK;
77064- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
77065- struct vm_area_struct *prev = vma->vm_prev;
77066-
77067- /*
77068- * Is there a mapping abutting this one below?
77069- *
77070- * That's only ok if it's the same stack mapping
77071- * that has gotten split..
77072- */
77073- if (prev && prev->vm_end == address)
77074- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
77075-
77076- expand_stack(vma, address - PAGE_SIZE);
77077- }
77078- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
77079- struct vm_area_struct *next = vma->vm_next;
77080-
77081- /* As VM_GROWSDOWN but s/below/above/ */
77082- if (next && next->vm_start == address + PAGE_SIZE)
77083- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
77084-
77085- expand_upwards(vma, address + PAGE_SIZE);
77086- }
77087- return 0;
77088-}
77089-
77090-/*
77091 * We enter with non-exclusive mmap_sem (to exclude vma changes,
77092 * but allow concurrent faults), and pte mapped but not yet locked.
77093 * We return with mmap_sem still held, but pte unmapped and unlocked.
77094@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77095 unsigned long address, pte_t *page_table, pmd_t *pmd,
77096 unsigned int flags)
77097 {
77098- struct page *page;
77099+ struct page *page = NULL;
77100 spinlock_t *ptl;
77101 pte_t entry;
77102
77103- pte_unmap(page_table);
77104-
77105- /* Check if we need to add a guard page to the stack */
77106- if (check_stack_guard_page(vma, address) < 0)
77107- return VM_FAULT_SIGBUS;
77108-
77109- /* Use the zero-page for reads */
77110 if (!(flags & FAULT_FLAG_WRITE)) {
77111 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
77112 vma->vm_page_prot));
77113- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
77114+ ptl = pte_lockptr(mm, pmd);
77115+ spin_lock(ptl);
77116 if (!pte_none(*page_table))
77117 goto unlock;
77118 goto setpte;
77119 }
77120
77121 /* Allocate our own private page. */
77122+ pte_unmap(page_table);
77123+
77124 if (unlikely(anon_vma_prepare(vma)))
77125 goto oom;
77126 page = alloc_zeroed_user_highpage_movable(vma, address);
77127@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
77128 if (!pte_none(*page_table))
77129 goto release;
77130
77131+#ifdef CONFIG_PAX_SEGMEXEC
77132+ if (pax_find_mirror_vma(vma))
77133+ BUG_ON(!trylock_page(page));
77134+#endif
77135+
77136 inc_mm_counter(mm, anon_rss);
77137 page_add_new_anon_rmap(page, vma, address);
77138 setpte:
77139@@ -2720,6 +2911,12 @@ setpte:
77140
77141 /* No need to invalidate - it was non-present before */
77142 update_mmu_cache(vma, address, entry);
77143+
77144+#ifdef CONFIG_PAX_SEGMEXEC
77145+ if (page)
77146+ pax_mirror_anon_pte(vma, address, page, ptl);
77147+#endif
77148+
77149 unlock:
77150 pte_unmap_unlock(page_table, ptl);
77151 return 0;
77152@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77153 */
77154 /* Only go through if we didn't race with anybody else... */
77155 if (likely(pte_same(*page_table, orig_pte))) {
77156+
77157+#ifdef CONFIG_PAX_SEGMEXEC
77158+ if (anon && pax_find_mirror_vma(vma))
77159+ BUG_ON(!trylock_page(page));
77160+#endif
77161+
77162 flush_icache_page(vma, page);
77163 entry = mk_pte(page, vma->vm_page_prot);
77164 if (flags & FAULT_FLAG_WRITE)
77165@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77166
77167 /* no need to invalidate: a not-present page won't be cached */
77168 update_mmu_cache(vma, address, entry);
77169+
77170+#ifdef CONFIG_PAX_SEGMEXEC
77171+ if (anon)
77172+ pax_mirror_anon_pte(vma, address, page, ptl);
77173+ else
77174+ pax_mirror_file_pte(vma, address, page, ptl);
77175+#endif
77176+
77177 } else {
77178 if (charged)
77179 mem_cgroup_uncharge_page(page);
77180@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struct mm_struct *mm,
77181 if (flags & FAULT_FLAG_WRITE)
77182 flush_tlb_page(vma, address);
77183 }
77184+
77185+#ifdef CONFIG_PAX_SEGMEXEC
77186+ pax_mirror_pte(vma, address, pte, pmd, ptl);
77187+ return 0;
77188+#endif
77189+
77190 unlock:
77191 pte_unmap_unlock(pte, ptl);
77192 return 0;
77193@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77194 pmd_t *pmd;
77195 pte_t *pte;
77196
77197+#ifdef CONFIG_PAX_SEGMEXEC
77198+ struct vm_area_struct *vma_m;
77199+#endif
77200+
77201 __set_current_state(TASK_RUNNING);
77202
77203 count_vm_event(PGFAULT);
77204@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
77205 if (unlikely(is_vm_hugetlb_page(vma)))
77206 return hugetlb_fault(mm, vma, address, flags);
77207
77208+#ifdef CONFIG_PAX_SEGMEXEC
77209+ vma_m = pax_find_mirror_vma(vma);
77210+ if (vma_m) {
77211+ unsigned long address_m;
77212+ pgd_t *pgd_m;
77213+ pud_t *pud_m;
77214+ pmd_t *pmd_m;
77215+
77216+ if (vma->vm_start > vma_m->vm_start) {
77217+ address_m = address;
77218+ address -= SEGMEXEC_TASK_SIZE;
77219+ vma = vma_m;
77220+ } else
77221+ address_m = address + SEGMEXEC_TASK_SIZE;
77222+
77223+ pgd_m = pgd_offset(mm, address_m);
77224+ pud_m = pud_alloc(mm, pgd_m, address_m);
77225+ if (!pud_m)
77226+ return VM_FAULT_OOM;
77227+ pmd_m = pmd_alloc(mm, pud_m, address_m);
77228+ if (!pmd_m)
77229+ return VM_FAULT_OOM;
77230+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
77231+ return VM_FAULT_OOM;
77232+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
77233+ }
77234+#endif
77235+
77236 pgd = pgd_offset(mm, address);
77237 pud = pud_alloc(mm, pgd, address);
77238 if (!pud)
77239@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
77240 gate_vma.vm_start = FIXADDR_USER_START;
77241 gate_vma.vm_end = FIXADDR_USER_END;
77242 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
77243- gate_vma.vm_page_prot = __P101;
77244+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
77245 /*
77246 * Make sure the vDSO gets into every core dump.
77247 * Dumping its contents makes post-mortem fully interpretable later
77248diff --git a/mm/mempolicy.c b/mm/mempolicy.c
77249index 3c6e3e2..ad9871c 100644
77250--- a/mm/mempolicy.c
77251+++ b/mm/mempolicy.c
77252@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77253 struct vm_area_struct *next;
77254 int err;
77255
77256+#ifdef CONFIG_PAX_SEGMEXEC
77257+ struct vm_area_struct *vma_m;
77258+#endif
77259+
77260 err = 0;
77261 for (; vma && vma->vm_start < end; vma = next) {
77262 next = vma->vm_next;
77263@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_struct *vma, unsigned long start,
77264 err = policy_vma(vma, new);
77265 if (err)
77266 break;
77267+
77268+#ifdef CONFIG_PAX_SEGMEXEC
77269+ vma_m = pax_find_mirror_vma(vma);
77270+ if (vma_m) {
77271+ err = policy_vma(vma_m, new);
77272+ if (err)
77273+ break;
77274+ }
77275+#endif
77276+
77277 }
77278 return err;
77279 }
77280@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start, unsigned long len,
77281
77282 if (end < start)
77283 return -EINVAL;
77284+
77285+#ifdef CONFIG_PAX_SEGMEXEC
77286+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
77287+ if (end > SEGMEXEC_TASK_SIZE)
77288+ return -EINVAL;
77289+ } else
77290+#endif
77291+
77292+ if (end > TASK_SIZE)
77293+ return -EINVAL;
77294+
77295 if (end == start)
77296 return 0;
77297
77298@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77299 if (!mm)
77300 return -EINVAL;
77301
77302+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77303+ if (mm != current->mm &&
77304+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77305+ err = -EPERM;
77306+ goto out;
77307+ }
77308+#endif
77309+
77310 /*
77311 * Check if this process has the right to modify the specified
77312 * process. The right exists if the process has administrative
77313@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
77314 rcu_read_lock();
77315 tcred = __task_cred(task);
77316 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77317- cred->uid != tcred->suid && cred->uid != tcred->uid &&
77318- !capable(CAP_SYS_NICE)) {
77319+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77320 rcu_read_unlock();
77321 err = -EPERM;
77322 goto out;
77323@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, void *v)
77324
77325 if (file) {
77326 seq_printf(m, " file=");
77327- seq_path(m, &file->f_path, "\n\t= ");
77328+ seq_path(m, &file->f_path, "\n\t\\= ");
77329 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
77330 seq_printf(m, " heap");
77331 } else if (vma->vm_start <= mm->start_stack &&
77332diff --git a/mm/migrate.c b/mm/migrate.c
77333index aaca868..2ebecdc 100644
77334--- a/mm/migrate.c
77335+++ b/mm/migrate.c
77336@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struct *mm, struct task_struct *task,
77337 unsigned long chunk_start;
77338 int err;
77339
77340+ pax_track_stack();
77341+
77342 task_nodes = cpuset_mems_allowed(task);
77343
77344 err = -ENOMEM;
77345@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77346 if (!mm)
77347 return -EINVAL;
77348
77349+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
77350+ if (mm != current->mm &&
77351+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
77352+ err = -EPERM;
77353+ goto out;
77354+ }
77355+#endif
77356+
77357 /*
77358 * Check if this process has the right to modify the specified
77359 * process. The right exists if the process has administrative
77360@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
77361 rcu_read_lock();
77362 tcred = __task_cred(task);
77363 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
77364- cred->uid != tcred->suid && cred->uid != tcred->uid &&
77365- !capable(CAP_SYS_NICE)) {
77366+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
77367 rcu_read_unlock();
77368 err = -EPERM;
77369 goto out;
77370diff --git a/mm/mlock.c b/mm/mlock.c
77371index 2d846cf..98134d2 100644
77372--- a/mm/mlock.c
77373+++ b/mm/mlock.c
77374@@ -13,6 +13,7 @@
77375 #include <linux/pagemap.h>
77376 #include <linux/mempolicy.h>
77377 #include <linux/syscalls.h>
77378+#include <linux/security.h>
77379 #include <linux/sched.h>
77380 #include <linux/module.h>
77381 #include <linux/rmap.h>
77382@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
77383 }
77384 }
77385
77386-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
77387-{
77388- return (vma->vm_flags & VM_GROWSDOWN) &&
77389- (vma->vm_start == addr) &&
77390- !vma_stack_continue(vma->vm_prev, addr);
77391-}
77392-
77393 /**
77394 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
77395 * @vma: target vma
77396@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(struct vm_area_struct *vma,
77397 if (vma->vm_flags & VM_WRITE)
77398 gup_flags |= FOLL_WRITE;
77399
77400- /* We don't try to access the guard page of a stack vma */
77401- if (stack_guard_page(vma, start)) {
77402- addr += PAGE_SIZE;
77403- nr_pages--;
77404- }
77405-
77406 while (nr_pages > 0) {
77407 int i;
77408
77409@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
77410 {
77411 unsigned long nstart, end, tmp;
77412 struct vm_area_struct * vma, * prev;
77413- int error;
77414+ int error = -EINVAL;
77415
77416 len = PAGE_ALIGN(len);
77417 end = start + len;
77418@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
77419 return -EINVAL;
77420 if (end == start)
77421 return 0;
77422+ if (end > TASK_SIZE)
77423+ return -EINVAL;
77424+
77425 vma = find_vma_prev(current->mm, start, &prev);
77426 if (!vma || vma->vm_start > start)
77427 return -ENOMEM;
77428@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
77429 for (nstart = start ; ; ) {
77430 unsigned int newflags;
77431
77432+#ifdef CONFIG_PAX_SEGMEXEC
77433+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77434+ break;
77435+#endif
77436+
77437 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
77438
77439 newflags = vma->vm_flags | VM_LOCKED;
77440@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
77441 lock_limit >>= PAGE_SHIFT;
77442
77443 /* check against resource limits */
77444+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
77445 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
77446 error = do_mlock(start, len, 1);
77447 up_write(&current->mm->mmap_sem);
77448@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
77449 static int do_mlockall(int flags)
77450 {
77451 struct vm_area_struct * vma, * prev = NULL;
77452- unsigned int def_flags = 0;
77453
77454 if (flags & MCL_FUTURE)
77455- def_flags = VM_LOCKED;
77456- current->mm->def_flags = def_flags;
77457+ current->mm->def_flags |= VM_LOCKED;
77458+ else
77459+ current->mm->def_flags &= ~VM_LOCKED;
77460 if (flags == MCL_FUTURE)
77461 goto out;
77462
77463 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
77464- unsigned int newflags;
77465+ unsigned long newflags;
77466
77467+#ifdef CONFIG_PAX_SEGMEXEC
77468+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
77469+ break;
77470+#endif
77471+
77472+ BUG_ON(vma->vm_end > TASK_SIZE);
77473 newflags = vma->vm_flags | VM_LOCKED;
77474 if (!(flags & MCL_CURRENT))
77475 newflags &= ~VM_LOCKED;
77476@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
77477 lock_limit >>= PAGE_SHIFT;
77478
77479 ret = -ENOMEM;
77480+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
77481 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
77482 capable(CAP_IPC_LOCK))
77483 ret = do_mlockall(flags);
77484diff --git a/mm/mmap.c b/mm/mmap.c
77485index 4b80cbf..c5ce1df 100644
77486--- a/mm/mmap.c
77487+++ b/mm/mmap.c
77488@@ -45,6 +45,16 @@
77489 #define arch_rebalance_pgtables(addr, len) (addr)
77490 #endif
77491
77492+static inline void verify_mm_writelocked(struct mm_struct *mm)
77493+{
77494+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
77495+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
77496+ up_read(&mm->mmap_sem);
77497+ BUG();
77498+ }
77499+#endif
77500+}
77501+
77502 static void unmap_region(struct mm_struct *mm,
77503 struct vm_area_struct *vma, struct vm_area_struct *prev,
77504 unsigned long start, unsigned long end);
77505@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struct *mm,
77506 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
77507 *
77508 */
77509-pgprot_t protection_map[16] = {
77510+pgprot_t protection_map[16] __read_only = {
77511 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
77512 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
77513 };
77514
77515 pgprot_t vm_get_page_prot(unsigned long vm_flags)
77516 {
77517- return __pgprot(pgprot_val(protection_map[vm_flags &
77518+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
77519 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
77520 pgprot_val(arch_vm_get_page_prot(vm_flags)));
77521+
77522+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77523+ if (!nx_enabled &&
77524+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
77525+ (vm_flags & (VM_READ | VM_WRITE)))
77526+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
77527+#endif
77528+
77529+ return prot;
77530 }
77531 EXPORT_SYMBOL(vm_get_page_prot);
77532
77533 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
77534 int sysctl_overcommit_ratio = 50; /* default is 50% */
77535 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
77536+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
77537 struct percpu_counter vm_committed_as;
77538
77539 /*
77540@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
77541 struct vm_area_struct *next = vma->vm_next;
77542
77543 might_sleep();
77544+ BUG_ON(vma->vm_mirror);
77545 if (vma->vm_ops && vma->vm_ops->close)
77546 vma->vm_ops->close(vma);
77547 if (vma->vm_file) {
77548@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
77549 * not page aligned -Ram Gupta
77550 */
77551 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
77552+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
77553 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
77554 (mm->end_data - mm->start_data) > rlim)
77555 goto out;
77556@@ -704,6 +726,12 @@ static int
77557 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
77558 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77559 {
77560+
77561+#ifdef CONFIG_PAX_SEGMEXEC
77562+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
77563+ return 0;
77564+#endif
77565+
77566 if (is_mergeable_vma(vma, file, vm_flags) &&
77567 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77568 if (vma->vm_pgoff == vm_pgoff)
77569@@ -723,6 +751,12 @@ static int
77570 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77571 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
77572 {
77573+
77574+#ifdef CONFIG_PAX_SEGMEXEC
77575+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
77576+ return 0;
77577+#endif
77578+
77579 if (is_mergeable_vma(vma, file, vm_flags) &&
77580 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
77581 pgoff_t vm_pglen;
77582@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
77583 struct vm_area_struct *vma_merge(struct mm_struct *mm,
77584 struct vm_area_struct *prev, unsigned long addr,
77585 unsigned long end, unsigned long vm_flags,
77586- struct anon_vma *anon_vma, struct file *file,
77587+ struct anon_vma *anon_vma, struct file *file,
77588 pgoff_t pgoff, struct mempolicy *policy)
77589 {
77590 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
77591 struct vm_area_struct *area, *next;
77592
77593+#ifdef CONFIG_PAX_SEGMEXEC
77594+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
77595+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
77596+
77597+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
77598+#endif
77599+
77600 /*
77601 * We later require that vma->vm_flags == vm_flags,
77602 * so this tests vma->vm_flags & VM_SPECIAL, too.
77603@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77604 if (next && next->vm_end == end) /* cases 6, 7, 8 */
77605 next = next->vm_next;
77606
77607+#ifdef CONFIG_PAX_SEGMEXEC
77608+ if (prev)
77609+ prev_m = pax_find_mirror_vma(prev);
77610+ if (area)
77611+ area_m = pax_find_mirror_vma(area);
77612+ if (next)
77613+ next_m = pax_find_mirror_vma(next);
77614+#endif
77615+
77616 /*
77617 * Can it merge with the predecessor?
77618 */
77619@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77620 /* cases 1, 6 */
77621 vma_adjust(prev, prev->vm_start,
77622 next->vm_end, prev->vm_pgoff, NULL);
77623- } else /* cases 2, 5, 7 */
77624+
77625+#ifdef CONFIG_PAX_SEGMEXEC
77626+ if (prev_m)
77627+ vma_adjust(prev_m, prev_m->vm_start,
77628+ next_m->vm_end, prev_m->vm_pgoff, NULL);
77629+#endif
77630+
77631+ } else { /* cases 2, 5, 7 */
77632 vma_adjust(prev, prev->vm_start,
77633 end, prev->vm_pgoff, NULL);
77634+
77635+#ifdef CONFIG_PAX_SEGMEXEC
77636+ if (prev_m)
77637+ vma_adjust(prev_m, prev_m->vm_start,
77638+ end_m, prev_m->vm_pgoff, NULL);
77639+#endif
77640+
77641+ }
77642 return prev;
77643 }
77644
77645@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
77646 mpol_equal(policy, vma_policy(next)) &&
77647 can_vma_merge_before(next, vm_flags,
77648 anon_vma, file, pgoff+pglen)) {
77649- if (prev && addr < prev->vm_end) /* case 4 */
77650+ if (prev && addr < prev->vm_end) { /* case 4 */
77651 vma_adjust(prev, prev->vm_start,
77652 addr, prev->vm_pgoff, NULL);
77653- else /* cases 3, 8 */
77654+
77655+#ifdef CONFIG_PAX_SEGMEXEC
77656+ if (prev_m)
77657+ vma_adjust(prev_m, prev_m->vm_start,
77658+ addr_m, prev_m->vm_pgoff, NULL);
77659+#endif
77660+
77661+ } else { /* cases 3, 8 */
77662 vma_adjust(area, addr, next->vm_end,
77663 next->vm_pgoff - pglen, NULL);
77664+
77665+#ifdef CONFIG_PAX_SEGMEXEC
77666+ if (area_m)
77667+ vma_adjust(area_m, addr_m, next_m->vm_end,
77668+ next_m->vm_pgoff - pglen, NULL);
77669+#endif
77670+
77671+ }
77672 return area;
77673 }
77674
77675@@ -898,14 +978,11 @@ none:
77676 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
77677 struct file *file, long pages)
77678 {
77679- const unsigned long stack_flags
77680- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
77681-
77682 if (file) {
77683 mm->shared_vm += pages;
77684 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
77685 mm->exec_vm += pages;
77686- } else if (flags & stack_flags)
77687+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
77688 mm->stack_vm += pages;
77689 if (flags & (VM_RESERVED|VM_IO))
77690 mm->reserved_vm += pages;
77691@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77692 * (the exception is when the underlying filesystem is noexec
77693 * mounted, in which case we dont add PROT_EXEC.)
77694 */
77695- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
77696+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
77697 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
77698 prot |= PROT_EXEC;
77699
77700@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77701 /* Obtain the address to map to. we verify (or select) it and ensure
77702 * that it represents a valid section of the address space.
77703 */
77704- addr = get_unmapped_area(file, addr, len, pgoff, flags);
77705+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
77706 if (addr & ~PAGE_MASK)
77707 return addr;
77708
77709@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77710 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
77711 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
77712
77713+#ifdef CONFIG_PAX_MPROTECT
77714+ if (mm->pax_flags & MF_PAX_MPROTECT) {
77715+#ifndef CONFIG_PAX_MPROTECT_COMPAT
77716+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
77717+ gr_log_rwxmmap(file);
77718+
77719+#ifdef CONFIG_PAX_EMUPLT
77720+ vm_flags &= ~VM_EXEC;
77721+#else
77722+ return -EPERM;
77723+#endif
77724+
77725+ }
77726+
77727+ if (!(vm_flags & VM_EXEC))
77728+ vm_flags &= ~VM_MAYEXEC;
77729+#else
77730+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
77731+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
77732+#endif
77733+ else
77734+ vm_flags &= ~VM_MAYWRITE;
77735+ }
77736+#endif
77737+
77738+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77739+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
77740+ vm_flags &= ~VM_PAGEEXEC;
77741+#endif
77742+
77743 if (flags & MAP_LOCKED)
77744 if (!can_do_mlock())
77745 return -EPERM;
77746@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77747 locked += mm->locked_vm;
77748 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
77749 lock_limit >>= PAGE_SHIFT;
77750+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
77751 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
77752 return -EAGAIN;
77753 }
77754@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
77755 if (error)
77756 return error;
77757
77758+ if (!gr_acl_handle_mmap(file, prot))
77759+ return -EACCES;
77760+
77761 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
77762 }
77763 EXPORT_SYMBOL(do_mmap_pgoff);
77764@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
77765 */
77766 int vma_wants_writenotify(struct vm_area_struct *vma)
77767 {
77768- unsigned int vm_flags = vma->vm_flags;
77769+ unsigned long vm_flags = vma->vm_flags;
77770
77771 /* If it was private or non-writable, the write bit is already clear */
77772- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
77773+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
77774 return 0;
77775
77776 /* The backer wishes to know when pages are first written to? */
77777@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
77778 unsigned long charged = 0;
77779 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
77780
77781+#ifdef CONFIG_PAX_SEGMEXEC
77782+ struct vm_area_struct *vma_m = NULL;
77783+#endif
77784+
77785+ /*
77786+ * mm->mmap_sem is required to protect against another thread
77787+ * changing the mappings in case we sleep.
77788+ */
77789+ verify_mm_writelocked(mm);
77790+
77791 /* Clear old maps */
77792 error = -ENOMEM;
77793-munmap_back:
77794 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77795 if (vma && vma->vm_start < addr + len) {
77796 if (do_munmap(mm, addr, len))
77797 return -ENOMEM;
77798- goto munmap_back;
77799+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
77800+ BUG_ON(vma && vma->vm_start < addr + len);
77801 }
77802
77803 /* Check against address space limit. */
77804@@ -1173,6 +1294,16 @@ munmap_back:
77805 goto unacct_error;
77806 }
77807
77808+#ifdef CONFIG_PAX_SEGMEXEC
77809+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
77810+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
77811+ if (!vma_m) {
77812+ error = -ENOMEM;
77813+ goto free_vma;
77814+ }
77815+ }
77816+#endif
77817+
77818 vma->vm_mm = mm;
77819 vma->vm_start = addr;
77820 vma->vm_end = addr + len;
77821@@ -1195,6 +1326,19 @@ munmap_back:
77822 error = file->f_op->mmap(file, vma);
77823 if (error)
77824 goto unmap_and_free_vma;
77825+
77826+#ifdef CONFIG_PAX_SEGMEXEC
77827+ if (vma_m && (vm_flags & VM_EXECUTABLE))
77828+ added_exe_file_vma(mm);
77829+#endif
77830+
77831+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
77832+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
77833+ vma->vm_flags |= VM_PAGEEXEC;
77834+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
77835+ }
77836+#endif
77837+
77838 if (vm_flags & VM_EXECUTABLE)
77839 added_exe_file_vma(mm);
77840
77841@@ -1218,6 +1362,11 @@ munmap_back:
77842 vma_link(mm, vma, prev, rb_link, rb_parent);
77843 file = vma->vm_file;
77844
77845+#ifdef CONFIG_PAX_SEGMEXEC
77846+ if (vma_m)
77847+ pax_mirror_vma(vma_m, vma);
77848+#endif
77849+
77850 /* Once vma denies write, undo our temporary denial count */
77851 if (correct_wcount)
77852 atomic_inc(&inode->i_writecount);
77853@@ -1226,6 +1375,7 @@ out:
77854
77855 mm->total_vm += len >> PAGE_SHIFT;
77856 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
77857+ track_exec_limit(mm, addr, addr + len, vm_flags);
77858 if (vm_flags & VM_LOCKED) {
77859 /*
77860 * makes pages present; downgrades, drops, reacquires mmap_sem
77861@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
77862 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
77863 charged = 0;
77864 free_vma:
77865+
77866+#ifdef CONFIG_PAX_SEGMEXEC
77867+ if (vma_m)
77868+ kmem_cache_free(vm_area_cachep, vma_m);
77869+#endif
77870+
77871 kmem_cache_free(vm_area_cachep, vma);
77872 unacct_error:
77873 if (charged)
77874@@ -1255,6 +1411,44 @@ unacct_error:
77875 return error;
77876 }
77877
77878+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
77879+{
77880+ if (!vma) {
77881+#ifdef CONFIG_STACK_GROWSUP
77882+ if (addr > sysctl_heap_stack_gap)
77883+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
77884+ else
77885+ vma = find_vma(current->mm, 0);
77886+ if (vma && (vma->vm_flags & VM_GROWSUP))
77887+ return false;
77888+#endif
77889+ return true;
77890+ }
77891+
77892+ if (addr + len > vma->vm_start)
77893+ return false;
77894+
77895+ if (vma->vm_flags & VM_GROWSDOWN)
77896+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
77897+#ifdef CONFIG_STACK_GROWSUP
77898+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
77899+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
77900+#endif
77901+
77902+ return true;
77903+}
77904+
77905+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
77906+{
77907+ if (vma->vm_start < len)
77908+ return -ENOMEM;
77909+ if (!(vma->vm_flags & VM_GROWSDOWN))
77910+ return vma->vm_start - len;
77911+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
77912+ return vma->vm_start - len - sysctl_heap_stack_gap;
77913+ return -ENOMEM;
77914+}
77915+
77916 /* Get an address range which is currently unmapped.
77917 * For shmat() with addr=0.
77918 *
77919@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
77920 if (flags & MAP_FIXED)
77921 return addr;
77922
77923+#ifdef CONFIG_PAX_RANDMMAP
77924+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
77925+#endif
77926+
77927 if (addr) {
77928 addr = PAGE_ALIGN(addr);
77929- vma = find_vma(mm, addr);
77930- if (TASK_SIZE - len >= addr &&
77931- (!vma || addr + len <= vma->vm_start))
77932- return addr;
77933+ if (TASK_SIZE - len >= addr) {
77934+ vma = find_vma(mm, addr);
77935+ if (check_heap_stack_gap(vma, addr, len))
77936+ return addr;
77937+ }
77938 }
77939 if (len > mm->cached_hole_size) {
77940- start_addr = addr = mm->free_area_cache;
77941+ start_addr = addr = mm->free_area_cache;
77942 } else {
77943- start_addr = addr = TASK_UNMAPPED_BASE;
77944- mm->cached_hole_size = 0;
77945+ start_addr = addr = mm->mmap_base;
77946+ mm->cached_hole_size = 0;
77947 }
77948
77949 full_search:
77950@@ -1303,34 +1502,40 @@ full_search:
77951 * Start a new search - just in case we missed
77952 * some holes.
77953 */
77954- if (start_addr != TASK_UNMAPPED_BASE) {
77955- addr = TASK_UNMAPPED_BASE;
77956- start_addr = addr;
77957+ if (start_addr != mm->mmap_base) {
77958+ start_addr = addr = mm->mmap_base;
77959 mm->cached_hole_size = 0;
77960 goto full_search;
77961 }
77962 return -ENOMEM;
77963 }
77964- if (!vma || addr + len <= vma->vm_start) {
77965- /*
77966- * Remember the place where we stopped the search:
77967- */
77968- mm->free_area_cache = addr + len;
77969- return addr;
77970- }
77971+ if (check_heap_stack_gap(vma, addr, len))
77972+ break;
77973 if (addr + mm->cached_hole_size < vma->vm_start)
77974 mm->cached_hole_size = vma->vm_start - addr;
77975 addr = vma->vm_end;
77976 }
77977+
77978+ /*
77979+ * Remember the place where we stopped the search:
77980+ */
77981+ mm->free_area_cache = addr + len;
77982+ return addr;
77983 }
77984 #endif
77985
77986 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
77987 {
77988+
77989+#ifdef CONFIG_PAX_SEGMEXEC
77990+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
77991+ return;
77992+#endif
77993+
77994 /*
77995 * Is this a new hole at the lowest possible address?
77996 */
77997- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
77998+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
77999 mm->free_area_cache = addr;
78000 mm->cached_hole_size = ~0UL;
78001 }
78002@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78003 {
78004 struct vm_area_struct *vma;
78005 struct mm_struct *mm = current->mm;
78006- unsigned long addr = addr0;
78007+ unsigned long base = mm->mmap_base, addr = addr0;
78008
78009 /* requested length too big for entire address space */
78010 if (len > TASK_SIZE)
78011@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78012 if (flags & MAP_FIXED)
78013 return addr;
78014
78015+#ifdef CONFIG_PAX_RANDMMAP
78016+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
78017+#endif
78018+
78019 /* requesting a specific address */
78020 if (addr) {
78021 addr = PAGE_ALIGN(addr);
78022- vma = find_vma(mm, addr);
78023- if (TASK_SIZE - len >= addr &&
78024- (!vma || addr + len <= vma->vm_start))
78025- return addr;
78026+ if (TASK_SIZE - len >= addr) {
78027+ vma = find_vma(mm, addr);
78028+ if (check_heap_stack_gap(vma, addr, len))
78029+ return addr;
78030+ }
78031 }
78032
78033 /* check if free_area_cache is useful for us */
78034@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78035 /* make sure it can fit in the remaining address space */
78036 if (addr > len) {
78037 vma = find_vma(mm, addr-len);
78038- if (!vma || addr <= vma->vm_start)
78039+ if (check_heap_stack_gap(vma, addr - len, len))
78040 /* remember the address as a hint for next time */
78041 return (mm->free_area_cache = addr-len);
78042 }
78043@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78044 * return with success:
78045 */
78046 vma = find_vma(mm, addr);
78047- if (!vma || addr+len <= vma->vm_start)
78048+ if (check_heap_stack_gap(vma, addr, len))
78049 /* remember the address as a hint for next time */
78050 return (mm->free_area_cache = addr);
78051
78052@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
78053 mm->cached_hole_size = vma->vm_start - addr;
78054
78055 /* try just below the current vma->vm_start */
78056- addr = vma->vm_start-len;
78057- } while (len < vma->vm_start);
78058+ addr = skip_heap_stack_gap(vma, len);
78059+ } while (!IS_ERR_VALUE(addr));
78060
78061 bottomup:
78062 /*
78063@@ -1414,13 +1624,21 @@ bottomup:
78064 * can happen with large stack limits and large mmap()
78065 * allocations.
78066 */
78067+ mm->mmap_base = TASK_UNMAPPED_BASE;
78068+
78069+#ifdef CONFIG_PAX_RANDMMAP
78070+ if (mm->pax_flags & MF_PAX_RANDMMAP)
78071+ mm->mmap_base += mm->delta_mmap;
78072+#endif
78073+
78074+ mm->free_area_cache = mm->mmap_base;
78075 mm->cached_hole_size = ~0UL;
78076- mm->free_area_cache = TASK_UNMAPPED_BASE;
78077 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
78078 /*
78079 * Restore the topdown base:
78080 */
78081- mm->free_area_cache = mm->mmap_base;
78082+ mm->mmap_base = base;
78083+ mm->free_area_cache = base;
78084 mm->cached_hole_size = ~0UL;
78085
78086 return addr;
78087@@ -1429,6 +1647,12 @@ bottomup:
78088
78089 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78090 {
78091+
78092+#ifdef CONFIG_PAX_SEGMEXEC
78093+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
78094+ return;
78095+#endif
78096+
78097 /*
78098 * Is this a new hole at the highest possible address?
78099 */
78100@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
78101 mm->free_area_cache = addr;
78102
78103 /* dont allow allocations above current base */
78104- if (mm->free_area_cache > mm->mmap_base)
78105+ if (mm->free_area_cache > mm->mmap_base) {
78106 mm->free_area_cache = mm->mmap_base;
78107+ mm->cached_hole_size = ~0UL;
78108+ }
78109 }
78110
78111 unsigned long
78112@@ -1545,6 +1771,27 @@ out:
78113 return prev ? prev->vm_next : vma;
78114 }
78115
78116+#ifdef CONFIG_PAX_SEGMEXEC
78117+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
78118+{
78119+ struct vm_area_struct *vma_m;
78120+
78121+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
78122+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
78123+ BUG_ON(vma->vm_mirror);
78124+ return NULL;
78125+ }
78126+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
78127+ vma_m = vma->vm_mirror;
78128+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
78129+ BUG_ON(vma->vm_file != vma_m->vm_file);
78130+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
78131+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
78132+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
78133+ return vma_m;
78134+}
78135+#endif
78136+
78137 /*
78138 * Verify that the stack growth is acceptable and
78139 * update accounting. This is shared with both the
78140@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78141 return -ENOMEM;
78142
78143 /* Stack limit test */
78144+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
78145 if (size > rlim[RLIMIT_STACK].rlim_cur)
78146 return -ENOMEM;
78147
78148@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78149 unsigned long limit;
78150 locked = mm->locked_vm + grow;
78151 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
78152+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
78153 if (locked > limit && !capable(CAP_IPC_LOCK))
78154 return -ENOMEM;
78155 }
78156@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
78157 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
78158 * vma is the last one with address > vma->vm_end. Have to extend vma.
78159 */
78160+#ifndef CONFIG_IA64
78161+static
78162+#endif
78163 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78164 {
78165 int error;
78166+ bool locknext;
78167
78168 if (!(vma->vm_flags & VM_GROWSUP))
78169 return -EFAULT;
78170
78171+ /* Also guard against wrapping around to address 0. */
78172+ if (address < PAGE_ALIGN(address+1))
78173+ address = PAGE_ALIGN(address+1);
78174+ else
78175+ return -ENOMEM;
78176+
78177 /*
78178 * We must make sure the anon_vma is allocated
78179 * so that the anon_vma locking is not a noop.
78180 */
78181 if (unlikely(anon_vma_prepare(vma)))
78182 return -ENOMEM;
78183+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
78184+ if (locknext && anon_vma_prepare(vma->vm_next))
78185+ return -ENOMEM;
78186 anon_vma_lock(vma);
78187+ if (locknext)
78188+ anon_vma_lock(vma->vm_next);
78189
78190 /*
78191 * vma->vm_start/vm_end cannot change under us because the caller
78192 * is required to hold the mmap_sem in read mode. We need the
78193- * anon_vma lock to serialize against concurrent expand_stacks.
78194- * Also guard against wrapping around to address 0.
78195+ * anon_vma locks to serialize against concurrent expand_stacks
78196+ * and expand_upwards.
78197 */
78198- if (address < PAGE_ALIGN(address+4))
78199- address = PAGE_ALIGN(address+4);
78200- else {
78201- anon_vma_unlock(vma);
78202- return -ENOMEM;
78203- }
78204 error = 0;
78205
78206 /* Somebody else might have raced and expanded it already */
78207- if (address > vma->vm_end) {
78208+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
78209+ error = -ENOMEM;
78210+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
78211 unsigned long size, grow;
78212
78213 size = address - vma->vm_start;
78214@@ -1643,6 +1903,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
78215 vma->vm_end = address;
78216 }
78217 }
78218+ if (locknext)
78219+ anon_vma_unlock(vma->vm_next);
78220 anon_vma_unlock(vma);
78221 return error;
78222 }
78223@@ -1655,6 +1917,8 @@ static int expand_downwards(struct vm_area_struct *vma,
78224 unsigned long address)
78225 {
78226 int error;
78227+ bool lockprev = false;
78228+ struct vm_area_struct *prev;
78229
78230 /*
78231 * We must make sure the anon_vma is allocated
78232@@ -1668,6 +1932,15 @@ static int expand_downwards(struct vm_area_struct *vma,
78233 if (error)
78234 return error;
78235
78236+ prev = vma->vm_prev;
78237+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
78238+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
78239+#endif
78240+ if (lockprev && anon_vma_prepare(prev))
78241+ return -ENOMEM;
78242+ if (lockprev)
78243+ anon_vma_lock(prev);
78244+
78245 anon_vma_lock(vma);
78246
78247 /*
78248@@ -1677,9 +1950,17 @@ static int expand_downwards(struct vm_area_struct *vma,
78249 */
78250
78251 /* Somebody else might have raced and expanded it already */
78252- if (address < vma->vm_start) {
78253+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
78254+ error = -ENOMEM;
78255+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
78256 unsigned long size, grow;
78257
78258+#ifdef CONFIG_PAX_SEGMEXEC
78259+ struct vm_area_struct *vma_m;
78260+
78261+ vma_m = pax_find_mirror_vma(vma);
78262+#endif
78263+
78264 size = vma->vm_end - address;
78265 grow = (vma->vm_start - address) >> PAGE_SHIFT;
78266
78267@@ -1689,10 +1970,22 @@ static int expand_downwards(struct vm_area_struct *vma,
78268 if (!error) {
78269 vma->vm_start = address;
78270 vma->vm_pgoff -= grow;
78271+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
78272+
78273+#ifdef CONFIG_PAX_SEGMEXEC
78274+ if (vma_m) {
78275+ vma_m->vm_start -= grow << PAGE_SHIFT;
78276+ vma_m->vm_pgoff -= grow;
78277+ }
78278+#endif
78279+
78280+
78281 }
78282 }
78283 }
78284 anon_vma_unlock(vma);
78285+ if (lockprev)
78286+ anon_vma_unlock(prev);
78287 return error;
78288 }
78289
78290@@ -1768,6 +2061,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
78291 do {
78292 long nrpages = vma_pages(vma);
78293
78294+#ifdef CONFIG_PAX_SEGMEXEC
78295+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
78296+ vma = remove_vma(vma);
78297+ continue;
78298+ }
78299+#endif
78300+
78301 mm->total_vm -= nrpages;
78302 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
78303 vma = remove_vma(vma);
78304@@ -1813,6 +2113,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
78305 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
78306 vma->vm_prev = NULL;
78307 do {
78308+
78309+#ifdef CONFIG_PAX_SEGMEXEC
78310+ if (vma->vm_mirror) {
78311+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
78312+ vma->vm_mirror->vm_mirror = NULL;
78313+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
78314+ vma->vm_mirror = NULL;
78315+ }
78316+#endif
78317+
78318 rb_erase(&vma->vm_rb, &mm->mm_rb);
78319 mm->map_count--;
78320 tail_vma = vma;
78321@@ -1840,10 +2150,25 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78322 struct mempolicy *pol;
78323 struct vm_area_struct *new;
78324
78325+#ifdef CONFIG_PAX_SEGMEXEC
78326+ struct vm_area_struct *vma_m, *new_m = NULL;
78327+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
78328+#endif
78329+
78330 if (is_vm_hugetlb_page(vma) && (addr &
78331 ~(huge_page_mask(hstate_vma(vma)))))
78332 return -EINVAL;
78333
78334+#ifdef CONFIG_PAX_SEGMEXEC
78335+ vma_m = pax_find_mirror_vma(vma);
78336+
78337+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
78338+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
78339+ if (mm->map_count >= sysctl_max_map_count-1)
78340+ return -ENOMEM;
78341+ } else
78342+#endif
78343+
78344 if (mm->map_count >= sysctl_max_map_count)
78345 return -ENOMEM;
78346
78347@@ -1851,6 +2176,16 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78348 if (!new)
78349 return -ENOMEM;
78350
78351+#ifdef CONFIG_PAX_SEGMEXEC
78352+ if (vma_m) {
78353+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
78354+ if (!new_m) {
78355+ kmem_cache_free(vm_area_cachep, new);
78356+ return -ENOMEM;
78357+ }
78358+ }
78359+#endif
78360+
78361 /* most fields are the same, copy all, and then fixup */
78362 *new = *vma;
78363
78364@@ -1861,8 +2196,29 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78365 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
78366 }
78367
78368+#ifdef CONFIG_PAX_SEGMEXEC
78369+ if (vma_m) {
78370+ *new_m = *vma_m;
78371+ new_m->vm_mirror = new;
78372+ new->vm_mirror = new_m;
78373+
78374+ if (new_below)
78375+ new_m->vm_end = addr_m;
78376+ else {
78377+ new_m->vm_start = addr_m;
78378+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
78379+ }
78380+ }
78381+#endif
78382+
78383 pol = mpol_dup(vma_policy(vma));
78384 if (IS_ERR(pol)) {
78385+
78386+#ifdef CONFIG_PAX_SEGMEXEC
78387+ if (new_m)
78388+ kmem_cache_free(vm_area_cachep, new_m);
78389+#endif
78390+
78391 kmem_cache_free(vm_area_cachep, new);
78392 return PTR_ERR(pol);
78393 }
78394@@ -1883,6 +2239,28 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78395 else
78396 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
78397
78398+#ifdef CONFIG_PAX_SEGMEXEC
78399+ if (vma_m) {
78400+ mpol_get(pol);
78401+ vma_set_policy(new_m, pol);
78402+
78403+ if (new_m->vm_file) {
78404+ get_file(new_m->vm_file);
78405+ if (vma_m->vm_flags & VM_EXECUTABLE)
78406+ added_exe_file_vma(mm);
78407+ }
78408+
78409+ if (new_m->vm_ops && new_m->vm_ops->open)
78410+ new_m->vm_ops->open(new_m);
78411+
78412+ if (new_below)
78413+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
78414+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
78415+ else
78416+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
78417+ }
78418+#endif
78419+
78420 return 0;
78421 }
78422
78423@@ -1891,11 +2269,30 @@ int split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
78424 * work. This now handles partial unmappings.
78425 * Jeremy Fitzhardinge <jeremy@goop.org>
78426 */
78427+#ifdef CONFIG_PAX_SEGMEXEC
78428 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78429 {
78430+ int ret = __do_munmap(mm, start, len);
78431+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
78432+ return ret;
78433+
78434+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
78435+}
78436+
78437+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78438+#else
78439+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78440+#endif
78441+{
78442 unsigned long end;
78443 struct vm_area_struct *vma, *prev, *last;
78444
78445+ /*
78446+ * mm->mmap_sem is required to protect against another thread
78447+ * changing the mappings in case we sleep.
78448+ */
78449+ verify_mm_writelocked(mm);
78450+
78451 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
78452 return -EINVAL;
78453
78454@@ -1959,6 +2356,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
78455 /* Fix up all other VM information */
78456 remove_vma_list(mm, vma);
78457
78458+ track_exec_limit(mm, start, end, 0UL);
78459+
78460 return 0;
78461 }
78462
78463@@ -1971,22 +2370,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
78464
78465 profile_munmap(addr);
78466
78467+#ifdef CONFIG_PAX_SEGMEXEC
78468+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
78469+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
78470+ return -EINVAL;
78471+#endif
78472+
78473 down_write(&mm->mmap_sem);
78474 ret = do_munmap(mm, addr, len);
78475 up_write(&mm->mmap_sem);
78476 return ret;
78477 }
78478
78479-static inline void verify_mm_writelocked(struct mm_struct *mm)
78480-{
78481-#ifdef CONFIG_DEBUG_VM
78482- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
78483- WARN_ON(1);
78484- up_read(&mm->mmap_sem);
78485- }
78486-#endif
78487-}
78488-
78489 /*
78490 * this is really a simplified "do_mmap". it only handles
78491 * anonymous maps. eventually we may be able to do some
78492@@ -2000,6 +2395,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78493 struct rb_node ** rb_link, * rb_parent;
78494 pgoff_t pgoff = addr >> PAGE_SHIFT;
78495 int error;
78496+ unsigned long charged;
78497
78498 len = PAGE_ALIGN(len);
78499 if (!len)
78500@@ -2011,16 +2407,30 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78501
78502 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
78503
78504+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
78505+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
78506+ flags &= ~VM_EXEC;
78507+
78508+#ifdef CONFIG_PAX_MPROTECT
78509+ if (mm->pax_flags & MF_PAX_MPROTECT)
78510+ flags &= ~VM_MAYEXEC;
78511+#endif
78512+
78513+ }
78514+#endif
78515+
78516 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
78517 if (error & ~PAGE_MASK)
78518 return error;
78519
78520+ charged = len >> PAGE_SHIFT;
78521+
78522 /*
78523 * mlock MCL_FUTURE?
78524 */
78525 if (mm->def_flags & VM_LOCKED) {
78526 unsigned long locked, lock_limit;
78527- locked = len >> PAGE_SHIFT;
78528+ locked = charged;
78529 locked += mm->locked_vm;
78530 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
78531 lock_limit >>= PAGE_SHIFT;
78532@@ -2037,22 +2447,22 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78533 /*
78534 * Clear old maps. this also does some error checking for us
78535 */
78536- munmap_back:
78537 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78538 if (vma && vma->vm_start < addr + len) {
78539 if (do_munmap(mm, addr, len))
78540 return -ENOMEM;
78541- goto munmap_back;
78542+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
78543+ BUG_ON(vma && vma->vm_start < addr + len);
78544 }
78545
78546 /* Check against address space limits *after* clearing old maps... */
78547- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
78548+ if (!may_expand_vm(mm, charged))
78549 return -ENOMEM;
78550
78551 if (mm->map_count > sysctl_max_map_count)
78552 return -ENOMEM;
78553
78554- if (security_vm_enough_memory(len >> PAGE_SHIFT))
78555+ if (security_vm_enough_memory(charged))
78556 return -ENOMEM;
78557
78558 /* Can we just expand an old private anonymous mapping? */
78559@@ -2066,7 +2476,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78560 */
78561 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78562 if (!vma) {
78563- vm_unacct_memory(len >> PAGE_SHIFT);
78564+ vm_unacct_memory(charged);
78565 return -ENOMEM;
78566 }
78567
78568@@ -2078,11 +2488,12 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
78569 vma->vm_page_prot = vm_get_page_prot(flags);
78570 vma_link(mm, vma, prev, rb_link, rb_parent);
78571 out:
78572- mm->total_vm += len >> PAGE_SHIFT;
78573+ mm->total_vm += charged;
78574 if (flags & VM_LOCKED) {
78575 if (!mlock_vma_pages_range(vma, addr, addr + len))
78576- mm->locked_vm += (len >> PAGE_SHIFT);
78577+ mm->locked_vm += charged;
78578 }
78579+ track_exec_limit(mm, addr, addr + len, flags);
78580 return addr;
78581 }
78582
78583@@ -2129,8 +2540,10 @@ void exit_mmap(struct mm_struct *mm)
78584 * Walk the list again, actually closing and freeing it,
78585 * with preemption enabled, without holding any MM locks.
78586 */
78587- while (vma)
78588+ while (vma) {
78589+ vma->vm_mirror = NULL;
78590 vma = remove_vma(vma);
78591+ }
78592
78593 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
78594 }
78595@@ -2144,6 +2557,10 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78596 struct vm_area_struct * __vma, * prev;
78597 struct rb_node ** rb_link, * rb_parent;
78598
78599+#ifdef CONFIG_PAX_SEGMEXEC
78600+ struct vm_area_struct *vma_m = NULL;
78601+#endif
78602+
78603 /*
78604 * The vm_pgoff of a purely anonymous vma should be irrelevant
78605 * until its first write fault, when page's anon_vma and index
78606@@ -2166,7 +2583,22 @@ int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
78607 if ((vma->vm_flags & VM_ACCOUNT) &&
78608 security_vm_enough_memory_mm(mm, vma_pages(vma)))
78609 return -ENOMEM;
78610+
78611+#ifdef CONFIG_PAX_SEGMEXEC
78612+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
78613+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78614+ if (!vma_m)
78615+ return -ENOMEM;
78616+ }
78617+#endif
78618+
78619 vma_link(mm, vma, prev, rb_link, rb_parent);
78620+
78621+#ifdef CONFIG_PAX_SEGMEXEC
78622+ if (vma_m)
78623+ pax_mirror_vma(vma_m, vma);
78624+#endif
78625+
78626 return 0;
78627 }
78628
78629@@ -2184,6 +2616,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78630 struct rb_node **rb_link, *rb_parent;
78631 struct mempolicy *pol;
78632
78633+ BUG_ON(vma->vm_mirror);
78634+
78635 /*
78636 * If anonymous vma has not yet been faulted, update new pgoff
78637 * to match new location, to increase its chance of merging.
78638@@ -2227,6 +2661,35 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
78639 return new_vma;
78640 }
78641
78642+#ifdef CONFIG_PAX_SEGMEXEC
78643+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
78644+{
78645+ struct vm_area_struct *prev_m;
78646+ struct rb_node **rb_link_m, *rb_parent_m;
78647+ struct mempolicy *pol_m;
78648+
78649+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
78650+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
78651+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
78652+ *vma_m = *vma;
78653+ pol_m = vma_policy(vma_m);
78654+ mpol_get(pol_m);
78655+ vma_set_policy(vma_m, pol_m);
78656+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
78657+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
78658+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
78659+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
78660+ if (vma_m->vm_file)
78661+ get_file(vma_m->vm_file);
78662+ if (vma_m->vm_ops && vma_m->vm_ops->open)
78663+ vma_m->vm_ops->open(vma_m);
78664+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
78665+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
78666+ vma_m->vm_mirror = vma;
78667+ vma->vm_mirror = vma_m;
78668+}
78669+#endif
78670+
78671 /*
78672 * Return true if the calling process may expand its vm space by the passed
78673 * number of pages
78674@@ -2237,7 +2700,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
78675 unsigned long lim;
78676
78677 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
78678-
78679+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
78680 if (cur + npages > lim)
78681 return 0;
78682 return 1;
78683@@ -2307,6 +2770,22 @@ int install_special_mapping(struct mm_struct *mm,
78684 vma->vm_start = addr;
78685 vma->vm_end = addr + len;
78686
78687+#ifdef CONFIG_PAX_MPROTECT
78688+ if (mm->pax_flags & MF_PAX_MPROTECT) {
78689+#ifndef CONFIG_PAX_MPROTECT_COMPAT
78690+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
78691+ return -EPERM;
78692+ if (!(vm_flags & VM_EXEC))
78693+ vm_flags &= ~VM_MAYEXEC;
78694+#else
78695+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
78696+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
78697+#endif
78698+ else
78699+ vm_flags &= ~VM_MAYWRITE;
78700+ }
78701+#endif
78702+
78703 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
78704 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
78705
78706diff --git a/mm/mprotect.c b/mm/mprotect.c
78707index 1737c7e..c7faeb4 100644
78708--- a/mm/mprotect.c
78709+++ b/mm/mprotect.c
78710@@ -24,10 +24,16 @@
78711 #include <linux/mmu_notifier.h>
78712 #include <linux/migrate.h>
78713 #include <linux/perf_event.h>
78714+
78715+#ifdef CONFIG_PAX_MPROTECT
78716+#include <linux/elf.h>
78717+#endif
78718+
78719 #include <asm/uaccess.h>
78720 #include <asm/pgtable.h>
78721 #include <asm/cacheflush.h>
78722 #include <asm/tlbflush.h>
78723+#include <asm/mmu_context.h>
78724
78725 #ifndef pgprot_modify
78726 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
78727@@ -132,6 +138,48 @@ static void change_protection(struct vm_area_struct *vma,
78728 flush_tlb_range(vma, start, end);
78729 }
78730
78731+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
78732+/* called while holding the mmap semaphor for writing except stack expansion */
78733+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
78734+{
78735+ unsigned long oldlimit, newlimit = 0UL;
78736+
78737+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
78738+ return;
78739+
78740+ spin_lock(&mm->page_table_lock);
78741+ oldlimit = mm->context.user_cs_limit;
78742+ if ((prot & VM_EXEC) && oldlimit < end)
78743+ /* USER_CS limit moved up */
78744+ newlimit = end;
78745+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
78746+ /* USER_CS limit moved down */
78747+ newlimit = start;
78748+
78749+ if (newlimit) {
78750+ mm->context.user_cs_limit = newlimit;
78751+
78752+#ifdef CONFIG_SMP
78753+ wmb();
78754+ cpus_clear(mm->context.cpu_user_cs_mask);
78755+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
78756+#endif
78757+
78758+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
78759+ }
78760+ spin_unlock(&mm->page_table_lock);
78761+ if (newlimit == end) {
78762+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
78763+
78764+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
78765+ if (is_vm_hugetlb_page(vma))
78766+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
78767+ else
78768+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
78769+ }
78770+}
78771+#endif
78772+
78773 int
78774 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78775 unsigned long start, unsigned long end, unsigned long newflags)
78776@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78777 int error;
78778 int dirty_accountable = 0;
78779
78780+#ifdef CONFIG_PAX_SEGMEXEC
78781+ struct vm_area_struct *vma_m = NULL;
78782+ unsigned long start_m, end_m;
78783+
78784+ start_m = start + SEGMEXEC_TASK_SIZE;
78785+ end_m = end + SEGMEXEC_TASK_SIZE;
78786+#endif
78787+
78788 if (newflags == oldflags) {
78789 *pprev = vma;
78790 return 0;
78791 }
78792
78793+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
78794+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
78795+
78796+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
78797+ return -ENOMEM;
78798+
78799+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
78800+ return -ENOMEM;
78801+ }
78802+
78803 /*
78804 * If we make a private mapping writable we increase our commit;
78805 * but (without finer accounting) cannot reduce our commit if we
78806@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
78807 }
78808 }
78809
78810+#ifdef CONFIG_PAX_SEGMEXEC
78811+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
78812+ if (start != vma->vm_start) {
78813+ error = split_vma(mm, vma, start, 1);
78814+ if (error)
78815+ goto fail;
78816+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
78817+ *pprev = (*pprev)->vm_next;
78818+ }
78819+
78820+ if (end != vma->vm_end) {
78821+ error = split_vma(mm, vma, end, 0);
78822+ if (error)
78823+ goto fail;
78824+ }
78825+
78826+ if (pax_find_mirror_vma(vma)) {
78827+ error = __do_munmap(mm, start_m, end_m - start_m);
78828+ if (error)
78829+ goto fail;
78830+ } else {
78831+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
78832+ if (!vma_m) {
78833+ error = -ENOMEM;
78834+ goto fail;
78835+ }
78836+ vma->vm_flags = newflags;
78837+ pax_mirror_vma(vma_m, vma);
78838+ }
78839+ }
78840+#endif
78841+
78842 /*
78843 * First try to merge with previous and/or next vma.
78844 */
78845@@ -195,9 +293,21 @@ success:
78846 * vm_flags and vm_page_prot are protected by the mmap_sem
78847 * held in write mode.
78848 */
78849+
78850+#ifdef CONFIG_PAX_SEGMEXEC
78851+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
78852+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
78853+#endif
78854+
78855 vma->vm_flags = newflags;
78856+
78857+#ifdef CONFIG_PAX_MPROTECT
78858+ if (mm->binfmt && mm->binfmt->handle_mprotect)
78859+ mm->binfmt->handle_mprotect(vma, newflags);
78860+#endif
78861+
78862 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
78863- vm_get_page_prot(newflags));
78864+ vm_get_page_prot(vma->vm_flags));
78865
78866 if (vma_wants_writenotify(vma)) {
78867 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
78868@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78869 end = start + len;
78870 if (end <= start)
78871 return -ENOMEM;
78872+
78873+#ifdef CONFIG_PAX_SEGMEXEC
78874+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
78875+ if (end > SEGMEXEC_TASK_SIZE)
78876+ return -EINVAL;
78877+ } else
78878+#endif
78879+
78880+ if (end > TASK_SIZE)
78881+ return -EINVAL;
78882+
78883 if (!arch_validate_prot(prot))
78884 return -EINVAL;
78885
78886@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78887 /*
78888 * Does the application expect PROT_READ to imply PROT_EXEC:
78889 */
78890- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
78891+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
78892 prot |= PROT_EXEC;
78893
78894 vm_flags = calc_vm_prot_bits(prot);
78895@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78896 if (start > vma->vm_start)
78897 prev = vma;
78898
78899+#ifdef CONFIG_PAX_MPROTECT
78900+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
78901+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
78902+#endif
78903+
78904 for (nstart = start ; ; ) {
78905 unsigned long newflags;
78906
78907@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78908
78909 /* newflags >> 4 shift VM_MAY% in place of VM_% */
78910 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
78911+ if (prot & (PROT_WRITE | PROT_EXEC))
78912+ gr_log_rwxmprotect(vma->vm_file);
78913+
78914+ error = -EACCES;
78915+ goto out;
78916+ }
78917+
78918+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
78919 error = -EACCES;
78920 goto out;
78921 }
78922@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
78923 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
78924 if (error)
78925 goto out;
78926+
78927+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
78928+
78929 nstart = tmp;
78930
78931 if (nstart < prev->vm_end)
78932diff --git a/mm/mremap.c b/mm/mremap.c
78933index 3e98d79..1706cec 100644
78934--- a/mm/mremap.c
78935+++ b/mm/mremap.c
78936@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
78937 continue;
78938 pte = ptep_clear_flush(vma, old_addr, old_pte);
78939 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
78940+
78941+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
78942+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
78943+ pte = pte_exprotect(pte);
78944+#endif
78945+
78946 set_pte_at(mm, new_addr, new_pte, pte);
78947 }
78948
78949@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
78950 if (is_vm_hugetlb_page(vma))
78951 goto Einval;
78952
78953+#ifdef CONFIG_PAX_SEGMEXEC
78954+ if (pax_find_mirror_vma(vma))
78955+ goto Einval;
78956+#endif
78957+
78958 /* We can't remap across vm area boundaries */
78959 if (old_len > vma->vm_end - addr)
78960 goto Efault;
78961@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned long addr,
78962 unsigned long ret = -EINVAL;
78963 unsigned long charged = 0;
78964 unsigned long map_flags;
78965+ unsigned long pax_task_size = TASK_SIZE;
78966
78967 if (new_addr & ~PAGE_MASK)
78968 goto out;
78969
78970- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
78971+#ifdef CONFIG_PAX_SEGMEXEC
78972+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
78973+ pax_task_size = SEGMEXEC_TASK_SIZE;
78974+#endif
78975+
78976+ pax_task_size -= PAGE_SIZE;
78977+
78978+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
78979 goto out;
78980
78981 /* Check if the location we're moving into overlaps the
78982 * old location at all, and fail if it does.
78983 */
78984- if ((new_addr <= addr) && (new_addr+new_len) > addr)
78985- goto out;
78986-
78987- if ((addr <= new_addr) && (addr+old_len) > new_addr)
78988+ if (addr + old_len > new_addr && new_addr + new_len > addr)
78989 goto out;
78990
78991 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
78992@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long addr,
78993 struct vm_area_struct *vma;
78994 unsigned long ret = -EINVAL;
78995 unsigned long charged = 0;
78996+ unsigned long pax_task_size = TASK_SIZE;
78997
78998 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
78999 goto out;
79000@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long addr,
79001 if (!new_len)
79002 goto out;
79003
79004+#ifdef CONFIG_PAX_SEGMEXEC
79005+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
79006+ pax_task_size = SEGMEXEC_TASK_SIZE;
79007+#endif
79008+
79009+ pax_task_size -= PAGE_SIZE;
79010+
79011+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
79012+ old_len > pax_task_size || addr > pax_task_size-old_len)
79013+ goto out;
79014+
79015 if (flags & MREMAP_FIXED) {
79016 if (flags & MREMAP_MAYMOVE)
79017 ret = mremap_to(addr, old_len, new_addr, new_len);
79018@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long addr,
79019 addr + new_len);
79020 }
79021 ret = addr;
79022+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
79023 goto out;
79024 }
79025 }
79026@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long addr,
79027 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
79028 if (ret)
79029 goto out;
79030+
79031+ map_flags = vma->vm_flags;
79032 ret = move_vma(vma, addr, old_len, new_len, new_addr);
79033+ if (!(ret & ~PAGE_MASK)) {
79034+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
79035+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
79036+ }
79037 }
79038 out:
79039 if (ret & ~PAGE_MASK)
79040diff --git a/mm/nommu.c b/mm/nommu.c
79041index 406e8d4..53970d3 100644
79042--- a/mm/nommu.c
79043+++ b/mm/nommu.c
79044@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
79045 int sysctl_overcommit_ratio = 50; /* default is 50% */
79046 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
79047 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
79048-int heap_stack_gap = 0;
79049
79050 atomic_long_t mmap_pages_allocated;
79051
79052@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
79053 EXPORT_SYMBOL(find_vma);
79054
79055 /*
79056- * find a VMA
79057- * - we don't extend stack VMAs under NOMMU conditions
79058- */
79059-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
79060-{
79061- return find_vma(mm, addr);
79062-}
79063-
79064-/*
79065 * expand a stack to a given address
79066 * - not supported under NOMMU conditions
79067 */
79068diff --git a/mm/page_alloc.c b/mm/page_alloc.c
79069index 3ecab7e..594a471 100644
79070--- a/mm/page_alloc.c
79071+++ b/mm/page_alloc.c
79072@@ -289,7 +289,7 @@ out:
79073 * This usage means that zero-order pages may not be compound.
79074 */
79075
79076-static void free_compound_page(struct page *page)
79077+void free_compound_page(struct page *page)
79078 {
79079 __free_pages_ok(page, compound_order(page));
79080 }
79081@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79082 int bad = 0;
79083 int wasMlocked = __TestClearPageMlocked(page);
79084
79085+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79086+ unsigned long index = 1UL << order;
79087+#endif
79088+
79089 kmemcheck_free_shadow(page, order);
79090
79091 for (i = 0 ; i < (1 << order) ; ++i)
79092@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page *page, unsigned int order)
79093 debug_check_no_obj_freed(page_address(page),
79094 PAGE_SIZE << order);
79095 }
79096+
79097+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79098+ for (; index; --index)
79099+ sanitize_highpage(page + index - 1);
79100+#endif
79101+
79102 arch_free_page(page, order);
79103 kernel_map_pages(page, 1 << order, 0);
79104
79105@@ -702,8 +712,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
79106 arch_alloc_page(page, order);
79107 kernel_map_pages(page, 1 << order, 1);
79108
79109+#ifndef CONFIG_PAX_MEMORY_SANITIZE
79110 if (gfp_flags & __GFP_ZERO)
79111 prep_zero_page(page, order, gfp_flags);
79112+#endif
79113
79114 if (order && (gfp_flags & __GFP_COMP))
79115 prep_compound_page(page, order);
79116@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct page *page, int cold)
79117 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
79118 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
79119 }
79120+
79121+#ifdef CONFIG_PAX_MEMORY_SANITIZE
79122+ sanitize_highpage(page);
79123+#endif
79124+
79125 arch_free_page(page, 0);
79126 kernel_map_pages(page, 1, 0);
79127
79128@@ -2179,6 +2196,8 @@ void show_free_areas(void)
79129 int cpu;
79130 struct zone *zone;
79131
79132+ pax_track_stack();
79133+
79134 for_each_populated_zone(zone) {
79135 show_node(zone);
79136 printk("%s per-cpu:\n", zone->name);
79137@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
79138 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
79139 }
79140 #else
79141-static void inline setup_usemap(struct pglist_data *pgdat,
79142+static inline void setup_usemap(struct pglist_data *pgdat,
79143 struct zone *zone, unsigned long zonesize) {}
79144 #endif /* CONFIG_SPARSEMEM */
79145
79146diff --git a/mm/percpu.c b/mm/percpu.c
79147index c90614a..5f7b7b8 100644
79148--- a/mm/percpu.c
79149+++ b/mm/percpu.c
79150@@ -115,7 +115,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
79151 static unsigned int pcpu_high_unit_cpu __read_mostly;
79152
79153 /* the address of the first chunk which starts with the kernel static area */
79154-void *pcpu_base_addr __read_mostly;
79155+void *pcpu_base_addr __read_only;
79156 EXPORT_SYMBOL_GPL(pcpu_base_addr);
79157
79158 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
79159diff --git a/mm/rmap.c b/mm/rmap.c
79160index dd43373..d848cd7 100644
79161--- a/mm/rmap.c
79162+++ b/mm/rmap.c
79163@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_struct *vma)
79164 /* page_table_lock to protect against threads */
79165 spin_lock(&mm->page_table_lock);
79166 if (likely(!vma->anon_vma)) {
79167+
79168+#ifdef CONFIG_PAX_SEGMEXEC
79169+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
79170+
79171+ if (vma_m) {
79172+ BUG_ON(vma_m->anon_vma);
79173+ vma_m->anon_vma = anon_vma;
79174+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
79175+ }
79176+#endif
79177+
79178 vma->anon_vma = anon_vma;
79179 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
79180 allocated = NULL;
79181diff --git a/mm/shmem.c b/mm/shmem.c
79182index 3e0005b..1d659a8 100644
79183--- a/mm/shmem.c
79184+++ b/mm/shmem.c
79185@@ -31,7 +31,7 @@
79186 #include <linux/swap.h>
79187 #include <linux/ima.h>
79188
79189-static struct vfsmount *shm_mnt;
79190+struct vfsmount *shm_mnt;
79191
79192 #ifdef CONFIG_SHMEM
79193 /*
79194@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
79195 goto unlock;
79196 }
79197 entry = shmem_swp_entry(info, index, NULL);
79198+ if (!entry)
79199+ goto unlock;
79200 if (entry->val) {
79201 /*
79202 * The more uptodate page coming down from a stacked
79203@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_entry_t entry, gfp_t gfp,
79204 struct vm_area_struct pvma;
79205 struct page *page;
79206
79207+ pax_track_stack();
79208+
79209 spol = mpol_cond_copy(&mpol,
79210 mpol_shared_policy_lookup(&info->policy, idx));
79211
79212@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
79213
79214 info = SHMEM_I(inode);
79215 inode->i_size = len-1;
79216- if (len <= (char *)inode - (char *)info) {
79217+ if (len <= (char *)inode - (char *)info && len <= 64) {
79218 /* do it inline */
79219 memcpy(info, symname, len);
79220 inode->i_op = &shmem_symlink_inline_operations;
79221@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
79222 int err = -ENOMEM;
79223
79224 /* Round up to L1_CACHE_BYTES to resist false sharing */
79225- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
79226- L1_CACHE_BYTES), GFP_KERNEL);
79227+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
79228 if (!sbinfo)
79229 return -ENOMEM;
79230
79231diff --git a/mm/slab.c b/mm/slab.c
79232index c8d466a..909e01e 100644
79233--- a/mm/slab.c
79234+++ b/mm/slab.c
79235@@ -174,7 +174,7 @@
79236
79237 /* Legal flag mask for kmem_cache_create(). */
79238 #if DEBUG
79239-# define CREATE_MASK (SLAB_RED_ZONE | \
79240+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
79241 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
79242 SLAB_CACHE_DMA | \
79243 SLAB_STORE_USER | \
79244@@ -182,7 +182,7 @@
79245 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79246 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
79247 #else
79248-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
79249+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
79250 SLAB_CACHE_DMA | \
79251 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
79252 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
79253@@ -308,7 +308,7 @@ struct kmem_list3 {
79254 * Need this for bootstrapping a per node allocator.
79255 */
79256 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
79257-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
79258+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
79259 #define CACHE_CACHE 0
79260 #define SIZE_AC MAX_NUMNODES
79261 #define SIZE_L3 (2 * MAX_NUMNODES)
79262@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_list3 *parent)
79263 if ((x)->max_freeable < i) \
79264 (x)->max_freeable = i; \
79265 } while (0)
79266-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
79267-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
79268-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
79269-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
79270+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
79271+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
79272+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
79273+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
79274 #else
79275 #define STATS_INC_ACTIVE(x) do { } while (0)
79276 #define STATS_DEC_ACTIVE(x) do { } while (0)
79277@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
79278 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
79279 */
79280 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
79281- const struct slab *slab, void *obj)
79282+ const struct slab *slab, const void *obj)
79283 {
79284 u32 offset = (obj - slab->s_mem);
79285 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
79286@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
79287 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
79288 sizes[INDEX_AC].cs_size,
79289 ARCH_KMALLOC_MINALIGN,
79290- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79291+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79292 NULL);
79293
79294 if (INDEX_AC != INDEX_L3) {
79295@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
79296 kmem_cache_create(names[INDEX_L3].name,
79297 sizes[INDEX_L3].cs_size,
79298 ARCH_KMALLOC_MINALIGN,
79299- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79300+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79301 NULL);
79302 }
79303
79304@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
79305 sizes->cs_cachep = kmem_cache_create(names->name,
79306 sizes->cs_size,
79307 ARCH_KMALLOC_MINALIGN,
79308- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
79309+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
79310 NULL);
79311 }
79312 #ifdef CONFIG_ZONE_DMA
79313@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, void *p)
79314 }
79315 /* cpu stats */
79316 {
79317- unsigned long allochit = atomic_read(&cachep->allochit);
79318- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
79319- unsigned long freehit = atomic_read(&cachep->freehit);
79320- unsigned long freemiss = atomic_read(&cachep->freemiss);
79321+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
79322+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
79323+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
79324+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
79325
79326 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
79327 allochit, allocmiss, freehit, freemiss);
79328@@ -4471,15 +4471,70 @@ static const struct file_operations proc_slabstats_operations = {
79329
79330 static int __init slab_proc_init(void)
79331 {
79332- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
79333+ mode_t gr_mode = S_IRUGO;
79334+
79335+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79336+ gr_mode = S_IRUSR;
79337+#endif
79338+
79339+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
79340 #ifdef CONFIG_DEBUG_SLAB_LEAK
79341- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
79342+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
79343 #endif
79344 return 0;
79345 }
79346 module_init(slab_proc_init);
79347 #endif
79348
79349+void check_object_size(const void *ptr, unsigned long n, bool to)
79350+{
79351+
79352+#ifdef CONFIG_PAX_USERCOPY
79353+ struct page *page;
79354+ struct kmem_cache *cachep = NULL;
79355+ struct slab *slabp;
79356+ unsigned int objnr;
79357+ unsigned long offset;
79358+ const char *type;
79359+
79360+ if (!n)
79361+ return;
79362+
79363+ type = "<null>";
79364+ if (ZERO_OR_NULL_PTR(ptr))
79365+ goto report;
79366+
79367+ if (!virt_addr_valid(ptr))
79368+ return;
79369+
79370+ page = virt_to_head_page(ptr);
79371+
79372+ type = "<process stack>";
79373+ if (!PageSlab(page)) {
79374+ if (object_is_on_stack(ptr, n) == -1)
79375+ goto report;
79376+ return;
79377+ }
79378+
79379+ cachep = page_get_cache(page);
79380+ type = cachep->name;
79381+ if (!(cachep->flags & SLAB_USERCOPY))
79382+ goto report;
79383+
79384+ slabp = page_get_slab(page);
79385+ objnr = obj_to_index(cachep, slabp, ptr);
79386+ BUG_ON(objnr >= cachep->num);
79387+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
79388+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
79389+ return;
79390+
79391+report:
79392+ pax_report_usercopy(ptr, n, to, type);
79393+#endif
79394+
79395+}
79396+EXPORT_SYMBOL(check_object_size);
79397+
79398 /**
79399 * ksize - get the actual amount of memory allocated for a given object
79400 * @objp: Pointer to the object
79401diff --git a/mm/slob.c b/mm/slob.c
79402index 837ebd6..4712174 100644
79403--- a/mm/slob.c
79404+++ b/mm/slob.c
79405@@ -29,7 +29,7 @@
79406 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
79407 * alloc_pages() directly, allocating compound pages so the page order
79408 * does not have to be separately tracked, and also stores the exact
79409- * allocation size in page->private so that it can be used to accurately
79410+ * allocation size in slob_page->size so that it can be used to accurately
79411 * provide ksize(). These objects are detected in kfree() because slob_page()
79412 * is false for them.
79413 *
79414@@ -58,6 +58,7 @@
79415 */
79416
79417 #include <linux/kernel.h>
79418+#include <linux/sched.h>
79419 #include <linux/slab.h>
79420 #include <linux/mm.h>
79421 #include <linux/swap.h> /* struct reclaim_state */
79422@@ -100,7 +101,8 @@ struct slob_page {
79423 unsigned long flags; /* mandatory */
79424 atomic_t _count; /* mandatory */
79425 slobidx_t units; /* free units left in page */
79426- unsigned long pad[2];
79427+ unsigned long pad[1];
79428+ unsigned long size; /* size when >=PAGE_SIZE */
79429 slob_t *free; /* first free slob_t in page */
79430 struct list_head list; /* linked list of free pages */
79431 };
79432@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
79433 */
79434 static inline int is_slob_page(struct slob_page *sp)
79435 {
79436- return PageSlab((struct page *)sp);
79437+ return PageSlab((struct page *)sp) && !sp->size;
79438 }
79439
79440 static inline void set_slob_page(struct slob_page *sp)
79441@@ -148,7 +150,7 @@ static inline void clear_slob_page(struct slob_page *sp)
79442
79443 static inline struct slob_page *slob_page(const void *addr)
79444 {
79445- return (struct slob_page *)virt_to_page(addr);
79446+ return (struct slob_page *)virt_to_head_page(addr);
79447 }
79448
79449 /*
79450@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
79451 /*
79452 * Return the size of a slob block.
79453 */
79454-static slobidx_t slob_units(slob_t *s)
79455+static slobidx_t slob_units(const slob_t *s)
79456 {
79457 if (s->units > 0)
79458 return s->units;
79459@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
79460 /*
79461 * Return the next free slob block pointer after this one.
79462 */
79463-static slob_t *slob_next(slob_t *s)
79464+static slob_t *slob_next(const slob_t *s)
79465 {
79466 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
79467 slobidx_t next;
79468@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
79469 /*
79470 * Returns true if s is the last free block in its page.
79471 */
79472-static int slob_last(slob_t *s)
79473+static int slob_last(const slob_t *s)
79474 {
79475 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
79476 }
79477@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
79478 if (!page)
79479 return NULL;
79480
79481+ set_slob_page(page);
79482 return page_address(page);
79483 }
79484
79485@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
79486 if (!b)
79487 return NULL;
79488 sp = slob_page(b);
79489- set_slob_page(sp);
79490
79491 spin_lock_irqsave(&slob_lock, flags);
79492 sp->units = SLOB_UNITS(PAGE_SIZE);
79493 sp->free = b;
79494+ sp->size = 0;
79495 INIT_LIST_HEAD(&sp->list);
79496 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
79497 set_slob_page_free(sp, slob_list);
79498@@ -475,10 +478,9 @@ out:
79499 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
79500 #endif
79501
79502-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79503+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
79504 {
79505- unsigned int *m;
79506- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79507+ slob_t *m;
79508 void *ret;
79509
79510 lockdep_trace_alloc(gfp);
79511@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79512
79513 if (!m)
79514 return NULL;
79515- *m = size;
79516+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
79517+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
79518+ m[0].units = size;
79519+ m[1].units = align;
79520 ret = (void *)m + align;
79521
79522 trace_kmalloc_node(_RET_IP_, ret,
79523@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79524
79525 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
79526 if (ret) {
79527- struct page *page;
79528- page = virt_to_page(ret);
79529- page->private = size;
79530+ struct slob_page *sp;
79531+ sp = slob_page(ret);
79532+ sp->size = size;
79533 }
79534
79535 trace_kmalloc_node(_RET_IP_, ret,
79536 size, PAGE_SIZE << order, gfp, node);
79537 }
79538
79539- kmemleak_alloc(ret, size, 1, gfp);
79540+ return ret;
79541+}
79542+
79543+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
79544+{
79545+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79546+ void *ret = __kmalloc_node_align(size, gfp, node, align);
79547+
79548+ if (!ZERO_OR_NULL_PTR(ret))
79549+ kmemleak_alloc(ret, size, 1, gfp);
79550 return ret;
79551 }
79552 EXPORT_SYMBOL(__kmalloc_node);
79553@@ -528,13 +542,92 @@ void kfree(const void *block)
79554 sp = slob_page(block);
79555 if (is_slob_page(sp)) {
79556 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79557- unsigned int *m = (unsigned int *)(block - align);
79558- slob_free(m, *m + align);
79559- } else
79560+ slob_t *m = (slob_t *)(block - align);
79561+ slob_free(m, m[0].units + align);
79562+ } else {
79563+ clear_slob_page(sp);
79564+ free_slob_page(sp);
79565+ sp->size = 0;
79566 put_page(&sp->page);
79567+ }
79568 }
79569 EXPORT_SYMBOL(kfree);
79570
79571+void check_object_size(const void *ptr, unsigned long n, bool to)
79572+{
79573+
79574+#ifdef CONFIG_PAX_USERCOPY
79575+ struct slob_page *sp;
79576+ const slob_t *free;
79577+ const void *base;
79578+ unsigned long flags;
79579+ const char *type;
79580+
79581+ if (!n)
79582+ return;
79583+
79584+ type = "<null>";
79585+ if (ZERO_OR_NULL_PTR(ptr))
79586+ goto report;
79587+
79588+ if (!virt_addr_valid(ptr))
79589+ return;
79590+
79591+ type = "<process stack>";
79592+ sp = slob_page(ptr);
79593+ if (!PageSlab((struct page*)sp)) {
79594+ if (object_is_on_stack(ptr, n) == -1)
79595+ goto report;
79596+ return;
79597+ }
79598+
79599+ type = "<slob>";
79600+ if (sp->size) {
79601+ base = page_address(&sp->page);
79602+ if (base <= ptr && n <= sp->size - (ptr - base))
79603+ return;
79604+ goto report;
79605+ }
79606+
79607+ /* some tricky double walking to find the chunk */
79608+ spin_lock_irqsave(&slob_lock, flags);
79609+ base = (void *)((unsigned long)ptr & PAGE_MASK);
79610+ free = sp->free;
79611+
79612+ while (!slob_last(free) && (void *)free <= ptr) {
79613+ base = free + slob_units(free);
79614+ free = slob_next(free);
79615+ }
79616+
79617+ while (base < (void *)free) {
79618+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
79619+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
79620+ int offset;
79621+
79622+ if (ptr < base + align)
79623+ break;
79624+
79625+ offset = ptr - base - align;
79626+ if (offset >= m) {
79627+ base += size;
79628+ continue;
79629+ }
79630+
79631+ if (n > m - offset)
79632+ break;
79633+
79634+ spin_unlock_irqrestore(&slob_lock, flags);
79635+ return;
79636+ }
79637+
79638+ spin_unlock_irqrestore(&slob_lock, flags);
79639+report:
79640+ pax_report_usercopy(ptr, n, to, type);
79641+#endif
79642+
79643+}
79644+EXPORT_SYMBOL(check_object_size);
79645+
79646 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
79647 size_t ksize(const void *block)
79648 {
79649@@ -547,10 +640,10 @@ size_t ksize(const void *block)
79650 sp = slob_page(block);
79651 if (is_slob_page(sp)) {
79652 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
79653- unsigned int *m = (unsigned int *)(block - align);
79654- return SLOB_UNITS(*m) * SLOB_UNIT;
79655+ slob_t *m = (slob_t *)(block - align);
79656+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
79657 } else
79658- return sp->page.private;
79659+ return sp->size;
79660 }
79661 EXPORT_SYMBOL(ksize);
79662
79663@@ -566,8 +659,13 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79664 {
79665 struct kmem_cache *c;
79666
79667+#ifdef CONFIG_PAX_USERCOPY
79668+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
79669+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
79670+#else
79671 c = slob_alloc(sizeof(struct kmem_cache),
79672 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
79673+#endif
79674
79675 if (c) {
79676 c->name = name;
79677@@ -605,17 +703,25 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
79678 {
79679 void *b;
79680
79681+#ifdef CONFIG_PAX_USERCOPY
79682+ b = __kmalloc_node_align(c->size, flags, node, c->align);
79683+#else
79684 if (c->size < PAGE_SIZE) {
79685 b = slob_alloc(c->size, flags, c->align, node);
79686 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79687 SLOB_UNITS(c->size) * SLOB_UNIT,
79688 flags, node);
79689 } else {
79690+ struct slob_page *sp;
79691+
79692 b = slob_new_pages(flags, get_order(c->size), node);
79693+ sp = slob_page(b);
79694+ sp->size = c->size;
79695 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
79696 PAGE_SIZE << get_order(c->size),
79697 flags, node);
79698 }
79699+#endif
79700
79701 if (c->ctor)
79702 c->ctor(b);
79703@@ -627,10 +733,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
79704
79705 static void __kmem_cache_free(void *b, int size)
79706 {
79707- if (size < PAGE_SIZE)
79708+ struct slob_page *sp = slob_page(b);
79709+
79710+ if (is_slob_page(sp))
79711 slob_free(b, size);
79712- else
79713+ else {
79714+ clear_slob_page(sp);
79715+ free_slob_page(sp);
79716+ sp->size = 0;
79717 slob_free_pages(b, get_order(size));
79718+ }
79719 }
79720
79721 static void kmem_rcu_free(struct rcu_head *head)
79722@@ -643,18 +755,32 @@ static void kmem_rcu_free(struct rcu_head *head)
79723
79724 void kmem_cache_free(struct kmem_cache *c, void *b)
79725 {
79726+ int size = c->size;
79727+
79728+#ifdef CONFIG_PAX_USERCOPY
79729+ if (size + c->align < PAGE_SIZE) {
79730+ size += c->align;
79731+ b -= c->align;
79732+ }
79733+#endif
79734+
79735 kmemleak_free_recursive(b, c->flags);
79736 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
79737 struct slob_rcu *slob_rcu;
79738- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
79739+ slob_rcu = b + (size - sizeof(struct slob_rcu));
79740 INIT_RCU_HEAD(&slob_rcu->head);
79741- slob_rcu->size = c->size;
79742+ slob_rcu->size = size;
79743 call_rcu(&slob_rcu->head, kmem_rcu_free);
79744 } else {
79745- __kmem_cache_free(b, c->size);
79746+ __kmem_cache_free(b, size);
79747 }
79748
79749+#ifdef CONFIG_PAX_USERCOPY
79750+ trace_kfree(_RET_IP_, b);
79751+#else
79752 trace_kmem_cache_free(_RET_IP_, b);
79753+#endif
79754+
79755 }
79756 EXPORT_SYMBOL(kmem_cache_free);
79757
79758diff --git a/mm/slub.c b/mm/slub.c
79759index 4996fc7..87e01d0 100644
79760--- a/mm/slub.c
79761+++ b/mm/slub.c
79762@@ -201,7 +201,7 @@ struct track {
79763
79764 enum track_item { TRACK_ALLOC, TRACK_FREE };
79765
79766-#ifdef CONFIG_SLUB_DEBUG
79767+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79768 static int sysfs_slab_add(struct kmem_cache *);
79769 static int sysfs_slab_alias(struct kmem_cache *, const char *);
79770 static void sysfs_slab_remove(struct kmem_cache *);
79771@@ -410,7 +410,7 @@ static void print_track(const char *s, struct track *t)
79772 if (!t->addr)
79773 return;
79774
79775- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
79776+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
79777 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
79778 }
79779
79780@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *s, void *x)
79781
79782 page = virt_to_head_page(x);
79783
79784+ BUG_ON(!PageSlab(page));
79785+
79786 slab_free(s, page, x, _RET_IP_);
79787
79788 trace_kmem_cache_free(_RET_IP_, x);
79789@@ -1937,7 +1939,7 @@ static int slub_min_objects;
79790 * Merge control. If this is set then no merging of slab caches will occur.
79791 * (Could be removed. This was introduced to pacify the merge skeptics.)
79792 */
79793-static int slub_nomerge;
79794+static int slub_nomerge = 1;
79795
79796 /*
79797 * Calculate the order of allocation given an slab object size.
79798@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
79799 * list to avoid pounding the page allocator excessively.
79800 */
79801 set_min_partial(s, ilog2(s->size));
79802- s->refcount = 1;
79803+ atomic_set(&s->refcount, 1);
79804 #ifdef CONFIG_NUMA
79805 s->remote_node_defrag_ratio = 1000;
79806 #endif
79807@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struct kmem_cache *s)
79808 void kmem_cache_destroy(struct kmem_cache *s)
79809 {
79810 down_write(&slub_lock);
79811- s->refcount--;
79812- if (!s->refcount) {
79813+ if (atomic_dec_and_test(&s->refcount)) {
79814 list_del(&s->list);
79815 up_write(&slub_lock);
79816 if (kmem_cache_close(s)) {
79817@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(char *str)
79818 __setup("slub_nomerge", setup_slub_nomerge);
79819
79820 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
79821- const char *name, int size, gfp_t gfp_flags)
79822+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
79823 {
79824- unsigned int flags = 0;
79825-
79826 if (gfp_flags & SLUB_DMA)
79827- flags = SLAB_CACHE_DMA;
79828+ flags |= SLAB_CACHE_DMA;
79829
79830 /*
79831 * This function is called with IRQs disabled during early-boot on
79832@@ -2915,6 +2914,50 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
79833 EXPORT_SYMBOL(__kmalloc_node);
79834 #endif
79835
79836+void check_object_size(const void *ptr, unsigned long n, bool to)
79837+{
79838+
79839+#ifdef CONFIG_PAX_USERCOPY
79840+ struct page *page;
79841+ struct kmem_cache *s = NULL;
79842+ unsigned long offset;
79843+ const char *type;
79844+
79845+ if (!n)
79846+ return;
79847+
79848+ type = "<null>";
79849+ if (ZERO_OR_NULL_PTR(ptr))
79850+ goto report;
79851+
79852+ if (!virt_addr_valid(ptr))
79853+ return;
79854+
79855+ page = get_object_page(ptr);
79856+
79857+ type = "<process stack>";
79858+ if (!page) {
79859+ if (object_is_on_stack(ptr, n) == -1)
79860+ goto report;
79861+ return;
79862+ }
79863+
79864+ s = page->slab;
79865+ type = s->name;
79866+ if (!(s->flags & SLAB_USERCOPY))
79867+ goto report;
79868+
79869+ offset = (ptr - page_address(page)) % s->size;
79870+ if (offset <= s->objsize && n <= s->objsize - offset)
79871+ return;
79872+
79873+report:
79874+ pax_report_usercopy(ptr, n, to, type);
79875+#endif
79876+
79877+}
79878+EXPORT_SYMBOL(check_object_size);
79879+
79880 size_t ksize(const void *object)
79881 {
79882 struct page *page;
79883@@ -3185,8 +3228,8 @@ void __init kmem_cache_init(void)
79884 * kmem_cache_open for slab_state == DOWN.
79885 */
79886 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
79887- sizeof(struct kmem_cache_node), GFP_NOWAIT);
79888- kmalloc_caches[0].refcount = -1;
79889+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
79890+ atomic_set(&kmalloc_caches[0].refcount, -1);
79891 caches++;
79892
79893 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
79894@@ -3198,18 +3241,18 @@ void __init kmem_cache_init(void)
79895 /* Caches that are not of the two-to-the-power-of size */
79896 if (KMALLOC_MIN_SIZE <= 32) {
79897 create_kmalloc_cache(&kmalloc_caches[1],
79898- "kmalloc-96", 96, GFP_NOWAIT);
79899+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
79900 caches++;
79901 }
79902 if (KMALLOC_MIN_SIZE <= 64) {
79903 create_kmalloc_cache(&kmalloc_caches[2],
79904- "kmalloc-192", 192, GFP_NOWAIT);
79905+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
79906 caches++;
79907 }
79908
79909 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
79910 create_kmalloc_cache(&kmalloc_caches[i],
79911- "kmalloc", 1 << i, GFP_NOWAIT);
79912+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
79913 caches++;
79914 }
79915
79916@@ -3293,7 +3336,7 @@ static int slab_unmergeable(struct kmem_cache *s)
79917 /*
79918 * We may have set a slab to be unmergeable during bootstrap.
79919 */
79920- if (s->refcount < 0)
79921+ if (atomic_read(&s->refcount) < 0)
79922 return 1;
79923
79924 return 0;
79925@@ -3353,7 +3396,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79926 if (s) {
79927 int cpu;
79928
79929- s->refcount++;
79930+ atomic_inc(&s->refcount);
79931 /*
79932 * Adjust the object sizes so that we clear
79933 * the complete object on kzalloc.
79934@@ -3372,7 +3415,7 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
79935
79936 if (sysfs_slab_alias(s, name)) {
79937 down_write(&slub_lock);
79938- s->refcount--;
79939+ atomic_dec(&s->refcount);
79940 up_write(&slub_lock);
79941 goto err;
79942 }
79943@@ -4101,7 +4144,7 @@ SLAB_ATTR_RO(ctor);
79944
79945 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
79946 {
79947- return sprintf(buf, "%d\n", s->refcount - 1);
79948+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
79949 }
79950 SLAB_ATTR_RO(aliases);
79951
79952@@ -4503,7 +4546,7 @@ static void kmem_cache_release(struct kobject *kobj)
79953 kfree(s);
79954 }
79955
79956-static struct sysfs_ops slab_sysfs_ops = {
79957+static const struct sysfs_ops slab_sysfs_ops = {
79958 .show = slab_attr_show,
79959 .store = slab_attr_store,
79960 };
79961@@ -4522,7 +4565,7 @@ static int uevent_filter(struct kset *kset, struct kobject *kobj)
79962 return 0;
79963 }
79964
79965-static struct kset_uevent_ops slab_uevent_ops = {
79966+static const struct kset_uevent_ops slab_uevent_ops = {
79967 .filter = uevent_filter,
79968 };
79969
79970@@ -4564,6 +4607,7 @@ static char *create_unique_id(struct kmem_cache *s)
79971 return name;
79972 }
79973
79974+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79975 static int sysfs_slab_add(struct kmem_cache *s)
79976 {
79977 int err;
79978@@ -4619,6 +4663,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
79979 kobject_del(&s->kobj);
79980 kobject_put(&s->kobj);
79981 }
79982+#endif
79983
79984 /*
79985 * Need to buffer aliases during bootup until sysfs becomes
79986@@ -4632,6 +4677,7 @@ struct saved_alias {
79987
79988 static struct saved_alias *alias_list;
79989
79990+#if defined(CONFIG_SLUB_DEBUG) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
79991 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
79992 {
79993 struct saved_alias *al;
79994@@ -4654,6 +4700,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
79995 alias_list = al;
79996 return 0;
79997 }
79998+#endif
79999
80000 static int __init slab_sysfs_init(void)
80001 {
80002@@ -4785,7 +4832,13 @@ static const struct file_operations proc_slabinfo_operations = {
80003
80004 static int __init slab_proc_init(void)
80005 {
80006- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
80007+ mode_t gr_mode = S_IRUGO;
80008+
80009+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80010+ gr_mode = S_IRUSR;
80011+#endif
80012+
80013+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
80014 return 0;
80015 }
80016 module_init(slab_proc_init);
80017diff --git a/mm/swap.c b/mm/swap.c
80018index 308e57d..5de19c0 100644
80019--- a/mm/swap.c
80020+++ b/mm/swap.c
80021@@ -30,6 +30,7 @@
80022 #include <linux/notifier.h>
80023 #include <linux/backing-dev.h>
80024 #include <linux/memcontrol.h>
80025+#include <linux/hugetlb.h>
80026
80027 #include "internal.h"
80028
80029@@ -65,6 +66,8 @@ static void put_compound_page(struct page *page)
80030 compound_page_dtor *dtor;
80031
80032 dtor = get_compound_page_dtor(page);
80033+ if (!PageHuge(page))
80034+ BUG_ON(dtor != free_compound_page);
80035 (*dtor)(page);
80036 }
80037 }
80038diff --git a/mm/util.c b/mm/util.c
80039index e48b493..24a601d 100644
80040--- a/mm/util.c
80041+++ b/mm/util.c
80042@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
80043 void arch_pick_mmap_layout(struct mm_struct *mm)
80044 {
80045 mm->mmap_base = TASK_UNMAPPED_BASE;
80046+
80047+#ifdef CONFIG_PAX_RANDMMAP
80048+ if (mm->pax_flags & MF_PAX_RANDMMAP)
80049+ mm->mmap_base += mm->delta_mmap;
80050+#endif
80051+
80052 mm->get_unmapped_area = arch_get_unmapped_area;
80053 mm->unmap_area = arch_unmap_area;
80054 }
80055diff --git a/mm/vmalloc.c b/mm/vmalloc.c
80056index f34ffd0..e60c44f 100644
80057--- a/mm/vmalloc.c
80058+++ b/mm/vmalloc.c
80059@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
80060
80061 pte = pte_offset_kernel(pmd, addr);
80062 do {
80063- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80064- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80065+
80066+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80067+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
80068+ BUG_ON(!pte_exec(*pte));
80069+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
80070+ continue;
80071+ }
80072+#endif
80073+
80074+ {
80075+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
80076+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
80077+ }
80078 } while (pte++, addr += PAGE_SIZE, addr != end);
80079 }
80080
80081@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80082 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
80083 {
80084 pte_t *pte;
80085+ int ret = -ENOMEM;
80086
80087 /*
80088 * nr is a running index into the array which helps higher level
80089@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
80090 pte = pte_alloc_kernel(pmd, addr);
80091 if (!pte)
80092 return -ENOMEM;
80093+
80094+ pax_open_kernel();
80095 do {
80096 struct page *page = pages[*nr];
80097
80098- if (WARN_ON(!pte_none(*pte)))
80099- return -EBUSY;
80100- if (WARN_ON(!page))
80101- return -ENOMEM;
80102+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80103+ if (!(pgprot_val(prot) & _PAGE_NX))
80104+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
80105+ else
80106+#endif
80107+
80108+ if (WARN_ON(!pte_none(*pte))) {
80109+ ret = -EBUSY;
80110+ goto out;
80111+ }
80112+ if (WARN_ON(!page)) {
80113+ ret = -ENOMEM;
80114+ goto out;
80115+ }
80116 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
80117 (*nr)++;
80118 } while (pte++, addr += PAGE_SIZE, addr != end);
80119- return 0;
80120+ ret = 0;
80121+out:
80122+ pax_close_kernel();
80123+ return ret;
80124 }
80125
80126 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
80127@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void *x)
80128 * and fall back on vmalloc() if that fails. Others
80129 * just put it in the vmalloc space.
80130 */
80131-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
80132+#ifdef CONFIG_MODULES
80133+#ifdef MODULES_VADDR
80134 unsigned long addr = (unsigned long)x;
80135 if (addr >= MODULES_VADDR && addr < MODULES_END)
80136 return 1;
80137 #endif
80138+
80139+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
80140+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
80141+ return 1;
80142+#endif
80143+
80144+#endif
80145+
80146 return is_vmalloc_addr(x);
80147 }
80148
80149@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
80150
80151 if (!pgd_none(*pgd)) {
80152 pud_t *pud = pud_offset(pgd, addr);
80153+#ifdef CONFIG_X86
80154+ if (!pud_large(*pud))
80155+#endif
80156 if (!pud_none(*pud)) {
80157 pmd_t *pmd = pmd_offset(pud, addr);
80158+#ifdef CONFIG_X86
80159+ if (!pmd_large(*pmd))
80160+#endif
80161 if (!pmd_none(*pmd)) {
80162 pte_t *ptep, pte;
80163
80164@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vmap_area *va)
80165 struct rb_node *tmp;
80166
80167 while (*p) {
80168- struct vmap_area *tmp;
80169+ struct vmap_area *varea;
80170
80171 parent = *p;
80172- tmp = rb_entry(parent, struct vmap_area, rb_node);
80173- if (va->va_start < tmp->va_end)
80174+ varea = rb_entry(parent, struct vmap_area, rb_node);
80175+ if (va->va_start < varea->va_end)
80176 p = &(*p)->rb_left;
80177- else if (va->va_end > tmp->va_start)
80178+ else if (va->va_end > varea->va_start)
80179 p = &(*p)->rb_right;
80180 else
80181 BUG();
80182@@ -1245,6 +1287,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
80183 struct vm_struct *area;
80184
80185 BUG_ON(in_interrupt());
80186+
80187+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80188+ if (flags & VM_KERNEXEC) {
80189+ if (start != VMALLOC_START || end != VMALLOC_END)
80190+ return NULL;
80191+ start = (unsigned long)MODULES_EXEC_VADDR;
80192+ end = (unsigned long)MODULES_EXEC_END;
80193+ }
80194+#endif
80195+
80196 if (flags & VM_IOREMAP) {
80197 int bit = fls(size);
80198
80199@@ -1484,6 +1536,11 @@ void *vmap(struct page **pages, unsigned int count,
80200 if (count > totalram_pages)
80201 return NULL;
80202
80203+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80204+ if (!(pgprot_val(prot) & _PAGE_NX))
80205+ flags |= VM_KERNEXEC;
80206+#endif
80207+
80208 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
80209 __builtin_return_address(0));
80210 if (!area)
80211@@ -1594,6 +1651,14 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80212 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
80213 return NULL;
80214
80215+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
80216+ if (!(pgprot_val(prot) & _PAGE_NX))
80217+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
80218+ VMALLOC_START, VMALLOC_END, node,
80219+ gfp_mask, caller);
80220+ else
80221+#endif
80222+
80223 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
80224 VMALLOC_START, VMALLOC_END, node,
80225 gfp_mask, caller);
80226@@ -1619,6 +1684,7 @@ static void *__vmalloc_node(unsigned long size, unsigned long align,
80227 return addr;
80228 }
80229
80230+#undef __vmalloc
80231 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
80232 {
80233 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
80234@@ -1635,6 +1701,7 @@ EXPORT_SYMBOL(__vmalloc);
80235 * For tight control over page level allocator and protection flags
80236 * use __vmalloc() instead.
80237 */
80238+#undef vmalloc
80239 void *vmalloc(unsigned long size)
80240 {
80241 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80242@@ -1649,6 +1716,7 @@ EXPORT_SYMBOL(vmalloc);
80243 * The resulting memory area is zeroed so it can be mapped to userspace
80244 * without leaking data.
80245 */
80246+#undef vmalloc_user
80247 void *vmalloc_user(unsigned long size)
80248 {
80249 struct vm_struct *area;
80250@@ -1676,6 +1744,7 @@ EXPORT_SYMBOL(vmalloc_user);
80251 * For tight control over page level allocator and protection flags
80252 * use __vmalloc() instead.
80253 */
80254+#undef vmalloc_node
80255 void *vmalloc_node(unsigned long size, int node)
80256 {
80257 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
80258@@ -1698,10 +1767,10 @@ EXPORT_SYMBOL(vmalloc_node);
80259 * For tight control over page level allocator and protection flags
80260 * use __vmalloc() instead.
80261 */
80262-
80263+#undef vmalloc_exec
80264 void *vmalloc_exec(unsigned long size)
80265 {
80266- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
80267+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
80268 -1, __builtin_return_address(0));
80269 }
80270
80271@@ -1720,6 +1789,7 @@ void *vmalloc_exec(unsigned long size)
80272 * Allocate enough 32bit PA addressable pages to cover @size from the
80273 * page level allocator and map them into contiguous kernel virtual space.
80274 */
80275+#undef vmalloc_32
80276 void *vmalloc_32(unsigned long size)
80277 {
80278 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
80279@@ -1734,6 +1804,7 @@ EXPORT_SYMBOL(vmalloc_32);
80280 * The resulting memory area is 32bit addressable and zeroed so it can be
80281 * mapped to userspace without leaking data.
80282 */
80283+#undef vmalloc_32_user
80284 void *vmalloc_32_user(unsigned long size)
80285 {
80286 struct vm_struct *area;
80287@@ -1998,6 +2069,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
80288 unsigned long uaddr = vma->vm_start;
80289 unsigned long usize = vma->vm_end - vma->vm_start;
80290
80291+ BUG_ON(vma->vm_mirror);
80292+
80293 if ((PAGE_SIZE-1) & (unsigned long)addr)
80294 return -EINVAL;
80295
80296diff --git a/mm/vmstat.c b/mm/vmstat.c
80297index 42d76c6..5643dc4 100644
80298--- a/mm/vmstat.c
80299+++ b/mm/vmstat.c
80300@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
80301 *
80302 * vm_stat contains the global counters
80303 */
80304-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80305+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
80306 EXPORT_SYMBOL(vm_stat);
80307
80308 #ifdef CONFIG_SMP
80309@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
80310 v = p->vm_stat_diff[i];
80311 p->vm_stat_diff[i] = 0;
80312 local_irq_restore(flags);
80313- atomic_long_add(v, &zone->vm_stat[i]);
80314+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
80315 global_diff[i] += v;
80316 #ifdef CONFIG_NUMA
80317 /* 3 seconds idle till flush */
80318@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
80319
80320 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
80321 if (global_diff[i])
80322- atomic_long_add(global_diff[i], &vm_stat[i]);
80323+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
80324 }
80325
80326 #endif
80327@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
80328 start_cpu_timer(cpu);
80329 #endif
80330 #ifdef CONFIG_PROC_FS
80331- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
80332- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
80333- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
80334- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
80335+ {
80336+ mode_t gr_mode = S_IRUGO;
80337+#ifdef CONFIG_GRKERNSEC_PROC_ADD
80338+ gr_mode = S_IRUSR;
80339+#endif
80340+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
80341+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
80342+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
80343+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
80344+#else
80345+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
80346+#endif
80347+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
80348+ }
80349 #endif
80350 return 0;
80351 }
80352diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
80353index a29c5ab..6143f20 100644
80354--- a/net/8021q/vlan.c
80355+++ b/net/8021q/vlan.c
80356@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
80357 err = -EPERM;
80358 if (!capable(CAP_NET_ADMIN))
80359 break;
80360- if ((args.u.name_type >= 0) &&
80361- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
80362+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
80363 struct vlan_net *vn;
80364
80365 vn = net_generic(net, vlan_net_id);
80366diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
80367index a2d2984..f9eb711 100644
80368--- a/net/9p/trans_fd.c
80369+++ b/net/9p/trans_fd.c
80370@@ -419,7 +419,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
80371 oldfs = get_fs();
80372 set_fs(get_ds());
80373 /* The cast to a user pointer is valid due to the set_fs() */
80374- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
80375+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
80376 set_fs(oldfs);
80377
80378 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
80379diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
80380index 02cc7e7..4514f1b 100644
80381--- a/net/atm/atm_misc.c
80382+++ b/net/atm/atm_misc.c
80383@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int truesize)
80384 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
80385 return 1;
80386 atm_return(vcc,truesize);
80387- atomic_inc(&vcc->stats->rx_drop);
80388+ atomic_inc_unchecked(&vcc->stats->rx_drop);
80389 return 0;
80390 }
80391
80392@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc,int pdu_size,
80393 }
80394 }
80395 atm_return(vcc,guess);
80396- atomic_inc(&vcc->stats->rx_drop);
80397+ atomic_inc_unchecked(&vcc->stats->rx_drop);
80398 return NULL;
80399 }
80400
80401@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafprm *tp)
80402
80403 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80404 {
80405-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80406+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80407 __SONET_ITEMS
80408 #undef __HANDLE_ITEM
80409 }
80410@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80411
80412 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
80413 {
80414-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
80415+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
80416 __SONET_ITEMS
80417 #undef __HANDLE_ITEM
80418 }
80419diff --git a/net/atm/lec.h b/net/atm/lec.h
80420index 9d14d19..5c145f3 100644
80421--- a/net/atm/lec.h
80422+++ b/net/atm/lec.h
80423@@ -48,7 +48,7 @@ struct lane2_ops {
80424 const u8 *tlvs, u32 sizeoftlvs);
80425 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
80426 const u8 *tlvs, u32 sizeoftlvs);
80427-};
80428+} __no_const;
80429
80430 /*
80431 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
80432diff --git a/net/atm/mpc.h b/net/atm/mpc.h
80433index 0919a88..a23d54e 100644
80434--- a/net/atm/mpc.h
80435+++ b/net/atm/mpc.h
80436@@ -33,7 +33,7 @@ struct mpoa_client {
80437 struct mpc_parameters parameters; /* parameters for this client */
80438
80439 const struct net_device_ops *old_ops;
80440- struct net_device_ops new_ops;
80441+ net_device_ops_no_const new_ops;
80442 };
80443
80444
80445diff --git a/net/atm/mpoa_caches.c b/net/atm/mpoa_caches.c
80446index 4504a4b..1733f1e 100644
80447--- a/net/atm/mpoa_caches.c
80448+++ b/net/atm/mpoa_caches.c
80449@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_client *client)
80450 struct timeval now;
80451 struct k_message msg;
80452
80453+ pax_track_stack();
80454+
80455 do_gettimeofday(&now);
80456
80457 write_lock_irq(&client->egress_lock);
80458diff --git a/net/atm/proc.c b/net/atm/proc.c
80459index ab8419a..aa91497 100644
80460--- a/net/atm/proc.c
80461+++ b/net/atm/proc.c
80462@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
80463 const struct k_atm_aal_stats *stats)
80464 {
80465 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
80466- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
80467- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
80468- atomic_read(&stats->rx_drop));
80469+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
80470+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
80471+ atomic_read_unchecked(&stats->rx_drop));
80472 }
80473
80474 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
80475@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *seq, struct atm_vcc *vcc)
80476 {
80477 struct sock *sk = sk_atm(vcc);
80478
80479+#ifdef CONFIG_GRKERNSEC_HIDESYM
80480+ seq_printf(seq, "%p ", NULL);
80481+#else
80482 seq_printf(seq, "%p ", vcc);
80483+#endif
80484+
80485 if (!vcc->dev)
80486 seq_printf(seq, "Unassigned ");
80487 else
80488@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *seq, struct atm_vcc *vcc)
80489 {
80490 if (!vcc->dev)
80491 seq_printf(seq, sizeof(void *) == 4 ?
80492+#ifdef CONFIG_GRKERNSEC_HIDESYM
80493+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
80494+#else
80495 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
80496+#endif
80497 else
80498 seq_printf(seq, "%3d %3d %5d ",
80499 vcc->dev->number, vcc->vpi, vcc->vci);
80500diff --git a/net/atm/resources.c b/net/atm/resources.c
80501index 56b7322..c48b84e 100644
80502--- a/net/atm/resources.c
80503+++ b/net/atm/resources.c
80504@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *dev)
80505 static void copy_aal_stats(struct k_atm_aal_stats *from,
80506 struct atm_aal_stats *to)
80507 {
80508-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
80509+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
80510 __AAL_STAT_ITEMS
80511 #undef __HANDLE_ITEM
80512 }
80513@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
80514 static void subtract_aal_stats(struct k_atm_aal_stats *from,
80515 struct atm_aal_stats *to)
80516 {
80517-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
80518+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
80519 __AAL_STAT_ITEMS
80520 #undef __HANDLE_ITEM
80521 }
80522diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
80523index 8567d47..bba2292 100644
80524--- a/net/bridge/br_private.h
80525+++ b/net/bridge/br_private.h
80526@@ -255,7 +255,7 @@ extern void br_ifinfo_notify(int event, struct net_bridge_port *port);
80527
80528 #ifdef CONFIG_SYSFS
80529 /* br_sysfs_if.c */
80530-extern struct sysfs_ops brport_sysfs_ops;
80531+extern const struct sysfs_ops brport_sysfs_ops;
80532 extern int br_sysfs_addif(struct net_bridge_port *p);
80533
80534 /* br_sysfs_br.c */
80535diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
80536index 9a52ac5..c97538e 100644
80537--- a/net/bridge/br_stp_if.c
80538+++ b/net/bridge/br_stp_if.c
80539@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridge *br)
80540 char *envp[] = { NULL };
80541
80542 if (br->stp_enabled == BR_USER_STP) {
80543- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
80544+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
80545 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
80546 br->dev->name, r);
80547
80548diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
80549index 820643a..ce77fb3 100644
80550--- a/net/bridge/br_sysfs_if.c
80551+++ b/net/bridge/br_sysfs_if.c
80552@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobject * kobj,
80553 return ret;
80554 }
80555
80556-struct sysfs_ops brport_sysfs_ops = {
80557+const struct sysfs_ops brport_sysfs_ops = {
80558 .show = brport_show,
80559 .store = brport_store,
80560 };
80561diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
80562index d73d47f..72df42a 100644
80563--- a/net/bridge/netfilter/ebtables.c
80564+++ b/net/bridge/netfilter/ebtables.c
80565@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struct ebt_table *t, void __user *user,
80566 unsigned int entries_size, nentries;
80567 char *entries;
80568
80569+ pax_track_stack();
80570+
80571 if (cmd == EBT_SO_GET_ENTRIES) {
80572 entries_size = t->private->entries_size;
80573 nentries = t->private->nentries;
80574diff --git a/net/can/bcm.c b/net/can/bcm.c
80575index 2ffd2e0..72a7486 100644
80576--- a/net/can/bcm.c
80577+++ b/net/can/bcm.c
80578@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file *m, void *v)
80579 struct bcm_sock *bo = bcm_sk(sk);
80580 struct bcm_op *op;
80581
80582+#ifdef CONFIG_GRKERNSEC_HIDESYM
80583+ seq_printf(m, ">>> socket %p", NULL);
80584+ seq_printf(m, " / sk %p", NULL);
80585+ seq_printf(m, " / bo %p", NULL);
80586+#else
80587 seq_printf(m, ">>> socket %p", sk->sk_socket);
80588 seq_printf(m, " / sk %p", sk);
80589 seq_printf(m, " / bo %p", bo);
80590+#endif
80591 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
80592 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
80593 seq_printf(m, " <<<\n");
80594diff --git a/net/compat.c b/net/compat.c
80595index 9559afc..ccd74e1 100644
80596--- a/net/compat.c
80597+++ b/net/compat.c
80598@@ -69,9 +69,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
80599 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
80600 __get_user(kmsg->msg_flags, &umsg->msg_flags))
80601 return -EFAULT;
80602- kmsg->msg_name = compat_ptr(tmp1);
80603- kmsg->msg_iov = compat_ptr(tmp2);
80604- kmsg->msg_control = compat_ptr(tmp3);
80605+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
80606+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
80607+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
80608 return 0;
80609 }
80610
80611@@ -94,7 +94,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80612 kern_msg->msg_name = NULL;
80613
80614 tot_len = iov_from_user_compat_to_kern(kern_iov,
80615- (struct compat_iovec __user *)kern_msg->msg_iov,
80616+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
80617 kern_msg->msg_iovlen);
80618 if (tot_len >= 0)
80619 kern_msg->msg_iov = kern_iov;
80620@@ -114,20 +114,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
80621
80622 #define CMSG_COMPAT_FIRSTHDR(msg) \
80623 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
80624- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
80625+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
80626 (struct compat_cmsghdr __user *)NULL)
80627
80628 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
80629 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
80630 (ucmlen) <= (unsigned long) \
80631 ((mhdr)->msg_controllen - \
80632- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
80633+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
80634
80635 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
80636 struct compat_cmsghdr __user *cmsg, int cmsg_len)
80637 {
80638 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
80639- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
80640+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
80641 msg->msg_controllen)
80642 return NULL;
80643 return (struct compat_cmsghdr __user *)ptr;
80644@@ -219,7 +219,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80645 {
80646 struct compat_timeval ctv;
80647 struct compat_timespec cts[3];
80648- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80649+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80650 struct compat_cmsghdr cmhdr;
80651 int cmlen;
80652
80653@@ -271,7 +271,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
80654
80655 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
80656 {
80657- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
80658+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
80659 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
80660 int fdnum = scm->fp->count;
80661 struct file **fp = scm->fp->fp;
80662@@ -433,7 +433,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
80663 len = sizeof(ktime);
80664 old_fs = get_fs();
80665 set_fs(KERNEL_DS);
80666- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
80667+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
80668 set_fs(old_fs);
80669
80670 if (!err) {
80671@@ -570,7 +570,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80672 case MCAST_JOIN_GROUP:
80673 case MCAST_LEAVE_GROUP:
80674 {
80675- struct compat_group_req __user *gr32 = (void *)optval;
80676+ struct compat_group_req __user *gr32 = (void __user *)optval;
80677 struct group_req __user *kgr =
80678 compat_alloc_user_space(sizeof(struct group_req));
80679 u32 interface;
80680@@ -591,7 +591,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80681 case MCAST_BLOCK_SOURCE:
80682 case MCAST_UNBLOCK_SOURCE:
80683 {
80684- struct compat_group_source_req __user *gsr32 = (void *)optval;
80685+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
80686 struct group_source_req __user *kgsr = compat_alloc_user_space(
80687 sizeof(struct group_source_req));
80688 u32 interface;
80689@@ -612,7 +612,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
80690 }
80691 case MCAST_MSFILTER:
80692 {
80693- struct compat_group_filter __user *gf32 = (void *)optval;
80694+ struct compat_group_filter __user *gf32 = (void __user *)optval;
80695 struct group_filter __user *kgf;
80696 u32 interface, fmode, numsrc;
80697
80698diff --git a/net/core/dev.c b/net/core/dev.c
80699index 84a0705..575db4c 100644
80700--- a/net/core/dev.c
80701+++ b/net/core/dev.c
80702@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const char *name)
80703 if (no_module && capable(CAP_NET_ADMIN))
80704 no_module = request_module("netdev-%s", name);
80705 if (no_module && capable(CAP_SYS_MODULE)) {
80706+#ifdef CONFIG_GRKERNSEC_MODHARDEN
80707+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
80708+#else
80709 if (!request_module("%s", name))
80710 pr_err("Loading kernel module for a network device "
80711 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
80712 "instead\n", name);
80713+#endif
80714 }
80715 }
80716 EXPORT_SYMBOL(dev_load);
80717@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
80718
80719 struct dev_gso_cb {
80720 void (*destructor)(struct sk_buff *skb);
80721-};
80722+} __no_const;
80723
80724 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
80725
80726@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
80727 }
80728 EXPORT_SYMBOL(netif_rx_ni);
80729
80730-static void net_tx_action(struct softirq_action *h)
80731+static void net_tx_action(void)
80732 {
80733 struct softnet_data *sd = &__get_cpu_var(softnet_data);
80734
80735@@ -2827,7 +2831,7 @@ void netif_napi_del(struct napi_struct *napi)
80736 EXPORT_SYMBOL(netif_napi_del);
80737
80738
80739-static void net_rx_action(struct softirq_action *h)
80740+static void net_rx_action(void)
80741 {
80742 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
80743 unsigned long time_limit = jiffies + 2;
80744diff --git a/net/core/flow.c b/net/core/flow.c
80745index 9601587..8c4824e 100644
80746--- a/net/core/flow.c
80747+++ b/net/core/flow.c
80748@@ -35,11 +35,11 @@ struct flow_cache_entry {
80749 atomic_t *object_ref;
80750 };
80751
80752-atomic_t flow_cache_genid = ATOMIC_INIT(0);
80753+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
80754
80755 static u32 flow_hash_shift;
80756 #define flow_hash_size (1 << flow_hash_shift)
80757-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
80758+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
80759
80760 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
80761
80762@@ -52,7 +52,7 @@ struct flow_percpu_info {
80763 u32 hash_rnd;
80764 int count;
80765 };
80766-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
80767+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
80768
80769 #define flow_hash_rnd_recalc(cpu) \
80770 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
80771@@ -69,7 +69,7 @@ struct flow_flush_info {
80772 atomic_t cpuleft;
80773 struct completion completion;
80774 };
80775-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
80776+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
80777
80778 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
80779
80780@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family, u8 dir,
80781 if (fle->family == family &&
80782 fle->dir == dir &&
80783 flow_key_compare(key, &fle->key) == 0) {
80784- if (fle->genid == atomic_read(&flow_cache_genid)) {
80785+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
80786 void *ret = fle->object;
80787
80788 if (ret)
80789@@ -228,7 +228,7 @@ nocache:
80790 err = resolver(net, key, family, dir, &obj, &obj_ref);
80791
80792 if (fle && !err) {
80793- fle->genid = atomic_read(&flow_cache_genid);
80794+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
80795
80796 if (fle->object)
80797 atomic_dec(fle->object_ref);
80798@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(unsigned long data)
80799
80800 fle = flow_table(cpu)[i];
80801 for (; fle; fle = fle->next) {
80802- unsigned genid = atomic_read(&flow_cache_genid);
80803+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
80804
80805 if (!fle->object || fle->genid == genid)
80806 continue;
80807diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
80808index d4fd895..ac9b1e6 100644
80809--- a/net/core/rtnetlink.c
80810+++ b/net/core/rtnetlink.c
80811@@ -57,7 +57,7 @@ struct rtnl_link
80812 {
80813 rtnl_doit_func doit;
80814 rtnl_dumpit_func dumpit;
80815-};
80816+} __no_const;
80817
80818 static DEFINE_MUTEX(rtnl_mutex);
80819
80820diff --git a/net/core/scm.c b/net/core/scm.c
80821index d98eafc..1a190a9 100644
80822--- a/net/core/scm.c
80823+++ b/net/core/scm.c
80824@@ -191,7 +191,7 @@ error:
80825 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80826 {
80827 struct cmsghdr __user *cm
80828- = (__force struct cmsghdr __user *)msg->msg_control;
80829+ = (struct cmsghdr __force_user *)msg->msg_control;
80830 struct cmsghdr cmhdr;
80831 int cmlen = CMSG_LEN(len);
80832 int err;
80833@@ -214,7 +214,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
80834 err = -EFAULT;
80835 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
80836 goto out;
80837- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
80838+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
80839 goto out;
80840 cmlen = CMSG_SPACE(len);
80841 if (msg->msg_controllen < cmlen)
80842@@ -229,7 +229,7 @@ out:
80843 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
80844 {
80845 struct cmsghdr __user *cm
80846- = (__force struct cmsghdr __user*)msg->msg_control;
80847+ = (struct cmsghdr __force_user *)msg->msg_control;
80848
80849 int fdmax = 0;
80850 int fdnum = scm->fp->count;
80851@@ -249,7 +249,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
80852 if (fdnum < fdmax)
80853 fdmax = fdnum;
80854
80855- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
80856+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
80857 i++, cmfptr++)
80858 {
80859 int new_fd;
80860diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
80861index 45329d7..626aaa6 100644
80862--- a/net/core/secure_seq.c
80863+++ b/net/core/secure_seq.c
80864@@ -57,7 +57,7 @@ __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
80865 EXPORT_SYMBOL(secure_tcpv6_sequence_number);
80866
80867 u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
80868- __be16 dport)
80869+ __be16 dport)
80870 {
80871 u32 secret[MD5_MESSAGE_BYTES / 4];
80872 u32 hash[MD5_DIGEST_WORDS];
80873@@ -71,7 +71,6 @@ u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
80874 secret[i] = net_secret[i];
80875
80876 md5_transform(hash, secret);
80877-
80878 return hash[0];
80879 }
80880 #endif
80881diff --git a/net/core/skbuff.c b/net/core/skbuff.c
80882index a807f8c..65f906f 100644
80883--- a/net/core/skbuff.c
80884+++ b/net/core/skbuff.c
80885@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset,
80886 struct sk_buff *frag_iter;
80887 struct sock *sk = skb->sk;
80888
80889+ pax_track_stack();
80890+
80891 /*
80892 * __skb_splice_bits() only fails if the output has no room left,
80893 * so no point in going over the frag_list for the error case.
80894diff --git a/net/core/sock.c b/net/core/sock.c
80895index 6605e75..3acebda 100644
80896--- a/net/core/sock.c
80897+++ b/net/core/sock.c
80898@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
80899 break;
80900
80901 case SO_PEERCRED:
80902+ {
80903+ struct ucred peercred;
80904 if (len > sizeof(sk->sk_peercred))
80905 len = sizeof(sk->sk_peercred);
80906- if (copy_to_user(optval, &sk->sk_peercred, len))
80907+ peercred = sk->sk_peercred;
80908+ if (copy_to_user(optval, &peercred, len))
80909 return -EFAULT;
80910 goto lenout;
80911+ }
80912
80913 case SO_PEERNAME:
80914 {
80915@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
80916 */
80917 smp_wmb();
80918 atomic_set(&sk->sk_refcnt, 1);
80919- atomic_set(&sk->sk_drops, 0);
80920+ atomic_set_unchecked(&sk->sk_drops, 0);
80921 }
80922 EXPORT_SYMBOL(sock_init_data);
80923
80924diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
80925index 2036568..c55883d 100644
80926--- a/net/decnet/sysctl_net_decnet.c
80927+++ b/net/decnet/sysctl_net_decnet.c
80928@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
80929
80930 if (len > *lenp) len = *lenp;
80931
80932- if (copy_to_user(buffer, addr, len))
80933+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
80934 return -EFAULT;
80935
80936 *lenp = len;
80937@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
80938
80939 if (len > *lenp) len = *lenp;
80940
80941- if (copy_to_user(buffer, devname, len))
80942+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
80943 return -EFAULT;
80944
80945 *lenp = len;
80946diff --git a/net/econet/Kconfig b/net/econet/Kconfig
80947index 39a2d29..f39c0fe 100644
80948--- a/net/econet/Kconfig
80949+++ b/net/econet/Kconfig
80950@@ -4,7 +4,7 @@
80951
80952 config ECONET
80953 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
80954- depends on EXPERIMENTAL && INET
80955+ depends on EXPERIMENTAL && INET && BROKEN
80956 ---help---
80957 Econet is a fairly old and slow networking protocol mainly used by
80958 Acorn computers to access file and print servers. It uses native
80959diff --git a/net/ieee802154/dgram.c b/net/ieee802154/dgram.c
80960index a413b1b..380849c 100644
80961--- a/net/ieee802154/dgram.c
80962+++ b/net/ieee802154/dgram.c
80963@@ -318,7 +318,7 @@ out:
80964 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
80965 {
80966 if (sock_queue_rcv_skb(sk, skb) < 0) {
80967- atomic_inc(&sk->sk_drops);
80968+ atomic_inc_unchecked(&sk->sk_drops);
80969 kfree_skb(skb);
80970 return NET_RX_DROP;
80971 }
80972diff --git a/net/ieee802154/raw.c b/net/ieee802154/raw.c
80973index 30e74ee..bfc6ee0 100644
80974--- a/net/ieee802154/raw.c
80975+++ b/net/ieee802154/raw.c
80976@@ -206,7 +206,7 @@ out:
80977 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
80978 {
80979 if (sock_queue_rcv_skb(sk, skb) < 0) {
80980- atomic_inc(&sk->sk_drops);
80981+ atomic_inc_unchecked(&sk->sk_drops);
80982 kfree_skb(skb);
80983 return NET_RX_DROP;
80984 }
80985diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
80986index dba56d2..acee5d6 100644
80987--- a/net/ipv4/inet_diag.c
80988+++ b/net/ipv4/inet_diag.c
80989@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct sock *sk,
80990 r->idiag_retrans = 0;
80991
80992 r->id.idiag_if = sk->sk_bound_dev_if;
80993+#ifdef CONFIG_GRKERNSEC_HIDESYM
80994+ r->id.idiag_cookie[0] = 0;
80995+ r->id.idiag_cookie[1] = 0;
80996+#else
80997 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
80998 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
80999+#endif
81000
81001 r->id.idiag_sport = inet->sport;
81002 r->id.idiag_dport = inet->dport;
81003@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
81004 r->idiag_family = tw->tw_family;
81005 r->idiag_retrans = 0;
81006 r->id.idiag_if = tw->tw_bound_dev_if;
81007+
81008+#ifdef CONFIG_GRKERNSEC_HIDESYM
81009+ r->id.idiag_cookie[0] = 0;
81010+ r->id.idiag_cookie[1] = 0;
81011+#else
81012 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
81013 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
81014+#endif
81015+
81016 r->id.idiag_sport = tw->tw_sport;
81017 r->id.idiag_dport = tw->tw_dport;
81018 r->id.idiag_src[0] = tw->tw_rcv_saddr;
81019@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk_buff *in_skb,
81020 if (sk == NULL)
81021 goto unlock;
81022
81023+#ifndef CONFIG_GRKERNSEC_HIDESYM
81024 err = -ESTALE;
81025 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
81026 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
81027 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
81028 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
81029 goto out;
81030+#endif
81031
81032 err = -ENOMEM;
81033 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
81034@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
81035 r->idiag_retrans = req->retrans;
81036
81037 r->id.idiag_if = sk->sk_bound_dev_if;
81038+
81039+#ifdef CONFIG_GRKERNSEC_HIDESYM
81040+ r->id.idiag_cookie[0] = 0;
81041+ r->id.idiag_cookie[1] = 0;
81042+#else
81043 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
81044 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
81045+#endif
81046
81047 tmo = req->expires - jiffies;
81048 if (tmo < 0)
81049diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
81050index d717267..56de7e7 100644
81051--- a/net/ipv4/inet_hashtables.c
81052+++ b/net/ipv4/inet_hashtables.c
81053@@ -18,12 +18,15 @@
81054 #include <linux/sched.h>
81055 #include <linux/slab.h>
81056 #include <linux/wait.h>
81057+#include <linux/security.h>
81058
81059 #include <net/inet_connection_sock.h>
81060 #include <net/inet_hashtables.h>
81061 #include <net/secure_seq.h>
81062 #include <net/ip.h>
81063
81064+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
81065+
81066 /*
81067 * Allocate and initialize a new local port bind bucket.
81068 * The bindhash mutex for snum's hash chain must be held here.
81069@@ -491,6 +494,8 @@ ok:
81070 }
81071 spin_unlock(&head->lock);
81072
81073+ gr_update_task_in_ip_table(current, inet_sk(sk));
81074+
81075 if (tw) {
81076 inet_twsk_deschedule(tw, death_row);
81077 inet_twsk_put(tw);
81078diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
81079index 13b229f..6956484 100644
81080--- a/net/ipv4/inetpeer.c
81081+++ b/net/ipv4/inetpeer.c
81082@@ -367,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81083 struct inet_peer *p, *n;
81084 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
81085
81086+ pax_track_stack();
81087+
81088 /* Look up for the address quickly. */
81089 read_lock_bh(&peer_pool_lock);
81090 p = lookup(daddr, NULL);
81091@@ -390,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 daddr, int create)
81092 return NULL;
81093 n->v4daddr = daddr;
81094 atomic_set(&n->refcnt, 1);
81095- atomic_set(&n->rid, 0);
81096+ atomic_set_unchecked(&n->rid, 0);
81097 n->ip_id_count = secure_ip_id(daddr);
81098 n->tcp_ts_stamp = 0;
81099
81100diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
81101index d3fe10b..feeafc9 100644
81102--- a/net/ipv4/ip_fragment.c
81103+++ b/net/ipv4/ip_fragment.c
81104@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
81105 return 0;
81106
81107 start = qp->rid;
81108- end = atomic_inc_return(&peer->rid);
81109+ end = atomic_inc_return_unchecked(&peer->rid);
81110 qp->rid = end;
81111
81112 rc = qp->q.fragments && (end - start) > max;
81113diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
81114index e982b5c..f079d75 100644
81115--- a/net/ipv4/ip_sockglue.c
81116+++ b/net/ipv4/ip_sockglue.c
81117@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81118 int val;
81119 int len;
81120
81121+ pax_track_stack();
81122+
81123 if (level != SOL_IP)
81124 return -EOPNOTSUPP;
81125
81126@@ -1173,7 +1175,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
81127 if (sk->sk_type != SOCK_STREAM)
81128 return -ENOPROTOOPT;
81129
81130- msg.msg_control = optval;
81131+ msg.msg_control = (void __force_kernel *)optval;
81132 msg.msg_controllen = len;
81133 msg.msg_flags = 0;
81134
81135diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
81136index f8d04c2..c1188f2 100644
81137--- a/net/ipv4/ipconfig.c
81138+++ b/net/ipv4/ipconfig.c
81139@@ -295,7 +295,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
81140
81141 mm_segment_t oldfs = get_fs();
81142 set_fs(get_ds());
81143- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81144+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81145 set_fs(oldfs);
81146 return res;
81147 }
81148@@ -306,7 +306,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
81149
81150 mm_segment_t oldfs = get_fs();
81151 set_fs(get_ds());
81152- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
81153+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
81154 set_fs(oldfs);
81155 return res;
81156 }
81157@@ -317,7 +317,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
81158
81159 mm_segment_t oldfs = get_fs();
81160 set_fs(get_ds());
81161- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
81162+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
81163 set_fs(oldfs);
81164 return res;
81165 }
81166diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
81167index c8b0cc3..4da5ae2 100644
81168--- a/net/ipv4/netfilter/arp_tables.c
81169+++ b/net/ipv4/netfilter/arp_tables.c
81170@@ -934,6 +934,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81171 private = &tmp;
81172 }
81173 #endif
81174+ memset(&info, 0, sizeof(info));
81175 info.valid_hooks = t->valid_hooks;
81176 memcpy(info.hook_entry, private->hook_entry,
81177 sizeof(info.hook_entry));
81178diff --git a/net/ipv4/netfilter/ip_queue.c b/net/ipv4/netfilter/ip_queue.c
81179index c156db2..e772975 100644
81180--- a/net/ipv4/netfilter/ip_queue.c
81181+++ b/net/ipv4/netfilter/ip_queue.c
81182@@ -286,6 +286,9 @@ ipq_mangle_ipv4(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81183
81184 if (v->data_len < sizeof(*user_iph))
81185 return 0;
81186+ if (v->data_len > 65535)
81187+ return -EMSGSIZE;
81188+
81189 diff = v->data_len - e->skb->len;
81190 if (diff < 0) {
81191 if (pskb_trim(e->skb, v->data_len))
81192@@ -409,7 +412,8 @@ ipq_dev_drop(int ifindex)
81193 static inline void
81194 __ipq_rcv_skb(struct sk_buff *skb)
81195 {
81196- int status, type, pid, flags, nlmsglen, skblen;
81197+ int status, type, pid, flags;
81198+ unsigned int nlmsglen, skblen;
81199 struct nlmsghdr *nlh;
81200
81201 skblen = skb->len;
81202diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
81203index 0606db1..02e7e4c 100644
81204--- a/net/ipv4/netfilter/ip_tables.c
81205+++ b/net/ipv4/netfilter/ip_tables.c
81206@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81207 private = &tmp;
81208 }
81209 #endif
81210+ memset(&info, 0, sizeof(info));
81211 info.valid_hooks = t->valid_hooks;
81212 memcpy(info.hook_entry, private->hook_entry,
81213 sizeof(info.hook_entry));
81214diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81215index d9521f6..3c3eb25 100644
81216--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
81217+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
81218@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(struct asn1_ctx *ctx,
81219
81220 *len = 0;
81221
81222- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
81223+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
81224 if (*octets == NULL) {
81225 if (net_ratelimit())
81226 printk("OOM in bsalg (%d)\n", __LINE__);
81227diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
81228index ab996f9..3da5f96 100644
81229--- a/net/ipv4/raw.c
81230+++ b/net/ipv4/raw.c
81231@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81232 /* Charge it to the socket. */
81233
81234 if (sock_queue_rcv_skb(sk, skb) < 0) {
81235- atomic_inc(&sk->sk_drops);
81236+ atomic_inc_unchecked(&sk->sk_drops);
81237 kfree_skb(skb);
81238 return NET_RX_DROP;
81239 }
81240@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk, struct sk_buff * skb)
81241 int raw_rcv(struct sock *sk, struct sk_buff *skb)
81242 {
81243 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
81244- atomic_inc(&sk->sk_drops);
81245+ atomic_inc_unchecked(&sk->sk_drops);
81246 kfree_skb(skb);
81247 return NET_RX_DROP;
81248 }
81249@@ -724,16 +724,23 @@ static int raw_init(struct sock *sk)
81250
81251 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
81252 {
81253+ struct icmp_filter filter;
81254+
81255+ if (optlen < 0)
81256+ return -EINVAL;
81257 if (optlen > sizeof(struct icmp_filter))
81258 optlen = sizeof(struct icmp_filter);
81259- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
81260+ if (copy_from_user(&filter, optval, optlen))
81261 return -EFAULT;
81262+ raw_sk(sk)->filter = filter;
81263+
81264 return 0;
81265 }
81266
81267 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
81268 {
81269 int len, ret = -EFAULT;
81270+ struct icmp_filter filter;
81271
81272 if (get_user(len, optlen))
81273 goto out;
81274@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
81275 if (len > sizeof(struct icmp_filter))
81276 len = sizeof(struct icmp_filter);
81277 ret = -EFAULT;
81278- if (put_user(len, optlen) ||
81279- copy_to_user(optval, &raw_sk(sk)->filter, len))
81280+ filter = raw_sk(sk)->filter;
81281+ if (put_user(len, optlen) || len > sizeof filter ||
81282+ copy_to_user(optval, &filter, len))
81283 goto out;
81284 ret = 0;
81285 out: return ret;
81286@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81287 sk_wmem_alloc_get(sp),
81288 sk_rmem_alloc_get(sp),
81289 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81290- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81291+ atomic_read(&sp->sk_refcnt),
81292+#ifdef CONFIG_GRKERNSEC_HIDESYM
81293+ NULL,
81294+#else
81295+ sp,
81296+#endif
81297+ atomic_read_unchecked(&sp->sk_drops));
81298 }
81299
81300 static int raw_seq_show(struct seq_file *seq, void *v)
81301diff --git a/net/ipv4/route.c b/net/ipv4/route.c
81302index 58f141b..b759702 100644
81303--- a/net/ipv4/route.c
81304+++ b/net/ipv4/route.c
81305@@ -269,7 +269,7 @@ static inline unsigned int rt_hash(__be32 daddr, __be32 saddr, int idx,
81306
81307 static inline int rt_genid(struct net *net)
81308 {
81309- return atomic_read(&net->ipv4.rt_genid);
81310+ return atomic_read_unchecked(&net->ipv4.rt_genid);
81311 }
81312
81313 #ifdef CONFIG_PROC_FS
81314@@ -889,7 +889,7 @@ static void rt_cache_invalidate(struct net *net)
81315 unsigned char shuffle;
81316
81317 get_random_bytes(&shuffle, sizeof(shuffle));
81318- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
81319+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
81320 }
81321
81322 /*
81323@@ -3357,7 +3357,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
81324
81325 static __net_init int rt_secret_timer_init(struct net *net)
81326 {
81327- atomic_set(&net->ipv4.rt_genid,
81328+ atomic_set_unchecked(&net->ipv4.rt_genid,
81329 (int) ((num_physpages ^ (num_physpages>>8)) ^
81330 (jiffies ^ (jiffies >> 7))));
81331
81332diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
81333index f095659..adc892a 100644
81334--- a/net/ipv4/tcp.c
81335+++ b/net/ipv4/tcp.c
81336@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
81337 int val;
81338 int err = 0;
81339
81340+ pax_track_stack();
81341+
81342 /* This is a string value all the others are int's */
81343 if (optname == TCP_CONGESTION) {
81344 char name[TCP_CA_NAME_MAX];
81345@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
81346 struct tcp_sock *tp = tcp_sk(sk);
81347 int val, len;
81348
81349+ pax_track_stack();
81350+
81351 if (get_user(len, optlen))
81352 return -EFAULT;
81353
81354diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
81355index 6fc7961..33bad4a 100644
81356--- a/net/ipv4/tcp_ipv4.c
81357+++ b/net/ipv4/tcp_ipv4.c
81358@@ -85,6 +85,9 @@
81359 int sysctl_tcp_tw_reuse __read_mostly;
81360 int sysctl_tcp_low_latency __read_mostly;
81361
81362+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81363+extern int grsec_enable_blackhole;
81364+#endif
81365
81366 #ifdef CONFIG_TCP_MD5SIG
81367 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
81368@@ -1543,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
81369 return 0;
81370
81371 reset:
81372+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81373+ if (!grsec_enable_blackhole)
81374+#endif
81375 tcp_v4_send_reset(rsk, skb);
81376 discard:
81377 kfree_skb(skb);
81378@@ -1604,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
81379 TCP_SKB_CB(skb)->sacked = 0;
81380
81381 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81382- if (!sk)
81383+ if (!sk) {
81384+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81385+ ret = 1;
81386+#endif
81387 goto no_tcp_socket;
81388+ }
81389
81390 process:
81391- if (sk->sk_state == TCP_TIME_WAIT)
81392+ if (sk->sk_state == TCP_TIME_WAIT) {
81393+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81394+ ret = 2;
81395+#endif
81396 goto do_time_wait;
81397+ }
81398
81399 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
81400 goto discard_and_relse;
81401@@ -1651,6 +1665,10 @@ no_tcp_socket:
81402 bad_packet:
81403 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81404 } else {
81405+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81406+ if (!grsec_enable_blackhole || (ret == 1 &&
81407+ (skb->dev->flags & IFF_LOOPBACK)))
81408+#endif
81409 tcp_v4_send_reset(NULL, skb);
81410 }
81411
81412@@ -2238,7 +2256,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
81413 0, /* non standard timer */
81414 0, /* open_requests have no inode */
81415 atomic_read(&sk->sk_refcnt),
81416+#ifdef CONFIG_GRKERNSEC_HIDESYM
81417+ NULL,
81418+#else
81419 req,
81420+#endif
81421 len);
81422 }
81423
81424@@ -2280,7 +2302,12 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
81425 sock_i_uid(sk),
81426 icsk->icsk_probes_out,
81427 sock_i_ino(sk),
81428- atomic_read(&sk->sk_refcnt), sk,
81429+ atomic_read(&sk->sk_refcnt),
81430+#ifdef CONFIG_GRKERNSEC_HIDESYM
81431+ NULL,
81432+#else
81433+ sk,
81434+#endif
81435 jiffies_to_clock_t(icsk->icsk_rto),
81436 jiffies_to_clock_t(icsk->icsk_ack.ato),
81437 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
81438@@ -2308,7 +2335,13 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
81439 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
81440 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
81441 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
81442- atomic_read(&tw->tw_refcnt), tw, len);
81443+ atomic_read(&tw->tw_refcnt),
81444+#ifdef CONFIG_GRKERNSEC_HIDESYM
81445+ NULL,
81446+#else
81447+ tw,
81448+#endif
81449+ len);
81450 }
81451
81452 #define TMPSZ 150
81453diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
81454index 4c03598..e09a8e8 100644
81455--- a/net/ipv4/tcp_minisocks.c
81456+++ b/net/ipv4/tcp_minisocks.c
81457@@ -26,6 +26,10 @@
81458 #include <net/inet_common.h>
81459 #include <net/xfrm.h>
81460
81461+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81462+extern int grsec_enable_blackhole;
81463+#endif
81464+
81465 #ifdef CONFIG_SYSCTL
81466 #define SYNC_INIT 0 /* let the user enable it */
81467 #else
81468@@ -672,6 +676,10 @@ listen_overflow:
81469
81470 embryonic_reset:
81471 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
81472+
81473+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81474+ if (!grsec_enable_blackhole)
81475+#endif
81476 if (!(flg & TCP_FLAG_RST))
81477 req->rsk_ops->send_reset(sk, skb);
81478
81479diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
81480index af83bdf..ec91cb2 100644
81481--- a/net/ipv4/tcp_output.c
81482+++ b/net/ipv4/tcp_output.c
81483@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
81484 __u8 *md5_hash_location;
81485 int mss;
81486
81487+ pax_track_stack();
81488+
81489 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
81490 if (skb == NULL)
81491 return NULL;
81492diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
81493index 59f5b5e..193860f 100644
81494--- a/net/ipv4/tcp_probe.c
81495+++ b/net/ipv4/tcp_probe.c
81496@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
81497 if (cnt + width >= len)
81498 break;
81499
81500- if (copy_to_user(buf + cnt, tbuf, width))
81501+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
81502 return -EFAULT;
81503 cnt += width;
81504 }
81505diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
81506index 57d5501..a9ed13a 100644
81507--- a/net/ipv4/tcp_timer.c
81508+++ b/net/ipv4/tcp_timer.c
81509@@ -21,6 +21,10 @@
81510 #include <linux/module.h>
81511 #include <net/tcp.h>
81512
81513+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81514+extern int grsec_lastack_retries;
81515+#endif
81516+
81517 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
81518 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
81519 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
81520@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock *sk)
81521 }
81522 }
81523
81524+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81525+ if ((sk->sk_state == TCP_LAST_ACK) &&
81526+ (grsec_lastack_retries > 0) &&
81527+ (grsec_lastack_retries < retry_until))
81528+ retry_until = grsec_lastack_retries;
81529+#endif
81530+
81531 if (retransmits_timed_out(sk, retry_until)) {
81532 /* Has it gone just too far? */
81533 tcp_write_err(sk);
81534diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
81535index 0ac8833..58d8c43 100644
81536--- a/net/ipv4/udp.c
81537+++ b/net/ipv4/udp.c
81538@@ -86,6 +86,7 @@
81539 #include <linux/types.h>
81540 #include <linux/fcntl.h>
81541 #include <linux/module.h>
81542+#include <linux/security.h>
81543 #include <linux/socket.h>
81544 #include <linux/sockios.h>
81545 #include <linux/igmp.h>
81546@@ -106,6 +107,10 @@
81547 #include <net/xfrm.h>
81548 #include "udp_impl.h"
81549
81550+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81551+extern int grsec_enable_blackhole;
81552+#endif
81553+
81554 struct udp_table udp_table;
81555 EXPORT_SYMBOL(udp_table);
81556
81557@@ -371,6 +376,9 @@ found:
81558 return s;
81559 }
81560
81561+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
81562+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
81563+
81564 /*
81565 * This routine is called by the ICMP module when it gets some
81566 * sort of error condition. If err < 0 then the socket should
81567@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
81568 dport = usin->sin_port;
81569 if (dport == 0)
81570 return -EINVAL;
81571+
81572+ err = gr_search_udp_sendmsg(sk, usin);
81573+ if (err)
81574+ return err;
81575 } else {
81576 if (sk->sk_state != TCP_ESTABLISHED)
81577 return -EDESTADDRREQ;
81578+
81579+ err = gr_search_udp_sendmsg(sk, NULL);
81580+ if (err)
81581+ return err;
81582+
81583 daddr = inet->daddr;
81584 dport = inet->dport;
81585 /* Open fast path for connected socket.
81586@@ -945,6 +962,10 @@ try_again:
81587 if (!skb)
81588 goto out;
81589
81590+ err = gr_search_udp_recvmsg(sk, skb);
81591+ if (err)
81592+ goto out_free;
81593+
81594 ulen = skb->len - sizeof(struct udphdr);
81595 copied = len;
81596 if (copied > ulen)
81597@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
81598 if (rc == -ENOMEM) {
81599 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
81600 is_udplite);
81601- atomic_inc(&sk->sk_drops);
81602+ atomic_inc_unchecked(&sk->sk_drops);
81603 }
81604 goto drop;
81605 }
81606@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81607 goto csum_error;
81608
81609 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
81610+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81611+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81612+#endif
81613 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
81614
81615 /*
81616@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
81617 sk_wmem_alloc_get(sp),
81618 sk_rmem_alloc_get(sp),
81619 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
81620- atomic_read(&sp->sk_refcnt), sp,
81621- atomic_read(&sp->sk_drops), len);
81622+ atomic_read(&sp->sk_refcnt),
81623+#ifdef CONFIG_GRKERNSEC_HIDESYM
81624+ NULL,
81625+#else
81626+ sp,
81627+#endif
81628+ atomic_read_unchecked(&sp->sk_drops), len);
81629 }
81630
81631 int udp4_seq_show(struct seq_file *seq, void *v)
81632diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
81633index 8ac3d09..fc58c5f 100644
81634--- a/net/ipv6/addrconf.c
81635+++ b/net/ipv6/addrconf.c
81636@@ -2053,7 +2053,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
81637 p.iph.ihl = 5;
81638 p.iph.protocol = IPPROTO_IPV6;
81639 p.iph.ttl = 64;
81640- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
81641+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
81642
81643 if (ops->ndo_do_ioctl) {
81644 mm_segment_t oldfs = get_fs();
81645diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
81646index cc4797d..7cfdfcc 100644
81647--- a/net/ipv6/inet6_connection_sock.c
81648+++ b/net/ipv6/inet6_connection_sock.c
81649@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *sk, struct dst_entry *dst,
81650 #ifdef CONFIG_XFRM
81651 {
81652 struct rt6_info *rt = (struct rt6_info *)dst;
81653- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
81654+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
81655 }
81656 #endif
81657 }
81658@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(struct sock *sk, u32 cookie)
81659 #ifdef CONFIG_XFRM
81660 if (dst) {
81661 struct rt6_info *rt = (struct rt6_info *)dst;
81662- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
81663+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
81664 sk->sk_dst_cache = NULL;
81665 dst_release(dst);
81666 dst = NULL;
81667diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
81668index 093e9b2..f72cddb 100644
81669--- a/net/ipv6/inet6_hashtables.c
81670+++ b/net/ipv6/inet6_hashtables.c
81671@@ -119,7 +119,7 @@ out:
81672 }
81673 EXPORT_SYMBOL(__inet6_lookup_established);
81674
81675-static int inline compute_score(struct sock *sk, struct net *net,
81676+static inline int compute_score(struct sock *sk, struct net *net,
81677 const unsigned short hnum,
81678 const struct in6_addr *daddr,
81679 const int dif)
81680diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
81681index 4f7aaf6..f7acf45 100644
81682--- a/net/ipv6/ipv6_sockglue.c
81683+++ b/net/ipv6/ipv6_sockglue.c
81684@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
81685 int val, valbool;
81686 int retv = -ENOPROTOOPT;
81687
81688+ pax_track_stack();
81689+
81690 if (optval == NULL)
81691 val=0;
81692 else {
81693@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81694 int len;
81695 int val;
81696
81697+ pax_track_stack();
81698+
81699 if (ip6_mroute_opt(optname))
81700 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
81701
81702@@ -922,7 +926,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
81703 if (sk->sk_type != SOCK_STREAM)
81704 return -ENOPROTOOPT;
81705
81706- msg.msg_control = optval;
81707+ msg.msg_control = (void __force_kernel *)optval;
81708 msg.msg_controllen = len;
81709 msg.msg_flags = 0;
81710
81711diff --git a/net/ipv6/netfilter/ip6_queue.c b/net/ipv6/netfilter/ip6_queue.c
81712index 1cf3f0c..1d4376f 100644
81713--- a/net/ipv6/netfilter/ip6_queue.c
81714+++ b/net/ipv6/netfilter/ip6_queue.c
81715@@ -287,6 +287,9 @@ ipq_mangle_ipv6(ipq_verdict_msg_t *v, struct nf_queue_entry *e)
81716
81717 if (v->data_len < sizeof(*user_iph))
81718 return 0;
81719+ if (v->data_len > 65535)
81720+ return -EMSGSIZE;
81721+
81722 diff = v->data_len - e->skb->len;
81723 if (diff < 0) {
81724 if (pskb_trim(e->skb, v->data_len))
81725@@ -411,7 +414,8 @@ ipq_dev_drop(int ifindex)
81726 static inline void
81727 __ipq_rcv_skb(struct sk_buff *skb)
81728 {
81729- int status, type, pid, flags, nlmsglen, skblen;
81730+ int status, type, pid, flags;
81731+ unsigned int nlmsglen, skblen;
81732 struct nlmsghdr *nlh;
81733
81734 skblen = skb->len;
81735diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
81736index 78b5a36..7f37433 100644
81737--- a/net/ipv6/netfilter/ip6_tables.c
81738+++ b/net/ipv6/netfilter/ip6_tables.c
81739@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, void __user *user, int *len, int compat)
81740 private = &tmp;
81741 }
81742 #endif
81743+ memset(&info, 0, sizeof(info));
81744 info.valid_hooks = t->valid_hooks;
81745 memcpy(info.hook_entry, private->hook_entry,
81746 sizeof(info.hook_entry));
81747diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
81748index 4f24570..b813b34 100644
81749--- a/net/ipv6/raw.c
81750+++ b/net/ipv6/raw.c
81751@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
81752 {
81753 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
81754 skb_checksum_complete(skb)) {
81755- atomic_inc(&sk->sk_drops);
81756+ atomic_inc_unchecked(&sk->sk_drops);
81757 kfree_skb(skb);
81758 return NET_RX_DROP;
81759 }
81760
81761 /* Charge it to the socket. */
81762 if (sock_queue_rcv_skb(sk,skb)<0) {
81763- atomic_inc(&sk->sk_drops);
81764+ atomic_inc_unchecked(&sk->sk_drops);
81765 kfree_skb(skb);
81766 return NET_RX_DROP;
81767 }
81768@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81769 struct raw6_sock *rp = raw6_sk(sk);
81770
81771 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
81772- atomic_inc(&sk->sk_drops);
81773+ atomic_inc_unchecked(&sk->sk_drops);
81774 kfree_skb(skb);
81775 return NET_RX_DROP;
81776 }
81777@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
81778
81779 if (inet->hdrincl) {
81780 if (skb_checksum_complete(skb)) {
81781- atomic_inc(&sk->sk_drops);
81782+ atomic_inc_unchecked(&sk->sk_drops);
81783 kfree_skb(skb);
81784 return NET_RX_DROP;
81785 }
81786@@ -518,7 +518,7 @@ csum_copy_err:
81787 as some normal condition.
81788 */
81789 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
81790- atomic_inc(&sk->sk_drops);
81791+ atomic_inc_unchecked(&sk->sk_drops);
81792 goto out;
81793 }
81794
81795@@ -600,7 +600,7 @@ out:
81796 return err;
81797 }
81798
81799-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
81800+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
81801 struct flowi *fl, struct rt6_info *rt,
81802 unsigned int flags)
81803 {
81804@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
81805 u16 proto;
81806 int err;
81807
81808+ pax_track_stack();
81809+
81810 /* Rough check on arithmetic overflow,
81811 better check is made in ip6_append_data().
81812 */
81813@@ -916,12 +918,17 @@ do_confirm:
81814 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
81815 char __user *optval, int optlen)
81816 {
81817+ struct icmp6_filter filter;
81818+
81819 switch (optname) {
81820 case ICMPV6_FILTER:
81821+ if (optlen < 0)
81822+ return -EINVAL;
81823 if (optlen > sizeof(struct icmp6_filter))
81824 optlen = sizeof(struct icmp6_filter);
81825- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
81826+ if (copy_from_user(&filter, optval, optlen))
81827 return -EFAULT;
81828+ raw6_sk(sk)->filter = filter;
81829 return 0;
81830 default:
81831 return -ENOPROTOOPT;
81832@@ -934,6 +941,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81833 char __user *optval, int __user *optlen)
81834 {
81835 int len;
81836+ struct icmp6_filter filter;
81837
81838 switch (optname) {
81839 case ICMPV6_FILTER:
81840@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
81841 len = sizeof(struct icmp6_filter);
81842 if (put_user(len, optlen))
81843 return -EFAULT;
81844- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
81845+ filter = raw6_sk(sk)->filter;
81846+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
81847 return -EFAULT;
81848 return 0;
81849 default:
81850@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
81851 0, 0L, 0,
81852 sock_i_uid(sp), 0,
81853 sock_i_ino(sp),
81854- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
81855+ atomic_read(&sp->sk_refcnt),
81856+#ifdef CONFIG_GRKERNSEC_HIDESYM
81857+ NULL,
81858+#else
81859+ sp,
81860+#endif
81861+ atomic_read_unchecked(&sp->sk_drops));
81862 }
81863
81864 static int raw6_seq_show(struct seq_file *seq, void *v)
81865diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
81866index faae6df..d4430c1 100644
81867--- a/net/ipv6/tcp_ipv6.c
81868+++ b/net/ipv6/tcp_ipv6.c
81869@@ -89,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(struct sock *sk,
81870 }
81871 #endif
81872
81873+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81874+extern int grsec_enable_blackhole;
81875+#endif
81876+
81877 static void tcp_v6_hash(struct sock *sk)
81878 {
81879 if (sk->sk_state != TCP_CLOSE) {
81880@@ -1579,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
81881 return 0;
81882
81883 reset:
81884+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81885+ if (!grsec_enable_blackhole)
81886+#endif
81887 tcp_v6_send_reset(sk, skb);
81888 discard:
81889 if (opt_skb)
81890@@ -1656,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
81891 TCP_SKB_CB(skb)->sacked = 0;
81892
81893 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
81894- if (!sk)
81895+ if (!sk) {
81896+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81897+ ret = 1;
81898+#endif
81899 goto no_tcp_socket;
81900+ }
81901
81902 process:
81903- if (sk->sk_state == TCP_TIME_WAIT)
81904+ if (sk->sk_state == TCP_TIME_WAIT) {
81905+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81906+ ret = 2;
81907+#endif
81908 goto do_time_wait;
81909+ }
81910
81911 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
81912 goto discard_and_relse;
81913@@ -1701,6 +1716,10 @@ no_tcp_socket:
81914 bad_packet:
81915 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
81916 } else {
81917+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81918+ if (!grsec_enable_blackhole || (ret == 1 &&
81919+ (skb->dev->flags & IFF_LOOPBACK)))
81920+#endif
81921 tcp_v6_send_reset(NULL, skb);
81922 }
81923
81924@@ -1916,7 +1935,13 @@ static void get_openreq6(struct seq_file *seq,
81925 uid,
81926 0, /* non standard timer */
81927 0, /* open_requests have no inode */
81928- 0, req);
81929+ 0,
81930+#ifdef CONFIG_GRKERNSEC_HIDESYM
81931+ NULL
81932+#else
81933+ req
81934+#endif
81935+ );
81936 }
81937
81938 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
81939@@ -1966,7 +1991,12 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
81940 sock_i_uid(sp),
81941 icsk->icsk_probes_out,
81942 sock_i_ino(sp),
81943- atomic_read(&sp->sk_refcnt), sp,
81944+ atomic_read(&sp->sk_refcnt),
81945+#ifdef CONFIG_GRKERNSEC_HIDESYM
81946+ NULL,
81947+#else
81948+ sp,
81949+#endif
81950 jiffies_to_clock_t(icsk->icsk_rto),
81951 jiffies_to_clock_t(icsk->icsk_ack.ato),
81952 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
81953@@ -2001,7 +2031,13 @@ static void get_timewait6_sock(struct seq_file *seq,
81954 dest->s6_addr32[2], dest->s6_addr32[3], destp,
81955 tw->tw_substate, 0, 0,
81956 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
81957- atomic_read(&tw->tw_refcnt), tw);
81958+ atomic_read(&tw->tw_refcnt),
81959+#ifdef CONFIG_GRKERNSEC_HIDESYM
81960+ NULL
81961+#else
81962+ tw
81963+#endif
81964+ );
81965 }
81966
81967 static int tcp6_seq_show(struct seq_file *seq, void *v)
81968diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
81969index 9cc6289..052c521 100644
81970--- a/net/ipv6/udp.c
81971+++ b/net/ipv6/udp.c
81972@@ -49,6 +49,10 @@
81973 #include <linux/seq_file.h>
81974 #include "udp_impl.h"
81975
81976+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81977+extern int grsec_enable_blackhole;
81978+#endif
81979+
81980 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
81981 {
81982 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
81983@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
81984 if (rc == -ENOMEM) {
81985 UDP6_INC_STATS_BH(sock_net(sk),
81986 UDP_MIB_RCVBUFERRORS, is_udplite);
81987- atomic_inc(&sk->sk_drops);
81988+ atomic_inc_unchecked(&sk->sk_drops);
81989 }
81990 goto drop;
81991 }
81992@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
81993 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
81994 proto == IPPROTO_UDPLITE);
81995
81996+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
81997+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
81998+#endif
81999 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
82000
82001 kfree_skb(skb);
82002@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
82003 0, 0L, 0,
82004 sock_i_uid(sp), 0,
82005 sock_i_ino(sp),
82006- atomic_read(&sp->sk_refcnt), sp,
82007- atomic_read(&sp->sk_drops));
82008+ atomic_read(&sp->sk_refcnt),
82009+#ifdef CONFIG_GRKERNSEC_HIDESYM
82010+ NULL,
82011+#else
82012+ sp,
82013+#endif
82014+ atomic_read_unchecked(&sp->sk_drops));
82015 }
82016
82017 int udp6_seq_show(struct seq_file *seq, void *v)
82018diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
82019index 811984d..11f59b7 100644
82020--- a/net/irda/ircomm/ircomm_tty.c
82021+++ b/net/irda/ircomm/ircomm_tty.c
82022@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82023 add_wait_queue(&self->open_wait, &wait);
82024
82025 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
82026- __FILE__,__LINE__, tty->driver->name, self->open_count );
82027+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82028
82029 /* As far as I can see, we protect open_count - Jean II */
82030 spin_lock_irqsave(&self->spinlock, flags);
82031 if (!tty_hung_up_p(filp)) {
82032 extra_count = 1;
82033- self->open_count--;
82034+ local_dec(&self->open_count);
82035 }
82036 spin_unlock_irqrestore(&self->spinlock, flags);
82037- self->blocked_open++;
82038+ local_inc(&self->blocked_open);
82039
82040 while (1) {
82041 if (tty->termios->c_cflag & CBAUD) {
82042@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82043 }
82044
82045 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
82046- __FILE__,__LINE__, tty->driver->name, self->open_count );
82047+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
82048
82049 schedule();
82050 }
82051@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
82052 if (extra_count) {
82053 /* ++ is not atomic, so this should be protected - Jean II */
82054 spin_lock_irqsave(&self->spinlock, flags);
82055- self->open_count++;
82056+ local_inc(&self->open_count);
82057 spin_unlock_irqrestore(&self->spinlock, flags);
82058 }
82059- self->blocked_open--;
82060+ local_dec(&self->blocked_open);
82061
82062 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
82063- __FILE__,__LINE__, tty->driver->name, self->open_count);
82064+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
82065
82066 if (!retval)
82067 self->flags |= ASYNC_NORMAL_ACTIVE;
82068@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
82069 }
82070 /* ++ is not atomic, so this should be protected - Jean II */
82071 spin_lock_irqsave(&self->spinlock, flags);
82072- self->open_count++;
82073+ local_inc(&self->open_count);
82074
82075 tty->driver_data = self;
82076 self->tty = tty;
82077 spin_unlock_irqrestore(&self->spinlock, flags);
82078
82079 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
82080- self->line, self->open_count);
82081+ self->line, local_read(&self->open_count));
82082
82083 /* Not really used by us, but lets do it anyway */
82084 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
82085@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82086 return;
82087 }
82088
82089- if ((tty->count == 1) && (self->open_count != 1)) {
82090+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
82091 /*
82092 * Uh, oh. tty->count is 1, which means that the tty
82093 * structure will be freed. state->count should always
82094@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82095 */
82096 IRDA_DEBUG(0, "%s(), bad serial port count; "
82097 "tty->count is 1, state->count is %d\n", __func__ ,
82098- self->open_count);
82099- self->open_count = 1;
82100+ local_read(&self->open_count));
82101+ local_set(&self->open_count, 1);
82102 }
82103
82104- if (--self->open_count < 0) {
82105+ if (local_dec_return(&self->open_count) < 0) {
82106 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
82107- __func__, self->line, self->open_count);
82108- self->open_count = 0;
82109+ __func__, self->line, local_read(&self->open_count));
82110+ local_set(&self->open_count, 0);
82111 }
82112- if (self->open_count) {
82113+ if (local_read(&self->open_count)) {
82114 spin_unlock_irqrestore(&self->spinlock, flags);
82115
82116 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
82117@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_struct *tty, struct file *filp)
82118 tty->closing = 0;
82119 self->tty = NULL;
82120
82121- if (self->blocked_open) {
82122+ if (local_read(&self->blocked_open)) {
82123 if (self->close_delay)
82124 schedule_timeout_interruptible(self->close_delay);
82125 wake_up_interruptible(&self->open_wait);
82126@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
82127 spin_lock_irqsave(&self->spinlock, flags);
82128 self->flags &= ~ASYNC_NORMAL_ACTIVE;
82129 self->tty = NULL;
82130- self->open_count = 0;
82131+ local_set(&self->open_count, 0);
82132 spin_unlock_irqrestore(&self->spinlock, flags);
82133
82134 wake_up_interruptible(&self->open_wait);
82135@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
82136 seq_putc(m, '\n');
82137
82138 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
82139- seq_printf(m, "Open count: %d\n", self->open_count);
82140+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
82141 seq_printf(m, "Max data size: %d\n", self->max_data_size);
82142 seq_printf(m, "Max header size: %d\n", self->max_header_size);
82143
82144diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
82145index bada1b9..f325943 100644
82146--- a/net/iucv/af_iucv.c
82147+++ b/net/iucv/af_iucv.c
82148@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct sock *sk)
82149
82150 write_lock_bh(&iucv_sk_list.lock);
82151
82152- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
82153+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82154 while (__iucv_get_sock_by_name(name)) {
82155 sprintf(name, "%08x",
82156- atomic_inc_return(&iucv_sk_list.autobind_name));
82157+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
82158 }
82159
82160 write_unlock_bh(&iucv_sk_list.lock);
82161diff --git a/net/key/af_key.c b/net/key/af_key.c
82162index 4e98193..439b449 100644
82163--- a/net/key/af_key.c
82164+++ b/net/key/af_key.c
82165@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk, struct sk_buff *skb,
82166 struct xfrm_migrate m[XFRM_MAX_DEPTH];
82167 struct xfrm_kmaddress k;
82168
82169+ pax_track_stack();
82170+
82171 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
82172 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
82173 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
82174@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_file *f, void *v)
82175 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
82176 else
82177 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
82178+#ifdef CONFIG_GRKERNSEC_HIDESYM
82179+ NULL,
82180+#else
82181 s,
82182+#endif
82183 atomic_read(&s->sk_refcnt),
82184 sk_rmem_alloc_get(s),
82185 sk_wmem_alloc_get(s),
82186diff --git a/net/lapb/lapb_iface.c b/net/lapb/lapb_iface.c
82187index bda96d1..c038b72 100644
82188--- a/net/lapb/lapb_iface.c
82189+++ b/net/lapb/lapb_iface.c
82190@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev, struct lapb_register_struct *callbacks
82191 goto out;
82192
82193 lapb->dev = dev;
82194- lapb->callbacks = *callbacks;
82195+ lapb->callbacks = callbacks;
82196
82197 __lapb_insert_cb(lapb);
82198
82199@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device *dev, struct sk_buff *skb)
82200
82201 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
82202 {
82203- if (lapb->callbacks.connect_confirmation)
82204- lapb->callbacks.connect_confirmation(lapb->dev, reason);
82205+ if (lapb->callbacks->connect_confirmation)
82206+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
82207 }
82208
82209 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
82210 {
82211- if (lapb->callbacks.connect_indication)
82212- lapb->callbacks.connect_indication(lapb->dev, reason);
82213+ if (lapb->callbacks->connect_indication)
82214+ lapb->callbacks->connect_indication(lapb->dev, reason);
82215 }
82216
82217 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
82218 {
82219- if (lapb->callbacks.disconnect_confirmation)
82220- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
82221+ if (lapb->callbacks->disconnect_confirmation)
82222+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
82223 }
82224
82225 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
82226 {
82227- if (lapb->callbacks.disconnect_indication)
82228- lapb->callbacks.disconnect_indication(lapb->dev, reason);
82229+ if (lapb->callbacks->disconnect_indication)
82230+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
82231 }
82232
82233 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
82234 {
82235- if (lapb->callbacks.data_indication)
82236- return lapb->callbacks.data_indication(lapb->dev, skb);
82237+ if (lapb->callbacks->data_indication)
82238+ return lapb->callbacks->data_indication(lapb->dev, skb);
82239
82240 kfree_skb(skb);
82241 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
82242@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *lapb, struct sk_buff *skb)
82243 {
82244 int used = 0;
82245
82246- if (lapb->callbacks.data_transmit) {
82247- lapb->callbacks.data_transmit(lapb->dev, skb);
82248+ if (lapb->callbacks->data_transmit) {
82249+ lapb->callbacks->data_transmit(lapb->dev, skb);
82250 used = 1;
82251 }
82252
82253diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
82254index fe2d3f8..e57f683 100644
82255--- a/net/mac80211/cfg.c
82256+++ b/net/mac80211/cfg.c
82257@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(struct wiphy *wiphy,
82258 return err;
82259 }
82260
82261-struct cfg80211_ops mac80211_config_ops = {
82262+const struct cfg80211_ops mac80211_config_ops = {
82263 .add_virtual_intf = ieee80211_add_iface,
82264 .del_virtual_intf = ieee80211_del_iface,
82265 .change_virtual_intf = ieee80211_change_iface,
82266diff --git a/net/mac80211/cfg.h b/net/mac80211/cfg.h
82267index 7d7879f..2d51f62 100644
82268--- a/net/mac80211/cfg.h
82269+++ b/net/mac80211/cfg.h
82270@@ -4,6 +4,6 @@
82271 #ifndef __CFG_H
82272 #define __CFG_H
82273
82274-extern struct cfg80211_ops mac80211_config_ops;
82275+extern const struct cfg80211_ops mac80211_config_ops;
82276
82277 #endif /* __CFG_H */
82278diff --git a/net/mac80211/debugfs_key.c b/net/mac80211/debugfs_key.c
82279index 99c7525..9cb4937 100644
82280--- a/net/mac80211/debugfs_key.c
82281+++ b/net/mac80211/debugfs_key.c
82282@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file *file, char __user *userbuf,
82283 size_t count, loff_t *ppos)
82284 {
82285 struct ieee80211_key *key = file->private_data;
82286- int i, res, bufsize = 2 * key->conf.keylen + 2;
82287+ int i, bufsize = 2 * key->conf.keylen + 2;
82288 char *buf = kmalloc(bufsize, GFP_KERNEL);
82289 char *p = buf;
82290+ ssize_t res;
82291+
82292+ if (buf == NULL)
82293+ return -ENOMEM;
82294
82295 for (i = 0; i < key->conf.keylen; i++)
82296 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
82297diff --git a/net/mac80211/debugfs_sta.c b/net/mac80211/debugfs_sta.c
82298index 33a2e89..08650c8 100644
82299--- a/net/mac80211/debugfs_sta.c
82300+++ b/net/mac80211/debugfs_sta.c
82301@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struct file *file, char __user *userbuf,
82302 int i;
82303 struct sta_info *sta = file->private_data;
82304
82305+ pax_track_stack();
82306+
82307 spin_lock_bh(&sta->lock);
82308 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
82309 sta->ampdu_mlme.dialog_token_allocator + 1);
82310diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
82311index ca62bfe..6657a03 100644
82312--- a/net/mac80211/ieee80211_i.h
82313+++ b/net/mac80211/ieee80211_i.h
82314@@ -25,6 +25,7 @@
82315 #include <linux/etherdevice.h>
82316 #include <net/cfg80211.h>
82317 #include <net/mac80211.h>
82318+#include <asm/local.h>
82319 #include "key.h"
82320 #include "sta_info.h"
82321
82322@@ -635,7 +636,7 @@ struct ieee80211_local {
82323 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
82324 spinlock_t queue_stop_reason_lock;
82325
82326- int open_count;
82327+ local_t open_count;
82328 int monitors, cooked_mntrs;
82329 /* number of interfaces with corresponding FIF_ flags */
82330 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
82331diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
82332index 079c500..eb3c6d4 100644
82333--- a/net/mac80211/iface.c
82334+++ b/net/mac80211/iface.c
82335@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_device *dev)
82336 break;
82337 }
82338
82339- if (local->open_count == 0) {
82340+ if (local_read(&local->open_count) == 0) {
82341 res = drv_start(local);
82342 if (res)
82343 goto err_del_bss;
82344@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_device *dev)
82345 * Validate the MAC address for this device.
82346 */
82347 if (!is_valid_ether_addr(dev->dev_addr)) {
82348- if (!local->open_count)
82349+ if (!local_read(&local->open_count))
82350 drv_stop(local);
82351 return -EADDRNOTAVAIL;
82352 }
82353@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_device *dev)
82354
82355 hw_reconf_flags |= __ieee80211_recalc_idle(local);
82356
82357- local->open_count++;
82358+ local_inc(&local->open_count);
82359 if (hw_reconf_flags) {
82360 ieee80211_hw_config(local, hw_reconf_flags);
82361 /*
82362@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_device *dev)
82363 err_del_interface:
82364 drv_remove_interface(local, &conf);
82365 err_stop:
82366- if (!local->open_count)
82367+ if (!local_read(&local->open_count))
82368 drv_stop(local);
82369 err_del_bss:
82370 sdata->bss = NULL;
82371@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_device *dev)
82372 WARN_ON(!list_empty(&sdata->u.ap.vlans));
82373 }
82374
82375- local->open_count--;
82376+ local_dec(&local->open_count);
82377
82378 switch (sdata->vif.type) {
82379 case NL80211_IFTYPE_AP_VLAN:
82380@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_device *dev)
82381
82382 ieee80211_recalc_ps(local, -1);
82383
82384- if (local->open_count == 0) {
82385+ if (local_read(&local->open_count) == 0) {
82386 ieee80211_clear_tx_pending(local);
82387 ieee80211_stop_device(local);
82388
82389diff --git a/net/mac80211/main.c b/net/mac80211/main.c
82390index 2dfe176..74e4388 100644
82391--- a/net/mac80211/main.c
82392+++ b/net/mac80211/main.c
82393@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
82394 local->hw.conf.power_level = power;
82395 }
82396
82397- if (changed && local->open_count) {
82398+ if (changed && local_read(&local->open_count)) {
82399 ret = drv_config(local, changed);
82400 /*
82401 * Goal:
82402diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
82403index e67eea7..fcc227e 100644
82404--- a/net/mac80211/mlme.c
82405+++ b/net/mac80211/mlme.c
82406@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee80211_sub_if_data *sdata,
82407 bool have_higher_than_11mbit = false, newsta = false;
82408 u16 ap_ht_cap_flags;
82409
82410+ pax_track_stack();
82411+
82412 /*
82413 * AssocResp and ReassocResp have identical structure, so process both
82414 * of them in this function.
82415diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
82416index e535f1c..4d733d1 100644
82417--- a/net/mac80211/pm.c
82418+++ b/net/mac80211/pm.c
82419@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw)
82420 }
82421
82422 /* stop hardware - this must stop RX */
82423- if (local->open_count)
82424+ if (local_read(&local->open_count))
82425 ieee80211_stop_device(local);
82426
82427 local->suspended = true;
82428diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
82429index b33efc4..0a2efb6 100644
82430--- a/net/mac80211/rate.c
82431+++ b/net/mac80211/rate.c
82432@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
82433 struct rate_control_ref *ref, *old;
82434
82435 ASSERT_RTNL();
82436- if (local->open_count)
82437+ if (local_read(&local->open_count))
82438 return -EBUSY;
82439
82440 ref = rate_control_alloc(name, local);
82441diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
82442index b1d7904..57e4da7 100644
82443--- a/net/mac80211/tx.c
82444+++ b/net/mac80211/tx.c
82445@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx, int group_addr,
82446 return cpu_to_le16(dur);
82447 }
82448
82449-static int inline is_ieee80211_device(struct ieee80211_local *local,
82450+static inline int is_ieee80211_device(struct ieee80211_local *local,
82451 struct net_device *dev)
82452 {
82453 return local == wdev_priv(dev->ieee80211_ptr);
82454diff --git a/net/mac80211/util.c b/net/mac80211/util.c
82455index 31b1085..48fb26d 100644
82456--- a/net/mac80211/util.c
82457+++ b/net/mac80211/util.c
82458@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
82459 local->resuming = true;
82460
82461 /* restart hardware */
82462- if (local->open_count) {
82463+ if (local_read(&local->open_count)) {
82464 /*
82465 * Upon resume hardware can sometimes be goofy due to
82466 * various platform / driver / bus issues, so restarting
82467diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
82468index 634d14a..b35a608 100644
82469--- a/net/netfilter/Kconfig
82470+++ b/net/netfilter/Kconfig
82471@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
82472
82473 To compile it as a module, choose M here. If unsure, say N.
82474
82475+config NETFILTER_XT_MATCH_GRADM
82476+ tristate '"gradm" match support'
82477+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
82478+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
82479+ ---help---
82480+ The gradm match allows to match on grsecurity RBAC being enabled.
82481+ It is useful when iptables rules are applied early on bootup to
82482+ prevent connections to the machine (except from a trusted host)
82483+ while the RBAC system is disabled.
82484+
82485 config NETFILTER_XT_MATCH_HASHLIMIT
82486 tristate '"hashlimit" match support'
82487 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
82488diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
82489index 49f62ee..a17b2c6 100644
82490--- a/net/netfilter/Makefile
82491+++ b/net/netfilter/Makefile
82492@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRACK) += xt_conntrack.o
82493 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
82494 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
82495 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
82496+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
82497 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
82498 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
82499 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
82500diff --git a/net/netfilter/ipvs/ip_vs_app.c b/net/netfilter/ipvs/ip_vs_app.c
82501index 3c7e427..724043c 100644
82502--- a/net/netfilter/ipvs/ip_vs_app.c
82503+++ b/net/netfilter/ipvs/ip_vs_app.c
82504@@ -564,7 +564,7 @@ static const struct file_operations ip_vs_app_fops = {
82505 .open = ip_vs_app_open,
82506 .read = seq_read,
82507 .llseek = seq_lseek,
82508- .release = seq_release,
82509+ .release = seq_release_net,
82510 };
82511 #endif
82512
82513diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
82514index 95682e5..457dbac 100644
82515--- a/net/netfilter/ipvs/ip_vs_conn.c
82516+++ b/net/netfilter/ipvs/ip_vs_conn.c
82517@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
82518 /* if the connection is not template and is created
82519 * by sync, preserve the activity flag.
82520 */
82521- cp->flags |= atomic_read(&dest->conn_flags) &
82522+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
82523 (~IP_VS_CONN_F_INACTIVE);
82524 else
82525- cp->flags |= atomic_read(&dest->conn_flags);
82526+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
82527 cp->dest = dest;
82528
82529 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
82530@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport,
82531 atomic_set(&cp->refcnt, 1);
82532
82533 atomic_set(&cp->n_control, 0);
82534- atomic_set(&cp->in_pkts, 0);
82535+ atomic_set_unchecked(&cp->in_pkts, 0);
82536
82537 atomic_inc(&ip_vs_conn_count);
82538 if (flags & IP_VS_CONN_F_NO_CPORT)
82539@@ -871,7 +871,7 @@ static const struct file_operations ip_vs_conn_fops = {
82540 .open = ip_vs_conn_open,
82541 .read = seq_read,
82542 .llseek = seq_lseek,
82543- .release = seq_release,
82544+ .release = seq_release_net,
82545 };
82546
82547 static const char *ip_vs_origin_name(unsigned flags)
82548@@ -934,7 +934,7 @@ static const struct file_operations ip_vs_conn_sync_fops = {
82549 .open = ip_vs_conn_sync_open,
82550 .read = seq_read,
82551 .llseek = seq_lseek,
82552- .release = seq_release,
82553+ .release = seq_release_net,
82554 };
82555
82556 #endif
82557@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
82558
82559 /* Don't drop the entry if its number of incoming packets is not
82560 located in [0, 8] */
82561- i = atomic_read(&cp->in_pkts);
82562+ i = atomic_read_unchecked(&cp->in_pkts);
82563 if (i > 8 || i < 0) return 0;
82564
82565 if (!todrop_rate[i]) return 0;
82566diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
82567index b95699f..5fee919 100644
82568--- a/net/netfilter/ipvs/ip_vs_core.c
82569+++ b/net/netfilter/ipvs/ip_vs_core.c
82570@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
82571 ret = cp->packet_xmit(skb, cp, pp);
82572 /* do not touch skb anymore */
82573
82574- atomic_inc(&cp->in_pkts);
82575+ atomic_inc_unchecked(&cp->in_pkts);
82576 ip_vs_conn_put(cp);
82577 return ret;
82578 }
82579@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb,
82580 * Sync connection if it is about to close to
82581 * encorage the standby servers to update the connections timeout
82582 */
82583- pkts = atomic_add_return(1, &cp->in_pkts);
82584+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
82585 if (af == AF_INET &&
82586 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
82587 (((cp->protocol != IPPROTO_TCP ||
82588diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
82589index 02b2610..2d89424 100644
82590--- a/net/netfilter/ipvs/ip_vs_ctl.c
82591+++ b/net/netfilter/ipvs/ip_vs_ctl.c
82592@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc,
82593 ip_vs_rs_hash(dest);
82594 write_unlock_bh(&__ip_vs_rs_lock);
82595 }
82596- atomic_set(&dest->conn_flags, conn_flags);
82597+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
82598
82599 /* bind the service */
82600 if (!dest->svc) {
82601@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82602 " %-7s %-6d %-10d %-10d\n",
82603 &dest->addr.in6,
82604 ntohs(dest->port),
82605- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82606+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82607 atomic_read(&dest->weight),
82608 atomic_read(&dest->activeconns),
82609 atomic_read(&dest->inactconns));
82610@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
82611 "%-7s %-6d %-10d %-10d\n",
82612 ntohl(dest->addr.ip),
82613 ntohs(dest->port),
82614- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
82615+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
82616 atomic_read(&dest->weight),
82617 atomic_read(&dest->activeconns),
82618 atomic_read(&dest->inactconns));
82619@@ -1927,7 +1927,7 @@ static const struct file_operations ip_vs_info_fops = {
82620 .open = ip_vs_info_open,
82621 .read = seq_read,
82622 .llseek = seq_lseek,
82623- .release = seq_release_private,
82624+ .release = seq_release_net,
82625 };
82626
82627 #endif
82628@@ -1976,7 +1976,7 @@ static const struct file_operations ip_vs_stats_fops = {
82629 .open = ip_vs_stats_seq_open,
82630 .read = seq_read,
82631 .llseek = seq_lseek,
82632- .release = single_release,
82633+ .release = single_release_net,
82634 };
82635
82636 #endif
82637@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip_vs_get_dests *get,
82638
82639 entry.addr = dest->addr.ip;
82640 entry.port = dest->port;
82641- entry.conn_flags = atomic_read(&dest->conn_flags);
82642+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
82643 entry.weight = atomic_read(&dest->weight);
82644 entry.u_threshold = dest->u_threshold;
82645 entry.l_threshold = dest->l_threshold;
82646@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
82647 unsigned char arg[128];
82648 int ret = 0;
82649
82650+ pax_track_stack();
82651+
82652 if (!capable(CAP_NET_ADMIN))
82653 return -EPERM;
82654
82655@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
82656 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
82657
82658 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
82659- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82660+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
82661 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
82662 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
82663 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
82664diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
82665index e177f0d..55e8581 100644
82666--- a/net/netfilter/ipvs/ip_vs_sync.c
82667+++ b/net/netfilter/ipvs/ip_vs_sync.c
82668@@ -438,7 +438,7 @@ static void ip_vs_process_message(const char *buffer, const size_t buflen)
82669
82670 if (opt)
82671 memcpy(&cp->in_seq, opt, sizeof(*opt));
82672- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82673+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
82674 cp->state = state;
82675 cp->old_state = cp->state;
82676 /*
82677diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
82678index 30b3189..e2e4b55 100644
82679--- a/net/netfilter/ipvs/ip_vs_xmit.c
82680+++ b/net/netfilter/ipvs/ip_vs_xmit.c
82681@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
82682 else
82683 rc = NF_ACCEPT;
82684 /* do not touch skb anymore */
82685- atomic_inc(&cp->in_pkts);
82686+ atomic_inc_unchecked(&cp->in_pkts);
82687 goto out;
82688 }
82689
82690@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
82691 else
82692 rc = NF_ACCEPT;
82693 /* do not touch skb anymore */
82694- atomic_inc(&cp->in_pkts);
82695+ atomic_inc_unchecked(&cp->in_pkts);
82696 goto out;
82697 }
82698
82699diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
82700index d521718..d0fd7a1 100644
82701--- a/net/netfilter/nf_conntrack_netlink.c
82702+++ b/net/netfilter/nf_conntrack_netlink.c
82703@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlattr *attr,
82704 static int
82705 ctnetlink_parse_tuple(const struct nlattr * const cda[],
82706 struct nf_conntrack_tuple *tuple,
82707- enum ctattr_tuple type, u_int8_t l3num)
82708+ enum ctattr_type type, u_int8_t l3num)
82709 {
82710 struct nlattr *tb[CTA_TUPLE_MAX+1];
82711 int err;
82712diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
82713index f900dc3..5e45346 100644
82714--- a/net/netfilter/nfnetlink_log.c
82715+++ b/net/netfilter/nfnetlink_log.c
82716@@ -68,7 +68,7 @@ struct nfulnl_instance {
82717 };
82718
82719 static DEFINE_RWLOCK(instances_lock);
82720-static atomic_t global_seq;
82721+static atomic_unchecked_t global_seq;
82722
82723 #define INSTANCE_BUCKETS 16
82724 static struct hlist_head instance_table[INSTANCE_BUCKETS];
82725@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_instance *inst,
82726 /* global sequence number */
82727 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
82728 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
82729- htonl(atomic_inc_return(&global_seq)));
82730+ htonl(atomic_inc_return_unchecked(&global_seq)));
82731
82732 if (data_len) {
82733 struct nlattr *nla;
82734diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
82735new file mode 100644
82736index 0000000..b1bac76
82737--- /dev/null
82738+++ b/net/netfilter/xt_gradm.c
82739@@ -0,0 +1,51 @@
82740+/*
82741+ * gradm match for netfilter
82742